| Column | Type | Min length | Max length |
|---|---|---|---|
| commit | string | 40 | 40 |
| old_file | string | 4 | 118 |
| new_file | string | 4 | 118 |
| old_contents | string | 0 | 2.94k |
| new_contents | string | 1 | 4.43k |
| subject | string | 15 | 444 |
| message | string | 16 | 3.45k |
| lang | string (1 class) | n/a | n/a |
| license | string (13 classes) | n/a | n/a |
| repos | string | 5 | 43.2k |
| prompt | string | 17 | 4.58k |
| response | string | 1 | 4.43k |
| prompt_tagged | string | 58 | 4.62k |
| response_tagged | string | 1 | 4.43k |
| text | string | 132 | 7.29k |
| text_tagged | string | 173 | 7.33k |
839db09465562e80a9d5612a0fba3a779e99eeb0
|
postgres/forms/interval_field.py
|
postgres/forms/interval_field.py
|
import datetime
import re
from django import forms
INTERVAL_RE = re.compile(
r'^((?P<days>\d+) days)?,?\W*'
r'((?P<hours>\d\d?)(:(?P<minutes>\d\d)(:(?P<seconds>\d\d))?)?)?'
)
def build_interval(data):
match = INTERVAL_RE.match(data)
if match:
return datetime.timedelta(**dict(
(key,int(value)) for key,value in match.groupdict().items() if value is not None
))
class IntervalField(forms.CharField):
def clean(self, value):
if value:
if not INTERVAL_RE.match(value):
raise forms.ValidationError('Does not match interval format.')
return build_interval(value)
|
Add an INTERVAL form field.
|
Add an INTERVAL form field.
|
Python
|
bsd-3-clause
|
wlanslovenija/django-postgres
|
Add an INTERVAL form field.
|
import datetime
import re
from django import forms
INTERVAL_RE = re.compile(
r'^((?P<days>\d+) days)?,?\W*'
r'((?P<hours>\d\d?)(:(?P<minutes>\d\d)(:(?P<seconds>\d\d))?)?)?'
)
def build_interval(data):
match = INTERVAL_RE.match(data)
if match:
return datetime.timedelta(**dict(
(key,int(value)) for key,value in match.groupdict().items() if value is not None
))
class IntervalField(forms.CharField):
def clean(self, value):
if value:
if not INTERVAL_RE.match(value):
raise forms.ValidationError('Does not match interval format.')
return build_interval(value)
|
<commit_before><commit_msg>Add an INTERVAL form field.<commit_after>
|
import datetime
import re
from django import forms
INTERVAL_RE = re.compile(
r'^((?P<days>\d+) days)?,?\W*'
r'((?P<hours>\d\d?)(:(?P<minutes>\d\d)(:(?P<seconds>\d\d))?)?)?'
)
def build_interval(data):
match = INTERVAL_RE.match(data)
if match:
return datetime.timedelta(**dict(
(key,int(value)) for key,value in match.groupdict().items() if value is not None
))
class IntervalField(forms.CharField):
def clean(self, value):
if value:
if not INTERVAL_RE.match(value):
raise forms.ValidationError('Does not match interval format.')
return build_interval(value)
|
Add an INTERVAL form field.import datetime
import re
from django import forms
INTERVAL_RE = re.compile(
r'^((?P<days>\d+) days)?,?\W*'
r'((?P<hours>\d\d?)(:(?P<minutes>\d\d)(:(?P<seconds>\d\d))?)?)?'
)
def build_interval(data):
match = INTERVAL_RE.match(data)
if match:
return datetime.timedelta(**dict(
(key,int(value)) for key,value in match.groupdict().items() if value is not None
))
class IntervalField(forms.CharField):
def clean(self, value):
if value:
if not INTERVAL_RE.match(value):
raise forms.ValidationError('Does not match interval format.')
return build_interval(value)
|
<commit_before><commit_msg>Add an INTERVAL form field.<commit_after>import datetime
import re
from django import forms
INTERVAL_RE = re.compile(
r'^((?P<days>\d+) days)?,?\W*'
r'((?P<hours>\d\d?)(:(?P<minutes>\d\d)(:(?P<seconds>\d\d))?)?)?'
)
def build_interval(data):
match = INTERVAL_RE.match(data)
if match:
return datetime.timedelta(**dict(
(key,int(value)) for key,value in match.groupdict().items() if value is not None
))
class IntervalField(forms.CharField):
def clean(self, value):
if value:
if not INTERVAL_RE.match(value):
raise forms.ValidationError('Does not match interval format.')
return build_interval(value)
|
|
87a316584c20c3a6589156ad304e9e81f784d726
|
libtaxii/test/to_text_11_test.py
|
libtaxii/test/to_text_11_test.py
|
import unittest
import os
import glob
import libtaxii.messages_11 as tm11
import libtaxii.taxii_default_query as tdq
# from libtaxii.validation import SchemaValidator
class To_text_11_test(unittest.TestCase):
input_path = os.path.join('input','1.1')
output_path = os.path.join('output','1.1')
def test_to_text_11_test(self):
input_filenames = glob.glob(os.path.join(self.input_path,'*.xml'))
for input_filename in input_filenames:
input_file = open(input_filename, 'r')
input_text = input_file.read()
# parse the file to a TAXII message/object
msg_from_xml = tm11.get_message_from_xml(input_text)
# serialize the object to XML and text
xml_from_msg = msg_from_xml.to_xml(True)
txt_from_msg = msg_from_xml.to_text()
# create the output files
basename = os.path.basename(input_filename)
name_no_ext = os.path.splitext(basename)[0]
txt_output_filename = os.path.join(self.output_path, name_no_ext + ".txt")
xml_output_filename = os.path.join(self.output_path, name_no_ext + ".xml")
txt_output_file = open(txt_output_filename, 'w')
xml_output_file = open(xml_output_filename, 'w')
# write XML and text to files.
txt_output_file.write(txt_from_msg)
xml_output_file.write(xml_from_msg)
txt_output_file.close()
xml_output_file.close()
if __name__ == '__main__':
unittest.main()
|
Test to parse & serialize test documents to XML and text.
|
Test to parse & serialize test documents to XML and text.
|
Python
|
bsd-3-clause
|
stkyle/libtaxii,Intelworks/libtaxii,TAXIIProject/libtaxii
|
Test to parse & serialize test documents to XML and text.
|
import unittest
import os
import glob
import libtaxii.messages_11 as tm11
import libtaxii.taxii_default_query as tdq
# from libtaxii.validation import SchemaValidator
class To_text_11_test(unittest.TestCase):
input_path = os.path.join('input','1.1')
output_path = os.path.join('output','1.1')
def test_to_text_11_test(self):
input_filenames = glob.glob(os.path.join(self.input_path,'*.xml'))
for input_filename in input_filenames:
input_file = open(input_filename, 'r')
input_text = input_file.read()
# parse the file to a TAXII message/object
msg_from_xml = tm11.get_message_from_xml(input_text)
# serialize the object to XML and text
xml_from_msg = msg_from_xml.to_xml(True)
txt_from_msg = msg_from_xml.to_text()
# create the output files
basename = os.path.basename(input_filename)
name_no_ext = os.path.splitext(basename)[0]
txt_output_filename = os.path.join(self.output_path, name_no_ext + ".txt")
xml_output_filename = os.path.join(self.output_path, name_no_ext + ".xml")
txt_output_file = open(txt_output_filename, 'w')
xml_output_file = open(xml_output_filename, 'w')
# write XML and text to files.
txt_output_file.write(txt_from_msg)
xml_output_file.write(xml_from_msg)
txt_output_file.close()
xml_output_file.close()
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Test to parse & serialize test documents to XML and text.<commit_after>
|
import unittest
import os
import glob
import libtaxii.messages_11 as tm11
import libtaxii.taxii_default_query as tdq
# from libtaxii.validation import SchemaValidator
class To_text_11_test(unittest.TestCase):
input_path = os.path.join('input','1.1')
output_path = os.path.join('output','1.1')
def test_to_text_11_test(self):
input_filenames = glob.glob(os.path.join(self.input_path,'*.xml'))
for input_filename in input_filenames:
input_file = open(input_filename, 'r')
input_text = input_file.read()
# parse the file to a TAXII message/object
msg_from_xml = tm11.get_message_from_xml(input_text)
# serialize the object to XML and text
xml_from_msg = msg_from_xml.to_xml(True)
txt_from_msg = msg_from_xml.to_text()
# create the output files
basename = os.path.basename(input_filename)
name_no_ext = os.path.splitext(basename)[0]
txt_output_filename = os.path.join(self.output_path, name_no_ext + ".txt")
xml_output_filename = os.path.join(self.output_path, name_no_ext + ".xml")
txt_output_file = open(txt_output_filename, 'w')
xml_output_file = open(xml_output_filename, 'w')
# write XML and text to files.
txt_output_file.write(txt_from_msg)
xml_output_file.write(xml_from_msg)
txt_output_file.close()
xml_output_file.close()
if __name__ == '__main__':
unittest.main()
|
Test to parse & serialize test documents to XML and text.
import unittest
import os
import glob
import libtaxii.messages_11 as tm11
import libtaxii.taxii_default_query as tdq
# from libtaxii.validation import SchemaValidator
class To_text_11_test(unittest.TestCase):
input_path = os.path.join('input','1.1')
output_path = os.path.join('output','1.1')
def test_to_text_11_test(self):
input_filenames = glob.glob(os.path.join(self.input_path,'*.xml'))
for input_filename in input_filenames:
input_file = open(input_filename, 'r')
input_text = input_file.read()
# parse the file to a TAXII message/object
msg_from_xml = tm11.get_message_from_xml(input_text)
# serialize the object to XML and text
xml_from_msg = msg_from_xml.to_xml(True)
txt_from_msg = msg_from_xml.to_text()
# create the output files
basename = os.path.basename(input_filename)
name_no_ext = os.path.splitext(basename)[0]
txt_output_filename = os.path.join(self.output_path, name_no_ext + ".txt")
xml_output_filename = os.path.join(self.output_path, name_no_ext + ".xml")
txt_output_file = open(txt_output_filename, 'w')
xml_output_file = open(xml_output_filename, 'w')
# write XML and text to files.
txt_output_file.write(txt_from_msg)
xml_output_file.write(xml_from_msg)
txt_output_file.close()
xml_output_file.close()
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Test to parse & serialize test documents to XML and text.<commit_after>
import unittest
import os
import glob
import libtaxii.messages_11 as tm11
import libtaxii.taxii_default_query as tdq
# from libtaxii.validation import SchemaValidator
class To_text_11_test(unittest.TestCase):
input_path = os.path.join('input','1.1')
output_path = os.path.join('output','1.1')
def test_to_text_11_test(self):
input_filenames = glob.glob(os.path.join(self.input_path,'*.xml'))
for input_filename in input_filenames:
input_file = open(input_filename, 'r')
input_text = input_file.read()
# parse the file to a TAXII message/object
msg_from_xml = tm11.get_message_from_xml(input_text)
# serialize the object to XML and text
xml_from_msg = msg_from_xml.to_xml(True)
txt_from_msg = msg_from_xml.to_text()
# create the output files
basename = os.path.basename(input_filename)
name_no_ext = os.path.splitext(basename)[0]
txt_output_filename = os.path.join(self.output_path, name_no_ext + ".txt")
xml_output_filename = os.path.join(self.output_path, name_no_ext + ".xml")
txt_output_file = open(txt_output_filename, 'w')
xml_output_file = open(xml_output_filename, 'w')
# write XML and text to files.
txt_output_file.write(txt_from_msg)
xml_output_file.write(xml_from_msg)
txt_output_file.close()
xml_output_file.close()
if __name__ == '__main__':
unittest.main()
|
|
ff8e52beb234fdd7050206b33bb4728512688b65
|
python/itertools/combinations.py
|
python/itertools/combinations.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) 2012 Jérémie DECOCK (http://www.jdhp.org)
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import itertools
def main():
"""Main function"""
# All possible combinations
# combinations(iterable, r) --> combinations object
#
# Return successive r-length combinations of elements in the iterable.
# combinations(range(4), 3) --> (0,1,2), (0,1,3), (0,2,3), (1,2,3)
for combination in itertools.combinations(range(4), 3):
print(combination)
if __name__ == '__main__':
main()
|
Add a snippet (Python itertools).
|
Add a snippet (Python itertools).
|
Python
|
mit
|
jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets
|
Add a snippet (Python itertools).
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) 2012 Jérémie DECOCK (http://www.jdhp.org)
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import itertools
def main():
"""Main function"""
# All possible combinations
# combinations(iterable, r) --> combinations object
#
# Return successive r-length combinations of elements in the iterable.
# combinations(range(4), 3) --> (0,1,2), (0,1,3), (0,2,3), (1,2,3)
for combination in itertools.combinations(range(4), 3):
print(combination)
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add a snippet (Python itertools).<commit_after>
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) 2012 Jérémie DECOCK (http://www.jdhp.org)
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import itertools
def main():
"""Main function"""
# All possible combinations
# combinations(iterable, r) --> combinations object
#
# Return successive r-length combinations of elements in the iterable.
# combinations(range(4), 3) --> (0,1,2), (0,1,3), (0,2,3), (1,2,3)
for combination in itertools.combinations(range(4), 3):
print(combination)
if __name__ == '__main__':
main()
|
Add a snippet (Python itertools).#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) 2012 Jérémie DECOCK (http://www.jdhp.org)
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import itertools
def main():
"""Main function"""
# All possible combinations
# combinations(iterable, r) --> combinations object
#
# Return successive r-length combinations of elements in the iterable.
# combinations(range(4), 3) --> (0,1,2), (0,1,3), (0,2,3), (1,2,3)
for combination in itertools.combinations(range(4), 3):
print(combination)
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add a snippet (Python itertools).<commit_after>#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) 2012 Jérémie DECOCK (http://www.jdhp.org)
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import itertools
def main():
"""Main function"""
# All possible combinations
# combinations(iterable, r) --> combinations object
#
# Return successive r-length combinations of elements in the iterable.
# combinations(range(4), 3) --> (0,1,2), (0,1,3), (0,2,3), (1,2,3)
for combination in itertools.combinations(range(4), 3):
print(combination)
if __name__ == '__main__':
main()
|
|
f6dd79b19f5cf63c32db1d07bc1712185a6cbb6d
|
tests/api/test_lifecycle.py
|
tests/api/test_lifecycle.py
|
class TestShutdown:
async def test(self, mocker, monkeypatch, do_get):
"""
Test that a ``GET /api/lifecycle/shutdown`` results in :meth:`aiohttp.web.Application.shutdown` being called on
the Virtool application object. We check this by seeing if the stub patched in for ``shutdown`` is called.
"""
await do_get.init_client()
stub = mocker.stub(name="shutdown")
async def mock_shutdown():
return stub()
monkeypatch.setattr(do_get.server.app, "shutdown", mock_shutdown)
await do_get("/api/lifecycle/shutdown", authorize=True)
assert stub.called
async def test_job_manager(self, mocker, monkeypatch, do_get):
"""
Test that a ``GET /api/lifecycle/shutdown`` results in :meth:`virtool.job_manager.Manager.close` being called as
part of the shutdown process.
"""
await do_get.init_client()
stub = mocker.stub(name="close")
async def mock_close():
return stub()
monkeypatch.setattr(do_get.server.app["job_manager"], "close", mock_close)
await do_get("/api/lifecycle/shutdown", authorize=True)
assert stub.called
|
Test the lifecycle shutdown API endpoint
|
Test the lifecycle shutdown API endpoint
|
Python
|
mit
|
virtool/virtool,igboyes/virtool,igboyes/virtool,virtool/virtool
|
Test the lifecycle shutdown API endpoint
|
class TestShutdown:
async def test(self, mocker, monkeypatch, do_get):
"""
Test that a ``GET /api/lifecycle/shutdown`` results in :meth:`aiohttp.web.Application.shutdown` being called on
the Virtool application object. We check this by seeing if the stub patched in for ``shutdown`` is called.
"""
await do_get.init_client()
stub = mocker.stub(name="shutdown")
async def mock_shutdown():
return stub()
monkeypatch.setattr(do_get.server.app, "shutdown", mock_shutdown)
await do_get("/api/lifecycle/shutdown", authorize=True)
assert stub.called
async def test_job_manager(self, mocker, monkeypatch, do_get):
"""
Test that a ``GET /api/lifecycle/shutdown`` results in :meth:`virtool.job_manager.Manager.close` being called as
part of the shutdown process.
"""
await do_get.init_client()
stub = mocker.stub(name="close")
async def mock_close():
return stub()
monkeypatch.setattr(do_get.server.app["job_manager"], "close", mock_close)
await do_get("/api/lifecycle/shutdown", authorize=True)
assert stub.called
|
<commit_before><commit_msg>Test the lifecycle shutdown API endpoint<commit_after>
|
class TestShutdown:
async def test(self, mocker, monkeypatch, do_get):
"""
Test that a ``GET /api/lifecycle/shutdown`` results in :meth:`aiohttp.web.Application.shutdown` being called on
the Virtool application object. We check this by seeing if the stub patched in for ``shutdown`` is called.
"""
await do_get.init_client()
stub = mocker.stub(name="shutdown")
async def mock_shutdown():
return stub()
monkeypatch.setattr(do_get.server.app, "shutdown", mock_shutdown)
await do_get("/api/lifecycle/shutdown", authorize=True)
assert stub.called
async def test_job_manager(self, mocker, monkeypatch, do_get):
"""
Test that a ``GET /api/lifecycle/shutdown`` results in :meth:`virtool.job_manager.Manager.close` being called as
part of the shutdown process.
"""
await do_get.init_client()
stub = mocker.stub(name="close")
async def mock_close():
return stub()
monkeypatch.setattr(do_get.server.app["job_manager"], "close", mock_close)
await do_get("/api/lifecycle/shutdown", authorize=True)
assert stub.called
|
Test the lifecycle shutdown API endpointclass TestShutdown:
async def test(self, mocker, monkeypatch, do_get):
"""
Test that a ``GET /api/lifecycle/shutdown`` results in :meth:`aiohttp.web.Application.shutdown` being called on
the Virtool application object. We check this by seeing if the stub patched in for ``shutdown`` is called.
"""
await do_get.init_client()
stub = mocker.stub(name="shutdown")
async def mock_shutdown():
return stub()
monkeypatch.setattr(do_get.server.app, "shutdown", mock_shutdown)
await do_get("/api/lifecycle/shutdown", authorize=True)
assert stub.called
async def test_job_manager(self, mocker, monkeypatch, do_get):
"""
Test that a ``GET /api/lifecycle/shutdown`` results in :meth:`virtool.job_manager.Manager.close` being called as
part of the shutdown process.
"""
await do_get.init_client()
stub = mocker.stub(name="close")
async def mock_close():
return stub()
monkeypatch.setattr(do_get.server.app["job_manager"], "close", mock_close)
await do_get("/api/lifecycle/shutdown", authorize=True)
assert stub.called
|
<commit_before><commit_msg>Test the lifecycle shutdown API endpoint<commit_after>class TestShutdown:
async def test(self, mocker, monkeypatch, do_get):
"""
Test that a ``GET /api/lifecycle/shutdown`` results in :meth:`aiohttp.web.Application.shutdown` being called on
the Virtool application object. We check this by seeing if the stub patched in for ``shutdown`` is called.
"""
await do_get.init_client()
stub = mocker.stub(name="shutdown")
async def mock_shutdown():
return stub()
monkeypatch.setattr(do_get.server.app, "shutdown", mock_shutdown)
await do_get("/api/lifecycle/shutdown", authorize=True)
assert stub.called
async def test_job_manager(self, mocker, monkeypatch, do_get):
"""
Test that a ``GET /api/lifecycle/shutdown`` results in :meth:`virtool.job_manager.Manager.close` being called as
part of the shutdown process.
"""
await do_get.init_client()
stub = mocker.stub(name="close")
async def mock_close():
return stub()
monkeypatch.setattr(do_get.server.app["job_manager"], "close", mock_close)
await do_get("/api/lifecycle/shutdown", authorize=True)
assert stub.called
|
|
25210e9fd08b24f3eb9a6f661ac3c060fcb6e43e
|
tests/test_build_failure.py
|
tests/test_build_failure.py
|
"""Tests for handling failed builds"""
from dxr.testing import SingleFileTestCase, CommandFailure
class BuildFailureTests(SingleFileTestCase):
source = r"""A bunch of garbage"""
@classmethod
def setup_class(cls):
"""Make sure a failed build returns a non-zero status code."""
try:
super(BuildFailureTests, cls).setup_class()
except CommandFailure:
pass
else:
raise AssertionError('A failed build returned an exit code of 0.')
def test_nothing(self):
"""A null test just to make the setup method run"""
|
Add a test that proves build_instance raises an exception if `make` (etc.) fails.
|
Add a test that proves build_instance raises an exception if `make` (etc.) fails.
From here, it's trivial to observe that dxr-build.py itself exits with a non-zero status code on build failure. Thus, our spurious deployment of a new instance even after the moz-central build failed must be the fault of a calling shell script or something.
|
Python
|
mit
|
pelmers/dxr,kleintom/dxr,jbradberry/dxr,gartung/dxr,jay-z007/dxr,pombredanne/dxr,KiemVM/Mozilla--dxr,pelmers/dxr,pombredanne/dxr,KiemVM/Mozilla--dxr,KiemVM/Mozilla--dxr,bozzmob/dxr,srenatus/dxr,srenatus/dxr,jbradberry/dxr,erikrose/dxr,pombredanne/dxr,gartung/dxr,nrc/dxr,nrc/dxr,kleintom/dxr,nrc/dxr,jay-z007/dxr,nrc/dxr,kleintom/dxr,nrc/dxr,KiemVM/Mozilla--dxr,bozzmob/dxr,erikrose/dxr,gartung/dxr,jbradberry/dxr,jbradberry/dxr,KiemVM/Mozilla--dxr,jay-z007/dxr,srenatus/dxr,bozzmob/dxr,kleintom/dxr,jay-z007/dxr,jay-z007/dxr,gartung/dxr,pombredanne/dxr,srenatus/dxr,gartung/dxr,pelmers/dxr,srenatus/dxr,kleintom/dxr,bozzmob/dxr,kleintom/dxr,pombredanne/dxr,gartung/dxr,jbradberry/dxr,erikrose/dxr,pombredanne/dxr,bozzmob/dxr,kleintom/dxr,KiemVM/Mozilla--dxr,pelmers/dxr,pelmers/dxr,jay-z007/dxr,erikrose/dxr,pelmers/dxr,gartung/dxr,srenatus/dxr,jay-z007/dxr,nrc/dxr,pelmers/dxr,erikrose/dxr,bozzmob/dxr,jbradberry/dxr,jbradberry/dxr,pombredanne/dxr,bozzmob/dxr
|
Add a test that proves build_instance raises an exception if `make` (etc.) fails.
From here, it's trivial to observe that dxr-build.py itself exits with a non-zero status code on build failure. Thus, our spurious deployment of a new instance even after the moz-central build failed must be the fault of a calling shell script or something.
|
"""Tests for handling failed builds"""
from dxr.testing import SingleFileTestCase, CommandFailure
class BuildFailureTests(SingleFileTestCase):
source = r"""A bunch of garbage"""
@classmethod
def setup_class(cls):
"""Make sure a failed build returns a non-zero status code."""
try:
super(BuildFailureTests, cls).setup_class()
except CommandFailure:
pass
else:
raise AssertionError('A failed build returned an exit code of 0.')
def test_nothing(self):
"""A null test just to make the setup method run"""
|
<commit_before><commit_msg>Add a test that proves build_instance raises an exception if `make` (etc.) fails.
From here, it's trivial to observe that dxr-build.py itself exits with a non-zero status code on build failure. Thus, our spurious deployment of a new instance even after the moz-central build failed must be the fault of a calling shell script or something.<commit_after>
|
"""Tests for handling failed builds"""
from dxr.testing import SingleFileTestCase, CommandFailure
class BuildFailureTests(SingleFileTestCase):
source = r"""A bunch of garbage"""
@classmethod
def setup_class(cls):
"""Make sure a failed build returns a non-zero status code."""
try:
super(BuildFailureTests, cls).setup_class()
except CommandFailure:
pass
else:
raise AssertionError('A failed build returned an exit code of 0.')
def test_nothing(self):
"""A null test just to make the setup method run"""
|
Add a test that proves build_instance raises an exception if `make` (etc.) fails.
From here, it's trivial to observe that dxr-build.py itself exits with a non-zero status code on build failure. Thus, our spurious deployment of a new instance even after the moz-central build failed must be the fault of a calling shell script or something."""Tests for handling failed builds"""
from dxr.testing import SingleFileTestCase, CommandFailure
class BuildFailureTests(SingleFileTestCase):
source = r"""A bunch of garbage"""
@classmethod
def setup_class(cls):
"""Make sure a failed build returns a non-zero status code."""
try:
super(BuildFailureTests, cls).setup_class()
except CommandFailure:
pass
else:
raise AssertionError('A failed build returned an exit code of 0.')
def test_nothing(self):
"""A null test just to make the setup method run"""
|
<commit_before><commit_msg>Add a test that proves build_instance raises an exception if `make` (etc.) fails.
From here, it's trivial to observe that dxr-build.py itself exits with a non-zero status code on build failure. Thus, our spurious deployment of a new instance even after the moz-central build failed must be the fault of a calling shell script or something.<commit_after>"""Tests for handling failed builds"""
from dxr.testing import SingleFileTestCase, CommandFailure
class BuildFailureTests(SingleFileTestCase):
source = r"""A bunch of garbage"""
@classmethod
def setup_class(cls):
"""Make sure a failed build returns a non-zero status code."""
try:
super(BuildFailureTests, cls).setup_class()
except CommandFailure:
pass
else:
raise AssertionError('A failed build returned an exit code of 0.')
def test_nothing(self):
"""A null test just to make the setup method run"""
|
|
6a56d00b409798fbeccf84e940847b6e2705bfdf
|
app/soc/modules/gci/logic/profile.py
|
app/soc/modules/gci/logic/profile.py
|
#!/usr/bin/env python2.5
#
# Copyright 2011 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""GCI logic for profiles.
"""
__authors__ = [
'"Madhusudan.C.S" <madhusudancs@gmail.com>',
]
from soc.modules.gci.models.profile import GCIProfile
def queryAllMentorsKeysForOrg(org, limit=1000):
"""Returns a list of keys of all the mentors for the organization
Args:
org: the organization entity for which we need to get all the mentors
limit: the maximum number of entities that must be fetched
returns:
Set of keys of all the mentors and org admins for the organization
"""
# get all mentors keys first
query = GCIProfile.all(keys_only=True)
query.filter('mentor_for', org)
mentors_keys = query.fetch(limit=limit)
# get all org admins keys first
query = GCIProfile.all(keys_only=True)
query.filter('org_admin_for', org)
oa_keys = query.fetch(limit=limit)
return set(mentors_keys + oa_keys)
|
Implement a method that fetches all the mentors for a given organization.
|
Implement a method that fetches all the mentors for a given organization.
|
Python
|
apache-2.0
|
rhyolight/nupic.son,rhyolight/nupic.son,rhyolight/nupic.son
|
Implement a method that fetches all the mentors for a given organization.
|
#!/usr/bin/env python2.5
#
# Copyright 2011 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""GCI logic for profiles.
"""
__authors__ = [
'"Madhusudan.C.S" <madhusudancs@gmail.com>',
]
from soc.modules.gci.models.profile import GCIProfile
def queryAllMentorsKeysForOrg(org, limit=1000):
"""Returns a list of keys of all the mentors for the organization
Args:
org: the organization entity for which we need to get all the mentors
limit: the maximum number of entities that must be fetched
returns:
Set of keys of all the mentors and org admins for the organization
"""
# get all mentors keys first
query = GCIProfile.all(keys_only=True)
query.filter('mentor_for', org)
mentors_keys = query.fetch(limit=limit)
# get all org admins keys first
query = GCIProfile.all(keys_only=True)
query.filter('org_admin_for', org)
oa_keys = query.fetch(limit=limit)
return set(mentors_keys + oa_keys)
|
<commit_before><commit_msg>Implement a method that fetches all the mentors for a given organization.<commit_after>
|
#!/usr/bin/env python2.5
#
# Copyright 2011 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""GCI logic for profiles.
"""
__authors__ = [
'"Madhusudan.C.S" <madhusudancs@gmail.com>',
]
from soc.modules.gci.models.profile import GCIProfile
def queryAllMentorsKeysForOrg(org, limit=1000):
"""Returns a list of keys of all the mentors for the organization
Args:
org: the organization entity for which we need to get all the mentors
limit: the maximum number of entities that must be fetched
returns:
Set of keys of all the mentors and org admins for the organization
"""
# get all mentors keys first
query = GCIProfile.all(keys_only=True)
query.filter('mentor_for', org)
mentors_keys = query.fetch(limit=limit)
# get all org admins keys first
query = GCIProfile.all(keys_only=True)
query.filter('org_admin_for', org)
oa_keys = query.fetch(limit=limit)
return set(mentors_keys + oa_keys)
|
Implement a method that fetches all the mentors for a given organization.#!/usr/bin/env python2.5
#
# Copyright 2011 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""GCI logic for profiles.
"""
__authors__ = [
'"Madhusudan.C.S" <madhusudancs@gmail.com>',
]
from soc.modules.gci.models.profile import GCIProfile
def queryAllMentorsKeysForOrg(org, limit=1000):
"""Returns a list of keys of all the mentors for the organization
Args:
org: the organization entity for which we need to get all the mentors
limit: the maximum number of entities that must be fetched
returns:
Set of keys of all the mentors and org admins for the organization
"""
# get all mentors keys first
query = GCIProfile.all(keys_only=True)
query.filter('mentor_for', org)
mentors_keys = query.fetch(limit=limit)
# get all org admins keys first
query = GCIProfile.all(keys_only=True)
query.filter('org_admin_for', org)
oa_keys = query.fetch(limit=limit)
return set(mentors_keys + oa_keys)
|
<commit_before><commit_msg>Implement a method that fetches all the mentors for a given organization.<commit_after>#!/usr/bin/env python2.5
#
# Copyright 2011 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""GCI logic for profiles.
"""
__authors__ = [
'"Madhusudan.C.S" <madhusudancs@gmail.com>',
]
from soc.modules.gci.models.profile import GCIProfile
def queryAllMentorsKeysForOrg(org, limit=1000):
"""Returns a list of keys of all the mentors for the organization
Args:
org: the organization entity for which we need to get all the mentors
limit: the maximum number of entities that must be fetched
returns:
Set of keys of all the mentors and org admins for the organization
"""
# get all mentors keys first
query = GCIProfile.all(keys_only=True)
query.filter('mentor_for', org)
mentors_keys = query.fetch(limit=limit)
# get all org admins keys first
query = GCIProfile.all(keys_only=True)
query.filter('org_admin_for', org)
oa_keys = query.fetch(limit=limit)
return set(mentors_keys + oa_keys)
|
|
489dd26fd22c3e7820cf74d1582adfcff050a8bb
|
test/test_packages.py
|
test/test_packages.py
|
import pytest
@pytest.mark.parametrize("name", [
("bash-completion"),
("bind-utils"),
("bridge-utils"),
("docker"),
("epel-release"),
("git"),
("iptables-services"),
("libnfsidmap"),
("net-tools"),
("nfs-utils"),
("pyOpenSSL"),
("screen"),
("strace"),
("tcpdump"),
("wget"),
])
def test_packages(Package, name):
assert Package(name).is_installed
|
Add test for installed packages
|
Add test for installed packages
|
Python
|
mit
|
wicksy/vagrant-openshift,wicksy/vagrant-openshift,wicksy/vagrant-openshift
|
Add test for installed packages
|
import pytest
@pytest.mark.parametrize("name", [
("bash-completion"),
("bind-utils"),
("bridge-utils"),
("docker"),
("epel-release"),
("git"),
("iptables-services"),
("libnfsidmap"),
("net-tools"),
("nfs-utils"),
("pyOpenSSL"),
("screen"),
("strace"),
("tcpdump"),
("wget"),
])
def test_packages(Package, name):
assert Package(name).is_installed
|
<commit_before><commit_msg>Add test for installed packages<commit_after>
|
import pytest
@pytest.mark.parametrize("name", [
("bash-completion"),
("bind-utils"),
("bridge-utils"),
("docker"),
("epel-release"),
("git"),
("iptables-services"),
("libnfsidmap"),
("net-tools"),
("nfs-utils"),
("pyOpenSSL"),
("screen"),
("strace"),
("tcpdump"),
("wget"),
])
def test_packages(Package, name):
assert Package(name).is_installed
|
Add test for installed packagesimport pytest
@pytest.mark.parametrize("name", [
("bash-completion"),
("bind-utils"),
("bridge-utils"),
("docker"),
("epel-release"),
("git"),
("iptables-services"),
("libnfsidmap"),
("net-tools"),
("nfs-utils"),
("pyOpenSSL"),
("screen"),
("strace"),
("tcpdump"),
("wget"),
])
def test_packages(Package, name):
assert Package(name).is_installed
|
<commit_before><commit_msg>Add test for installed packages<commit_after>import pytest
@pytest.mark.parametrize("name", [
("bash-completion"),
("bind-utils"),
("bridge-utils"),
("docker"),
("epel-release"),
("git"),
("iptables-services"),
("libnfsidmap"),
("net-tools"),
("nfs-utils"),
("pyOpenSSL"),
("screen"),
("strace"),
("tcpdump"),
("wget"),
])
def test_packages(Package, name):
assert Package(name).is_installed
|
|
cce6e292631d795a1d73fba8252f06b654d2f8fe
|
tests/test_timer.py
|
tests/test_timer.py
|
# -*- coding: utf-8 -*-
"""
tests.test_timer
~~~~~~~~~~~~~~~~~~~~~
"""
import time
import re
from yaspin import yaspin
def test_no_timer():
sp = yaspin(timer=False)
sp._freeze("")
assert re.search(r"\(\d+:\d{2}:\d{2}\)", sp._last_frame) is None
def test_timer_idle():
sp = yaspin(timer=True)
assert sp.elapsed_time == 0
sp._freeze("")
assert "(0:00:00)" in sp._last_frame
def test_timer_in_progress():
sp = yaspin(timer=True)
sp.start()
t1 = sp.elapsed_time
time.sleep(0.001)
t2 = sp.elapsed_time
sp.stop()
assert t2 - t1 >= 0.001
sp._freeze("")
assert re.search(r"\(\d+:\d{2}:\d{2}\)", sp._last_frame) is not None
def test_timer_finished():
sp = yaspin(timer=True)
sp.start()
time.sleep(0.001)
sp.stop()
assert sp.elapsed_time >= 0.001
t1 = sp.elapsed_time
time.sleep(0.001)
t2 = sp.elapsed_time
assert t1 == t2
sp._freeze("")
assert re.search(r"\(\d+:\d{2}:\d{2}\)", sp._last_frame) is not None
|
Add tests for timer functionality
|
Add tests for timer functionality
|
Python
|
mit
|
pavdmyt/yaspin
|
Add tests for timer functionality
|
# -*- coding: utf-8 -*-
"""
tests.test_timer
~~~~~~~~~~~~~~~~~~~~~
"""
import time
import re
from yaspin import yaspin
def test_no_timer():
sp = yaspin(timer=False)
sp._freeze("")
assert re.search(r"\(\d+:\d{2}:\d{2}\)", sp._last_frame) is None
def test_timer_idle():
sp = yaspin(timer=True)
assert sp.elapsed_time == 0
sp._freeze("")
assert "(0:00:00)" in sp._last_frame
def test_timer_in_progress():
sp = yaspin(timer=True)
sp.start()
t1 = sp.elapsed_time
time.sleep(0.001)
t2 = sp.elapsed_time
sp.stop()
assert t2 - t1 >= 0.001
sp._freeze("")
assert re.search(r"\(\d+:\d{2}:\d{2}\)", sp._last_frame) is not None
def test_timer_finished():
sp = yaspin(timer=True)
sp.start()
time.sleep(0.001)
sp.stop()
assert sp.elapsed_time >= 0.001
t1 = sp.elapsed_time
time.sleep(0.001)
t2 = sp.elapsed_time
assert t1 == t2
sp._freeze("")
assert re.search(r"\(\d+:\d{2}:\d{2}\)", sp._last_frame) is not None
|
<commit_before><commit_msg>Add tests for timer functionality<commit_after>
|
# -*- coding: utf-8 -*-
"""
tests.test_timer
~~~~~~~~~~~~~~~~~~~~~
"""
import time
import re
from yaspin import yaspin
def test_no_timer():
sp = yaspin(timer=False)
sp._freeze("")
assert re.search(r"\(\d+:\d{2}:\d{2}\)", sp._last_frame) is None
def test_timer_idle():
sp = yaspin(timer=True)
assert sp.elapsed_time == 0
sp._freeze("")
assert "(0:00:00)" in sp._last_frame
def test_timer_in_progress():
sp = yaspin(timer=True)
sp.start()
t1 = sp.elapsed_time
time.sleep(0.001)
t2 = sp.elapsed_time
sp.stop()
assert t2 - t1 >= 0.001
sp._freeze("")
assert re.search(r"\(\d+:\d{2}:\d{2}\)", sp._last_frame) is not None
def test_timer_finished():
sp = yaspin(timer=True)
sp.start()
time.sleep(0.001)
sp.stop()
assert sp.elapsed_time >= 0.001
t1 = sp.elapsed_time
time.sleep(0.001)
t2 = sp.elapsed_time
assert t1 == t2
sp._freeze("")
assert re.search(r"\(\d+:\d{2}:\d{2}\)", sp._last_frame) is not None
|
Add tests for timer functionality# -*- coding: utf-8 -*-
"""
tests.test_timer
~~~~~~~~~~~~~~~~~~~~~
"""
import time
import re
from yaspin import yaspin
def test_no_timer():
sp = yaspin(timer=False)
sp._freeze("")
assert re.search(r"\(\d+:\d{2}:\d{2}\)", sp._last_frame) is None
def test_timer_idle():
sp = yaspin(timer=True)
assert sp.elapsed_time == 0
sp._freeze("")
assert "(0:00:00)" in sp._last_frame
def test_timer_in_progress():
sp = yaspin(timer=True)
sp.start()
t1 = sp.elapsed_time
time.sleep(0.001)
t2 = sp.elapsed_time
sp.stop()
assert t2 - t1 >= 0.001
sp._freeze("")
assert re.search(r"\(\d+:\d{2}:\d{2}\)", sp._last_frame) is not None
def test_timer_finished():
sp = yaspin(timer=True)
sp.start()
time.sleep(0.001)
sp.stop()
assert sp.elapsed_time >= 0.001
t1 = sp.elapsed_time
time.sleep(0.001)
t2 = sp.elapsed_time
assert t1 == t2
sp._freeze("")
assert re.search(r"\(\d+:\d{2}:\d{2}\)", sp._last_frame) is not None
|
<commit_before><commit_msg>Add tests for timer functionality<commit_after># -*- coding: utf-8 -*-
"""
tests.test_timer
~~~~~~~~~~~~~~~~~~~~~
"""
import time
import re
from yaspin import yaspin
def test_no_timer():
sp = yaspin(timer=False)
sp._freeze("")
assert re.search(r"\(\d+:\d{2}:\d{2}\)", sp._last_frame) is None
def test_timer_idle():
sp = yaspin(timer=True)
assert sp.elapsed_time == 0
sp._freeze("")
assert "(0:00:00)" in sp._last_frame
def test_timer_in_progress():
sp = yaspin(timer=True)
sp.start()
t1 = sp.elapsed_time
time.sleep(0.001)
t2 = sp.elapsed_time
sp.stop()
assert t2 - t1 >= 0.001
sp._freeze("")
assert re.search(r"\(\d+:\d{2}:\d{2}\)", sp._last_frame) is not None
def test_timer_finished():
sp = yaspin(timer=True)
sp.start()
time.sleep(0.001)
sp.stop()
assert sp.elapsed_time >= 0.001
t1 = sp.elapsed_time
time.sleep(0.001)
t2 = sp.elapsed_time
assert t1 == t2
sp._freeze("")
assert re.search(r"\(\d+:\d{2}:\d{2}\)", sp._last_frame) is not None
|
|
a1fd670f25141b176d276514adbce6e110b49e38
|
comics/crawlers/slagoon.py
|
comics/crawlers/slagoon.py
|
from comics.crawler.base import BaseComicCrawler
from comics.crawler.meta import BaseComicMeta
class ComicMeta(BaseComicMeta):
name = "Sherman's Lagoon"
language = 'en'
url = 'http://www.slagoon.com/'
start_date = '1991-05-13'
history_capable_days = 32
schedule = 'Mo,Tu,We,Th,Fr,Sa,Su'
time_zone = -5
rights = 'Jim Toomey'
class ComicCrawler(BaseComicCrawler):
def _get_url(self):
self.url = 'http://www.slagoon.com/dailies/SL%(date)s.gif' % {
'date': self.pub_date.strftime('%y%m%d'),
}
|
Add crawler for 'Sherman's Lagoon'
|
Add crawler for 'Sherman's Lagoon'
|
Python
|
agpl-3.0
|
jodal/comics,jodal/comics,klette/comics,datagutten/comics,datagutten/comics,jodal/comics,datagutten/comics,klette/comics,klette/comics,datagutten/comics,jodal/comics
|
Add crawler for 'Sherman's Lagoon'
|
from comics.crawler.base import BaseComicCrawler
from comics.crawler.meta import BaseComicMeta
class ComicMeta(BaseComicMeta):
name = "Sherman's Lagoon"
language = 'en'
url = 'http://www.slagoon.com/'
start_date = '1991-05-13'
history_capable_days = 32
schedule = 'Mo,Tu,We,Th,Fr,Sa,Su'
time_zone = -5
rights = 'Jim Toomey'
class ComicCrawler(BaseComicCrawler):
def _get_url(self):
self.url = 'http://www.slagoon.com/dailies/SL%(date)s.gif' % {
'date': self.pub_date.strftime('%y%m%d'),
}
|
<commit_before><commit_msg>Add crawler for 'Sherman's Lagoon'<commit_after>
|
from comics.crawler.base import BaseComicCrawler
from comics.crawler.meta import BaseComicMeta
class ComicMeta(BaseComicMeta):
name = "Sherman's Lagoon"
language = 'en'
url = 'http://www.slagoon.com/'
start_date = '1991-05-13'
history_capable_days = 32
schedule = 'Mo,Tu,We,Th,Fr,Sa,Su'
time_zone = -5
rights = 'Jim Toomey'
class ComicCrawler(BaseComicCrawler):
def _get_url(self):
self.url = 'http://www.slagoon.com/dailies/SL%(date)s.gif' % {
'date': self.pub_date.strftime('%y%m%d'),
}
|
Add crawler for 'Sherman's Lagoon'from comics.crawler.base import BaseComicCrawler
from comics.crawler.meta import BaseComicMeta
class ComicMeta(BaseComicMeta):
name = "Sherman's Lagoon"
language = 'en'
url = 'http://www.slagoon.com/'
start_date = '1991-05-13'
history_capable_days = 32
schedule = 'Mo,Tu,We,Th,Fr,Sa,Su'
time_zone = -5
rights = 'Jim Toomey'
class ComicCrawler(BaseComicCrawler):
def _get_url(self):
self.url = 'http://www.slagoon.com/dailies/SL%(date)s.gif' % {
'date': self.pub_date.strftime('%y%m%d'),
}
|
<commit_before><commit_msg>Add crawler for 'Sherman's Lagoon'<commit_after>from comics.crawler.base import BaseComicCrawler
from comics.crawler.meta import BaseComicMeta
class ComicMeta(BaseComicMeta):
name = "Sherman's Lagoon"
language = 'en'
url = 'http://www.slagoon.com/'
start_date = '1991-05-13'
history_capable_days = 32
schedule = 'Mo,Tu,We,Th,Fr,Sa,Su'
time_zone = -5
rights = 'Jim Toomey'
class ComicCrawler(BaseComicCrawler):
def _get_url(self):
self.url = 'http://www.slagoon.com/dailies/SL%(date)s.gif' % {
'date': self.pub_date.strftime('%y%m%d'),
}
|
|
ceea7edd9162e9a834be8888fea18dcd0da43561
|
test/test_rendering_dot_files.py
|
test/test_rendering_dot_files.py
|
from __future__ import division
from __future__ import print_function
import glob
import os
import subprocess
import sys
from hashlib import sha256
import pydot_ng as pydot
PY3 = not sys.version_info < (3, 0, 0)
if PY3:
NULL_SEP = b''
xrange = range
else:
NULL_SEP = ''
bytes = str
DOT_BINARY_PATH = pydot.find_graphviz()['dot']
TEST_DIR = os.path.dirname(__file__)
REGRESSION_TESTS_DIR = os.path.join(TEST_DIR, "graphs")
MY_REGRESSION_TESTS_DIR = os.path.join(TEST_DIR, "my_tests")
TESTS_DIRS = (
("regressions", REGRESSION_TESTS_DIR),
("my_regressions", MY_REGRESSION_TESTS_DIR),
)
def list_dots(path):
searchpath = os.path.join(path, "*.dot")
return [f for f in glob.glob(searchpath)]
def pytest_generate_tests(metafunc):
idlist = []
argvalues = []
for name, dir in TESTS_DIRS:
for filepath in list_dots(dir):
filename = os.path.basename(filepath)
idlist.append("{}-{}".format(name, filename))
argvalues.append((filepath,))
metafunc.parametrize(
argnames=["filepath"],
argvalues=argvalues,
ids=idlist,
scope="function",
)
def test_render_and_compare_dot_files(filepath):
def _render_with_graphviz(filename):
p = subprocess.Popen(
(DOT_BINARY_PATH, '-Tjpe'),
cwd=os.path.dirname(filename),
stdin=open(filename, 'rt'),
stderr=subprocess.PIPE, stdout=subprocess.PIPE)
stdout = p.stdout
stdout_output = list()
while True:
data = stdout.read()
if not data:
break
stdout_output.append(data)
stdout.close()
if stdout_output:
stdout_output = NULL_SEP.join(stdout_output)
# this returns a status code we should check
p.wait()
return sha256(stdout_output).hexdigest()
def _render_with_pydot(filename):
g = pydot.graph_from_dot_file(filename)
if not isinstance(g, list):
g = [g]
jpe_data = NULL_SEP.join([_g.create(format='jpe') for _g in g])
return sha256(jpe_data).hexdigest()
parsed_data_hexdigest = _render_with_pydot(filepath)
original_data_hexdigest = _render_with_graphviz(filepath)
assert original_data_hexdigest == parsed_data_hexdigest
|
Add testing regressions of pydot vs graphviz
|
Add testing regressions of pydot vs graphviz
|
Python
|
mit
|
pydot/pydot-ng
|
Add testing regressions of pydot vs graphviz
|
from __future__ import division
from __future__ import print_function
import glob
import os
import subprocess
import sys
from hashlib import sha256
import pydot_ng as pydot
PY3 = not sys.version_info < (3, 0, 0)
if PY3:
NULL_SEP = b''
xrange = range
else:
NULL_SEP = ''
bytes = str
DOT_BINARY_PATH = pydot.find_graphviz()['dot']
TEST_DIR = os.path.dirname(__file__)
REGRESSION_TESTS_DIR = os.path.join(TEST_DIR, "graphs")
MY_REGRESSION_TESTS_DIR = os.path.join(TEST_DIR, "my_tests")
TESTS_DIRS = (
("regressions", REGRESSION_TESTS_DIR),
("my_regressions", MY_REGRESSION_TESTS_DIR),
)
def list_dots(path):
searchpath = os.path.join(path, "*.dot")
return [f for f in glob.glob(searchpath)]
def pytest_generate_tests(metafunc):
idlist = []
argvalues = []
for name, dir in TESTS_DIRS:
for filepath in list_dots(dir):
filename = os.path.basename(filepath)
idlist.append("{}-{}".format(name, filename))
argvalues.append((filepath,))
metafunc.parametrize(
argnames=["filepath"],
argvalues=argvalues,
ids=idlist,
scope="function",
)
def test_render_and_compare_dot_files(filepath):
def _render_with_graphviz(filename):
p = subprocess.Popen(
(DOT_BINARY_PATH, '-Tjpe'),
cwd=os.path.dirname(filename),
stdin=open(filename, 'rt'),
stderr=subprocess.PIPE, stdout=subprocess.PIPE)
stdout = p.stdout
stdout_output = list()
while True:
data = stdout.read()
if not data:
break
stdout_output.append(data)
stdout.close()
if stdout_output:
stdout_output = NULL_SEP.join(stdout_output)
# this returns a status code we should check
p.wait()
return sha256(stdout_output).hexdigest()
def _render_with_pydot(filename):
g = pydot.graph_from_dot_file(filename)
if not isinstance(g, list):
g = [g]
jpe_data = NULL_SEP.join([_g.create(format='jpe') for _g in g])
return sha256(jpe_data).hexdigest()
parsed_data_hexdigest = _render_with_pydot(filepath)
original_data_hexdigest = _render_with_graphviz(filepath)
assert original_data_hexdigest == parsed_data_hexdigest
|
<commit_before><commit_msg>Add testing regressions of pydot vs graphviz<commit_after>
|
from __future__ import division
from __future__ import print_function
import glob
import os
import subprocess
import sys
from hashlib import sha256
import pydot_ng as pydot
PY3 = not sys.version_info < (3, 0, 0)
if PY3:
NULL_SEP = b''
xrange = range
else:
NULL_SEP = ''
bytes = str
DOT_BINARY_PATH = pydot.find_graphviz()['dot']
TEST_DIR = os.path.dirname(__file__)
REGRESSION_TESTS_DIR = os.path.join(TEST_DIR, "graphs")
MY_REGRESSION_TESTS_DIR = os.path.join(TEST_DIR, "my_tests")
TESTS_DIRS = (
("regressions", REGRESSION_TESTS_DIR),
("my_regressions", MY_REGRESSION_TESTS_DIR),
)
def list_dots(path):
searchpath = os.path.join(path, "*.dot")
return [f for f in glob.glob(searchpath)]
def pytest_generate_tests(metafunc):
idlist = []
argvalues = []
for name, dir in TESTS_DIRS:
for filepath in list_dots(dir):
filename = os.path.basename(filepath)
idlist.append("{}-{}".format(name, filename))
argvalues.append((filepath,))
metafunc.parametrize(
argnames=["filepath"],
argvalues=argvalues,
ids=idlist,
scope="function",
)
def test_render_and_compare_dot_files(filepath):
def _render_with_graphviz(filename):
p = subprocess.Popen(
(DOT_BINARY_PATH, '-Tjpe'),
cwd=os.path.dirname(filename),
stdin=open(filename, 'rt'),
stderr=subprocess.PIPE, stdout=subprocess.PIPE)
stdout = p.stdout
stdout_output = list()
while True:
data = stdout.read()
if not data:
break
stdout_output.append(data)
stdout.close()
if stdout_output:
stdout_output = NULL_SEP.join(stdout_output)
# this returns a status code we should check
p.wait()
return sha256(stdout_output).hexdigest()
def _render_with_pydot(filename):
g = pydot.graph_from_dot_file(filename)
if not isinstance(g, list):
g = [g]
jpe_data = NULL_SEP.join([_g.create(format='jpe') for _g in g])
return sha256(jpe_data).hexdigest()
parsed_data_hexdigest = _render_with_pydot(filepath)
original_data_hexdigest = _render_with_graphviz(filepath)
assert original_data_hexdigest == parsed_data_hexdigest
|
Add testing regressions of pydot vs graphvizfrom __future__ import division
from __future__ import print_function
import glob
import os
import subprocess
import sys
from hashlib import sha256
import pydot_ng as pydot
PY3 = not sys.version_info < (3, 0, 0)
if PY3:
NULL_SEP = b''
xrange = range
else:
NULL_SEP = ''
bytes = str
DOT_BINARY_PATH = pydot.find_graphviz()['dot']
TEST_DIR = os.path.dirname(__file__)
REGRESSION_TESTS_DIR = os.path.join(TEST_DIR, "graphs")
MY_REGRESSION_TESTS_DIR = os.path.join(TEST_DIR, "my_tests")
TESTS_DIRS = (
("regressions", REGRESSION_TESTS_DIR),
("my_regressions", MY_REGRESSION_TESTS_DIR),
)
def list_dots(path):
searchpath = os.path.join(path, "*.dot")
return [f for f in glob.glob(searchpath)]
def pytest_generate_tests(metafunc):
idlist = []
argvalues = []
for name, dir in TESTS_DIRS:
for filepath in list_dots(dir):
filename = os.path.basename(filepath)
idlist.append("{}-{}".format(name, filename))
argvalues.append((filepath,))
metafunc.parametrize(
argnames=["filepath"],
argvalues=argvalues,
ids=idlist,
scope="function",
)
def test_render_and_compare_dot_files(filepath):
def _render_with_graphviz(filename):
p = subprocess.Popen(
(DOT_BINARY_PATH, '-Tjpe'),
cwd=os.path.dirname(filename),
stdin=open(filename, 'rt'),
stderr=subprocess.PIPE, stdout=subprocess.PIPE)
stdout = p.stdout
stdout_output = list()
while True:
data = stdout.read()
if not data:
break
stdout_output.append(data)
stdout.close()
if stdout_output:
stdout_output = NULL_SEP.join(stdout_output)
# this returns a status code we should check
p.wait()
return sha256(stdout_output).hexdigest()
def _render_with_pydot(filename):
g = pydot.graph_from_dot_file(filename)
if not isinstance(g, list):
g = [g]
jpe_data = NULL_SEP.join([_g.create(format='jpe') for _g in g])
return sha256(jpe_data).hexdigest()
parsed_data_hexdigest = _render_with_pydot(filepath)
original_data_hexdigest = _render_with_graphviz(filepath)
assert original_data_hexdigest == parsed_data_hexdigest
|
<commit_before><commit_msg>Add testing regressions of pydot vs graphviz<commit_after>from __future__ import division
from __future__ import print_function
import glob
import os
import subprocess
import sys
from hashlib import sha256
import pydot_ng as pydot
PY3 = not sys.version_info < (3, 0, 0)
if PY3:
NULL_SEP = b''
xrange = range
else:
NULL_SEP = ''
bytes = str
DOT_BINARY_PATH = pydot.find_graphviz()['dot']
TEST_DIR = os.path.dirname(__file__)
REGRESSION_TESTS_DIR = os.path.join(TEST_DIR, "graphs")
MY_REGRESSION_TESTS_DIR = os.path.join(TEST_DIR, "my_tests")
TESTS_DIRS = (
("regressions", REGRESSION_TESTS_DIR),
("my_regressions", MY_REGRESSION_TESTS_DIR),
)
def list_dots(path):
searchpath = os.path.join(path, "*.dot")
return [f for f in glob.glob(searchpath)]
def pytest_generate_tests(metafunc):
idlist = []
argvalues = []
for name, dir in TESTS_DIRS:
for filepath in list_dots(dir):
filename = os.path.basename(filepath)
idlist.append("{}-{}".format(name, filename))
argvalues.append((filepath,))
metafunc.parametrize(
argnames=["filepath"],
argvalues=argvalues,
ids=idlist,
scope="function",
)
def test_render_and_compare_dot_files(filepath):
def _render_with_graphviz(filename):
p = subprocess.Popen(
(DOT_BINARY_PATH, '-Tjpe'),
cwd=os.path.dirname(filename),
stdin=open(filename, 'rt'),
stderr=subprocess.PIPE, stdout=subprocess.PIPE)
stdout = p.stdout
stdout_output = list()
while True:
data = stdout.read()
if not data:
break
stdout_output.append(data)
stdout.close()
if stdout_output:
stdout_output = NULL_SEP.join(stdout_output)
# this returns a status code we should check
p.wait()
return sha256(stdout_output).hexdigest()
def _render_with_pydot(filename):
g = pydot.graph_from_dot_file(filename)
if not isinstance(g, list):
g = [g]
jpe_data = NULL_SEP.join([_g.create(format='jpe') for _g in g])
return sha256(jpe_data).hexdigest()
parsed_data_hexdigest = _render_with_pydot(filepath)
original_data_hexdigest = _render_with_graphviz(filepath)
assert original_data_hexdigest == parsed_data_hexdigest
|
|
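The record above generates one pytest case per .dot file through the pytest_generate_tests hook; a minimal standalone sketch of that pattern (directory name and ids are illustrative, assuming pytest collects this as a conftest):

import glob
import os

def pytest_generate_tests(metafunc):
    # Parametrize any test that declares a `filepath` argument: one case per file.
    if "filepath" in metafunc.fixturenames:
        files = sorted(glob.glob(os.path.join("graphs", "*.dot")))
        metafunc.parametrize("filepath", files,
                             ids=[os.path.basename(f) for f in files])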
bc21a64589535a3ed5689ff04f826893d45f4c45
|
pygraphc/abstraction/mymethod.py
|
pygraphc/abstraction/mymethod.py
|
class MyMethod(object):
def __init__(self, graph, clusters):
self.graph = graph
self.clusters = clusters
self.count_partitions = {}
def __get_count(self):
        abstraction = {}  # keyed by cluster_id; a plain list would raise IndexError below
for cluster_id, nodes in self.clusters.iteritems():
if len(nodes) > 1:
for node_id in nodes:
message = self.graph.node[node_id]['preprocessed_event']
# get count
tokens = message.strip().split()
token_count = len(tokens)
partition_keys = self.count_partitions.keys()
if token_count not in partition_keys:
self.count_partitions[token_count] = []
self.count_partitions[token_count].append(message)
elif len(nodes) == 1:
abstraction[cluster_id] = self.graph.node[nodes[0]]['preprocessed_event']
|
Add new method for abstraction
|
Add new method for abstraction
|
Python
|
mit
|
studiawan/pygraphc
|
Add new method for abstraction
|
class MyMethod(object):
def __init__(self, graph, clusters):
self.graph = graph
self.clusters = clusters
self.count_partitions = {}
def __get_count(self):
        abstraction = {}  # keyed by cluster_id; a plain list would raise IndexError below
for cluster_id, nodes in self.clusters.iteritems():
if len(nodes) > 1:
for node_id in nodes:
message = self.graph.node[node_id]['preprocessed_event']
# get count
tokens = message.strip().split()
token_count = len(tokens)
partition_keys = self.count_partitions.keys()
if token_count not in partition_keys:
self.count_partitions[token_count] = []
self.count_partitions[token_count].append(message)
elif len(nodes) == 1:
abstraction[cluster_id] = self.graph.node[nodes[0]]['preprocessed_event']
|
<commit_before><commit_msg>Add new method for abstraction<commit_after>
|
class MyMethod(object):
def __init__(self, graph, clusters):
self.graph = graph
self.clusters = clusters
self.count_partitions = {}
def __get_count(self):
        abstraction = {}  # keyed by cluster_id; a plain list would raise IndexError below
for cluster_id, nodes in self.clusters.iteritems():
if len(nodes) > 1:
for node_id in nodes:
message = self.graph.node[node_id]['preprocessed_event']
# get count
tokens = message.strip().split()
token_count = len(tokens)
partition_keys = self.count_partitions.keys()
if token_count not in partition_keys:
self.count_partitions[token_count] = []
self.count_partitions[token_count].append(message)
elif len(nodes) == 1:
abstraction[cluster_id] = self.graph.node[nodes[0]]['preprocessed_event']
|
Add new method for abstraction
class MyMethod(object):
def __init__(self, graph, clusters):
self.graph = graph
self.clusters = clusters
self.count_partitions = {}
def __get_count(self):
        abstraction = {}  # keyed by cluster_id; a plain list would raise IndexError below
for cluster_id, nodes in self.clusters.iteritems():
if len(nodes) > 1:
for node_id in nodes:
message = self.graph.node[node_id]['preprocessed_event']
# get count
tokens = message.strip().split()
token_count = len(tokens)
partition_keys = self.count_partitions.keys()
if token_count not in partition_keys:
self.count_partitions[token_count] = []
self.count_partitions[token_count].append(message)
elif len(nodes) == 1:
abstraction[cluster_id] = self.graph.node[nodes[0]]['preprocessed_event']
|
<commit_before><commit_msg>Add new method for abstraction<commit_after>
class MyMethod(object):
def __init__(self, graph, clusters):
self.graph = graph
self.clusters = clusters
self.count_partitions = {}
def __get_count(self):
        abstraction = {}  # keyed by cluster_id; a plain list would raise IndexError below
for cluster_id, nodes in self.clusters.iteritems():
if len(nodes) > 1:
for node_id in nodes:
message = self.graph.node[node_id]['preprocessed_event']
# get count
tokens = message.strip().split()
token_count = len(tokens)
partition_keys = self.count_partitions.keys()
if token_count not in partition_keys:
self.count_partitions[token_count] = []
self.count_partitions[token_count].append(message)
elif len(nodes) == 1:
abstraction[cluster_id] = self.graph.node[nodes[0]]['preprocessed_event']
|
|
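The token-count partitioning in the record can be expressed compactly with collections.defaultdict; a sketch of the same grouping idea (hypothetical function name, not part of pygraphc):

from collections import defaultdict

def partition_by_token_count(messages):
    # Group log messages by their number of whitespace-separated tokens.
    partitions = defaultdict(list)
    for message in messages:
        partitions[len(message.strip().split())].append(message)
    return dict(partitions)

# partition_by_token_count(['a b', 'c d e']) -> {2: ['a b'], 3: ['c d e']}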
21004e02d9aa43bdfe1d3412a9b92f16e9ecc0f1
|
salt/states/influxdb_database.py
|
salt/states/influxdb_database.py
|
# -*- coding: utf-8 -*-
'''
Management of Influxdb databases
================================
(compatible with InfluxDB version 0.9+)
'''
def __virtual__():
'''
Only load if the influxdb module is available
'''
if 'influxdb.db_exists' in __salt__:
return 'influxdb_database'
return False
def present(name, **client_args):
'''
Ensure that given database is present.
name
Name of the database to create.
'''
ret = {'name': name,
'changes': {},
'result': True,
'comment': 'Database {0} is already present'.format(name)}
if not __salt__['influxdb.db_exists'](name, **client_args):
if __opts__['test']:
ret['result'] = None
ret['comment'] = 'Database {0} is absent and will be created'\
.format(name)
return ret
if __salt__['influxdb.db_create'](name, **client_args):
ret['comment'] = 'Database {0} has been created'.format(name)
ret['changes'][name] = 'Present'
return ret
else:
ret['comment'] = 'Failed to create database {0}'.format(name)
ret['result'] = False
return ret
return ret
def absent(name, **client_args):
'''
Ensure that given database is absent.
name
Name of the database to remove.
'''
ret = {'name': name,
'changes': {},
'result': True,
'comment': 'Database {0} is not present'.format(name)}
if __salt__['influxdb.db_exists'](name, **client_args):
if __opts__['test']:
ret['result'] = None
ret['comment'] = 'Database {0} is present and needs to be removed'\
.format(name)
return ret
if __salt__['influxdb.db_remove'](name, **client_args):
ret['comment'] = 'Database {0} has been removed'.format(name)
ret['changes'][name] = 'Absent'
return ret
else:
ret['comment'] = 'Failed to remove database {0}'.format(name)
ret['result'] = False
return ret
return ret
|
Add state module to manage InfluxDB databases
|
Add state module to manage InfluxDB databases
|
Python
|
apache-2.0
|
saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt
|
Add state module to manage InfluxDB databases
|
# -*- coding: utf-8 -*-
'''
Management of Influxdb databases
================================
(compatible with InfluxDB version 0.9+)
'''
def __virtual__():
'''
Only load if the influxdb module is available
'''
if 'influxdb.db_exists' in __salt__:
return 'influxdb_database'
return False
def present(name, **client_args):
'''
Ensure that given database is present.
name
Name of the database to create.
'''
ret = {'name': name,
'changes': {},
'result': True,
'comment': 'Database {0} is already present'.format(name)}
if not __salt__['influxdb.db_exists'](name, **client_args):
if __opts__['test']:
ret['result'] = None
ret['comment'] = 'Database {0} is absent and will be created'\
.format(name)
return ret
if __salt__['influxdb.db_create'](name, **client_args):
ret['comment'] = 'Database {0} has been created'.format(name)
ret['changes'][name] = 'Present'
return ret
else:
ret['comment'] = 'Failed to create database {0}'.format(name)
ret['result'] = False
return ret
return ret
def absent(name, **client_args):
'''
Ensure that given database is absent.
name
Name of the database to remove.
'''
ret = {'name': name,
'changes': {},
'result': True,
'comment': 'Database {0} is not present'.format(name)}
if __salt__['influxdb.db_exists'](name, **client_args):
if __opts__['test']:
ret['result'] = None
ret['comment'] = 'Database {0} is present and needs to be removed'\
.format(name)
return ret
if __salt__['influxdb.db_remove'](name, **client_args):
ret['comment'] = 'Database {0} has been removed'.format(name)
ret['changes'][name] = 'Absent'
return ret
else:
ret['comment'] = 'Failed to remove database {0}'.format(name)
ret['result'] = False
return ret
return ret
|
<commit_before><commit_msg>Add state module to manage InfluxDB databases<commit_after>
|
# -*- coding: utf-8 -*-
'''
Management of Influxdb databases
================================
(compatible with InfluxDB version 0.9+)
'''
def __virtual__():
'''
Only load if the influxdb module is available
'''
if 'influxdb.db_exists' in __salt__:
return 'influxdb_database'
return False
def present(name, **client_args):
'''
Ensure that given database is present.
name
Name of the database to create.
'''
ret = {'name': name,
'changes': {},
'result': True,
'comment': 'Database {0} is already present'.format(name)}
if not __salt__['influxdb.db_exists'](name, **client_args):
if __opts__['test']:
ret['result'] = None
ret['comment'] = 'Database {0} is absent and will be created'\
.format(name)
return ret
if __salt__['influxdb.db_create'](name, **client_args):
ret['comment'] = 'Database {0} has been created'.format(name)
ret['changes'][name] = 'Present'
return ret
else:
ret['comment'] = 'Failed to create database {0}'.format(name)
ret['result'] = False
return ret
return ret
def absent(name, **client_args):
'''
Ensure that given database is absent.
name
Name of the database to remove.
'''
ret = {'name': name,
'changes': {},
'result': True,
'comment': 'Database {0} is not present'.format(name)}
if __salt__['influxdb.db_exists'](name, **client_args):
if __opts__['test']:
ret['result'] = None
ret['comment'] = 'Database {0} is present and needs to be removed'\
.format(name)
return ret
if __salt__['influxdb.db_remove'](name, **client_args):
ret['comment'] = 'Database {0} has been removed'.format(name)
ret['changes'][name] = 'Absent'
return ret
else:
ret['comment'] = 'Failed to remove database {0}'.format(name)
ret['result'] = False
return ret
return ret
|
Add state module to manage InfluxDB databases# -*- coding: utf-8 -*-
'''
Management of Influxdb databases
================================
(compatible with InfluxDB version 0.9+)
'''
def __virtual__():
'''
Only load if the influxdb module is available
'''
if 'influxdb.db_exists' in __salt__:
return 'influxdb_database'
return False
def present(name, **client_args):
'''
Ensure that given database is present.
name
Name of the database to create.
'''
ret = {'name': name,
'changes': {},
'result': True,
'comment': 'Database {0} is already present'.format(name)}
if not __salt__['influxdb.db_exists'](name, **client_args):
if __opts__['test']:
ret['result'] = None
ret['comment'] = 'Database {0} is absent and will be created'\
.format(name)
return ret
if __salt__['influxdb.db_create'](name, **client_args):
ret['comment'] = 'Database {0} has been created'.format(name)
ret['changes'][name] = 'Present'
return ret
else:
ret['comment'] = 'Failed to create database {0}'.format(name)
ret['result'] = False
return ret
return ret
def absent(name, **client_args):
'''
Ensure that given database is absent.
name
Name of the database to remove.
'''
ret = {'name': name,
'changes': {},
'result': True,
'comment': 'Database {0} is not present'.format(name)}
if __salt__['influxdb.db_exists'](name, **client_args):
if __opts__['test']:
ret['result'] = None
ret['comment'] = 'Database {0} is present and needs to be removed'\
.format(name)
return ret
if __salt__['influxdb.db_remove'](name, **client_args):
ret['comment'] = 'Database {0} has been removed'.format(name)
ret['changes'][name] = 'Absent'
return ret
else:
ret['comment'] = 'Failed to remove database {0}'.format(name)
ret['result'] = False
return ret
return ret
|
<commit_before><commit_msg>Add state module to manage InfluxDB databases<commit_after># -*- coding: utf-8 -*-
'''
Management of Influxdb databases
================================
(compatible with InfluxDB version 0.9+)
'''
def __virtual__():
'''
Only load if the influxdb module is available
'''
if 'influxdb.db_exists' in __salt__:
return 'influxdb_database'
return False
def present(name, **client_args):
'''
Ensure that given database is present.
name
Name of the database to create.
'''
ret = {'name': name,
'changes': {},
'result': True,
'comment': 'Database {0} is already present'.format(name)}
if not __salt__['influxdb.db_exists'](name, **client_args):
if __opts__['test']:
ret['result'] = None
ret['comment'] = 'Database {0} is absent and will be created'\
.format(name)
return ret
if __salt__['influxdb.db_create'](name, **client_args):
ret['comment'] = 'Database {0} has been created'.format(name)
ret['changes'][name] = 'Present'
return ret
else:
ret['comment'] = 'Failed to create database {0}'.format(name)
ret['result'] = False
return ret
return ret
def absent(name, **client_args):
'''
Ensure that given database is absent.
name
Name of the database to remove.
'''
ret = {'name': name,
'changes': {},
'result': True,
'comment': 'Database {0} is not present'.format(name)}
if __salt__['influxdb.db_exists'](name, **client_args):
if __opts__['test']:
ret['result'] = None
ret['comment'] = 'Database {0} is present and needs to be removed'\
.format(name)
return ret
if __salt__['influxdb.db_remove'](name, **client_args):
ret['comment'] = 'Database {0} has been removed'.format(name)
ret['changes'][name] = 'Absent'
return ret
else:
ret['comment'] = 'Failed to remove database {0}'.format(name)
ret['result'] = False
return ret
return ret
|
|
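A hedged sketch of exercising the state's dry-run branch outside of Salt by stubbing the __salt__ and __opts__ dunders it expects (the import path is hypothetical; in production the Salt loader injects these):

import influxdb_database as state  # hypothetical local import of the module above

state.__salt__ = {'influxdb.db_exists': lambda name, **kw: False,
                  'influxdb.db_create': lambda name, **kw: True}
state.__opts__ = {'test': True}

ret = state.present('metrics')
assert ret['result'] is None                 # test mode predicts a change, does nothing
assert 'will be created' in ret['comment']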
0f38f94a283914d6aca289f8020e1c6a0551c4ee
|
plugins/ping_pong.py
|
plugins/ping_pong.py
|
"""
# Config (valid YAML document) must be at __doc__.
name: ping_pong # Name of plugin, lowercase, match with
# file or package name.
description: "Example plugin for testing bot."
config: # Config variables that need to be set
- SHELDON_PING_PONG_REPLY: '>>>' # in environment.
# You can set default values after colon.
"""
import sheldon
@sheldon.hooks.message('hello, bot')
def ping_pong():
pass
|
Add plugin for bot testing
|
Add plugin for bot testing
|
Python
|
mit
|
lises/sheldon
|
Add plugin for bot testing
|
"""
# Config (valid YAML document) must be at __doc__.
name: ping_pong # Name of plugin, lowercase, match with
# file or package name.
description: "Example plugin for testing bot."
config: # Config variables that need to be set
- SHELDON_PING_PONG_REPLY: '>>>' # in environment.
# You can set default values after colon.
"""
import sheldon
@sheldon.hooks.message('hello, bot')
def ping_pong():
pass
|
<commit_before><commit_msg>Add plugin for bot testing<commit_after>
|
"""
# Config (valid YAML document) must be at __doc__.
name: ping_pong # Name of plugin, lowercase, match with
# file or package name.
description: "Example plugin for testing bot."
config: # Config variables that need to be set
- SHELDON_PING_PONG_REPLY: '>>>' # in environment.
# You can set default values after colon.
"""
import sheldon
@sheldon.hooks.message('hello, bot')
def ping_pong():
pass
|
Add plugin for bot testing"""
# Config (valid YAML document) must be at __doc__.
name: ping_pong # Name of plugin, lowercase, match with
# file or package name.
description: "Example plugin for testing bot."
config: # Config variables that need to be set
- SHELDON_PING_PONG_REPLY: '>>>' # in environment.
# You can set default values after colon.
"""
import sheldon
@sheldon.hooks.message('hello, bot')
def ping_pong():
pass
|
<commit_before><commit_msg>Add plugin for bot testing<commit_after>"""
# Config (valid YAML document) must be at __doc__.
name: ping_pong # Name of plugin, lowercase, match with
# file or package name.
description: "Example plugin for testing bot."
config: # Config variables that need to be set
- SHELDON_PING_PONG_REPLY: '>>>' # in environment.
# You can set default values after colon.
"""
import sheldon
@sheldon.hooks.message('hello, bot')
def ping_pong():
pass
|
|
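The plugin keeps its configuration in the module docstring as YAML; a sketch of parsing it (assumes PyYAML is available and that the module above is importable as ping_pong):

import yaml
import ping_pong  # hypothetical import of the plugin module

config = yaml.safe_load(ping_pong.__doc__)
assert config['name'] == 'ping_pong'
print(config['description'])  # Example plugin for testing bot.
print(config['config'])       # [{'SHELDON_PING_PONG_REPLY': '>>>'}]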
7ad8c853854395e34afe99a3b670596a27734e66
|
suq1/rankeditems.py
|
suq1/rankeditems.py
|
# -*- coding: utf-8 -*-
# Suq1 -- An ad hoc Python toolbox for a web service
# By: Emmanuel Raviart <emmanuel@raviart.com>
#
# Copyright (C) 2009, 2010, 2011, 2012 Easter-eggs & Emmanuel Raviart
# Copyright (C) 2013, 2014 Easter-eggs, Etalab & Emmanuel Raviart
# https://github.com/eraviart/suq1
#
# This file is part of Suq1.
#
# Suq1 is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# Suq1 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Helpers to handle ranked items (ie a list of items sorted by rank; items of the same rank being in the same sublist)
"""
def iter_ranked_items(items):
if items is not None:
for rank, same_rank_items in enumerate(items):
for item in iter_same_rank_items(same_rank_items):
yield rank, item
def iter_same_rank_items(same_rank_items):
if same_rank_items is not None:
if isinstance(same_rank_items, list):
for item in same_rank_items:
yield item
else:
yield same_rank_items
|
Add module to handle ranked items (for Condorcet-Schulze voting method).
|
Add module to handle ranked items (for Condorcet-Schulze voting method).
|
Python
|
agpl-3.0
|
plastic-data/suq1
|
Add module to handle ranked items (for Condorcet-Schulze voting method).
|
# -*- coding: utf-8 -*-
# Suq1 -- An ad hoc Python toolbox for a web service
# By: Emmanuel Raviart <emmanuel@raviart.com>
#
# Copyright (C) 2009, 2010, 2011, 2012 Easter-eggs & Emmanuel Raviart
# Copyright (C) 2013, 2014 Easter-eggs, Etalab & Emmanuel Raviart
# https://github.com/eraviart/suq1
#
# This file is part of Suq1.
#
# Suq1 is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# Suq1 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Helpers to handle ranked items (ie a list of items sorted by rank; items of the same rank being in the same sublist)
"""
def iter_ranked_items(items):
if items is not None:
for rank, same_rank_items in enumerate(items):
for item in iter_same_rank_items(same_rank_items):
yield rank, item
def iter_same_rank_items(same_rank_items):
if same_rank_items is not None:
if isinstance(same_rank_items, list):
for item in same_rank_items:
yield item
else:
yield same_rank_items
|
<commit_before><commit_msg>Add module to handle ranked items (for Condorcet-Schulze voting method).<commit_after>
|
# -*- coding: utf-8 -*-
# Suq1 -- An ad hoc Python toolbox for a web service
# By: Emmanuel Raviart <emmanuel@raviart.com>
#
# Copyright (C) 2009, 2010, 2011, 2012 Easter-eggs & Emmanuel Raviart
# Copyright (C) 2013, 2014 Easter-eggs, Etalab & Emmanuel Raviart
# https://github.com/eraviart/suq1
#
# This file is part of Suq1.
#
# Suq1 is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# Suq1 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Helpers to handle ranked items (ie a list of items sorted by rank; items of the same rank being in the same sublist)
"""
def iter_ranked_items(items):
if items is not None:
for rank, same_rank_items in enumerate(items):
for item in iter_same_rank_items(same_rank_items):
yield rank, item
def iter_same_rank_items(same_rank_items):
if same_rank_items is not None:
if isinstance(same_rank_items, list):
for item in same_rank_items:
yield item
else:
yield same_rank_items
|
Add module to handle ranked items (for Condorcet-Schulze voting method).# -*- coding: utf-8 -*-
# Suq1 -- An ad hoc Python toolbox for a web service
# By: Emmanuel Raviart <emmanuel@raviart.com>
#
# Copyright (C) 2009, 2010, 2011, 2012 Easter-eggs & Emmanuel Raviart
# Copyright (C) 2013, 2014 Easter-eggs, Etalab & Emmanuel Raviart
# https://github.com/eraviart/suq1
#
# This file is part of Suq1.
#
# Suq1 is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# Suq1 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Helpers to handle ranked items (ie a list of items sorted by rank; items of the same rank being in the same sublist)
"""
def iter_ranked_items(items):
if items is not None:
for rank, same_rank_items in enumerate(items):
for item in iter_same_rank_items(same_rank_items):
yield rank, item
def iter_same_rank_items(same_rank_items):
if same_rank_items is not None:
if isinstance(same_rank_items, list):
for item in same_rank_items:
yield item
else:
yield same_rank_items
|
<commit_before><commit_msg>Add module to handle ranked items (for Condorcet-Schulze voting method).<commit_after># -*- coding: utf-8 -*-
# Suq1 -- An ad hoc Python toolbox for a web service
# By: Emmanuel Raviart <emmanuel@raviart.com>
#
# Copyright (C) 2009, 2010, 2011, 2012 Easter-eggs & Emmanuel Raviart
# Copyright (C) 2013, 2014 Easter-eggs, Etalab & Emmanuel Raviart
# https://github.com/eraviart/suq1
#
# This file is part of Suq1.
#
# Suq1 is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# Suq1 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Helpers to handle ranked items (ie a list of items sorted by rank; items of the same rank being in the same sublist)
"""
def iter_ranked_items(items):
if items is not None:
for rank, same_rank_items in enumerate(items):
for item in iter_same_rank_items(same_rank_items):
yield rank, item
def iter_same_rank_items(same_rank_items):
if same_rank_items is not None:
if isinstance(same_rank_items, list):
for item in same_rank_items:
yield item
else:
yield same_rank_items
|
|
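A short usage sketch of the two generators, with a hypothetical ballot in which two items share the first rank:

from suq1.rankeditems import iter_ranked_items

ballot = [['Alice', 'Bob'], 'Carol', ['Dave']]
print(list(iter_ranked_items(ballot)))
# -> [(0, 'Alice'), (0, 'Bob'), (1, 'Carol'), (2, 'Dave')]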
951711b6c3e792de56b23423627a5f6faefe3353
|
python/FresStream.py
|
python/FresStream.py
|
import threading
import time
from functools import partial
from queue import Empty
import msgpack
import zmq
import infupy.backends.fresenius as fresenius
zmqhost = '127.0.0.1'
zmqport = 4201
freseniusport = 'COM5'
def stateWorker(stopevent):
context = zmq.Context()
    zmqsocket = context.socket(zmq.PUB)
    zmqsocket.bind(f'tcp://{zmqhost}:{zmqport}')  # bind the publisher; zmqhost/zmqport were otherwise unused
comm = fresenius.FreseniusComm(freseniusport)
base = fresenius.FreseniusBase(comm)
paramVolumeWorker = partial(volumeWorker, comm.eventq, zmqsocket, stopevent)
volumeThread = threading.Thread(target=paramVolumeWorker)
volumeThread.start()
while not stopevent.wait(5):
# Check Base
try:
dtype = base.readDeviceType()
except fresenius.CommandError as e:
print(f'Base error: {e}')
try:
base = fresenius.FreseniusBase(comm)
except fresenius.CommandError as e:
print(f'Failed to connect to base: {e}')
# Check Syringes
try:
modids = base.listModules()
for modid in modids:
if not modid in base.syringes:
s = base.connectSyringe(modid)
s.registerEvent(fresenius.VarId.volume)
except fresenius.CommandError as e:
print(f'Attach syringe error: {e}')
print('Stopping')
volumeThread.join()
print('volumeThread joined')
for s in base.syringes.values():
print(f'Disconnecting syringe {s.index}')
s.disconnect()
print(f'Disconnecting Base')
base.disconnect()
def volumeWorker(queue, zmqsocket, stopevent):
while not stopevent.is_set():
try:
dt, origin, inmsg = queue.get(timeout=1)
except Empty:
continue
print(f'Received: {origin} {inmsg} {dt}')
try:
volume = fresenius.extractVolume(inmsg)
except ValueError:
            print(f"Failed to decode volume from: {inmsg}")
continue
print(f'Volume: {volume}')
timestamp = int(dt.timestamp() * 1e9)
value = {f'syringe{origin}_volume' : volume}
sample = {'basetime' : timestamp, 'data' : value, 'tags' : {}, 'meta' : {}}
packed = msgpack.packb(sample)
outmsg = b'infupy ' + packed
print(f'Sending: {outmsg}')
zmqsocket.send(outmsg)
if __name__ == '__main__':
stopevent = threading.Event()
stateThread = threading.Thread(target=stateWorker, args=[stopevent])
stateThread.start()
while not stopevent.is_set():
try:
time.sleep(1)
except KeyboardInterrupt:
stopevent.set()
stateThread.join()
print('stateThread joined')
|
Add python script that uses the infupy library and streams to zmq
|
Add python script that uses the infupy library and streams to zmq
|
Python
|
isc
|
jaj42/hsmedstream,jaj42/phystream
|
Add python script that uses the infupy library and streams to zmq
|
import threading
import time
from functools import partial
from queue import Empty
import msgpack
import zmq
import infupy.backends.fresenius as fresenius
zmqhost = '127.0.0.1'
zmqport = 4201
freseniusport = 'COM5'
def stateWorker(stopevent):
context = zmq.Context()
    zmqsocket = context.socket(zmq.PUB)
    zmqsocket.bind(f'tcp://{zmqhost}:{zmqport}')  # bind the publisher; zmqhost/zmqport were otherwise unused
comm = fresenius.FreseniusComm(freseniusport)
base = fresenius.FreseniusBase(comm)
paramVolumeWorker = partial(volumeWorker, comm.eventq, zmqsocket, stopevent)
volumeThread = threading.Thread(target=paramVolumeWorker)
volumeThread.start()
while not stopevent.wait(5):
# Check Base
try:
dtype = base.readDeviceType()
except fresenius.CommandError as e:
print(f'Base error: {e}')
try:
base = fresenius.FreseniusBase(comm)
except fresenius.CommandError as e:
print(f'Failed to connect to base: {e}')
# Check Syringes
try:
modids = base.listModules()
for modid in modids:
if not modid in base.syringes:
s = base.connectSyringe(modid)
s.registerEvent(fresenius.VarId.volume)
except fresenius.CommandError as e:
print(f'Attach syringe error: {e}')
print('Stopping')
volumeThread.join()
print('volumeThread joined')
for s in base.syringes.values():
print(f'Disconnecting syringe {s.index}')
s.disconnect()
print(f'Disconnecting Base')
base.disconnect()
def volumeWorker(queue, zmqsocket, stopevent):
while not stopevent.is_set():
try:
dt, origin, inmsg = queue.get(timeout=1)
except Empty:
continue
print(f'Received: {origin} {inmsg} {dt}')
try:
volume = fresenius.extractVolume(inmsg)
except ValueError:
            print(f"Failed to decode volume from: {inmsg}")
continue
print(f'Volume: {volume}')
timestamp = int(dt.timestamp() * 1e9)
value = {f'syringe{origin}_volume' : volume}
sample = {'basetime' : timestamp, 'data' : value, 'tags' : {}, 'meta' : {}}
packed = msgpack.packb(sample)
outmsg = b'infupy ' + packed
print(f'Sending: {outmsg}')
zmqsocket.send(outmsg)
if __name__ == '__main__':
stopevent = threading.Event()
stateThread = threading.Thread(target=stateWorker, args=[stopevent])
stateThread.start()
while not stopevent.is_set():
try:
time.sleep(1)
except KeyboardInterrupt:
stopevent.set()
stateThread.join()
print('stateThread joined')
|
<commit_before><commit_msg>Add python script that uses the infupy library and streams to zmq<commit_after>
|
import threading
import time
from functools import partial
from queue import Empty
import msgpack
import zmq
import infupy.backends.fresenius as fresenius
zmqhost = '127.0.0.1'
zmqport = 4201
freseniusport = 'COM5'
def stateWorker(stopevent):
context = zmq.Context()
    zmqsocket = context.socket(zmq.PUB)
    zmqsocket.bind(f'tcp://{zmqhost}:{zmqport}')  # bind the publisher; zmqhost/zmqport were otherwise unused
comm = fresenius.FreseniusComm(freseniusport)
base = fresenius.FreseniusBase(comm)
paramVolumeWorker = partial(volumeWorker, comm.eventq, zmqsocket, stopevent)
volumeThread = threading.Thread(target=paramVolumeWorker)
volumeThread.start()
while not stopevent.wait(5):
# Check Base
try:
dtype = base.readDeviceType()
except fresenius.CommandError as e:
print(f'Base error: {e}')
try:
base = fresenius.FreseniusBase(comm)
except fresenius.CommandError as e:
print(f'Failed to connect to base: {e}')
# Check Syringes
try:
modids = base.listModules()
for modid in modids:
if not modid in base.syringes:
s = base.connectSyringe(modid)
s.registerEvent(fresenius.VarId.volume)
except fresenius.CommandError as e:
print(f'Attach syringe error: {e}')
print('Stopping')
volumeThread.join()
print('volumeThread joined')
for s in base.syringes.values():
print(f'Disconnecting syringe {s.index}')
s.disconnect()
print(f'Disconnecting Base')
base.disconnect()
def volumeWorker(queue, zmqsocket, stopevent):
while not stopevent.is_set():
try:
dt, origin, inmsg = queue.get(timeout=1)
except Empty:
continue
print(f'Received: {origin} {inmsg} {dt}')
try:
volume = fresenius.extractVolume(inmsg)
except ValueError:
            print(f"Failed to decode volume from: {inmsg}")
continue
print(f'Volume: {volume}')
timestamp = int(dt.timestamp() * 1e9)
value = {f'syringe{origin}_volume' : volume}
sample = {'basetime' : timestamp, 'data' : value, 'tags' : {}, 'meta' : {}}
packed = msgpack.packb(sample)
outmsg = b'infupy ' + packed
print(f'Sending: {outmsg}')
zmqsocket.send(outmsg)
if __name__ == '__main__':
stopevent = threading.Event()
stateThread = threading.Thread(target=stateWorker, args=[stopevent])
stateThread.start()
while not stopevent.is_set():
try:
time.sleep(1)
except KeyboardInterrupt:
stopevent.set()
stateThread.join()
print('stateThread joined')
|
Add python script that uses the infupy library and streams to zmqimport threading
import time
from functools import partial
from queue import Empty
import msgpack
import zmq
import infupy.backends.fresenius as fresenius
zmqhost = '127.0.0.1'
zmqport = 4201
freseniusport = 'COM5'
def stateWorker(stopevent):
context = zmq.Context()
    zmqsocket = context.socket(zmq.PUB)
    zmqsocket.bind(f'tcp://{zmqhost}:{zmqport}')  # bind the publisher; zmqhost/zmqport were otherwise unused
comm = fresenius.FreseniusComm(freseniusport)
base = fresenius.FreseniusBase(comm)
paramVolumeWorker = partial(volumeWorker, comm.eventq, zmqsocket, stopevent)
volumeThread = threading.Thread(target=paramVolumeWorker)
volumeThread.start()
while not stopevent.wait(5):
# Check Base
try:
dtype = base.readDeviceType()
except fresenius.CommandError as e:
print(f'Base error: {e}')
try:
base = fresenius.FreseniusBase(comm)
except fresenius.CommandError as e:
print(f'Failed to connect to base: {e}')
# Check Syringes
try:
modids = base.listModules()
for modid in modids:
if not modid in base.syringes:
s = base.connectSyringe(modid)
s.registerEvent(fresenius.VarId.volume)
except fresenius.CommandError as e:
print(f'Attach syringe error: {e}')
print('Stopping')
volumeThread.join()
print('volumeThread joined')
for s in base.syringes.values():
print(f'Disconnecting syringe {s.index}')
s.disconnect()
print(f'Disconnecting Base')
base.disconnect()
def volumeWorker(queue, zmqsocket, stopevent):
while not stopevent.is_set():
try:
dt, origin, inmsg = queue.get(timeout=1)
except Empty:
continue
print(f'Received: {origin} {inmsg} {dt}')
try:
volume = fresenius.extractVolume(inmsg)
except ValueError:
            print(f"Failed to decode volume from: {inmsg}")
continue
print(f'Volume: {volume}')
timestamp = int(dt.timestamp() * 1e9)
value = {f'syringe{origin}_volume' : volume}
sample = {'basetime' : timestamp, 'data' : value, 'tags' : {}, 'meta' : {}}
packed = msgpack.packb(sample)
outmsg = b'infupy ' + packed
print(f'Sending: {outmsg}')
zmqsocket.send(outmsg)
if __name__ == '__main__':
stopevent = threading.Event()
stateThread = threading.Thread(target=stateWorker, args=[stopevent])
stateThread.start()
while not stopevent.is_set():
try:
time.sleep(1)
except KeyboardInterrupt:
stopevent.set()
stateThread.join()
print('stateThread joined')
|
<commit_before><commit_msg>Add python script that uses the infupy library and streams to zmq<commit_after>import threading
import time
from functools import partial
from queue import Empty
import msgpack
import zmq
import infupy.backends.fresenius as fresenius
zmqhost = '127.0.0.1'
zmqport = 4201
freseniusport = 'COM5'
def stateWorker(stopevent):
context = zmq.Context()
    zmqsocket = context.socket(zmq.PUB)
    zmqsocket.bind(f'tcp://{zmqhost}:{zmqport}')  # bind the publisher; zmqhost/zmqport were otherwise unused
comm = fresenius.FreseniusComm(freseniusport)
base = fresenius.FreseniusBase(comm)
paramVolumeWorker = partial(volumeWorker, comm.eventq, zmqsocket, stopevent)
volumeThread = threading.Thread(target=paramVolumeWorker)
volumeThread.start()
while not stopevent.wait(5):
# Check Base
try:
dtype = base.readDeviceType()
except fresenius.CommandError as e:
print(f'Base error: {e}')
try:
base = fresenius.FreseniusBase(comm)
except fresenius.CommandError as e:
print(f'Failed to connect to base: {e}')
# Check Syringes
try:
modids = base.listModules()
for modid in modids:
if not modid in base.syringes:
s = base.connectSyringe(modid)
s.registerEvent(fresenius.VarId.volume)
except fresenius.CommandError as e:
print(f'Attach syringe error: {e}')
print('Stopping')
volumeThread.join()
print('volumeThread joined')
for s in base.syringes.values():
print(f'Disconnecting syringe {s.index}')
s.disconnect()
print(f'Disconnecting Base')
base.disconnect()
def volumeWorker(queue, zmqsocket, stopevent):
while not stopevent.is_set():
try:
dt, origin, inmsg = queue.get(timeout=1)
except Empty:
continue
print(f'Received: {origin} {inmsg} {dt}')
try:
volume = fresenius.extractVolume(inmsg)
except ValueError:
            print(f"Failed to decode volume from: {inmsg}")
continue
print(f'Volume: {volume}')
timestamp = int(dt.timestamp() * 1e9)
value = {f'syringe{origin}_volume' : volume}
sample = {'basetime' : timestamp, 'data' : value, 'tags' : {}, 'meta' : {}}
packed = msgpack.packb(sample)
outmsg = b'infupy ' + packed
print(f'Sending: {outmsg}')
zmqsocket.send(outmsg)
if __name__ == '__main__':
stopevent = threading.Event()
stateThread = threading.Thread(target=stateWorker, args=[stopevent])
stateThread.start()
while not stopevent.is_set():
try:
time.sleep(1)
except KeyboardInterrupt:
stopevent.set()
stateThread.join()
print('stateThread joined')
|
|
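Each published message is the topic prefix b'infupy ' followed by a msgpack-encoded sample; a matching subscriber sketch (the endpoint mirrors the zmqhost/zmqport constants above and is an assumption about where the publisher binds):

import msgpack
import zmq

context = zmq.Context()
sub = context.socket(zmq.SUB)
sub.connect('tcp://127.0.0.1:4201')
sub.setsockopt(zmq.SUBSCRIBE, b'infupy')

raw = sub.recv()
topic, _, payload = raw.partition(b' ')
sample = msgpack.unpackb(payload)
# Depending on the msgpack version, keys may decode as bytes or str.
print(sample)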
a044a49b019f369aa5738e13004060a2e7d2a01c
|
evdemo/server/conf/connection_settings.py
|
evdemo/server/conf/connection_settings.py
|
# This file is auto-generated by the `evennia connections` wizard.
# Don't edit manually, your changes will be overwritten.
GAME_INDEX_ENABLED = True
GAME_INDEX_LISTING = \
{ 'game_status': 'launched',
'game_website': 'https://demo.evennia.com',
'listing_contact': 'a@a.com',
'long_description': 'The Evennia demo server shows off a standard install '
'of [Evennia](http://www.evennia.com), with a '
'single-player tutorial world as well as '
'the *Evscaperoom* - a full story-rich escaperoom-style game!\n\n'
'People can play around as builders and '
'explore some of Evennia\'s functionality. You can also chat '
'to developers in the Evennia IRC channel directly from '
'the demo!\n\nMax idle time is seven days and the demo '
'may be reset without notice, at which point you '
'need to recreate your account.',
'short_description': 'The Evennia demo server',
'telnet_hostname': 'demo.evennia.com',
'telnet_port': '4000',
'web_client_url': 'https://demo.evennia.com/webclient'}
|
Add demo connection setting to git
|
Add demo connection setting to git
|
Python
|
bsd-3-clause
|
evennia/evdemo
|
Add demo connection setting to git
|
# This file is auto-generated by the `evennia connections` wizard.
# Don't edit manually, your changes will be overwritten.
GAME_INDEX_ENABLED = True
GAME_INDEX_LISTING = \
{ 'game_status': 'launched',
'game_website': 'https://demo.evennia.com',
'listing_contact': 'a@a.com',
'long_description': 'The Evennia demo server shows off a standard install '
'of [Evennia](http://www.evennia.com), with a '
'single-player tutorial world as well as '
'the *Evscaperoom* - a full story-rich escaperoom-style game!\n\n'
'People can play around as builders and '
'explore some of Evennia\'s functionality. You can also chat '
'to developers in the Evennia IRC channel directly from '
'the demo!\n\nMax idle time is seven days and the demo '
'may be reset without notice, at which point you '
'need to recreate your account.',
'short_description': 'The Evennia demo server',
'telnet_hostname': 'demo.evennia.com',
'telnet_port': '4000',
'web_client_url': 'https://demo.evennia.com/webclient'}
|
<commit_before><commit_msg>Add demo connection setting to git<commit_after>
|
# This file is auto-generated by the `evennia connections` wizard.
# Don't edit manually, your changes will be overwritten.
GAME_INDEX_ENABLED = True
GAME_INDEX_LISTING = \
{ 'game_status': 'launched',
'game_website': 'https://demo.evennia.com',
'listing_contact': 'a@a.com',
'long_description': 'The Evennia demo server shows off a standard install '
'of [Evennia](http://www.evennia.com), with a '
'single-player tutorial world as well as '
'the *Evscaperoom* - a full story-rich escaperoom-style game!\n\n'
'People can play around as builders and '
'explore some of Evennia\'s functionality. You can also chat '
'to developers in the Evennia IRC channel directly from '
'the demo!\n\nMax idle time is seven days and the demo '
'may be reset without notice, at which point you '
'need to recreate your account.',
'short_description': 'The Evennia demo server',
'telnet_hostname': 'demo.evennia.com',
'telnet_port': '4000',
'web_client_url': 'https://demo.evennia.com/webclient'}
|
Add demo connection setting to git# This file is auto-generated by the `evennia connections` wizard.
# Don't edit manually, your changes will be overwritten.
GAME_INDEX_ENABLED = True
GAME_INDEX_LISTING = \
{ 'game_status': 'launched',
'game_website': 'https://demo.evennia.com',
'listing_contact': 'a@a.com',
'long_description': 'The Evennia demo server shows off a standard install '
'of [Evennia](http://www.evennia.com), with a '
'single-player tutorial world as well as '
'the *Evscaperoom* - a full story-rich escaperoom-style game!\n\n'
'People can play around as builders and '
'explore some of Evennia\'s functionality. You can also chat '
'to developers in the Evennia IRC channel directly from '
'the demo!\n\nMax idle time is seven days and the demo '
'may be reset without notice, at which point you '
'need to recreate your account.',
'short_description': 'The Evennia demo server',
'telnet_hostname': 'demo.evennia.com',
'telnet_port': '4000',
'web_client_url': 'https://demo.evennia.com/webclient'}
|
<commit_before><commit_msg>Add demo connection setting to git<commit_after># This file is auto-generated by the `evennia connections` wizard.
# Don't edit manually, your changes will be overwritten.
GAME_INDEX_ENABLED = True
GAME_INDEX_LISTING = \
{ 'game_status': 'launched',
'game_website': 'https://demo.evennia.com',
'listing_contact': 'a@a.com',
'long_description': 'The Evennia demo server shows off a standard install '
'of [Evennia](http://www.evennia.com), with a '
'single-player tutorial world as well as '
'the *Evscaperoom* - a full story-rich escaperoom-style game!\n\n'
'People can play around as builders and '
'explore some of Evennia\'s functionality. You can also chat '
'to developers in the Evennia IRC channel directly from '
'the demo!\n\nMax idle time is seven days and the demo '
'may be reset without notice, at which point you '
'need to recreate your account.',
'short_description': 'The Evennia demo server',
'telnet_hostname': 'demo.evennia.com',
'telnet_port': '4000',
'web_client_url': 'https://demo.evennia.com/webclient'}
|
|
5c08f299c0022a2d94c77dbe9f91a5fb3dec5b4d
|
CASA_tools/casa_tools/mytools.py
|
CASA_tools/casa_tools/mytools.py
|
'''
Define my own version of CASA tools that return python errors if they fail.
'''
from tasks import split, uvsub, concat, clean, rmtables
from .graceful_error_catch import catch_fail
def mysplit(**kwargs):
return catch_fail(split, **kwargs)
def myuvsub(**kwargs):
return catch_fail(uvsub, **kwargs)
def myconcat(**kwargs):
return catch_fail(concat, **kwargs)
def myclean(**kwargs):
return catch_fail(clean, **kwargs)
def myrmtables(**kwargs):
return catch_fail(rmtables, **kwargs)
|
Define casa tasks that fail with python errors
|
Define casa tasks that fail with python errors
|
Python
|
mit
|
e-koch/VLA_Lband,e-koch/VLA_Lband
|
Define casa tasks that fail with python errors
|
'''
Define my own version of CASA tools that return python errors if they fail.
'''
from tasks import split, uvsub, concat, clean, rmtables
from .graceful_error_catch import catch_fail
def mysplit(**kwargs):
return catch_fail(split, **kwargs)
def myuvsub(**kwargs):
return catch_fail(uvsub, **kwargs)
def myconcat(**kwargs):
return catch_fail(concat, **kwargs)
def myclean(**kwargs):
return catch_fail(clean, **kwargs)
def myrmtables(**kwargs):
return catch_fail(rmtables, **kwargs)
|
<commit_before><commit_msg>Define casa tasks that fail with python errors<commit_after>
|
'''
Define my own version of CASA tools that return python errors if they fail.
'''
from tasks import split, uvsub, concat, clean, rmtables
from .graceful_error_catch import catch_fail
def mysplit(**kwargs):
return catch_fail(split, **kwargs)
def myuvsub(**kwargs):
return catch_fail(uvsub, **kwargs)
def myconcat(**kwargs):
return catch_fail(concat, **kwargs)
def myclean(**kwargs):
return catch_fail(clean, **kwargs)
def myrmtables(**kwargs):
return catch_fail(rmtables, **kwargs)
|
Define casa tasks that fail with python errors
'''
Define my own version of CASA tools that return python errors if they fail.
'''
from tasks import split, uvsub, concat, clean, rmtables
from .graceful_error_catch import catch_fail
def mysplit(**kwargs):
return catch_fail(split, **kwargs)
def myuvsub(**kwargs):
return catch_fail(uvsub, **kwargs)
def myconcat(**kwargs):
return catch_fail(concat, **kwargs)
def myclean(**kwargs):
return catch_fail(clean, **kwargs)
def myrmtables(**kwargs):
return catch_fail(rmtables, **kwargs)
|
<commit_before><commit_msg>Define casa tasks that fail with python errors<commit_after>
'''
Define my own version of CASA tools that return python errors if they fail.
'''
from tasks import split, uvsub, concat, clean, rmtables
from .graceful_error_catch import catch_fail
def mysplit(**kwargs):
return catch_fail(split, **kwargs)
def myuvsub(**kwargs):
return catch_fail(uvsub, **kwargs)
def myconcat(**kwargs):
return catch_fail(concat, **kwargs)
def myclean(**kwargs):
return catch_fail(clean, **kwargs)
def myrmtables(**kwargs):
return catch_fail(rmtables, **kwargs)
|
|
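The graceful_error_catch module the record imports is not shown; a plausible minimal catch_fail, under the assumption that CASA tasks signal failure by returning False (an illustration, not the actual helper):

def catch_fail(task, **kwargs):
    # Run a CASA task and raise a Python error instead of returning a silent False.
    out = task(**kwargs)
    if out is False:
        raise RuntimeError(
            "{0} failed with inputs: {1}".format(task.__name__, kwargs))
    return out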
f7a53f7dc09e44fd40367c7c973372314a476912
|
ideascube/conf/kb_gin_conakry.py
|
ideascube/conf/kb_gin_conakry.py
|
# -*- coding: utf-8 -*-
"""KoomBook conf"""
from .kb import * # noqa
LANGUAGE_CODE = 'fr'
IDEASCUBE_NAME = 'CONAKRY'
HOME_CARDS = STAFF_HOME_CARDS + [
{
'id': 'koombookedu',
},
{
'id': 'bsfcampus',
},
]
|
Add conf file for Conakry devices
|
Add conf file for Conakry devices
|
Python
|
agpl-3.0
|
ideascube/ideascube,ideascube/ideascube,ideascube/ideascube,ideascube/ideascube
|
Add conf file for Conakry devices
|
# -*- coding: utf-8 -*-
"""KoomBook conf"""
from .kb import * # noqa
LANGUAGE_CODE = 'fr'
IDEASCUBE_NAME = 'CONAKRY'
HOME_CARDS = STAFF_HOME_CARDS + [
{
'id': 'koombookedu',
},
{
'id': 'bsfcampus',
},
]
|
<commit_before><commit_msg>Add conf file for Conakry devices<commit_after>
|
# -*- coding: utf-8 -*-
"""KoomBook conf"""
from .kb import * # noqa
LANGUAGE_CODE = 'fr'
IDEASCUBE_NAME = 'CONAKRY'
HOME_CARDS = STAFF_HOME_CARDS + [
{
'id': 'koombookedu',
},
{
'id': 'bsfcampus',
},
]
|
Add conf file for Conakry devices# -*- coding: utf-8 -*-
"""KoomBook conf"""
from .kb import * # noqa
LANGUAGE_CODE = 'fr'
IDEASCUBE_NAME = 'CONAKRY'
HOME_CARDS = STAFF_HOME_CARDS + [
{
'id': 'koombookedu',
},
{
'id': 'bsfcampus',
},
]
|
<commit_before><commit_msg>Add conf file for Conakry devices<commit_after># -*- coding: utf-8 -*-
"""KoomBook conf"""
from .kb import * # noqa
LANGUAGE_CODE = 'fr'
IDEASCUBE_NAME = 'CONAKRY'
HOME_CARDS = STAFF_HOME_CARDS + [
{
'id': 'koombookedu',
},
{
'id': 'bsfcampus',
},
]
|
|
bed17c14920fdc2c4e1d9413da4188a64fc4352e
|
test_atmospheres.py
|
test_atmospheres.py
|
# coding: utf-8
""" Test the model atmosphere interpolator """
__author__ = "Andy Casey <andy@astrowizici.st>"
import os
import logging
import numpy as np
import matplotlib.pyplot as plt
from atmosphere import AtmosphereInterpolator as atmospheres, \
parsers
atmosphere_type = "MARCS (2011)"
solar_model_filename = "sun.mod.gz"
folder, parser = parsers[atmosphere_type]
interpolator = atmospheres(folder, parser())
# Parse the solar atmosphere
solar_thermal_structure = interpolator.parser.parse_thermal_structure(
os.path.join(folder, solar_model_filename))
# These were taken directly from sun.mod.gz
truths = [5777, np.log10(2.7542e+04), 0., 0.]
# Interpolate the thermal structure for the truth values
interpolated_thermal_structure = interpolator.interpolate_thermal_structure(*truths)
# Generate comparisons
x = "k"
interpolated_properties = set(interpolated_thermal_structure.dtype.names).difference(x)
K = int(np.ceil(len(interpolated_properties)**0.5))
fig, axes = plt.subplots(K, K)
for i, (ax, y) in enumerate(zip(axes.flatten(), interpolated_properties)):
#ax.plot(solar_thermal_structure[x], solar_thermal_structure[y], 'k')
#ax.plot(interpolated_thermal_structure[x], interpolated_thermal_structure[y], 'b')
# Ensure the relative difference is less than 5%
relative_difference = 100 * (solar_thermal_structure[y] - interpolated_thermal_structure[y])/solar_thermal_structure[y]
finite = np.isfinite(relative_difference)
if not np.all(relative_difference[finite] < 5.):
logging.warn("Relative difference in {0} exceeds 5% ({1} > 5)!".format(y, int(np.max(relative_difference))))
ax.plot(solar_thermal_structure[x], relative_difference, 'k')
ax.set_xlabel(x)
ax.set_ylabel(y)
[each.set_visible(False) for each in axes.flatten()[len(interpolated_properties):]]
|
Test MARCS model atmosphere interpolator
|
Test MARCS model atmosphere interpolator
|
Python
|
mit
|
andycasey/precise-objective-differential-spectroscopy
|
Test MARCS model atmosphere interpolator
|
# coding: utf-8
""" Test the model atmosphere interpolator """
__author__ = "Andy Casey <andy@astrowizici.st>"
import os
import logging
import numpy as np
import matplotlib.pyplot as plt
from atmosphere import AtmosphereInterpolator as atmospheres, \
parsers
atmosphere_type = "MARCS (2011)"
solar_model_filename = "sun.mod.gz"
folder, parser = parsers[atmosphere_type]
interpolator = atmospheres(folder, parser())
# Parse the solar atmosphere
solar_thermal_structure = interpolator.parser.parse_thermal_structure(
os.path.join(folder, solar_model_filename))
# These were taken directly from sun.mod.gz
truths = [5777, np.log10(2.7542e+04), 0., 0.]
# Interpolate the thermal structure for the truth values
interpolated_thermal_structure = interpolator.interpolate_thermal_structure(*truths)
# Generate comparisons
x = "k"
interpolated_properties = set(interpolated_thermal_structure.dtype.names).difference(x)
K = int(np.ceil(len(interpolated_properties)**0.5))
fig, axes = plt.subplots(K, K)
for i, (ax, y) in enumerate(zip(axes.flatten(), interpolated_properties)):
#ax.plot(solar_thermal_structure[x], solar_thermal_structure[y], 'k')
#ax.plot(interpolated_thermal_structure[x], interpolated_thermal_structure[y], 'b')
# Ensure the relative difference is less than 5%
relative_difference = 100 * (solar_thermal_structure[y] - interpolated_thermal_structure[y])/solar_thermal_structure[y]
finite = np.isfinite(relative_difference)
if not np.all(relative_difference[finite] < 5.):
logging.warn("Relative difference in {0} exceeds 5% ({1} > 5)!".format(y, int(np.max(relative_difference))))
ax.plot(solar_thermal_structure[x], relative_difference, 'k')
ax.set_xlabel(x)
ax.set_ylabel(y)
[each.set_visible(False) for each in axes.flatten()[len(interpolated_properties):]]
|
<commit_before><commit_msg>Test MARCS model atmosphere interpolator<commit_after>
|
# coding: utf-8
""" Test the model atmosphere interpolator """
__author__ = "Andy Casey <andy@astrowizici.st>"
import os
import logging
import numpy as np
import matplotlib.pyplot as plt
from atmosphere import AtmosphereInterpolator as atmospheres, \
parsers
atmosphere_type = "MARCS (2011)"
solar_model_filename = "sun.mod.gz"
folder, parser = parsers[atmosphere_type]
interpolator = atmospheres(folder, parser())
# Parse the solar atmosphere
solar_thermal_structure = interpolator.parser.parse_thermal_structure(
os.path.join(folder, solar_model_filename))
# These were taken directly from sun.mod.gz
truths = [5777, np.log10(2.7542e+04), 0., 0.]
# Interpolate the thermal structure for the truth values
interpolated_thermal_structure = interpolator.interpolate_thermal_structure(*truths)
# Generate comparisons
x = "k"
interpolated_properties = set(interpolated_thermal_structure.dtype.names).difference(x)
K = int(np.ceil(len(interpolated_properties)**0.5))
fig, axes = plt.subplots(K, K)
for i, (ax, y) in enumerate(zip(axes.flatten(), interpolated_properties)):
#ax.plot(solar_thermal_structure[x], solar_thermal_structure[y], 'k')
#ax.plot(interpolated_thermal_structure[x], interpolated_thermal_structure[y], 'b')
# Ensure the relative difference is less than 5%
relative_difference = 100 * (solar_thermal_structure[y] - interpolated_thermal_structure[y])/solar_thermal_structure[y]
finite = np.isfinite(relative_difference)
if not np.all(relative_difference[finite] < 5.):
logging.warn("Relative difference in {0} exceeds 5% ({1} > 5)!".format(y, int(np.max(relative_difference))))
ax.plot(solar_thermal_structure[x], relative_difference, 'k')
ax.set_xlabel(x)
ax.set_ylabel(y)
[each.set_visible(False) for each in axes.flatten()[len(interpolated_properties):]]
|
Test MARCS model atmosphere interpolator# coding: utf-8
""" Test the model atmosphere interpolator """
__author__ = "Andy Casey <andy@astrowizici.st>"
import os
import logging
import numpy as np
import matplotlib.pyplot as plt
from atmosphere import AtmosphereInterpolator as atmospheres, \
parsers
atmosphere_type = "MARCS (2011)"
solar_model_filename = "sun.mod.gz"
folder, parser = parsers[atmosphere_type]
interpolator = atmospheres(folder, parser())
# Parse the solar atmosphere
solar_thermal_structure = interpolator.parser.parse_thermal_structure(
os.path.join(folder, solar_model_filename))
# These were taken directly from sun.mod.gz
truths = [5777, np.log10(2.7542e+04), 0., 0.]
# Interpolate the thermal structure for the truth values
interpolated_thermal_structure = interpolator.interpolate_thermal_structure(*truths)
# Generate comparisons
x = "k"
interpolated_properties = set(interpolated_thermal_structure.dtype.names).difference(x)
K = int(np.ceil(len(interpolated_properties)**0.5))
fig, axes = plt.subplots(K, K)
for i, (ax, y) in enumerate(zip(axes.flatten(), interpolated_properties)):
#ax.plot(solar_thermal_structure[x], solar_thermal_structure[y], 'k')
#ax.plot(interpolated_thermal_structure[x], interpolated_thermal_structure[y], 'b')
# Ensure the relative difference is less than 5%
relative_difference = 100 * (solar_thermal_structure[y] - interpolated_thermal_structure[y])/solar_thermal_structure[y]
finite = np.isfinite(relative_difference)
if not np.all(relative_difference[finite] < 5.):
logging.warn("Relative difference in {0} exceeds 5% ({1} > 5)!".format(y, int(np.max(relative_difference))))
ax.plot(solar_thermal_structure[x], relative_difference, 'k')
ax.set_xlabel(x)
ax.set_ylabel(y)
[each.set_visible(False) for each in axes.flatten()[len(interpolated_properties):]]
|
<commit_before><commit_msg>Test MARCS model atmosphere interpolator<commit_after># coding: utf-8
""" Test the model atmosphere interpolator """
__author__ = "Andy Casey <andy@astrowizici.st>"
import os
import logging
import numpy as np
import matplotlib.pyplot as plt
from atmosphere import AtmosphereInterpolator as atmospheres, \
parsers
atmosphere_type = "MARCS (2011)"
solar_model_filename = "sun.mod.gz"
folder, parser = parsers[atmosphere_type]
interpolator = atmospheres(folder, parser())
# Parse the solar atmosphere
solar_thermal_structure = interpolator.parser.parse_thermal_structure(
os.path.join(folder, solar_model_filename))
# These were taken directly from sun.mod.gz
truths = [5777, np.log10(2.7542e+04), 0., 0.]
# Interpolate the thermal structure for the truth values
interpolated_thermal_structure = interpolator.interpolate_thermal_structure(*truths)
# Generate comparisons
x = "k"
interpolated_properties = set(interpolated_thermal_structure.dtype.names).difference(x)
K = int(np.ceil(len(interpolated_properties)**0.5))
fig, axes = plt.subplots(K, K)
for i, (ax, y) in enumerate(zip(axes.flatten(), interpolated_properties)):
#ax.plot(solar_thermal_structure[x], solar_thermal_structure[y], 'k')
#ax.plot(interpolated_thermal_structure[x], interpolated_thermal_structure[y], 'b')
# Ensure the relative difference is less than 5%
relative_difference = 100 * (solar_thermal_structure[y] - interpolated_thermal_structure[y])/solar_thermal_structure[y]
finite = np.isfinite(relative_difference)
if not np.all(relative_difference[finite] < 5.):
logging.warn("Relative difference in {0} exceeds 5% ({1} > 5)!".format(y, int(np.max(relative_difference))))
ax.plot(solar_thermal_structure[x], relative_difference, 'k')
ax.set_xlabel(x)
ax.set_ylabel(y)
[each.set_visible(False) for each in axes.flatten()[len(interpolated_properties):]]
|
|
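The 5% tolerance check in the record can be factored into a small helper; a sketch with a hypothetical name, using NumPy only:

import numpy as np

def max_relative_difference(reference, candidate):
    # Largest finite percent deviation of `candidate` from `reference`.
    rel = 100 * (reference - candidate) / reference
    rel = rel[np.isfinite(rel)]
    return float(np.max(np.abs(rel)))

# max_relative_difference(np.array([2.0, 4.0]), np.array([2.1, 3.9])) -> 5.0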
9267d2c83087bb8a570109b60d3517abb8fc66cf
|
tests/test_reset.py
|
tests/test_reset.py
|
#!/usr/bin/python
import sys
import pycurl
saw_error = 1
def main():
global saw_error
pycurl.global_init(pycurl.GLOBAL_DEFAULT)
outf = file("/dev/null", "rb+")
cm = pycurl.CurlMulti()
# Set multi handle's options
cm.setopt(pycurl.M_PIPELINING, 1)
eh = pycurl.Curl()
for x in range(1, 20):
eh.setopt(pycurl.WRITEDATA, outf)
eh.setopt(pycurl.URL, sys.argv[1])
cm.add_handle(eh)
while 1:
ret, active_handles = cm.perform()
if ret != pycurl.E_CALL_MULTI_PERFORM:
break
while active_handles:
ret = cm.select(1.0)
if ret == -1:
continue
while 1:
ret, active_handles = cm.perform()
if ret != pycurl.E_CALL_MULTI_PERFORM:
break
count, good, bad = cm.info_read()
for h, en, em in bad:
print "Transfer to %s failed with %d, %s\n" % \
(h.getinfo(pycurl.EFFECTIVE_URL), en, em)
raise RuntimeError
for h in good:
httpcode = h.getinfo(pycurl.RESPONSE_CODE)
if httpcode != 200:
print "Transfer to %s failed with code %d\n" %\
(h.getinfo(pycurl.EFFECTIVE_URL), httpcode)
raise RuntimeError
else:
print "Recd %d bytes from %s" % \
(h.getinfo(pycurl.SIZE_DOWNLOAD),
h.getinfo(pycurl.EFFECTIVE_URL))
cm.remove_handle(eh)
eh.reset()
eh.close()
cm.close()
outf.close()
pycurl.global_cleanup()
if __name__ == '__main__':
if len(sys.argv) != 2:
print "Usage: %s <url>" % sys.argv[0]
sys.exit(2)
main()
|
Test for reset fixes refcount bug
|
Test for reset fixes refcount bug
|
Python
|
lgpl-2.1
|
pycurl/pycurl,p/pycurl-archived,pycurl/pycurl,pycurl/pycurl,p/pycurl-archived,p/pycurl-archived
|
Test for reset fixes refcount bug
|
#!/usr/bin/python
import sys
import pycurl
saw_error = 1
def main():
global saw_error
pycurl.global_init(pycurl.GLOBAL_DEFAULT)
outf = file("/dev/null", "rb+")
cm = pycurl.CurlMulti()
# Set multi handle's options
cm.setopt(pycurl.M_PIPELINING, 1)
eh = pycurl.Curl()
for x in range(1, 20):
eh.setopt(pycurl.WRITEDATA, outf)
eh.setopt(pycurl.URL, sys.argv[1])
cm.add_handle(eh)
while 1:
ret, active_handles = cm.perform()
if ret != pycurl.E_CALL_MULTI_PERFORM:
break
while active_handles:
ret = cm.select(1.0)
if ret == -1:
continue
while 1:
ret, active_handles = cm.perform()
if ret != pycurl.E_CALL_MULTI_PERFORM:
break
count, good, bad = cm.info_read()
for h, en, em in bad:
print "Transfer to %s failed with %d, %s\n" % \
(h.getinfo(pycurl.EFFECTIVE_URL), en, em)
raise RuntimeError
for h in good:
httpcode = h.getinfo(pycurl.RESPONSE_CODE)
if httpcode != 200:
print "Transfer to %s failed with code %d\n" %\
(h.getinfo(pycurl.EFFECTIVE_URL), httpcode)
raise RuntimeError
else:
print "Recd %d bytes from %s" % \
(h.getinfo(pycurl.SIZE_DOWNLOAD),
h.getinfo(pycurl.EFFECTIVE_URL))
cm.remove_handle(eh)
eh.reset()
eh.close()
cm.close()
outf.close()
pycurl.global_cleanup()
if __name__ == '__main__':
if len(sys.argv) != 2:
print "Usage: %s <url>" % sys.argv[0]
sys.exit(2)
main()
|
<commit_before><commit_msg>Test for reset fixes refcount bug<commit_after>
|
#!/usr/bin/python
import sys
import pycurl
saw_error = 1
def main():
global saw_error
pycurl.global_init(pycurl.GLOBAL_DEFAULT)
outf = file("/dev/null", "rb+")
cm = pycurl.CurlMulti()
# Set multi handle's options
cm.setopt(pycurl.M_PIPELINING, 1)
eh = pycurl.Curl()
for x in range(1, 20):
eh.setopt(pycurl.WRITEDATA, outf)
eh.setopt(pycurl.URL, sys.argv[1])
cm.add_handle(eh)
while 1:
ret, active_handles = cm.perform()
if ret != pycurl.E_CALL_MULTI_PERFORM:
break
while active_handles:
ret = cm.select(1.0)
if ret == -1:
continue
while 1:
ret, active_handles = cm.perform()
if ret != pycurl.E_CALL_MULTI_PERFORM:
break
count, good, bad = cm.info_read()
for h, en, em in bad:
print "Transfer to %s failed with %d, %s\n" % \
(h.getinfo(pycurl.EFFECTIVE_URL), en, em)
raise RuntimeError
for h in good:
httpcode = h.getinfo(pycurl.RESPONSE_CODE)
if httpcode != 200:
print "Transfer to %s failed with code %d\n" %\
(h.getinfo(pycurl.EFFECTIVE_URL), httpcode)
raise RuntimeError
else:
print "Recd %d bytes from %s" % \
(h.getinfo(pycurl.SIZE_DOWNLOAD),
h.getinfo(pycurl.EFFECTIVE_URL))
cm.remove_handle(eh)
eh.reset()
eh.close()
cm.close()
outf.close()
pycurl.global_cleanup()
if __name__ == '__main__':
if len(sys.argv) != 2:
print "Usage: %s <url>" % sys.argv[0]
sys.exit(2)
main()
|
Test for reset fixes refcount bug#!/usr/bin/python
import sys
import pycurl
saw_error = 1
def main():
global saw_error
pycurl.global_init(pycurl.GLOBAL_DEFAULT)
outf = file("/dev/null", "rb+")
cm = pycurl.CurlMulti()
# Set multi handle's options
cm.setopt(pycurl.M_PIPELINING, 1)
eh = pycurl.Curl()
for x in range(1, 20):
eh.setopt(pycurl.WRITEDATA, outf)
eh.setopt(pycurl.URL, sys.argv[1])
cm.add_handle(eh)
while 1:
ret, active_handles = cm.perform()
if ret != pycurl.E_CALL_MULTI_PERFORM:
break
while active_handles:
ret = cm.select(1.0)
if ret == -1:
continue
while 1:
ret, active_handles = cm.perform()
if ret != pycurl.E_CALL_MULTI_PERFORM:
break
count, good, bad = cm.info_read()
for h, en, em in bad:
print "Transfer to %s failed with %d, %s\n" % \
(h.getinfo(pycurl.EFFECTIVE_URL), en, em)
raise RuntimeError
for h in good:
httpcode = h.getinfo(pycurl.RESPONSE_CODE)
if httpcode != 200:
print "Transfer to %s failed with code %d\n" %\
(h.getinfo(pycurl.EFFECTIVE_URL), httpcode)
raise RuntimeError
else:
print "Recd %d bytes from %s" % \
(h.getinfo(pycurl.SIZE_DOWNLOAD),
h.getinfo(pycurl.EFFECTIVE_URL))
cm.remove_handle(eh)
eh.reset()
eh.close()
cm.close()
outf.close()
pycurl.global_cleanup()
if __name__ == '__main__':
if len(sys.argv) != 2:
print "Usage: %s <url>" % sys.argv[0]
sys.exit(2)
main()
|
<commit_before><commit_msg>Test for reset fixes refcount bug<commit_after>#!/usr/bin/python
import sys
import pycurl
saw_error = 1
def main():
global saw_error
pycurl.global_init(pycurl.GLOBAL_DEFAULT)
outf = file("/dev/null", "rb+")
cm = pycurl.CurlMulti()
# Set multi handle's options
cm.setopt(pycurl.M_PIPELINING, 1)
eh = pycurl.Curl()
for x in range(1, 20):
eh.setopt(pycurl.WRITEDATA, outf)
eh.setopt(pycurl.URL, sys.argv[1])
cm.add_handle(eh)
while 1:
ret, active_handles = cm.perform()
if ret != pycurl.E_CALL_MULTI_PERFORM:
break
while active_handles:
ret = cm.select(1.0)
if ret == -1:
continue
while 1:
ret, active_handles = cm.perform()
if ret != pycurl.E_CALL_MULTI_PERFORM:
break
count, good, bad = cm.info_read()
for h, en, em in bad:
print "Transfer to %s failed with %d, %s\n" % \
(h.getinfo(pycurl.EFFECTIVE_URL), en, em)
raise RuntimeError
for h in good:
httpcode = h.getinfo(pycurl.RESPONSE_CODE)
if httpcode != 200:
print "Transfer to %s failed with code %d\n" %\
(h.getinfo(pycurl.EFFECTIVE_URL), httpcode)
raise RuntimeError
else:
print "Recd %d bytes from %s" % \
(h.getinfo(pycurl.SIZE_DOWNLOAD),
h.getinfo(pycurl.EFFECTIVE_URL))
cm.remove_handle(eh)
eh.reset()
eh.close()
cm.close()
outf.close()
pycurl.global_cleanup()
if __name__ == '__main__':
if len(sys.argv) != 2:
print "Usage: %s <url>" % sys.argv[0]
sys.exit(2)
main()
|
|
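The test above is Python 2 era code (print statements, file()), but the pattern it guards, reusing one easy handle across transfers via reset(), translates directly to modern pycurl. A hedged Python 3 sketch of that reuse pattern (the URL handling and buffering are illustrative, not taken from the test):

import io
import pycurl

def fetch_many(urls):
    handle = pycurl.Curl()
    bodies = []
    for url in urls:
        buf = io.BytesIO()
        handle.setopt(pycurl.URL, url)
        handle.setopt(pycurl.WRITEDATA, buf)  # any object with a write() method
        handle.perform()
        bodies.append(buf.getvalue())
        # reset() restores the handle to its pristine state so stale options
        # (and, per the refcount bug this test exercises, stale object
        # references) do not leak into the next transfer.
        handle.reset()
    handle.close()
    return bodies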
f9ff15636a62a8ca0f7872874721d42019af7e9c
|
tests/unit_tests.py
|
tests/unit_tests.py
|
__author__ = 'Michael'
from src import PClass
# Test for bias addition and correctness of range in init function
testClass = PClass(3)
assert testClass.weightList.size() == 4
for x in range(0, testClass.numInputs):
assert -1 <= testClass.weightList[x] <= 1
|
Create initial tests for code maintenance
|
Create initial tests for code maintenance
|
Python
|
mit
|
michaellee1/ANN-PCML
|
Create initial tests for code maintenance
|
__author__ = 'Michael'
from src import PClass
# Test for bias addition and correctness of range in init function
testClass = PClass(3)
assert testClass.weightList.size() == 4
for x in range(0, testClass.numInputs):
assert -1 <= testClass.weightList[x] <= 1
|
<commit_before><commit_msg>Create initial tests for code maintenance<commit_after>
|
__author__ = 'Michael'
from src import PClass
# Test for bias addition and correctness of range in init function
testClass = PClass(3)
assert testClass.weightList.size() == 4
for x in range(0, testClass.numInputs):
assert -1 <= testClass.weightList[x] <= 1
|
Create initial tests for code maintenance__author__ = 'Michael'
from src import PClass
# Test for bias addition and correctness of range in init function
testClass = PClass(3)
assert testClass.weightList.size() == 4
for x in range(0, testClass.numInputs):
assert -1 <= testClass.weightList[x] <= 1
|
<commit_before><commit_msg>Create initial tests for code maintenance<commit_after>__author__ = 'Michael'
from src import PClass
# Test for bias addition and correctness of range in init function
testClass = PClass(3)
assert testClass.weightList.size() == 4
for x in range(0, testClass.numInputs):
assert -1 <= testClass.weightList[x] <= 1
|
|
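Two caveats on the assertions above, now that the range check uses a plain chained comparison: weightList.size() only works if PClass exposes size as a method (a list needs len(), a numpy array has a .size attribute, not a call), and the loop checks numInputs weights while asserting numInputs + 1 exist, so the bias weight itself goes unchecked. A standalone sketch of the intended bound test (the initialisation scheme is assumed, not taken from PClass):

import random

num_inputs = 3
# Hypothetical stand-in for PClass: one weight per input plus a bias term,
# each drawn uniformly from [-1, 1].
weights = [random.uniform(-1, 1) for _ in range(num_inputs + 1)]

assert len(weights) == num_inputs + 1
for w in weights:  # cover the bias weight too
    # Chained comparison: equivalent to (-1 <= w) and (w <= 1).
    assert -1 <= w <= 1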
06a12ada48c11a101eb138220cd0e0ca33a612be
|
tests/thread/stress_create.py
|
tests/thread/stress_create.py
|
# stress test for creating many threads
try:
import utime as time
except ImportError:
import time
import _thread
def thread_entry(n):
pass
thread_num = 0
while thread_num < 500:
try:
_thread.start_new_thread(thread_entry, (thread_num,))
thread_num += 1
except MemoryError:
pass
# wait for the last threads to terminate
time.sleep(1)
print('done')
|
Add stress-test for creating many threads.
|
tests/thread: Add stress-test for creating many threads.
|
Python
|
mit
|
TDAbboud/micropython,Timmenem/micropython,deshipu/micropython,SHA2017-badge/micropython-esp32,alex-robbins/micropython,tobbad/micropython,lowRISC/micropython,oopy/micropython,deshipu/micropython,bvernoux/micropython,puuu/micropython,infinnovation/micropython,trezor/micropython,adafruit/micropython,oopy/micropython,trezor/micropython,tobbad/micropython,hiway/micropython,hiway/micropython,pfalcon/micropython,tobbad/micropython,pfalcon/micropython,micropython/micropython-esp32,kerneltask/micropython,Peetz0r/micropython-esp32,Peetz0r/micropython-esp32,henriknelson/micropython,adafruit/circuitpython,deshipu/micropython,chrisdearman/micropython,HenrikSolver/micropython,selste/micropython,TDAbboud/micropython,infinnovation/micropython,chrisdearman/micropython,kerneltask/micropython,toolmacher/micropython,swegener/micropython,swegener/micropython,pozetroninc/micropython,hiway/micropython,AriZuu/micropython,tralamazza/micropython,toolmacher/micropython,blazewicz/micropython,ryannathans/micropython,swegener/micropython,adafruit/micropython,trezor/micropython,oopy/micropython,infinnovation/micropython,oopy/micropython,chrisdearman/micropython,MrSurly/micropython,bvernoux/micropython,selste/micropython,lowRISC/micropython,cwyark/micropython,bvernoux/micropython,micropython/micropython-esp32,MrSurly/micropython-esp32,puuu/micropython,lowRISC/micropython,alex-robbins/micropython,henriknelson/micropython,tralamazza/micropython,dmazzella/micropython,HenrikSolver/micropython,toolmacher/micropython,puuu/micropython,blazewicz/micropython,deshipu/micropython,ryannathans/micropython,pfalcon/micropython,trezor/micropython,torwag/micropython,kerneltask/micropython,dmazzella/micropython,pozetroninc/micropython,bvernoux/micropython,tralamazza/micropython,adafruit/micropython,ryannathans/micropython,PappaPeppar/micropython,micropython/micropython-esp32,TDAbboud/micropython,alex-robbins/micropython,pfalcon/micropython,pfalcon/micropython,ryannathans/micropython,oopy/micropython,micropython/micropython-esp32,trezor/micropython,blazewicz/micropython,PappaPeppar/micropython,hiway/micropython,torwag/micropython,adafruit/micropython,selste/micropython,pramasoul/micropython,pozetroninc/micropython,HenrikSolver/micropython,torwag/micropython,PappaPeppar/micropython,cwyark/micropython,kerneltask/micropython,henriknelson/micropython,toolmacher/micropython,cwyark/micropython,PappaPeppar/micropython,swegener/micropython,MrSurly/micropython-esp32,infinnovation/micropython,torwag/micropython,hiway/micropython,MrSurly/micropython-esp32
|
tests/thread: Add stress-test for creating many threads.
|
# stress test for creating many threads
try:
import utime as time
except ImportError:
import time
import _thread
def thread_entry(n):
pass
thread_num = 0
while thread_num < 500:
try:
_thread.start_new_thread(thread_entry, (thread_num,))
thread_num += 1
except MemoryError:
pass
# wait for the last threads to terminate
time.sleep(1)
print('done')
|
<commit_before><commit_msg>tests/thread: Add stress-test for creating many threads.<commit_after>
|
# stress test for creating many threads
try:
import utime as time
except ImportError:
import time
import _thread
def thread_entry(n):
pass
thread_num = 0
while thread_num < 500:
try:
_thread.start_new_thread(thread_entry, (thread_num,))
thread_num += 1
except MemoryError:
pass
# wait for the last threads to terminate
time.sleep(1)
print('done')
|
tests/thread: Add stress-test for creating many threads.# stress test for creating many threads
try:
import utime as time
except ImportError:
import time
import _thread
def thread_entry(n):
pass
thread_num = 0
while thread_num < 500:
try:
_thread.start_new_thread(thread_entry, (thread_num,))
thread_num += 1
except MemoryError:
pass
# wait for the last threads to terminate
time.sleep(1)
print('done')
|
<commit_before><commit_msg>tests/thread: Add stress-test for creating many threads.<commit_after># stress test for creating many threads
try:
import utime as time
except ImportError:
import time
import _thread
def thread_entry(n):
pass
thread_num = 0
while thread_num < 500:
try:
_thread.start_new_thread(thread_entry, (thread_num,))
thread_num += 1
except MemoryError:
pass
# wait for the last threads to terminate
time.sleep(1)
print('done')
|
|
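The retry-on-MemoryError loop is the core of the stress test above: spawn until the allocator refuses, swallow the failure, and retry once finished threads have been reclaimed. One portability note for running something similar under CPython: there, thread-creation failure usually surfaces as RuntimeError ("can't start new thread") rather than MemoryError, so a version covering both looks like this sketch:

import time
import _thread

def worker(n):
    pass  # exits immediately, freeing its slot for reuse

spawned = 0
while spawned < 500:
    try:
        _thread.start_new_thread(worker, (spawned,))
        spawned += 1
    except (MemoryError, RuntimeError):
        # MicroPython reports exhaustion as MemoryError; CPython raises
        # RuntimeError. Back off briefly so earlier threads can terminate.
        time.sleep(0.01)

time.sleep(1)  # give the last threads time to finish
print('done')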
f2b261ea7af982653dfb3057b98e3b917c72b7a4
|
test/test_simple_module_pass.py
|
test/test_simple_module_pass.py
|
import sys, unittest
from tools import SamplesTestCase
OUTPUT_FOR_GLOBALS = '''\
Found global named "gfloat": type = float*
Found global named "gppfloat": type = float***
Found global named "gint": type = i32*
'''
PROG = 'simple_module_pass'
class TestSimpleModulePass(SamplesTestCase):
def test_on_globals(self):
self.assertSampleOutput([PROG], 'globals.ll', OUTPUT_FOR_GLOBALS)
if __name__ == '__main__':
unittest.main()
|
Add a test for simple_module_pass
|
Add a test for simple_module_pass
|
Python
|
unlicense
|
eliben/llvm-clang-samples,eliben/llvm-clang-samples,eliben/llvm-clang-samples,eliben/llvm-clang-samples,eliben/llvm-clang-samples,eliben/llvm-clang-samples
|
Add a test for simple_module_pass
|
import sys, unittest
from tools import SamplesTestCase
OUTPUT_FOR_GLOBALS = '''\
Found global named "gfloat": type = float*
Found global named "gppfloat": type = float***
Found global named "gint": type = i32*
'''
PROG = 'simple_module_pass'
class TestSimpleModulePass(SamplesTestCase):
def test_on_globals(self):
self.assertSampleOutput([PROG], 'globals.ll', OUTPUT_FOR_GLOBALS)
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add a test for simple_module_pass<commit_after>
|
import sys, unittest
from tools import SamplesTestCase
OUTPUT_FOR_GLOBALS = '''\
Found global named "gfloat": type = float*
Found global named "gppfloat": type = float***
Found global named "gint": type = i32*
'''
PROG = 'simple_module_pass'
class TestSimpleModulePass(SamplesTestCase):
def test_on_globals(self):
self.assertSampleOutput([PROG], 'globals.ll', OUTPUT_FOR_GLOBALS)
if __name__ == '__main__':
unittest.main()
|
Add a test for simple_module_passimport sys, unittest
from tools import SamplesTestCase
OUTPUT_FOR_GLOBALS = '''\
Found global named "gfloat": type = float*
Found global named "gppfloat": type = float***
Found global named "gint": type = i32*
'''
PROG = 'simple_module_pass'
class TestSimpleModulePass(SamplesTestCase):
def test_on_globals(self):
self.assertSampleOutput([PROG], 'globals.ll', OUTPUT_FOR_GLOBALS)
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add a test for simple_module_pass<commit_after>import sys, unittest
from tools import SamplesTestCase
OUTPUT_FOR_GLOBALS = '''\
Found global named "gfloat": type = float*
Found global named "gppfloat": type = float***
Found global named "gint": type = i32*
'''
PROG = 'simple_module_pass'
class TestSimpleModulePass(SamplesTestCase):
def test_on_globals(self):
self.assertSampleOutput([PROG], 'globals.ll', OUTPUT_FOR_GLOBALS)
if __name__ == '__main__':
unittest.main()
|
|
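assertSampleOutput comes from the repository's own tools module, so its signature is not visible in this file; conceptually it runs a compiled sample binary over an input file and compares stdout against the expected text. A hedged approximation of such a helper (the method name and path handling are assumptions, not the project's actual code):

import subprocess
import unittest

class SamplesTestCaseSketch(unittest.TestCase):
    def assert_sample_output(self, argv, input_file, expected_output):
        # Run e.g. ./simple_module_pass globals.ll and capture its stdout.
        result = subprocess.run(argv + [input_file],
                                capture_output=True, text=True, check=True)
        self.assertEqual(result.stdout, expected_output)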
39646d3a7f3f8539dcb88ae9f79e57a3ad79a9f7
|
monasca_setup/detection/plugins/octavia.py
|
monasca_setup/detection/plugins/octavia.py
|
# (C) Copyright 2016 Hewlett Packard Enterprise Development Company LP
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import monasca_setup.detection
class Octavia(monasca_setup.detection.ServicePlugin):
"""Detect Octavia daemons and setup configuration to monitor them.
"""
def __init__(self, template_dir, overwrite=True, args=None):
service_params = {
'args': args,
'template_dir': template_dir,
'overwrite': overwrite,
'service_name': 'octavia',
'process_names': ['octavia-api', 'octavia-worker',
'octavia-health-manager', 'octavia-housekeeping'],
'service_api_url': 'http://localhost:9876',
'search_pattern': '.*200 OK.*',
}
super(Octavia, self).__init__(service_params)
|
Add process monitoring for Octavia
|
Add process monitoring for Octavia
This change creates an Octavia detection plugin and adds process
monitoring for octavia-api, octavia-worker, octavia-health-manager
and octavia-housekeeping processes.
Change-Id: I71411c640713ac0ab2b3bbb332dfab064b08379b
|
Python
|
bsd-3-clause
|
sapcc/monasca-agent,sapcc/monasca-agent,sapcc/monasca-agent
|
Add process monitoring for Octavia
This change creates an Octavia detection plugin and adds process
monitoring for octavia-api, octavia-worker, octavia-health-manager
and octavia-housekeeping processes.
Change-Id: I71411c640713ac0ab2b3bbb332dfab064b08379b
|
# (C) Copyright 2016 Hewlett Packard Enterprise Development Company LP
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import monasca_setup.detection
class Octavia(monasca_setup.detection.ServicePlugin):
"""Detect Octavia daemons and setup configuration to monitor them.
"""
def __init__(self, template_dir, overwrite=True, args=None):
service_params = {
'args': args,
'template_dir': template_dir,
'overwrite': overwrite,
'service_name': 'octavia',
'process_names': ['octavia-api', 'octavia-worker',
'octavia-health-manager', 'octavia-housekeeping'],
'service_api_url': 'http://localhost:9876',
'search_pattern': '.*200 OK.*',
}
super(Octavia, self).__init__(service_params)
|
<commit_before><commit_msg>Add process monitoring for Octavia
This change creates an Octavia detection plugin and adds process
monitoring for octavia-api, octavia-worker, octavia-health-manager
and octavia-housekeeping processes.
Change-Id: I71411c640713ac0ab2b3bbb332dfab064b08379b<commit_after>
|
# (C) Copyright 2016 Hewlett Packard Enterprise Development Company LP
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import monasca_setup.detection
class Octavia(monasca_setup.detection.ServicePlugin):
"""Detect Octavia daemons and setup configuration to monitor them.
"""
def __init__(self, template_dir, overwrite=True, args=None):
service_params = {
'args': args,
'template_dir': template_dir,
'overwrite': overwrite,
'service_name': 'octavia',
'process_names': ['octavia-api', 'octavia-worker',
'octavia-health-manager', 'octavia-housekeeping'],
'service_api_url': 'http://localhost:9876',
'search_pattern': '.*200 OK.*',
}
super(Octavia, self).__init__(service_params)
|
Add process monitoring for Octavia
This change creates an Octavia detection plugin and adds process
monitoring for octavia-api, octavia-worker, octavia-health-manager
and octavia-housekeeping processes.
Change-Id: I71411c640713ac0ab2b3bbb332dfab064b08379b# (C) Copyright 2016 Hewlett Packard Enterprise Development Company LP
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import monasca_setup.detection
class Octavia(monasca_setup.detection.ServicePlugin):
"""Detect Octavia daemons and setup configuration to monitor them.
"""
def __init__(self, template_dir, overwrite=True, args=None):
service_params = {
'args': args,
'template_dir': template_dir,
'overwrite': overwrite,
'service_name': 'octavia',
'process_names': ['octavia-api', 'octavia-worker',
'octavia-health-manager', 'octavia-housekeeping'],
'service_api_url': 'http://localhost:9876',
'search_pattern': '.*200 OK.*',
}
super(Octavia, self).__init__(service_params)
|
<commit_before><commit_msg>Add process monitoring for Octavia
This change creates an Octavia detection plugin and adds process
monitoring for octavia-api, octavia-worker, octavia-health-manager
and octavia-housekeeping processes.
Change-Id: I71411c640713ac0ab2b3bbb332dfab064b08379b<commit_after># (C) Copyright 2016 Hewlett Packard Enterprise Development Company LP
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import monasca_setup.detection
class Octavia(monasca_setup.detection.ServicePlugin):
"""Detect Octavia daemons and setup configuration to monitor them.
"""
def __init__(self, template_dir, overwrite=True, args=None):
service_params = {
'args': args,
'template_dir': template_dir,
'overwrite': overwrite,
'service_name': 'octavia',
'process_names': ['octavia-api', 'octavia-worker',
'octavia-health-manager', 'octavia-housekeeping'],
'service_api_url': 'http://localhost:9876',
'search_pattern': '.*200 OK.*',
}
super(Octavia, self).__init__(service_params)
|
|
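ServicePlugin itself lives elsewhere in monasca-setup; roughly, process_names feeds a per-process check and service_api_url plus search_pattern feed an HTTP check. As a standalone illustration of the process-detection half, here is a small psutil sketch (not the monasca implementation):

import psutil

def find_processes(names):
    """Map each expected daemon name to the PIDs currently running it."""
    found = {name: [] for name in names}
    for proc in psutil.process_iter(['pid', 'cmdline']):
        cmdline = ' '.join(proc.info['cmdline'] or [])
        for name in names:
            if name in cmdline:
                found[name].append(proc.info['pid'])
    return found

daemons = ['octavia-api', 'octavia-worker',
           'octavia-health-manager', 'octavia-housekeeping']
missing = [name for name, pids in find_processes(daemons).items() if not pids]
print('missing daemons:', missing)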
0228ccc1a635408c3d4a27f27ac851b4d89b72b2
|
takeyourmeds/reminders/reminders_calls/tests.py
|
takeyourmeds/reminders/reminders_calls/tests.py
|
from takeyourmeds.utils.test import TestCase
from ..enums import TypeEnum, SourceEnum
from .enums import StateEnum
class TwimlCallbackTest(TestCase):
def setUp(self):
super(TwimlCallbackTest, self).setUp()
self.call = self.user.reminders.create(
type=TypeEnum.call,
).instances.create(
source=SourceEnum.manual,
).calls.create()
def test_urls(self):
self.assert_(self.call.get_twiml_callback_url())
|
Check we can create a URL
|
Check we can create a URL
|
Python
|
mit
|
takeyourmeds/takeyourmeds-web,takeyourmeds/takeyourmeds-web,takeyourmeds/takeyourmeds-web,takeyourmeds/takeyourmeds-web
|
Check we can create a URL
|
from takeyourmeds.utils.test import TestCase
from ..enums import TypeEnum, SourceEnum
from .enums import StateEnum
class TwimlCallbackTest(TestCase):
def setUp(self):
super(TwimlCallbackTest, self).setUp()
self.call = self.user.reminders.create(
type=TypeEnum.call,
).instances.create(
source=SourceEnum.manual,
).calls.create()
def test_urls(self):
self.assert_(self.call.get_twiml_callback_url())
|
<commit_before><commit_msg>Check we can create a URL<commit_after>
|
from takeyourmeds.utils.test import TestCase
from ..enums import TypeEnum, SourceEnum
from .enums import StateEnum
class TwimlCallbackTest(TestCase):
def setUp(self):
super(TwimlCallbackTest, self).setUp()
self.call = self.user.reminders.create(
type=TypeEnum.call,
).instances.create(
source=SourceEnum.manual,
).calls.create()
def test_urls(self):
self.assert_(self.call.get_twiml_callback_url())
|
Check we can create a URLfrom takeyourmeds.utils.test import TestCase
from ..enums import TypeEnum, SourceEnum
from .enums import StateEnum
class TwimlCallbackTest(TestCase):
def setUp(self):
super(TwimlCallbackTest, self).setUp()
self.call = self.user.reminders.create(
type=TypeEnum.call,
).instances.create(
source=SourceEnum.manual,
).calls.create()
def test_urls(self):
self.assert_(self.call.get_twiml_callback_url())
|
<commit_before><commit_msg>Check we can create a URL<commit_after>from takeyourmeds.utils.test import TestCase
from ..enums import TypeEnum, SourceEnum
from .enums import StateEnum
class TwimlCallbackTest(TestCase):
def setUp(self):
super(TwimlCallbackTest, self).setUp()
self.call = self.user.reminders.create(
type=TypeEnum.call,
).instances.create(
source=SourceEnum.manual,
).calls.create()
def test_urls(self):
self.assert_(self.call.get_twiml_callback_url())
|
|
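get_twiml_callback_url is defined on the Call model elsewhere in the app, so the route it resolves is not visible here. A typical Django implementation simply wraps reverse() over the instance's primary key, along the lines of this sketch (the URL name is invented for illustration):

from django.urls import reverse

class CallSketch:
    def __init__(self, pk):
        self.pk = pk

    def get_twiml_callback_url(self):
        # Resolve the per-call webhook route that Twilio will be pointed at.
        return reverse('twiml-callback', args=(self.pk,))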
c06f653cb6763f1d1b64797aa48e65820652f981
|
tests/alerts/test_open_port_violation.py
|
tests/alerts/test_open_port_violation.py
|
from positive_alert_test_case import PositiveAlertTestCase
from negative_alert_test_case import NegativeAlertTestCase
from alert_test_suite import AlertTestSuite
class TestAlertOpenPortViolation(AlertTestSuite):
alert_filename = "open_port_violation"
# This event is the default positive event that will cause the
# alert to trigger
default_event = {
"_type": "event",
"_source": {
"tags": ["open_port_policy_violation"],
"details": {
"destinationipaddress": "220.231.44.213",
"destinationport": 25,
}
}
}
# This alert is the expected result from running this task
default_alert = {
"category": "open_port_policy_violation",
"tags": ['open_port_policy_violation'],
"severity": "CRITICAL",
"summary": '10 unauthorized open port(s) on 220.231.44.213 (25 25 25 25 25 )',
}
test_cases = []
default_events = list()
for num in xrange(10):
default_events.append(AlertTestSuite.copy(default_event))
test_cases.append(
PositiveAlertTestCase(
description="Positive test with default events and default alert expected",
events=default_events,
expected_alert=default_alert
)
)
custom_events = default_events
for temp_event in custom_events:
temp_event['_source']['utctimestamp'] = AlertTestSuite.subtract_from_timestamp_lambda(date_timedelta={'minutes': 239})
test_cases.append(
PositiveAlertTestCase(
description="Positive test with events a minute earlier",
events=custom_events,
expected_alert=default_alert
)
)
custom_events = default_events
for temp_event in custom_events:
temp_event['_type'] = 'bad'
test_cases.append(
NegativeAlertTestCase(
description="Negative test case with events with incorrect _type",
events=custom_events,
)
)
custom_events = default_events
for temp_event in custom_events:
temp_event['_source']['tags'] = 'bad tag example'
test_cases.append(
NegativeAlertTestCase(
description="Negative test case with events with incorrect tags",
events=custom_events,
)
)
custom_events = default_events
for temp_event in custom_events:
temp_event['_source']['utctimestamp'] = AlertTestSuite.subtract_from_timestamp_lambda({'minutes': 241})
test_cases.append(
NegativeAlertTestCase(
description="Negative test case with old timestamp",
events=custom_events,
)
)
|
Add unit-tests for open port violation alert
|
Add unit-tests for open port violation alert
|
Python
|
mpl-2.0
|
Phrozyn/MozDef,ameihm0912/MozDef,ameihm0912/MozDef,jeffbryner/MozDef,jeffbryner/MozDef,mpurzynski/MozDef,mozilla/MozDef,mozilla/MozDef,jeffbryner/MozDef,gdestuynder/MozDef,mozilla/MozDef,mozilla/MozDef,Phrozyn/MozDef,mpurzynski/MozDef,mpurzynski/MozDef,gdestuynder/MozDef,ameihm0912/MozDef,gdestuynder/MozDef,Phrozyn/MozDef,gdestuynder/MozDef,jeffbryner/MozDef,Phrozyn/MozDef,mpurzynski/MozDef,ameihm0912/MozDef
|
Add unit-tests for open port violation alert
|
from positive_alert_test_case import PositiveAlertTestCase
from negative_alert_test_case import NegativeAlertTestCase
from alert_test_suite import AlertTestSuite
class TestAlertOpenPortViolation(AlertTestSuite):
alert_filename = "open_port_violation"
# This event is the default positive event that will cause the
# alert to trigger
default_event = {
"_type": "event",
"_source": {
"tags": ["open_port_policy_violation"],
"details": {
"destinationipaddress": "220.231.44.213",
"destinationport": 25,
}
}
}
# This alert is the expected result from running this task
default_alert = {
"category": "open_port_policy_violation",
"tags": ['open_port_policy_violation'],
"severity": "CRITICAL",
"summary": '10 unauthorized open port(s) on 220.231.44.213 (25 25 25 25 25 )',
}
test_cases = []
default_events = list()
for num in xrange(10):
default_events.append(AlertTestSuite.copy(default_event))
test_cases.append(
PositiveAlertTestCase(
description="Positive test with default events and default alert expected",
events=default_events,
expected_alert=default_alert
)
)
custom_events = default_events
for temp_event in custom_events:
temp_event['_source']['utctimestamp'] = AlertTestSuite.subtract_from_timestamp_lambda(date_timedelta={'minutes': 239})
test_cases.append(
PositiveAlertTestCase(
description="Positive test with events a minute earlier",
events=custom_events,
expected_alert=default_alert
)
)
custom_events = default_events
for temp_event in custom_events:
temp_event['_type'] = 'bad'
test_cases.append(
NegativeAlertTestCase(
description="Negative test case with events with incorrect _type",
events=custom_events,
)
)
custom_events = default_events
for temp_event in custom_events:
temp_event['_source']['tags'] = 'bad tag example'
test_cases.append(
NegativeAlertTestCase(
description="Negative test case with events with incorrect tags",
events=custom_events,
)
)
custom_events = default_events
for temp_event in custom_events:
temp_event['_source']['utctimestamp'] = AlertTestSuite.subtract_from_timestamp_lambda({'minutes': 241})
test_cases.append(
NegativeAlertTestCase(
description="Negative test case with old timestamp",
events=custom_events,
)
)
|
<commit_before><commit_msg>Add unit-tests for open port violation alert<commit_after>
|
from positive_alert_test_case import PositiveAlertTestCase
from negative_alert_test_case import NegativeAlertTestCase
from alert_test_suite import AlertTestSuite
class TestAlertOpenPortViolation(AlertTestSuite):
alert_filename = "open_port_violation"
# This event is the default positive event that will cause the
# alert to trigger
default_event = {
"_type": "event",
"_source": {
"tags": ["open_port_policy_violation"],
"details": {
"destinationipaddress": "220.231.44.213",
"destinationport": 25,
}
}
}
# This alert is the expected result from running this task
default_alert = {
"category": "open_port_policy_violation",
"tags": ['open_port_policy_violation'],
"severity": "CRITICAL",
"summary": '10 unauthorized open port(s) on 220.231.44.213 (25 25 25 25 25 )',
}
test_cases = []
default_events = list()
for num in xrange(10):
default_events.append(AlertTestSuite.copy(default_event))
test_cases.append(
PositiveAlertTestCase(
description="Positive test with default events and default alert expected",
events=default_events,
expected_alert=default_alert
)
)
custom_events = default_events
for temp_event in custom_events:
temp_event['_source']['utctimestamp'] = AlertTestSuite.subtract_from_timestamp_lambda(date_timedelta={'minutes': 239})
test_cases.append(
PositiveAlertTestCase(
description="Positive test with events a minute earlier",
events=custom_events,
expected_alert=default_alert
)
)
custom_events = default_events
for temp_event in custom_events:
temp_event['_type'] = 'bad'
test_cases.append(
NegativeAlertTestCase(
description="Negative test case with events with incorrect _type",
events=custom_events,
)
)
custom_events = default_events
for temp_event in custom_events:
temp_event['_source']['tags'] = 'bad tag example'
test_cases.append(
NegativeAlertTestCase(
description="Negative test case with events with incorrect tags",
events=custom_events,
)
)
custom_events = default_events
for temp_event in custom_events:
temp_event['_source']['utctimestamp'] = AlertTestSuite.subtract_from_timestamp_lambda({'minutes': 241})
test_cases.append(
NegativeAlertTestCase(
description="Negative test case with old timestamp",
events=custom_events,
)
)
|
Add unit-tests for open port violation alertfrom positive_alert_test_case import PositiveAlertTestCase
from negative_alert_test_case import NegativeAlertTestCase
from alert_test_suite import AlertTestSuite
class TestAlertOpenPortViolation(AlertTestSuite):
alert_filename = "open_port_violation"
# This event is the default positive event that will cause the
# alert to trigger
default_event = {
"_type": "event",
"_source": {
"tags": ["open_port_policy_violation"],
"details": {
"destinationipaddress": "220.231.44.213",
"destinationport": 25,
}
}
}
# This alert is the expected result from running this task
default_alert = {
"category": "open_port_policy_violation",
"tags": ['open_port_policy_violation'],
"severity": "CRITICAL",
"summary": '10 unauthorized open port(s) on 220.231.44.213 (25 25 25 25 25 )',
}
test_cases = []
default_events = list()
for num in xrange(10):
default_events.append(AlertTestSuite.copy(default_event))
test_cases.append(
PositiveAlertTestCase(
description="Positive test with default events and default alert expected",
events=default_events,
expected_alert=default_alert
)
)
custom_events = default_events
for temp_event in custom_events:
temp_event['_source']['utctimestamp'] = AlertTestSuite.subtract_from_timestamp_lambda(date_timedelta={'minutes': 239})
test_cases.append(
PositiveAlertTestCase(
description="Positive test with events a minute earlier",
events=custom_events,
expected_alert=default_alert
)
)
custom_events = default_events
for temp_event in custom_events:
temp_event['_type'] = 'bad'
test_cases.append(
NegativeAlertTestCase(
description="Negative test case with events with incorrect _type",
events=custom_events,
)
)
custom_events = default_events
for temp_event in custom_events:
temp_event['_source']['tags'] = 'bad tag example'
test_cases.append(
NegativeAlertTestCase(
description="Negative test case with events with incorrect tags",
events=custom_events,
)
)
custom_events = default_events
for temp_event in custom_events:
temp_event['_source']['utctimestamp'] = AlertTestSuite.subtract_from_timestamp_lambda({'minutes': 241})
test_cases.append(
NegativeAlertTestCase(
description="Negative test case with old timestamp",
events=custom_events,
)
)
|
<commit_before><commit_msg>Add unit-tests for open port violation alert<commit_after>from positive_alert_test_case import PositiveAlertTestCase
from negative_alert_test_case import NegativeAlertTestCase
from alert_test_suite import AlertTestSuite
class TestAlertOpenPortViolation(AlertTestSuite):
alert_filename = "open_port_violation"
# This event is the default positive event that will cause the
# alert to trigger
default_event = {
"_type": "event",
"_source": {
"tags": ["open_port_policy_violation"],
"details": {
"destinationipaddress": "220.231.44.213",
"destinationport": 25,
}
}
}
# This alert is the expected result from running this task
default_alert = {
"category": "open_port_policy_violation",
"tags": ['open_port_policy_violation'],
"severity": "CRITICAL",
"summary": '10 unauthorized open port(s) on 220.231.44.213 (25 25 25 25 25 )',
}
test_cases = []
default_events = list()
for num in xrange(10):
default_events.append(AlertTestSuite.copy(default_event))
test_cases.append(
PositiveAlertTestCase(
description="Positive test with default events and default alert expected",
events=default_events,
expected_alert=default_alert
)
)
custom_events = default_events
for temp_event in custom_events:
temp_event['_source']['utctimestamp'] = AlertTestSuite.subtract_from_timestamp_lambda(date_timedelta={'minutes': 239})
test_cases.append(
PositiveAlertTestCase(
description="Positive test with events a minute earlier",
events=custom_events,
expected_alert=default_alert
)
)
custom_events = default_events
for temp_event in custom_events:
temp_event['_type'] = 'bad'
test_cases.append(
NegativeAlertTestCase(
description="Negative test case with events with incorrect _type",
events=custom_events,
)
)
custom_events = default_events
for temp_event in custom_events:
temp_event['_source']['tags'] = 'bad tag example'
test_cases.append(
NegativeAlertTestCase(
description="Negative test case with events with incorrect tags",
events=custom_events,
)
)
custom_events = default_events
for temp_event in custom_events:
temp_event['_source']['utctimestamp'] = AlertTestSuite.subtract_from_timestamp_lambda({'minutes': 241})
test_cases.append(
NegativeAlertTestCase(
description="Negative test case with old timestamp",
events=custom_events,
)
)
|
|
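One behavioural caveat in the file above: each 'custom_events = default_events' rebinding points at the same list object, so every later mutation also rewrites the events already attached to earlier test cases. Truly independent variants need per-case copies, e.g.:

import copy

default_events = [{'_type': 'event',
                   '_source': {'tags': ['open_port_policy_violation']}}
                  for _ in range(10)]

# Deep-copy before mutating so earlier test cases keep their own events.
custom_events = copy.deepcopy(default_events)
for event in custom_events:
    event['_type'] = 'bad'

assert default_events[0]['_type'] == 'event'  # originals untouched
assert custom_events[0]['_type'] == 'bad'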
5314e142827016eed55c2a1c94faae853d2331e5
|
tests/unit/modules/test_bigip.py
|
tests/unit/modules/test_bigip.py
|
# -*- coding: utf-8 -*-
"""
tests.unit.modules.test_bigip
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Unit tests for the bigip module
"""
from __future__ import absolute_import, print_function, unicode_literals
import logging
import salt.modules.bigip as bigip
from tests.support.mixins import LoaderModuleMockMixin
from tests.support.mock import MagicMock, patch
from tests.support.unit import TestCase
log = logging.getLogger(__name__)
class RequestsSession:
def __init__(self):
self.auth = None
self.verify = None
self.headers = {}
class BigipModuleTest(TestCase, LoaderModuleMockMixin):
def setup_loader_modules(self):
return {bigip: {}}
def test__build_session_verify_ssl(self):
requests_session = RequestsSession()
with patch("salt.modules.bigip.requests.sessions.Session", MagicMock(return_value=requests_session)):
bigip._build_session("username", "password")
self.assertEqual(requests_session.auth, ("username", "password"))
assert requests_session.verify is True
|
Add tests to validate that bigip module requests session verifies SSL
|
Add tests to validate that bigip module requests session verifies SSL
|
Python
|
apache-2.0
|
saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt
|
Add tests to validate that bigip module requests session verifies SSL
|
# -*- coding: utf-8 -*-
"""
tests.unit.modules.test_bigip
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Unit tests for the bigip module
"""
from __future__ import absolute_import, print_function, unicode_literals
import logging
import salt.modules.bigip as bigip
from tests.support.mixins import LoaderModuleMockMixin
from tests.support.mock import MagicMock, patch
from tests.support.unit import TestCase
log = logging.getLogger(__name__)
class RequestsSession:
def __init__(self):
self.auth = None
self.verify = None
self.headers = {}
class BigipModuleTest(TestCase, LoaderModuleMockMixin):
def setup_loader_modules(self):
return {bigip: {}}
def test__build_session_verify_ssl(self):
requests_session = RequestsSession()
with patch("salt.modules.bigip.requests.sessions.Session", MagicMock(return_value=requests_session)):
bigip._build_session("username", "password")
self.assertEqual(requests_session.auth, ("username", "password"))
assert requests_session.verify is True
|
<commit_before><commit_msg>Add tests to validate that bigip module requests session verifies SSL<commit_after>
|
# -*- coding: utf-8 -*-
"""
tests.unit.modules.test_bigip
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Unit tests for the bigip module
"""
from __future__ import absolute_import, print_function, unicode_literals
import logging
import salt.modules.bigip as bigip
from tests.support.mixins import LoaderModuleMockMixin
from tests.support.mock import MagicMock, patch
from tests.support.unit import TestCase
log = logging.getLogger(__name__)
class RequestsSession:
def __init__(self):
self.auth = None
self.verify = None
self.headers = {}
class BigipModuleTest(TestCase, LoaderModuleMockMixin):
def setup_loader_modules(self):
return {bigip: {}}
def test__build_session_verify_ssl(self):
requests_session = RequestsSession()
with patch("salt.modules.bigip.requests.sessions.Session", MagicMock(return_value=requests_session)):
bigip._build_session("username", "password")
self.assertEqual(requests_session.auth, ("username", "password"))
assert requests_session.verify is True
|
Add tests to validate that bigip module requests session verifies SSL# -*- coding: utf-8 -*-
"""
tests.unit.modules.test_bigip
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Unit tests for the bigip module
"""
from __future__ import absolute_import, print_function, unicode_literals
import logging
import salt.modules.bigip as bigip
from tests.support.mixins import LoaderModuleMockMixin
from tests.support.mock import MagicMock, patch
from tests.support.unit import TestCase
log = logging.getLogger(__name__)
class RequestsSession:
def __init__(self):
self.auth = None
self.verify = None
self.headers = {}
class BigipModuleTest(TestCase, LoaderModuleMockMixin):
def setup_loader_modules(self):
return {bigip: {}}
def test__build_session_verify_ssl(self):
requests_session = RequestsSession()
with patch("salt.modules.bigip.requests.sessions.Session", MagicMock(return_value=requests_session)):
bigip._build_session("username", "password")
self.assertEqual(requests_session.auth, ("username", "password"))
assert requests_session.verify is True
|
<commit_before><commit_msg>Add tests to validate that bigip module requests session verifies SSL<commit_after># -*- coding: utf-8 -*-
"""
tests.unit.modules.test_bigip
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Unit tests for the bigip module
"""
from __future__ import absolute_import, print_function, unicode_literals
import logging
import salt.modules.bigip as bigip
from tests.support.mixins import LoaderModuleMockMixin
from tests.support.mock import MagicMock, patch
from tests.support.unit import TestCase
log = logging.getLogger(__name__)
class RequestsSession:
def __init__(self):
self.auth = None
self.verify = None
self.headers = {}
class BigipModuleTest(TestCase, LoaderModuleMockMixin):
def setup_loader_modules(self):
return {bigip: {}}
def test__build_session_verify_ssl(self):
requests_session = RequestsSession()
with patch("salt.modules.bigip.requests.sessions.Session", MagicMock(return_value=requests_session)):
bigip._build_session("username", "password")
self.assertEqual(requests_session.auth, ("username", "password"))
assert requests_session.verify is True
|
|
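The test pins down the contract of bigip._build_session without reproducing it; under those assertions, a conforming implementation would look roughly like the following sketch (the real salt module likely sets additional headers and options):

import requests

def build_session(username, password):
    session = requests.Session()
    session.auth = (username, password)
    session.verify = True  # the property the regression test guards
    session.headers.update({'Content-Type': 'application/json'})
    return session

session = build_session('username', 'password')
assert session.auth == ('username', 'password') and session.verify is True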
6852c9ee1ff9baf7ea21e5e0e11e8984a8315fba
|
glance/registry/db/migrate_repo/versions/004_add_checksum.py
|
glance/registry/db/migrate_repo/versions/004_add_checksum.py
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from migrate.changeset import *
from sqlalchemy import *
from sqlalchemy.sql import and_, not_
from glance.registry.db.migrate_repo.schema import (
Boolean, DateTime, Integer, String, Text, from_migration_import)
def get_images_table(meta):
"""
Returns the Table object for the images table that
corresponds to the images table definition of this version.
"""
images = Table('images', meta,
Column('id', Integer(), primary_key=True, nullable=False),
Column('name', String(255)),
Column('disk_format', String(20)),
Column('container_format', String(20)),
Column('size', Integer()),
Column('status', String(30), nullable=False),
Column('is_public', Boolean(), nullable=False, default=False,
index=True),
Column('location', Text()),
Column('created_at', DateTime(), nullable=False),
Column('updated_at', DateTime()),
Column('deleted_at', DateTime()),
Column('deleted', Boolean(), nullable=False, default=False,
index=True),
Column('checksum', String(32)),
mysql_engine='InnoDB',
useexisting=True)
return images
def get_image_properties_table(meta):
"""
No changes to the image properties table from 002...
"""
(define_image_properties_table,) = from_migration_import(
'002_add_image_properties_table', ['define_image_properties_table'])
image_properties = define_image_properties_table(meta)
return image_properties
def upgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
images = get_images_table(meta)
checksum = Column('checksum', String(32))
checksum.create(images)
def downgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
images = get_images_table(meta)
images.columns['checksum'].drop()
|
Add migration script for checksum column
|
Add migration script for checksum column
|
Python
|
apache-2.0
|
akash1808/glance,jumpstarter-io/glance,cloudbau/glance,JioCloud/glance,tanglei528/glance,jumpstarter-io/glance,wkoathp/glance,paramite/glance,stevelle/glance,sigmavirus24/glance,cloudbau/glance,rajalokan/glance,redhat-openstack/glance,ozamiatin/glance,ntt-sic/glance,rickerc/glance_audit,darren-wang/gl,kfwang/Glance-OVA-OVF,paramite/glance,stevelle/glance,scripnichenko/glance,akash1808/glance,SUSE-Cloud/glance,kfwang/Glance-OVA-OVF,rickerc/glance_audit,vuntz/glance,vuntz/glance,citrix-openstack-build/glance,ozamiatin/glance,tanglei528/glance,klmitch/glance,redhat-openstack/glance,saeki-masaki/glance,SUSE-Cloud/glance,openstack/glance,scripnichenko/glance,takeshineshiro/glance,wkoathp/glance,sigmavirus24/glance,rajalokan/glance,dims/glance,darren-wang/gl,openstack/glance,takeshineshiro/glance,klmitch/glance,openstack/glance,JioCloud/glance,dims/glance,citrix-openstack-build/glance,saeki-masaki/glance,ntt-sic/glance
|
Add migration script for checksum column
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from migrate.changeset import *
from sqlalchemy import *
from sqlalchemy.sql import and_, not_
from glance.registry.db.migrate_repo.schema import (
Boolean, DateTime, Integer, String, Text, from_migration_import)
def get_images_table(meta):
"""
Returns the Table object for the images table that
corresponds to the images table definition of this version.
"""
images = Table('images', meta,
Column('id', Integer(), primary_key=True, nullable=False),
Column('name', String(255)),
Column('disk_format', String(20)),
Column('container_format', String(20)),
Column('size', Integer()),
Column('status', String(30), nullable=False),
Column('is_public', Boolean(), nullable=False, default=False,
index=True),
Column('location', Text()),
Column('created_at', DateTime(), nullable=False),
Column('updated_at', DateTime()),
Column('deleted_at', DateTime()),
Column('deleted', Boolean(), nullable=False, default=False,
index=True),
Column('checksum', String(32)),
mysql_engine='InnoDB',
useexisting=True)
return images
def get_image_properties_table(meta):
"""
No changes to the image properties table from 002...
"""
(define_image_properties_table,) = from_migration_import(
'002_add_image_properties_table', ['define_image_properties_table'])
image_properties = define_image_properties_table(meta)
return image_properties
def upgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
images = get_images_table(meta)
checksum = Column('checksum', String(32))
checksum.create(images)
def downgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
images = get_images_table(meta)
images.columns['checksum'].drop()
|
<commit_before><commit_msg>Add migration script for checksum column<commit_after>
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from migrate.changeset import *
from sqlalchemy import *
from sqlalchemy.sql import and_, not_
from glance.registry.db.migrate_repo.schema import (
Boolean, DateTime, Integer, String, Text, from_migration_import)
def get_images_table(meta):
"""
Returns the Table object for the images table that
corresponds to the images table definition of this version.
"""
images = Table('images', meta,
Column('id', Integer(), primary_key=True, nullable=False),
Column('name', String(255)),
Column('disk_format', String(20)),
Column('container_format', String(20)),
Column('size', Integer()),
Column('status', String(30), nullable=False),
Column('is_public', Boolean(), nullable=False, default=False,
index=True),
Column('location', Text()),
Column('created_at', DateTime(), nullable=False),
Column('updated_at', DateTime()),
Column('deleted_at', DateTime()),
Column('deleted', Boolean(), nullable=False, default=False,
index=True),
Column('checksum', String(32)),
mysql_engine='InnoDB',
useexisting=True)
return images
def get_image_properties_table(meta):
"""
No changes to the image properties table from 002...
"""
(define_image_properties_table,) = from_migration_import(
'002_add_image_properties_table', ['define_image_properties_table'])
image_properties = define_image_properties_table(meta)
return image_properties
def upgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
images = get_images_table(meta)
checksum = Column('checksum', String(32))
checksum.create(images)
def downgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
images = get_images_table(meta)
images.columns['checksum'].drop()
|
Add migration script for checksum column# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from migrate.changeset import *
from sqlalchemy import *
from sqlalchemy.sql import and_, not_
from glance.registry.db.migrate_repo.schema import (
Boolean, DateTime, Integer, String, Text, from_migration_import)
def get_images_table(meta):
"""
Returns the Table object for the images table that
corresponds to the images table definition of this version.
"""
images = Table('images', meta,
Column('id', Integer(), primary_key=True, nullable=False),
Column('name', String(255)),
Column('disk_format', String(20)),
Column('container_format', String(20)),
Column('size', Integer()),
Column('status', String(30), nullable=False),
Column('is_public', Boolean(), nullable=False, default=False,
index=True),
Column('location', Text()),
Column('created_at', DateTime(), nullable=False),
Column('updated_at', DateTime()),
Column('deleted_at', DateTime()),
Column('deleted', Boolean(), nullable=False, default=False,
index=True),
Column('checksum', String(32)),
mysql_engine='InnoDB',
useexisting=True)
return images
def get_image_properties_table(meta):
"""
No changes to the image properties table from 002...
"""
(define_image_properties_table,) = from_migration_import(
'002_add_image_properties_table', ['define_image_properties_table'])
image_properties = define_image_properties_table(meta)
return image_properties
def upgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
images = get_images_table(meta)
checksum = Column('checksum', String(32))
checksum.create(images)
def downgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
images = get_images_table(meta)
images.columns['checksum'].drop()
|
<commit_before><commit_msg>Add migration script for checksum column<commit_after># vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from migrate.changeset import *
from sqlalchemy import *
from sqlalchemy.sql import and_, not_
from glance.registry.db.migrate_repo.schema import (
Boolean, DateTime, Integer, String, Text, from_migration_import)
def get_images_table(meta):
"""
Returns the Table object for the images table that
corresponds to the images table definition of this version.
"""
images = Table('images', meta,
Column('id', Integer(), primary_key=True, nullable=False),
Column('name', String(255)),
Column('disk_format', String(20)),
Column('container_format', String(20)),
Column('size', Integer()),
Column('status', String(30), nullable=False),
Column('is_public', Boolean(), nullable=False, default=False,
index=True),
Column('location', Text()),
Column('created_at', DateTime(), nullable=False),
Column('updated_at', DateTime()),
Column('deleted_at', DateTime()),
Column('deleted', Boolean(), nullable=False, default=False,
index=True),
Column('checksum', String(32)),
mysql_engine='InnoDB',
useexisting=True)
return images
def get_image_properties_table(meta):
"""
No changes to the image properties table from 002...
"""
(define_image_properties_table,) = from_migration_import(
'002_add_image_properties_table', ['define_image_properties_table'])
image_properties = define_image_properties_table(meta)
return image_properties
def upgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
images = get_images_table(meta)
checksum = Column('checksum', String(32))
checksum.create(images)
def downgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
images = get_images_table(meta)
images.columns['checksum'].drop()
|
|
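The upgrade/downgrade pair above is the canonical sqlalchemy-migrate recipe for adding a nullable column: build a Column object and call the create()/drop() changeset helpers that migrate.changeset patches onto it. Stripped of the glance-specific table definition, the pattern reduces to this sketch (reflecting the table instead of redeclaring it):

from migrate.changeset import *  # patches create()/drop() onto Column
from sqlalchemy import Column, MetaData, String, Table

def upgrade(migrate_engine):
    meta = MetaData(bind=migrate_engine)
    images = Table('images', meta, autoload=True)  # reflect the live table
    Column('checksum', String(32)).create(images)

def downgrade(migrate_engine):
    meta = MetaData(bind=migrate_engine)
    images = Table('images', meta, autoload=True)
    images.columns['checksum'].drop()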
371bb8f7d7a0f77e7f756ed691b5806a8da33326
|
IPython/utils/tests/test_text.py
|
IPython/utils/tests/test_text.py
|
# encoding: utf-8
"""Tests for IPython.utils.text"""
#-----------------------------------------------------------------------------
# Copyright (C) 2011 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
import os
import nose.tools as nt
from nose import with_setup
from IPython.testing import decorators as dec
from IPython.utils import text
#-----------------------------------------------------------------------------
# Globals
#-----------------------------------------------------------------------------
def test_columnize():
"""Test columnize with very long inputs"""
text.columnize(['a'*180, 'b'*180])
|
Add failing test: columnize called with very long entries.
|
Add failing test: columnize called with very long entries.
Bug reported on-list.
|
Python
|
bsd-3-clause
|
ipython/ipython,ipython/ipython
|
Add failing test: columnize called with very long entries.
Bug reported on-list.
|
# encoding: utf-8
"""Tests for IPython.utils.text"""
#-----------------------------------------------------------------------------
# Copyright (C) 2011 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
import os
import nose.tools as nt
from nose import with_setup
from IPython.testing import decorators as dec
from IPython.utils import text
#-----------------------------------------------------------------------------
# Globals
#-----------------------------------------------------------------------------
def test_columnize():
"""Test columnize with very long inputs"""
text.columnize(['a'*180, 'b'*180])
|
<commit_before><commit_msg>Add failing test: columnize called with very long entries.
Bug reported on-list.<commit_after>
|
# encoding: utf-8
"""Tests for IPython.utils.text"""
#-----------------------------------------------------------------------------
# Copyright (C) 2011 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
import os
import nose.tools as nt
from nose import with_setup
from IPython.testing import decorators as dec
from IPython.utils import text
#-----------------------------------------------------------------------------
# Globals
#-----------------------------------------------------------------------------
def test_columnize():
"""Test columnize with very long inputs"""
text.columnize(['a'*180, 'b'*180])
|
Add failing test: columnize called with very long entries.
Bug reported on-list.# encoding: utf-8
"""Tests for IPython.utils.text"""
#-----------------------------------------------------------------------------
# Copyright (C) 2011 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
import os
import nose.tools as nt
from nose import with_setup
from IPython.testing import decorators as dec
from IPython.utils import text
#-----------------------------------------------------------------------------
# Globals
#-----------------------------------------------------------------------------
def test_columnize():
"""Test columnize with very long inputs"""
text.columnize(['a'*180, 'b'*180])
|
<commit_before><commit_msg>Add failing test: columnize called with very long entries.
Bug reported on-list.<commit_after># encoding: utf-8
"""Tests for IPython.utils.text"""
#-----------------------------------------------------------------------------
# Copyright (C) 2011 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
import os
import nose.tools as nt
from nose import with_setup
from IPython.testing import decorators as dec
from IPython.utils import text
#-----------------------------------------------------------------------------
# Globals
#-----------------------------------------------------------------------------
def test_columnize():
"""Test columnize with very long inputs"""
text.columnize(['a'*180, 'b'*180])
|
|
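The failing case above is entries wider than the display. A minimal standalone columnizer showing the expected degradation -- a sketch, not IPython's implementation; the 80-column default width is an assumption:
def columnize(items, displaywidth=80):
    if not items:
        return '\n'
    width = max(len(s) for s in items) + 2
    if width >= displaywidth:
        # Entries wider than the display degrade to one per line.
        return '\n'.join(items) + '\n'
    percol = displaywidth // width
    rows = [items[i:i + percol] for i in range(0, len(items), percol)]
    return '\n'.join(''.join(s.ljust(width) for s in row) for row in rows) + '\n'

print(columnize(['a' * 180, 'b' * 180]))  # two lines, no crash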
f8c26bb11a3d064902fd6bb70b9933861ce7fe1d
|
rest/httpUtils.py
|
rest/httpUtils.py
|
from django.http import HttpResponse
from rest_framework.renderers import JSONRenderer
# Initially taken from
# http://www.django-rest-framework.org/tutorial/1-serialization/
class JSONResponse(HttpResponse):
"""
An HttpResponse that renders its content into JSON.
"""
def __init__(self, data, **kwargs):
content = JSONRenderer().render(data)
kwargs['content_type'] = 'application/json; charset=utf-8'
super(JSONResponse, self).__init__(content, **kwargs)
|
Add one place to get JSONResponse from.
|
Add one place to get JSONResponse from.
|
Python
|
apache-2.0
|
CMPUT404W17T06/CMPUT404-project,CMPUT404W17T06/CMPUT404-project,CMPUT404W17T06/CMPUT404-project
|
Add one place to get JSONResponse from.
|
from django.http import HttpResponse
from rest_framework.renderers import JSONRenderer
# Initially taken from
# http://www.django-rest-framework.org/tutorial/1-serialization/
class JSONResponse(HttpResponse):
"""
An HttpResponse that renders its content into JSON.
"""
def __init__(self, data, **kwargs):
content = JSONRenderer().render(data)
kwargs['content_type'] = 'application/json; charset=utf-8'
super(JSONResponse, self).__init__(content, **kwargs)
|
<commit_before><commit_msg>Add one place to get JSONResponse from.<commit_after>
|
from django.http import HttpResponse
from rest_framework.renderers import JSONRenderer
# Initially taken from
# http://www.django-rest-framework.org/tutorial/1-serialization/
class JSONResponse(HttpResponse):
"""
An HttpResponse that renders its content into JSON.
"""
def __init__(self, data, **kwargs):
content = JSONRenderer().render(data)
kwargs['content_type'] = 'application/json; charset=utf-8'
super(JSONResponse, self).__init__(content, **kwargs)
|
Add one place to get JSONResponse from.from django.http import HttpResponse
from rest_framework.renderers import JSONRenderer
# Initially taken from
# http://www.django-rest-framework.org/tutorial/1-serialization/
class JSONResponse(HttpResponse):
"""
An HttpResponse that renders its content into JSON.
"""
def __init__(self, data, **kwargs):
content = JSONRenderer().render(data)
kwargs['content_type'] = 'application/json; charset=utf-8'
super(JSONResponse, self).__init__(content, **kwargs)
|
<commit_before><commit_msg>Add one place to get JSONResponse from.<commit_after>from django.http import HttpResponse
from rest_framework.renderers import JSONRenderer
# Initially taken from
# http://www.django-rest-framework.org/tutorial/1-serialization/
class JSONResponse(HttpResponse):
"""
An HttpResponse that renders its content into JSON.
"""
def __init__(self, data, **kwargs):
content = JSONRenderer().render(data)
kwargs['content_type'] = 'application/json; charset=utf-8'
super(JSONResponse, self).__init__(content, **kwargs)
|
|
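A minimal sketch of how the JSONResponse helper above might be used from a view; the view name and payload are illustrative assumptions, not part of the commit:
def api_status(request):
    # Hypothetical view; any JSON-serializable structure works here,
    # since JSONRenderer handles the encoding.
    payload = {'status': 'ok', 'count': 3}
    return JSONResponse(payload, status=200)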
5a27d230c92215594d909c49f22ff2ff592806c3
|
gaphor/UML/tests/test_diagram.py
|
gaphor/UML/tests/test_diagram.py
|
import gaphas
import pytest
from gaphor.services.eventmanager import EventManager
from gaphor.UML import Diagram
from gaphor.UML.elementfactory import ElementFactory
from gaphor.UML.presentation import Presentation
class Example(Presentation, gaphas.Element):
def unlink(self):
self.test_unlinked = True
super().unlink()
@pytest.fixture
def element_factory():
event_manager = EventManager()
element_factory = ElementFactory()
yield element_factory
element_factory.shutdown()
event_manager.shutdown()
def test_canvas_is_set_up():
diagram = Diagram("id", None)
assert diagram.canvas
def test_canvas_is_saved():
diagram = Diagram("id", None)
saved_keys = []
diagram.save(lambda name, val: saved_keys.append(name))
assert "canvas" in saved_keys
def test_canvas_item_is_created(element_factory):
diagram = element_factory.create(Diagram)
example = diagram.create(Example)
assert example in diagram.canvas.get_all_items()
def test_canvas_is_unlinked(element_factory):
diagram = element_factory.create(Diagram)
example = diagram.create(Example)
diagram.unlink()
assert example.test_unlinked
|
Add some extra tests for Diagram
|
Add some extra tests for Diagram
|
Python
|
lgpl-2.1
|
amolenaar/gaphor,amolenaar/gaphor
|
Add some extra tests for Diagram
|
import gaphas
import pytest
from gaphor.services.eventmanager import EventManager
from gaphor.UML import Diagram
from gaphor.UML.elementfactory import ElementFactory
from gaphor.UML.presentation import Presentation
class Example(Presentation, gaphas.Element):
def unlink(self):
self.test_unlinked = True
super().unlink()
@pytest.fixture
def element_factory():
event_manager = EventManager()
element_factory = ElementFactory()
yield element_factory
element_factory.shutdown()
event_manager.shutdown()
def test_canvas_is_set_up():
diagram = Diagram("id", None)
assert diagram.canvas
def test_canvas_is_saved():
diagram = Diagram("id", None)
saved_keys = []
diagram.save(lambda name, val: saved_keys.append(name))
assert "canvas" in saved_keys
def test_canvas_item_is_created(element_factory):
diagram = element_factory.create(Diagram)
example = diagram.create(Example)
assert example in diagram.canvas.get_all_items()
def test_canvas_is_unlinked(element_factory):
diagram = element_factory.create(Diagram)
example = diagram.create(Example)
diagram.unlink()
assert example.test_unlinked
|
<commit_before><commit_msg>Add some extra tests for Diagram<commit_after>
|
import gaphas
import pytest
from gaphor.services.eventmanager import EventManager
from gaphor.UML import Diagram
from gaphor.UML.elementfactory import ElementFactory
from gaphor.UML.presentation import Presentation
class Example(Presentation, gaphas.Element):
def unlink(self):
self.test_unlinked = True
super().unlink()
@pytest.fixture
def element_factory():
event_manager = EventManager()
element_factory = ElementFactory()
yield element_factory
element_factory.shutdown()
event_manager.shutdown()
def test_canvas_is_set_up():
diagram = Diagram("id", None)
assert diagram.canvas
def test_canvas_is_saved():
diagram = Diagram("id", None)
saved_keys = []
diagram.save(lambda name, val: saved_keys.append(name))
assert "canvas" in saved_keys
def test_canvas_item_is_created(element_factory):
diagram = element_factory.create(Diagram)
example = diagram.create(Example)
assert example in diagram.canvas.get_all_items()
def test_canvas_is_unlinked(element_factory):
diagram = element_factory.create(Diagram)
example = diagram.create(Example)
diagram.unlink()
assert example.test_unlinked
|
Add some extra tests for Diagramimport gaphas
import pytest
from gaphor.services.eventmanager import EventManager
from gaphor.UML import Diagram
from gaphor.UML.elementfactory import ElementFactory
from gaphor.UML.presentation import Presentation
class Example(Presentation, gaphas.Element):
def unlink(self):
self.test_unlinked = True
super().unlink()
@pytest.fixture
def element_factory():
event_manager = EventManager()
element_factory = ElementFactory()
yield element_factory
element_factory.shutdown()
event_manager.shutdown()
def test_canvas_is_set_up():
diagram = Diagram("id", None)
assert diagram.canvas
def test_canvas_is_saved():
diagram = Diagram("id", None)
saved_keys = []
diagram.save(lambda name, val: saved_keys.append(name))
assert "canvas" in saved_keys
def test_canvas_item_is_created(element_factory):
diagram = element_factory.create(Diagram)
example = diagram.create(Example)
assert example in diagram.canvas.get_all_items()
def test_canvas_is_unlinked(element_factory):
diagram = element_factory.create(Diagram)
example = diagram.create(Example)
diagram.unlink()
assert example.test_unlinked
|
<commit_before><commit_msg>Add some extra tests for Diagram<commit_after>import gaphas
import pytest
from gaphor.services.eventmanager import EventManager
from gaphor.UML import Diagram
from gaphor.UML.elementfactory import ElementFactory
from gaphor.UML.presentation import Presentation
class Example(Presentation, gaphas.Element):
def unlink(self):
self.test_unlinked = True
super().unlink()
@pytest.fixture
def element_factory():
event_manager = EventManager()
element_factory = ElementFactory()
yield element_factory
element_factory.shutdown()
event_manager.shutdown()
def test_canvas_is_set_up():
diagram = Diagram("id", None)
assert diagram.canvas
def test_canvas_is_saved():
diagram = Diagram("id", None)
saved_keys = []
diagram.save(lambda name, val: saved_keys.append(name))
assert "canvas" in saved_keys
def test_canvas_item_is_created(element_factory):
diagram = element_factory.create(Diagram)
example = diagram.create(Example)
assert example in diagram.canvas.get_all_items()
def test_canvas_is_unlinked(element_factory):
diagram = element_factory.create(Diagram)
example = diagram.create(Example)
diagram.unlink()
assert example.test_unlinked
|
|
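The Example class above acts as a lightweight spy: the test_unlinked flag records that the unlink cascade reached the item. The same check could be spelled with unittest.mock -- a sketch only, not what the commit uses, and it assumes Diagram.unlink() still cascades to canvas items when the method is patched out:
from unittest import mock

def test_unlink_cascades(element_factory):
    diagram = element_factory.create(Diagram)
    example = diagram.create(Example)
    with mock.patch.object(Example, 'unlink', autospec=True) as unlinked:
        diagram.unlink()
    unlinked.assert_called_once_with(example)  # self is passed with autospec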
0a1dbf4d891feda44b1fd45beb7bf59a5737f797
|
dataportal/__init__.py
|
dataportal/__init__.py
|
import sys
import logging
from .sources import *
logger = logging.getLogger(__name__)
__version__ = 'v0.0.5.post0'
|
import sys
import logging
from .sources import *
logger = logging.getLogger(__name__)
__version__ = 'v0.0.5.post0'
from .broker import DataBroker
from .muxer import DataMuxer
|
Make DataBroker and DataMuxer top-level.
|
API: Make DataBroker and DataMuxer top-level.
|
Python
|
bsd-3-clause
|
ericdill/databroker,danielballan/dataportal,ericdill/datamuxer,tacaswell/dataportal,NSLS-II/datamuxer,NSLS-II/dataportal,danielballan/datamuxer,danielballan/datamuxer,tacaswell/dataportal,danielballan/dataportal,ericdill/datamuxer,NSLS-II/dataportal,ericdill/databroker
|
import sys
import logging
from .sources import *
logger = logging.getLogger(__name__)
__version__ = 'v0.0.5.post0'
API: Make DataBroker and DataMuxer top-level.
|
import sys
import logging
from .sources import *
logger = logging.getLogger(__name__)
__version__ = 'v0.0.5.post0'
from .broker import DataBroker
from .muxer import DataMuxer
|
<commit_before>import sys
import logging
from .sources import *
logger = logging.getLogger(__name__)
__version__ = 'v0.0.5.post0'
<commit_msg>API: Make DataBroker and DataMuxer top-level.<commit_after>
|
import sys
import logging
from .sources import *
logger = logging.getLogger(__name__)
__version__ = 'v0.0.5.post0'
from .broker import DataBroker
from .muxer import DataMuxer
|
import sys
import logging
from .sources import *
logger = logging.getLogger(__name__)
__version__ = 'v0.0.5.post0'
API: Make DataBroker and DataMuxer top-level.import sys
import logging
from .sources import *
logger = logging.getLogger(__name__)
__version__ = 'v0.0.5.post0'
from .broker import DataBroker
from .muxer import DataMuxer
|
<commit_before>import sys
import logging
from .sources import *
logger = logging.getLogger(__name__)
__version__ = 'v0.0.5.post0'
<commit_msg>API: Make DataBroker and DataMuxer top-level.<commit_after>import sys
import logging
from .sources import *
logger = logging.getLogger(__name__)
__version__ = 'v0.0.5.post0'
from .broker import DataBroker
from .muxer import DataMuxer
|
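The change above is purely import ergonomics; after it, both spellings resolve to the same classes:
from dataportal import DataBroker, DataMuxer   # new, top-level
from dataportal.broker import DataBroker       # still available
from dataportal.muxer import DataMuxer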
7d068af4cab1eeb9d7e4b78babb8216f60314f4f
|
portal/tests/test_views.py
|
portal/tests/test_views.py
|
from django.test import TestCase
from django.urls import reverse
# Create your tests here.
class HomeViewTestCase(TestCase):
"""HomeView test suite"""
expected_url = '/'
def test_desired_location(self):
resp = self.client.get(self.expected_url)
self.assertEqual(resp.status_code, 200)
def test_desired_name(self):
reverse_url = reverse('portal:home')
self.assertEquals(reverse_url, self.expected_url)
class LoginViewTestCase(TestCase):
"""LoginView test suite"""
expected_url = '/login/'
def test_desired_location(self):
resp = self.client.get(self.expected_url)
self.assertEqual(resp.status_code, 200)
def test_desired_name(self):
reverse_url = reverse('portal:login')
self.assertEquals(reverse_url, self.expected_url)
class LogoutViewTestCase(TestCase):
"""LogoutView test suite"""
expected_url = '/logout/'
def test_desired_location(self):
resp = self.client.get(self.expected_url)
self.assertEqual(resp.status_code, 200)
def test_desired_name(self):
reverse_url = reverse('portal:logout')
self.assertEquals(reverse_url, self.expected_url)
class UserFormViewTestCase(TestCase):
"""UserFormView test suite"""
expected_url = '/register/'
def test_desired_location(self):
resp = self.client.get(self.expected_url)
self.assertEqual(resp.status_code, 200)
def test_desired_name(self):
reverse_url = reverse('portal:register')
self.assertEquals(reverse_url, self.expected_url)
|
Add view test cases for portal
|
Add view test cases for portal
|
Python
|
mit
|
huangsam/chowist,huangsam/chowist,huangsam/chowist
|
Add view test cases for portal
|
from django.test import TestCase
from django.urls import reverse
# Create your tests here.
class HomeViewTestCase(TestCase):
"""HomeView test suite"""
expected_url = '/'
def test_desired_location(self):
resp = self.client.get(self.expected_url)
self.assertEqual(resp.status_code, 200)
def test_desired_name(self):
reverse_url = reverse('portal:home')
self.assertEquals(reverse_url, self.expected_url)
class LoginViewTestCase(TestCase):
"""LoginView test suite"""
expected_url = '/login/'
def test_desired_location(self):
resp = self.client.get(self.expected_url)
self.assertEqual(resp.status_code, 200)
def test_desired_name(self):
reverse_url = reverse('portal:login')
self.assertEquals(reverse_url, self.expected_url)
class LogoutViewTestCase(TestCase):
"""LogoutView test suite"""
expected_url = '/logout/'
def test_desired_location(self):
resp = self.client.get(self.expected_url)
self.assertEqual(resp.status_code, 200)
def test_desired_name(self):
reverse_url = reverse('portal:logout')
self.assertEquals(reverse_url, self.expected_url)
class UserFormViewTestCase(TestCase):
"""UserFormView test suite"""
expected_url = '/register/'
def test_desired_location(self):
resp = self.client.get(self.expected_url)
self.assertEqual(resp.status_code, 200)
def test_desired_name(self):
reverse_url = reverse('portal:register')
self.assertEquals(reverse_url, self.expected_url)
|
<commit_before><commit_msg>Add view test cases for portal<commit_after>
|
from django.test import TestCase
from django.urls import reverse
# Create your tests here.
class HomeViewTestCase(TestCase):
"""HomeView test suite"""
expected_url = '/'
def test_desired_location(self):
resp = self.client.get(self.expected_url)
self.assertEqual(resp.status_code, 200)
def test_desired_name(self):
reverse_url = reverse('portal:home')
self.assertEquals(reverse_url, self.expected_url)
class LoginViewTestCase(TestCase):
"""LoginView test suite"""
expected_url = '/login/'
def test_desired_location(self):
resp = self.client.get(self.expected_url)
self.assertEqual(resp.status_code, 200)
def test_desired_name(self):
reverse_url = reverse('portal:login')
self.assertEquals(reverse_url, self.expected_url)
class LogoutViewTestCase(TestCase):
"""LogoutView test suite"""
expected_url = '/logout/'
def test_desired_location(self):
resp = self.client.get(self.expected_url)
self.assertEqual(resp.status_code, 200)
def test_desired_name(self):
reverse_url = reverse('portal:logout')
self.assertEquals(reverse_url, self.expected_url)
class UserFormViewTestCase(TestCase):
"""UserFormView test suite"""
expected_url = '/register/'
def test_desired_location(self):
resp = self.client.get(self.expected_url)
self.assertEqual(resp.status_code, 200)
def test_desired_name(self):
reverse_url = reverse('portal:register')
self.assertEquals(reverse_url, self.expected_url)
|
Add view test cases for portalfrom django.test import TestCase
from django.urls import reverse
# Create your tests here.
class HomeViewTestCase(TestCase):
"""HomeView test suite"""
expected_url = '/'
def test_desired_location(self):
resp = self.client.get(self.expected_url)
self.assertEqual(resp.status_code, 200)
def test_desired_name(self):
reverse_url = reverse('portal:home')
self.assertEquals(reverse_url, self.expected_url)
class LoginViewTestCase(TestCase):
"""LoginView test suite"""
expected_url = '/login/'
def test_desired_location(self):
resp = self.client.get(self.expected_url)
self.assertEqual(resp.status_code, 200)
def test_desired_name(self):
reverse_url = reverse('portal:login')
self.assertEquals(reverse_url, self.expected_url)
class LogoutViewTestCase(TestCase):
"""LogoutView test suite"""
expected_url = '/logout/'
def test_desired_location(self):
resp = self.client.get(self.expected_url)
self.assertEqual(resp.status_code, 200)
def test_desired_name(self):
reverse_url = reverse('portal:logout')
self.assertEquals(reverse_url, self.expected_url)
class UserFormViewTestCase(TestCase):
"""UserFormView test suite"""
expected_url = '/register/'
def test_desired_location(self):
resp = self.client.get(self.expected_url)
self.assertEqual(resp.status_code, 200)
def test_desired_name(self):
reverse_url = reverse('portal:register')
self.assertEquals(reverse_url, self.expected_url)
|
<commit_before><commit_msg>Add view test cases for portal<commit_after>from django.test import TestCase
from django.urls import reverse
# Create your tests here.
class HomeViewTestCase(TestCase):
"""HomeView test suite"""
expected_url = '/'
def test_desired_location(self):
resp = self.client.get(self.expected_url)
self.assertEqual(resp.status_code, 200)
def test_desired_name(self):
reverse_url = reverse('portal:home')
self.assertEquals(reverse_url, self.expected_url)
class LoginViewTestCase(TestCase):
"""LoginView test suite"""
expected_url = '/login/'
def test_desired_location(self):
resp = self.client.get(self.expected_url)
self.assertEqual(resp.status_code, 200)
def test_desired_name(self):
reverse_url = reverse('portal:login')
self.assertEquals(reverse_url, self.expected_url)
class LogoutViewTestCase(TestCase):
"""LogoutView test suite"""
expected_url = '/logout/'
def test_desired_location(self):
resp = self.client.get(self.expected_url)
self.assertEqual(resp.status_code, 200)
def test_desired_name(self):
reverse_url = reverse('portal:logout')
self.assertEquals(reverse_url, self.expected_url)
class UserFormViewTestCase(TestCase):
"""UserFormView test suite"""
expected_url = '/register/'
def test_desired_location(self):
resp = self.client.get(self.expected_url)
self.assertEqual(resp.status_code, 200)
def test_desired_name(self):
reverse_url = reverse('portal:register')
self.assertEquals(reverse_url, self.expected_url)
|
|
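Each test case above repeats the same two checks. A possible refactor -- a sketch, not part of the commit -- factors them into a mixin so each suite only declares its URL and route name:
from django.test import TestCase
from django.urls import reverse

class ViewChecksMixin:
    expected_url = None  # e.g. '/login/'
    route_name = None    # e.g. 'portal:login'

    def test_desired_location(self):
        resp = self.client.get(self.expected_url)
        self.assertEqual(resp.status_code, 200)

    def test_desired_name(self):
        self.assertEqual(reverse(self.route_name), self.expected_url)

class LoginViewTestCase(ViewChecksMixin, TestCase):
    expected_url = '/login/'
    route_name = 'portal:login'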
26b9bc9547865c9b4ea654504adbee15f21ed633
|
backend/populate_dionyziz.py
|
backend/populate_dionyziz.py
|
from string import ascii_lowercase
import django
import os
import string
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'backend.settings')
django.setup()
from breach.models import Target, Victim
endpoint = 'https://dionyziz.com/breach-test/reflect.php?ref=%s'
prefix = 'imper'
alphabet = ascii_lowercase
secretlength = 9
target_1 = Target(
endpoint=endpoint,
prefix=prefix,
alphabet=alphabet,
secretlength=secretlength,
alignmentalphabet=string.ascii_uppercase
)
target_1.save()
print 'Created Target:\n\tendpoint: {}\n\tprefix: {}\n\talphabet: {}\n\tsecretlength: {}'.format(endpoint, prefix, alphabet, secretlength)
snifferendpoint = 'http://127.0.0.1:9000'
sourceip = '192.168.1.22'
victim_1 = Victim(
target=target_1,
snifferendpoint=snifferendpoint,
sourceip=sourceip,
method='serial'
)
victim_1.save()
print 'Created Victim:\n\tvictim_id: {}\n\tsnifferendpoint: {}\n\tsourceip: {}'.format(victim_1.id, snifferendpoint, sourceip)
|
Add population script for dionyziz
|
Add population script for dionyziz
|
Python
|
mit
|
dimkarakostas/rupture,dionyziz/rupture,dimkarakostas/rupture,esarafianou/rupture,dimriou/rupture,dionyziz/rupture,dimriou/rupture,dionyziz/rupture,dimriou/rupture,dimriou/rupture,esarafianou/rupture,dimkarakostas/rupture,dimkarakostas/rupture,esarafianou/rupture,dimkarakostas/rupture,esarafianou/rupture,dimriou/rupture,dionyziz/rupture,dionyziz/rupture
|
Add population script for dionyziz
|
from string import ascii_lowercase
import django
import os
import string
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'backend.settings')
django.setup()
from breach.models import Target, Victim
endpoint = 'https://dionyziz.com/breach-test/reflect.php?ref=%s'
prefix = 'imper'
alphabet = ascii_lowercase
secretlength = 9
target_1 = Target(
endpoint=endpoint,
prefix=prefix,
alphabet=alphabet,
secretlength=secretlength,
alignmentalphabet=string.ascii_uppercase
)
target_1.save()
print 'Created Target:\n\tendpoint: {}\n\tprefix: {}\n\talphabet: {}\n\tsecretlength: {}'.format(endpoint, prefix, alphabet, secretlength)
snifferendpoint = 'http://127.0.0.1:9000'
sourceip = '192.168.1.22'
victim_1 = Victim(
target=target_1,
snifferendpoint=snifferendpoint,
sourceip=sourceip,
method='serial'
)
victim_1.save()
print 'Created Victim:\n\tvictim_id: {}\n\tsnifferendpoint: {}\n\tsourceip: {}'.format(victim_1.id, snifferendpoint, sourceip)
|
<commit_before><commit_msg>Add population script for dionyziz<commit_after>
|
from string import ascii_lowercase
import django
import os
import string
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'backend.settings')
django.setup()
from breach.models import Target, Victim
endpoint = 'https://dionyziz.com/breach-test/reflect.php?ref=%s'
prefix = 'imper'
alphabet = ascii_lowercase
secretlength = 9
target_1 = Target(
endpoint=endpoint,
prefix=prefix,
alphabet=alphabet,
secretlength=secretlength,
alignmentalphabet=string.ascii_uppercase
)
target_1.save()
print 'Created Target:\n\tendpoint: {}\n\tprefix: {}\n\talphabet: {}\n\tsecretlength: {}'.format(endpoint, prefix, alphabet, secretlength)
snifferendpoint = 'http://127.0.0.1:9000'
sourceip = '192.168.1.22'
victim_1 = Victim(
target=target_1,
snifferendpoint=snifferendpoint,
sourceip=sourceip,
method='serial'
)
victim_1.save()
print 'Created Victim:\n\tvictim_id: {}\n\tsnifferendpoint: {}\n\tsourceip: {}'.format(victim_1.id, snifferendpoint, sourceip)
|
Add population script for dionyzizfrom string import ascii_lowercase
import django
import os
import string
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'backend.settings')
django.setup()
from breach.models import Target, Victim
endpoint = 'https://dionyziz.com/breach-test/reflect.php?ref=%s'
prefix = 'imper'
alphabet = ascii_lowercase
secretlength = 9
target_1 = Target(
endpoint=endpoint,
prefix=prefix,
alphabet=alphabet,
secretlength=secretlength,
alignmentalphabet=string.ascii_uppercase
)
target_1.save()
print 'Created Target:\n\tendpoint: {}\n\tprefix: {}\n\talphabet: {}\n\tsecretlength: {}'.format(endpoint, prefix, alphabet, secretlength)
snifferendpoint = 'http://127.0.0.1:9000'
sourceip = '192.168.1.22'
victim_1 = Victim(
target=target_1,
snifferendpoint=snifferendpoint,
sourceip=sourceip,
method='serial'
)
victim_1.save()
print 'Created Victim:\n\tvictim_id: {}\n\tsnifferendpoint: {}\n\tsourceip: {}'.format(victim_1.id, snifferendpoint, sourceip)
|
<commit_before><commit_msg>Add population script for dionyziz<commit_after>from string import ascii_lowercase
import django
import os
import string
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'backend.settings')
django.setup()
from breach.models import Target, Victim
endpoint = 'https://dionyziz.com/breach-test/reflect.php?ref=%s'
prefix = 'imper'
alphabet = ascii_lowercase
secretlength = 9
target_1 = Target(
endpoint=endpoint,
prefix=prefix,
alphabet=alphabet,
secretlength=secretlength,
alignmentalphabet=string.ascii_uppercase
)
target_1.save()
print 'Created Target:\n\tendpoint: {}\n\tprefix: {}\n\talphabet: {}\n\tsecretlength: {}'.format(endpoint, prefix, alphabet, secretlength)
snifferendpoint = 'http://127.0.0.1:9000'
sourceip = '192.168.1.22'
victim_1 = Victim(
target=target_1,
snifferendpoint=snifferendpoint,
sourceip=sourceip,
method='serial'
)
victim_1.save()
print 'Created Victim:\n\tvictim_id: {}\n\tsnifferendpoint: {}\n\tsourceip: {}'.format(victim_1.id, snifferendpoint, sourceip)
|
|
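As written, re-running the population script inserts duplicate rows. A possible guard -- a sketch assuming Target is a regular Django model exposing the default objects manager, which the record does not confirm:
target_1, created = Target.objects.get_or_create(
    endpoint=endpoint,
    defaults={
        'prefix': prefix,
        'alphabet': alphabet,
        'secretlength': secretlength,
        'alignmentalphabet': string.ascii_uppercase,
    },
)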
a355a9f76add5b7b91b31b9e8b07c4f0fbc9ce3a
|
docs/examples/core/ipython-get-history.py
|
docs/examples/core/ipython-get-history.py
|
#!/usr/bin/env python
"""Extract a session from the IPython input history.
Usage:
ipython-get-history.py sessionnumber [outputfile]
If outputfile is not given, the relevant history is written to stdout. If
outputfile has a .py extension, the translated history (without IPython's
special syntax) will be extracted.
Example:
./ipython-get-history.py 57 record.ipy
This script is a simple demonstration of HistoryAccessor. It should be possible
to build much more flexible and powerful tools to browse and pull from the
history database.
"""
import sys
import codecs
from IPython.core.history import HistoryAccessor
session_number = int(sys.argv[1])
if len(sys.argv) > 2:
dest = open(sys.argv[2], "w")
raw = not sys.argv[2].endswith('.py')
else:
dest = sys.stdout
raw = True
dest.write("# coding: utf-8\n")
# Profiles other than 'default' can be specified here with a profile= argument:
hist = HistoryAccessor()
for session, lineno, cell in hist.get_range(session=session_number, raw=raw):
# To use this in Python 3, remove the .encode() here:
dest.write(cell.encode('utf-8') + '\n')
|
Add example script for extracting history from the database.
|
Add example script for extracting history from the database.
|
Python
|
bsd-3-clause
|
ipython/ipython,ipython/ipython
|
Add example script for extracting history from the database.
|
#!/usr/bin/env python
"""Extract a session from the IPython input history.
Usage:
ipython-get-history.py sessionnumber [outputfile]
If outputfile is not given, the relevant history is written to stdout. If
outputfile has a .py extension, the translated history (without IPython's
special syntax) will be extracted.
Example:
./ipython-get-history.py 57 record.ipy
This script is a simple demonstration of HistoryAccessor. It should be possible
to build much more flexible and powerful tools to browse and pull from the
history database.
"""
import sys
import codecs
from IPython.core.history import HistoryAccessor
session_number = int(sys.argv[1])
if len(sys.argv) > 2:
dest = open(sys.argv[2], "w")
raw = not sys.argv[2].endswith('.py')
else:
dest = sys.stdout
raw = True
dest.write("# coding: utf-8\n")
# Profiles other than 'default' can be specified here with a profile= argument:
hist = HistoryAccessor()
for session, lineno, cell in hist.get_range(session=session_number, raw=raw):
# To use this in Python 3, remove the .encode() here:
dest.write(cell.encode('utf-8') + '\n')
|
<commit_before><commit_msg>Add example script for extracting history from the database.<commit_after>
|
#!/usr/bin/env python
"""Extract a session from the IPython input history.
Usage:
ipython-get-history.py sessionnumber [outputfile]
If outputfile is not given, the relevant history is written to stdout. If
outputfile has a .py extension, the translated history (without IPython's
special syntax) will be extracted.
Example:
./ipython-get-history.py 57 record.ipy
This script is a simple demonstration of HistoryAccessor. It should be possible
to build much more flexible and powerful tools to browse and pull from the
history database.
"""
import sys
import codecs
from IPython.core.history import HistoryAccessor
session_number = int(sys.argv[1])
if len(sys.argv) > 2:
dest = open(sys.argv[2], "w")
raw = not sys.argv[2].endswith('.py')
else:
dest = sys.stdout
raw = True
dest.write("# coding: utf-8\n")
# Profiles other than 'default' can be specified here with a profile= argument:
hist = HistoryAccessor()
for session, lineno, cell in hist.get_range(session=session_number, raw=raw):
# To use this in Python 3, remove the .encode() here:
dest.write(cell.encode('utf-8') + '\n')
|
Add example script for extracting history from the database.#!/usr/bin/env python
"""Extract a session from the IPython input history.
Usage:
ipython-get-history.py sessionnumber [outputfile]
If outputfile is not given, the relevant history is written to stdout. If
outputfile has a .py extension, the translated history (without IPython's
special syntax) will be extracted.
Example:
./ipython-get-history.py 57 record.ipy
This script is a simple demonstration of HistoryAccessor. It should be possible
to build much more flexible and powerful tools to browse and pull from the
history database.
"""
import sys
import codecs
from IPython.core.history import HistoryAccessor
session_number = int(sys.argv[1])
if len(sys.argv) > 2:
dest = open(sys.argv[2], "w")
raw = not sys.argv[2].endswith('.py')
else:
dest = sys.stdout
raw = True
dest.write("# coding: utf-8\n")
# Profiles other than 'default' can be specified here with a profile= argument:
hist = HistoryAccessor()
for session, lineno, cell in hist.get_range(session=session_number, raw=raw):
# To use this in Python 3, remove the .encode() here:
dest.write(cell.encode('utf-8') + '\n')
|
<commit_before><commit_msg>Add example script for extracting history from the database.<commit_after>#!/usr/bin/env python
"""Extract a session from the IPython input history.
Usage:
ipython-get-history.py sessionnumber [outputfile]
If outputfile is not given, the relevant history is written to stdout. If
outputfile has a .py extension, the translated history (without IPython's
special syntax) will be extracted.
Example:
./ipython-get-history.py 57 record.ipy
This script is a simple demonstration of HistoryAccessor. It should be possible
to build much more flexible and powerful tools to browse and pull from the
history database.
"""
import sys
import codecs
from IPython.core.history import HistoryAccessor
session_number = int(sys.argv[1])
if len(sys.argv) > 2:
dest = open(sys.argv[2], "w")
raw = not sys.argv[2].endswith('.py')
else:
dest = sys.stdout
raw = True
dest.write("# coding: utf-8\n")
# Profiles other than 'default' can be specified here with a profile= argument:
hist = HistoryAccessor()
for session, lineno, cell in hist.get_range(session=session_number, raw=raw):
# To use this in Python 3, remove the .encode() here:
dest.write(cell.encode('utf-8') + '\n')
|
|
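A Python 3 variant of the script's output path -- a sketch; the script above targets Python 2, as its own comment about removing .encode() notes:
if len(sys.argv) > 2:
    dest = open(sys.argv[2], 'w', encoding='utf-8')
    raw = not sys.argv[2].endswith('.py')
else:
    dest = sys.stdout
    raw = True
dest.write('# coding: utf-8\n')
for session, lineno, cell in hist.get_range(session=session_number, raw=raw):
    dest.write(cell + '\n')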
64da0215068c789a0e5422e3829c3e876a6cc526
|
scripts/osfstorage/repopulate_sha.py
|
scripts/osfstorage/repopulate_sha.py
|
import sys
import logging
from modularodm import Q
from website.app import init_app
from website.files.models import FileVersion
from scripts import utils as script_utils
from framework.transactions.context import TokuTransaction
logger = logging.getLogger(__name__)
def do_migration():
logger.info('Starting sha256 recovery migration')
for version in FileVersion.find(Q('metadata.sha256', 'eq', None)):
if not version.location:
continue
logger.debug('Adding sha {} to version {}'.format(version.location['object'], version._id))
version.metadata['sha256'] = version.location['object']
version.save()
def main(dry=True):
init_app(set_backends=True, routes=False) # Sets the storage backends on all models
with TokuTransaction():
do_migration()
if dry:
raise Exception('Abort Transaction - Dry Run')
if __name__ == '__main__':
dry = 'dry' in sys.argv
if not dry:
script_utils.add_file_logger(logger, __file__)
if 'debug' in sys.argv:
logger.setLevel(logging.DEBUG)
elif 'warning' in sys.argv:
logger.setLevel(logging.WARNING)
elif 'info' in sys.argv:
logger.setLevel(logging.INFO)
elif 'error' in sys.argv:
logger.setLevel(logging.ERROR)
main(dry=dry)
|
Add a migration for repopulating sha256s of older osfstorage file versions
|
Add a migration for repopulating sha256s of older osfstorage file versions
|
Python
|
apache-2.0
|
kch8qx/osf.io,laurenrevere/osf.io,laurenrevere/osf.io,rdhyee/osf.io,haoyuchen1992/osf.io,HalcyonChimera/osf.io,pattisdr/osf.io,samanehsan/osf.io,brandonPurvis/osf.io,caneruguz/osf.io,TomHeatwole/osf.io,abought/osf.io,acshi/osf.io,leb2dg/osf.io,cslzchen/osf.io,Nesiehr/osf.io,zamattiac/osf.io,Ghalko/osf.io,mluke93/osf.io,mluo613/osf.io,hmoco/osf.io,caseyrollins/osf.io,crcresearch/osf.io,caseyrollins/osf.io,caneruguz/osf.io,petermalcolm/osf.io,aaxelb/osf.io,emetsger/osf.io,Nesiehr/osf.io,njantrania/osf.io,brandonPurvis/osf.io,TomHeatwole/osf.io,caseyrygt/osf.io,brianjgeiger/osf.io,kwierman/osf.io,Ghalko/osf.io,petermalcolm/osf.io,acshi/osf.io,amyshi188/osf.io,RomanZWang/osf.io,brianjgeiger/osf.io,kwierman/osf.io,wearpants/osf.io,samchrisinger/osf.io,ZobairAlijan/osf.io,caseyrygt/osf.io,chennan47/osf.io,abought/osf.io,mluo613/osf.io,chrisseto/osf.io,Nesiehr/osf.io,kch8qx/osf.io,GageGaskins/osf.io,billyhunt/osf.io,mattclark/osf.io,Johnetordoff/osf.io,zachjanicki/osf.io,leb2dg/osf.io,Johnetordoff/osf.io,DanielSBrown/osf.io,laurenrevere/osf.io,cosenal/osf.io,doublebits/osf.io,samchrisinger/osf.io,zamattiac/osf.io,KAsante95/osf.io,crcresearch/osf.io,ZobairAlijan/osf.io,felliott/osf.io,chrisseto/osf.io,brandonPurvis/osf.io,aaxelb/osf.io,sloria/osf.io,monikagrabowska/osf.io,njantrania/osf.io,ZobairAlijan/osf.io,zamattiac/osf.io,baylee-d/osf.io,cslzchen/osf.io,monikagrabowska/osf.io,rdhyee/osf.io,hmoco/osf.io,cosenal/osf.io,cslzchen/osf.io,erinspace/osf.io,chennan47/osf.io,crcresearch/osf.io,samanehsan/osf.io,saradbowman/osf.io,kwierman/osf.io,brandonPurvis/osf.io,amyshi188/osf.io,mluke93/osf.io,ZobairAlijan/osf.io,acshi/osf.io,KAsante95/osf.io,doublebits/osf.io,njantrania/osf.io,felliott/osf.io,saradbowman/osf.io,DanielSBrown/osf.io,rdhyee/osf.io,danielneis/osf.io,KAsante95/osf.io,asanfilippo7/osf.io,caseyrygt/osf.io,ticklemepierce/osf.io,wearpants/osf.io,kch8qx/osf.io,pattisdr/osf.io,billyhunt/osf.io,TomBaxter/osf.io,petermalcolm/osf.io,brandonPurvis/osf.io,adlius/osf.io,cosenal/osf.io,samchrisinger/osf.io,brianjgeiger/osf.io,baylee-d/osf.io,arpitar/osf.io,alexschiller/osf.io,petermalcolm/osf.io,abought/osf.io,alexschiller/osf.io,Ghalko/osf.io,CenterForOpenScience/osf.io,billyhunt/osf.io,kwierman/osf.io,adlius/osf.io,ticklemepierce/osf.io,SSJohns/osf.io,kch8qx/osf.io,mattclark/osf.io,billyhunt/osf.io,mfraezz/osf.io,KAsante95/osf.io,haoyuchen1992/osf.io,acshi/osf.io,chrisseto/osf.io,mluke93/osf.io,cwisecarver/osf.io,kch8qx/osf.io,KAsante95/osf.io,felliott/osf.io,jnayak1/osf.io,amyshi188/osf.io,brianjgeiger/osf.io,leb2dg/osf.io,caneruguz/osf.io,doublebits/osf.io,sloria/osf.io,pattisdr/osf.io,jnayak1/osf.io,amyshi188/osf.io,haoyuchen1992/osf.io,asanfilippo7/osf.io,emetsger/osf.io,CenterForOpenScience/osf.io,binoculars/osf.io,rdhyee/osf.io,samanehsan/osf.io,RomanZWang/osf.io,Ghalko/osf.io,Nesiehr/osf.io,alexschiller/osf.io,emetsger/osf.io,icereval/osf.io,cslzchen/osf.io,zamattiac/osf.io,mfraezz/osf.io,mluke93/osf.io,TomHeatwole/osf.io,arpitar/osf.io,cwisecarver/osf.io,leb2dg/osf.io,mattclark/osf.io,samanehsan/osf.io,monikagrabowska/osf.io,GageGaskins/osf.io,felliott/osf.io,hmoco/osf.io,SSJohns/osf.io,caseyrygt/osf.io,wearpants/osf.io,HalcyonChimera/osf.io,cwisecarver/osf.io,aaxelb/osf.io,mluo613/osf.io,caseyrollins/osf.io,wearpants/osf.io,njantrania/osf.io,arpitar/osf.io,arpitar/osf.io,TomHeatwole/osf.io,DanielSBrown/osf.io,alexschiller/osf.io,doublebits/osf.io,mluo613/osf.io,chennan47/osf.io,billyhunt/osf.io,CenterForOpenScience/osf.io,DanielSBrown/osf.io,monikagrabowska/osf.io,doublebits/osf.io,mluo61
3/osf.io,icereval/osf.io,zachjanicki/osf.io,mfraezz/osf.io,GageGaskins/osf.io,baylee-d/osf.io,erinspace/osf.io,cosenal/osf.io,cwisecarver/osf.io,HalcyonChimera/osf.io,CenterForOpenScience/osf.io,GageGaskins/osf.io,emetsger/osf.io,GageGaskins/osf.io,mfraezz/osf.io,icereval/osf.io,asanfilippo7/osf.io,danielneis/osf.io,HalcyonChimera/osf.io,zachjanicki/osf.io,sloria/osf.io,aaxelb/osf.io,SSJohns/osf.io,ticklemepierce/osf.io,acshi/osf.io,zachjanicki/osf.io,RomanZWang/osf.io,danielneis/osf.io,monikagrabowska/osf.io,haoyuchen1992/osf.io,hmoco/osf.io,adlius/osf.io,binoculars/osf.io,jnayak1/osf.io,jnayak1/osf.io,adlius/osf.io,RomanZWang/osf.io,ticklemepierce/osf.io,caneruguz/osf.io,danielneis/osf.io,samchrisinger/osf.io,alexschiller/osf.io,Johnetordoff/osf.io,TomBaxter/osf.io,SSJohns/osf.io,abought/osf.io,TomBaxter/osf.io,binoculars/osf.io,Johnetordoff/osf.io,erinspace/osf.io,asanfilippo7/osf.io,chrisseto/osf.io,RomanZWang/osf.io
|
Add a migration for repopulating sha256s of older osfstorage file versions
|
import sys
import logging
from modularodm import Q
from website.app import init_app
from website.files.models import FileVersion
from scripts import utils as script_utils
from framework.transactions.context import TokuTransaction
logger = logging.getLogger(__name__)
def do_migration():
logger.info('Starting sha256 recovery migration')
for version in FileVersion.find(Q('metadata.sha256', 'eq', None)):
if not version.location:
continue
logger.debug('Adding sha {} to version {}'.format(version.location['object'], version._id))
version.metadata['sha256'] = version.location['object']
version.save()
def main(dry=True):
init_app(set_backends=True, routes=False) # Sets the storage backends on all models
with TokuTransaction():
do_migration()
if dry:
raise Exception('Abort Transaction - Dry Run')
if __name__ == '__main__':
dry = 'dry' in sys.argv
if not dry:
script_utils.add_file_logger(logger, __file__)
if 'debug' in sys.argv:
logger.setLevel(logging.DEBUG)
elif 'warning' in sys.argv:
logger.setLevel(logging.WARNING)
elif 'info' in sys.argv:
logger.setLevel(logging.INFO)
elif 'error' in sys.argv:
logger.setLevel(logging.ERROR)
main(dry=dry)
|
<commit_before><commit_msg>Add a migration for repopulating sha256s of older osfstorage file versions<commit_after>
|
import sys
import logging
from modularodm import Q
from website.app import init_app
from website.files.models import FileVersion
from scripts import utils as script_utils
from framework.transactions.context import TokuTransaction
logger = logging.getLogger(__name__)
def do_migration():
logger.info('Starting sha256 recovery migration')
for version in FileVersion.find(Q('metadata.sha256', 'eq', None)):
if not version.location:
continue
logger.debug('Adding sha {} to version {}'.format(version.location['object'], version._id))
version.metadata['sha256'] = version.location['object']
version.save()
def main(dry=True):
init_app(set_backends=True, routes=False) # Sets the storage backends on all models
with TokuTransaction():
do_migration()
if dry:
raise Exception('Abort Transaction - Dry Run')
if __name__ == '__main__':
dry = 'dry' in sys.argv
if not dry:
script_utils.add_file_logger(logger, __file__)
if 'debug' in sys.argv:
logger.setLevel(logging.DEBUG)
elif 'warning' in sys.argv:
logger.setLevel(logging.WARNING)
elif 'info' in sys.argv:
logger.setLevel(logging.INFO)
elif 'error' in sys.argv:
logger.setLevel(logging.ERROR)
main(dry=dry)
|
Add a migration for repopulating sha256s of older osfstorage file versionsimport sys
import logging
from modularodm import Q
from website.app import init_app
from website.files.models import FileVersion
from scripts import utils as script_utils
from framework.transactions.context import TokuTransaction
logger = logging.getLogger(__name__)
def do_migration():
logger.info('Starting sha256 recovery migration')
for version in FileVersion.find(Q('metadata.sha256', 'eq', None)):
if not version.location:
continue
logger.debug('Adding sha {} to version {}'.format(version.location['object'], version._id))
version.metadata['sha256'] = version.location['object']
version.save()
def main(dry=True):
init_app(set_backends=True, routes=False) # Sets the storage backends on all models
with TokuTransaction():
do_migration()
if dry:
raise Exception('Abort Transaction - Dry Run')
if __name__ == '__main__':
dry = 'dry' in sys.argv
if not dry:
script_utils.add_file_logger(logger, __file__)
if 'debug' in sys.argv:
logger.setLevel(logging.DEBUG)
elif 'warning' in sys.argv:
logger.setLevel(logging.WARNING)
elif 'info' in sys.argv:
logger.setLevel(logging.INFO)
elif 'error' in sys.argv:
logger.setLevel(logging.ERROR)
main(dry=dry)
|
<commit_before><commit_msg>Add a migration for repopulating sha256s of older osfstorage file versions<commit_after>import sys
import logging
from modularodm import Q
from website.app import init_app
from website.files.models import FileVersion
from scripts import utils as script_utils
from framework.transactions.context import TokuTransaction
logger = logging.getLogger(__name__)
def do_migration():
logger.info('Starting sha256 recovery migration')
for version in FileVersion.find(Q('metadata.sha256', 'eq', None)):
if not version.location:
continue
logger.debug('Adding sha {} to version {}'.format(version.location['object'], version._id))
version.metadata['sha256'] = version.location['object']
version.save()
def main(dry=True):
init_app(set_backends=True, routes=False) # Sets the storage backends on all models
with TokuTransaction():
do_migration()
if dry:
raise Exception('Abort Transaction - Dry Run')
if __name__ == '__main__':
dry = 'dry' in sys.argv
if not dry:
script_utils.add_file_logger(logger, __file__)
if 'debug' in sys.argv:
logger.setLevel(logging.DEBUG)
elif 'warning' in sys.argv:
logger.setLevel(logging.WARNING)
elif 'info' in sys.argv:
logger.setLevel(logging.INFO)
elif 'error' in sys.argv:
logger.setLevel(logging.ERROR)
main(dry=dry)
|
|
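The dry-run handling above is a reusable idiom: run the mutations inside a transaction, then raise so everything rolls back. In isolation -- a sketch, with TokuTransaction standing in for any transactional context manager:
def run_in_transaction(migration, dry=True):
    with TokuTransaction():
        migration()
        if dry:
            # Raising aborts the enclosing transaction and rolls back
            # every change migration() made.
            raise Exception('Abort Transaction - Dry Run')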
ee496c2e8e210124184a3b9b0e4be15dbbe1b01a
|
scripts/validate_schema.py
|
scripts/validate_schema.py
|
#!/usr/bin/env python
# Copyright 2017 Cargill Incorporated
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from pipewrench import merge
from marshmallow import Schema, fields
import yaml
# This script validates that required metadata properties are filled in a given configuration.
class Column(Schema):
comment = fields.String(required=True)
class Table(Schema):
META_CONTACT_INFO = fields.String(required=True)
META_LOAD_FREQUENCY = fields.String(required=True)
META_SECURITY_CLASSIFICATION = fields.String(required=True)
META_SOURCE = fields.String(required=True)
id = fields.String(required=True)
columns = fields.Nested(Column, many=True)
class Conf(Schema):
tables = fields.Nested(Column, many=True)
def main(conf_path, env_path):
has_errors = False
env = merge.get_env(env_path)
conf = merge.get_conf(conf_path, env)
for table in conf['tables']:
result = Table().load(table)
if table['id']:
print('results for: ' + table['id'])
print()
if (result.errors):
print(yaml.dump({ table['id']: result.errors}))
else:
print('ok')
has_errors = True
exit(not has_errors)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='yaml schema validator. '
'Usage: ./script.py --env=/path/to/env'
' --conf=/path/to/conf')
parser.add_argument('--conf',
dest='conf',
help='Yaml format configuration file.'
' This file can be used as a template to be filled by '
'\'env.yml\'')
parser.add_argument('--env',
dest='env',
help='Yaml format environment file.'
' Key-value pairs in this file that are templated in '
'\'conf.yml\'')
args = parser.parse_args()
main(args.conf, args.env)
|
Add script to validate table metadata values exist
|
Add script to validate table metadata values exist
|
Python
|
apache-2.0
|
Cargill/pipewrench,Cargill/pipewrench
|
Add script to validate table metadata values exist
|
#!/usr/bin/env python
# Copyright 2017 Cargill Incorporated
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from pipewrench import merge
from marshmallow import Schema, fields
import yaml
# This script validates that required metadata properties are filled in a given configuration.
class Column(Schema):
comment = fields.String(required=True)
class Table(Schema):
META_CONTACT_INFO = fields.String(required=True)
META_LOAD_FREQUENCY = fields.String(required=True)
META_SECURITY_CLASSIFICATION = fields.String(required=True)
META_SOURCE = fields.String(required=True)
id = fields.String(required=True)
columns = fields.Nested(Column, many=True)
class Conf(Schema):
tables = fields.Nested(Column, many=True)
def main(conf_path, env_path):
has_errors = False
env = merge.get_env(env_path)
conf = merge.get_conf(conf_path, env)
for table in conf['tables']:
result = Table().load(table)
if table['id']:
print('results for: ' + table['id'])
print()
if (result.errors):
print(yaml.dump({ table['id']: result.errors}))
else:
print('ok')
has_errors = True
exit(not has_errors)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='yaml schema validator. '
'Usage: ./script.py --env=/path/to/env'
' --conf=/path/to/conf')
parser.add_argument('--conf',
dest='conf',
help='Yaml format configuration file.'
' This file can be used as a template to be filled by '
'\'env.yml\'')
parser.add_argument('--env',
dest='env',
help='Yaml format environment file.'
' Key-value pairs in this file that are templated in '
'\'conf.yml\'')
args = parser.parse_args()
main(args.conf, args.env)
|
<commit_before><commit_msg>Add script to validate table metadata values exist<commit_after>
|
#!/usr/bin/env python
# Copyright 2017 Cargill Incorporated
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from pipewrench import merge
from marshmallow import Schema, fields
import yaml
# This script validates that required metadata properties are filled in a given configuration.
class Column(Schema):
comment = fields.String(required=True)
class Table(Schema):
META_CONTACT_INFO = fields.String(required=True)
META_LOAD_FREQUENCY = fields.String(required=True)
META_SECURITY_CLASSIFICATION = fields.String(required=True)
META_SOURCE = fields.String(required=True)
id = fields.String(required=True)
columns = fields.Nested(Column, many=True)
class Conf(Schema):
tables = fields.Nested(Column, many=True)
def main(conf_path, env_path):
has_errors = False
env = merge.get_env(env_path)
conf = merge.get_conf(conf_path, env)
for table in conf['tables']:
result = Table().load(table)
if table['id']:
print('results for: ' + table['id'])
print()
if (result.errors):
print(yaml.dump({ table['id']: result.errors}))
else:
print('ok')
has_errors = True
exit(not has_errors)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='yaml schema validator. '
'Usage: ./script.py --env=/path/to/env'
' --conf=/path/to/conf')
parser.add_argument('--conf',
dest='conf',
help='Yaml format configuration file.'
' This file can be used as a template to be filled by '
'\'env.yml\'')
parser.add_argument('--env',
dest='env',
help='Yaml format environment file.'
' Key-value pairs in this file that are templated in '
'\'conf.yml\'')
args = parser.parse_args()
main(args.conf, args.env)
|
Add script to validate table metadata values exist#!/usr/bin/env python
# Copyright 2017 Cargill Incorporated
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from pipewrench import merge
from marshmallow import Schema, fields
import yaml
# This script validates that required metadata properties are filled in a given configuration.
class Column(Schema):
comment = fields.String(required=True)
class Table(Schema):
META_CONTACT_INFO = fields.String(required=True)
META_LOAD_FREQUENCY = fields.String(required=True)
META_SECURITY_CLASSIFICATION = fields.String(required=True)
META_SOURCE = fields.String(required=True)
id = fields.String(required=True)
columns = fields.Nested(Column, many=True)
class Conf(Schema):
tables = fields.Nested(Column, many=True)
def main(conf_path, env_path):
has_errors = False
env = merge.get_env(env_path)
conf = merge.get_conf(conf_path, env)
for table in conf['tables']:
result = Table().load(table)
if table['id']:
print('results for: ' + table['id'])
print()
if (result.errors):
print(yaml.dump({ table['id']: result.errors}))
else:
print('ok')
has_errors = True
exit(not has_errors)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='yaml schema validator. '
'Usage: ./script.py --env=/path/to/env'
' --conf=/path/to/conf')
parser.add_argument('--conf',
dest='conf',
help='Yaml format configuration file.'
' This file can be used as a template to be filled by '
'\'env.yml\'')
parser.add_argument('--env',
dest='env',
help='Yaml format environment file.'
' Key-value pairs in this file that are templated in '
'\'conf.yml\'')
args = parser.parse_args()
main(args.conf, args.env)
|
<commit_before><commit_msg>Add script to validate table metadata values exist<commit_after>#!/usr/bin/env python
# Copyright 2017 Cargill Incorporated
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from pipewrench import merge
from marshmallow import Schema, fields
import yaml
# This script validates that required metadata properties are filled in a given configuration.
class Column(Schema):
comment = fields.String(required=True)
class Table(Schema):
META_CONTACT_INFO = fields.String(required=True)
META_LOAD_FREQUENCY = fields.String(required=True)
META_SECURITY_CLASSIFICATION = fields.String(required=True)
META_SOURCE = fields.String(required=True)
id = fields.String(required=True)
columns = fields.Nested(Column, many=True)
class Conf(Schema):
tables = fields.Nested(Column, many=True)
def main(conf_path, env_path):
has_errors = False
env = merge.get_env(env_path)
conf = merge.get_conf(conf_path, env)
for table in conf['tables']:
result = Table().load(table)
if table['id']:
print('results for: ' + table['id'])
print()
if (result.errors):
print(yaml.dump({ table['id']: result.errors}))
else:
print('ok')
has_errors = True
exit(not has_errors)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='yaml schema validator. '
'Usage: ./script.py --env=/path/to/env'
' --conf=/path/to/conf')
parser.add_argument('--conf',
dest='conf',
help='Yaml format configuration file.'
' This file can be used as a template to be filled by '
'\'env.yml\'')
parser.add_argument('--env',
dest='env',
help='Yaml format environment file.'
' Key-value pairs in this file that are templated in '
'\'conf.yml\'')
args = parser.parse_args()
main(args.conf, args.env)
|
|
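A standalone illustration of the validation pattern above, assuming marshmallow 2.x, where Schema.load() returns a result object carrying .errors (under 3.x, load() raises ValidationError instead):
from marshmallow import Schema, fields

class TableSchema(Schema):
    META_SOURCE = fields.String(required=True)
    id = fields.String(required=True)

result = TableSchema().load({'id': 'db.example_table'})
print(result.errors)  # {'META_SOURCE': ['Missing data for required field.']}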
c405eb28fb15a7f2891f6630a550bf532d80494a
|
tests/GIR/test_401_workspace.py
|
tests/GIR/test_401_workspace.py
|
# coding=utf-8
import sys
import struct
import unittest
from test_000_config import TestConfig
from test_020_connection import TestConnection
from gi.repository import Midgard
from gi.repository import GObject
class TestWorkspace(unittest.TestCase):
mgd = None
manager = None
def setUp(self):
if self.mgd is None:
self.mgd = TestConnection.openConnection()
if self.manager is None:
self.manager = Midgard.WorkspaceManager(connection = self.mgd)
def tearDown(self):
self.mgd.close()
self.mgd = None
def createWorkspaces(self):
ws_foo = Midgard.Workspace(name = "Foo")
self.manager.create_workspace(ws_foo, "")
ws_bar = Midgard.Workspace(name = "Bar")
self.manager.create_workspace(ws_bar, "/Foo")
ws_baz = Midgard.Workspace(name = "Baz")
self.manager.create_workspace(ws_baz, "/Foo")
def testAListWorkspacesNames(self):
self.createWorkspaces()
ws = Midgard.Workspace()
self.manager.get_workspace_by_path(ws, "/Foo")
names = ws.list_workspace_names()
self.assertEqual(len(names), 2)
self.assertIn("Bar", names)
self.assertIn("Baz", names)
def testListChildren(self):
ws = Midgard.Workspace()
self.manager.get_workspace_by_path(ws, "/Foo")
children = ws.list_children()
names = {"Bar", "Baz"}
self.assertEqual(len(children), 2)
self.assertIn(children[0].get_property("name"), names)
self.assertIn(children[1].get_property("name"), names)
def testGetPath(self):
ws = Midgard.Workspace()
self.manager.get_workspace_by_path(ws, "/Foo")
self.assertEqual(ws.get_path(), "/Foo");
ws = Midgard.Workspace()
self.manager.get_workspace_by_path(ws, "/Foo/Bar")
self.assertEqual(ws.get_path(), "/Foo/Bar");
ws = Midgard.Workspace()
self.manager.get_workspace_by_path(ws, "/Foo/Baz")
self.assertEqual(ws.get_path(), "/Foo/Baz");
def testInheritance(self):
ws = Midgard.Workspace()
self.assertIsInstance(ws, GObject.GObject)
self.assertIsInstance(ws, Midgard.Workspace)
self.assertIsInstance(ws, Midgard.WorkspaceStorage)
if __name__ == "__main__":
unittest.main()
|
Test workspace. Initial. Closes gh-181
|
Test workspace. Initial. Closes gh-181
|
Python
|
lgpl-2.1
|
midgardproject/midgard-core,midgardproject/midgard-core,midgardproject/midgard-core,midgardproject/midgard-core
|
Test workspace. Initial. Closes gh-181
|
# coding=utf-8
import sys
import struct
import unittest
from test_000_config import TestConfig
from test_020_connection import TestConnection
from gi.repository import Midgard
from gi.repository import GObject
class TestWorkspace(unittest.TestCase):
mgd = None
manager = None
def setUp(self):
if self.mgd is None:
self.mgd = TestConnection.openConnection()
if self.manager is None:
self.manager = Midgard.WorkspaceManager(connection = self.mgd)
def tearDown(self):
self.mgd.close()
self.mgd = None
def createWorkspaces(self):
ws_foo = Midgard.Workspace(name = "Foo")
self.manager.create_workspace(ws_foo, "")
ws_bar = Midgard.Workspace(name = "Bar")
self.manager.create_workspace(ws_bar, "/Foo")
ws_baz = Midgard.Workspace(name = "Baz")
self.manager.create_workspace(ws_baz, "/Foo")
def testAListWorkspacesNames(self):
self.createWorkspaces()
ws = Midgard.Workspace()
self.manager.get_workspace_by_path(ws, "/Foo")
names = ws.list_workspace_names()
self.assertEqual(len(names), 2)
self.assertIn("Bar", names)
self.assertIn("Baz", names)
def testListChildren(self):
ws = Midgard.Workspace()
self.manager.get_workspace_by_path(ws, "/Foo")
children = ws.list_children()
names = {"Bar", "Baz"}
self.assertEqual(len(children), 2)
self.assertIn(children[0].get_property("name"), names)
self.assertIn(children[1].get_property("name"), names)
def testGetPath(self):
ws = Midgard.Workspace()
self.manager.get_workspace_by_path(ws, "/Foo")
self.assertEqual(ws.get_path(), "/Foo");
ws = Midgard.Workspace()
self.manager.get_workspace_by_path(ws, "/Foo/Bar")
self.assertEqual(ws.get_path(), "/Foo/Bar");
ws = Midgard.Workspace()
self.manager.get_workspace_by_path(ws, "/Foo/Baz")
self.assertEqual(ws.get_path(), "/Foo/Baz");
def testInheritance(self):
ws = Midgard.Workspace()
self.assertIsInstance(ws, GObject.GObject)
self.assertIsInstance(ws, Midgard.Workspace)
self.assertIsInstance(ws, Midgard.WorkspaceStorage)
if __name__ == "__main__":
unittest.main()
|
<commit_before><commit_msg>Test workspace. Initial. Closes gh-181<commit_after>
|
# coding=utf-8
import sys
import struct
import unittest
from test_000_config import TestConfig
from test_020_connection import TestConnection
from gi.repository import Midgard
from gi.repository import GObject
class TestWorkspace(unittest.TestCase):
mgd = None
manager = None
def setUp(self):
if self.mgd is None:
self.mgd = TestConnection.openConnection()
if self.manager is None:
self.manager = Midgard.WorkspaceManager(connection = self.mgd)
def tearDown(self):
self.mgd.close()
self.mgd = None
def createWorkspaces(self):
ws_foo = Midgard.Workspace(name = "Foo")
self.manager.create_workspace(ws_foo, "")
ws_bar = Midgard.Workspace(name = "Bar")
self.manager.create_workspace(ws_bar, "/Foo")
ws_baz = Midgard.Workspace(name = "Baz")
self.manager.create_workspace(ws_baz, "/Foo")
def testAListWorkspacesNames(self):
self.createWorkspaces()
ws = Midgard.Workspace()
self.manager.get_workspace_by_path(ws, "/Foo")
names = ws.list_workspace_names()
self.assertEqual(len(names), 2)
self.assertIn("Bar", names)
self.assertIn("Baz", names)
def testListChildren(self):
ws = Midgard.Workspace()
self.manager.get_workspace_by_path(ws, "/Foo")
children = ws.list_children()
names = {"Bar", "Baz"}
self.assertEqual(len(children), 2)
self.assertIn(children[0].get_property("name"), names)
self.assertIn(children[1].get_property("name"), names)
def testGetPath(self):
ws = Midgard.Workspace()
self.manager.get_workspace_by_path(ws, "/Foo")
self.assertEqual(ws.get_path(), "/Foo");
ws = Midgard.Workspace()
self.manager.get_workspace_by_path(ws, "/Foo/Bar")
self.assertEqual(ws.get_path(), "/Foo/Bar");
ws = Midgard.Workspace()
self.manager.get_workspace_by_path(ws, "/Foo/Baz")
self.assertEqual(ws.get_path(), "/Foo/Baz");
def testInheritance(self):
ws = Midgard.Workspace()
self.assertIsInstance(ws, GObject.GObject)
self.assertIsInstance(ws, Midgard.Workspace)
self.assertIsInstance(ws, Midgard.WorkspaceStorage)
if __name__ == "__main__":
unittest.main()
|
Test workspace. Initial. Closes gh-181# coding=utf-8
import sys
import struct
import unittest
from test_000_config import TestConfig
from test_020_connection import TestConnection
from gi.repository import Midgard
from gi.repository import GObject
class TestWorkspace(unittest.TestCase):
mgd = None
manager = None
def setUp(self):
if self.mgd is None:
self.mgd = TestConnection.openConnection()
if self.manager is None:
self.manager = Midgard.WorkspaceManager(connection = self.mgd)
def tearDown(self):
self.mgd.close()
self.mgd = None
def createWorkspaces(self):
ws_foo = Midgard.Workspace(name = "Foo")
self.manager.create_workspace(ws_foo, "")
ws_bar = Midgard.Workspace(name = "Bar")
self.manager.create_workspace(ws_bar, "/Foo")
ws_baz = Midgard.Workspace(name = "Baz")
self.manager.create_workspace(ws_baz, "/Foo")
def testAListWorkspacesNames(self):
self.createWorkspaces()
ws = Midgard.Workspace()
self.manager.get_workspace_by_path(ws, "/Foo")
names = ws.list_workspace_names()
self.assertEqual(len(names), 2)
self.assertIn("Bar", names)
self.assertIn("Baz", names)
def testListChildren(self):
ws = Midgard.Workspace()
self.manager.get_workspace_by_path(ws, "/Foo")
children = ws.list_children()
names = {"Bar", "Baz"}
self.assertEqual(len(children), 2)
self.assertIn(children[0].get_property("name"), names)
self.assertIn(children[1].get_property("name"), names)
def testGetPath(self):
ws = Midgard.Workspace()
self.manager.get_workspace_by_path(ws, "/Foo")
self.assertEqual(ws.get_path(), "/Foo");
ws = Midgard.Workspace()
self.manager.get_workspace_by_path(ws, "/Foo/Bar")
self.assertEqual(ws.get_path(), "/Foo/Bar");
ws = Midgard.Workspace()
self.manager.get_workspace_by_path(ws, "/Foo/Baz")
self.assertEqual(ws.get_path(), "/Foo/Baz");
def testInheritance(self):
ws = Midgard.Workspace()
self.assertIsInstance(ws, GObject.GObject)
self.assertIsInstance(ws, Midgard.Workspace)
self.assertIsInstance(ws, Midgard.WorkspaceStorage)
if __name__ == "__main__":
unittest.main()
|
<commit_before><commit_msg>Test workspace. Initial. Closes gh-181<commit_after># coding=utf-8
import sys
import struct
import unittest
from test_000_config import TestConfig
from test_020_connection import TestConnection
from gi.repository import Midgard
from gi.repository import GObject
class TestWorkspace(unittest.TestCase):
mgd = None
manager = None
def setUp(self):
if self.mgd is None:
self.mgd = TestConnection.openConnection()
if self.manager is None:
self.manager = Midgard.WorkspaceManager(connection = self.mgd)
def tearDown(self):
self.mgd.close()
self.mgd = None
def createWorkspaces(self):
ws_foo = Midgard.Workspace(name = "Foo")
self.manager.create_workspace(ws_foo, "")
ws_bar = Midgard.Workspace(name = "Bar")
self.manager.create_workspace(ws_bar, "/Foo")
ws_baz = Midgard.Workspace(name = "Baz")
self.manager.create_workspace(ws_baz, "/Foo")
def testAListWorkspacesNames(self):
self.createWorkspaces()
ws = Midgard.Workspace()
self.manager.get_workspace_by_path(ws, "/Foo")
names = ws.list_workspace_names()
self.assertEqual(len(names), 2)
self.assertIn("Bar", names)
self.assertIn("Baz", names)
def testListChildren(self):
ws = Midgard.Workspace()
self.manager.get_workspace_by_path(ws, "/Foo")
children = ws.list_children()
names = {"Bar", "Baz"}
self.assertEqual(len(children), 2)
self.assertIn(children[0].get_property("name"), names)
self.assertIn(children[1].get_property("name"), names)
def testGetPath(self):
ws = Midgard.Workspace()
self.manager.get_workspace_by_path(ws, "/Foo")
self.assertEqual(ws.get_path(), "/Foo");
ws = Midgard.Workspace()
self.manager.get_workspace_by_path(ws, "/Foo/Bar")
self.assertEqual(ws.get_path(), "/Foo/Bar");
ws = Midgard.Workspace()
self.manager.get_workspace_by_path(ws, "/Foo/Baz")
self.assertEqual(ws.get_path(), "/Foo/Baz");
def testInheritance(self):
ws = Midgard.Workspace()
self.assertIsInstance(ws, GObject.GObject)
self.assertIsInstance(ws, Midgard.Workspace)
self.assertIsInstance(ws, Midgard.WorkspaceStorage)
if __name__ == "__main__":
unittest.main()
|
|
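unittest executes test methods in alphabetical order, which is presumably why the workspace-creating test above is named testAListWorkspacesNames: it has to run before the lookup tests that assume the workspaces exist. The ordering can be checked with the standard loader:

import unittest

class Demo(unittest.TestCase):
    def testBLookup(self):
        pass

    def testACreate(self):
        pass

names = unittest.TestLoader().getTestCaseNames(Demo)
print(names)  # ['testACreate', 'testBLookup'], sorted alphabetically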
b70fe116d3652202ea9ccee63f255f8b39cd0ebf
|
src/commands/fedex.py
|
src/commands/fedex.py
|
import urllib
import webbrowser
import qmk
class FedexCommand(qmk.Command):
'''Use the Fedex website to view the tracking info for the given
tracking number.'''
def __init__(self):
self._name = 'fedex'
self._help = self.__doc__
self.__baseURL = 'http://www.fedex.com/Tracking' \
'?clienttype=dotcom&ascend_header=1&cntry_code=us' \
'&language=english&mi=n&tracknumbers=%s'
@qmk.Command.actionRequiresArgument
def action(self, arg):
query = self.__baseURL % urllib.quote_plus(
arg.split()[0].encode('utf-8'))
webbrowser.open_new_tab(query)
def commands(): return [ FedexCommand() ]
|
Add Fedex command for tracking packages.
|
Add Fedex command for tracking packages.
|
Python
|
mit
|
kivhift/qmk,kivhift/qmk
|
Add Fedex command for tracking packages.
|
import urllib
import webbrowser
import qmk
class FedexCommand(qmk.Command):
'''Use the Fedex website to view the tracking info for the given
tracking number.'''
def __init__(self):
self._name = 'fedex'
self._help = self.__doc__
self.__baseURL = 'http://www.fedex.com/Tracking' \
'?clienttype=dotcom&ascend_header=1&cntry_code=us' \
'&language=english&mi=n&tracknumbers=%s'
@qmk.Command.actionRequiresArgument
def action(self, arg):
query = self.__baseURL % urllib.quote_plus(
arg.split()[0].encode('utf-8'))
webbrowser.open_new_tab(query)
def commands(): return [ FedexCommand() ]
|
<commit_before><commit_msg>Add Fedex command for tracking packages.<commit_after>
|
import urllib
import webbrowser
import qmk
class FedexCommand(qmk.Command):
'''Use the Fedex website to view the tracking info for the given
tracking number.'''
def __init__(self):
self._name = 'fedex'
self._help = self.__doc__
self.__baseURL = 'http://www.fedex.com/Tracking' \
'?clienttype=dotcom&ascend_header=1&cntry_code=us' \
'&language=english&mi=n&tracknumbers=%s'
@qmk.Command.actionRequiresArgument
def action(self, arg):
query = self.__baseURL % urllib.quote_plus(
arg.split()[0].encode('utf-8'))
webbrowser.open_new_tab(query)
def commands(): return [ FedexCommand() ]
|
Add Fedex command for tracking packages.import urllib
import webbrowser
import qmk
class FedexCommand(qmk.Command):
'''Use the Fedex website to view the tracking info for the given
tracking number.'''
def __init__(self):
self._name = 'fedex'
self._help = self.__doc__
self.__baseURL = 'http://www.fedex.com/Tracking' \
'?clienttype=dotcom&ascend_header=1&cntry_code=us' \
'&language=english&mi=n&tracknumbers=%s'
@qmk.Command.actionRequiresArgument
def action(self, arg):
query = self.__baseURL % urllib.quote_plus(
arg.split()[0].encode('utf-8'))
webbrowser.open_new_tab(query)
def commands(): return [ FedexCommand() ]
|
<commit_before><commit_msg>Add Fedex command for tracking packages.<commit_after>import urllib
import webbrowser
import qmk
class FedexCommand(qmk.Command):
'''Use the Fedex website to view the tracking info for the given
tracking number.'''
def __init__(self):
self._name = 'fedex'
self._help = self.__doc__
self.__baseURL = 'http://www.fedex.com/Tracking' \
'?clienttype=dotcom&ascend_header=1&cntry_code=us' \
'&language=english&mi=n&tracknumbers=%s'
@qmk.Command.actionRequiresArgument
def action(self, arg):
query = self.__baseURL % urllib.quote_plus(
arg.split()[0].encode('utf-8'))
webbrowser.open_new_tab(query)
def commands(): return [ FedexCommand() ]
|
|
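The command reduces to URL-encoding the first whitespace-delimited token into the tracking query string. The snippet is Python 2 (urllib.quote_plus); the same construction under Python 3, where the function lives in urllib.parse:

from urllib.parse import quote_plus

base = ('http://www.fedex.com/Tracking?clienttype=dotcom&ascend_header=1'
        '&cntry_code=us&language=english&mi=n&tracknumbers=%s')
arg = '9999 9999 9999'  # hypothetical input; only the first token is used
print(base % quote_plus(arg.split()[0]))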
f3e0f0340b146d7583ae4165549837497ea7a7b0
|
scripts/get_bank_registry_cz.py
|
scripts/get_bank_registry_cz.py
|
import json
import csv
import requests
URL = "https://www.cnb.cz/cs/platebni-styk/.galleries/ucty_kody_bank/download/kody_bank_CR.csv"
registry = []
def process():
with requests.get(URL, stream=True) as csvfile:
count = 0
for row in csvfile.iter_lines():
if count != 0:
bank_code, name, bic = row.decode("latin1").split(";")[0:3]
registry.append(
{
"country_code": "CZ",
"primary": True,
"bic": bic.upper(),
"bank_code": bank_code,
"name": name,
"short_name": name,
}
)
else:
count = 1
return registry
if __name__ == "__main__":
with open("schwifty/bank_registry/generated_cz.json", "w") as fp:
json.dump(process(), fp, indent=2)
|
Add script to generate CZ bank bic codes registry
|
Add script to generate CZ bank bic codes registry
|
Python
|
mit
|
figo-connect/schwifty
|
Add script to generate CZ bank bic codes registry
|
import json
import csv
import requests
URL = "https://www.cnb.cz/cs/platebni-styk/.galleries/ucty_kody_bank/download/kody_bank_CR.csv"
registry = []
def process():
with requests.get(URL, stream=True) as csvfile:
count = 0
for row in csvfile.iter_lines():
if count != 0:
bank_code, name, bic = row.decode("latin1").split(";")[0:3]
registry.append(
{
"country_code": "CZ",
"primary": True,
"bic": bic.upper(),
"bank_code": bank_code,
"name": name,
"short_name": name,
}
)
else:
count = 1
return registry
if __name__ == "__main__":
with open("schwifty/bank_registry/generated_cz.json", "w") as fp:
json.dump(process(), fp, indent=2)
|
<commit_before><commit_msg>Add script to generate CZ bank bic codes registry<commit_after>
|
import json
import csv
import requests
URL = "https://www.cnb.cz/cs/platebni-styk/.galleries/ucty_kody_bank/download/kody_bank_CR.csv"
registry = []
def process():
with requests.get(URL, stream=True) as csvfile:
count = 0
for row in csvfile.iter_lines():
if count != 0:
bank_code, name, bic = row.decode("latin1").split(";")[0:3]
registry.append(
{
"country_code": "CZ",
"primary": True,
"bic": bic.upper(),
"bank_code": bank_code,
"name": name,
"short_name": name,
}
)
else:
count = 1
return registry
if __name__ == "__main__":
with open("schwifty/bank_registry/generated_cz.json", "w") as fp:
json.dump(process(), fp, indent=2)
|
Add script to generate CZ bank bic codes registryimport json
import csv
import requests
URL = "https://www.cnb.cz/cs/platebni-styk/.galleries/ucty_kody_bank/download/kody_bank_CR.csv"
registry = []
def process():
with requests.get(URL, stream=True) as csvfile:
count = 0
for row in csvfile.iter_lines():
if count != 0:
bank_code, name, bic = row.decode("latin1").split(";")[0:3]
registry.append(
{
"country_code": "CZ",
"primary": True,
"bic": bic.upper(),
"bank_code": bank_code,
"name": name,
"short_name": name,
}
)
else:
count = 1
return registry
if __name__ == "__main__":
with open("schwifty/bank_registry/generated_cz.json", "w") as fp:
json.dump(process(), fp, indent=2)
|
<commit_before><commit_msg>Add script to generate CZ bank bic codes registry<commit_after>import json
import csv
import requests
URL = "https://www.cnb.cz/cs/platebni-styk/.galleries/ucty_kody_bank/download/kody_bank_CR.csv"
registry = []
def process():
with requests.get(URL, stream=True) as csvfile:
count = 0
for row in csvfile.iter_lines():
if count != 0:
bank_code, name, bic = row.decode("latin1").split(";")[0:3]
registry.append(
{
"country_code": "CZ",
"primary": True,
"bic": bic.upper(),
"bank_code": bank_code,
"name": name,
"short_name": name,
}
)
else:
count = 1
return registry
if __name__ == "__main__":
with open("schwifty/bank_registry/generated_cz.json", "w") as fp:
json.dump(process(), fp, indent=2)
|
|
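A minimal sketch of consuming the file the script writes, indexing entries by bank code (the path comes from the script itself; the lookup code '0100' is a hypothetical example, not a verified registry entry):

import json

with open("schwifty/bank_registry/generated_cz.json") as fp:
    registry = json.load(fp)

by_code = {entry["bank_code"]: entry for entry in registry}
print(by_code.get("0100", {}).get("bic"))  # hypothetical bank code lookup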
975aa908d01129129c3e103643648aaab994f426
|
tests/unit/test_question_answer.py
|
tests/unit/test_question_answer.py
|
import os
from tests import PMGTestCase
from tests.fixtures import (
dbfixture, CommitteeQuestionData
)
class TestQuestionAnswer(PMGTestCase):
def setUp(self):
super(TestQuestionAnswer, self).setUp()
self.fx = dbfixture.data(
CommitteeQuestionData,
)
self.fx.setup()
def tearDown(self):
self.fx.teardown()
super(TestQuestionAnswer, self).tearDown()
def test_get_minister_questions_combined(self):
response = self.client.get(
"minister-questions-combined/",
base_url="http://api.pmg.test:5000/"
)
results = response.json["results"]
self.assertEqual(2, len(results))
questions = [result['question'] for result in results]
self.assertIn(self.fx.CommitteeQuestionData.arts_committee_question_one.question,
questions)
self.assertIn(self.fx.CommitteeQuestionData.arts_committee_question_two.question,
questions)
def test_get_minister_questions_combined_filter_by_year(self):
response = self.client.get(
"minister-questions-combined/?filter[year]=2018",
base_url="http://api.pmg.test:5000"
)
results = response.json["results"]
self.assertEqual(1, len(results))
questions = [result['question'] for result in results]
self.assertNotIn(self.fx.CommitteeQuestionData.arts_committee_question_one.question,
questions)
self.assertIn(self.fx.CommitteeQuestionData.arts_committee_question_two.question,
questions)
|
Add unit test for minister_questions_combined api endpoint
|
Add unit test for minister_questions_combined api endpoint
|
Python
|
apache-2.0
|
Code4SA/pmg-cms-2,Code4SA/pmg-cms-2,Code4SA/pmg-cms-2
|
Add unit test for minister_questions_combined api endpoint
|
import os
from tests import PMGTestCase
from tests.fixtures import (
dbfixture, CommitteeQuestionData
)
class TestQuestionAnswer(PMGTestCase):
def setUp(self):
super(TestQuestionAnswer, self).setUp()
self.fx = dbfixture.data(
CommitteeQuestionData,
)
self.fx.setup()
def tearDown(self):
self.fx.teardown()
super(TestQuestionAnswer, self).tearDown()
def test_get_minister_questions_combined(self):
response = self.client.get(
"minister-questions-combined/",
base_url="http://api.pmg.test:5000/"
)
results = response.json["results"]
self.assertEqual(2, len(results))
questions = [result['question'] for result in results]
self.assertIn(self.fx.CommitteeQuestionData.arts_committee_question_one.question,
questions)
self.assertIn(self.fx.CommitteeQuestionData.arts_committee_question_two.question,
questions)
def test_get_minister_questions_combined_filter_by_year(self):
response = self.client.get(
"minister-questions-combined/?filter[year]=2018",
base_url="http://api.pmg.test:5000"
)
results = response.json["results"]
self.assertEqual(1, len(results))
questions = [result['question'] for result in results]
self.assertNotIn(self.fx.CommitteeQuestionData.arts_committee_question_one.question,
questions)
self.assertIn(self.fx.CommitteeQuestionData.arts_committee_question_two.question,
questions)
|
<commit_before><commit_msg>Add unit test for minister_questions_combined api endpoint<commit_after>
|
import os
from tests import PMGTestCase
from tests.fixtures import (
dbfixture, CommitteeQuestionData
)
class TestQuestionAnswer(PMGTestCase):
def setUp(self):
super(TestQuestionAnswer, self).setUp()
self.fx = dbfixture.data(
CommitteeQuestionData,
)
self.fx.setup()
def tearDown(self):
self.fx.teardown()
super(TestQuestionAnswer, self).tearDown()
def test_get_minister_questions_combined(self):
response = self.client.get(
"minister-questions-combined/",
base_url="http://api.pmg.test:5000/"
)
results = response.json["results"]
self.assertEqual(2, len(results))
questions = [result['question'] for result in results]
self.assertIn(self.fx.CommitteeQuestionData.arts_committee_question_one.question,
questions)
self.assertIn(self.fx.CommitteeQuestionData.arts_committee_question_two.question,
questions)
def test_get_minister_questions_combined_filter_by_year(self):
response = self.client.get(
"minister-questions-combined/?filter[year]=2018",
base_url="http://api.pmg.test:5000"
)
results = response.json["results"]
self.assertEqual(1, len(results))
questions = [result['question'] for result in results]
self.assertNotIn(self.fx.CommitteeQuestionData.arts_committee_question_one.question,
questions)
self.assertIn(self.fx.CommitteeQuestionData.arts_committee_question_two.question,
questions)
|
Add unit test for minister_questions_combined api endpointimport os
from tests import PMGTestCase
from tests.fixtures import (
dbfixture, CommitteeQuestionData
)
class TestQuestionAnswer(PMGTestCase):
def setUp(self):
super(TestQuestionAnswer, self).setUp()
self.fx = dbfixture.data(
CommitteeQuestionData,
)
self.fx.setup()
def tearDown(self):
self.fx.teardown()
super(TestQuestionAnswer, self).tearDown()
def test_get_minister_questions_combined(self):
response = self.client.get(
"minister-questions-combined/",
base_url="http://api.pmg.test:5000/"
)
results = response.json["results"]
self.assertEqual(2, len(results))
questions = [result['question'] for result in results]
self.assertIn(self.fx.CommitteeQuestionData.arts_committee_question_one.question,
questions)
self.assertIn(self.fx.CommitteeQuestionData.arts_committee_question_two.question,
questions)
def test_get_minister_questions_combined_filter_by_year(self):
response = self.client.get(
"minister-questions-combined/?filter[year]=2018",
base_url="http://api.pmg.test:5000"
)
results = response.json["results"]
self.assertEqual(1, len(results))
questions = [result['question'] for result in results]
self.assertNotIn(self.fx.CommitteeQuestionData.arts_committee_question_one.question,
questions)
self.assertIn(self.fx.CommitteeQuestionData.arts_committee_question_two.question,
questions)
|
<commit_before><commit_msg>Add unit test for minister_questions_combined api endpoint<commit_after>import os
from tests import PMGTestCase
from tests.fixtures import (
dbfixture, CommitteeQuestionData
)
class TestQuestionAnswer(PMGTestCase):
def setUp(self):
super(TestQuestionAnswer, self).setUp()
self.fx = dbfixture.data(
CommitteeQuestionData,
)
self.fx.setup()
def tearDown(self):
self.fx.teardown()
super(TestQuestionAnswer, self).tearDown()
def test_get_minister_questions_combined(self):
response = self.client.get(
"minister-questions-combined/",
base_url="http://api.pmg.test:5000/"
)
results = response.json["results"]
self.assertEqual(2, len(results))
questions = [result['question'] for result in results]
self.assertIn(self.fx.CommitteeQuestionData.arts_committee_question_one.question,
questions)
self.assertIn(self.fx.CommitteeQuestionData.arts_committee_question_two.question,
questions)
def test_get_minister_questions_combined_filter_by_year(self):
response = self.client.get(
"minister-questions-combined/?filter[year]=2018",
base_url="http://api.pmg.test:5000"
)
results = response.json["results"]
self.assertEqual(1, len(results))
questions = [result['question'] for result in results]
self.assertNotIn(self.fx.CommitteeQuestionData.arts_committee_question_one.question,
questions)
self.assertIn(self.fx.CommitteeQuestionData.arts_committee_question_two.question,
questions)
|
|
2d8f221d55b0c04280a1b44ef72588882946fdea
|
takeoutStrings.py
|
takeoutStrings.py
|
#!/usr/bin/env python
# coding=utf-8
'''
This script will help you take out all strings.xml (placed in a language-region folder) from an Android projectDir
We now use it to provide the strings and upload them to Crowdin.
'''
from os import system,listdir,path
from sys import argv
def takeoutStrings(resPath):
for item in listdir(resPath):
itemPath = resPath + '/' + item
print item
if (not 'values' in item) :
rmUnused = 'rm -rf %s'%(itemPath)
print rmUnused
system(rmUnused)
continue
if ('large' in item or 'dpi' in item):
rmLayout = 'rm -rf %s'%(itemPath)
print rmLayout
system(rmLayout)
continue
if (not path.isdir(itemPath)):
rmFile = 'rm -rf %s'%(itemPath)
print rmFile
system(rmFile)
continue
for subItem in listdir(itemPath):
subItemPath = itemPath + '/' + subItem
if (not 'strings.xml' == subItem):
rmNotStrings = 'rm -rf %s'%(subItemPath)
print rmNotStrings
system(rmNotStrings)
if (len(listdir(itemPath)) == 0):
removeEmptyFolder = 'rm -rf %s'%(itemPath)
print 'Removing empty folder %s'%(itemPath)
system(removeEmptyFolder)
def main():
'''
Usage: takeoutStrings.py projectPath
'''
if (len(argv) < 2) :
print main.__doc__
return
projectDir = argv[1]
projResDir = projectDir + '/res/'
destBaseDir = '/tmp/'
destDir = '/tmp/res/'
cpRes = 'cp %s %s -rf '%(projResDir, destBaseDir)
print cpRes
system(cpRes)
takeoutStrings(destDir)
main()
|
Add script for Taking out of strings.xml files
|
Add script for Taking out of strings.xml files
|
Python
|
apache-2.0
|
androidyue/DroidPy
|
Add script for Taking out of strings.xml files
|
#!/usr/bin/env python
# coding=utf-8
'''
This script will help you take out all strings.xml (placed in a language-region folder) from an Android projectDir
We now use it to provide the strings and upload them to Crowdin.
'''
from os import system,listdir,path
from sys import argv
def takeoutStrings(resPath):
for item in listdir(resPath):
itemPath = resPath + '/' + item
print item
if (not 'values' in item) :
rmUnused = 'rm -rf %s'%(itemPath)
print rmUnused
system(rmUnused)
continue
if ('large' in item or 'dpi' in item):
rmLayout = 'rm -rf %s'%(itemPath)
print rmLayout
system(rmLayout)
continue
if (not path.isdir(itemPath)):
rmFile = 'rm -rf %s'%(itemPath)
print rmFile
system(rmFile)
continue
for subItem in listdir(itemPath):
subItemPath = itemPath + '/' + subItem
if (not 'strings.xml' == subItem):
rmNotStrings = 'rm -rf %s'%(subItemPath)
print rmNotStrings
system(rmNotStrings)
if (len(listdir(itemPath)) == 0):
removeEmptyFolder = 'rm -rf %s'%(itemPath)
print 'Removing empty folder %s'%(itemPath)
system(removeEmptyFolder)
def main():
'''
Usage: takeoutStrings.py projectPath
'''
if (len(argv) < 2) :
print main.__doc__
return
projectDir = argv[1]
projResDir = projectDir + '/res/'
destBaseDir = '/tmp/'
destDir = '/tmp/res/'
cpRes = 'cp %s %s -rf '%(projResDir, destBaseDir)
print cpRes
system(cpRes)
takeoutStrings(destDir)
main()
|
<commit_before><commit_msg>Add script for Taking out of strings.xml files<commit_after>
|
#!/usr/bin/env python
# coding=utf-8
'''
This script will help you take out all strings.xml (placed in a language-region folder) from an Android projectDir
We now use it to provide the strings and upload them to Crowdin.
'''
from os import system,listdir,path
from sys import argv
def takeoutStrings(resPath):
for item in listdir(resPath):
itemPath = resPath + '/' + item
print item
if (not 'values' in item) :
rmUnused = 'rm -rf %s'%(itemPath)
print rmUnused
system(rmUnused)
continue
if ('large' in item or 'dpi' in item):
rmLayout = 'rm -rf %s'%(itemPath)
print rmLayout
system(rmLayout)
continue
if (not path.isdir(itemPath)):
rmFile = 'rm -rf %s'%(itemPath)
print rmFile
system(rmFile)
continue
for subItem in listdir(itemPath):
subItemPath = itemPath + '/' + subItem
if (not 'strings.xml' == subItem):
rmNotStrings = 'rm -rf %s'%(subItemPath)
print rmNotStrings
system(rmNotStrings)
if (len(listdir(itemPath)) == 0):
removeEmptyFolder = 'rm -rf %s'%(itemPath)
print 'Removing empty folder %s'%(itemPath)
system(removeEmptyFolder)
def main():
'''
Usage: takeoutStrings.py projectPath
'''
if (len(argv) < 2) :
print main.__doc__
return
projectDir = argv[1]
projResDir = projectDir + '/res/'
destBaseDir = '/tmp/'
destDir = '/tmp/res/'
cpRes = 'cp %s %s -rf '%(projResDir, destBaseDir)
print cpRes
system(cpRes)
takeoutStrings(destDir)
main()
|
Add script for Taking out of strings.xml files#!/usr/bin/env python
# coding=utf-8
'''
This script will help you take out all strings.xml (placed in a language-region folder) from an Android projectDir
We now use it to provide the strings and upload them to Crowdin.
'''
from os import system,listdir,path
from sys import argv
def takeoutStrings(resPath):
for item in listdir(resPath):
itemPath = resPath + '/' + item
print item
if (not 'values' in item) :
rmUnused = 'rm -rf %s'%(itemPath)
print rmUnused
system(rmUnused)
continue
if ('large' in item or 'dpi' in item):
rmLayout = 'rm -rf %s'%(itemPath)
print rmLayout
system(rmLayout)
continue
if (not path.isdir(itemPath)):
rmFile = 'rm -rf %s'%(itemPath)
print rmFile
system(rmFile)
continue
for subItem in listdir(itemPath):
subItemPath = itemPath + '/' + subItem
if (not 'strings.xml' == subItem):
rmNotStrings = 'rm -rf %s'%(subItemPath)
print rmNotStrings
system(rmNotStrings)
if (len(listdir(itemPath)) == 0):
removeEmptyFolder = 'rm -rf %s'%(itemPath)
print 'Removing empty folder %s'%(itemPath)
system(removeEmptyFolder)
def main():
'''
Usage: takeoutStrings.py projectPath
'''
if (len(argv) < 2) :
print main.__doc__
return
projectDir = argv[1]
projResDir = projectDir + '/res/'
destBaseDir = '/tmp/'
destDir = '/tmp/res/'
cpRes = 'cp %s %s -rf '%(projResDir, destBaseDir)
print cpRes
system(cpRes)
takeoutStrings(destDir)
main()
|
<commit_before><commit_msg>Add script for Taking out of strings.xml files<commit_after>#!/usr/bin/env python
# coding=utf-8
'''
This script will help you take out all strings.xml (placed in a language-region folder) from an Android projectDir
We now use it to provide the strings and upload them to Crowdin.
'''
from os import system,listdir,path
from sys import argv
def takeoutStrings(resPath):
for item in listdir(resPath):
itemPath = resPath + '/' + item
print item
if (not 'values' in item) :
rmUnused = 'rm -rf %s'%(itemPath)
print rmUnused
system(rmUnused)
continue
if ('large' in item or 'dpi' in item):
rmLayout = 'rm -rf %s'%(itemPath)
print rmLayout
system(rmLayout)
continue
if (not path.isdir(itemPath)):
rmFile = 'rm -rf %s'%(itemPath)
print rmFile
system(rmFile)
continue
for subItem in listdir(itemPath):
subItemPath = itemPath + '/' + subItem
if (not 'strings.xml' == subItem):
rmNotStrings = 'rm -rf %s'%(subItemPath)
print rmNotStrings
system(rmNotStrings)
if (len(listdir(itemPath)) == 0):
removeEmptyFolder = 'rm -rf %s'%(itemPath)
print 'Removing empty folder %s'%(itemPath)
system(removeEmptyFolder)
def main():
'''
Usage: takeoutStrings.py projectPath
'''
if (len(argv) < 2) :
print main.__doc__
return
projectDir = argv[1]
projResDir = projectDir + '/res/'
destBaseDir = '/tmp/'
destDir = '/tmp/res/'
cpRes = 'cp %s %s -rf '%(projResDir, destBaseDir)
print cpRes
system(cpRes)
takeoutStrings(destDir)
main()
|
|
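The script drives all deletions through os.system('rm -rf ...'), which is shell-dependent and unquoted. A sketch of the same pruning rules using only the standard library; this is an untested equivalent, not the project's code:

import shutil
from pathlib import Path

def _remove(path):
    # Remove a file or directory tree without shelling out.
    if path.is_dir():
        shutil.rmtree(path, ignore_errors=True)
    else:
        path.unlink()

def prune_non_strings(res_path):
    for item in Path(res_path).iterdir():
        # Same filters as the original: keep only values-* directories.
        if ('values' not in item.name or 'large' in item.name
                or 'dpi' in item.name or not item.is_dir()):
            _remove(item)
            continue
        for sub in item.iterdir():
            if sub.name != 'strings.xml':
                _remove(sub)
        if not any(item.iterdir()):
            item.rmdir()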
824b616123ca8378e3540409a522708997368ea1
|
crandom.py
|
crandom.py
|
from __future__ import print_function, division
import numpy as np
def randbool(n, rng=None):
if rng is None:
rng = np.random
return rng.randint(2, size=n) * 2 - 1
|
Add random number generation utility module.
|
Add random number generation utility module.
|
Python
|
bsd-3-clause
|
eddiejessup/ciabatta
|
Add random number generation utility module.
|
from __future__ import print_function, division
import numpy as np
def randbool(n, rng=None):
if rng is None:
rng = np.random
return rng.randint(2, size=n) * 2 - 1
|
<commit_before><commit_msg>Add random number generation utility module.<commit_after>
|
from __future__ import print_function, division
import numpy as np
def randbool(n, rng=None):
if rng is None:
rng = np.random
return rng.randint(2, size=n) * 2 - 1
|
Add random number generation utility module.from __future__ import print_function, division
import numpy as np
def randbool(n, rng=None):
if rng is None:
rng = np.random
return rng.randint(2, size=n) * 2 - 1
|
<commit_before><commit_msg>Add random number generation utility module.<commit_after>from __future__ import print_function, division
import numpy as np
def randbool(n, rng=None):
if rng is None:
rng = np.random
return rng.randint(2, size=n) * 2 - 1
|
|
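Note that despite the name, randbool yields values in {-1, +1} rather than booleans, i.e. a random-sign vector. A quick check of the same expression:

import numpy as np

rng = np.random.RandomState(0)
signs = rng.randint(2, size=8) * 2 - 1  # identical expression to randbool
print(signs)                            # eight random values drawn from {-1, 1}
assert set(np.unique(signs)) <= {-1, 1}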
634832b6ed70adfc3a54f6ca5a72e6b396f60d90
|
symsynd/exceptions.py
|
symsynd/exceptions.py
|
class SymbolicationError(Exception):
message = None
def __init__(self, message):
if isinstance(message, bytes):
message = message.decode('utf-8', 'replace')
Exception.__init__(self, message)
self.message = message
def __str__(self):
return self.message.encode('utf-8')
def __unicode__(self):
return self.message
|
Move exception into common module and add unicode support
|
Move exception into common module and add unicode support
|
Python
|
bsd-3-clause
|
getsentry/symsynd,getsentry/symsynd,getsentry/symsynd,getsentry/symsynd,getsentry/symsynd
|
Move exception into common module and add unicode support
|
class SymbolicationError(Exception):
message = None
def __init__(self, message):
if isinstance(message, bytes):
message = message.decode('utf-8', 'replace')
Exception.__init__(self, message)
self.message = message
def __str__(self):
return self.message.encode('utf-8')
def __unicode__(self):
return self.message
|
<commit_before><commit_msg>Move exception into common module and add unicode support<commit_after>
|
class SymbolicationError(Exception):
message = None
def __init__(self, message):
if isinstance(message, bytes):
message = message.decode('utf-8', 'replace')
Exception.__init__(self, message)
self.message = message
def __str__(self):
return self.message.encode('utf-8')
def __unicode__(self):
return self.message
|
Move exception into common module and add unicode supportclass SymbolicationError(Exception):
message = None
def __init__(self, message):
if isinstance(message, bytes):
message = message.decode('utf-8', 'replace')
Exception.__init__(self, message)
self.message = message
def __str__(self):
return self.message.encode('utf-8')
def __unicode__(self):
return self.message
|
<commit_before><commit_msg>Move exception into common module and add unicode support<commit_after>class SymbolicationError(Exception):
message = None
def __init__(self, message):
if isinstance(message, bytes):
message = message.decode('utf-8', 'replace')
Exception.__init__(self, message)
self.message = message
def __str__(self):
return self.message.encode('utf-8')
def __unicode__(self):
return self.message
|
|
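The __str__/__unicode__ pair is the Python 2 idiom for byte vs. text output; under Python 3, __str__ must return text, so the class collapses to a single method. A sketch of the Python 3 shape with the same decode-on-bytes behavior:

class SymbolicationError(Exception):
    def __init__(self, message):
        if isinstance(message, bytes):
            message = message.decode('utf-8', 'replace')
        super().__init__(message)
        self.message = message

    def __str__(self):
        return self.message  # text, never encoded bytes, on Python 3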
705b80fba3b165e35fa6d10656e3b888092aa86c
|
test/autopep8_vim.py
|
test/autopep8_vim.py
|
"""Run autopep8 on the selected buffer in Vim.
map <C-I> :pyf <path_to>/autopep8_vim.py<CR>
"""
import autopep8
import vim
# TODO: Find out how to get the actual encoding from Vim.
encoding = 'utf-8'
options = autopep8.parse_args(['--range',
str(vim.current.range.start),
str(vim.current.range.end),
''])[0]
source = '\n'.join(vim.current.buffer).decode(encoding) + '\n'
formatted = autopep8.fix_string(source, options=options)
if source != formatted:
vim.current.buffer[:] = [line.encode(encoding)
for line in formatted.splitlines()]
|
Add example of how to use autopep8 in Vim
|
Add example of how to use autopep8 in Vim
This example supports selecting a specific line range to format.
|
Python
|
mit
|
MeteorAdminz/autopep8,hhatto/autopep8,Vauxoo/autopep8,Vauxoo/autopep8,vauxoo-dev/autopep8,MeteorAdminz/autopep8,hhatto/autopep8,vauxoo-dev/autopep8,SG345/autopep8,SG345/autopep8
|
Add example of how to use autopep8 in Vim
This example supports selecting a specific line range to format.
|
"""Run autopep8 on the selected buffer in Vim.
map <C-I> :pyf <path_to>/autopep8_vim.py<CR>
"""
import autopep8
import vim
# TODO: Find out how to get the actual encoding from Vim.
encoding = 'utf-8'
options = autopep8.parse_args(['--range',
str(vim.current.range.start),
str(vim.current.range.end),
''])[0]
source = '\n'.join(vim.current.buffer).decode(encoding) + '\n'
formatted = autopep8.fix_string(source, options=options)
if source != formatted:
vim.current.buffer[:] = [line.encode(encoding)
for line in formatted.splitlines()]
|
<commit_before><commit_msg>Add example of how to use autopep8 in Vim
This example supports selecting a specific line range to format.<commit_after>
|
"""Run autopep8 on the selected buffer in Vim.
map <C-I> :pyf <path_to>/autopep8_vim.py<CR>
"""
import autopep8
import vim
# TODO: Find out how to get the actual encoding from Vim.
encoding = 'utf-8'
options = autopep8.parse_args(['--range',
str(vim.current.range.start),
str(vim.current.range.end),
''])[0]
source = '\n'.join(vim.current.buffer).decode(encoding) + '\n'
formatted = autopep8.fix_string(source, options=options)
if source != formatted:
vim.current.buffer[:] = [line.encode(encoding)
for line in formatted.splitlines()]
|
Add example of how to use autopep8 in Vim
This example supports selecting a specific line range to format."""Run autopep8 on the selected buffer in Vim.
map <C-I> :pyf <path_to>/autopep8_vim.py<CR>
"""
import autopep8
import vim
# TODO: Find out how to get the actual encoding from Vim.
encoding = 'utf-8'
options = autopep8.parse_args(['--range',
str(vim.current.range.start),
str(vim.current.range.end),
''])[0]
source = '\n'.join(vim.current.buffer).decode(encoding) + '\n'
formatted = autopep8.fix_string(source, options=options)
if source != formatted:
vim.current.buffer[:] = [line.encode(encoding)
for line in formatted.splitlines()]
|
<commit_before><commit_msg>Add example of how to use autopep8 in Vim
This example supports selecting a specific line range to format.<commit_after>"""Run autopep8 on the selected buffer in Vim.
map <C-I> :pyf <path_to>/autopep8_vim.py<CR>
"""
import autopep8
import vim
# TODO: Find out how to get the actual encoding from Vim.
encoding = 'utf-8'
options = autopep8.parse_args(['--range',
str(vim.current.range.start),
str(vim.current.range.end),
''])[0]
source = '\n'.join(vim.current.buffer).decode(encoding) + '\n'
formatted = autopep8.fix_string(source, options=options)
if source != formatted:
vim.current.buffer[:] = [line.encode(encoding)
for line in formatted.splitlines()]
|
|
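Outside Vim the same range-limited fix can be driven directly; a sketch against the same API the snippet targets (fix_string and the tuple-returning parse_args belong to older autopep8 releases; newer ones renamed fix_string to fix_code and changed parse_args):

import autopep8

source = 'x=1\ny    = 2\n'
options = autopep8.parse_args(['--range', '1', '1', ''])[0]
print(autopep8.fix_string(source, options=options))  # only line 1 is reformatted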
a64c04bcdd39d3c4f734e790a8a1f1f072d31fac
|
example/tests/test_settings.py
|
example/tests/test_settings.py
|
from django_webtest import WebTest
from django.core.urlresolvers import reverse
from django.test.utils import override_settings
from . import factory
class AdminTestCase(WebTest):
def setUp(self):
super(AdminTestCase, self).setUp()
self.user = factory.create_superuser()
self.image = factory.create_cropped_image()
def test_jquery_included(self):
edit_view = reverse('admin:example_image_change', args=[self.image.pk])
response = self.app.get(edit_view, user=self.user)
self.assertTrue('src="/static/js/jquery.min.js">' in response.content)
@override_settings(IMAGE_CROPPING_JQUERY_URL=None)
def test_jquery_not_included(self):
edit_view = reverse('admin:example_image_change', args=[self.image.pk])
response = self.app.get(edit_view, user=self.user)
self.assertFalse('src="/static/js/jquery.min.js">' in response.content)
|
Test disabling inclusion of jQuery
|
Test disabling inclusion of jQuery
|
Python
|
bsd-3-clause
|
winzard/django-image-cropping,henriquechehad/django-image-cropping,winzard/django-image-cropping,henriquechehad/django-image-cropping,winzard/django-image-cropping,henriquechehad/django-image-cropping
|
Test disabling inclusion of jQuery
|
from django_webtest import WebTest
from django.core.urlresolvers import reverse
from django.test.utils import override_settings
from . import factory
class AdminTestCase(WebTest):
def setUp(self):
super(AdminTestCase, self).setUp()
self.user = factory.create_superuser()
self.image = factory.create_cropped_image()
def test_jquery_included(self):
edit_view = reverse('admin:example_image_change', args=[self.image.pk])
response = self.app.get(edit_view, user=self.user)
self.assertTrue('src="/static/js/jquery.min.js">' in response.content)
@override_settings(IMAGE_CROPPING_JQUERY_URL=None)
def test_jquery_not_included(self):
edit_view = reverse('admin:example_image_change', args=[self.image.pk])
response = self.app.get(edit_view, user=self.user)
self.assertFalse('src="/static/js/jquery.min.js">' in response.content)
|
<commit_before><commit_msg>Test disabling inclusion of jQuery<commit_after>
|
from django_webtest import WebTest
from django.core.urlresolvers import reverse
from django.test.utils import override_settings
from . import factory
class AdminTestCase(WebTest):
def setUp(self):
super(AdminTestCase, self).setUp()
self.user = factory.create_superuser()
self.image = factory.create_cropped_image()
def test_jquery_included(self):
edit_view = reverse('admin:example_image_change', args=[self.image.pk])
response = self.app.get(edit_view, user=self.user)
self.assertTrue('src="/static/js/jquery.min.js">' in response.content)
@override_settings(IMAGE_CROPPING_JQUERY_URL=None)
def test_jquery_not_included(self):
edit_view = reverse('admin:example_image_change', args=[self.image.pk])
response = self.app.get(edit_view, user=self.user)
self.assertFalse('src="/static/js/jquery.min.js">' in response.content)
|
Test disabling inclusion of jQueryfrom django_webtest import WebTest
from django.core.urlresolvers import reverse
from django.test.utils import override_settings
from . import factory
class AdminTestCase(WebTest):
def setUp(self):
super(AdminTestCase, self).setUp()
self.user = factory.create_superuser()
self.image = factory.create_cropped_image()
def test_jquery_included(self):
edit_view = reverse('admin:example_image_change', args=[self.image.pk])
response = self.app.get(edit_view, user=self.user)
self.assertTrue('src="/static/js/jquery.min.js">' in response.content)
@override_settings(IMAGE_CROPPING_JQUERY_URL=None)
def test_jquery_not_included(self):
edit_view = reverse('admin:example_image_change', args=[self.image.pk])
response = self.app.get(edit_view, user=self.user)
self.assertFalse('src="/static/js/jquery.min.js">' in response.content)
|
<commit_before><commit_msg>Test disabling inclusion of jQuery<commit_after>from django_webtest import WebTest
from django.core.urlresolvers import reverse
from django.test.utils import override_settings
from . import factory
class AdminTestCase(WebTest):
def setUp(self):
super(AdminTestCase, self).setUp()
self.user = factory.create_superuser()
self.image = factory.create_cropped_image()
def test_jquery_included(self):
edit_view = reverse('admin:example_image_change', args=[self.image.pk])
response = self.app.get(edit_view, user=self.user)
self.assertTrue('src="/static/js/jquery.min.js">' in response.content)
@override_settings(IMAGE_CROPPING_JQUERY_URL=None)
def test_jquery_not_included(self):
edit_view = reverse('admin:example_image_change', args=[self.image.pk])
response = self.app.get(edit_view, user=self.user)
self.assertFalse('src="/static/js/jquery.min.js">' in response.content)
|
|
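override_settings as a method decorator patches the named setting for that single test only, which is what toggles the jQuery include above. The same standard Django pattern in isolation (assumes a configured Django test environment):

from django.conf import settings
from django.test import SimpleTestCase
from django.test.utils import override_settings

class SettingsDemo(SimpleTestCase):
    @override_settings(IMAGE_CROPPING_JQUERY_URL=None)
    def test_overridden(self):
        # The setting is patched only while this test runs.
        self.assertIsNone(settings.IMAGE_CROPPING_JQUERY_URL)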
76139dd3bfab10256c05aed7998f3c1d79265ced
|
fireplace/cards/tgt/paladin.py
|
fireplace/cards/tgt/paladin.py
|
from ..utils import *
##
# Minions
# Murloc Knight
class AT_076:
inspire = Summon(CONTROLLER, RandomMurloc())
# Eadric the Pure
class AT_081:
play = Buff(ALL_MINIONS, "AT_081e")
##
# Spells
# Seal of Champions
class AT_074:
play = Buff(TARGET, "AT_074e2")
##
# Secrets
# Competitive Spirit
class AT_073:
events = OWN_TURN_BEGIN.on(
Buff(FRIENDLY_MINIONS, "AT_073e"), Reveal(SELF)
)
|
Implement Paladin cards for The Grand Tournament
|
Implement Paladin cards for The Grand Tournament
|
Python
|
agpl-3.0
|
Ragowit/fireplace,oftc-ftw/fireplace,smallnamespace/fireplace,Meerkov/fireplace,Ragowit/fireplace,liujimj/fireplace,smallnamespace/fireplace,amw2104/fireplace,beheh/fireplace,jleclanche/fireplace,liujimj/fireplace,NightKev/fireplace,Meerkov/fireplace,oftc-ftw/fireplace,amw2104/fireplace
|
Implement Paladin cards for The Grand Tournament
|
from ..utils import *
##
# Minions
# Murloc Knight
class AT_076:
inspire = Summon(CONTROLLER, RandomMurloc())
# Eadric the Pure
class AT_081:
play = Buff(ALL_MINIONS, "AT_081e")
##
# Spells
# Seal of Champions
class AT_074:
play = Buff(TARGET, "AT_074e2")
##
# Secrets
# Competitive Spirit
class AT_073:
events = OWN_TURN_BEGIN.on(
Buff(FRIENDLY_MINIONS, "AT_073e"), Reveal(SELF)
)
|
<commit_before><commit_msg>Implement Paladin cards for The Grand Tournament<commit_after>
|
from ..utils import *
##
# Minions
# Murloc Knight
class AT_076:
inspire = Summon(CONTROLLER, RandomMurloc())
# Eadric the Pure
class AT_081:
play = Buff(ALL_MINIONS, "AT_081e")
##
# Spells
# Seal of Champions
class AT_074:
play = Buff(TARGET, "AT_074e2")
##
# Secrets
# Competitive Spirit
class AT_073:
events = OWN_TURN_BEGIN.on(
Buff(FRIENDLY_MINIONS, "AT_073e"), Reveal(SELF)
)
|
Implement Paladin cards for The Grand Tournamentfrom ..utils import *
##
# Minions
# Murloc Knight
class AT_076:
inspire = Summon(CONTROLLER, RandomMurloc())
# Eadric the Pure
class AT_081:
play = Buff(ALL_MINIONS, "AT_081e")
##
# Spells
# Seal of Champions
class AT_074:
play = Buff(TARGET, "AT_074e2")
##
# Secrets
# Competitive Spirit
class AT_073:
events = OWN_TURN_BEGIN.on(
Buff(FRIENDLY_MINIONS, "AT_073e"), Reveal(SELF)
)
|
<commit_before><commit_msg>Implement Paladin cards for The Grand Tournament<commit_after>from ..utils import *
##
# Minions
# Murloc Knight
class AT_076:
inspire = Summon(CONTROLLER, RandomMurloc())
# Eadric the Pure
class AT_081:
play = Buff(ALL_MINIONS, "AT_081e")
##
# Spells
# Seal of Champions
class AT_074:
play = Buff(TARGET, "AT_074e2")
##
# Secrets
# Competitive Spirit
class AT_073:
events = OWN_TURN_BEGIN.on(
Buff(FRIENDLY_MINIONS, "AT_073e"), Reveal(SELF)
)
|
|
71a29223077e6d447238789191446a0c5cc4cdd8
|
scan.py
|
scan.py
|
#!/usr/bin/env python
# encoding: utf-8
from wand.image import Image
def rotate_image(filename, degrees):
"""Rotates filename degrees degrees."""
with Image(filename=filename) as image:
with image.clone() as rotated:
rotated.rotate(degrees)
rotated.save(filename=filename)
def main():
"""TODO: docstring"""
rotate_image('test.pdf', 180)
if __name__ == '__main__':
main()
|
Add rotate_image() that rotates image a given amount of degrees
|
Add rotate_image() that rotates image a given amount of degrees
|
Python
|
bsd-2-clause
|
sjktje/sjkscan,sjktje/sjkscan
|
Add rotate_image() that rotates image a given amount of degrees
|
#!/usr/bin/env python
# encoding: utf-8
from wand.image import Image
def rotate_image(filename, degrees):
"""Rotates filename degrees degrees."""
with Image(filename=filename) as image:
with image.clone() as rotated:
rotated.rotate(degrees)
rotated.save(filename=filename)
def main():
"""TODO: docstring"""
rotate_image('test.pdf', 180)
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add rotate_image() that rotates image a given amount of degrees<commit_after>
|
#!/usr/bin/env python
# encoding: utf-8
from wand.image import Image
def rotate_image(filename, degrees):
"""Rotates filename degrees degrees."""
with Image(filename=filename) as image:
with image.clone() as rotated:
rotated.rotate(degrees)
rotated.save(filename=filename)
def main():
"""TODO: docstring"""
rotate_image('test.pdf', 180)
if __name__ == '__main__':
main()
|
Add rotate_image() that rotates image a given amount of degrees#!/usr/bin/env python
# encoding: utf-8
from wand.image import Image
def rotate_image(filename, degrees):
"""Rotates filename degrees degrees."""
with Image(filename=filename) as image:
with image.clone() as rotated:
rotated.rotate(degrees)
rotated.save(filename=filename)
def main():
"""TODO: docstring"""
rotate_image('test.pdf', 180)
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add rotate_image() that rotates image a given amount of degrees<commit_after>#!/usr/bin/env python
# encoding: utf-8
from wand.image import Image
def rotate_image(filename, degrees):
"""Rotates filename degrees degrees."""
with Image(filename=filename) as image:
with image.clone() as rotated:
rotated.rotate(degrees)
rotated.save(filename=filename)
def main():
"""TODO: docstring"""
rotate_image('test.pdf', 180)
if __name__ == '__main__':
main()
|
|
3cf2eda808b9849a8bc57929b31826bbb95a41b9
|
examples/test_window_switching.py
|
examples/test_window_switching.py
|
"""
Sometimes tests open new tabs/windows, and you'll need
to switch to them first in order to interact with them.
The starting window is window(0). Then increments by 1.
"""
from seleniumbase import BaseCase
class TabSwitchingTests(BaseCase):
def test_switch_to_tabs(self):
self.open("data:text/html,<h1>Page A</h1>")
self.assert_text("Page A")
self.open_new_window()
self.open("data:text/html,<h1>Page B</h1>")
self.assert_text("Page B")
self.switch_to_window(0)
self.assert_text("Page A")
self.assert_text_not_visible("Page B")
self.switch_to_window(1)
self.assert_text("Page B")
self.assert_text_not_visible("Page A")
|
Add an example test that has switching to a new tab/window
|
Add an example test that has switching to a new tab/window
|
Python
|
mit
|
seleniumbase/SeleniumBase,seleniumbase/SeleniumBase,mdmintz/SeleniumBase,mdmintz/SeleniumBase,mdmintz/SeleniumBase,seleniumbase/SeleniumBase,mdmintz/SeleniumBase,seleniumbase/SeleniumBase
|
Add an example test that has switching to a new tab/window
|
"""
Sometimes tests open new tabs/windows, and you'll need
to switch to them first in order to interact with them.
The starting window is window(0). Then increments by 1.
"""
from seleniumbase import BaseCase
class TabSwitchingTests(BaseCase):
def test_switch_to_tabs(self):
self.open("data:text/html,<h1>Page A</h1>")
self.assert_text("Page A")
self.open_new_window()
self.open("data:text/html,<h1>Page B</h1>")
self.assert_text("Page B")
self.switch_to_window(0)
self.assert_text("Page A")
self.assert_text_not_visible("Page B")
self.switch_to_window(1)
self.assert_text("Page B")
self.assert_text_not_visible("Page A")
|
<commit_before><commit_msg>Add an example test that has switching to a new tab/window<commit_after>
|
"""
Sometimes tests open new tabs/windows, and you'll need
to switch to them first in order to interact with them.
The starting window is window(0). Then increments by 1.
"""
from seleniumbase import BaseCase
class TabSwitchingTests(BaseCase):
def test_switch_to_tabs(self):
self.open("data:text/html,<h1>Page A</h1>")
self.assert_text("Page A")
self.open_new_window()
self.open("data:text/html,<h1>Page B</h1>")
self.assert_text("Page B")
self.switch_to_window(0)
self.assert_text("Page A")
self.assert_text_not_visible("Page B")
self.switch_to_window(1)
self.assert_text("Page B")
self.assert_text_not_visible("Page A")
|
Add an example test that has switching to a new tab/window"""
Sometimes tests open new tabs/windows, and you'll need
to switch to them first in order to interact with them.
The starting window is window(0). Then increments by 1.
"""
from seleniumbase import BaseCase
class TabSwitchingTests(BaseCase):
def test_switch_to_tabs(self):
self.open("data:text/html,<h1>Page A</h1>")
self.assert_text("Page A")
self.open_new_window()
self.open("data:text/html,<h1>Page B</h1>")
self.assert_text("Page B")
self.switch_to_window(0)
self.assert_text("Page A")
self.assert_text_not_visible("Page B")
self.switch_to_window(1)
self.assert_text("Page B")
self.assert_text_not_visible("Page A")
|
<commit_before><commit_msg>Add an example test that has switching to a new tab/window<commit_after>"""
Sometimes tests open new tabs/windows, and you'll need
to switch to them first in order to interact with them.
The starting window is window(0). Then increments by 1.
"""
from seleniumbase import BaseCase
class TabSwitchingTests(BaseCase):
def test_switch_to_tabs(self):
self.open("data:text/html,<h1>Page A</h1>")
self.assert_text("Page A")
self.open_new_window()
self.open("data:text/html,<h1>Page B</h1>")
self.assert_text("Page B")
self.switch_to_window(0)
self.assert_text("Page A")
self.assert_text_not_visible("Page B")
self.switch_to_window(1)
self.assert_text("Page B")
self.assert_text_not_visible("Page A")
|
|
0834e37317b44940c27cdcdd3ee9929498356220
|
copperhead/runtime/np_interop.py
|
copperhead/runtime/np_interop.py
|
import numpy as np
import copperhead.compiler.backendtypes as ET
import copperhead.compiler.coretypes as T
from copperhead.compiler.conversions import back_to_front_type
def to_numpy(ary):
front_type = back_to_front_type(ary.type)
if not isinstance(front_type, T.Seq):
raise ValueError("Not convertible to numpy")
sub = front_type.unbox()
if str(sub) == str(T.Int):
return np.fromiter(ary, dtype=np.int32, count=-1)
elif str(sub) == str(T.Long):
return np.fromiter(ary, dtype=np.int64, count=-1)
elif str(sub) == str(T.Float):
return np.fromiter(ary, dtype=np.float32, count=-1)
elif str(sub) == str(T.Double):
return np.fromiter(ary, dtype=np.float64, count=-1)
elif str(sub) == str(T.Bool):
return np.fromiter(ary, dtype=np.bool, count=-1)
else:
raise ValueError("Not convertible to numpy")
|
Add conversion function between cuarray and np.ndarray.
|
Add conversion function between cuarray and np.ndarray.
|
Python
|
apache-2.0
|
beni55/copperhead,copperhead/copperhead,shyamalschandra/copperhead,shyamalschandra/copperhead,beni55/copperhead,copperhead/copperhead
|
Add conversion function between cuarray and np.ndarray.
|
import numpy as np
import copperhead.compiler.backendtypes as ET
import copperhead.compiler.coretypes as T
from copperhead.compiler.conversions import back_to_front_type
def to_numpy(ary):
front_type = back_to_front_type(ary.type)
if not isinstance(front_type, T.Seq):
raise ValueError("Not convertible to numpy")
sub = front_type.unbox()
if str(sub) == str(T.Int):
return np.fromiter(ary, dtype=np.int32, count=-1)
elif str(sub) == str(T.Long):
return np.fromiter(ary, dtype=np.int64, count=-1)
elif str(sub) == str(T.Float):
return np.fromiter(ary, dtype=np.float32, count=-1)
elif str(sub) == str(T.Double):
return np.fromiter(ary, dtype=np.float64, count=-1)
elif str(sub) == str(T.Bool):
return np.fromiter(ary, dtype=np.bool, count=-1)
else:
raise ValueError("Not convertible to numpy")
|
<commit_before><commit_msg>Add conversion function between cuarray and np.ndarray.<commit_after>
|
import numpy as np
import copperhead.compiler.backendtypes as ET
import copperhead.compiler.coretypes as T
from copperhead.compiler.conversions import back_to_front_type
def to_numpy(ary):
front_type = back_to_front_type(ary.type)
if not isinstance(front_type, T.Seq):
raise ValueError("Not convertible to numpy")
sub = front_type.unbox()
if str(sub) == str(T.Int):
return np.fromiter(ary, dtype=np.int32, count=-1)
elif str(sub) == str(T.Long):
return np.fromiter(ary, dtype=np.int64, count=-1)
elif str(sub) == str(T.Float):
return np.fromiter(ary, dtype=np.float32, count=-1)
elif str(sub) == str(T.Double):
return np.fromiter(ary, dtype=np.float64, count=-1)
elif str(sub) == str(T.Bool):
return np.fromiter(ary, dtype=np.bool, count=-1)
else:
raise ValueError("Not convertible to numpy")
|
Add conversion function between cuarray and np.ndarray.import numpy as np
import copperhead.compiler.backendtypes as ET
import copperhead.compiler.coretypes as T
from copperhead.compiler.conversions import back_to_front_type
def to_numpy(ary):
front_type = back_to_front_type(ary.type)
if not isinstance(front_type, T.Seq):
raise ValueError("Not convertible to numpy")
sub = front_type.unbox()
if str(sub) == str(T.Int):
return np.fromiter(ary, dtype=np.int32, count=-1)
elif str(sub) == str(T.Long):
return np.fromiter(ary, dtype=np.int64, count=-1)
elif str(sub) == str(T.Float):
return np.fromiter(ary, dtype=np.float32, count=-1)
elif str(sub) == str(T.Double):
return np.fromiter(ary, dtype=np.float64, count=-1)
elif str(sub) == str(T.Bool):
return np.fromiter(ary, dtype=np.bool, count=-1)
else:
raise ValueError("Not convertible to numpy")
|
<commit_before><commit_msg>Add conversion function between cuarray and np.ndarray.<commit_after>import numpy as np
import copperhead.compiler.backendtypes as ET
import copperhead.compiler.coretypes as T
from copperhead.compiler.conversions import back_to_front_type
def to_numpy(ary):
front_type = back_to_front_type(ary.type)
if not isinstance(front_type, T.Seq):
raise ValueError("Not convertible to numpy")
sub = front_type.unbox()
if str(sub) == str(T.Int):
return np.fromiter(ary, dtype=np.int32, count=-1)
elif str(sub) == str(T.Long):
return np.fromiter(ary, dtype=np.int64, count=-1)
elif str(sub) == str(T.Float):
return np.fromiter(ary, dtype=np.float32, count=-1)
elif str(sub) == str(T.Double):
return np.fromiter(ary, dtype=np.float64, count=-1)
elif str(sub) == str(T.Bool):
return np.fromiter(ary, dtype=np.bool, count=-1)
else:
raise ValueError("Not convertible to numpy")
|
|
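np.fromiter is the workhorse in the conversion above: it consumes any iterable into a typed 1-D array, and count=-1 means read until the iterator is exhausted. A standalone illustration:

import numpy as np

gen = (i * i for i in range(5))
arr = np.fromiter(gen, dtype=np.int32, count=-1)  # count=-1: consume everything
print(arr)        # [ 0  1  4  9 16]
print(arr.dtype)  # int32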
3279d68859d947f2e618e2770a9fd1b7ce3f26c9
|
tests/test_cardxml.py
|
tests/test_cardxml.py
|
from hearthstone.enums import GameTag, Rarity
import utils
def test_all_tags_known():
"""
Iterate through the card database and check that all specified GameTags
are known in hearthstone.enums.GameTag
"""
unknown_tags = set()
known_tags = list(GameTag)
known_rarities = list(Rarity)
# Check the db loaded correctly
assert utils.fireplace.cards.db
for card in utils.fireplace.cards.db.values():
card_tags = [int(e.attrib["enumID"]) for e in card.xml.findall("./Tag")]
for tag in card_tags:
# We have fake tags in fireplace.enums which are always negative
if tag not in known_tags and tag > 0:
unknown_tags.add(tag)
# Test rarities as well (cf. TB_BlingBrawl_Blade1e in 10956...)
assert card.rarity in known_rarities
assert not unknown_tags
|
Add a test to verify that all GameTags are known
|
Add a test to verify that all GameTags are known
|
Python
|
agpl-3.0
|
Ragowit/fireplace,smallnamespace/fireplace,jleclanche/fireplace,beheh/fireplace,amw2104/fireplace,amw2104/fireplace,smallnamespace/fireplace,NightKev/fireplace,Ragowit/fireplace
|
Add a test to verify that all GameTags are known
|
from hearthstone.enums import GameTag, Rarity
import utils
def test_all_tags_known():
"""
Iterate through the card database and check that all specified GameTags
are known in hearthstone.enums.GameTag
"""
unknown_tags = set()
known_tags = list(GameTag)
known_rarities = list(Rarity)
# Check the db loaded correctly
assert utils.fireplace.cards.db
for card in utils.fireplace.cards.db.values():
card_tags = [int(e.attrib["enumID"]) for e in card.xml.findall("./Tag")]
for tag in card_tags:
# We have fake tags in fireplace.enums which are always negative
if tag not in known_tags and tag > 0:
unknown_tags.add(tag)
# Test rarities as well (cf. TB_BlingBrawl_Blade1e in 10956...)
assert card.rarity in known_rarities
assert not unknown_tags
|
<commit_before><commit_msg>Add a test to verify that all GameTags are known<commit_after>
|
from hearthstone.enums import GameTag, Rarity
import utils
def test_all_tags_known():
"""
Iterate through the card database and check that all specified GameTags
are known in hearthstone.enums.GameTag
"""
unknown_tags = set()
known_tags = list(GameTag)
known_rarities = list(Rarity)
# Check the db loaded correctly
assert utils.fireplace.cards.db
for card in utils.fireplace.cards.db.values():
card_tags = [int(e.attrib["enumID"]) for e in card.xml.findall("./Tag")]
for tag in card_tags:
# We have fake tags in fireplace.enums which are always negative
if tag not in known_tags and tag > 0:
unknown_tags.add(tag)
# Test rarities as well (cf. TB_BlingBrawl_Blade1e in 10956...)
assert card.rarity in known_rarities
assert not unknown_tags
|
Add a test to verify that all GameTags are knownfrom hearthstone.enums import GameTag, Rarity
import utils
def test_all_tags_known():
"""
Iterate through the card database and check that all specified GameTags
are known in hearthstone.enums.GameTag
"""
unknown_tags = set()
known_tags = list(GameTag)
known_rarities = list(Rarity)
# Check the db loaded correctly
assert utils.fireplace.cards.db
for card in utils.fireplace.cards.db.values():
card_tags = [int(e.attrib["enumID"]) for e in card.xml.findall("./Tag")]
for tag in card_tags:
# We have fake tags in fireplace.enums which are always negative
if tag not in known_tags and tag > 0:
unknown_tags.add(tag)
# Test rarities as well (cf. TB_BlingBrawl_Blade1e in 10956...)
assert card.rarity in known_rarities
assert not unknown_tags
|
<commit_before><commit_msg>Add a test to verify that all GameTags are known<commit_after>from hearthstone.enums import GameTag, Rarity
import utils
def test_all_tags_known():
"""
Iterate through the card database and check that all specified GameTags
are known in hearthstone.enums.GameTag
"""
unknown_tags = set()
known_tags = list(GameTag)
known_rarities = list(Rarity)
# Check the db loaded correctly
assert utils.fireplace.cards.db
for card in utils.fireplace.cards.db.values():
card_tags = [int(e.attrib["enumID"]) for e in card.xml.findall("./Tag")]
for tag in card_tags:
# We have fake tags in fireplace.enums which are always negative
if tag not in known_tags and tag > 0:
unknown_tags.add(tag)
# Test rarities as well (cf. TB_BlingBrawl_Blade1e in 10956...)
assert card.rarity in known_rarities
assert not unknown_tags
|
|
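A minimal illustration of the membership check the test leans on: IntEnum members compare equal to plain ints, so an integer tag id can be looked up in list(GameTag) directly. FakeGameTag below is a stand-in, not the real hearthstone.enums.GameTag:
from enum import IntEnum
class FakeGameTag(IntEnum):  # stand-in enum with one plausible member
    HEALTH = 45
known_tags = list(FakeGameTag)
assert 45 in known_tags        # known id: passes the check
assert 9999 not in known_tags  # unknown id: would land in unknown_tags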
eaf44ba0daccf08fa223ecd3d6fc9c967851eeb5
|
examples/stream_dealer_router.py
|
examples/stream_dealer_router.py
|
import asyncio
import aiozmq
import zmq
@asyncio.coroutine
def go():
router = yield from aiozmq.create_zmq_stream(
zmq.ROUTER,
bind='tcp://127.0.0.1:*')
addr = list(router.transport.bindings())[0]
dealer = yield from aiozmq.create_zmq_stream(
zmq.DEALER,
connect=addr)
for i in range(10):
msg = (b'data', b'ask', str(i).encode('utf-8'))
dealer.write(msg)
data = yield from router.read()
router.write(data)
answer = yield from dealer.read()
print(answer)
dealer.close()
router.close()
def main():
asyncio.get_event_loop().run_until_complete(go())
print("DONE")
if __name__ == '__main__':
main()
|
Add example for zmq streams
|
Add example for zmq streams
|
Python
|
bsd-2-clause
|
MetaMemoryT/aiozmq,aio-libs/aiozmq,asteven/aiozmq,claws/aiozmq
|
Add example for zmq streams
|
import asyncio
import aiozmq
import zmq
@asyncio.coroutine
def go():
router = yield from aiozmq.create_zmq_stream(
zmq.ROUTER,
bind='tcp://127.0.0.1:*')
addr = list(router.transport.bindings())[0]
dealer = yield from aiozmq.create_zmq_stream(
zmq.DEALER,
connect=addr)
for i in range(10):
msg = (b'data', b'ask', str(i).encode('utf-8'))
dealer.write(msg)
data = yield from router.read()
router.write(data)
answer = yield from dealer.read()
print(answer)
dealer.close()
router.close()
def main():
asyncio.get_event_loop().run_until_complete(go())
print("DONE")
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add example for zmq streams<commit_after>
|
import asyncio
import aiozmq
import zmq
@asyncio.coroutine
def go():
router = yield from aiozmq.create_zmq_stream(
zmq.ROUTER,
bind='tcp://127.0.0.1:*')
addr = list(router.transport.bindings())[0]
dealer = yield from aiozmq.create_zmq_stream(
zmq.DEALER,
connect=addr)
for i in range(10):
msg = (b'data', b'ask', str(i).encode('utf-8'))
dealer.write(msg)
data = yield from router.read()
router.write(data)
answer = yield from dealer.read()
print(answer)
dealer.close()
router.close()
def main():
asyncio.get_event_loop().run_until_complete(go())
print("DONE")
if __name__ == '__main__':
main()
|
Add example for zmq streamsimport asyncio
import aiozmq
import zmq
@asyncio.coroutine
def go():
router = yield from aiozmq.create_zmq_stream(
zmq.ROUTER,
bind='tcp://127.0.0.1:*')
addr = list(router.transport.bindings())[0]
dealer = yield from aiozmq.create_zmq_stream(
zmq.DEALER,
connect=addr)
for i in range(10):
msg = (b'data', b'ask', str(i).encode('utf-8'))
dealer.write(msg)
data = yield from router.read()
router.write(data)
answer = yield from dealer.read()
print(answer)
dealer.close()
router.close()
def main():
asyncio.get_event_loop().run_until_complete(go())
print("DONE")
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add example for zmq streams<commit_after>import asyncio
import aiozmq
import zmq
@asyncio.coroutine
def go():
router = yield from aiozmq.create_zmq_stream(
zmq.ROUTER,
bind='tcp://127.0.0.1:*')
addr = list(router.transport.bindings())[0]
dealer = yield from aiozmq.create_zmq_stream(
zmq.DEALER,
connect=addr)
for i in range(10):
msg = (b'data', b'ask', str(i).encode('utf-8'))
dealer.write(msg)
data = yield from router.read()
router.write(data)
answer = yield from dealer.read()
print(answer)
dealer.close()
router.close()
def main():
asyncio.get_event_loop().run_until_complete(go())
print("DONE")
if __name__ == '__main__':
main()
|
|
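For reference, a sketch of the same ping-pong in Python 3.5+ async/await syntax; the aiozmq calls are unchanged and are assumed to be awaitable coroutines (true for modern aiozmq, but verify against your version):
async def go_awaitable():
    router = await aiozmq.create_zmq_stream(zmq.ROUTER, bind='tcp://127.0.0.1:*')
    addr = list(router.transport.bindings())[0]
    dealer = await aiozmq.create_zmq_stream(zmq.DEALER, connect=addr)
    dealer.write((b'data', b'ask', b'0'))
    router.write(await router.read())  # echo one message back
    print(await dealer.read())
    dealer.close()
    router.close()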
2050a52b9b3065a9d6211cb029a308dce054c28d
|
calexicon/dates/tests/test_bce.py
|
calexicon/dates/tests/test_bce.py
|
import unittest
from calexicon.dates import BCEDate
class TestBCEDate(unittest.TestCase):
def test_make_bce_date(self):
bd = BCEDate(-4713, 1, 1)
self.assertEqual(bd.julian_representation(), (-4713, 1, 1))
def test_equality(self):
self.assertEqual(BCEDate(-44, 3, 15), BCEDate(-44, 3, 15))
|
Add some tests for BCEDate.
|
Add some tests for BCEDate.
|
Python
|
apache-2.0
|
jwg4/calexicon,jwg4/qual
|
Add some tests for BCEDate.
|
import unittest
from calexicon.dates import BCEDate
class TestBCEDate(unittest.TestCase):
def test_make_bce_date(self):
bd = BCEDate(-4713, 1, 1)
self.assertEqual(bd.julian_representation(), (-4713, 1, 1))
def test_equality(self):
self.assertEqual(BCEDate(-44, 3, 15), BCEDate(-44, 3, 15))
|
<commit_before><commit_msg>Add some tests for BCEDate.<commit_after>
|
import unittest
from calexicon.dates import BCEDate
class TestBCEDate(unittest.TestCase):
def test_make_bce_date(self):
bd = BCEDate(-4713, 1, 1)
self.assertEqual(bd.julian_representation(), (-4713, 1, 1))
def test_equality(self):
self.assertEqual(BCEDate(-44, 3, 15), BCEDate(-44, 3, 15))
|
Add some tests for BCEDate.import unittest
from calexicon.dates import BCEDate
class TestBCEDate(unittest.TestCase):
def test_make_bce_date(self):
bd = BCEDate(-4713, 1, 1)
self.assertEqual(bd.julian_representation(), (-4713, 1, 1))
def test_equality(self):
self.assertEqual(BCEDate(-44, 3, 15), BCEDate(-44, 3, 15))
|
<commit_before><commit_msg>Add some tests for BCEDate.<commit_after>import unittest
from calexicon.dates import BCEDate
class TestBCEDate(unittest.TestCase):
def test_make_bce_date(self):
bd = BCEDate(-4713, 1, 1)
self.assertEqual(bd.julian_representation(), (-4713, 1, 1))
def test_equality(self):
self.assertEqual(BCEDate(-44, 3, 15), BCEDate(-44, 3, 15))
|
|
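The test module has no runner guard, so it relies on unittest/pytest discovery; if direct execution is wanted, the usual footer would be:
if __name__ == '__main__':
    unittest.main()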
80f11c95d09a3538a6d67e6a01152fac7d2b6893
|
tests/test_core_modules.py
|
tests/test_core_modules.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import unittest
from i3pystatus.core.modules import IntervalModule
class IntervalModuleMetaTest(unittest.TestCase):
def test_no_settings(self):
class NoSettings(IntervalModule):
pass
self.assertEqual(NoSettings.settings, ('interval',))
def test_no_interval_setting(self):
class NoIntervalSetting(IntervalModule):
settings = (('option', 'desc'),)
self.assertEqual(NoIntervalSetting.settings,
(('option', 'desc'), 'interval'))
def test_settings_with_interval(self):
class SettingsInteval(IntervalModule):
settings = ('option', 'interval')
self.assertEqual(SettingsInteval.settings, ('option', 'interval'))
def test_settings_with_interval_desc(self):
class SetttingsIntervalDesc(IntervalModule):
settings = (('interval', 'desc'),)
self.assertEqual(SetttingsIntervalDesc.settings,
(('interval', 'desc'),))
|
Add tests for adding interval setting automatically
|
Add tests for adding interval setting automatically
|
Python
|
mit
|
teto/i3pystatus,enkore/i3pystatus,enkore/i3pystatus,fmarchenko/i3pystatus,Elder-of-Ozone/i3pystatus,ncoop/i3pystatus,onkelpit/i3pystatus,paulollivier/i3pystatus,Elder-of-Ozone/i3pystatus,eBrnd/i3pystatus,schroeji/i3pystatus,claria/i3pystatus,yang-ling/i3pystatus,opatut/i3pystatus,richese/i3pystatus,Arvedui/i3pystatus,yang-ling/i3pystatus,MaicoTimmerman/i3pystatus,paulollivier/i3pystatus,fmarchenko/i3pystatus,m45t3r/i3pystatus,asmikhailov/i3pystatus,opatut/i3pystatus,drwahl/i3pystatus,plumps/i3pystatus,plumps/i3pystatus,drwahl/i3pystatus,ncoop/i3pystatus,MaicoTimmerman/i3pystatus,teto/i3pystatus,Arvedui/i3pystatus,schroeji/i3pystatus,onkelpit/i3pystatus,asmikhailov/i3pystatus,juliushaertl/i3pystatus,eBrnd/i3pystatus,richese/i3pystatus,juliushaertl/i3pystatus,facetoe/i3pystatus,ismaelpuerto/i3pystatus,m45t3r/i3pystatus,facetoe/i3pystatus,ismaelpuerto/i3pystatus,claria/i3pystatus
|
Add tests for adding interval setting automatically
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import unittest
from i3pystatus.core.modules import IntervalModule
class IntervalModuleMetaTest(unittest.TestCase):
def test_no_settings(self):
class NoSettings(IntervalModule):
pass
self.assertEqual(NoSettings.settings, ('interval',))
def test_no_interval_setting(self):
class NoIntervalSetting(IntervalModule):
settings = (('option', 'desc'),)
self.assertEqual(NoIntervalSetting.settings,
(('option', 'desc'), 'interval'))
def test_settings_with_interval(self):
class SettingsInteval(IntervalModule):
settings = ('option', 'interval')
self.assertEqual(SettingsInteval.settings, ('option', 'interval'))
def test_settings_with_interval_desc(self):
class SetttingsIntervalDesc(IntervalModule):
settings = (('interval', 'desc'),)
self.assertEqual(SetttingsIntervalDesc.settings,
(('interval', 'desc'),))
|
<commit_before><commit_msg>Add tests for adding interval setting automatically<commit_after>
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import unittest
from i3pystatus.core.modules import IntervalModule
class IntervalModuleMetaTest(unittest.TestCase):
def test_no_settings(self):
class NoSettings(IntervalModule):
pass
self.assertEqual(NoSettings.settings, ('interval',))
def test_no_interval_setting(self):
class NoIntervalSetting(IntervalModule):
settings = (('option', 'desc'),)
self.assertEqual(NoIntervalSetting.settings,
(('option', 'desc'), 'interval'))
def test_settings_with_interval(self):
class SettingsInteval(IntervalModule):
settings = ('option', 'interval')
self.assertEqual(SettingsInteval.settings, ('option', 'interval'))
def test_settings_with_interval_desc(self):
class SetttingsIntervalDesc(IntervalModule):
settings = (('interval', 'desc'),)
self.assertEqual(SetttingsIntervalDesc.settings,
(('interval', 'desc'),))
|
Add tests for adding interval setting automatically#!/usr/bin/env python
# -*- coding: utf-8 -*-
import unittest
from i3pystatus.core.modules import IntervalModule
class IntervalModuleMetaTest(unittest.TestCase):
def test_no_settings(self):
class NoSettings(IntervalModule):
pass
self.assertEqual(NoSettings.settings, ('interval',))
def test_no_interval_setting(self):
class NoIntervalSetting(IntervalModule):
settings = (('option', 'desc'),)
self.assertEqual(NoIntervalSetting.settings,
(('option', 'desc'), 'interval'))
def test_settings_with_interval(self):
class SettingsInteval(IntervalModule):
settings = ('option', 'interval')
self.assertEqual(SettingsInteval.settings, ('option', 'interval'))
def test_settings_with_interval_desc(self):
class SetttingsIntervalDesc(IntervalModule):
settings = (('interval', 'desc'),)
self.assertEqual(SetttingsIntervalDesc.settings,
(('interval', 'desc'),))
|
<commit_before><commit_msg>Add tests for adding interval setting automatically<commit_after>#!/usr/bin/env python
# -*- coding: utf-8 -*-
import unittest
from i3pystatus.core.modules import IntervalModule
class IntervalModuleMetaTest(unittest.TestCase):
def test_no_settings(self):
class NoSettings(IntervalModule):
pass
self.assertEqual(NoSettings.settings, ('interval',))
def test_no_interval_setting(self):
class NoIntervalSetting(IntervalModule):
settings = (('option', 'desc'),)
self.assertEqual(NoIntervalSetting.settings,
(('option', 'desc'), 'interval'))
def test_settings_with_interval(self):
class SettingsInteval(IntervalModule):
settings = ('option', 'interval')
self.assertEqual(SettingsInteval.settings, ('option', 'interval'))
def test_settings_with_interval_desc(self):
class SetttingsIntervalDesc(IntervalModule):
settings = (('interval', 'desc'),)
self.assertEqual(SetttingsIntervalDesc.settings,
(('interval', 'desc'),))
|
|
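A minimal sketch of the behavior these tests pin down: a metaclass that appends 'interval' unless some settings entry, bare name or (name, desc) tuple, already supplies it. Illustrative only (Python 3 syntax), not i3pystatus's actual metaclass:
class EnsureInterval(type):
    def __new__(mcs, name, bases, namespace):
        settings = tuple(namespace.get('settings', ()))
        names = [s[0] if isinstance(s, tuple) else s for s in settings]
        if 'interval' not in names:
            settings += ('interval',)  # matches test_no_settings / test_no_interval_setting
        namespace['settings'] = settings
        return super().__new__(mcs, name, bases, namespace)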
bc2314b31a35e586eba14c8305b223de30fd041a
|
switch_mod/hawaii/unserved_load.py
|
switch_mod/hawaii/unserved_load.py
|
"""Add an UnservedLoad component, which ensures the model is always feasible.
This is often useful when the model is constrained to the edge of infeasibility
(e.g., when evaluating a pre-defined, just-feasible construction plan) to avoid
spurious reports of infeasibility."""
from pyomo.environ import *
def define_arguments(argparser):
argparser.add_argument("--unserved-load-penalty", type=float, default=None,
help="Penalty to charge per MWh of unserved load. Usually set high enough to force unserved load to zero (default is $10,000/MWh).")
def define_components(m):
# create an unserved load variable with a high penalty cost,
# to avoid infeasibilities when
# evaluating scenarios that are on the edge of infeasibility
# cost per MWh for unserved load (high)
if m.options.unserved_load_penalty is not None:
# always use penalty factor supplied on the command line, if any
m.unserved_load_penalty_per_mwh = Param(initialize=m.options.unserved_load_penalty)
else:
# no penalty on the command line, use whatever is in the parameter files, or 10000
m.unserved_load_penalty_per_mwh = Param(default=10000)
# amount of unserved load during each timepoint
m.UnservedLoad = Var(m.LOAD_ZONES, m.TIMEPOINTS, within=NonNegativeReals)
# total cost for unserved load
m.UnservedLoadPenalty = Expression(m.TIMEPOINTS, rule=lambda m, tp:
sum(m.UnservedLoad[lz, tp] * m.unserved_load_penalty_per_mwh for lz in m.LOAD_ZONES)
)
# add the unserved load to the model's energy balance
m.LZ_Energy_Components_Produce.append('UnservedLoad')
# add the unserved load penalty to the model's objective function
m.cost_components_tp.append('UnservedLoadPenalty')
|
Define UnservedLoad variable and penalty factor to relax infeasibility when needed.
|
Define UnservedLoad variable and penalty factor to relax infeasibility when needed.
|
Python
|
apache-2.0
|
OCM-Lab-PUC/switch-chile,OCM-Lab-PUC/switch-chile,bmaluenda/switch
|
Define UnservedLoad variable and penalty factor to relax infeasibility when needed.
|
"""Add an UnservedLoad component, which ensures the model is always feasible.
This is often useful when the model is constrained to the edge of infeasibility
(e.g., when evaluating a pre-defined, just-feasible construction plan) to avoid
spurious reports of infeasibility."""
from pyomo.environ import *
def define_arguments(argparser):
argparser.add_argument("--unserved-load-penalty", type=float, default=None,
help="Penalty to charge per MWh of unserved load. Usually set high enough to force unserved load to zero (default is $10,000/MWh).")
def define_components(m):
# create an unserved load variable with a high penalty cost,
# to avoid infeasibilities when
# evaluating scenarios that are on the edge of infeasibility
# cost per MWh for unserved load (high)
if m.options.unserved_load_penalty is not None:
# always use penalty factor supplied on the command line, if any
m.unserved_load_penalty_per_mwh = Param(initialize=m.options.unserved_load_penalty)
else:
# no penalty on the command line, use whatever is in the parameter files, or 10000
m.unserved_load_penalty_per_mwh = Param(default=10000)
# amount of unserved load during each timepoint
m.UnservedLoad = Var(m.LOAD_ZONES, m.TIMEPOINTS, within=NonNegativeReals)
# total cost for unserved load
m.UnservedLoadPenalty = Expression(m.TIMEPOINTS, rule=lambda m, tp:
sum(m.UnservedLoad[lz, tp] * m.unserved_load_penalty_per_mwh for lz in m.LOAD_ZONES)
)
# add the unserved load to the model's energy balance
m.LZ_Energy_Components_Produce.append('UnservedLoad')
# add the unserved load penalty to the model's objective function
m.cost_components_tp.append('UnservedLoadPenalty')
|
<commit_before><commit_msg>Define UnservedLoad variable and penalty factor to relax infeasibility when needed.<commit_after>
|
"""Add an UnservedLoad component, which ensures the model is always feasible.
This is often useful when the model is constrained to the edge of infeasibility
(e.g., when evaluating a pre-defined, just-feasible construction plan) to avoid
spurious reports of infeasibility."""
from pyomo.environ import *
def define_arguments(argparser):
argparser.add_argument("--unserved-load-penalty", type=float, default=None,
help="Penalty to charge per MWh of unserved load. Usually set high enough to force unserved load to zero (default is $10,000/MWh).")
def define_components(m):
# create an unserved load variable with a high penalty cost,
# to avoid infeasibilities when
# evaluating scenarios that are on the edge of infeasibility
# cost per MWh for unserved load (high)
if m.options.unserved_load_penalty is not None:
# always use penalty factor supplied on the command line, if any
m.unserved_load_penalty_per_mwh = Param(initialize=m.options.unserved_load_penalty)
else:
# no penalty on the command line, use whatever is in the parameter files, or 10000
m.unserved_load_penalty_per_mwh = Param(default=10000)
# amount of unserved load during each timepoint
m.UnservedLoad = Var(m.LOAD_ZONES, m.TIMEPOINTS, within=NonNegativeReals)
# total cost for unserved load
m.UnservedLoadPenalty = Expression(m.TIMEPOINTS, rule=lambda m, tp:
sum(m.UnservedLoad[lz, tp] * m.unserved_load_penalty_per_mwh for lz in m.LOAD_ZONES)
)
# add the unserved load to the model's energy balance
m.LZ_Energy_Components_Produce.append('UnservedLoad')
# add the unserved load penalty to the model's objective function
m.cost_components_tp.append('UnservedLoadPenalty')
|
Define UnservedLoad variable and penalty factor to relax infeasibility when needed."""Add an UnservedLoad component, which ensures the model is always feasible.
This is often useful when the model is constrained to the edge of infeasibility
(e.g., when evaluating a pre-defined, just-feasible construction plan) to avoid
spurious reports of infeasibility."""
from pyomo.environ import *
def define_arguments(argparser):
argparser.add_argument("--unserved-load-penalty", type=float, default=None,
help="Penalty to charge per MWh of unserved load. Usually set high enough to force unserved load to zero (default is $10,000/MWh).")
def define_components(m):
# create an unserved load variable with a high penalty cost,
# to avoid infeasibilities when
# evaluating scenarios that are on the edge of infeasibility
# cost per MWh for unserved load (high)
if m.options.unserved_load_penalty is not None:
# always use penalty factor supplied on the command line, if any
m.unserved_load_penalty_per_mwh = Param(initialize=m.options.unserved_load_penalty)
else:
# no penalty on the command line, use whatever is in the parameter files, or 10000
m.unserved_load_penalty_per_mwh = Param(default=10000)
# amount of unserved load during each timepoint
m.UnservedLoad = Var(m.LOAD_ZONES, m.TIMEPOINTS, within=NonNegativeReals)
# total cost for unserved load
m.UnservedLoadPenalty = Expression(m.TIMEPOINTS, rule=lambda m, tp:
sum(m.UnservedLoad[lz, tp] * m.unserved_load_penalty_per_mwh for lz in m.LOAD_ZONES)
)
# add the unserved load to the model's energy balance
m.LZ_Energy_Components_Produce.append('UnservedLoad')
# add the unserved load penalty to the model's objective function
m.cost_components_tp.append('UnservedLoadPenalty')
|
<commit_before><commit_msg>Define UnservedLoad variable and penalty factor to relax infeasibility when needed.<commit_after>"""Add an UnservedLoad component, which ensures the model is always feasible.
This is often useful when the model is constrained to the edge of infeasibility
(e.g., when evaluating a pre-defined, just-feasible construction plan) to avoid
spurious reports of infeasibility."""
from pyomo.environ import *
def define_arguments(argparser):
argparser.add_argument("--unserved-load-penalty", type=float, default=None,
help="Penalty to charge per MWh of unserved load. Usually set high enough to force unserved load to zero (default is $10,000/MWh).")
def define_components(m):
# create an unserved load variable with a high penalty cost,
# to avoid infeasibilities when
# evaluating scenarios that are on the edge of infeasibility
# cost per MWh for unserved load (high)
if m.options.unserved_load_penalty is not None:
# always use penalty factor supplied on the command line, if any
m.unserved_load_penalty_per_mwh = Param(initialize=m.options.unserved_load_penalty)
else:
# no penalty on the command line, use whatever is in the parameter files, or 10000
m.unserved_load_penalty_per_mwh = Param(default=10000)
# amount of unserved load during each timepoint
m.UnservedLoad = Var(m.LOAD_ZONES, m.TIMEPOINTS, within=NonNegativeReals)
# total cost for unserved load
m.UnservedLoadPenalty = Expression(m.TIMEPOINTS, rule=lambda m, tp:
sum(m.UnservedLoad[lz, tp] * m.unserved_load_penalty_per_mwh for lz in m.LOAD_ZONES)
)
# add the unserved load to the model's energy balance
m.LZ_Energy_Components_Produce.append('UnservedLoad')
# add the unserved load penalty to the model's objective function
m.cost_components_tp.append('UnservedLoadPenalty')
|
|
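The None default is what lets the module distinguish "flag omitted" from "flag given"; a standalone argparse sketch of that dispatch:
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--unserved-load-penalty", type=float, default=None)
args = parser.parse_args(["--unserved-load-penalty", "5000"])
penalty = args.unserved_load_penalty if args.unserved_load_penalty is not None else 10000
print(penalty)  # 5000.0 here; omitting the flag falls back to 10000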
306416523edddb454ebb126c79e86a7dc7f2bd37
|
setup.py
|
setup.py
|
#!/usr/bin/env python
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
import base32_crockford
package_data = {
'': ['LICENSE', 'README.rst'],
}
setup(
name='base32-crockford',
version='0.2.0',
description=("A Python implementation of Douglas Crockford's "
"base32 encoding scheme"),
long_description=base32_crockford.__doc__,
license='BSD',
author='Jason Bittel',
author_email='jason.bittel@gmail.com',
url='https://github.com/jbittel/base32-crockford',
download_url='https://github.com/jbittel/base32-crockford/downloads',
py_modules=['base32_crockford'],
package_data=package_data,
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Utilities',
],
)
|
#!/usr/bin/env python
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
import base32_crockford
package_data = {
'': ['LICENSE', 'README.rst'],
}
setup(
name='base32-crockford',
version='0.2.0',
description=("A Python implementation of Douglas Crockford's "
"base32 encoding scheme"),
long_description=base32_crockford.__doc__,
license='BSD',
author='Jason Bittel',
author_email='jason.bittel@gmail.com',
url='https://github.com/jbittel/base32-crockford',
download_url='https://github.com/jbittel/base32-crockford/downloads',
py_modules=['base32_crockford'],
package_data=package_data,
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Utilities',
],
)
|
Add Python 2.6 to the classifiers list
|
Add Python 2.6 to the classifiers list
|
Python
|
bsd-3-clause
|
jbittel/base32-crockford,klaplong/baas32
|
#!/usr/bin/env python
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
import base32_crockford
package_data = {
'': ['LICENSE', 'README.rst'],
}
setup(
name='base32-crockford',
version='0.2.0',
description=("A Python implementation of Douglas Crockford's "
"base32 encoding scheme"),
long_description=base32_crockford.__doc__,
license='BSD',
author='Jason Bittel',
author_email='jason.bittel@gmail.com',
url='https://github.com/jbittel/base32-crockford',
download_url='https://github.com/jbittel/base32-crockford/downloads',
py_modules=['base32_crockford'],
package_data=package_data,
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Utilities',
],
)
Add Python 2.6 to the classifiers list
|
#!/usr/bin/env python
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
import base32_crockford
package_data = {
'': ['LICENSE', 'README.rst'],
}
setup(
name='base32-crockford',
version='0.2.0',
description=("A Python implementation of Douglas Crockford's "
"base32 encoding scheme"),
long_description=base32_crockford.__doc__,
license='BSD',
author='Jason Bittel',
author_email='jason.bittel@gmail.com',
url='https://github.com/jbittel/base32-crockford',
download_url='https://github.com/jbittel/base32-crockford/downloads',
py_modules=['base32_crockford'],
package_data=package_data,
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Utilities',
],
)
|
<commit_before>#!/usr/bin/env python
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
import base32_crockford
package_data = {
'': ['LICENSE', 'README.rst'],
}
setup(
name='base32-crockford',
version='0.2.0',
description=("A Python implementation of Douglas Crockford's "
"base32 encoding scheme"),
long_description=base32_crockford.__doc__,
license='BSD',
author='Jason Bittel',
author_email='jason.bittel@gmail.com',
url='https://github.com/jbittel/base32-crockford',
download_url='https://github.com/jbittel/base32-crockford/downloads',
py_modules=['base32_crockford'],
package_data=package_data,
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Utilities',
],
)
<commit_msg>Add Python 2.6 to the classifiers list<commit_after>
|
#!/usr/bin/env python
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
import base32_crockford
package_data = {
'': ['LICENSE', 'README.rst'],
}
setup(
name='base32-crockford',
version='0.2.0',
description=("A Python implementation of Douglas Crockford's "
"base32 encoding scheme"),
long_description=base32_crockford.__doc__,
license='BSD',
author='Jason Bittel',
author_email='jason.bittel@gmail.com',
url='https://github.com/jbittel/base32-crockford',
download_url='https://github.com/jbittel/base32-crockford/downloads',
py_modules=['base32_crockford'],
package_data=package_data,
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Utilities',
],
)
|
#!/usr/bin/env python
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
import base32_crockford
package_data = {
'': ['LICENSE', 'README.rst'],
}
setup(
name='base32-crockford',
version='0.2.0',
description=("A Python implementation of Douglas Crockford's "
"base32 encoding scheme"),
long_description=base32_crockford.__doc__,
license='BSD',
author='Jason Bittel',
author_email='jason.bittel@gmail.com',
url='https://github.com/jbittel/base32-crockford',
download_url='https://github.com/jbittel/base32-crockford/downloads',
py_modules=['base32_crockford'],
package_data=package_data,
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Utilities',
],
)
Add Python 2.6 to the classifiers list#!/usr/bin/env python
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
import base32_crockford
package_data = {
'': ['LICENSE', 'README.rst'],
}
setup(
name='base32-crockford',
version='0.2.0',
description=("A Python implementation of Douglas Crockford's "
"base32 encoding scheme"),
long_description=base32_crockford.__doc__,
license='BSD',
author='Jason Bittel',
author_email='jason.bittel@gmail.com',
url='https://github.com/jbittel/base32-crockford',
download_url='https://github.com/jbittel/base32-crockford/downloads',
py_modules=['base32_crockford'],
package_data=package_data,
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Utilities',
],
)
|
<commit_before>#!/usr/bin/env python
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
import base32_crockford
package_data = {
'': ['LICENSE', 'README.rst'],
}
setup(
name='base32-crockford',
version='0.2.0',
description=("A Python implementation of Douglas Crockford's "
"base32 encoding scheme"),
long_description=base32_crockford.__doc__,
license='BSD',
author='Jason Bittel',
author_email='jason.bittel@gmail.com',
url='https://github.com/jbittel/base32-crockford',
download_url='https://github.com/jbittel/base32-crockford/downloads',
py_modules=['base32_crockford'],
package_data=package_data,
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Utilities',
],
)
<commit_msg>Add Python 2.6 to the classifiers list<commit_after>#!/usr/bin/env python
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
import base32_crockford
package_data = {
'': ['LICENSE', 'README.rst'],
}
setup(
name='base32-crockford',
version='0.2.0',
description=("A Python implementation of Douglas Crockford's "
"base32 encoding scheme"),
long_description=base32_crockford.__doc__,
license='BSD',
author='Jason Bittel',
author_email='jason.bittel@gmail.com',
url='https://github.com/jbittel/base32-crockford',
download_url='https://github.com/jbittel/base32-crockford/downloads',
py_modules=['base32_crockford'],
package_data=package_data,
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Utilities',
],
)
|
a5bae9a5a9ffa53ab0c7da654e511ab150cb8ac9
|
scripts/create_user.py
|
scripts/create_user.py
|
import sys
from brain.models.sqlobjects import User
from brain.helpers.sql import session_transaction
if len(sys.argv) not in (4, 5):
print("usage: {0} <username> <rmqvhost> <ftpuser> [quota]\n"
" with <username> a string\n"
" <rmqvhost> the rmqvhost used for the frontend\n"
" <ftpuser> the ftpuser used by the frontend\n"
" [quota] the number of file scan quota\n"
"example: {0} test1 mqfrontend frontend"
"".format(sys.argv[0]))
sys.exit(1)
# quota is in number of files (0 means disabled)
quota = int(sys.argv[4]) if len(sys.argv) == 5 else 0
with session_transaction() as session:
user = User(name=sys.argv[1],
rmqvhost=sys.argv[2],
ftpuser=sys.argv[3],
quota=quota)
user.save(session)
|
Add sql user creation script
|
Add sql user creation script
|
Python
|
apache-2.0
|
quarkslab/irma,hirokihamasaki/irma,quarkslab/irma,quarkslab/irma,hirokihamasaki/irma,hirokihamasaki/irma,hirokihamasaki/irma,quarkslab/irma,hirokihamasaki/irma
|
Add sql user creation script
|
import sys
from brain.models.sqlobjects import User
from brain.helpers.sql import session_transaction
if len(sys.argv) not in (4, 5):
print("usage: {0} <username> <rmqvhost> <ftpuser> [quota]\n"
" with <username> a string\n"
" <rmqvhost> the rmqvhost used for the frontend\n"
" <ftpuser> the ftpuser used by the frontend\n"
" [quota] the number of file scan quota\n"
"example: {0} test1 mqfrontend frontend"
"".format(sys.argv[0]))
sys.exit(1)
# quota is in number of files (0 means disabled)
quota = int(sys.argv[4]) if len(sys.argv) == 5 else 0
with session_transaction() as session:
user = User(name=sys.argv[1],
rmqvhost=sys.argv[2],
ftpuser=sys.argv[3],
quota=quota)
user.save(session)
|
<commit_before><commit_msg>Add sql user creation script<commit_after>
|
import sys
from brain.models.sqlobjects import User
from brain.helpers.sql import session_transaction
if len(sys.argv) not in (4, 5):
print("usage: {0} <username> <rmqvhost> <ftpuser> [quota]\n"
" with <username> a string\n"
" <rmqvhost> the rmqvhost used for the frontend\n"
" <ftpuser> the ftpuser used by the frontend\n"
" [quota] the number of file scan quota\n"
"example: {0} test1 mqfrontend frontend"
"".format(sys.argv[0]))
sys.exit(1)
# quota is in number of files (0 means disabled)
quota = int(sys.argv[4]) if len(sys.argv) == 5 else 0
with session_transaction() as session:
user = User(name=sys.argv[1],
rmqvhost=sys.argv[2],
ftpuser=sys.argv[3],
quota=quota)
user.save(session)
|
Add sql user creation scriptimport sys
from brain.models.sqlobjects import User
from brain.helpers.sql import session_transaction
if len(sys.argv) not in (4, 5):
print("usage: {0} <username> <rmqvhost> <ftpuser> [quota]\n"
" with <username> a string\n"
" <rmqvhost> the rmqvhost used for the frontend\n"
" <ftpuser> the ftpuser used by the frontend\n"
" [quota] the number of file scan quota\n"
"example: {0} test1 mqfrontend frontend"
"".format(sys.argv[0]))
sys.exit(1)
# quota is in number of files (0 means disabled)
quota = int(sys.argv[4]) if len(sys.argv) == 5 else 0
with session_transaction() as session:
user = User(name=sys.argv[1],
rmqvhost=sys.argv[2],
ftpuser=sys.argv[3],
quota=quota)
user.save(session)
|
<commit_before><commit_msg>Add sql user creation script<commit_after>import sys
from brain.models.sqlobjects import User
from brain.helpers.sql import session_transaction
if len(sys.argv) not in (4, 5):
print("usage: {0} <username> <rmqvhost> <ftpuser> [quota]\n"
" with <username> a string\n"
" <rmqvhost> the rmqvhost used for the frontend\n"
" <ftpuser> the ftpuser used by the frontend\n"
" [quota] the number of file scan quota\n"
"example: {0} test1 mqfrontend frontend"
"".format(sys.argv[0]))
sys.exit(1)
# quota is in number of files (0 means disabled)
quota = int(sys.argv[4]) if len(sys.argv) == 5 else 0
with session_transaction() as session:
user = User(name=sys.argv[1],
rmqvhost=sys.argv[2],
ftpuser=sys.argv[3],
quota=quota)
user.save(session)
|
|
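A hypothetical invocation matching the usage string, plus the quota parsing in isolation:
#   python create_user.py test1 mqfrontend frontend 50
argv = ["create_user.py", "test1", "mqfrontend", "frontend", "50"]
quota = int(argv[4]) if len(argv) == 5 else 0
print(quota)  # 50; dropping the last argument yields 0 (quota disabled)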
b13ca26da67855f9d32f3209176af052ed46617f
|
wqflask/tests/utility/test_hmac.py
|
wqflask/tests/utility/test_hmac.py
|
# -*- coding: utf-8 -*-
"""Test hmac utility functions"""
import unittest
import mock
from utility.hmac import data_hmac
from utility.hmac import url_for_hmac
from utility.hmac import hmac_creation
class TestHmacUtil(unittest.TestCase):
"""Test Utility method for hmac creation"""
def test_hmac_creation(self):
"""Test hmac creation with a utf-8 string"""
self.assertEqual(hmac_creation("ファイ"), "21fa1d935bbbb07a7875")
def test_data_hmac(self):
"""Test data_hmac fn with a utf-8 string"""
self.assertEqual(data_hmac("ファイ"), "ファイ:21fa1d935bbbb07a7875")
@mock.patch("utility.hmac.url_for")
def test_url_for_hmac_with_plain_url(self, mock_url):
"""Test url_for_hmac without params"""
mock_url.return_value = "https://mock_url.com/ファイ/"
self.assertEqual(url_for_hmac("ファイ"),
"https://mock_url.com/ファイ/?hm=a62896a50d9ffcff7deb")
@mock.patch("utility.hmac.url_for")
def test_url_for_hmac_with_param_in_url(self, mock_url):
"""Test url_for_hmac with params"""
mock_url.return_value = "https://mock_url.com/?ファイ=1"
self.assertEqual(url_for_hmac("ファイ"),
"https://mock_url.com/?ファイ=1&hm=b2128fb28bc32da3b5b7")
|
Add tests for hmac utility
|
Add tests for hmac utility
* wqflask/tests/utility/test_hmac.py: New tests.
|
Python
|
agpl-3.0
|
pjotrp/genenetwork2,zsloan/genenetwork2,zsloan/genenetwork2,genenetwork/genenetwork2,zsloan/genenetwork2,pjotrp/genenetwork2,pjotrp/genenetwork2,zsloan/genenetwork2,genenetwork/genenetwork2,pjotrp/genenetwork2,pjotrp/genenetwork2,genenetwork/genenetwork2,genenetwork/genenetwork2
|
Add tests for hmac utility
* wqflask/tests/utility/test_hmac.py: New tests.
|
# -*- coding: utf-8 -*-
"""Test hmac utility functions"""
import unittest
import mock
from utility.hmac import data_hmac
from utility.hmac import url_for_hmac
from utility.hmac import hmac_creation
class TestHmacUtil(unittest.TestCase):
"""Test Utility method for hmac creation"""
def test_hmac_creation(self):
"""Test hmac creation with a utf-8 string"""
self.assertEqual(hmac_creation("ファイ"), "21fa1d935bbbb07a7875")
def test_data_hmac(self):
"""Test data_hmac fn with a utf-8 string"""
self.assertEqual(data_hmac("ファイ"), "ファイ:21fa1d935bbbb07a7875")
@mock.patch("utility.hmac.url_for")
def test_url_for_hmac_with_plain_url(self, mock_url):
"""Test url_for_hmac without params"""
mock_url.return_value = "https://mock_url.com/ファイ/"
self.assertEqual(url_for_hmac("ファイ"),
"https://mock_url.com/ファイ/?hm=a62896a50d9ffcff7deb")
@mock.patch("utility.hmac.url_for")
def test_url_for_hmac_with_param_in_url(self, mock_url):
"""Test url_for_hmac with params"""
mock_url.return_value = "https://mock_url.com/?ファイ=1"
self.assertEqual(url_for_hmac("ファイ"),
"https://mock_url.com/?ファイ=1&hm=b2128fb28bc32da3b5b7")
|
<commit_before><commit_msg>Add tests for hmac utility
* wqflask/tests/utility/test_hmac.py: New tests.<commit_after>
|
# -*- coding: utf-8 -*-
"""Test hmac utility functions"""
import unittest
import mock
from utility.hmac import data_hmac
from utility.hmac import url_for_hmac
from utility.hmac import hmac_creation
class TestHmacUtil(unittest.TestCase):
"""Test Utility method for hmac creation"""
def test_hmac_creation(self):
"""Test hmac creation with a utf-8 string"""
self.assertEqual(hmac_creation("ファイ"), "21fa1d935bbbb07a7875")
def test_data_hmac(self):
"""Test data_hmac fn with a utf-8 string"""
self.assertEqual(data_hmac("ファイ"), "ファイ:21fa1d935bbbb07a7875")
@mock.patch("utility.hmac.url_for")
def test_url_for_hmac_with_plain_url(self, mock_url):
"""Test url_for_hmac without params"""
mock_url.return_value = "https://mock_url.com/ファイ/"
self.assertEqual(url_for_hmac("ファイ"),
"https://mock_url.com/ファイ/?hm=a62896a50d9ffcff7deb")
@mock.patch("utility.hmac.url_for")
def test_url_for_hmac_with_param_in_url(self, mock_url):
"""Test url_for_hmac with params"""
mock_url.return_value = "https://mock_url.com/?ファイ=1"
self.assertEqual(url_for_hmac("ファイ"),
"https://mock_url.com/?ファイ=1&hm=b2128fb28bc32da3b5b7")
|
Add tests for hmac utility
* wqflask/tests/utility/test_hmac.py: New tests.# -*- coding: utf-8 -*-
"""Test hmac utility functions"""
import unittest
import mock
from utility.hmac import data_hmac
from utility.hmac import url_for_hmac
from utility.hmac import hmac_creation
class TestHmacUtil(unittest.TestCase):
"""Test Utility method for hmac creation"""
def test_hmac_creation(self):
"""Test hmac creation with a utf-8 string"""
self.assertEqual(hmac_creation("ファイ"), "21fa1d935bbbb07a7875")
def test_data_hmac(self):
"""Test data_hmac fn with a utf-8 string"""
self.assertEqual(data_hmac("ファイ"), "ファイ:21fa1d935bbbb07a7875")
@mock.patch("utility.hmac.url_for")
def test_url_for_hmac_with_plain_url(self, mock_url):
"""Test url_for_hmac without params"""
mock_url.return_value = "https://mock_url.com/ファイ/"
self.assertEqual(url_for_hmac("ファイ"),
"https://mock_url.com/ファイ/?hm=a62896a50d9ffcff7deb")
@mock.patch("utility.hmac.url_for")
def test_url_for_hmac_with_param_in_url(self, mock_url):
"""Test url_for_hmac with params"""
mock_url.return_value = "https://mock_url.com/?ファイ=1"
self.assertEqual(url_for_hmac("ファイ"),
"https://mock_url.com/?ファイ=1&hm=b2128fb28bc32da3b5b7")
|
<commit_before><commit_msg>Add tests for hmac utility
* wqflask/tests/utility/test_hmac.py: New tests.<commit_after># -*- coding: utf-8 -*-
"""Test hmac utility functions"""
import unittest
import mock
from utility.hmac import data_hmac
from utility.hmac import url_for_hmac
from utility.hmac import hmac_creation
class TestHmacUtil(unittest.TestCase):
"""Test Utility method for hmac creation"""
def test_hmac_creation(self):
"""Test hmac creation with a utf-8 string"""
self.assertEqual(hmac_creation("ファイ"), "21fa1d935bbbb07a7875")
def test_data_hmac(self):
"""Test data_hmac fn with a utf-8 string"""
self.assertEqual(data_hmac("ファイ"), "ファイ:21fa1d935bbbb07a7875")
@mock.patch("utility.hmac.url_for")
def test_url_for_hmac_with_plain_url(self, mock_url):
"""Test url_for_hmac without params"""
mock_url.return_value = "https://mock_url.com/ファイ/"
self.assertEqual(url_for_hmac("ファイ"),
"https://mock_url.com/ファイ/?hm=a62896a50d9ffcff7deb")
@mock.patch("utility.hmac.url_for")
def test_url_for_hmac_with_param_in_url(self, mock_url):
"""Test url_for_hmac with params"""
mock_url.return_value = "https://mock_url.com/?ファイ=1"
self.assertEqual(url_for_hmac("ファイ"),
"https://mock_url.com/?ファイ=1&hm=b2128fb28bc32da3b5b7")
|
|
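The expected digests are 20 hex characters, consistent with a truncated keyed hash; a generic sketch of that pattern (illustrative only, with a made-up secret, not GeneNetwork's actual hmac_creation):
import hashlib
import hmac
def sign(value, secret=b"not-the-real-key"):
    digest = hmac.new(secret, value.encode("utf-8"), hashlib.sha1).hexdigest()
    return digest[:20]  # truncate to the 20-char form seen above
print(sign("ファイ"))  # 20 hex chars; the exact value depends on the secret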
0eeb5baed7a4417ac083221805842019185ccec0
|
web_page_replay_go/PRESUBMIT.py
|
web_page_replay_go/PRESUBMIT.py
|
# Copyright 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Presubmit script for changes affecting web_page_replay_go/.
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
for more details about the presubmit API built into depot_tools.
"""
def _RunArgs(args, input_api, cwd):
p = input_api.subprocess.Popen(args, stdout=input_api.subprocess.PIPE,
stderr=input_api.subprocess.STDOUT,
cwd=cwd)
out, _ = p.communicate()
return (out, p.returncode)
def _CommonChecks(input_api, output_api):
"""Performs common checks."""
results = []
if input_api.subprocess.call(
"go version",
shell=True,
stdout=input_api.subprocess.PIPE,
stderr=input_api.subprocess.PIPE) != 0:
results.append(output_api.PresubmitPromptOrNotify(
'go binary is not found. Make sure to run unit tests if you change any '
'Go files.'))
return results
wpr_dir = input_api.PresubmitLocalPath()
cmd = ['go', 'test', 'webpagereplay']
out, return_code = _RunArgs(cmd, input_api, wpr_dir)
if return_code:
results.append(output_api.PresubmitError(
'webpagereplay tests failed.', long_text=out))
print out
return results
def CheckChangeOnUpload(input_api, output_api):
report = []
report.extend(_CommonChecks(input_api, output_api))
return report
def CheckChangeOnCommit(input_api, output_api):
report = []
report.extend(_CommonChecks(input_api, output_api))
return report
|
Add a presubmit script to run tests
|
[wpr-go] Add a presubmit script to run tests
Eventually, we would like to run webpagereplay tests on catapult continuous
build bots. For the time being, we run them as a presubmit step.
This is patched from nednguyen@'s CL at http://crrev.com/2989573002#ps1.
BUG=catapult:#3669
Review-Url: https://codereview.chromium.org/2998733002
|
Python
|
bsd-3-clause
|
catapult-project/catapult,catapult-project/catapult,catapult-project/catapult,catapult-project/catapult,catapult-project/catapult,catapult-project/catapult,catapult-project/catapult
|
[wpr-go] Add a presubmit script to run tests
Eventually, we would like to run webpagereplay tests on catapult continuous
build bots. For the time being, we run them as a presubmit step.
This is patched from nednguyen@'s CL at http://crrev.com/2989573002#ps1.
BUG=catapult:#3669
Review-Url: https://codereview.chromium.org/2998733002
|
# Copyright 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Presubmit script for changes affecting web_page_replay_go/.
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
for more details about the presubmit API built into depot_tools.
"""
def _RunArgs(args, input_api, cwd):
p = input_api.subprocess.Popen(args, stdout=input_api.subprocess.PIPE,
stderr=input_api.subprocess.STDOUT,
cwd=cwd)
out, _ = p.communicate()
return (out, p.returncode)
def _CommonChecks(input_api, output_api):
"""Performs common checks."""
results = []
if input_api.subprocess.call(
"go version",
shell=True,
stdout=input_api.subprocess.PIPE,
stderr=input_api.subprocess.PIPE) != 0:
results.append(output_api.PresubmitPromptOrNotify(
'go binary is not found. Make sure to run unit tests if you change any '
'Go files.'))
return results
wpr_dir = input_api.PresubmitLocalPath()
cmd = ['go', 'test', 'webpagereplay']
out, return_code = _RunArgs(cmd, input_api, wpr_dir)
if return_code:
results.append(output_api.PresubmitError(
'webpagereplay tests failed.', long_text=out))
print out
return results
def CheckChangeOnUpload(input_api, output_api):
report = []
report.extend(_CommonChecks(input_api, output_api))
return report
def CheckChangeOnCommit(input_api, output_api):
report = []
report.extend(_CommonChecks(input_api, output_api))
return report
|
<commit_before><commit_msg>[wpr-go] Add a presubmit script to run tests
Eventually, we would like to run webpagereplay tests on catapult continuous
build bots. For the time being, we run them as a presubmit step.
This is patched from nednguyen@'s CL at http://crrev.com/2989573002#ps1.
BUG=catapult:#3669
Review-Url: https://codereview.chromium.org/2998733002<commit_after>
|
# Copyright 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Presubmit script for changes affecting web_page_replay_go/.
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
for more details about the presubmit API built into depot_tools.
"""
def _RunArgs(args, input_api, cwd):
p = input_api.subprocess.Popen(args, stdout=input_api.subprocess.PIPE,
stderr=input_api.subprocess.STDOUT,
cwd=cwd)
out, _ = p.communicate()
return (out, p.returncode)
def _CommonChecks(input_api, output_api):
"""Performs common checks."""
results = []
if input_api.subprocess.call(
"go version",
shell=True,
stdout=input_api.subprocess.PIPE,
stderr=input_api.subprocess.PIPE) != 0:
results.append(output_api.PresubmitPromptOrNotify(
'go binary is not found. Make sure to run unit tests if you change any '
'Go files.'))
return results
wpr_dir = input_api.PresubmitLocalPath()
cmd = ['go', 'test', 'webpagereplay']
out, return_code = _RunArgs(cmd, input_api, wpr_dir)
if return_code:
results.append(output_api.PresubmitError(
'webpagereplay tests failed.', long_text=out))
print out
return results
def CheckChangeOnUpload(input_api, output_api):
report = []
report.extend(_CommonChecks(input_api, output_api))
return report
def CheckChangeOnCommit(input_api, output_api):
report = []
report.extend(_CommonChecks(input_api, output_api))
return report
|
[wpr-go] Add a presubmit script to run tests
Eventually, we would like to run webpagereplay tests on catapult continuous
build bots. For the time being, we run them as a presubmit step.
This is patched from nednguyen@'s CL at http://crrev.com/2989573002#ps1.
BUG=catapult:#3669
Review-Url: https://codereview.chromium.org/2998733002# Copyright 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Presubmit script for changes affecting web_page_replay_go/.
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
for more details about the presubmit API built into depot_tools.
"""
def _RunArgs(args, input_api, cwd):
p = input_api.subprocess.Popen(args, stdout=input_api.subprocess.PIPE,
stderr=input_api.subprocess.STDOUT,
cwd=cwd)
out, _ = p.communicate()
return (out, p.returncode)
def _CommonChecks(input_api, output_api):
"""Performs common checks."""
results = []
if input_api.subprocess.call(
"go version",
shell=True,
stdout=input_api.subprocess.PIPE,
stderr=input_api.subprocess.PIPE) != 0:
results.append(output_api.PresubmitPromptOrNotify(
'go binary is not found. Make sure to run unit tests if you change any '
'Go files.'))
return results
wpr_dir = input_api.PresubmitLocalPath()
cmd = ['go', 'test', 'webpagereplay']
out, return_code = _RunArgs(cmd, input_api, wpr_dir)
if return_code:
results.append(output_api.PresubmitError(
'webpagereplay tests failed.', long_text=out))
print out
return results
def CheckChangeOnUpload(input_api, output_api):
report = []
report.extend(_CommonChecks(input_api, output_api))
return report
def CheckChangeOnCommit(input_api, output_api):
report = []
report.extend(_CommonChecks(input_api, output_api))
return report
|
<commit_before><commit_msg>[wpr-go] Add a presubmit script to run tests
Eventually, we would like to run webpagereplay tests on catapult continuous
build bots. For the time being, we run them as a presubmit step.
This is patched from nednguyen@'s CL at http://crrev.com/2989573002#ps1.
BUG=catapult:#3669
Review-Url: https://codereview.chromium.org/2998733002<commit_after># Copyright 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Presubmit script for changes affecting web_page_replay_go/.
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
for more details about the presubmit API built into depot_tools.
"""
def _RunArgs(args, input_api, cwd):
p = input_api.subprocess.Popen(args, stdout=input_api.subprocess.PIPE,
stderr=input_api.subprocess.STDOUT,
cwd=cwd)
out, _ = p.communicate()
return (out, p.returncode)
def _CommonChecks(input_api, output_api):
"""Performs common checks."""
results = []
if input_api.subprocess.call(
"go version",
shell=True,
stdout=input_api.subprocess.PIPE,
stderr=input_api.subprocess.PIPE) != 0:
results.append(output_api.PresubmitPromptOrNotify(
'go binary is not found. Make sure to run unit tests if you change any '
'Go files.'))
return results
wpr_dir = input_api.PresubmitLocalPath()
cmd = ['go', 'test', 'webpagereplay']
out, return_code = _RunArgs(cmd, input_api, wpr_dir)
if return_code:
results.append(output_api.PresubmitError(
'webpagereplay tests failed.', long_text=out))
print out
return results
def CheckChangeOnUpload(input_api, output_api):
report = []
report.extend(_CommonChecks(input_api, output_api))
return report
def CheckChangeOnCommit(input_api, output_api):
report = []
report.extend(_CommonChecks(input_api, output_api))
return report
|
|
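Outside depot_tools, _RunArgs reduces to plain subprocess usage; a standalone equivalent (needs a go toolchain on PATH for this particular command):
import subprocess
proc = subprocess.Popen(["go", "version"], stdout=subprocess.PIPE,
                        stderr=subprocess.STDOUT)
out, _ = proc.communicate()
print(proc.returncode, out)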
a9853ca2bbaa5a1c3a36e0c03aa08a2279d1b1aa
|
hashing_passwords.py
|
hashing_passwords.py
|
# coding: utf8
"""
Securely hash and check passwords using PBKDF2.
Use random salts to protect against rainbow tables, many iterations against
brute-force, and constant-time comparison against timing attacks.
Keep parameters to the algorithm together with the hash so that we can
change the parameters and keep older hashes working.
See more details at http://exyr.org/2011/hashing-passwords/
Author: Simon Sapin
License: BSD
"""
import hashlib
from os import urandom
from base64 import b64encode, b64decode
from itertools import izip
# From https://github.com/mitsuhiko/python-pbkdf2
from pbkdf2 import pbkdf2_bin
# Parameters to PBKDF2. Only affect new passwords.
SALT_LENGTH = 12
KEY_LENGTH = 24
HASH_FUNCTION = 'sha256' # Must be in hashlib.
# Linear to the hashing time. Adjust to be high but take a reasonable
# amount of time on your server. Measure with:
# python -m timeit -s 'import passwords as p' 'p.make_hash("something")'
COST_FACTOR = 10000
def make_hash(password):
"""Generate a random salt and return a new hash for the password."""
if isinstance(password, unicode):
password = password.encode('utf-8')
salt = b64encode(urandom(SALT_LENGTH))
return 'PBKDF2${}${}${}${}'.format(
HASH_FUNCTION,
COST_FACTOR,
salt,
b64encode(pbkdf2_bin(password, salt, COST_FACTOR, KEY_LENGTH,
getattr(hashlib, HASH_FUNCTION))))
def check_hash(password, hash_):
"""Check a password against an existing hash."""
if isinstance(password, unicode):
password = password.encode('utf-8')
algorithm, hash_function, cost_factor, salt, hash_a = hash_.split('$')
assert algorithm == 'PBKDF2'
hash_a = b64decode(hash_a)
hash_b = pbkdf2_bin(password, salt, int(cost_factor), len(hash_a),
getattr(hashlib, hash_function))
assert len(hash_a) == len(hash_b) # we requested this from pbkdf2_bin()
# Same as "return hash_a == hash_b" but takes a constant time.
# See http://carlos.bueno.org/2011/10/timing.html
diff = 0
for char_a, char_b in izip(hash_a, hash_b):
diff |= ord(char_a) ^ ord(char_b)
return diff == 0
|
Add some glue code to hash salted passwords with PBKDF2.
|
Add some glue code to hash salted passwords with PBKDF2.
|
Python
|
bsd-3-clause
|
SimonSapin/snippets,SimonSapin/snippets
|
Add some glue code to hash salted passwords with PBKDF2.
|
# coding: utf8
"""
Securely hash and check passwords using PBKDF2.
Use random salts to protect against rainbow tables, many iterations against
brute-force, and constant-time comparison against timing attacks.
Keep parameters to the algorithm together with the hash so that we can
change the parameters and keep older hashes working.
See more details at http://exyr.org/2011/hashing-passwords/
Author: Simon Sapin
License: BSD
"""
import hashlib
from os import urandom
from base64 import b64encode, b64decode
from itertools import izip
# From https://github.com/mitsuhiko/python-pbkdf2
from pbkdf2 import pbkdf2_bin
# Parameters to PBKDF2. Only affect new passwords.
SALT_LENGTH = 12
KEY_LENGTH = 24
HASH_FUNCTION = 'sha256' # Must be in hashlib.
# Linear to the hashing time. Adjust to be high but take a reasonable
# amount of time on your server. Measure with:
# python -m timeit -s 'import passwords as p' 'p.make_hash("something")'
COST_FACTOR = 10000
def make_hash(password):
"""Generate a random salt and return a new hash for the password."""
if isinstance(password, unicode):
password = password.encode('utf-8')
salt = b64encode(urandom(SALT_LENGTH))
return 'PBKDF2${}${}${}${}'.format(
HASH_FUNCTION,
COST_FACTOR,
salt,
b64encode(pbkdf2_bin(password, salt, COST_FACTOR, KEY_LENGTH,
getattr(hashlib, HASH_FUNCTION))))
def check_hash(password, hash_):
"""Check a password against an existing hash."""
if isinstance(password, unicode):
password = password.encode('utf-8')
algorithm, hash_function, cost_factor, salt, hash_a = hash_.split('$')
assert algorithm == 'PBKDF2'
hash_a = b64decode(hash_a)
hash_b = pbkdf2_bin(password, salt, int(cost_factor), len(hash_a),
getattr(hashlib, hash_function))
assert len(hash_a) == len(hash_b) # we requested this from pbkdf2_bin()
# Same as "return hash_a == hash_b" but takes a constant time.
# See http://carlos.bueno.org/2011/10/timing.html
diff = 0
for char_a, char_b in izip(hash_a, hash_b):
diff |= ord(char_a) ^ ord(char_b)
return diff == 0
|
<commit_before><commit_msg>Add some glue code to hash salted passwords with PBKDF2.<commit_after>
|
# coding: utf8
"""
Securely hash and check passwords using PBKDF2.
Use random salts to protect against rainbow tables, many iterations against
brute-force, and constant-time comparison against timing attacks.
Keep parameters to the algorithm together with the hash so that we can
change the parameters and keep older hashes working.
See more details at http://exyr.org/2011/hashing-passwords/
Author: Simon Sapin
License: BSD
"""
import hashlib
from os import urandom
from base64 import b64encode, b64decode
from itertools import izip
# From https://github.com/mitsuhiko/python-pbkdf2
from pbkdf2 import pbkdf2_bin
# Parameters to PBKDF2. Only affect new passwords.
SALT_LENGTH = 12
KEY_LENGTH = 24
HASH_FUNCTION = 'sha256' # Must be in hashlib.
# Linear to the hashing time. Adjust to be high but take a reasonable
# amount of time on your server. Measure with:
# python -m timeit -s 'import passwords as p' 'p.make_hash("something")'
COST_FACTOR = 10000
def make_hash(password):
"""Generate a random salt and return a new hash for the password."""
if isinstance(password, unicode):
password = password.encode('utf-8')
salt = b64encode(urandom(SALT_LENGTH))
return 'PBKDF2${}${}${}${}'.format(
HASH_FUNCTION,
COST_FACTOR,
salt,
b64encode(pbkdf2_bin(password, salt, COST_FACTOR, KEY_LENGTH,
getattr(hashlib, HASH_FUNCTION))))
def check_hash(password, hash_):
"""Check a password against an existing hash."""
if isinstance(password, unicode):
password = password.encode('utf-8')
algorithm, hash_function, cost_factor, salt, hash_a = hash_.split('$')
assert algorithm == 'PBKDF2'
hash_a = b64decode(hash_a)
hash_b = pbkdf2_bin(password, salt, int(cost_factor), len(hash_a),
getattr(hashlib, hash_function))
assert len(hash_a) == len(hash_b) # we requested this from pbkdf2_bin()
# Same as "return hash_a == hash_b" but takes a constant time.
# See http://carlos.bueno.org/2011/10/timing.html
diff = 0
for char_a, char_b in izip(hash_a, hash_b):
diff |= ord(char_a) ^ ord(char_b)
return diff == 0
|
Add some glue code to hash salted passwords with PBKDF2.# coding: utf8
"""
Securely hash and check passwords using PBKDF2.
Use random salts to protect against rainbow tables, many iterations against
brute-force, and constant-time comparison against timing attacks.
Keep parameters to the algorithm together with the hash so that we can
change the parameters and keep older hashes working.
See more details at http://exyr.org/2011/hashing-passwords/
Author: Simon Sapin
License: BSD
"""
import hashlib
from os import urandom
from base64 import b64encode, b64decode
from itertools import izip
# From https://github.com/mitsuhiko/python-pbkdf2
from pbkdf2 import pbkdf2_bin
# Parameters to PBKDF2. Only affect new passwords.
SALT_LENGTH = 12
KEY_LENGTH = 24
HASH_FUNCTION = 'sha256' # Must be in hashlib.
# Linear to the hashing time. Adjust to be high but take a reasonable
# amount of time on your server. Measure with:
# python -m timeit -s 'import passwords as p' 'p.make_hash("something")'
COST_FACTOR = 10000
def make_hash(password):
"""Generate a random salt and return a new hash for the password."""
if isinstance(password, unicode):
password = password.encode('utf-8')
salt = b64encode(urandom(SALT_LENGTH))
return 'PBKDF2${}${}${}${}'.format(
HASH_FUNCTION,
COST_FACTOR,
salt,
b64encode(pbkdf2_bin(password, salt, COST_FACTOR, KEY_LENGTH,
getattr(hashlib, HASH_FUNCTION))))
def check_hash(password, hash_):
"""Check a password against an existing hash."""
if isinstance(password, unicode):
password = password.encode('utf-8')
algorithm, hash_function, cost_factor, salt, hash_a = hash_.split('$')
assert algorithm == 'PBKDF2'
hash_a = b64decode(hash_a)
hash_b = pbkdf2_bin(password, salt, int(cost_factor), len(hash_a),
getattr(hashlib, hash_function))
assert len(hash_a) == len(hash_b) # we requested this from pbkdf2_bin()
# Same as "return hash_a == hash_b" but takes a constant time.
# See http://carlos.bueno.org/2011/10/timing.html
diff = 0
for char_a, char_b in izip(hash_a, hash_b):
diff |= ord(char_a) ^ ord(char_b)
return diff == 0
|
<commit_before><commit_msg>Add some glue code to hash salted passwords with PBKDF2.<commit_after># coding: utf8
"""
Securely hash and check passwords using PBKDF2.
Use random salts to protect against rainbow tables, many iterations against
brute-force, and constant-time comparison against timing attacks.
Keep parameters to the algorithm together with the hash so that we can
change the parameters and keep older hashes working.
See more details at http://exyr.org/2011/hashing-passwords/
Author: Simon Sapin
License: BSD
"""
import hashlib
from os import urandom
from base64 import b64encode, b64decode
from itertools import izip
# From https://github.com/mitsuhiko/python-pbkdf2
from pbkdf2 import pbkdf2_bin
# Parameters to PBKDF2. Only affect new passwords.
SALT_LENGTH = 12
KEY_LENGTH = 24
HASH_FUNCTION = 'sha256' # Must be in hashlib.
# Linear to the hashing time. Adjust to be high but take a reasonable
# amount of time on your server. Measure with:
# python -m timeit -s 'import passwords as p' 'p.make_hash("something")'
COST_FACTOR = 10000
def make_hash(password):
"""Generate a random salt and return a new hash for the password."""
if isinstance(password, unicode):
password = password.encode('utf-8')
salt = b64encode(urandom(SALT_LENGTH))
return 'PBKDF2${}${}${}${}'.format(
HASH_FUNCTION,
COST_FACTOR,
salt,
b64encode(pbkdf2_bin(password, salt, COST_FACTOR, KEY_LENGTH,
getattr(hashlib, HASH_FUNCTION))))
def check_hash(password, hash_):
"""Check a password against an existing hash."""
if isinstance(password, unicode):
password = password.encode('utf-8')
algorithm, hash_function, cost_factor, salt, hash_a = hash_.split('$')
assert algorithm == 'PBKDF2'
hash_a = b64decode(hash_a)
hash_b = pbkdf2_bin(password, salt, int(cost_factor), len(hash_a),
getattr(hashlib, hash_function))
assert len(hash_a) == len(hash_b) # we requested this from pbkdf2_bin()
# Same as "return hash_a == hash_b" but takes a constant time.
# See http://carlos.bueno.org/2011/10/timing.html
diff = 0
for char_a, char_b in izip(hash_a, hash_b):
diff |= ord(char_a) ^ ord(char_b)
return diff == 0
|
|
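A round-trip of the module above, as a hedged sketch: it assumes Python 2 (the code relies on unicode and itertools.izip), that the file is saved as hashing_passwords.py, and that the pbkdf2 package from the linked repository is installed. Because the parameters travel inside the hash string, COST_FACTOR can be raised later without invalidating stored hashes.

# Python 2 only; assumes hashing_passwords.py (above) and the pbkdf2 package.
from hashing_passwords import make_hash, check_hash

stored = make_hash(u'correct horse battery staple')
print stored.split('$')[:3]   # ['PBKDF2', 'sha256', '10000'] -- parameters travel with the hash
assert check_hash(u'correct horse battery staple', stored)
assert not check_hash(u'wrong guess', stored)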
9074375cd27de596eb5d697263b0a1096725a3a8
|
hbase/hbase/tests.py
|
hbase/hbase/tests.py
|
# -*- coding: utf-8 -*-
from hbase.compat import PY26
from hbase.compat import comp_bytes
import mock
if PY26: # pragma: no cover
import unittest2 as unittest
else:
import unittest
class TableListTests(unittest.TestCase):
def _make_one(self):
from hbase import HBase
return HBase('localhost', '8080')
@mock.patch("hbase.compat.urlopen")
def test_query_tables_list(self, urlopen):
urlopen().readall.return_value = b'{"table":[{"name":"test"}]}'
hb_con = self._make_one()
self.assertEqual(list(hb_con.get_tables()), ['test'])
@mock.patch("hbase.compat.urlopen")
def test_query_version_information(self, urlopen):
urlopen().readall.return_value = comp_bytes(
','.join(['{"JVM":"Oracle Corporation 1.7.0_07-23.3-b01"',
'"Jersey":"1.8"',
'"OS":"Linux 3.4.2-x86_64-linode25 amd64"',
'"REST":"0.0.2","Server":"jetty/6.1.26"}']), 'utf-8')
hb_con = self._make_one()
self.assertEqual(hb_con.get_version(),
{"JVM": "Oracle Corporation 1.7.0_07-23.3-b01",
"Jersey": "1.8",
"OS": "Linux 3.4.2-x86_64-linode25 amd64",
"REST": "0.0.2",
"Server": "jetty/6.1.26"})
|
Revert "haven't found a sensible way to test things"
|
Revert "haven't found a sensible way to test things"
This reverts commit 80821d7f46e0072f9df586a14b3d69fc85c216e1.
|
Python
|
bsd-3-clause
|
fuzzy-id/midas,fuzzy-id/midas,fuzzy-id/midas
|
Revert "haven't found a sensible way to test things"
This reverts commit 80821d7f46e0072f9df586a14b3d69fc85c216e1.
|
# -*- coding: utf-8 -*-
from hbase.compat import PY26
from hbase.compat import comp_bytes
import mock
if PY26: # pragma: no cover
import unittest2 as unittest
else:
import unittest
class TableListTests(unittest.TestCase):
def _make_one(self):
from hbase import HBase
return HBase('localhost', '8080')
@mock.patch("hbase.compat.urlopen")
def test_query_tables_list(self, urlopen):
urlopen().readall.return_value = b'{"table":[{"name":"test"}]}'
hb_con = self._make_one()
self.assertEqual(list(hb_con.get_tables()), ['test'])
@mock.patch("hbase.compat.urlopen")
def test_query_version_information(self, urlopen):
urlopen().readall.return_value = comp_bytes(
','.join(['{"JVM":"Oracle Corporation 1.7.0_07-23.3-b01"',
'"Jersey":"1.8"',
'"OS":"Linux 3.4.2-x86_64-linode25 amd64"',
'"REST":"0.0.2","Server":"jetty/6.1.26"}']), 'utf-8')
hb_con = self._make_one()
self.assertEqual(hb_con.get_version(),
{"JVM": "Oracle Corporation 1.7.0_07-23.3-b01",
"Jersey": "1.8",
"OS": "Linux 3.4.2-x86_64-linode25 amd64",
"REST": "0.0.2",
"Server": "jetty/6.1.26"})
|
<commit_before><commit_msg>Revert "haven't found a sensible way to test things"
This reverts commit 80821d7f46e0072f9df586a14b3d69fc85c216e1.<commit_after>
|
# -*- coding: utf-8 -*-
from hbase.compat import PY26
from hbase.compat import comp_bytes
import mock
if PY26: # pragma: no cover
import unittest2 as unittest
else:
import unittest
class TableListTests(unittest.TestCase):
def _make_one(self):
from hbase import HBase
return HBase('localhost', '8080')
@mock.patch("hbase.compat.urlopen")
def test_query_tables_list(self, urlopen):
urlopen().readall.return_value = b'{"table":[{"name":"test"}]}'
hb_con = self._make_one()
self.assertEqual(list(hb_con.get_tables()), ['test'])
@mock.patch("hbase.compat.urlopen")
def test_query_version_information(self, urlopen):
urlopen().readall.return_value = comp_bytes(
','.join(['{"JVM":"Oracle Corporation 1.7.0_07-23.3-b01"',
'"Jersey":"1.8"',
'"OS":"Linux 3.4.2-x86_64-linode25 amd64"',
'"REST":"0.0.2","Server":"jetty/6.1.26"}']), 'utf-8')
hb_con = self._make_one()
self.assertEqual(hb_con.get_version(),
{"JVM": "Oracle Corporation 1.7.0_07-23.3-b01",
"Jersey": "1.8",
"OS": "Linux 3.4.2-x86_64-linode25 amd64",
"REST": "0.0.2",
"Server": "jetty/6.1.26"})
|
Revert "haven't found a sensible way to test things"
This reverts commit 80821d7f46e0072f9df586a14b3d69fc85c216e1.# -*- coding: utf-8 -*-
from hbase.compat import PY26
from hbase.compat import comp_bytes
import mock
if PY26: # pragma: no cover
import unittest2 as unittest
else:
import unittest
class TableListTests(unittest.TestCase):
def _make_one(self):
from hbase import HBase
return HBase('localhost', '8080')
@mock.patch("hbase.compat.urlopen")
def test_query_tables_list(self, urlopen):
urlopen().readall.return_value = b'{"table":[{"name":"test"}]}'
hb_con = self._make_one()
self.assertEqual(list(hb_con.get_tables()), ['test'])
@mock.patch("hbase.compat.urlopen")
def test_query_version_information(self, urlopen):
urlopen().readall.return_value = comp_bytes(
','.join(['{"JVM":"Oracle Corporation 1.7.0_07-23.3-b01"',
'"Jersey":"1.8"',
'"OS":"Linux 3.4.2-x86_64-linode25 amd64"',
'"REST":"0.0.2","Server":"jetty/6.1.26"}']), 'utf-8')
hb_con = self._make_one()
self.assertEqual(hb_con.get_version(),
{"JVM": "Oracle Corporation 1.7.0_07-23.3-b01",
"Jersey": "1.8",
"OS": "Linux 3.4.2-x86_64-linode25 amd64",
"REST": "0.0.2",
"Server": "jetty/6.1.26"})
|
<commit_before><commit_msg>Revert "haven't found a sensible way to test things"
This reverts commit 80821d7f46e0072f9df586a14b3d69fc85c216e1.<commit_after># -*- coding: utf-8 -*-
from hbase.compat import PY26
from hbase.compat import comp_bytes
import mock
if PY26: # pragma: no cover
import unittest2 as unittest
else:
import unittest
class TableListTests(unittest.TestCase):
def _make_one(self):
from hbase import HBase
return HBase('localhost', '8080')
@mock.patch("hbase.compat.urlopen")
def test_query_tables_list(self, urlopen):
urlopen().readall.return_value = b'{"table":[{"name":"test"}]}'
hb_con = self._make_one()
self.assertEqual(list(hb_con.get_tables()), ['test'])
@mock.patch("hbase.compat.urlopen")
def test_query_version_information(self, urlopen):
urlopen().readall.return_value = comp_bytes(
','.join(['{"JVM":"Oracle Corporation 1.7.0_07-23.3-b01"',
'"Jersey":"1.8"',
'"OS":"Linux 3.4.2-x86_64-linode25 amd64"',
'"REST":"0.0.2","Server":"jetty/6.1.26"}']), 'utf-8')
hb_con = self._make_one()
self.assertEqual(hb_con.get_version(),
{"JVM": "Oracle Corporation 1.7.0_07-23.3-b01",
"Jersey": "1.8",
"OS": "Linux 3.4.2-x86_64-linode25 amd64",
"REST": "0.0.2",
"Server": "jetty/6.1.26"})
|
|
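The core trick in these tests, shown standalone: patch or hand in the name the code under test actually resolves (hbase.compat.urlopen), not the stdlib's own. get_tables below is hypothetical; only the Mock wiring mirrors the tests above.

import json
try:
    from unittest import mock   # Python 3.3+
except ImportError:
    import mock                 # the backport the tests above import

def get_tables(urlopen):
    # Hypothetical code under test: fetch and decode a JSON table listing.
    raw = urlopen('http://localhost:8080/').readall()
    return [t['name'] for t in json.loads(raw.decode('utf-8'))['table']]

fake = mock.Mock()
fake.return_value.readall.return_value = b'{"table":[{"name":"test"}]}'
assert get_tables(fake) == ['test']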
3090f80fb75e28d76a2a9f5e25c507d095a695c8
|
middleware/python/test_auth_middleware.py
|
middleware/python/test_auth_middleware.py
|
from tyk.decorators import *
from gateway import TykGateway as tyk
@CustomKeyCheck
def MyKeyCheck(request, session, metadata, spec):
print("Running MyKeyCheck?")
print("request:", request)
print("session:", session)
print("spec:", spec)
valid_token = 'aaf4c61ddcc5e8a2dabede0f3b482cd9aea9434d'
request_token = request.get_header('Authorization')
if request_token == valid_token:
print("Token is OK")
session.rate = 1000
session.per = 1
metadata['token'] = "mytoken"
else:
print("Token is WRONG")
request.return_overrides = { 'response_code': 401, 'response_error': 'Not authorized (by the Python middleware)' }
return request, session, metadata
|
from tyk.decorators import *
from gateway import TykGateway as tyk
@CustomKeyCheck
def MyKeyCheck(request, session, metadata, spec):
print("Running MyKeyCheck?")
print("request:", request)
print("session:", session)
print("spec:", spec)
valid_token = 'aaf4c61ddcc5e8a2dabede0f3b482cd9aea9434d'
request_token = request.get_header('Authorization')
print("(python) request_token =", request_token)
if request_token == valid_token:
print("Token is OK")
session.rate = 1000.0
session.per = 1.0
metadata['token'] = "mytoken"
else:
print("Token is WRONG")
request.return_overrides = { 'response_code': 401, 'response_error': 'Not authorized (by the Python middleware)' }
return request, session, metadata
|
Use float in session fields
|
Use float in session fields
|
Python
|
mpl-2.0
|
nebolsin/tyk,nebolsin/tyk,mvdan/tyk,lonelycode/tyk,mvdan/tyk,lonelycode/tyk,nebolsin/tyk,nebolsin/tyk,mvdan/tyk,mvdan/tyk,mvdan/tyk,mvdan/tyk,lonelycode/tyk,nebolsin/tyk,mvdan/tyk,nebolsin/tyk,nebolsin/tyk,nebolsin/tyk,mvdan/tyk
|
from tyk.decorators import *
from gateway import TykGateway as tyk
@CustomKeyCheck
def MyKeyCheck(request, session, metadata, spec):
print("Running MyKeyCheck?")
print("request:", request)
print("session:", session)
print("spec:", spec)
valid_token = 'aaf4c61ddcc5e8a2dabede0f3b482cd9aea9434d'
request_token = request.get_header('Authorization')
if request_token == valid_token:
print("Token is OK")
session.rate = 1000
session.per = 1
metadata['token'] = "mytoken"
else:
print("Token is WRONG")
request.return_overrides = { 'response_code': 401, 'response_error': 'Not authorized (by the Python middleware)' }
return request, session, metadata
Use float in session fields
|
from tyk.decorators import *
from gateway import TykGateway as tyk
@CustomKeyCheck
def MyKeyCheck(request, session, metadata, spec):
print("Running MyKeyCheck?")
print("request:", request)
print("session:", session)
print("spec:", spec)
valid_token = 'aaf4c61ddcc5e8a2dabede0f3b482cd9aea9434d'
request_token = request.get_header('Authorization')
print("(python) request_token =", request_token)
if request_token == valid_token:
print("Token is OK")
session.rate = 1000.0
session.per = 1.0
metadata['token'] = "mytoken"
else:
print("Token is WRONG")
request.return_overrides = { 'response_code': 401, 'response_error': 'Not authorized (by the Python middleware)' }
return request, session, metadata
|
<commit_before>from tyk.decorators import *
from gateway import TykGateway as tyk
@CustomKeyCheck
def MyKeyCheck(request, session, metadata, spec):
print("Running MyKeyCheck?")
print("request:", request)
print("session:", session)
print("spec:", spec)
valid_token = 'aaf4c61ddcc5e8a2dabede0f3b482cd9aea9434d'
request_token = request.get_header('Authorization')
if request_token == valid_token:
print("Token is OK")
session.rate = 1000
session.per = 1
metadata['token'] = "mytoken"
else:
print("Token is WRONG")
request.return_overrides = { 'response_code': 401, 'response_error': 'Not authorized (by the Python middleware)' }
return request, session, metadata
<commit_msg>Use float in session fields<commit_after>
|
from tyk.decorators import *
from gateway import TykGateway as tyk
@CustomKeyCheck
def MyKeyCheck(request, session, metadata, spec):
print("Running MyKeyCheck?")
print("request:", request)
print("session:", session)
print("spec:", spec)
valid_token = 'aaf4c61ddcc5e8a2dabede0f3b482cd9aea9434d'
request_token = request.get_header('Authorization')
print("(python) request_token =", request_token)
if request_token == valid_token:
print("Token is OK")
session.rate = 1000.0
session.per = 1.0
metadata['token'] = "mytoken"
else:
print("Token is WRONG")
request.return_overrides = { 'response_code': 401, 'response_error': 'Not authorized (by the Python middleware)' }
return request, session, metadata
|
from tyk.decorators import *
from gateway import TykGateway as tyk
@CustomKeyCheck
def MyKeyCheck(request, session, metadata, spec):
print("Running MyKeyCheck?")
print("request:", request)
print("session:", session)
print("spec:", spec)
valid_token = 'aaf4c61ddcc5e8a2dabede0f3b482cd9aea9434d'
request_token = request.get_header('Authorization')
if request_token == valid_token:
print("Token is OK")
session.rate = 1000
session.per = 1
metadata['token'] = "mytoken"
else:
print("Token is WRONG")
request.return_overrides = { 'response_code': 401, 'response_error': 'Not authorized (by the Python middleware)' }
return request, session, metadata
Use float in session fieldsfrom tyk.decorators import *
from gateway import TykGateway as tyk
@CustomKeyCheck
def MyKeyCheck(request, session, metadata, spec):
print("Running MyKeyCheck?")
print("request:", request)
print("session:", session)
print("spec:", spec)
valid_token = 'aaf4c61ddcc5e8a2dabede0f3b482cd9aea9434d'
request_token = request.get_header('Authorization')
print("(python) request_token =", request_token)
if request_token == valid_token:
print("Token is OK")
session.rate = 1000.0
session.per = 1.0
metadata['token'] = "mytoken"
else:
print("Token is WRONG")
request.return_overrides = { 'response_code': 401, 'response_error': 'Not authorized (by the Python middleware)' }
return request, session, metadata
|
<commit_before>from tyk.decorators import *
from gateway import TykGateway as tyk
@CustomKeyCheck
def MyKeyCheck(request, session, metadata, spec):
print("Running MyKeyCheck?")
print("request:", request)
print("session:", session)
print("spec:", spec)
valid_token = 'aaf4c61ddcc5e8a2dabede0f3b482cd9aea9434d'
request_token = request.get_header('Authorization')
if request_token == valid_token:
print("Token is OK")
session.rate = 1000
session.per = 1
metadata['token'] = "mytoken"
else:
print("Token is WRONG")
request.return_overrides = { 'response_code': 401, 'response_error': 'Not authorized (by the Python middleware)' }
return request, session, metadata
<commit_msg>Use float in session fields<commit_after>from tyk.decorators import *
from gateway import TykGateway as tyk
@CustomKeyCheck
def MyKeyCheck(request, session, metadata, spec):
print("Running MyKeyCheck?")
print("request:", request)
print("session:", session)
print("spec:", spec)
valid_token = 'aaf4c61ddcc5e8a2dabede0f3b482cd9aea9434d'
request_token = request.get_header('Authorization')
print("(python) request_token =", request_token)
if request_token == valid_token:
print("Token is OK")
session.rate = 1000.0
session.per = 1.0
metadata['token'] = "mytoken"
else:
print("Token is WRONG")
request.return_overrides = { 'response_code': 401, 'response_error': 'Not authorized (by the Python middleware)' }
return request, session, metadata
|
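The only functional change in this diff is 1000 -> 1000.0 and 1 -> 1.0. A plausible reading (an assumption, not stated in the commit) is that the session is serialized across the Python/Go boundary and the receiving side expects floating-point fields; an integer literal changes the wire-level token, as this tiny sketch shows:

import json

print(json.dumps({'rate': 1000, 'per': 1}))      # {"rate": 1000, "per": 1}
print(json.dumps({'rate': 1000.0, 'per': 1.0}))  # {"rate": 1000.0, "per": 1.0}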
8f70af55a1cbe58eea64ca8b40b0f8d67d6a0b8b
|
migrations/versions/032_tighten_easuid.py
|
migrations/versions/032_tighten_easuid.py
|
"""Tighten EAS constraints and fix easfoldersync state enum.
Revision ID: 3f96e92953e1
Revises: 55f0ff54c776
Create Date: 2014-05-21 17:43:44.556716
"""
# revision identifiers, used by Alembic.
revision = '3f96e92953e1'
down_revision = '55f0ff54c776'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.ext.declarative import declarative_base
def upgrade():
from inbox.server.models import engine
Base = declarative_base()
Base.metadata.reflect(engine)
if 'easfoldersync' in Base.metadata.tables:
op.alter_column('easfoldersync', 'state',
type_=sa.Enum('initial', 'initial keyinvalid',
'poll', 'poll keyinvalid', 'finish'),
existing_nullable=False,
server_default='initial')
if 'easuid' in Base.metadata.tables:
op.alter_column('easuid', 'message_id', existing_type=sa.Integer(),
nullable=False)
op.alter_column('easuid', 'fld_uid', existing_type=sa.Integer(),
nullable=False)
op.alter_column('easuid', 'msg_uid', existing_type=sa.Integer(),
nullable=False)
def downgrade():
from inbox.server.models import engine
Base = declarative_base()
Base.metadata.reflect(engine)
if 'easfoldersync' in Base.metadata.tables:
op.alter_column('easfoldersync', 'state',
type_=sa.Enum('initial', 'initial uidinvalid',
'poll', 'poll uidinvalid', 'finish'),
existing_nullable=False)
if 'easuid' in Base.metadata.tables:
op.alter_column('easuid', 'message_id', existing_type=sa.Integer(),
nullable=True)
op.alter_column('easuid', 'fld_uid', existing_type=sa.Integer(),
nullable=True)
op.alter_column('easuid', 'msg_uid', existing_type=sa.Integer(),
nullable=True)
|
Fix nullable constraints on EASUid and EASFoldersync state enum.
|
[SCHEMA] Fix nullable constraints on EASUid and EASFoldersync state enum.
Summary:
The former we tightened on Gmail at one point and didn't trickle it back
to EAS. The latter is a bug from way back.
We really need to figure out a better deal for what to do with EAS-only
migrations, but putting it here for now.
Depends on D153
Test Plan: Upgrade/downgrade.
Reviewers: kav-ya
Reviewed By: kav-ya
Differential Revision: https://review.inboxapp.com/D154
|
Python
|
agpl-3.0
|
ErinCall/sync-engine,jobscore/sync-engine,gale320/sync-engine,Eagles2F/sync-engine,nylas/sync-engine,Eagles2F/sync-engine,rmasters/inbox,gale320/sync-engine,Eagles2F/sync-engine,rmasters/inbox,PriviPK/privipk-sync-engine,gale320/sync-engine,nylas/sync-engine,Eagles2F/sync-engine,gale320/sync-engine,closeio/nylas,EthanBlackburn/sync-engine,wakermahmud/sync-engine,nylas/sync-engine,rmasters/inbox,EthanBlackburn/sync-engine,EthanBlackburn/sync-engine,PriviPK/privipk-sync-engine,ErinCall/sync-engine,PriviPK/privipk-sync-engine,gale320/sync-engine,ErinCall/sync-engine,wakermahmud/sync-engine,jobscore/sync-engine,wakermahmud/sync-engine,closeio/nylas,wakermahmud/sync-engine,nylas/sync-engine,Eagles2F/sync-engine,jobscore/sync-engine,ErinCall/sync-engine,EthanBlackburn/sync-engine,PriviPK/privipk-sync-engine,jobscore/sync-engine,EthanBlackburn/sync-engine,wakermahmud/sync-engine,closeio/nylas,closeio/nylas,ErinCall/sync-engine,PriviPK/privipk-sync-engine,rmasters/inbox
|
[SCHEMA] Fix nullable constraints on EASUid and EASFoldersync state enum.
Summary:
The former we tightened on Gmail at one point and didn't trickle it back
to EAS. The latter is a bug from way back.
We really need to figure out a better deal for what to do with EAS-only
migrations, but putting it here for now.
Depends on D153
Test Plan: Upgrade/downgrade.
Reviewers: kav-ya
Reviewed By: kav-ya
Differential Revision: https://review.inboxapp.com/D154
|
"""Tighten EAS constraints and fix easfoldersync state enum.
Revision ID: 3f96e92953e1
Revises: 55f0ff54c776
Create Date: 2014-05-21 17:43:44.556716
"""
# revision identifiers, used by Alembic.
revision = '3f96e92953e1'
down_revision = '55f0ff54c776'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.ext.declarative import declarative_base
def upgrade():
from inbox.server.models import engine
Base = declarative_base()
Base.metadata.reflect(engine)
if 'easfoldersync' in Base.metadata.tables:
op.alter_column('easfoldersync', 'state',
type_=sa.Enum('initial', 'initial keyinvalid',
'poll', 'poll keyinvalid', 'finish'),
existing_nullable=False,
server_default='initial')
if 'easuid' in Base.metadata.tables:
op.alter_column('easuid', 'message_id', existing_type=sa.Integer(),
nullable=False)
op.alter_column('easuid', 'fld_uid', existing_type=sa.Integer(),
nullable=False)
op.alter_column('easuid', 'msg_uid', existing_type=sa.Integer(),
nullable=False)
def downgrade():
from inbox.server.models import engine
Base = declarative_base()
Base.metadata.reflect(engine)
if 'easfoldersync' in Base.metadata.tables:
op.alter_column('easfoldersync', 'state',
type_=sa.Enum('initial', 'initial uidinvalid',
'poll', 'poll uidinvalid', 'finish'),
existing_nullable=False)
if 'easuid' in Base.metadata.tables:
op.alter_column('easuid', 'message_id', existing_type=sa.Integer(),
nullable=True)
op.alter_column('easuid', 'fld_uid', existing_type=sa.Integer(),
nullable=True)
op.alter_column('easuid', 'msg_uid', existing_type=sa.Integer(),
nullable=True)
|
<commit_before><commit_msg>[SCHEMA] Fix nullable constraints on EASUid and EASFoldersync state enum.
Summary:
The former we tightened on Gmail at one point and didn't trickle it back
to EAS. The latter is a bug from way back.
We really need to figure out a better deal for what to do with EAS-only
migrations, but putting it here for now.
Depends on D153
Test Plan: Upgrade/downgrade.
Reviewers: kav-ya
Reviewed By: kav-ya
Differential Revision: https://review.inboxapp.com/D154<commit_after>
|
"""Tighten EAS constraints and fix easfoldersync state enum.
Revision ID: 3f96e92953e1
Revises: 55f0ff54c776
Create Date: 2014-05-21 17:43:44.556716
"""
# revision identifiers, used by Alembic.
revision = '3f96e92953e1'
down_revision = '55f0ff54c776'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.ext.declarative import declarative_base
def upgrade():
from inbox.server.models import engine
Base = declarative_base()
Base.metadata.reflect(engine)
if 'easfoldersync' in Base.metadata.tables:
op.alter_column('easfoldersync', 'state',
type_=sa.Enum('initial', 'initial keyinvalid',
'poll', 'poll keyinvalid', 'finish'),
existing_nullable=False,
server_default='initial')
if 'easuid' in Base.metadata.tables:
op.alter_column('easuid', 'message_id', existing_type=sa.Integer(),
nullable=False)
op.alter_column('easuid', 'fld_uid', existing_type=sa.Integer(),
nullable=False)
op.alter_column('easuid', 'msg_uid', existing_type=sa.Integer(),
nullable=False)
def downgrade():
from inbox.server.models import engine
Base = declarative_base()
Base.metadata.reflect(engine)
if 'easfoldersync' in Base.metadata.tables:
op.alter_column('easfoldersync', 'state',
type_=sa.Enum('initial', 'initial uidinvalid',
'poll', 'poll uidinvalid', 'finish'),
existing_nullable=False)
if 'easuid' in Base.metadata.tables:
op.alter_column('easuid', 'message_id', existing_type=sa.Integer(),
nullable=True)
op.alter_column('easuid', 'fld_uid', existing_type=sa.Integer(),
nullable=True)
op.alter_column('easuid', 'msg_uid', existing_type=sa.Integer(),
nullable=True)
|
[SCHEMA] Fix nullable constraints on EASUid and EASFoldersync state enum.
Summary:
The former we tightened on Gmail at one point and didn't trickle it back
to EAS. The latter is a bug from way back.
We really need to figure out a better deal for what to do with EAS-only
migrations, but putting it here for now.
Depends on D153
Test Plan: Upgrade/downgrade.
Reviewers: kav-ya
Reviewed By: kav-ya
Differential Revision: https://review.inboxapp.com/D154"""Tighten EAS constraints and fix easfoldersync state enum.
Revision ID: 3f96e92953e1
Revises: 55f0ff54c776
Create Date: 2014-05-21 17:43:44.556716
"""
# revision identifiers, used by Alembic.
revision = '3f96e92953e1'
down_revision = '55f0ff54c776'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.ext.declarative import declarative_base
def upgrade():
from inbox.server.models import engine
Base = declarative_base()
Base.metadata.reflect(engine)
if 'easfoldersync' in Base.metadata.tables:
op.alter_column('easfoldersync', 'state',
type_=sa.Enum('initial', 'initial keyinvalid',
'poll', 'poll keyinvalid', 'finish'),
existing_nullable=False,
server_default='initial')
if 'easuid' in Base.metadata.tables:
op.alter_column('easuid', 'message_id', existing_type=sa.Integer(),
nullable=False)
op.alter_column('easuid', 'fld_uid', existing_type=sa.Integer(),
nullable=False)
op.alter_column('easuid', 'msg_uid', existing_type=sa.Integer(),
nullable=False)
def downgrade():
from inbox.server.models import engine
Base = declarative_base()
Base.metadata.reflect(engine)
if 'easfoldersync' in Base.metadata.tables:
op.alter_column('easfoldersync', 'state',
type_=sa.Enum('initial', 'initial uidinvalid',
'poll', 'poll uidinvalid', 'finish'),
existing_nullable=False)
if 'easuid' in Base.metadata.tables:
op.alter_column('easuid', 'message_id', existing_type=sa.Integer(),
nullable=True)
op.alter_column('easuid', 'fld_uid', existing_type=sa.Integer(),
nullable=True)
op.alter_column('easuid', 'msg_uid', existing_type=sa.Integer(),
nullable=True)
|
<commit_before><commit_msg>[SCHEMA] Fix nullable constraints on EASUid and EASFoldersync state enum.
Summary:
The former we tightened on Gmail at one point and didn't trickle it back
to EAS. The latter is a bug from way back.
We really need to figure out a better deal for what to do with EAS-only
migrations, but putting it here for now.
Depends on D153
Test Plan: Upgrade/downgrade.
Reviewers: kav-ya
Reviewed By: kav-ya
Differential Revision: https://review.inboxapp.com/D154<commit_after>"""Tighten EAS constraints and fix easfoldersync state enum.
Revision ID: 3f96e92953e1
Revises: 55f0ff54c776
Create Date: 2014-05-21 17:43:44.556716
"""
# revision identifiers, used by Alembic.
revision = '3f96e92953e1'
down_revision = '55f0ff54c776'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.ext.declarative import declarative_base
def upgrade():
from inbox.server.models import engine
Base = declarative_base()
Base.metadata.reflect(engine)
if 'easfoldersync' in Base.metadata.tables:
op.alter_column('easfoldersync', 'state',
type_=sa.Enum('initial', 'initial keyinvalid',
'poll', 'poll keyinvalid', 'finish'),
existing_nullable=False,
server_default='initial')
if 'easuid' in Base.metadata.tables:
op.alter_column('easuid', 'message_id', existing_type=sa.Integer(),
nullable=False)
op.alter_column('easuid', 'fld_uid', existing_type=sa.Integer(),
nullable=False)
op.alter_column('easuid', 'msg_uid', existing_type=sa.Integer(),
nullable=False)
def downgrade():
from inbox.server.models import engine
Base = declarative_base()
Base.metadata.reflect(engine)
if 'easfoldersync' in Base.metadata.tables:
op.alter_column('easfoldersync', 'state',
type_=sa.Enum('initial', 'initial uidinvalid',
'poll', 'poll uidinvalid', 'finish'),
existing_nullable=False)
if 'easuid' in Base.metadata.tables:
op.alter_column('easuid', 'message_id', existing_type=sa.Integer(),
nullable=True)
op.alter_column('easuid', 'fld_uid', existing_type=sa.Integer(),
nullable=True)
op.alter_column('easuid', 'msg_uid', existing_type=sa.Integer(),
nullable=True)
|
|
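The migration's reflection guard, isolated as a runnable sketch against an in-memory SQLite database, using the pre-2.0 SQLAlchemy API the migration itself targets. The table names are just illustrations; inside Alembic the prints would be op.alter_column(...) calls.

import sqlalchemy as sa
from sqlalchemy.ext.declarative import declarative_base

engine = sa.create_engine('sqlite://')      # stand-in for inbox.server.models.engine
engine.execute('CREATE TABLE easuid (id INTEGER)')

Base = declarative_base()
Base.metadata.reflect(engine)               # discover whatever tables really exist

for name in ('easuid', 'easfoldersync'):
    if name in Base.metadata.tables:
        print('%s present -- would run op.alter_column(...) here' % name)
    else:
        print('%s absent -- step skipped, migration still succeeds' % name)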
4e2120a7cc2429729436c6a8c4fda39936ce3523
|
osfclient/tests/test_file.py
|
osfclient/tests/test_file.py
|
from unittest.mock import patch
from osfclient.models import OSFCore
from osfclient.models import File
from osfclient.models import Folder
from osfclient.tests import fake_responses
from osfclient.tests.mocks import FakeResponse
_files_url = 'https://api.osf.io/v2//nodes/f3szh/files/osfstorage/foo123'
@patch.object(OSFCore, '_get')
def test_iterate_files(OSFCore_get):
store = Folder({})
store._files_url = _files_url
json = fake_responses.files_node('f3szh', 'osfstorage',
['foo/hello.txt', 'foo/bye.txt'])
response = FakeResponse(200, json)
OSFCore_get.return_value = response
files = list(store.files)
assert len(files) == 2
for file_ in files:
assert isinstance(file_, File)
assert file_.session == store.session
OSFCore_get.assert_called_once_with(
'https://api.osf.io/v2//nodes/f3szh/files/osfstorage/foo123')
@patch.object(OSFCore, '_get')
def test_iterate_folders(OSFCore_get):
store = Folder({})
store._files_url = _files_url
json = fake_responses.files_node('f3szh', 'osfstorage',
folder_names=['foo/bar', 'foo/baz'])
response = FakeResponse(200, json)
OSFCore_get.return_value = response
folders = list(store.folders)
assert len(folders) == 2
for folder in folders:
assert isinstance(folder, Folder)
assert folder.session == store.session
assert folder.name in ('foo/bar', 'foo/baz')
OSFCore_get.assert_called_once_with(
'https://api.osf.io/v2//nodes/f3szh/files/osfstorage/foo123')
def test_iterate_files_and_folders():
# check we do not attempt to recurse into the subfolders
store = Folder({})
store._files_url = _files_url
json = fake_responses.files_node('f3szh', 'osfstorage',
file_names=['hello.txt', 'bye.txt'],
folder_names=['bar'])
top_level_response = FakeResponse(200, json)
def simple_OSFCore_get(url):
if url == store._files_url:
return top_level_response
else:
print(url)
raise ValueError()
with patch.object(OSFCore, '_get',
side_effect=simple_OSFCore_get) as mock_osf_get:
files = list(store.files)
assert len(files) == 2
for file_ in files:
assert isinstance(file_, File)
assert file_.session == store.session
assert file_.name in ('hello.txt', 'bye.txt')
# check we did not try to recurse into subfolders
expected = [((_files_url,),)]
assert mock_osf_get.call_args_list == expected
|
Add test of Folder instances
|
Add test of Folder instances
|
Python
|
bsd-3-clause
|
betatim/osf-cli,betatim/osf-cli
|
Add test of Folder instances
|
from unittest.mock import patch
from osfclient.models import OSFCore
from osfclient.models import File
from osfclient.models import Folder
from osfclient.tests import fake_responses
from osfclient.tests.mocks import FakeResponse
_files_url = 'https://api.osf.io/v2//nodes/f3szh/files/osfstorage/foo123'
@patch.object(OSFCore, '_get')
def test_iterate_files(OSFCore_get):
store = Folder({})
store._files_url = _files_url
json = fake_responses.files_node('f3szh', 'osfstorage',
['foo/hello.txt', 'foo/bye.txt'])
response = FakeResponse(200, json)
OSFCore_get.return_value = response
files = list(store.files)
assert len(files) == 2
for file_ in files:
assert isinstance(file_, File)
assert file_.session == store.session
OSFCore_get.assert_called_once_with(
'https://api.osf.io/v2//nodes/f3szh/files/osfstorage/foo123')
@patch.object(OSFCore, '_get')
def test_iterate_folders(OSFCore_get):
store = Folder({})
store._files_url = _files_url
json = fake_responses.files_node('f3szh', 'osfstorage',
folder_names=['foo/bar', 'foo/baz'])
response = FakeResponse(200, json)
OSFCore_get.return_value = response
folders = list(store.folders)
assert len(folders) == 2
for folder in folders:
assert isinstance(folder, Folder)
assert folder.session == store.session
assert folder.name in ('foo/bar', 'foo/baz')
OSFCore_get.assert_called_once_with(
'https://api.osf.io/v2//nodes/f3szh/files/osfstorage/foo123')
def test_iterate_files_and_folders():
# check we do not attempt to recurse into the subfolders
store = Folder({})
store._files_url = _files_url
json = fake_responses.files_node('f3szh', 'osfstorage',
file_names=['hello.txt', 'bye.txt'],
folder_names=['bar'])
top_level_response = FakeResponse(200, json)
def simple_OSFCore_get(url):
if url == store._files_url:
return top_level_response
else:
print(url)
raise ValueError()
with patch.object(OSFCore, '_get',
side_effect=simple_OSFCore_get) as mock_osf_get:
files = list(store.files)
assert len(files) == 2
for file_ in files:
assert isinstance(file_, File)
assert file_.session == store.session
assert file_.name in ('hello.txt', 'bye.txt')
# check we did not try to recurse into subfolders
expected = [((_files_url,),)]
assert mock_osf_get.call_args_list == expected
|
<commit_before><commit_msg>Add test of Folder instances<commit_after>
|
from unittest.mock import patch
from osfclient.models import OSFCore
from osfclient.models import File
from osfclient.models import Folder
from osfclient.tests import fake_responses
from osfclient.tests.mocks import FakeResponse
_files_url = 'https://api.osf.io/v2//nodes/f3szh/files/osfstorage/foo123'
@patch.object(OSFCore, '_get')
def test_iterate_files(OSFCore_get):
store = Folder({})
store._files_url = _files_url
json = fake_responses.files_node('f3szh', 'osfstorage',
['foo/hello.txt', 'foo/bye.txt'])
response = FakeResponse(200, json)
OSFCore_get.return_value = response
files = list(store.files)
assert len(files) == 2
for file_ in files:
assert isinstance(file_, File)
assert file_.session == store.session
OSFCore_get.assert_called_once_with(
'https://api.osf.io/v2//nodes/f3szh/files/osfstorage/foo123')
@patch.object(OSFCore, '_get')
def test_iterate_folders(OSFCore_get):
store = Folder({})
store._files_url = _files_url
json = fake_responses.files_node('f3szh', 'osfstorage',
folder_names=['foo/bar', 'foo/baz'])
response = FakeResponse(200, json)
OSFCore_get.return_value = response
folders = list(store.folders)
assert len(folders) == 2
for folder in folders:
assert isinstance(folder, Folder)
assert folder.session == store.session
assert folder.name in ('foo/bar', 'foo/baz')
OSFCore_get.assert_called_once_with(
'https://api.osf.io/v2//nodes/f3szh/files/osfstorage/foo123')
def test_iterate_files_and_folders():
# check we do not attempt to recurse into the subfolders
store = Folder({})
store._files_url = _files_url
json = fake_responses.files_node('f3szh', 'osfstorage',
file_names=['hello.txt', 'bye.txt'],
folder_names=['bar'])
top_level_response = FakeResponse(200, json)
def simple_OSFCore_get(url):
if url == store._files_url:
return top_level_response
else:
print(url)
raise ValueError()
with patch.object(OSFCore, '_get',
side_effect=simple_OSFCore_get) as mock_osf_get:
files = list(store.files)
assert len(files) == 2
for file_ in files:
assert isinstance(file_, File)
assert file_.session == store.session
assert file_.name in ('hello.txt', 'bye.txt')
# check we did not try to recurse into subfolders
expected = [((_files_url,),)]
assert mock_osf_get.call_args_list == expected
|
Add test of Folder instancesfrom unittest.mock import patch
from osfclient.models import OSFCore
from osfclient.models import File
from osfclient.models import Folder
from osfclient.tests import fake_responses
from osfclient.tests.mocks import FakeResponse
_files_url = 'https://api.osf.io/v2//nodes/f3szh/files/osfstorage/foo123'
@patch.object(OSFCore, '_get')
def test_iterate_files(OSFCore_get):
store = Folder({})
store._files_url = _files_url
json = fake_responses.files_node('f3szh', 'osfstorage',
['foo/hello.txt', 'foo/bye.txt'])
response = FakeResponse(200, json)
OSFCore_get.return_value = response
files = list(store.files)
assert len(files) == 2
for file_ in files:
assert isinstance(file_, File)
assert file_.session == store.session
OSFCore_get.assert_called_once_with(
'https://api.osf.io/v2//nodes/f3szh/files/osfstorage/foo123')
@patch.object(OSFCore, '_get')
def test_iterate_folders(OSFCore_get):
store = Folder({})
store._files_url = _files_url
json = fake_responses.files_node('f3szh', 'osfstorage',
folder_names=['foo/bar', 'foo/baz'])
response = FakeResponse(200, json)
OSFCore_get.return_value = response
folders = list(store.folders)
assert len(folders) == 2
for folder in folders:
assert isinstance(folder, Folder)
assert folder.session == store.session
assert folder.name in ('foo/bar', 'foo/baz')
OSFCore_get.assert_called_once_with(
'https://api.osf.io/v2//nodes/f3szh/files/osfstorage/foo123')
def test_iterate_files_and_folders():
# check we do not attempt to recurse into the subfolders
store = Folder({})
store._files_url = _files_url
json = fake_responses.files_node('f3szh', 'osfstorage',
file_names=['hello.txt', 'bye.txt'],
folder_names=['bar'])
top_level_response = FakeResponse(200, json)
def simple_OSFCore_get(url):
if url == store._files_url:
return top_level_response
else:
print(url)
raise ValueError()
with patch.object(OSFCore, '_get',
side_effect=simple_OSFCore_get) as mock_osf_get:
files = list(store.files)
assert len(files) == 2
for file_ in files:
assert isinstance(file_, File)
assert file_.session == store.session
assert file_.name in ('hello.txt', 'bye.txt')
# check we did not try to recurse into subfolders
expected = [((_files_url,),)]
assert mock_osf_get.call_args_list == expected
|
<commit_before><commit_msg>Add test of Folder instances<commit_after>from unittest.mock import patch
from osfclient.models import OSFCore
from osfclient.models import File
from osfclient.models import Folder
from osfclient.tests import fake_responses
from osfclient.tests.mocks import FakeResponse
_files_url = 'https://api.osf.io/v2//nodes/f3szh/files/osfstorage/foo123'
@patch.object(OSFCore, '_get')
def test_iterate_files(OSFCore_get):
store = Folder({})
store._files_url = _files_url
json = fake_responses.files_node('f3szh', 'osfstorage',
['foo/hello.txt', 'foo/bye.txt'])
response = FakeResponse(200, json)
OSFCore_get.return_value = response
files = list(store.files)
assert len(files) == 2
for file_ in files:
assert isinstance(file_, File)
assert file_.session == store.session
OSFCore_get.assert_called_once_with(
'https://api.osf.io/v2//nodes/f3szh/files/osfstorage/foo123')
@patch.object(OSFCore, '_get')
def test_iterate_folders(OSFCore_get):
store = Folder({})
store._files_url = _files_url
json = fake_responses.files_node('f3szh', 'osfstorage',
folder_names=['foo/bar', 'foo/baz'])
response = FakeResponse(200, json)
OSFCore_get.return_value = response
folders = list(store.folders)
assert len(folders) == 2
for folder in folders:
assert isinstance(folder, Folder)
assert folder.session == store.session
assert folder.name in ('foo/bar', 'foo/baz')
OSFCore_get.assert_called_once_with(
'https://api.osf.io/v2//nodes/f3szh/files/osfstorage/foo123')
def test_iterate_files_and_folders():
# check we do not attempt to recurse into the subfolders
store = Folder({})
store._files_url = _files_url
json = fake_responses.files_node('f3szh', 'osfstorage',
file_names=['hello.txt', 'bye.txt'],
folder_names=['bar'])
top_level_response = FakeResponse(200, json)
def simple_OSFCore_get(url):
if url == store._files_url:
return top_level_response
else:
print(url)
raise ValueError()
with patch.object(OSFCore, '_get',
side_effect=simple_OSFCore_get) as mock_osf_get:
files = list(store.files)
assert len(files) == 2
for file_ in files:
assert isinstance(file_, File)
assert file_.session == store.session
assert file_.name in ('hello.txt', 'bye.txt')
# check we did not try to recurse into subfolders
expected = [((_files_url,),)]
assert mock_osf_get.call_args_list == expected
|
|
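The last test swaps a fixed return_value for a side_effect callable so the fake can route by URL and fail loudly if the code under test recurses into subfolders. The same pattern in isolation, with hypothetical names:

from unittest.mock import patch

def fetch(url):
    raise RuntimeError('real network call -- disabled in tests')

def handler(url):
    if url == 'https://example.invalid/top':
        return {'ok': True}
    raise ValueError('unexpected URL: %r' % url)  # catches accidental recursion

with patch('__main__.fetch', side_effect=handler) as m:
    assert fetch('https://example.invalid/top') == {'ok': True}
    assert m.call_count == 1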
dcb230c1d06bc556c0b52df0c605a58f43baac64
|
dipy/denoise/gibbs.py
|
dipy/denoise/gibbs.py
|
import numpy as np
def image_tv(x, fn=0, nn=3, a=0):
""" Computes total variation (TV) of matrix x along axis a in two
directions.
Parameters
----------
x : 2D ndarray
matrix x
fn : int
Distance of first neighbor to be included in TV calculation. If fn=0
the own point is also included in the TV calculation.
nn : int
Number of points to be included in TV calculation.
a : int (0 or 1)
Axis along which TV will be calculated. Default a is set to 0.
Returns
-------
PTV : 2D ndarray
Total variation calculated from the right neighbors of each point
NTV : 2D ndarray
Total variation calculated from the left neighbors of each point
Note
----
    This function was created to deal with Gibbs artefacts of MR images.
    Assuming that MR images are reconstructed from estimates of their Fourier
    expansion coefficients, during TV calculation matrix x can be taken as a
    periodic signal. In this way NTV values on the image left boundary are
computed using the time series values on the right boundary and vice versa.
"""
if a:
xs = x.copy()
else:
xs = x.T.copy()
xs = np.concatenate((xs[:, (-nn-fn):], xs, xs[:, 0:(nn+fn)]), axis=1)
PTV = np.absolute(xs[:, (nn+fn):(-nn-fn)] - xs[:, (nn+fn+1):(-nn-fn+1)])
NTV = np.absolute(xs[:, (nn+fn):(-nn-fn)] - xs[:, (nn+fn-1):(-nn-fn-1)])
for n in np.linspace(fn+1, nn-2, num=nn-2):
PTV = PTV + np.absolute(xs[:, (nn+fn+n):(-nn-fn+n)] -
xs[:, (nn+fn+n+1):(-nn-fn+n+1)])
NTV = NTV + np.absolute(xs[:, (nn+fn-n):(-nn-fn-n)] -
xs[:, (nn+fn-n-1):(-nn-fn-n-1)])
if a:
return PTV, NTV
else:
return PTV.T, NTV.T
|
Add total variation function needed to access sub-voxel shifts
|
NF: Add total variation function needed to access sub-voxel shifts
|
Python
|
bsd-3-clause
|
FrancoisRheaultUS/dipy,FrancoisRheaultUS/dipy
|
NF: Add total variation function needed to access sub-voxel shifts
|
import numpy as np
def image_tv(x, fn=0, nn=3, a=0):
""" Computes total variation (TV) of matrix x along axis a in two
directions.
Parameters
----------
x : 2D ndarray
matrix x
fn : int
Distance of first neighbor to be included in TV calculation. If fn=0
the own point is also included in the TV calculation.
nn : int
Number of points to be included in TV calculation.
a : int (0 or 1)
Axis along which TV will be calculated. Default a is set to 0.
Returns
-------
PTV : 2D ndarray
Total variation calculated from the right neighbors of each point
NTV : 2D ndarray
Total variation calculated from the left neighbors of each point
Note
----
    This function was created to deal with Gibbs artefacts of MR images.
    Assuming that MR images are reconstructed from estimates of their Fourier
    expansion coefficients, during TV calculation matrix x can be taken as a
    periodic signal. In this way NTV values on the image left boundary are
computed using the time series values on the right boundary and vice versa.
"""
if a:
xs = x.copy()
else:
xs = x.T.copy()
xs = np.concatenate((xs[:, (-nn-fn):], xs, xs[:, 0:(nn+fn)]), axis=1)
PTV = np.absolute(xs[:, (nn+fn):(-nn-fn)] - xs[:, (nn+fn+1):(-nn-fn+1)])
NTV = np.absolute(xs[:, (nn+fn):(-nn-fn)] - xs[:, (nn+fn-1):(-nn-fn-1)])
for n in np.linspace(fn+1, nn-2, num=nn-2):
PTV = PTV + np.absolute(xs[:, (nn+fn+n):(-nn-fn+n)] -
xs[:, (nn+fn+n+1):(-nn-fn+n+1)])
NTV = NTV + np.absolute(xs[:, (nn+fn-n):(-nn-fn-n)] -
xs[:, (nn+fn-n-1):(-nn-fn-n-1)])
if a:
return PTV, NTV
else:
return PTV.T, NTV.T
|
<commit_before><commit_msg>NF: Add total variation function needed to access sub-voxel shifts<commit_after>
|
import numpy as np
def image_tv(x, fn=0, nn=3, a=0):
""" Computes total variation (TV) of matrix x along axis a in two
directions.
Parameters
----------
x : 2D ndarray
matrix x
fn : int
Distance of first neighbor to be included in TV calculation. If fn=0
the own point is also included in the TV calculation.
nn : int
Number of points to be included in TV calculation.
a : int (0 or 1)
Axis along which TV will be calculated. Default a is set to 0.
Returns
-------
PTV : 2D ndarray
Total variation calculated from the right neighbors of each point
NTV : 2D ndarray
Total variation calculated from the left neighbors of each point
Note
----
    This function was created to deal with Gibbs artefacts of MR images.
    Assuming that MR images are reconstructed from estimates of their Fourier
    expansion coefficients, during TV calculation matrix x can be taken as a
    periodic signal. In this way NTV values on the image left boundary are
computed using the time series values on the right boundary and vice versa.
"""
if a:
xs = x.copy()
else:
xs = x.T.copy()
xs = np.concatenate((xs[:, (-nn-fn):], xs, xs[:, 0:(nn+fn)]), axis=1)
PTV = np.absolute(xs[:, (nn+fn):(-nn-fn)] - xs[:, (nn+fn+1):(-nn-fn+1)])
NTV = np.absolute(xs[:, (nn+fn):(-nn-fn)] - xs[:, (nn+fn-1):(-nn-fn-1)])
for n in np.linspace(fn+1, nn-2, num=nn-2):
PTV = PTV + np.absolute(xs[:, (nn+fn+n):(-nn-fn+n)] -
xs[:, (nn+fn+n+1):(-nn-fn+n+1)])
NTV = NTV + np.absolute(xs[:, (nn+fn-n):(-nn-fn-n)] -
xs[:, (nn+fn-n-1):(-nn-fn-n-1)])
if a:
return PTV, NTV
else:
return PTV.T, NTV.T
|
NF: Add total variation function needed to access sub-voxel shiftsimport numpy as np
def image_tv(x, fn=0, nn=3, a=0):
""" Computes total variation (TV) of matrix x along axis a in two
directions.
Parameters
----------
x : 2D ndarray
matrix x
fn : int
Distance of first neighbor to be included in TV calculation. If fn=0
the own point is also included in the TV calculation.
nn : int
Number of points to be included in TV calculation.
a : int (0 or 1)
Axis along which TV will be calculated. Default a is set to 0.
Returns
-------
PTV : 2D ndarray
Total variation calculated from the right neighbors of each point
NTV : 2D ndarray
Total variation calculated from the left neighbors of each point
Note
----
    This function was created to deal with Gibbs artefacts of MR images.
    Assuming that MR images are reconstructed from estimates of their Fourier
    expansion coefficients, during TV calculation matrix x can be taken as a
    periodic signal. In this way NTV values on the image left boundary are
computed using the time series values on the right boundary and vice versa.
"""
if a:
xs = x.copy()
else:
xs = x.T.copy()
xs = np.concatenate((xs[:, (-nn-fn):], xs, xs[:, 0:(nn+fn)]), axis=1)
PTV = np.absolute(xs[:, (nn+fn):(-nn-fn)] - xs[:, (nn+fn+1):(-nn-fn+1)])
NTV = np.absolute(xs[:, (nn+fn):(-nn-fn)] - xs[:, (nn+fn-1):(-nn-fn-1)])
for n in np.linspace(fn+1, nn-2, num=nn-2):
PTV = PTV + np.absolute(xs[:, (nn+fn+n):(-nn-fn+n)] -
xs[:, (nn+fn+n+1):(-nn-fn+n+1)])
NTV = NTV + np.absolute(xs[:, (nn+fn-n):(-nn-fn-n)] -
xs[:, (nn+fn-n-1):(-nn-fn-n-1)])
if a:
return PTV, NTV
else:
return PTV.T, NTV.T
|
<commit_before><commit_msg>NF: Add total variation function needed to access sub-voxel shifts<commit_after>import numpy as np
def image_tv(x, fn=0, nn=3, a=0):
""" Computes total variation (TV) of matrix x along axis a in two
directions.
Parameters
----------
x : 2D ndarray
matrix x
fn : int
Distance of first neighbor to be included in TV calculation. If fn=0
the own point is also included in the TV calculation.
nn : int
Number of points to be included in TV calculation.
a : int (0 or 1)
Axis along which TV will be calculated. Default a is set to 0.
Returns
-------
PTV : 2D ndarray
Total variation calculated from the right neighbors of each point
NTV : 2D ndarray
Total variation calculated from the left neighbors of each point
Note
----
    This function was created to deal with Gibbs artefacts of MR images.
    Assuming that MR images are reconstructed from estimates of their Fourier
    expansion coefficients, during TV calculation matrix x can be taken as a
    periodic signal. In this way NTV values on the image left boundary are
computed using the time series values on the right boundary and vice versa.
"""
if a:
xs = x.copy()
else:
xs = x.T.copy()
xs = np.concatenate((xs[:, (-nn-fn):], xs, xs[:, 0:(nn+fn)]), axis=1)
PTV = np.absolute(xs[:, (nn+fn):(-nn-fn)] - xs[:, (nn+fn+1):(-nn-fn+1)])
NTV = np.absolute(xs[:, (nn+fn):(-nn-fn)] - xs[:, (nn+fn-1):(-nn-fn-1)])
for n in np.linspace(fn+1, nn-2, num=nn-2):
PTV = PTV + np.absolute(xs[:, (nn+fn+n):(-nn-fn+n)] -
xs[:, (nn+fn+n+1):(-nn-fn+n+1)])
NTV = NTV + np.absolute(xs[:, (nn+fn-n):(-nn-fn-n)] -
xs[:, (nn+fn-n-1):(-nn-fn-n-1)])
if a:
return PTV, NTV
else:
return PTV.T, NTV.T
|
|
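A smoke test of image_tv on a toy matrix, assuming the module above is importable as dipy.denoise.gibbs. It passes nn=2 so that num=nn-2 is zero and the accumulation loop is skipped; with the default nn=3 the loop's np.linspace yields float slice indices, which recent NumPy rejects (casting n to int would be the fix; noted here only as a caveat).

import numpy as np
from dipy.denoise.gibbs import image_tv   # the module defined above

x = np.arange(16, dtype=float).reshape(4, 4)
ptv, ntv = image_tv(x, fn=0, nn=2)        # nn=2 keeps every slice index an int
print(ptv.shape, ntv.shape)               # (4, 4) (4, 4)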
5504aa44a1b47d6533c65c0f5884f11eda06359b
|
tests/rules/test_no_such_file.py
|
tests/rules/test_no_such_file.py
|
import pytest
from thefuck.rules.no_such_file import match, get_new_command
from tests.utils import Command
@pytest.mark.parametrize('command', [
Command(script='mv foo bar/foo', stderr="mv: cannot move 'foo' to 'bar/foo': No such file or directory"),
Command(script='mv foo bar/', stderr="mv: cannot move 'foo' to 'bar/': No such file or directory"),
])
def test_match(command):
assert match(command, None)
@pytest.mark.parametrize('command, new_command', [
(Command(script='mv foo bar/foo', stderr="mv: cannot move 'foo' to 'bar/foo': No such file or directory"), 'mkdir -p bar && mv foo bar/foo'),
(Command(script='mv foo bar/', stderr="mv: cannot move 'foo' to 'bar/': No such file or directory"), 'mkdir -p bar && mv foo bar/'),
])
def test_get_new_command(command, new_command):
assert get_new_command(command, None) == new_command
|
Add tests for the `no_such_file` rule
|
Add tests for the `no_such_file` rule
|
Python
|
mit
|
petr-tichy/thefuck,princeofdarkness76/thefuck,PLNech/thefuck,mlk/thefuck,manashmndl/thefuck,bugaevc/thefuck,thinkerchan/thefuck,subajat1/thefuck,BertieJim/thefuck,suxinde2009/thefuck,ostree/thefuck,ytjiang/thefuck,bigplus/thefuck,mcarton/thefuck,vanita5/thefuck,BertieJim/thefuck,sekaiamber/thefuck,vanita5/thefuck,roth1002/thefuck,ostree/thefuck,hxddh/thefuck,lawrencebenson/thefuck,lawrencebenson/thefuck,MJerty/thefuck,scorphus/thefuck,redreamality/thefuck,barneyElDinosaurio/thefuck,hxddh/thefuck,AntonChankin/thefuck,LawrenceHan/thefuck,qingying5810/thefuck,AntonChankin/thefuck,gogobebe2/thefuck,gaurav9991/thefuck,mbbill/thefuck,bigplus/thefuck,barneyElDinosaurio/thefuck,nvbn/thefuck,redreamality/thefuck,levythu/thefuck,thesoulkiller/thefuck,nwinkler/thefuck,nvbn/thefuck,mcarton/thefuck,roth1002/thefuck,Aeron/thefuck,scorphus/thefuck,beni55/thefuck,SimenB/thefuck,LawrenceHan/thefuck,MJerty/thefuck,PLNech/thefuck,mlk/thefuck,SimenB/thefuck,beni55/thefuck,artiya4u/thefuck,princeofdarkness76/thefuck,zhangzhishan/thefuck,thesoulkiller/thefuck,Clpsplug/thefuck,thinkerchan/thefuck,Clpsplug/thefuck,qrqiuren/thefuck,subajat1/thefuck,NguyenHoaiNam/thefuck,levythu/thefuck,manashmndl/thefuck
|
Add tests for the `no_such_file` rule
|
import pytest
from thefuck.rules.no_such_file import match, get_new_command
from tests.utils import Command
@pytest.mark.parametrize('command', [
Command(script='mv foo bar/foo', stderr="mv: cannot move 'foo' to 'bar/foo': No such file or directory"),
Command(script='mv foo bar/', stderr="mv: cannot move 'foo' to 'bar/': No such file or directory"),
])
def test_match(command):
assert match(command, None)
@pytest.mark.parametrize('command, new_command', [
(Command(script='mv foo bar/foo', stderr="mv: cannot move 'foo' to 'bar/foo': No such file or directory"), 'mkdir -p bar && mv foo bar/foo'),
(Command(script='mv foo bar/', stderr="mv: cannot move 'foo' to 'bar/': No such file or directory"), 'mkdir -p bar && mv foo bar/'),
])
def test_get_new_command(command, new_command):
assert get_new_command(command, None) == new_command
|
<commit_before><commit_msg>Add tests for the `no_such_file` rule<commit_after>
|
import pytest
from thefuck.rules.no_such_file import match, get_new_command
from tests.utils import Command
@pytest.mark.parametrize('command', [
Command(script='mv foo bar/foo', stderr="mv: cannot move 'foo' to 'bar/foo': No such file or directory"),
Command(script='mv foo bar/', stderr="mv: cannot move 'foo' to 'bar/': No such file or directory"),
])
def test_match(command):
assert match(command, None)
@pytest.mark.parametrize('command, new_command', [
(Command(script='mv foo bar/foo', stderr="mv: cannot move 'foo' to 'bar/foo': No such file or directory"), 'mkdir -p bar && mv foo bar/foo'),
(Command(script='mv foo bar/', stderr="mv: cannot move 'foo' to 'bar/': No such file or directory"), 'mkdir -p bar && mv foo bar/'),
])
def test_get_new_command(command, new_command):
assert get_new_command(command, None) == new_command
|
Add tests for the `no_such_file` ruleimport pytest
from thefuck.rules.no_such_file import match, get_new_command
from tests.utils import Command
@pytest.mark.parametrize('command', [
Command(script='mv foo bar/foo', stderr="mv: cannot move 'foo' to 'bar/foo': No such file or directory"),
Command(script='mv foo bar/', stderr="mv: cannot move 'foo' to 'bar/': No such file or directory"),
])
def test_match(command):
assert match(command, None)
@pytest.mark.parametrize('command, new_command', [
(Command(script='mv foo bar/foo', stderr="mv: cannot move 'foo' to 'bar/foo': No such file or directory"), 'mkdir -p bar && mv foo bar/foo'),
(Command(script='mv foo bar/', stderr="mv: cannot move 'foo' to 'bar/': No such file or directory"), 'mkdir -p bar && mv foo bar/'),
])
def test_get_new_command(command, new_command):
assert get_new_command(command, None) == new_command
|
<commit_before><commit_msg>Add tests for the `no_such_file` rule<commit_after>import pytest
from thefuck.rules.no_such_file import match, get_new_command
from tests.utils import Command
@pytest.mark.parametrize('command', [
Command(script='mv foo bar/foo', stderr="mv: cannot move 'foo' to 'bar/foo': No such file or directory"),
Command(script='mv foo bar/', stderr="mv: cannot move 'foo' to 'bar/': No such file or directory"),
])
def test_match(command):
assert match(command, None)
@pytest.mark.parametrize('command, new_command', [
(Command(script='mv foo bar/foo', stderr="mv: cannot move 'foo' to 'bar/foo': No such file or directory"), 'mkdir -p bar && mv foo bar/foo'),
(Command(script='mv foo bar/', stderr="mv: cannot move 'foo' to 'bar/': No such file or directory"), 'mkdir -p bar && mv foo bar/'),
])
def test_get_new_command(command, new_command):
assert get_new_command(command, None) == new_command
|
|
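For context, a minimal sketch of a `no_such_file` rule that would satisfy the tests in this record. This is not thefuck's actual implementation; the MV_PATTERN name and regex are assumptions.
import re
MV_PATTERN = r"mv: cannot move '[^']+' to '([^']+/)[^']*': No such file or directory"
def match(command, settings):
    # Fire only when mv failed because the destination directory is missing.
    return bool(re.search(MV_PATTERN, command.stderr))
def get_new_command(command, settings):
    # Create the missing directory, then rerun the original mv invocation.
    dest_dir = re.search(MV_PATTERN, command.stderr).group(1).rstrip('/')
    return 'mkdir -p {} && {}'.format(dest_dir, command.script)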
fddfccdf1e9092a137aeadfd37a82dd6979205a6
|
proc_opener.py
|
proc_opener.py
|
#!/usr/bin/python
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
# Print out each tab process created during a test run, and the test it was created during.
import sys
import re
import time
procStartPatt = re.compile('^\d\d:\d\d:\d\d\W+INFO - ### XPCOM\_MEM\_BLOAT\_LOG defined -- logging bloat\/leaks to .+runtests\_leaks\_tab\_pid(\d+)\.log')
def testDir(testName):
    return testName.rsplit('/', 1)[0] + '/'
# Parse the input looking for when tests run.
pidTests = {}
currTest = None
for l in sys.stdin:
if l.find("TEST-START") > -1:
currTest = l.split('|')[1].strip()
m = procStartPatt.match(l)
if not m:
continue
currProc = int(m.group(1))
    if currProc in pidTests and testDir(currTest) == testDir(pidTests[currProc][-1]):
# This assumes run-by-dir.
print('WARNING! Possible replay of pid ' + str(currProc) + ' in test dir ' + testDir(currTest))
pidTests.setdefault(currProc, []).append(currTest)
print 'Found proc start:', currProc, currTest
|
Add analyzer for which pids are open during which tests
|
Add analyzer for which pids are open during which tests
|
Python
|
mpl-2.0
|
amccreight/mochitest-logs
|
Add analyzer for which pids are open during which tests
|
#!/usr/bin/python
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
# Print out each tab process created during a test run, and the test it was created during.
import sys
import re
import time
procStartPatt = re.compile('^\d\d:\d\d:\d\d\W+INFO - ### XPCOM\_MEM\_BLOAT\_LOG defined -- logging bloat\/leaks to .+runtests\_leaks\_tab\_pid(\d+)\.log')
def testDir(testName):
    return testName.rsplit('/', 1)[0] + '/'
# Parse the input looking for when tests run.
pidTests = {}
currTest = None
for l in sys.stdin:
if l.find("TEST-START") > -1:
currTest = l.split('|')[1].strip()
m = procStartPatt.match(l)
if not m:
continue
currProc = int(m.group(1))
    if currProc in pidTests and testDir(currTest) == testDir(pidTests[currProc][-1]):
# This assumes run-by-dir.
print('WARNING! Possible replay of pid ' + str(currProc) + ' in test dir ' + testDir(currTest))
pidTests.setdefault(currProc, []).append(currTest)
print 'Found proc start:', currProc, currTest
|
<commit_before><commit_msg>Add analyzer for which pids are open during which tests<commit_after>
|
#!/usr/bin/python
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
# Print out each tab process created during a test run, and the test it was created during.
import sys
import re
import time
procStartPatt = re.compile('^\d\d:\d\d:\d\d\W+INFO - ### XPCOM\_MEM\_BLOAT\_LOG defined -- logging bloat\/leaks to .+runtests\_leaks\_tab\_pid(\d+)\.log')
def testDir(testName):
    return testName.rsplit('/', 1)[0] + '/'
# Parse the input looking for when tests run.
pidTests = {}
currTest = None
for l in sys.stdin:
if l.find("TEST-START") > -1:
currTest = l.split('|')[1].strip()
m = procStartPatt.match(l)
if not m:
continue
currProc = int(m.group(1))
    if currProc in pidTests and testDir(currTest) == testDir(pidTests[currProc][-1]):
# This assumes run-by-dir.
print('WARNING! Possible replay of pid ' + str(currProc) + ' in test dir ' + testDir(currTest))
pidTests.setdefault(currProc, []).append(currTest)
print 'Found proc start:', currProc, currTest
|
Add analyzer for which pids are open during which tests#!/usr/bin/python
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
# Print out each tab process created during a test run, and the test it was created during.
import sys
import re
import time
procStartPatt = re.compile('^\d\d:\d\d:\d\d\W+INFO - ### XPCOM\_MEM\_BLOAT\_LOG defined -- logging bloat\/leaks to .+runtests\_leaks\_tab\_pid(\d+)\.log')
def testDir(testName):
    return testName.rsplit('/', 1)[0] + '/'
# Parse the input looking for when tests run.
pidTests = {}
currTest = None
for l in sys.stdin:
if l.find("TEST-START") > -1:
currTest = l.split('|')[1].strip()
m = procStartPatt.match(l)
if not m:
continue
currProc = int(m.group(1))
    if currProc in pidTests and testDir(currTest) == testDir(pidTests[currProc][-1]):
# This assumes run-by-dir.
print('WARNING! Possible replay of pid ' + str(currProc) + ' in test dir ' + testDir(currTest))
pidTests.setdefault(currProc, []).append(currTest)
print 'Found proc start:', currProc, currTest
|
<commit_before><commit_msg>Add analyzer for which pids are open during which tests<commit_after>#!/usr/bin/python
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
# Print out each tab process created during a test run, and the test it was created during.
import sys
import re
import time
procStartPatt = re.compile('^\d\d:\d\d:\d\d\W+INFO - ### XPCOM\_MEM\_BLOAT\_LOG defined -- logging bloat\/leaks to .+runtests\_leaks\_tab\_pid(\d+)\.log')
def testDir(testName):
    return testName.rsplit('/', 1)[0] + '/'
# Parse the input looking for when tests run.
pidTests = {}
currTest = None
for l in sys.stdin:
if l.find("TEST-START") > -1:
currTest = l.split('|')[1].strip()
m = procStartPatt.match(l)
if not m:
continue
currProc = int(m.group(1))
    if currProc in pidTests and testDir(currTest) == testDir(pidTests[currProc][-1]):
# This assumes run-by-dir.
print('WARNING! Possible replay of pid ' + str(currProc) + ' in test dir ' + testDir(currTest))
pidTests.setdefault(currProc, []).append(currTest)
print 'Found proc start:', currProc, currTest
|
|
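A quick sanity check for the bloat-log regex in the record above; the timestamp, path, and pid in the sample line are fabricated:
import re
procStartPatt = re.compile(r'^\d\d:\d\d:\d\d\W+INFO - ### XPCOM\_MEM\_BLOAT\_LOG defined -- logging bloat\/leaks to .+runtests\_leaks\_tab\_pid(\d+)\.log')
sample = '12:00:01 INFO - ### XPCOM_MEM_BLOAT_LOG defined -- logging bloat/leaks to /tmp/runtests_leaks_tab_pid1234.log'
m = procStartPatt.match(sample)
assert m is not None and m.group(1) == '1234'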
4232c1160008201d8d06f447695ad316a0c4a416
|
examples/python_csq_xml_example.py
|
examples/python_csq_xml_example.py
|
__author__ = 'Dimitry Lvovsky'
# Copyright 2014 ReviewRank S.A ( ReviewPro )
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from connect_py import Connect
def main():
connect = Connect.Connect("<your api_key>", "<your shared sec>")
j = '<product id="102531" source="LimeSurvey CSQ Hotel: "><review><author>SAMPLE AUTHOR NAME</author><text></text><language>es</language><review_id>123456</review_id><date_of_stay date_format="dd/MM/yyyy">10/12/2013</date_of_stay><email>sampleaddress@example.es</email><reservation_source>ABC</reservation_source><overall value="100" out_of="100"/><service value="100" out_of="100"/><cleanliness value="100" out_of="100"/><location value="100" out_of="100"/><value value="100" out_of="100"/><gastronomy value="100" out_of="100"/><room value="100" out_of="100"/><reception value="100" out_of="100"/></review></product>'
resp = connect.pushCSQXML(j, 102531)
print resp.status_code
if __name__ == "__main__":
main()
|
Add example for CSQ XML
|
Add example for CSQ XML
|
Python
|
apache-2.0
|
reviewpro/api_connect,reviewpro/api_connect,reviewpro/connect_py,reviewpro/connect_py
|
Add example for CSQ XML
|
__author__ = 'Dimitry Lvovsky'
# Copyright 2014 ReviewRank S.A ( ReviewPro )
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from connect_py import Connect
def main():
connect = Connect.Connect("<your api_key>", "<your shared sec>")
j = '<product id="102531" source="LimeSurvey CSQ Hotel: "><review><author>SAMPLE AUTHOR NAME</author><text></text><language>es</language><review_id>123456</review_id><date_of_stay date_format="dd/MM/yyyy">10/12/2013</date_of_stay><email>sampleaddress@example.es</email><reservation_source>ABC</reservation_source><overall value="100" out_of="100"/><service value="100" out_of="100"/><cleanliness value="100" out_of="100"/><location value="100" out_of="100"/><value value="100" out_of="100"/><gastronomy value="100" out_of="100"/><room value="100" out_of="100"/><reception value="100" out_of="100"/></review></product>'
resp = connect.pushCSQXML(j, 102531)
print resp.status_code
if __name__ == "__main__":
main()
|
<commit_before><commit_msg>Add example for CSQ XML<commit_after>
|
__author__ = 'Dimitry Lvovsky'
# Copyright 2014 ReviewRank S.A ( ReviewPro )
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from connect_py import Connect
def main():
connect = Connect.Connect("<your api_key>", "<your shared sec>")
j = '<product id="102531" source="LimeSurvey CSQ Hotel: "><review><author>SAMPLE AUTHOR NAME</author><text></text><language>es</language><review_id>123456</review_id><date_of_stay date_format="dd/MM/yyyy">10/12/2013</date_of_stay><email>sampleaddress@example.es</email><reservation_source>ABC</reservation_source><overall value="100" out_of="100"/><service value="100" out_of="100"/><cleanliness value="100" out_of="100"/><location value="100" out_of="100"/><value value="100" out_of="100"/><gastronomy value="100" out_of="100"/><room value="100" out_of="100"/><reception value="100" out_of="100"/></review></product>'
resp = connect.pushCSQXML(j, 102531)
print resp.status_code
if __name__ == "__main__":
main()
|
Add example for CSQ XML__author__ = 'Dimitry Lvovsky'
# Copyright 2014 ReviewRank S.A ( ReviewPro )
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from connect_py import Connect
def main():
connect = Connect.Connect("<your api_key>", "<your shared sec>")
j = '<product id="102531" source="LimeSurvey CSQ Hotel: "><review><author>SAMPLE AUTHOR NAME</author><text></text><language>es</language><review_id>123456</review_id><date_of_stay date_format="dd/MM/yyyy">10/12/2013</date_of_stay><email>sampleaddress@example.es</email><reservation_source>ABC</reservation_source><overall value="100" out_of="100"/><service value="100" out_of="100"/><cleanliness value="100" out_of="100"/><location value="100" out_of="100"/><value value="100" out_of="100"/><gastronomy value="100" out_of="100"/><room value="100" out_of="100"/><reception value="100" out_of="100"/></review></product>'
resp = connect.pushCSQXML(j, 102531)
print resp.status_code
if __name__ == "__main__":
main()
|
<commit_before><commit_msg>Add example for CSQ XML<commit_after>__author__ = 'Dimitry Lvovsky'
# Copyright 2014 ReviewRank S.A ( ReviewPro )
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from connect_py import Connect
def main():
connect = Connect.Connect("<your api_key>", "<your shared sec>")
j = '<product id="102531" source="LimeSurvey CSQ Hotel: "><review><author>SAMPLE AUTHOR NAME</author><text></text><language>es</language><review_id>123456</review_id><date_of_stay date_format="dd/MM/yyyy">10/12/2013</date_of_stay><email>sampleaddress@example.es</email><reservation_source>ABC</reservation_source><overall value="100" out_of="100"/><service value="100" out_of="100"/><cleanliness value="100" out_of="100"/><location value="100" out_of="100"/><value value="100" out_of="100"/><gastronomy value="100" out_of="100"/><room value="100" out_of="100"/><reception value="100" out_of="100"/></review></product>'
resp = connect.pushCSQXML(j, 102531)
print resp.status_code
if __name__ == "__main__":
main()
|
|
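For larger payloads, building the CSQ XML with ElementTree is less error-prone than one long literal string; a sketch reusing a few element names from the example above (all values remain samples):
import xml.etree.ElementTree as ET
product = ET.Element('product', id='102531', source='LimeSurvey CSQ Hotel: ')
review = ET.SubElement(product, 'review')
ET.SubElement(review, 'author').text = 'SAMPLE AUTHOR NAME'
ET.SubElement(review, 'language').text = 'es'
ET.SubElement(review, 'review_id').text = '123456'
ET.SubElement(review, 'overall', value='100', out_of='100')
xml_payload = ET.tostring(product)  # pass to connect.pushCSQXML(xml_payload, 102531)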
d3211f577e4c7f4d99f3c7bed8178310f9a48b83
|
tests/test_lesson_1_greetings.py
|
tests/test_lesson_1_greetings.py
|
# This lesson teaches you how to define a function that takes an input, and then uses
# it to alter the output of that function.
# Much like last time, run your python command in your terminal to see your first failure.
# `python -m unittest -vf tests.test_lesson_1_greetings`
# Again, like last time, you should see an import error indicating that you need a file titled
# `greetings` in your lesson folder.
# This challenge is very similar to the last one, but instead of always outputting the same string - "Hello World!",
# the output will change based on the input you give to the function.
import unittest
from lessons.lesson_1_greetings import greetings
class GreetingsTestCase(unittest.TestCase):
def test_greetings_function_exists(self):
func = greetings.greetings
self.assertIsNotNone(func)
def test_greetings_function_with_input(self):
greet = greetings.greetings("Amy")
self.assertEqual(greet, "Hi, Amy!")
    def test_greetings_function_with_another_input(self):
greet = greetings.greetings("Belle")
self.assertEqual(greet, "Hi, Belle!")
|
Add tests for lesson 1: greetings
|
Add tests for lesson 1: greetings
|
Python
|
mit
|
thejessleigh/test_driven_python,thejessleigh/test_driven_python,thejessleigh/test_driven_python
|
Add tests for lesson 1: greetings
|
# This lesson teaches you how to define a function that takes an input, and then uses
# it to alter the output of that function.
# Much like last time, run your python command in your terminal to see your first failure.
# `python -m unittest -vf tests.test_lesson_1_greetings`
# Again, like last time, you should see an import error indicating that you need a file titled
# `greetings` in your lesson folder.
# This challenge is very similar to the last one, but instead of always outputting the same string - "Hello World!",
# the output will change based on the input you give to the function.
import unittest
from lessons.lesson_1_greetings import greetings
class GreetingsTestCase(unittest.TestCase):
def test_greetings_function_exists(self):
func = greetings.greetings
self.assertIsNotNone(func)
def test_greetings_function_with_input(self):
greet = greetings.greetings("Amy")
self.assertEqual(greet, "Hi, Amy!")
    def test_greetings_function_with_another_input(self):
greet = greetings.greetings("Belle")
self.assertEqual(greet, "Hi, Belle!")
|
<commit_before><commit_msg>Add tests for lesson 1: greetings<commit_after>
|
# This lesson teaches you how to define a function that takes an input, and then uses
# it to alter the output of that function.
# Much like last time, run your python command in your terminal to see your first failure.
# `python -m unittest -vf tests.test_lesson_1_greetings`
# Again, like last time, you should see an import error indicating that you need a file titled
# `greetings` in your lesson folder.
# This challenge is very similar to the last one, but instead of always outputting the same string - "Hello World!",
# the output will change based on the input you give to the function.
import unittest
from lessons.lesson_1_greetings import greetings
class GreetingsTestCase(unittest.TestCase):
def test_greetings_function_exists(self):
func = greetings.greetings
self.assertIsNotNone(func)
def test_greetings_function_with_input(self):
greet = greetings.greetings("Amy")
self.assertEqual(greet, "Hi, Amy!")
    def test_greetings_function_with_another_input(self):
greet = greetings.greetings("Belle")
self.assertEqual(greet, "Hi, Belle!")
|
Add tests for lesson 1: greetings# This lesson teaches you how to define a function that takes an input, and then uses
# it to alter the output of that function.
# Much like last time, run your python command in your terminal to see your first failure.
# `python -m unittest -vf tests.test_lesson_1_greetings`
# Again, like last time, you should see an import error indicating that you need a file titled
# `greetings` in your lesson folder.
# This challenge is very similar to the last one, but instead of always outputting the same string - "Hello World!",
# the output will change based on the input you give to the function.
import unittest
from lessons.lesson_1_greetings import greetings
class GreetingsTestCase(unittest.TestCase):
def test_greetings_function_exists(self):
func = greetings.greetings
self.assertIsNotNone(func)
def test_greetings_function_with_input(self):
greet = greetings.greetings("Amy")
self.assertEqual(greet, "Hi, Amy!")
    def test_greetings_function_with_another_input(self):
greet = greetings.greetings("Belle")
self.assertEqual(greet, "Hi, Belle!")
|
<commit_before><commit_msg>Add tests for lesson 1: greetings<commit_after># This lesson teaches you how to define a function that takes an input, and then uses
# it to alter the output of that function.
# Much like last time, run your python command in your terminal to see your first failure.
# `python -m unittest -vf tests.test_lesson_1_greetings`
# Again, like last time, you should see an import error indicating that you need a file titled
# `greetings` in your lesson folder.
# This challenge is very similar to the last one, but instead of always outputting the same string - "Hello World!",
# the output will change based on the input you give to the function.
import unittest
from lessons.lesson_1_greetings import greetings
class GreetingsTestCase(unittest.TestCase):
def test_greetings_function_exists(self):
func = greetings.greetings
self.assertIsNotNone(func)
def test_greetings_function_with_input(self):
greet = greetings.greetings("Amy")
self.assertEqual(greet, "Hi, Amy!")
    def test_greetings_function_with_another_input(self):
greet = greetings.greetings("Belle")
self.assertEqual(greet, "Hi, Belle!")
|
|
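One possible lessons/lesson_1_greetings/greetings.py that satisfies the tests in this record (learners are expected to write their own):
def greetings(name):
    # Interpolate the supplied name into the greeting the tests expect.
    return "Hi, {}!".format(name)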
e8f8b08ffb011ed705701f40c6a1a952c13d7c41
|
analytics/test_analytics.py
|
analytics/test_analytics.py
|
# -*- encoding: utf-8
import pytest
from reports import NGINX_LOG_REGEX
@pytest.mark.parametrize('log_line', [
# Unusual methods
'0.0.0.0 - - [01/Jan/2001:00:00:00 +0000] "HEAD /example HTTP/1.0" 200 0 "http://referrer.org" "Example user agent" "1.2.3.4"',
'0.0.0.0 - - [01/Jan/2001:00:00:00 +0000] "OPTIONS /example HTTP/1.0" 200 0 "http://referrer.org" "Example user agent" "1.2.3.4"',
# Referrer is empty
'0.0.0.0 - - [01/Jan/2001:00:00:00 +0000] "GET /example HTTP/1.0" 200 0 "" "Example user agent" "1.2.3.4"',
])
def test_nginx_regex(log_line):
assert NGINX_LOG_REGEX.match(log_line) is not None
|
Add a few tests for the analytics code
|
Add a few tests for the analytics code
|
Python
|
mit
|
alexwlchan/alexwlchan.net,alexwlchan/alexwlchan.net,alexwlchan/alexwlchan.net,alexwlchan/alexwlchan.net,alexwlchan/alexwlchan.net
|
Add a few tests for the analytics code
|
# -*- encoding: utf-8
import pytest
from reports import NGINX_LOG_REGEX
@pytest.mark.parametrize('log_line', [
# Unusual methods
'0.0.0.0 - - [01/Jan/2001:00:00:00 +0000] "HEAD /example HTTP/1.0" 200 0 "http://referrer.org" "Example user agent" "1.2.3.4"',
'0.0.0.0 - - [01/Jan/2001:00:00:00 +0000] "OPTIONS /example HTTP/1.0" 200 0 "http://referrer.org" "Example user agent" "1.2.3.4"',
# Referrer is empty
'0.0.0.0 - - [01/Jan/2001:00:00:00 +0000] "GET /example HTTP/1.0" 200 0 "" "Example user agent" "1.2.3.4"',
])
def test_nginx_regex(log_line):
assert NGINX_LOG_REGEX.match(log_line) is not None
|
<commit_before><commit_msg>Add a few tests for the analytics code<commit_after>
|
# -*- encoding: utf-8
import pytest
from reports import NGINX_LOG_REGEX
@pytest.mark.parametrize('log_line', [
# Unusual methods
'0.0.0.0 - - [01/Jan/2001:00:00:00 +0000] "HEAD /example HTTP/1.0" 200 0 "http://referrer.org" "Example user agent" "1.2.3.4"',
'0.0.0.0 - - [01/Jan/2001:00:00:00 +0000] "OPTIONS /example HTTP/1.0" 200 0 "http://referrer.org" "Example user agent" "1.2.3.4"',
# Referrer is empty
'0.0.0.0 - - [01/Jan/2001:00:00:00 +0000] "GET /example HTTP/1.0" 200 0 "" "Example user agent" "1.2.3.4"',
])
def test_nginx_regex(log_line):
assert NGINX_LOG_REGEX.match(log_line) is not None
|
Add a few tests for the analytics code# -*- encoding: utf-8
import pytest
from reports import NGINX_LOG_REGEX
@pytest.mark.parametrize('log_line', [
# Unusual methods
'0.0.0.0 - - [01/Jan/2001:00:00:00 +0000] "HEAD /example HTTP/1.0" 200 0 "http://referrer.org" "Example user agent" "1.2.3.4"',
'0.0.0.0 - - [01/Jan/2001:00:00:00 +0000] "OPTIONS /example HTTP/1.0" 200 0 "http://referrer.org" "Example user agent" "1.2.3.4"',
# Referrer is empty
'0.0.0.0 - - [01/Jan/2001:00:00:00 +0000] "GET /example HTTP/1.0" 200 0 "" "Example user agent" "1.2.3.4"',
])
def test_nginx_regex(log_line):
assert NGINX_LOG_REGEX.match(log_line) is not None
|
<commit_before><commit_msg>Add a few tests for the analytics code<commit_after># -*- encoding: utf-8
import pytest
from reports import NGINX_LOG_REGEX
@pytest.mark.parametrize('log_line', [
# Unusual methods
'0.0.0.0 - - [01/Jan/2001:00:00:00 +0000] "HEAD /example HTTP/1.0" 200 0 "http://referrer.org" "Example user agent" "1.2.3.4"',
'0.0.0.0 - - [01/Jan/2001:00:00:00 +0000] "OPTIONS /example HTTP/1.0" 200 0 "http://referrer.org" "Example user agent" "1.2.3.4"',
# Referrer is empty
'0.0.0.0 - - [01/Jan/2001:00:00:00 +0000] "GET /example HTTP/1.0" 200 0 "" "Example user agent" "1.2.3.4"',
])
def test_nginx_regex(log_line):
assert NGINX_LOG_REGEX.match(log_line) is not None
|
|
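The real NGINX_LOG_REGEX lives in the site's reports module; below is a sketch of a combined-log pattern that the three parametrized lines above would match. The group names are assumptions:
import re
NGINX_LOG_REGEX = re.compile(
    r'(?P<ip>\S+) - (?P<user>\S+) \[(?P<time>[^\]]+)\] '
    r'"(?P<method>[A-Z]+) (?P<path>\S+) HTTP/[\d.]+" '
    r'(?P<status>\d+) (?P<bytes>\d+) '
    r'"(?P<referrer>[^"]*)" "(?P<agent>[^"]*)" "(?P<forwarded>[^"]*)"'
)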
8f15591e90945b780a3a900f6eda7ffd3c712d1d
|
tools/3.7-migration.py
|
tools/3.7-migration.py
|
import argparse
import ConfigParser
import os
from qisys import ui
import qisys.parsers
import qisys.sh
import qisys.qixml
import qitoolchain.toolchain
def get_old_toolchains():
""" Return a dict name -> feed from the previous config format
"""
res = dict()
cfg_path = qisys.sh.get_config_path("qi", "toolchains.cfg")
config = ConfigParser.ConfigParser()
config.read(cfg_path)
if not config.has_section("toolchains"):
return res
tc_items = config.items("toolchains")
for name, value in tc_items:
res[name] = value
return res
def recreate_toolchains():
old_toolchains = get_old_toolchains()
old_names = old_toolchains.keys()
old_names.sort()
errors = list()
for i, name in enumerate(old_names):
n = len(old_names)
ui.info(ui.bold, "[ %d on %d ]" % (i+1, n), name)
feed_url = old_toolchains[name]
if feed_url:
toolchain = qitoolchain.toolchain.Toolchain(name)
try:
toolchain.update(feed_url=feed_url)
except Exception as e:
errors.append((name, e))
if errors:
ui.error("Could not update some toolchains")
for name, error in errors:
ui.error(" * ", name, error)
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--no-toolchains", action="store_false", dest="toolchains",
help="Do not try to recreate the toolchains")
parser.add_argument("--no-backup", action="store_false", dest="backup",
help="Do not backup build profiles")
parser.set_defaults(toolchains=True, backup=True)
qisys.parsers.worktree_parser(parser)
args = parser.parse_args()
worktree = qisys.parsers.get_worktree(args)
ui.info(ui.bold, "Starting 3.7 migration")
if args.toolchains:
ui.info(ui.bold, ":: Re-creating toolchains ...")
recreate_toolchains()
ui.info(ui.bold, ":: Removing build profiles ...")
qibuild_xml_path = os.path.join(worktree.dot_qi, "qibuild.xml")
tree = qisys.qixml.read(qibuild_xml_path)
root = tree.getroot()
profiles = tree.find("profiles")
if profiles is not None:
if args.backup:
profiles.tag = "profiles.back"
else:
root.remove(profiles)
qisys.qixml.write(root, qibuild_xml_path)
if __name__ == "__main__":
main()
|
Add a tool to ease 3.7 migration
|
Add a tool to ease 3.7 migration
Change-Id: I0fcb3a1dd3bffe719f1abfa45a328640002affa0
Reviewed-on: http://gerrit.aldebaran.lan/49630
Reviewed-by: dmerejkowsky <1e4b13000fa8777df4cefd2fd8215187e3deb062@aldebaran-robotics.com>
Tested-by: dmerejkowsky <1e4b13000fa8777df4cefd2fd8215187e3deb062@aldebaran-robotics.com>
|
Python
|
bsd-3-clause
|
dmerejkowsky/qibuild,dmerejkowsky/qibuild,dmerejkowsky/qibuild,aldebaran/qibuild,aldebaran/qibuild,dmerejkowsky/qibuild,aldebaran/qibuild,dmerejkowsky/qibuild,aldebaran/qibuild
|
Add a tool to ease 3.7 migration
Change-Id: I0fcb3a1dd3bffe719f1abfa45a328640002affa0
Reviewed-on: http://gerrit.aldebaran.lan/49630
Reviewed-by: dmerejkowsky <1e4b13000fa8777df4cefd2fd8215187e3deb062@aldebaran-robotics.com>
Tested-by: dmerejkowsky <1e4b13000fa8777df4cefd2fd8215187e3deb062@aldebaran-robotics.com>
|
import argparse
import ConfigParser
import os
from qisys import ui
import qisys.parsers
import qisys.sh
import qisys.qixml
import qitoolchain.toolchain
def get_old_toolchains():
""" Return a dict name -> feed from the previous config format
"""
res = dict()
cfg_path = qisys.sh.get_config_path("qi", "toolchains.cfg")
config = ConfigParser.ConfigParser()
config.read(cfg_path)
if not config.has_section("toolchains"):
return res
tc_items = config.items("toolchains")
for name, value in tc_items:
res[name] = value
return res
def recreate_toolchains():
old_toolchains = get_old_toolchains()
old_names = old_toolchains.keys()
old_names.sort()
errors = list()
for i, name in enumerate(old_names):
n = len(old_names)
ui.info(ui.bold, "[ %d on %d ]" % (i+1, n), name)
feed_url = old_toolchains[name]
if feed_url:
toolchain = qitoolchain.toolchain.Toolchain(name)
try:
toolchain.update(feed_url=feed_url)
except Exception as e:
errors.append((name, e))
if errors:
ui.error("Could not update some toolchains")
for name, error in errors:
ui.error(" * ", name, error)
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--no-toolchains", action="store_false", dest="toolchains",
help="Do not try to recreate the toolchains")
parser.add_argument("--no-backup", action="store_false", dest="backup",
help="Do not backup build profiles")
parser.set_defaults(toolchains=True, backup=True)
qisys.parsers.worktree_parser(parser)
args = parser.parse_args()
worktree = qisys.parsers.get_worktree(args)
ui.info(ui.bold, "Starting 3.7 migration")
if args.toolchains:
ui.info(ui.bold, ":: Re-creating toolchains ...")
recreate_toolchains()
ui.info(ui.bold, ":: Removing build profiles ...")
qibuild_xml_path = os.path.join(worktree.dot_qi, "qibuild.xml")
tree = qisys.qixml.read(qibuild_xml_path)
root = tree.getroot()
profiles = tree.find("profiles")
if profiles is not None:
if args.backup:
profiles.tag = "profiles.back"
else:
root.remove(profiles)
qisys.qixml.write(root, qibuild_xml_path)
if __name__ == "__main__":
main()
|
<commit_before><commit_msg>Add a tool to ease 3.7 migration
Change-Id: I0fcb3a1dd3bffe719f1abfa45a328640002affa0
Reviewed-on: http://gerrit.aldebaran.lan/49630
Reviewed-by: dmerejkowsky <1e4b13000fa8777df4cefd2fd8215187e3deb062@aldebaran-robotics.com>
Tested-by: dmerejkowsky <1e4b13000fa8777df4cefd2fd8215187e3deb062@aldebaran-robotics.com><commit_after>
|
import argparse
import ConfigParser
import os
from qisys import ui
import qisys.parsers
import qisys.sh
import qisys.qixml
import qitoolchain.toolchain
def get_old_toolchains():
""" Return a dict name -> feed from the previous config format
"""
res = dict()
cfg_path = qisys.sh.get_config_path("qi", "toolchains.cfg")
config = ConfigParser.ConfigParser()
config.read(cfg_path)
if not config.has_section("toolchains"):
return res
tc_items = config.items("toolchains")
for name, value in tc_items:
res[name] = value
return res
def recreate_toolchains():
old_toolchains = get_old_toolchains()
old_names = old_toolchains.keys()
old_names.sort()
errors = list()
for i, name in enumerate(old_names):
n = len(old_names)
ui.info(ui.bold, "[ %d on %d ]" % (i+1, n), name)
feed_url = old_toolchains[name]
if feed_url:
toolchain = qitoolchain.toolchain.Toolchain(name)
try:
toolchain.update(feed_url=feed_url)
except Exception as e:
errors.append((name, e))
if errors:
ui.error("Could not update some toolchains")
for name, error in errors:
ui.error(" * ", name, error)
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--no-toolchains", action="store_false", dest="toolchains",
help="Do not try to recreate the toolchains")
parser.add_argument("--no-backup", action="store_false", dest="backup",
help="Do not backup build profiles")
parser.set_defaults(toolchains=True, backup=True)
qisys.parsers.worktree_parser(parser)
args = parser.parse_args()
worktree = qisys.parsers.get_worktree(args)
ui.info(ui.bold, "Starting 3.7 migration")
if args.toolchains:
ui.info(ui.bold, ":: Re-creating toolchains ...")
recreate_toolchains()
ui.info(ui.bold, ":: Removing build profiles ...")
qibuild_xml_path = os.path.join(worktree.dot_qi, "qibuild.xml")
tree = qisys.qixml.read(qibuild_xml_path)
root = tree.getroot()
profiles = tree.find("profiles")
if profiles is not None:
if args.backup:
profiles.tag = "profiles.back"
else:
root.remove(profiles)
qisys.qixml.write(root, qibuild_xml_path)
if __name__ == "__main__":
main()
|
Add a tool to ease 3.7 migration
Change-Id: I0fcb3a1dd3bffe719f1abfa45a328640002affa0
Reviewed-on: http://gerrit.aldebaran.lan/49630
Reviewed-by: dmerejkowsky <1e4b13000fa8777df4cefd2fd8215187e3deb062@aldebaran-robotics.com>
Tested-by: dmerejkowsky <1e4b13000fa8777df4cefd2fd8215187e3deb062@aldebaran-robotics.com>import argparse
import ConfigParser
import os
from qisys import ui
import qisys.parsers
import qisys.sh
import qisys.qixml
import qitoolchain.toolchain
def get_old_toolchains():
""" Return a dict name -> feed from the previous config format
"""
res = dict()
cfg_path = qisys.sh.get_config_path("qi", "toolchains.cfg")
config = ConfigParser.ConfigParser()
config.read(cfg_path)
if not config.has_section("toolchains"):
return res
tc_items = config.items("toolchains")
for name, value in tc_items:
res[name] = value
return res
def recreate_toolchains():
old_toolchains = get_old_toolchains()
old_names = old_toolchains.keys()
old_names.sort()
errors = list()
for i, name in enumerate(old_names):
n = len(old_names)
ui.info(ui.bold, "[ %d on %d ]" % (i+1, n), name)
feed_url = old_toolchains[name]
if feed_url:
toolchain = qitoolchain.toolchain.Toolchain(name)
try:
toolchain.update(feed_url=feed_url)
except Exception as e:
errors.append((name, e))
if errors:
ui.error("Could not update some toolchains")
for name, error in errors:
ui.error(" * ", name, error)
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--no-toolchains", action="store_false", dest="toolchains",
help="Do not try to recreate the toolchains")
parser.add_argument("--no-backup", action="store_false", dest="backup",
help="Do not backup build profiles")
parser.set_defaults(toolchains=True, backup=True)
qisys.parsers.worktree_parser(parser)
args = parser.parse_args()
worktree = qisys.parsers.get_worktree(args)
ui.info(ui.bold, "Starting 3.7 migration")
if args.toolchains:
ui.info(ui.bold, ":: Re-creating toolchains ...")
recreate_toolchains()
ui.info(ui.bold, ":: Removing build profiles ...")
qibuild_xml_path = os.path.join(worktree.dot_qi, "qibuild.xml")
tree = qisys.qixml.read(qibuild_xml_path)
root = tree.getroot()
profiles = tree.find("profiles")
if profiles is not None:
if args.backup:
profiles.tag = "profiles.back"
else:
root.remove(profiles)
qisys.qixml.write(root, qibuild_xml_path)
if __name__ == "__main__":
main()
|
<commit_before><commit_msg>Add a tool to ease 3.7 migration
Change-Id: I0fcb3a1dd3bffe719f1abfa45a328640002affa0
Reviewed-on: http://gerrit.aldebaran.lan/49630
Reviewed-by: dmerejkowsky <1e4b13000fa8777df4cefd2fd8215187e3deb062@aldebaran-robotics.com>
Tested-by: dmerejkowsky <1e4b13000fa8777df4cefd2fd8215187e3deb062@aldebaran-robotics.com><commit_after>import argparse
import ConfigParser
import os
from qisys import ui
import qisys.parsers
import qisys.sh
import qisys.qixml
import qitoolchain.toolchain
def get_old_toolchains():
""" Return a dict name -> feed from the previous config format
"""
res = dict()
cfg_path = qisys.sh.get_config_path("qi", "toolchains.cfg")
config = ConfigParser.ConfigParser()
config.read(cfg_path)
if not config.has_section("toolchains"):
return res
tc_items = config.items("toolchains")
for name, value in tc_items:
res[name] = value
return res
def recreate_toolchains():
old_toolchains = get_old_toolchains()
old_names = old_toolchains.keys()
old_names.sort()
errors = list()
for i, name in enumerate(old_names):
n = len(old_names)
ui.info(ui.bold, "[ %d on %d ]" % (i+1, n), name)
feed_url = old_toolchains[name]
if feed_url:
toolchain = qitoolchain.toolchain.Toolchain(name)
try:
toolchain.update(feed_url=feed_url)
except Exception as e:
errors.append((name, e))
if errors:
ui.error("Could not update some toolchains")
for name, error in errors:
ui.error(" * ", name, error)
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--no-toolchains", action="store_false", dest="toolchains",
help="Do not try to recreate the toolchains")
parser.add_argument("--no-backup", action="store_false", dest="backup",
help="Do not backup build profiles")
parser.set_defaults(toolchains=True, backup=True)
qisys.parsers.worktree_parser(parser)
args = parser.parse_args()
worktree = qisys.parsers.get_worktree(args)
ui.info(ui.bold, "Starting 3.7 migration")
if args.toolchains:
ui.info(ui.bold, ":: Re-creating toolchains ...")
recreate_toolchains()
ui.info(ui.bold, ":: Removing build profiles ...")
qibuild_xml_path = os.path.join(worktree.dot_qi, "qibuild.xml")
tree = qisys.qixml.read(qibuild_xml_path)
root = tree.getroot()
profiles = tree.find("profiles")
if profiles is not None:
if args.backup:
profiles.tag = "profiles.back"
else:
root.remove(profiles)
qisys.qixml.write(root, qibuild_xml_path)
if __name__ == "__main__":
main()
|
|
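For reference, a fabricated example of the pre-3.7 toolchains.cfg format that get_old_toolchains() parses; names and feed URLs are placeholders, and an entry with an empty value is skipped by recreate_toolchains():
[toolchains]
linux64 = http://example.com/feeds/linux64.xml
local-only =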
4f2a7c64e8871b05679b71d2f79b9525c3ed40dd
|
dev/benchmark_for_elasticsearch.py
|
dev/benchmark_for_elasticsearch.py
|
# Simple benchmark script for searching similar strings with Elasticsearch instead of SimString.
# Since Elasticsearch uses Apache Lucene's TF/IDF-based scoring, its text search serves a different purpose from this library's.
from elasticsearch import Elasticsearch
from benchmarker import Benchmarker
es = Elasticsearch('http://localhost:9200/')
SEARCH_COUNT_LIMIT = 10**4
index = 'simstring'
type = 'sample'
path = './dev/data/company_names.txt'
number_of_lines = len(open(path).readlines())
with Benchmarker(width=20) as bench:
with open(path, 'r') as lines:
for i, line in enumerate(lines):
strings = line.rstrip('\r\n')
res = es.index(index=index, doc_type=type, id=i, body={'strings': line})
@bench("search text({0} times)".format(min(number_of_lines, SEARCH_COUNT_LIMIT)))
def _(bm):
with open(path, 'r') as lines:
for i, line in enumerate(lines):
strings = line.rstrip('\r\n')
res = es.search(index=index, body={"query": {"match": {'strings': strings}}, "min_score": 20})
# print(strings)
# print("Got %d Hits:" % res['hits']['total'])
# for hit in res['hits']['hits']:
# print(hit)
# $ python dev/benchmark_for_elasticsearch.py
# ## benchmarker: release 4.0.1 (for python)
# ## python version: 3.5.5
# ## python compiler: GCC 4.2.1 Compatible Clang 4.0.1 (tags/RELEASE_401/final)
# ## python platform: Darwin-17.6.0-x86_64-i386-64bit
# ## python executable: /usr/local/miniconda3/envs/myenv/bin/python
# ## cpu model: Intel(R) Core(TM) i7-6567U CPU @ 3.30GHz
# ## parameters: loop=1, cycle=1, extra=0
#
# ## real (total = user + sys)
# search text(5797 times) 18.0541 4.9900 4.6500 0.3400
#
# ## Ranking real
# search text(5797 times) 18.0541 (100.0) ********************
#
# ## Matrix real [01]
# [01] search text(5797 times) 18.0541 100.0
|
Add benchmark script for elasticsearch
|
Add benchmark script for elasticsearch
|
Python
|
mit
|
nullnull/simstring
|
Add benchmark script for elasticsearch
|
# Simple benchmark script for searching similar strings with Elasticsearch instead of SimString.
# Since Elasticsearch uses Apache Lucene's TF/IDF-based scoring, its text search serves a different purpose from this library's.
from elasticsearch import Elasticsearch
from benchmarker import Benchmarker
es = Elasticsearch('http://localhost:9200/')
SEARCH_COUNT_LIMIT = 10**4
index = 'simstring'
type = 'sample'
path = './dev/data/company_names.txt'
number_of_lines = len(open(path).readlines())
with Benchmarker(width=20) as bench:
with open(path, 'r') as lines:
for i, line in enumerate(lines):
strings = line.rstrip('\r\n')
res = es.index(index=index, doc_type=type, id=i, body={'strings': line})
@bench("search text({0} times)".format(min(number_of_lines, SEARCH_COUNT_LIMIT)))
def _(bm):
with open(path, 'r') as lines:
for i, line in enumerate(lines):
strings = line.rstrip('\r\n')
res = es.search(index=index, body={"query": {"match": {'strings': strings}}, "min_score": 20})
# print(strings)
# print("Got %d Hits:" % res['hits']['total'])
# for hit in res['hits']['hits']:
# print(hit)
# $ python dev/benchmark_for_elasticsearch.py
# ## benchmarker: release 4.0.1 (for python)
# ## python version: 3.5.5
# ## python compiler: GCC 4.2.1 Compatible Clang 4.0.1 (tags/RELEASE_401/final)
# ## python platform: Darwin-17.6.0-x86_64-i386-64bit
# ## python executable: /usr/local/miniconda3/envs/myenv/bin/python
# ## cpu model: Intel(R) Core(TM) i7-6567U CPU @ 3.30GHz
# ## parameters: loop=1, cycle=1, extra=0
#
# ## real (total = user + sys)
# search text(5797 times) 18.0541 4.9900 4.6500 0.3400
#
# ## Ranking real
# search text(5797 times) 18.0541 (100.0) ********************
#
# ## Matrix real [01]
# [01] search text(5797 times) 18.0541 100.0
|
<commit_before><commit_msg>Add benchmark script for elasticsearch<commit_after>
|
# Simple benchmark script for searching similar strings with Elasticsearch instead of SimString.
# Since Elasticsearch uses Apache Lucene's TF/IDF-based scoring, its text search serves a different purpose from this library's.
from elasticsearch import Elasticsearch
from benchmarker import Benchmarker
es = Elasticsearch('http://localhost:9200/')
SEARCH_COUNT_LIMIT = 10**4
index = 'simstring'
type = 'sample'
path = './dev/data/company_names.txt'
number_of_lines = len(open(path).readlines())
with Benchmarker(width=20) as bench:
with open(path, 'r') as lines:
for i, line in enumerate(lines):
strings = line.rstrip('\r\n')
res = es.index(index=index, doc_type=type, id=i, body={'strings': line})
@bench("search text({0} times)".format(min(number_of_lines, SEARCH_COUNT_LIMIT)))
def _(bm):
with open(path, 'r') as lines:
for i, line in enumerate(lines):
strings = line.rstrip('\r\n')
res = es.search(index=index, body={"query": {"match": {'strings': strings}}, "min_score": 20})
# print(strings)
# print("Got %d Hits:" % res['hits']['total'])
# for hit in res['hits']['hits']:
# print(hit)
# $ python dev/benchmark_for_elasticsearch.py
# ## benchmarker: release 4.0.1 (for python)
# ## python version: 3.5.5
# ## python compiler: GCC 4.2.1 Compatible Clang 4.0.1 (tags/RELEASE_401/final)
# ## python platform: Darwin-17.6.0-x86_64-i386-64bit
# ## python executable: /usr/local/miniconda3/envs/myenv/bin/python
# ## cpu model: Intel(R) Core(TM) i7-6567U CPU @ 3.30GHz
# ## parameters: loop=1, cycle=1, extra=0
#
# ## real (total = user + sys)
# search text(5797 times) 18.0541 4.9900 4.6500 0.3400
#
# ## Ranking real
# search text(5797 times) 18.0541 (100.0) ********************
#
# ## Matrix real [01]
# [01] search text(5797 times) 18.0541 100.0
|
Add benchmark script for elasticsearch# Simple benchmark script for searching similar strings with Elasticsearch instead of SimString.
# Since Elasticsearch uses Apache Lucene's TF/IDF-based scoring, its text search serves a different purpose from this library's.
from elasticsearch import Elasticsearch
from benchmarker import Benchmarker
es = Elasticsearch('http://localhost:9200/')
SEARCH_COUNT_LIMIT = 10**4
index = 'simstring'
type = 'sample'
path = './dev/data/company_names.txt'
number_of_lines = len(open(path).readlines())
with Benchmarker(width=20) as bench:
with open(path, 'r') as lines:
for i, line in enumerate(lines):
strings = line.rstrip('\r\n')
res = es.index(index=index, doc_type=type, id=i, body={'strings': line})
@bench("search text({0} times)".format(min(number_of_lines, SEARCH_COUNT_LIMIT)))
def _(bm):
with open(path, 'r') as lines:
for i, line in enumerate(lines):
strings = line.rstrip('\r\n')
res = es.search(index=index, body={"query": {"match": {'strings': strings}}, "min_score": 20})
# print(strings)
# print("Got %d Hits:" % res['hits']['total'])
# for hit in res['hits']['hits']:
# print(hit)
# $ python dev/benchmark_for_elasticsearch.py
# ## benchmarker: release 4.0.1 (for python)
# ## python version: 3.5.5
# ## python compiler: GCC 4.2.1 Compatible Clang 4.0.1 (tags/RELEASE_401/final)
# ## python platform: Darwin-17.6.0-x86_64-i386-64bit
# ## python executable: /usr/local/miniconda3/envs/myenv/bin/python
# ## cpu model: Intel(R) Core(TM) i7-6567U CPU @ 3.30GHz
# ## parameters: loop=1, cycle=1, extra=0
#
# ## real (total = user + sys)
# search text(5797 times) 18.0541 4.9900 4.6500 0.3400
#
# ## Ranking real
# search text(5797 times) 18.0541 (100.0) ********************
#
# ## Matrix real [01]
# [01] search text(5797 times) 18.0541 100.0
|
<commit_before><commit_msg>Add benchmark script for elasticsearch<commit_after># Simple benchmark script for searching similar strings with Elasticsearch instead of SimString.
# Since Elasticsearch uses Apache Lucene's TF/IDF-based scoring, its text search serves a different purpose from this library's.
from elasticsearch import Elasticsearch
from benchmarker import Benchmarker
es = Elasticsearch('http://localhost:9200/')
SEARCH_COUNT_LIMIT = 10**4
index = 'simstring'
type = 'sample'
path = './dev/data/company_names.txt'
number_of_lines = len(open(path).readlines())
with Benchmarker(width=20) as bench:
with open(path, 'r') as lines:
for i, line in enumerate(lines):
strings = line.rstrip('\r\n')
res = es.index(index=index, doc_type=type, id=i, body={'strings': line})
@bench("search text({0} times)".format(min(number_of_lines, SEARCH_COUNT_LIMIT)))
def _(bm):
with open(path, 'r') as lines:
for i, line in enumerate(lines):
strings = line.rstrip('\r\n')
res = es.search(index=index, body={"query": {"match": {'strings': strings}}, "min_score": 20})
# print(strings)
# print("Got %d Hits:" % res['hits']['total'])
# for hit in res['hits']['hits']:
# print(hit)
# $ python dev/benchmark_for_elasticsearch.py
# ## benchmarker: release 4.0.1 (for python)
# ## python version: 3.5.5
# ## python compiler: GCC 4.2.1 Compatible Clang 4.0.1 (tags/RELEASE_401/final)
# ## python platform: Darwin-17.6.0-x86_64-i386-64bit
# ## python executable: /usr/local/miniconda3/envs/myenv/bin/python
# ## cpu model: Intel(R) Core(TM) i7-6567U CPU @ 3.30GHz
# ## parameters: loop=1, cycle=1, extra=0
#
# ## real (total = user + sys)
# search text(5797 times) 18.0541 4.9900 4.6500 0.3400
#
# ## Ranking real
# search text(5797 times) 18.0541 (100.0) ********************
#
# ## Matrix real [01]
# [01] search text(5797 times) 18.0541 100.0
|
|
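For comparison, the SimString side of the same benchmark. This sketch assumes the library's documented API (CharacterNgramFeatureExtractor, CosineMeasure, DictDatabase, Searcher) and a placeholder query and threshold:
from simstring.feature_extractor.character_ngram import CharacterNgramFeatureExtractor
from simstring.measure.cosine import CosineMeasure
from simstring.database.dict import DictDatabase
from simstring.searcher import Searcher
db = DictDatabase(CharacterNgramFeatureExtractor(2))
with open('./dev/data/company_names.txt') as lines:
    for line in lines:
        db.add(line.rstrip('\r\n'))
searcher = Searcher(db, CosineMeasure())
matches = searcher.search('placeholder company name', 0.8)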
c3bd992d3b42255205b4f90cb3578bd81982f773
|
opps/core/tests/source_models.py
|
opps/core/tests/source_models.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from django.db import IntegrityError
from django.test import TestCase
from opps.core.models.source import Source
class SourceModelTest(TestCase):
def setUp(self):
self.source = Source.objects.create(name=u'Test site',
slug=u'test-site')
|
Create setup for source model test using TestCase
|
Create setup for source model test using TestCase
|
Python
|
mit
|
jeanmask/opps,YACOWS/opps,jeanmask/opps,YACOWS/opps,YACOWS/opps,YACOWS/opps,jeanmask/opps,opps/opps,williamroot/opps,opps/opps,opps/opps,jeanmask/opps,williamroot/opps,williamroot/opps,opps/opps,williamroot/opps
|
Create setup for source model test using TestCase
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from django.db import IntegrityError
from django.test import TestCase
from opps.core.models.source import Source
class SourceModelTest(TestCase):
def setUp(self):
self.source = Source.objects.create(name=u'Test site',
slug=u'test-site')
|
<commit_before><commit_msg>Create setup for source model test using TestCase<commit_after>
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from django.db import IntegrityError
from django.test import TestCase
from opps.core.models.source import Source
class SourceModelTest(TestCase):
def setUp(self):
self.source = Source.objects.create(name=u'Test site',
slug=u'test-site')
|
Create setup for source model test using TestCase#!/usr/bin/env python
# -*- coding: utf-8 -*-
from django.db import IntegrityError
from django.test import TestCase
from opps.core.models.source import Source
class SourceModelTest(TestCase):
def setUp(self):
self.source = Source.objects.create(name=u'Test site',
slug=u'test-site')
|
<commit_before><commit_msg>Create setup for source model test using TestCase<commit_after>#!/usr/bin/env python
# -*- coding: utf-8 -*-
from django.db import IntegrityError
from django.test import TestCase
from opps.core.models.source import Source
class SourceModelTest(TestCase):
def setUp(self):
self.source = Source.objects.create(name=u'Test site',
slug=u'test-site')
|
|
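A minimal sketch of the model this test exercises; the real opps Source model carries more fields, and the max_length values here are assumptions:
from django.db import models
class Source(models.Model):
    name = models.CharField(max_length=140)
    slug = models.SlugField(max_length=150, unique=True)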
aee00af4c8d04825b1f532bcac096c0f4e245fbd
|
samples/util/test.py
|
samples/util/test.py
|
# coding: utf-8
import os
import shutil
import cv2
def delete_and_make_directory(dir_path='./image_dir/'):
# Delete the entire directory tree if it exists.
if os.path.exists(dir_path):
shutil.rmtree(dir_path)
# Make the directory if it doesn't exist.
if not os.path.exists(dir_path):
os.makedirs(dir_path)
def video_2_frames(video_file='./IMG_2140.MOV', image_dir='./image_dir/', image_file='img_%s.png'):
# Delete the entire directory tree if it exists.
if os.path.exists(image_dir):
shutil.rmtree(image_dir)
# Make the directory if it doesn't exist.
if not os.path.exists(image_dir):
os.makedirs(image_dir)
# Video to frames
i = 0
cap = cv2.VideoCapture(video_file)
while(cap.isOpened()):
flag, frame = cap.read() # Capture frame-by-frame
if flag == False: # Is a frame left?
break
cv2.imwrite(image_dir+image_file % str(i).zfill(6), frame) # Save a frame
print('Save', image_dir+image_file % str(i).zfill(6))
i += 1
cap.release() # When everything done, release the capture
|
Add util directory and code
|
Add util directory and code
|
Python
|
mit
|
iShoto/testpy
|
Add util directory and code
|
# coding: utf-8
import os
import shutil
import cv2
def delete_and_make_directory(dir_path='./image_dir/'):
# Delete the entire directory tree if it exists.
if os.path.exists(dir_path):
shutil.rmtree(dir_path)
# Make the directory if it doesn't exist.
if not os.path.exists(dir_path):
os.makedirs(dir_path)
def video_2_frames(video_file='./IMG_2140.MOV', image_dir='./image_dir/', image_file='img_%s.png'):
# Delete the entire directory tree if it exists.
if os.path.exists(image_dir):
shutil.rmtree(image_dir)
# Make the directory if it doesn't exist.
if not os.path.exists(image_dir):
os.makedirs(image_dir)
# Video to frames
i = 0
cap = cv2.VideoCapture(video_file)
while(cap.isOpened()):
flag, frame = cap.read() # Capture frame-by-frame
if flag == False: # Is a frame left?
break
cv2.imwrite(image_dir+image_file % str(i).zfill(6), frame) # Save a frame
print('Save', image_dir+image_file % str(i).zfill(6))
i += 1
cap.release() # When everything done, release the capture
|
<commit_before><commit_msg>Add util directory and code<commit_after>
|
# coding: utf-8
import os
import shutil
import cv2
def delete_and_make_directory(dir_path='./image_dir/'):
# Delete the entire directory tree if it exists.
if os.path.exists(dir_path):
shutil.rmtree(dir_path)
# Make the directory if it doesn't exist.
if not os.path.exists(dir_path):
os.makedirs(dir_path)
def video_2_frames(video_file='./IMG_2140.MOV', image_dir='./image_dir/', image_file='img_%s.png'):
# Delete the entire directory tree if it exists.
if os.path.exists(image_dir):
shutil.rmtree(image_dir)
# Make the directory if it doesn't exist.
if not os.path.exists(image_dir):
os.makedirs(image_dir)
# Video to frames
i = 0
cap = cv2.VideoCapture(video_file)
while(cap.isOpened()):
flag, frame = cap.read() # Capture frame-by-frame
if flag == False: # Is a frame left?
break
cv2.imwrite(image_dir+image_file % str(i).zfill(6), frame) # Save a frame
print('Save', image_dir+image_file % str(i).zfill(6))
i += 1
cap.release() # When everything done, release the capture
|
Add util directory and code# coding: utf-8
import os
import shutil
import cv2
def delete_and_make_directory(dir_path='./image_dir/'):
# Delete the entire directory tree if it exists.
if os.path.exists(dir_path):
shutil.rmtree(dir_path)
# Make the directory if it doesn't exist.
if not os.path.exists(dir_path):
os.makedirs(dir_path)
def video_2_frames(video_file='./IMG_2140.MOV', image_dir='./image_dir/', image_file='img_%s.png'):
# Delete the entire directory tree if it exists.
if os.path.exists(image_dir):
shutil.rmtree(image_dir)
# Make the directory if it doesn't exist.
if not os.path.exists(image_dir):
os.makedirs(image_dir)
# Video to frames
i = 0
cap = cv2.VideoCapture(video_file)
while(cap.isOpened()):
flag, frame = cap.read() # Capture frame-by-frame
if flag == False: # Is a frame left?
break
cv2.imwrite(image_dir+image_file % str(i).zfill(6), frame) # Save a frame
print('Save', image_dir+image_file % str(i).zfill(6))
i += 1
cap.release() # When everything done, release the capture
|
<commit_before><commit_msg>Add util directory and code<commit_after># coding: utf-8
import os
import shutil
import cv2
def delete_and_make_directory(dir_path='./image_dir/'):
# Delete the entire directory tree if it exists.
if os.path.exists(dir_path):
shutil.rmtree(dir_path)
# Make the directory if it doesn't exist.
if not os.path.exists(dir_path):
os.makedirs(dir_path)
def video_2_frames(video_file='./IMG_2140.MOV', image_dir='./image_dir/', image_file='img_%s.png'):
# Delete the entire directory tree if it exists.
if os.path.exists(image_dir):
shutil.rmtree(image_dir)
# Make the directory if it doesn't exist.
if not os.path.exists(image_dir):
os.makedirs(image_dir)
# Video to frames
i = 0
cap = cv2.VideoCapture(video_file)
while(cap.isOpened()):
flag, frame = cap.read() # Capture frame-by-frame
if flag == False: # Is a frame left?
break
cv2.imwrite(image_dir+image_file % str(i).zfill(6), frame) # Save a frame
print('Save', image_dir+image_file % str(i).zfill(6))
i += 1
cap.release() # When everything done, release the capture
|
|
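Hypothetical usage of the helper above; the file paths are placeholders:
from test import video_2_frames  # the samples/util/test.py module above
video_2_frames(video_file='./IMG_0001.MOV', image_dir='./frames/', image_file='img_%s.png')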
d1ee4b6d30f40653588bc5fc1b287e770457a79e
|
filters/remove-strong.py
|
filters/remove-strong.py
|
"""
Convert strong text to normal text
EXAMPLE:
>>>> echo Lorem **ip sum** example | pandoc -F remove-strong.py
<p>Lorem ip sum example</p>
"""
import panflute as pf
def action(elem, doc):
if isinstance(elem, pf.Strong):
return list(elem.content)
#return elem.content.list
#return pf.Span(*elem.content)
def main(doc=None):
return pf.run_filter(action, doc=doc)
if __name__ == '__main__':
main()
|
Add example that removes bold text
|
Add example that removes bold text
|
Python
|
bsd-3-clause
|
sergiocorreia/panflute-filters
|
Add example that removes bold text
|
"""
Convert strong text to normal text
EXAMPLE:
>>>> echo Lorem **ip sum** example | pandoc -F remove-strong.py
<p>Lorem ip sum example</p>
"""
import panflute as pf
def action(elem, doc):
if isinstance(elem, pf.Strong):
return list(elem.content)
#return elem.content.list
#return pf.Span(*elem.content)
def main(doc=None):
return pf.run_filter(action, doc=doc)
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add example that removes bold text<commit_after>
|
"""
Convert strong text to normal text
EXAMPLE:
>>>> echo Lorem **ip sum** example | pandoc -F remove-strong.py
<p>Lorem ip sum example</p>
"""
import panflute as pf
def action(elem, doc):
if isinstance(elem, pf.Strong):
return list(elem.content)
#return elem.content.list
#return pf.Span(*elem.content)
def main(doc=None):
return pf.run_filter(action, doc=doc)
if __name__ == '__main__':
main()
|
Add example that removes bold text"""
Convert strong text to normal text
EXAMPLE:
>>>> echo Lorem **ip sum** example | pandoc -F remove-strong.py
<p>Lorem ip sum example</p>
"""
import panflute as pf
def action(elem, doc):
if isinstance(elem, pf.Strong):
return list(elem.content)
#return elem.content.list
#return pf.Span(*elem.content)
def main(doc=None):
return pf.run_filter(action, doc=doc)
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add example that removes bold text<commit_after>"""
Convert strong text to normal text
EXAMPLE:
>>>> echo Lorem **ip sum** example | pandoc -F remove-strong.py
<p>Lorem ip sum example</p>
"""
import panflute as pf
def action(elem, doc):
if isinstance(elem, pf.Strong):
return list(elem.content)
#return elem.content.list
#return pf.Span(*elem.content)
def main(doc=None):
return pf.run_filter(action, doc=doc)
if __name__ == '__main__':
main()
|
|
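The same panflute pattern handles other inline rewrites; for example, a sketch that downgrades bold to italics instead of stripping it:
import panflute as pf
def action(elem, doc):
    if isinstance(elem, pf.Strong):
        # Re-wrap the Strong element's children in an Emph instead of unwrapping them.
        return pf.Emph(*elem.content)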
32590cf52bfc699d9e003a5f64c125d9f232c141
|
vumi/middleware/tests/test_address_translator.py
|
vumi/middleware/tests/test_address_translator.py
|
"""Tests from vumi.middleware.address_translator."""
from twisted.trial.unittest import TestCase
from vumi.middleware.address_translator import AddressTranslationMiddleware
class AddressTranslationMiddlewareTestCase(TestCase):
def mk_addr_trans(self, outbound_map):
worker = object()
config = {'outbound_map': outbound_map}
mw = AddressTranslationMiddleware("test_addr_trans", config, worker)
mw.setup_middleware()
return mw
def mk_msg(self, to_addr='unknown', from_addr='unknown'):
return {
'to_addr': to_addr,
'from_addr': from_addr,
}
def test_handle_outbound(self):
mw = self.mk_addr_trans({'555OUT': '555IN'})
msg = mw.handle_outbound(self.mk_msg(to_addr="555OUT"), "outbound")
self.assertEqual(msg['to_addr'], "555IN")
msg = mw.handle_outbound(self.mk_msg(to_addr="555UNK"), "outbound")
self.assertEqual(msg['to_addr'], "555UNK")
def test_handle_inbound(self):
mw = self.mk_addr_trans({'555OUT': '555IN'})
msg = mw.handle_inbound(self.mk_msg(from_addr="555OUT"), "inbound")
self.assertEqual(msg['from_addr'], "555OUT")
msg = mw.handle_inbound(self.mk_msg(from_addr="555UNK"), "inbound")
self.assertEqual(msg['from_addr'], "555UNK")
|
Add test for address translation middleware.
|
Add test for address translation middleware.
|
Python
|
bsd-3-clause
|
vishwaprakashmishra/xmatrix,TouK/vumi,vishwaprakashmishra/xmatrix,TouK/vumi,vishwaprakashmishra/xmatrix,harrissoerja/vumi,TouK/vumi,harrissoerja/vumi,harrissoerja/vumi
|
Add test for address translation middleware.
|
"""Tests from vumi.middleware.address_translator."""
from twisted.trial.unittest import TestCase
from vumi.middleware.address_translator import AddressTranslationMiddleware
class AddressTranslationMiddlewareTestCase(TestCase):
def mk_addr_trans(self, outbound_map):
worker = object()
config = {'outbound_map': outbound_map}
mw = AddressTranslationMiddleware("test_addr_trans", config, worker)
mw.setup_middleware()
return mw
def mk_msg(self, to_addr='unknown', from_addr='unknown'):
return {
'to_addr': to_addr,
'from_addr': from_addr,
}
def test_handle_outbound(self):
mw = self.mk_addr_trans({'555OUT': '555IN'})
msg = mw.handle_outbound(self.mk_msg(to_addr="555OUT"), "outbound")
self.assertEqual(msg['to_addr'], "555IN")
msg = mw.handle_outbound(self.mk_msg(to_addr="555UNK"), "outbound")
self.assertEqual(msg['to_addr'], "555UNK")
def test_handle_inbound(self):
mw = self.mk_addr_trans({'555OUT': '555IN'})
msg = mw.handle_inbound(self.mk_msg(from_addr="555OUT"), "inbound")
self.assertEqual(msg['from_addr'], "555OUT")
msg = mw.handle_inbound(self.mk_msg(from_addr="555UNK"), "inbound")
self.assertEqual(msg['from_addr'], "555UNK")
|
<commit_before><commit_msg>Add test for address translation middleware.<commit_after>
|
"""Tests from vumi.middleware.address_translator."""
from twisted.trial.unittest import TestCase
from vumi.middleware.address_translator import AddressTranslationMiddleware
class AddressTranslationMiddlewareTestCase(TestCase):
def mk_addr_trans(self, outbound_map):
worker = object()
config = {'outbound_map': outbound_map}
mw = AddressTranslationMiddleware("test_addr_trans", config, worker)
mw.setup_middleware()
return mw
def mk_msg(self, to_addr='unknown', from_addr='unknown'):
return {
'to_addr': to_addr,
'from_addr': from_addr,
}
def test_handle_outbound(self):
mw = self.mk_addr_trans({'555OUT': '555IN'})
msg = mw.handle_outbound(self.mk_msg(to_addr="555OUT"), "outbound")
self.assertEqual(msg['to_addr'], "555IN")
msg = mw.handle_outbound(self.mk_msg(to_addr="555UNK"), "outbound")
self.assertEqual(msg['to_addr'], "555UNK")
def test_handle_inbound(self):
mw = self.mk_addr_trans({'555OUT': '555IN'})
msg = mw.handle_inbound(self.mk_msg(from_addr="555OUT"), "inbound")
self.assertEqual(msg['from_addr'], "555OUT")
msg = mw.handle_inbound(self.mk_msg(from_addr="555UNK"), "inbound")
self.assertEqual(msg['from_addr'], "555UNK")
|
Add test for address translation middleware."""Tests for vumi.middleware.address_translator."""
from twisted.trial.unittest import TestCase
from vumi.middleware.address_translator import AddressTranslationMiddleware
class AddressTranslationMiddlewareTestCase(TestCase):
def mk_addr_trans(self, outbound_map):
worker = object()
config = {'outbound_map': outbound_map}
mw = AddressTranslationMiddleware("test_addr_trans", config, worker)
mw.setup_middleware()
return mw
def mk_msg(self, to_addr='unknown', from_addr='unknown'):
return {
'to_addr': to_addr,
'from_addr': from_addr,
}
def test_handle_outbound(self):
mw = self.mk_addr_trans({'555OUT': '555IN'})
msg = mw.handle_outbound(self.mk_msg(to_addr="555OUT"), "outbound")
self.assertEqual(msg['to_addr'], "555IN")
msg = mw.handle_outbound(self.mk_msg(to_addr="555UNK"), "outbound")
self.assertEqual(msg['to_addr'], "555UNK")
def test_handle_inbound(self):
mw = self.mk_addr_trans({'555OUT': '555IN'})
msg = mw.handle_inbound(self.mk_msg(from_addr="555OUT"), "inbound")
self.assertEqual(msg['from_addr'], "555OUT")
msg = mw.handle_inbound(self.mk_msg(from_addr="555UNK"), "inbound")
self.assertEqual(msg['from_addr'], "555UNK")
|
<commit_before><commit_msg>Add test for address translation middleware.<commit_after>"""Tests for vumi.middleware.address_translator."""
from twisted.trial.unittest import TestCase
from vumi.middleware.address_translator import AddressTranslationMiddleware
class AddressTranslationMiddlewareTestCase(TestCase):
def mk_addr_trans(self, outbound_map):
worker = object()
config = {'outbound_map': outbound_map}
mw = AddressTranslationMiddleware("test_addr_trans", config, worker)
mw.setup_middleware()
return mw
def mk_msg(self, to_addr='unknown', from_addr='unknown'):
return {
'to_addr': to_addr,
'from_addr': from_addr,
}
def test_handle_outbound(self):
mw = self.mk_addr_trans({'555OUT': '555IN'})
msg = mw.handle_outbound(self.mk_msg(to_addr="555OUT"), "outbound")
self.assertEqual(msg['to_addr'], "555IN")
msg = mw.handle_outbound(self.mk_msg(to_addr="555UNK"), "outbound")
self.assertEqual(msg['to_addr'], "555UNK")
def test_handle_inbound(self):
mw = self.mk_addr_trans({'555OUT': '555IN'})
msg = mw.handle_inbound(self.mk_msg(from_addr="555OUT"), "inbound")
self.assertEqual(msg['from_addr'], "555OUT")
msg = mw.handle_inbound(self.mk_msg(from_addr="555UNK"), "inbound")
self.assertEqual(msg['from_addr'], "555UNK")
|
|
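A hypothetical standalone equivalent of the lookups the tests above exercise, to make the expected behaviour concrete; this is not the vumi implementation itself, and the inverse inbound map is an assumption.

outbound_map = {'555OUT': '555IN'}
inbound_map = {v: k for k, v in outbound_map.items()}

def translate_outbound(msg):
    # unknown addresses pass through unchanged, as the tests require
    msg['to_addr'] = outbound_map.get(msg['to_addr'], msg['to_addr'])
    return msg

def translate_inbound(msg):
    msg['from_addr'] = inbound_map.get(msg['from_addr'], msg['from_addr'])
    return msg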
885ec3ddcdb49f4f913c8c7230fc7613290ee5bb
|
python/rename_files.py
|
python/rename_files.py
|
# coding: utf-8
import argparse
import os
import re
import shutil
def rename_files_in_dir(directory, live_run=False):
if not live_run:
print("Performing dry run...")
files = [f for f in os.listdir(directory) if os.path.isfile(os.path.join(directory, f))]
for old_path in files:
new_path = re.sub('[^0-9a-zA-Z-\.,_]', '_', old_path)
new_path = re.sub('_+', '_', new_path)
new_path = re.sub('_-_', '-', new_path)
new_path = re.sub('(^_|_$|\.$)', '', new_path)
if live_run:
shutil.move(os.path.join(directory, old_path), os.path.join(directory, new_path))
else:
print("{} -> {}".format(old_path, new_path))
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('directory', type=str, help="which directory's files to rename")
parser.add_argument('-l', '--live_run', default=False, action='store_true', help="If not given, perform a dry run by showing what would be renamed.")
args = parser.parse_args()
rename_files_in_dir(args.directory, args.live_run)
|
Add conservative rename Python script
|
Add conservative rename Python script
|
Python
|
mit
|
joshsziegler/dotfiles,joshsziegler/dotfiles
|
Add conservative rename Python script
|
# coding: utf-8
import argparse
import os
import re
import shutil
def rename_files_in_dir(directory, live_run=False):
if not live_run:
print("Performing dry run...")
files = [f for f in os.listdir(directory) if os.path.isfile(os.path.join(directory, f))]
for old_path in files:
new_path = re.sub('[^0-9a-zA-Z-\.,_]', '_', old_path)
new_path = re.sub('_+', '_', new_path)
new_path = re.sub('_-_', '-', new_path)
new_path = re.sub('(^_|_$|\.$)', '', new_path)
if live_run:
shutil.move(os.path.join(directory, old_path), os.path.join(directory, new_path))
else:
print("{} -> {}".format(old_path, new_path))
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('directory', type=str, help="which directory's files to rename")
parser.add_argument('-l', '--live_run', default=False, action='store_true', help="If not given, perform a dry run by showing what would be renamed.")
args = parser.parse_args()
rename_files_in_dir(args.directory, args.live_run)
|
<commit_before><commit_msg>Add conservative rename Python script<commit_after>
|
# coding: utf-8
import argparse
import os
import re
import shutil
def rename_files_in_dir(directory, live_run=False):
if not live_run:
print("Performing dry run...")
files = [f for f in os.listdir(directory) if os.path.isfile(os.path.join(directory, f))]
for old_path in files:
new_path = re.sub('[^0-9a-zA-Z-\.,_]', '_', old_path)
new_path = re.sub('_+', '_', new_path)
new_path = re.sub('_-_', '-', new_path)
new_path = re.sub('(^_|_$|\.$)', '', new_path)
if live_run:
shutil.move(os.path.join(directory, old_path), os.path.join(directory, new_path))
else:
print("{} -> {}".format(old_path, new_path))
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('directory', type=str, help="which directory's files to rename")
parser.add_argument('-l', '--live_run', default=False, action='store_true', help="If not given, perform a dry run by showing what would be renamed.")
args = parser.parse_args()
rename_files_in_dir(args.directory, args.live_run)
|
Add conservative rename Python script# coding: utf-8
import argparse
import os
import re
import shutil
def rename_files_in_dir(directory, live_run=False):
if not live_run:
print("Performing dry run...")
files = [f for f in os.listdir(directory) if os.path.isfile(os.path.join(directory, f))]
for old_path in files:
new_path = re.sub('[^0-9a-zA-Z-\.,_]', '_', old_path)
new_path = re.sub('_+', '_', new_path)
new_path = re.sub('_-_', '-', new_path)
new_path = re.sub('(^_|_$|\.$)', '', new_path)
if live_run:
shutil.move(os.path.join(directory, old_path), os.path.join(directory, new_path))
else:
print("{} -> {}".format(old_path, new_path))
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('directory', type=str, help="which directory's files to rename")
parser.add_argument('-l', '--live_run', default=False, action='store_true', help="If not given, perform a dry run by showing what would be renamed.")
args = parser.parse_args()
rename_files_in_dir(args.directory, args.live_run)
|
<commit_before><commit_msg>Add conservative rename Python script<commit_after># coding: utf-8
import argparse
import os
import re
import shutil
def rename_files_in_dir(directory, live_run=False):
if not live_run:
print("Performing dry run...")
files = [f for f in os.listdir(directory) if os.path.isfile(os.path.join(directory, f))]
for old_path in files:
new_path = re.sub('[^0-9a-zA-Z-\.,_]', '_', old_path)
new_path = re.sub('_+', '_', new_path)
new_path = re.sub('_-_', '-', new_path)
new_path = re.sub('(^_|_$|\.$)', '', new_path)
if live_run:
shutil.move(os.path.join(directory, old_path), os.path.join(directory, new_path))
else:
print("{} -> {}".format(old_path, new_path))
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('directory', type=str, help="which directory's files to rename")
parser.add_argument('-l', '--live_run', default=False, action='store_true', help="If not given, perform a dry run by showing what would be renamed.")
args = parser.parse_args()
rename_files_in_dir(args.directory, args.live_run)
|
|
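A hedged usage sketch for the rename script; the directory is a placeholder.

# Dry run (default): only prints the planned renames.
rename_files_in_dir('/tmp/downloads')
# Live run: actually moves the files; equivalent to
#   python rename_files.py /tmp/downloads -l
rename_files_in_dir('/tmp/downloads', live_run=True)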
e5586830fc2afb655e52563f29fa1f4271c836be
|
indra/resources/ontology_graph.py
|
indra/resources/ontology_graph.py
|
import networkx
from indra.databases import hgnc_client
from indra.databases import uniprot_client
from indra.databases import chebi_client
from indra.databases import mesh_client
class IndraOntology(networkx.MultiDiGraph):
def __init__(self):
super().__init__()
self.add_hgnc_nodes()
self.add_uniprot_nodes()
def add_hgnc_nodes(self):
nodes = [('HGNC:%s' % hid, {'name': hname}) for (hid, hname)
in hgnc_client.hgnc_names.items()]
self.add_nodes_from(nodes)
def add_uniprot_nodes(self):
nodes = [('UP:%s' % uid, {'name': uname}) for (uid, uname)
in uniprot_client.um.uniprot_gene_name.items()]
self.add_nodes_from(nodes)
|
Add initial implementation of ontology graph
|
Add initial implementation of ontology graph
|
Python
|
bsd-2-clause
|
sorgerlab/belpy,johnbachman/belpy,bgyori/indra,sorgerlab/belpy,johnbachman/indra,sorgerlab/indra,johnbachman/indra,sorgerlab/indra,bgyori/indra,sorgerlab/belpy,bgyori/indra,johnbachman/belpy,sorgerlab/indra,johnbachman/indra,johnbachman/belpy
|
Add initial implementation of ontology graph
|
import networkx
from indra.databases import hgnc_client
from indra.databases import uniprot_client
from indra.databases import chebi_client
from indra.databases import mesh_client
class IndraOntology(networkx.MultiDiGraph):
def __init__(self):
super().__init__()
self.add_hgnc_nodes()
self.add_uniprot_nodes()
def add_hgnc_nodes(self):
nodes = [('HGNC:%s' % hid, {'name': hname}) for (hid, hname)
in hgnc_client.hgnc_names.items()]
self.add_nodes_from(nodes)
def add_uniprot_nodes(self):
nodes = [('UP:%s' % uid, {'name': uname}) for (uid, uname)
in uniprot_client.um.uniprot_gene_name.items()]
self.add_nodes_from(nodes)
|
<commit_before><commit_msg>Add initial implementation of ontology graph<commit_after>
|
import networkx
from indra.databases import hgnc_client
from indra.databases import uniprot_client
from indra.databases import chebi_client
from indra.databases import mesh_client
class IndraOntology(networkx.MultiDiGraph):
def __init__(self):
super().__init__()
self.add_hgnc_nodes()
self.add_uniprot_nodes()
def add_hgnc_nodes(self):
nodes = [('HGNC:%s' % hid, {'name': hname}) for (hid, hname)
in hgnc_client.hgnc_names.items()]
self.add_nodes_from(nodes)
def add_uniprot_nodes(self):
nodes = [('UP:%s' % uid, {'name': uname}) for (uid, uname)
in uniprot_client.um.uniprot_gene_name.items()]
self.add_nodes_from(nodes)
|
Add initial implementation of ontology graphimport networkx
from indra.databases import hgnc_client
from indra.databases import uniprot_client
from indra.databases import chebi_client
from indra.databases import mesh_client
class IndraOntology(networkx.MultiDiGraph):
def __init__(self):
super().__init__()
self.add_hgnc_nodes()
self.add_uniprot_nodes()
def add_hgnc_nodes(self):
nodes = [('HGNC:%s' % hid, {'name': hname}) for (hid, hname)
in hgnc_client.hgnc_names.items()]
self.add_nodes_from(nodes)
def add_uniprot_nodes(self):
nodes = [('UP:%s' % uid, {'name': uname}) for (uid, uname)
in uniprot_client.um.uniprot_gene_name.items()]
self.add_nodes_from(nodes)
|
<commit_before><commit_msg>Add initial implementation of ontology graph<commit_after>import networkx
from indra.databases import hgnc_client
from indra.databases import uniprot_client
from indra.databases import chebi_client
from indra.databases import mesh_client
class IndraOntology(networkx.MultiDiGraph):
def __init__(self):
super().__init__()
self.add_hgnc_nodes()
self.add_uniprot_nodes()
def add_hgnc_nodes(self):
nodes = [('HGNC:%s' % hid, {'name': hname}) for (hid, hname)
in hgnc_client.hgnc_names.items()]
self.add_nodes_from(nodes)
def add_uniprot_nodes(self):
nodes = [('UP:%s' % uid, {'name': uname}) for (uid, uname)
in uniprot_client.um.uniprot_gene_name.items()]
self.add_nodes_from(nodes)
|
|
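A hedged sketch of building and inspecting the ontology graph; it assumes the INDRA resource files are available so hgnc_client and uniprot_client can load, and the HGNC id below is only an example lookup.

ontology = IndraOntology()
print(ontology.number_of_nodes())   # HGNC plus UniProt nodes
print(ontology.nodes['HGNC:6840'])  # e.g. {'name': 'MAP2K1'}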
8b03ba1940a2e234d09d03ccd40cf43e5060d2b1
|
indra/sources/bbn/make_bbn_tsv.py
|
indra/sources/bbn/make_bbn_tsv.py
|
"""This script produces a TSV that help map the BBN ontology
to the Eidos UN ontology based on entries and examples."""
import yaml
import requests
from indra.sources import bbn
def build_examples(node, tree, prefix):
if not prefix or prefix in ('entity', 'event'):
this_prefix = node
else:
this_prefix = prefix + ',' + node if prefix else node
for entry in tree:
if isinstance(entry, str) and entry[0] != '_':
child = entry
elif isinstance(entry, dict):
for child in entry.keys():
if child[0] != '_':
if isinstance(entry[child], (list, dict)):
build_examples(child, entry[child], this_prefix)
if child[0] != '_':
if this_prefix in all_examples:
all_examples[this_prefix].add(child)
else:
parts = this_prefix.split(',')
all_examples[this_prefix] = set(parts + [child])
if __name__ == '__main__':
# BBN
bbn_ont_url = ('https://raw.githubusercontent.com/BBN-E/Hume/master/'
'resource/ontologies/hume_ontology.yaml')
all_examples = {}
yml = requests.get(bbn_ont_url).content
root = yaml.load(yml)
for top_entry in root:
node = list(top_entry.keys())[0]
build_examples(node, top_entry[node], None)
with open('bbn_ontology_examples.tsv', 'w') as fh:
for k, v in sorted(all_examples.items(), key=lambda x: x[0]):
fh.write('%s\t%s\n' % (k, ','.join(sorted(list(v)))))
|
Add script to generate BBN TSV
|
Add script to generate BBN TSV
|
Python
|
bsd-2-clause
|
pvtodorov/indra,sorgerlab/indra,sorgerlab/indra,johnbachman/belpy,sorgerlab/indra,johnbachman/indra,johnbachman/indra,sorgerlab/belpy,sorgerlab/belpy,pvtodorov/indra,johnbachman/belpy,sorgerlab/belpy,bgyori/indra,bgyori/indra,bgyori/indra,pvtodorov/indra,johnbachman/belpy,pvtodorov/indra,johnbachman/indra
|
Add script to generate BBN TSV
|
"""This script produces a TSV that help map the BBN ontology
to the Eidos UN ontology based on entries and examples."""
import yaml
import requests
from indra.sources import bbn
def build_examples(node, tree, prefix):
if not prefix or prefix in ('entity', 'event'):
this_prefix = node
else:
this_prefix = prefix + ',' + node if prefix else node
for entry in tree:
if isinstance(entry, str) and entry[0] != '_':
child = entry
elif isinstance(entry, dict):
for child in entry.keys():
if child[0] != '_':
if isinstance(entry[child], (list, dict)):
build_examples(child, entry[child], this_prefix)
if child[0] != '_':
if this_prefix in all_examples:
all_examples[this_prefix].add(child)
else:
parts = this_prefix.split(',')
all_examples[this_prefix] = set(parts + [child])
if __name__ == '__main__':
# BBN
bbn_ont_url = ('https://raw.githubusercontent.com/BBN-E/Hume/master/'
'resource/ontologies/hume_ontology.yaml')
all_examples = {}
yml = requests.get(bbn_ont_url).content
root = yaml.load(yml)
for top_entry in root:
node = list(top_entry.keys())[0]
build_examples(node, top_entry[node], None)
with open('bbn_ontology_examples.tsv', 'w') as fh:
for k, v in sorted(all_examples.items(), key=lambda x: x[0]):
fh.write('%s\t%s\n' % (k, ','.join(sorted(list(v)))))
|
<commit_before><commit_msg>Add script to generate BBN TSV<commit_after>
|
"""This script produces a TSV that help map the BBN ontology
to the Eidos UN ontology based on entries and examples."""
import yaml
import requests
from indra.sources import bbn
def build_examples(node, tree, prefix):
if not prefix or prefix in ('entity', 'event'):
this_prefix = node
else:
this_prefix = prefix + ',' + node if prefix else node
for entry in tree:
if isinstance(entry, str) and entry[0] != '_':
child = entry
elif isinstance(entry, dict):
for child in entry.keys():
if child[0] != '_':
if isinstance(entry[child], (list, dict)):
build_examples(child, entry[child], this_prefix)
if child[0] != '_':
if this_prefix in all_examples:
all_examples[this_prefix].add(child)
else:
parts = this_prefix.split(',')
all_examples[this_prefix] = set(parts + [child])
if __name__ == '__main__':
# BBN
bbn_ont_url = ('https://raw.githubusercontent.com/BBN-E/Hume/master/'
'resource/ontologies/hume_ontology.yaml')
all_examples = {}
yml = requests.get(bbn_ont_url).content
root = yaml.load(yml)
for top_entry in root:
node = list(top_entry.keys())[0]
build_examples(node, top_entry[node], None)
with open('bbn_ontology_examples.tsv', 'w') as fh:
for k, v in sorted(all_examples.items(), key=lambda x: x[0]):
fh.write('%s\t%s\n' % (k, ','.join(sorted(list(v)))))
|
Add script to generate BBN TSV"""This script produces a TSV that help map the BBN ontology
to the Eidos UN ontology based on entries and examples."""
import yaml
import requests
from indra.sources import bbn
def build_examples(node, tree, prefix):
if not prefix or prefix in ('entity', 'event'):
this_prefix = node
else:
this_prefix = prefix + ',' + node if prefix else node
for entry in tree:
if isinstance(entry, str) and entry[0] != '_':
child = entry
elif isinstance(entry, dict):
for child in entry.keys():
if child[0] != '_':
if isinstance(entry[child], (list, dict)):
build_examples(child, entry[child], this_prefix)
if child[0] != '_':
if this_prefix in all_examples:
all_examples[this_prefix].add(child)
else:
parts = this_prefix.split(',')
all_examples[this_prefix] = set(parts + [child])
if __name__ == '__main__':
# BBN
bbn_ont_url = ('https://raw.githubusercontent.com/BBN-E/Hume/master/'
'resource/ontologies/hume_ontology.yaml')
all_examples = {}
yml = requests.get(bbn_ont_url).content
root = yaml.load(yml)
for top_entry in root:
node = list(top_entry.keys())[0]
build_examples(node, top_entry[node], None)
with open('bbn_ontology_examples.tsv', 'w') as fh:
for k, v in sorted(all_examples.items(), key=lambda x: x[0]):
fh.write('%s\t%s\n' % (k, ','.join(sorted(list(v)))))
|
<commit_before><commit_msg>Add script to generate BBN TSV<commit_after>"""This script produces a TSV that helps map the BBN ontology
to the Eidos UN ontology based on entries and examples."""
import yaml
import requests
from indra.sources import bbn
def build_examples(node, tree, prefix):
if not prefix or prefix in ('entity', 'event'):
this_prefix = node
else:
this_prefix = prefix + ',' + node if prefix else node
for entry in tree:
if isinstance(entry, str) and entry[0] != '_':
child = entry
elif isinstance(entry, dict):
for child in entry.keys():
if child[0] != '_':
if isinstance(entry[child], (list, dict)):
build_examples(child, entry[child], this_prefix)
if child[0] != '_':
if this_prefix in all_examples:
all_examples[this_prefix].add(child)
else:
parts = this_prefix.split(',')
all_examples[this_prefix] = set(parts + [child])
if __name__ == '__main__':
# BBN
bbn_ont_url = ('https://raw.githubusercontent.com/BBN-E/Hume/master/'
'resource/ontologies/hume_ontology.yaml')
all_examples = {}
yml = requests.get(bbn_ont_url).content
root = yaml.load(yml)
for top_entry in root:
node = list(top_entry.keys())[0]
build_examples(node, top_entry[node], None)
with open('bbn_ontology_examples.tsv', 'w') as fh:
for k, v in sorted(all_examples.items(), key=lambda x: x[0]):
fh.write('%s\t%s\n' % (k, ','.join(sorted(list(v)))))
|
|
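A hedged sketch that runs build_examples on a tiny in-memory tree instead of fetching the full Hume YAML; the node names are invented, and it must run in the same module, since the function resolves all_examples as a module-level global exactly as the __main__ block above does.

all_examples = {}
toy_tree = [{'conflict': ['war', 'protest']}]
build_examples('event', toy_tree, None)
print(all_examples)  # maps comma-joined paths to sets of example terms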
5b27b3730e779b9d5e4428c069f5c52d2a403d69
|
GetToken.py
|
GetToken.py
|
#!/usr/bin/env python
import getpass
import sys
from optparse import OptionParser
try:
from ZSI.client import AUTH
except ImportError:
print '==> Failed to import ZSI'
print '===> Please make sure you have it installed and locatable from PYTHONPATH'
print ' http://pywebsvcs.sourceforge.net/'
print
sys.exit(-1)
class GetTokenError(Exception):
pass
def getToken(opts, password):
if not opts.production:
from staging import CommonService_client
from staging import CommonService_types
else:
from production import CommonService_client
from production import CommonService_types
locator = CommonService_client.CommonServiceLocator()
service = locator.getCommonServiceSoap(auth=(4, opts.user, password))
request = CommonService_client.GetClientTokenSoapIn()
request._request = CommonService_types.ns0.GetClientToken_Dec()
request._specification = CommonService_types.ns0.TokenSpecification_Def(opts.user)
request._specification._ClientIPAddress = opts.clientip
request._specification._TokenValidityDurationMinutes = int(opts.tokenvalidity)
result = service.GetClientToken(request)
if result.__class__.__name__ == 'GetClientTokenResponse_Holder':
return result._GetClientTokenResult
raise GetTokenError('Web service failed to return a proper response! %s' % result)
def main():
_op = OptionParser()
_op.add_option('--user', dest='user', help='User ID for Virtual Earth\'s Web Services')
_op.add_option('--production', dest='production', action='store_true', help='Generate a token for production use')
_op.add_option('--clientip', dest='clientip', default='0.0.0.0', help='Specify the ClientIPAddress argument for the CommonService.GetClientToken() API')
_op.add_option('--tokenvalidity', dest='tokenvalidity', default='480', help='Specify the TokenValidityDurationInMinutes argument for CommonService.GetClientToken(), must be between 15 and 480')
_op.add_option('--password', dest='password', default=None, help='Password for Virtual Earth\'s Web Services')
opts, args = _op.parse_args()
if not opts.user:
print '==> Missing "user" argument'
print
_op.print_help()
return -1
password = opts.password
if not opts.password:
password = getpass.getpass(prompt='Virtual Earth Password: ')
token = getToken(opts, password)
print '==> Generated token: %s' % token
return 0
if __name__ == '__main__':
rc = main()
sys.exit(rc)
|
Add a basic module and command-line tool for generating tokens
|
Add a basic module and command-line tool for generating tokens
Signed-off-by: R. Tyler Ballance <ae42760ef71e07cdc78c21849b44551816bda917@slide.com>
|
Python
|
bsd-3-clause
|
rtyler/pyve
|
Add a basic module and command-line tool for generating tokens
Signed-off-by: R. Tyler Ballance <ae42760ef71e07cdc78c21849b44551816bda917@slide.com>
|
#!/usr/bin/env python
import getpass
import sys
from optparse import OptionParser
try:
from ZSI.client import AUTH
except ImportError:
print '==> Failed to import ZSI'
print '===> Please make sure you have it installed and locatable from PYTHONPATH'
print ' http://pywebsvcs.sourceforge.net/'
print
sys.exit(-1)
class GetTokenError(Exception):
pass
def getToken(opts, password):
if not opts.production:
from staging import CommonService_client
from staging import CommonService_types
else:
from production import CommonService_client
from production import CommonService_types
locator = CommonService_client.CommonServiceLocator()
service = locator.getCommonServiceSoap(auth=(4, opts.user, password))
request = CommonService_client.GetClientTokenSoapIn()
request._request = CommonService_types.ns0.GetClientToken_Dec()
request._specification = CommonService_types.ns0.TokenSpecification_Def(opts.user)
request._specification._ClientIPAddress = opts.clientip
request._specification._TokenValidityDurationMinutes = int(opts.tokenvalidity)
result = service.GetClientToken(request)
if result.__class__.__name__ == 'GetClientTokenResponse_Holder':
return result._GetClientTokenResult
raise GetTokenError('Web service failed to return a proper response! %s' % result)
def main():
_op = OptionParser()
_op.add_option('--user', dest='user', help='User ID for Virtual Earth\'s Web Services')
_op.add_option('--production', dest='production', action='store_true', help='Generate a token for production use')
_op.add_option('--clientip', dest='clientip', default='0.0.0.0', help='Specify the ClientIPAddress argument for the CommonService.GetClientToken() API')
_op.add_option('--tokenvalidity', dest='tokenvalidity', default='480', help='Specify the TokenValidityDurationInMinutes argument for CommonService.GetClientToken(), must be between 15 and 480')
_op.add_option('--password', dest='password', default=None, help='Password for Virtual Earth\'s Web Services')
opts, args = _op.parse_args()
if not opts.user:
print '==> Missing "user" argument'
print
_op.print_help()
return -1
password = opts.password
if not opts.password:
password = getpass.getpass(prompt='Virtual Earth Password: ')
token = getToken(opts, password)
print '==> Generated token: %s' % token
return 0
if __name__ == '__main__':
rc = main()
sys.exit(rc)
|
<commit_before><commit_msg>Add a basic module and command-line tool for generating tokens
Signed-off-by: R. Tyler Ballance <ae42760ef71e07cdc78c21849b44551816bda917@slide.com><commit_after>
|
#!/usr/bin/env python
import getpass
import sys
from optparse import OptionParser
try:
from ZSI.client import AUTH
except ImportError:
print '==> Failed to import ZSI'
print '===> Please make sure you have it installed and locatable from PYTHONPATH'
print ' http://pywebsvcs.sourceforge.net/'
print
sys.exit(-1)
class GetTokenError(Exception):
pass
def getToken(opts, password):
if not opts.production:
from staging import CommonService_client
from staging import CommonService_types
else:
from production import CommonService_client
from production import CommonService_types
locator = CommonService_client.CommonServiceLocator()
service = locator.getCommonServiceSoap(auth=(4, opts.user, password))
request = CommonService_client.GetClientTokenSoapIn()
request._request = CommonService_types.ns0.GetClientToken_Dec()
request._specification = CommonService_types.ns0.TokenSpecification_Def(opts.user)
request._specification._ClientIPAddress = opts.clientip
request._specification._TokenValidityDurationMinutes = int(opts.tokenvalidity)
result = service.GetClientToken(request)
if result.__class__.__name__ == 'GetClientTokenResponse_Holder':
return result._GetClientTokenResult
raise GetTokenError('Web service failed to return a proper response! %s' % result)
def main():
_op = OptionParser()
_op.add_option('--user', dest='user', help='User ID for Virtual Earth\'s Web Services')
_op.add_option('--production', dest='production', action='store_true', help='Generate a token for production use')
_op.add_option('--clientip', dest='clientip', default='0.0.0.0', help='Specify the ClientIPAddress argument for the CommonService.GetClientToken() API')
_op.add_option('--tokenvalidity', dest='tokenvalidity', default='480', help='Specify the TokenValidityDurationInMinutes argument for CommonService.GetClientToken(), must be between 15 and 480')
_op.add_option('--password', dest='password', default=None, help='Password for Virtual Earth\'s Web Services')
opts, args = _op.parse_args()
if not opts.user:
print '==> Missing "user" argument'
print
_op.print_help()
return -1
password = opts.password
if not opts.password:
password = getpass.getpass(prompt='Virtual Earth Password: ')
token = getToken(opts, password)
print '==> Generated token: %s' % token
return 0
if __name__ == '__main__':
rc = main()
sys.exit(rc)
|
Add a basic module and command-line tool for generating tokens
Signed-off-by: R. Tyler Ballance <ae42760ef71e07cdc78c21849b44551816bda917@slide.com>#!/usr/bin/env python
import getpass
import sys
from optparse import OptionParser
try:
from ZSI.client import AUTH
except ImportError:
print '==> Failed to import ZSI'
print '===> Please make sure you have it installed and locatable from PYTHONPATH'
print ' http://pywebsvcs.sourceforge.net/'
print
sys.exit(-1)
class GetTokenError(Exception):
pass
def getToken(opts, password):
if not opts.production:
from staging import CommonService_client
from staging import CommonService_types
else:
from production import CommonService_client
from production import CommonService_types
locator = CommonService_client.CommonServiceLocator()
service = locator.getCommonServiceSoap(auth=(4, opts.user, password))
request = CommonService_client.GetClientTokenSoapIn()
request._request = CommonService_types.ns0.GetClientToken_Dec()
request._specification = CommonService_types.ns0.TokenSpecification_Def(opts.user)
request._specification._ClientIPAddress = opts.clientip
request._specification._TokenValidityDurationMinutes = int(opts.tokenvalidity)
result = service.GetClientToken(request)
if result.__class__.__name__ == 'GetClientTokenResponse_Holder':
return result._GetClientTokenResult
raise GetTokenError('Web service failed to return a proper response! %s' % result)
def main():
_op = OptionParser()
_op.add_option('--user', dest='user', help='User ID for Virtual Earth\'s Web Services')
_op.add_option('--production', dest='production', action='store_true', help='Generate a token for production use')
_op.add_option('--clientip', dest='clientip', default='0.0.0.0', help='Specify the ClientIPAddress argument for the CommonService.GetClientToken() API')
_op.add_option('--tokenvalidity', dest='tokenvalidity', default='480', help='Specify the TokenValidityDurationInMinutes argument for CommonService.GetClientToken(), must be between 15 and 480')
_op.add_option('--password', dest='password', default=None, help='Password for Virtual Earth\'s Web Services')
opts, args = _op.parse_args()
if not opts.user:
print '==> Missing "user" argument'
print
_op.print_help()
return -1
password = opts.password
if not opts.password:
password = getpass.getpass(prompt='Virtual Earth Password: ')
token = getToken(opts, password)
print '==> Generated token: %s' % token
return 0
if __name__ == '__main__':
rc = main()
sys.exit(rc)
|
<commit_before><commit_msg>Add a basic module and command-line tool for generating tokens
Signed-off-by: R. Tyler Ballance <ae42760ef71e07cdc78c21849b44551816bda917@slide.com><commit_after>#!/usr/bin/env python
import getpass
import sys
from optparse import OptionParser
try:
from ZSI.client import AUTH
except ImportError:
print '==> Failed to import ZSI'
print '===> Please make sure you have it installed and locatable from PYTHONPATH'
print ' http://pywebsvcs.sourceforge.net/'
print
sys.exit(-1)
class GetTokenError(Exception):
pass
def getToken(opts, password):
if not opts.production:
from staging import CommonService_client
from staging import CommonService_types
else:
from production import CommonService_client
from production import CommonService_types
locator = CommonService_client.CommonServiceLocator()
service = locator.getCommonServiceSoap(auth=(4, opts.user, password))
request = CommonService_client.GetClientTokenSoapIn()
request._request = CommonService_types.ns0.GetClientToken_Dec()
request._specification = CommonService_types.ns0.TokenSpecification_Def(opts.user)
request._specification._ClientIPAddress = opts.clientip
request._specification._TokenValidityDurationMinutes = int(opts.tokenvalidity)
result = service.GetClientToken(request)
if result.__class__.__name__ == 'GetClientTokenResponse_Holder':
return result._GetClientTokenResult
raise GetTokenError('Web service failed to return a proper response! %s' % result)
def main():
_op = OptionParser()
_op.add_option('--user', dest='user', help='User ID for Virtual Earth\'s Web Services')
_op.add_option('--production', dest='production', action='store_true', help='Generate a token for production use')
_op.add_option('--clientip', dest='clientip', default='0.0.0.0', help='Specify the ClientIPAddress argument for the CommonService.GetClientToken() API')
_op.add_option('--tokenvalidity', dest='tokenvalidity', default='480', help='Specify the TokenValidityDurationInMinutes argument for CommonService.GetClientToken(), must be between 15 and 480')
_op.add_option('--password', dest='password', default=None, help='Password for Virtual Earth\'s Web Services')
opts, args = _op.parse_args()
if not opts.user:
print '==> Missing "user" argument'
print
_op.print_help()
return -1
password = opts.password
if not opts.password:
password = getpass.getpass(prompt='Virtual Earth Password: ')
token = getToken(opts, password)
print '==> Generated token: %s' % token
return 0
if __name__ == '__main__':
rc = main()
sys.exit(rc)
|
|
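A hedged command-line usage sketch; the user id and client IP are placeholders, and --production switches from the staging bindings to the production ones as implemented above.

#   python GetToken.py --user MY_VE_USER --clientip 203.0.113.7 --tokenvalidity 60
#   python GetToken.py --user MY_VE_USER --production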
1f5e9b39f9942be0a6b64d1e05283415ad839e47
|
grammpy/IsMethodsRuleExtension.py
|
grammpy/IsMethodsRuleExtension.py
|
#!/usr/bin/env python
"""
:Author Patrik Valkovic
:Created 23.06.2017 16:39
:Licence GNUv3
Part of grammpy
"""
from .Rule import Rule
class IsMethodsRuleExtension(Rule):
@classmethod
def is_regular(cls):
return False
@classmethod
def is_contextfree(cls):
return False
@classmethod
def is_context(cls):
return False
@classmethod
def is_unrestricted(cls):
return False
|
Add is_* methods from old commit
|
Add is_* methods from old commit
|
Python
|
mit
|
PatrikValkovic/grammpy
|
Add is_* methods from old commit
|
#!/usr/bin/env python
"""
:Author Patrik Valkovic
:Created 23.06.2017 16:39
:Licence GNUv3
Part of grammpy
"""
from .Rule import Rule
class IsMethodsRuleExtension(Rule):
@classmethod
def is_regular(cls):
return False
@classmethod
def is_contextfree(cls):
return False
@classmethod
def is_context(cls):
return False
@classmethod
def is_unrestricted(cls):
return False
|
<commit_before><commit_msg>Add is_* methods from old commit<commit_after>
|
#!/usr/bin/env python
"""
:Author Patrik Valkovic
:Created 23.06.2017 16:39
:Licence GNUv3
Part of grammpy
"""
from .Rule import Rule
class IsMethodsRuleExtension(Rule):
@classmethod
def is_regular(cls):
return False
@classmethod
def is_contextfree(cls):
return False
@classmethod
def is_context(cls):
return False
@classmethod
def is_unrestricted(cls):
return False
|
Add is_* methods from old commit#!/usr/bin/env python
"""
:Author Patrik Valkovic
:Created 23.06.2017 16:39
:Licence GNUv3
Part of grammpy
"""
from .Rule import Rule
class IsMethodsRuleExtension(Rule):
@classmethod
def is_regular(cls):
return False
@classmethod
def is_contextfree(cls):
return False
@classmethod
def is_context(cls):
return False
@classmethod
def is_unrestricted(cls):
return False
|
<commit_before><commit_msg>Add is_* methods from old commit<commit_after>#!/usr/bin/env python
"""
:Author Patrik Valkovic
:Created 23.06.2017 16:39
:Licence GNUv3
Part of grammpy
"""
from .Rule import Rule
class IsMethodsRuleExtension(Rule):
@classmethod
def is_regular(cls):
return False
@classmethod
def is_contextfree(cls):
return False
@classmethod
def is_context(cls):
return False
@classmethod
def is_unrestricted(cls):
return False
|
|
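A hedged sketch showing how a concrete rule would override one predicate while inheriting the False defaults defined above; the subclass name is invented.

class MyContextFreeRule(IsMethodsRuleExtension):
    @classmethod
    def is_contextfree(cls):
        return True

print(MyContextFreeRule.is_contextfree())  # True
print(MyContextFreeRule.is_regular())      # False (inherited default)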
1343cbf191e1a665323c6e3a49dda15e5e65ad6b
|
presentationsapp/urls.py
|
presentationsapp/urls.py
|
from django.conf.urls import patterns, url
from presentationsapp import views
urlpatterns = patterns('',
#url(r'^(?P<course_id>[0-9]+)/$', views.courseDetails),
url(r'^$', views.index),
)
|
Add URL routing to first view
|
Add URL routing to first view
|
Python
|
mit
|
masonsbro/presentations
|
Add URL routing to first view
|
from django.conf.urls import patterns, url
from presentationsapp import views
urlpatterns = patterns('',
#url(r'^(?P<course_id>[0-9]+)/$', views.courseDetails),
url(r'^$', views.index),
)
|
<commit_before><commit_msg>Add URL routing to first view<commit_after>
|
from django.conf.urls import patterns, url
from presentationsapp import views
urlpatterns = patterns('',
#url(r'^(?P<course_id>[0-9]+)/$', views.courseDetails),
url(r'^$', views.index),
)
|
Add URL routing to first viewfrom django.conf.urls import patterns, url
from presentationsapp import views
urlpatterns = patterns('',
#url(r'^(?P<course_id>[0-9]+)/$', views.courseDetails),
url(r'^$', views.index),
)
|
<commit_before><commit_msg>Add URL routing to first view<commit_after>from django.conf.urls import patterns, url
from presentationsapp import views
urlpatterns = patterns('',
#url(r'^(?P<course_id>[0-9]+)/$', views.courseDetails),
url(r'^$', views.index),
)
|
|
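A hedged sketch of the index view this route points at; the commit only adds the URL pattern, so this body is illustrative, not the project's actual view.

from django.http import HttpResponse

def index(request):
    return HttpResponse('presentations index')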
e69c2f715093b7eda4a20a26e84af963fb5af9cf
|
regulations/tests/tasks_tests.py
|
regulations/tests/tasks_tests.py
|
import json
import mock
import six
from celery.exceptions import Retry, MaxRetriesExceededError
from requests.exceptions import RequestException
from django.test import SimpleTestCase, override_settings
from regulations.tasks import submit_comment
@mock.patch('regulations.models.FailedCommentSubmission.objects')
@mock.patch('regulations.tasks.submit_comment.retry')
@mock.patch('requests.post')
@mock.patch('regulations.tasks.html_to_pdf')
@override_settings(
ATTACHMENT_BUCKET='test-bucket',
ATTACHMENT_ACCESS_KEY_ID='test-access-key',
ATTACHMENT_SECRET_ACCESS_KEY='test-secret-key',
ATTACHMENT_MAX_SIZE=42,
REGS_GOV_API_URL='test-url',
REGS_GOV_API_KEY='test-key',
)
class TestSubmitComment(SimpleTestCase):
def test_submit_comment(self, html_to_pdf, post, retry, query_set):
file_handle = six.BytesIO("foobar")
html_to_pdf.return_value.__enter__ = mock.Mock(
return_value=file_handle)
expected_result = {'tracking_number': '133321'}
post.return_value.status_code = 201
post.return_value.json.return_value = expected_result
body = {'assembled_comment': {'sections': []}}
result = submit_comment(body)
self.assertEqual(result, expected_result)
def test_failed_submit_raises_retry(self, html_to_pdf, post, retry,
query_set):
file_handle = six.BytesIO("foobar")
html_to_pdf.return_value.__enter__ = mock.Mock(
return_value=file_handle)
post.side_effect = [RequestException]
retry.return_value = Retry()
body = {'assembled_comment': {'sections': []}}
with self.assertRaises(Retry):
submit_comment(body)
def test_failed_submit_maximum_retries(self, html_to_pdf, post, retry,
query_set):
file_handle = six.BytesIO("foobar")
html_to_pdf.return_value.__enter__ = mock.Mock(
return_value=file_handle)
post.side_effect = [RequestException]
retry.return_value = MaxRetriesExceededError()
body = {'assembled_comment': {'sections': []}}
submit_comment(body)
query_set.create.assert_called_with(body=json.dumps(body))
|
Add unit tests for the submit_comment task
|
Add unit tests for the submit_comment task
|
Python
|
cc0-1.0
|
18F/regulations-site,eregs/regulations-site,eregs/regulations-site,eregs/regulations-site,tadhg-ohiggins/regulations-site,eregs/regulations-site,18F/regulations-site,tadhg-ohiggins/regulations-site,tadhg-ohiggins/regulations-site,18F/regulations-site,18F/regulations-site,tadhg-ohiggins/regulations-site
|
Add unit tests for the submit_comment task
|
import json
import mock
import six
from celery.exceptions import Retry, MaxRetriesExceededError
from requests.exceptions import RequestException
from django.test import SimpleTestCase, override_settings
from regulations.tasks import submit_comment
@mock.patch('regulations.models.FailedCommentSubmission.objects')
@mock.patch('regulations.tasks.submit_comment.retry')
@mock.patch('requests.post')
@mock.patch('regulations.tasks.html_to_pdf')
@override_settings(
ATTACHMENT_BUCKET='test-bucket',
ATTACHMENT_ACCESS_KEY_ID='test-access-key',
ATTACHMENT_SECRET_ACCESS_KEY='test-secret-key',
ATTACHMENT_MAX_SIZE=42,
REGS_GOV_API_URL='test-url',
REGS_GOV_API_KEY='test-key',
)
class TestSubmitComment(SimpleTestCase):
def test_submit_comment(self, html_to_pdf, post, retry, query_set):
file_handle = six.BytesIO("foobar")
html_to_pdf.return_value.__enter__ = mock.Mock(
return_value=file_handle)
expected_result = {'tracking_number': '133321'}
post.return_value.status_code = 201
post.return_value.json.return_value = expected_result
body = {'assembled_comment': {'sections': []}}
result = submit_comment(body)
self.assertEqual(result, expected_result)
def test_failed_submit_raises_retry(self, html_to_pdf, post, retry,
query_set):
file_handle = six.BytesIO("foobar")
html_to_pdf.return_value.__enter__ = mock.Mock(
return_value=file_handle)
post.side_effect = [RequestException]
retry.return_value = Retry()
body = {'assembled_comment': {'sections': []}}
with self.assertRaises(Retry):
submit_comment(body)
def test_failed_submit_maximum_retries(self, html_to_pdf, post, retry,
query_set):
file_handle = six.BytesIO("foobar")
html_to_pdf.return_value.__enter__ = mock.Mock(
return_value=file_handle)
post.side_effect = [RequestException]
retry.return_value = MaxRetriesExceededError()
body = {'assembled_comment': {'sections': []}}
submit_comment(body)
query_set.create.assert_called_with(body=json.dumps(body))
|
<commit_before><commit_msg>Add unit tests for the submit_comment task<commit_after>
|
import json
import mock
import six
from celery.exceptions import Retry, MaxRetriesExceededError
from requests.exceptions import RequestException
from django.test import SimpleTestCase, override_settings
from regulations.tasks import submit_comment
@mock.patch('regulations.models.FailedCommentSubmission.objects')
@mock.patch('regulations.tasks.submit_comment.retry')
@mock.patch('requests.post')
@mock.patch('regulations.tasks.html_to_pdf')
@override_settings(
ATTACHMENT_BUCKET='test-bucket',
ATTACHMENT_ACCESS_KEY_ID='test-access-key',
ATTACHMENT_SECRET_ACCESS_KEY='test-secret-key',
ATTACHMENT_MAX_SIZE=42,
REGS_GOV_API_URL='test-url',
REGS_GOV_API_KEY='test-key',
)
class TestSubmitComment(SimpleTestCase):
def test_submit_comment(self, html_to_pdf, post, retry, query_set):
file_handle = six.BytesIO("foobar")
html_to_pdf.return_value.__enter__ = mock.Mock(
return_value=file_handle)
expected_result = {'tracking_number': '133321'}
post.return_value.status_code = 201
post.return_value.json.return_value = expected_result
body = {'assembled_comment': {'sections': []}}
result = submit_comment(body)
self.assertEqual(result, expected_result)
def test_failed_submit_raises_retry(self, html_to_pdf, post, retry,
query_set):
file_handle = six.BytesIO("foobar")
html_to_pdf.return_value.__enter__ = mock.Mock(
return_value=file_handle)
post.side_effect = [RequestException]
retry.return_value = Retry()
body = {'assembled_comment': {'sections': []}}
with self.assertRaises(Retry):
submit_comment(body)
def test_failed_submit_maximum_retries(self, html_to_pdf, post, retry,
query_set):
file_handle = six.BytesIO("foobar")
html_to_pdf.return_value.__enter__ = mock.Mock(
return_value=file_handle)
post.side_effect = [RequestException]
retry.return_value = MaxRetriesExceededError()
body = {'assembled_comment': {'sections': []}}
submit_comment(body)
query_set.create.assert_called_with(body=json.dumps(body))
|
Add unit tests for the submit_comment taskimport json
import mock
import six
from celery.exceptions import Retry, MaxRetriesExceededError
from requests.exceptions import RequestException
from django.test import SimpleTestCase, override_settings
from regulations.tasks import submit_comment
@mock.patch('regulations.models.FailedCommentSubmission.objects')
@mock.patch('regulations.tasks.submit_comment.retry')
@mock.patch('requests.post')
@mock.patch('regulations.tasks.html_to_pdf')
@override_settings(
ATTACHMENT_BUCKET='test-bucket',
ATTACHMENT_ACCESS_KEY_ID='test-access-key',
ATTACHMENT_SECRET_ACCESS_KEY='test-secret-key',
ATTACHMENT_MAX_SIZE=42,
REGS_GOV_API_URL='test-url',
REGS_GOV_API_KEY='test-key',
)
class TestSubmitComment(SimpleTestCase):
def test_submit_comment(self, html_to_pdf, post, retry, query_set):
file_handle = six.BytesIO("foobar")
html_to_pdf.return_value.__enter__ = mock.Mock(
return_value=file_handle)
expected_result = {'tracking_number': '133321'}
post.return_value.status_code = 201
post.return_value.json.return_value = expected_result
body = {'assembled_comment': {'sections': []}}
result = submit_comment(body)
self.assertEqual(result, expected_result)
def test_failed_submit_raises_retry(self, html_to_pdf, post, retry,
query_set):
file_handle = six.BytesIO("foobar")
html_to_pdf.return_value.__enter__ = mock.Mock(
return_value=file_handle)
post.side_effect = [RequestException]
retry.return_value = Retry()
body = {'assembled_comment': {'sections': []}}
with self.assertRaises(Retry):
submit_comment(body)
def test_failed_submit_maximum_retries(self, html_to_pdf, post, retry,
query_set):
file_handle = six.BytesIO("foobar")
html_to_pdf.return_value.__enter__ = mock.Mock(
return_value=file_handle)
post.side_effect = [RequestException]
retry.return_value = MaxRetriesExceededError()
body = {'assembled_comment': {'sections': []}}
submit_comment(body)
query_set.create.assert_called_with(body=json.dumps(body))
|
<commit_before><commit_msg>Add unit tests for the submit_comment task<commit_after>import json
import mock
import six
from celery.exceptions import Retry, MaxRetriesExceededError
from requests.exceptions import RequestException
from django.test import SimpleTestCase, override_settings
from regulations.tasks import submit_comment
@mock.patch('regulations.models.FailedCommentSubmission.objects')
@mock.patch('regulations.tasks.submit_comment.retry')
@mock.patch('requests.post')
@mock.patch('regulations.tasks.html_to_pdf')
@override_settings(
ATTACHMENT_BUCKET='test-bucket',
ATTACHMENT_ACCESS_KEY_ID='test-access-key',
ATTACHMENT_SECRET_ACCESS_KEY='test-secret-key',
ATTACHMENT_MAX_SIZE=42,
REGS_GOV_API_URL='test-url',
REGS_GOV_API_KEY='test-key',
)
class TestSubmitComment(SimpleTestCase):
def test_submit_comment(self, html_to_pdf, post, retry, query_set):
file_handle = six.BytesIO("foobar")
html_to_pdf.return_value.__enter__ = mock.Mock(
return_value=file_handle)
expected_result = {'tracking_number': '133321'}
post.return_value.status_code = 201
post.return_value.json.return_value = expected_result
body = {'assembled_comment': {'sections': []}}
result = submit_comment(body)
self.assertEqual(result, expected_result)
def test_failed_submit_raises_retry(self, html_to_pdf, post, retry,
query_set):
file_handle = six.BytesIO("foobar")
html_to_pdf.return_value.__enter__ = mock.Mock(
return_value=file_handle)
post.side_effect = [RequestException]
retry.return_value = Retry()
body = {'assembled_comment': {'sections': []}}
with self.assertRaises(Retry):
submit_comment(body)
def test_failed_submit_maximum_retries(self, html_to_pdf, post, retry,
query_set):
file_handle = six.BytesIO("foobar")
html_to_pdf.return_value.__enter__ = mock.Mock(
return_value=file_handle)
post.side_effect = [RequestException]
retry.return_value = MaxRetriesExceededError()
body = {'assembled_comment': {'sections': []}}
submit_comment(body)
query_set.create.assert_called_with(body=json.dumps(body))
|
|
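One detail worth spelling out about the tests above: mock.patch decorators apply bottom-up, so html_to_pdf (the innermost patch) arrives first after self. A minimal standalone illustration:

import mock

@mock.patch('os.getcwd')    # outermost -> last argument
@mock.patch('os.listdir')   # innermost -> first argument
def demo(listdir, getcwd):
    return listdir, getcwd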
40ad6b1dbdc715aa0e4dfdff4bd689bbe795561b
|
test_mccabe.py
|
test_mccabe.py
|
import unittest
import sys
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
from mccabe import get_code_complexity
_GLOBAL = """\
for i in range(10):
pass
def a():
def b():
def c():
pass
c()
b()
"""
class McCabeTest(unittest.TestCase):
def setUp(self):
self.old = sys.stdout
self.out = sys.stdout = StringIO()
def tearDown(self):
sys.stdout = self.old
def test_sample(self):
self.assertEqual(get_code_complexity(_GLOBAL, 1), 2)
self.out.seek(0)
res = self.out.read().strip().split('\n')
wanted = ["stdin:5:1: C901 'a' is too complex (4)",
"stdin:2:1: C901 'Loop 2' is too complex (2)"]
self.assertEqual(res, wanted)
|
Add simple test for McCabe checker
|
Add simple test for McCabe checker
|
Python
|
mit
|
flintwork/mccabe,sigmavirus24/mccabe
|
Add simple test for McCabe checker
|
import unittest
import sys
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
from mccabe import get_code_complexity
_GLOBAL = """\
for i in range(10):
pass
def a():
def b():
def c():
pass
c()
b()
"""
class McCabeTest(unittest.TestCase):
def setUp(self):
self.old = sys.stdout
self.out = sys.stdout = StringIO()
def tearDown(self):
sys.stdout = self.old
def test_sample(self):
self.assertEqual(get_code_complexity(_GLOBAL, 1), 2)
self.out.seek(0)
res = self.out.read().strip().split('\n')
wanted = ["stdin:5:1: C901 'a' is too complex (4)",
"stdin:2:1: C901 'Loop 2' is too complex (2)"]
self.assertEqual(res, wanted)
|
<commit_before><commit_msg>Add simple test for McCabe checker<commit_after>
|
import unittest
import sys
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
from mccabe import get_code_complexity
_GLOBAL = """\
for i in range(10):
pass
def a():
def b():
def c():
pass
c()
b()
"""
class McCabeTest(unittest.TestCase):
def setUp(self):
self.old = sys.stdout
self.out = sys.stdout = StringIO()
def tearDown(self):
sys.stdout = self.old
def test_sample(self):
self.assertEqual(get_code_complexity(_GLOBAL, 1), 2)
self.out.seek(0)
res = self.out.read().strip().split('\n')
wanted = ["stdin:5:1: C901 'a' is too complex (4)",
"stdin:2:1: C901 'Loop 2' is too complex (2)"]
self.assertEqual(res, wanted)
|
Add simple test for McCabe checkerimport unittest
import sys
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
from mccabe import get_code_complexity
_GLOBAL = """\
for i in range(10):
pass
def a():
def b():
def c():
pass
c()
b()
"""
class McCabeTest(unittest.TestCase):
def setUp(self):
self.old = sys.stdout
self.out = sys.stdout = StringIO()
def tearDown(self):
sys.stdout = self.old
def test_sample(self):
self.assertEqual(get_code_complexity(_GLOBAL, 1), 2)
self.out.seek(0)
res = self.out.read().strip().split('\n')
wanted = ["stdin:5:1: C901 'a' is too complex (4)",
"stdin:2:1: C901 'Loop 2' is too complex (2)"]
self.assertEqual(res, wanted)
|
<commit_before><commit_msg>Add simple test for McCabe checker<commit_after>import unittest
import sys
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
from mccabe import get_code_complexity
_GLOBAL = """\
for i in range(10):
pass
def a():
def b():
def c():
pass
c()
b()
"""
class McCabeTest(unittest.TestCase):
def setUp(self):
self.old = sys.stdout
self.out = sys.stdout = StringIO()
def tearDown(self):
sys.stdout = self.old
def test_sample(self):
self.assertEqual(get_code_complexity(_GLOBAL, 1), 2)
self.out.seek(0)
res = self.out.read().strip().split('\n')
wanted = ["stdin:5:1: C901 'a' is too complex (4)",
"stdin:2:1: C901 'Loop 2' is too complex (2)"]
self.assertEqual(res, wanted)
|
|
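A hedged sketch of calling the checker directly, outside unittest; get_code_complexity prints a C901 line for each block whose complexity exceeds the threshold and returns the number of offending blocks.

from mccabe import get_code_complexity

sample = "def a():\n    if True:\n        pass\n"
get_code_complexity(sample, threshold=1)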
0c3c8a84f8c0bce983e8fe0951dd26314951142c
|
bin/switch-country.py
|
bin/switch-country.py
|
#!/usr/bin/env python
# coding=UTF-8
import os
from os.path import dirname, join, normpath, realpath
import sys
script_directory = dirname(realpath(sys.argv[0]))
root_directory = join(script_directory, '..')
root_directory = normpath(root_directory)
election_options = [
e for e in os.listdir(join(root_directory, 'elections'))
if not e.startswith('__init__.py')
]
def usage_and_exit():
print >> sys.stderr, "Usage: %s <ELECTION>" % (sys.argv[0],)
print >> sys.stderr, "... where <ELECTION> is one of:"
for election in election_options:
print >> sys.stderr, " ", election
sys.exit(1)
if len(sys.argv) != 2:
usage_and_exit()
requested = sys.argv[1]
if requested not in election_options:
usage_and_exit()
general_yml_symlink = os.path.join(root_directory, 'conf', 'general.yml')
general_yml_target = 'general-' + requested + '.yml'
def switch_link(symlink_filename, target_filename):
if not os.path.islink(symlink_filename):
print >> sys.stderr, "%s was not a symlink, and should be" % (symlink_filename,)
sys.exit(1)
full_target_filename = os.path.join(os.path.dirname(symlink_filename),
target_filename)
if not os.path.exists(full_target_filename):
print >> sys.stderr, "The intended target of the symlink (%s) didn't exist" % (target_filename,)
sys.exit(1)
os.unlink(symlink_filename)
os.symlink(target_filename, symlink_filename)
switch_link(general_yml_symlink, general_yml_target)
|
Add a script to make it easier to switch between elections
|
Add a script to make it easier to switch between elections
|
Python
|
agpl-3.0
|
mysociety/yournextrepresentative,neavouli/yournextrepresentative,datamade/yournextmp-popit,mysociety/yournextmp-popit,DemocracyClub/yournextrepresentative,neavouli/yournextrepresentative,openstate/yournextrepresentative,mysociety/yournextrepresentative,openstate/yournextrepresentative,neavouli/yournextrepresentative,mysociety/yournextrepresentative,mysociety/yournextrepresentative,mysociety/yournextmp-popit,openstate/yournextrepresentative,YoQuieroSaber/yournextrepresentative,neavouli/yournextrepresentative,neavouli/yournextrepresentative,mysociety/yournextmp-popit,mysociety/yournextmp-popit,datamade/yournextmp-popit,datamade/yournextmp-popit,YoQuieroSaber/yournextrepresentative,YoQuieroSaber/yournextrepresentative,YoQuieroSaber/yournextrepresentative,YoQuieroSaber/yournextrepresentative,openstate/yournextrepresentative,mysociety/yournextmp-popit,DemocracyClub/yournextrepresentative,datamade/yournextmp-popit,datamade/yournextmp-popit,mysociety/yournextrepresentative,DemocracyClub/yournextrepresentative,openstate/yournextrepresentative
|
Add a script to make it easier to switch between elections
|
#!/usr/bin/env python
# coding=UTF-8
import os
from os.path import dirname, join, normpath, realpath
import sys
script_directory = dirname(realpath(sys.argv[0]))
root_directory = join(script_directory, '..')
root_directory = normpath(root_directory)
election_options = [
e for e in os.listdir(join(root_directory, 'elections'))
if not e.startswith('__init__.py')
]
def usage_and_exit():
print >> sys.stderr, "Usage: %s <ELECTION>" % (sys.argv[0],)
print >> sys.stderr, "... where <ELECTION> is one of:"
for election in election_options:
print >> sys.stderr, " ", election
sys.exit(1)
if len(sys.argv) != 2:
usage_and_exit()
requested = sys.argv[1]
if requested not in election_options:
usage_and_exit()
general_yml_symlink = os.path.join(root_directory, 'conf', 'general.yml')
general_yml_target = 'general-' + requested + '.yml'
def switch_link(symlink_filename, target_filename):
if not os.path.islink(symlink_filename):
print >> sys.stderr, "%s was not a symlink, and should be" % (symlink_filename,)
sys.exit(1)
full_target_filename = os.path.join(os.path.dirname(symlink_filename),
target_filename)
if not os.path.exists(full_target_filename):
print >> sys.stderr, "The intended target of the symlink (%s) didn't exist" % (target_filename,)
sys.exit(1)
os.unlink(symlink_filename)
os.symlink(target_filename, symlink_filename)
switch_link(general_yml_symlink, general_yml_target)
|
<commit_before><commit_msg>Add a script to make it easier to switch between elections<commit_after>
|
#!/usr/bin/env python
# coding=UTF-8
import os
from os.path import dirname, join, normpath, realpath
import sys
script_directory = dirname(realpath(sys.argv[0]))
root_directory = join(script_directory, '..')
root_directory = normpath(root_directory)
election_options = [
e for e in os.listdir(join(root_directory, 'elections'))
if not e.startswith('__init__.py')
]
def usage_and_exit():
print >> sys.stderr, "Usage: %s <ELECTION>" % (sys.argv[0],)
print >> sys.stderr, "... where <ELECTION> is one of:"
for election in election_options:
print >> sys.stderr, " ", election
sys.exit(1)
if len(sys.argv) != 2:
usage_and_exit()
requested = sys.argv[1]
if requested not in election_options:
usage_and_exit()
general_yml_symlink = os.path.join(root_directory, 'conf', 'general.yml')
general_yml_target = 'general-' + requested + '.yml'
def switch_link(symlink_filename, target_filename):
if not os.path.islink(symlink_filename):
print >> sys.stderr, "%s was not a symlink, and should be" % (symlink_filename,)
sys.exit(1)
full_target_filename = os.path.join(os.path.dirname(symlink_filename),
target_filename)
if not os.path.exists(full_target_filename):
print >> sys.stderr, "The intended target of the symlink (%s) didn't exist" % (target_filename,)
sys.exit(1)
os.unlink(symlink_filename)
os.symlink(target_filename, symlink_filename)
switch_link(general_yml_symlink, general_yml_target)
|
Add a script to make it easier to switch between elections#!/usr/bin/env python
# coding=UTF-8
import os
from os.path import dirname, join, normpath, realpath
import sys
script_directory = dirname(realpath(sys.argv[0]))
root_directory = join(script_directory, '..')
root_directory = normpath(root_directory)
election_options = [
e for e in os.listdir(join(root_directory, 'elections'))
if not e.startswith('__init__.py')
]
def usage_and_exit():
print >> sys.stderr, "Usage: %s <ELECTION>" % (sys.argv[0],)
print >> sys.stderr, "... where <ELECTION> is one of:"
for election in election_options:
print >> sys.stderr, " ", election
sys.exit(1)
if len(sys.argv) != 2:
usage_and_exit()
requested = sys.argv[1]
if requested not in election_options:
usage_and_exit()
general_yml_symlink = os.path.join(root_directory, 'conf', 'general.yml')
general_yml_target = 'general-' + requested + '.yml'
def switch_link(symlink_filename, target_filename):
if not os.path.islink(symlink_filename):
print >> sys.stderr, "%s was not a symlink, and should be" % (symlink_filename,)
sys.exit(1)
full_target_filename = os.path.join(os.path.dirname(symlink_filename),
target_filename)
if not os.path.exists(full_target_filename):
print >> sys.stderr, "The intended target of the symlink (%s) didn't exist" % (target_filename,)
sys.exit(1)
os.unlink(symlink_filename)
os.symlink(target_filename, symlink_filename)
switch_link(general_yml_symlink, general_yml_target)
|
<commit_before><commit_msg>Add a script to make it easier to switch between elections<commit_after>#!/usr/bin/env python
# coding=UTF-8
import os
from os.path import dirname, join, normpath, realpath
import sys
script_directory = dirname(realpath(sys.argv[0]))
root_directory = join(script_directory, '..')
root_directory = normpath(root_directory)
election_options = [
e for e in os.listdir(join(root_directory, 'elections'))
if not e.startswith('__init__.py')
]
def usage_and_exit():
print >> sys.stderr, "Usage: %s <ELECTION>" % (sys.argv[0],)
print >> sys.stderr, "... where <ELECTION> is one of:"
for election in election_options:
print >> sys.stderr, " ", election
sys.exit(1)
if len(sys.argv) != 2:
usage_and_exit()
requested = sys.argv[1]
if requested not in election_options:
usage_and_exit()
general_yml_symlink = os.path.join(root_directory, 'conf', 'general.yml')
general_yml_target = 'general-' + requested + '.yml'
def switch_link(symlink_filename, target_filename):
if not os.path.islink(symlink_filename):
print >> sys.stderr, "%s was not a symlink, and should be" % (symlink_filename,)
sys.exit(1)
full_target_filename = os.path.join(os.path.dirname(symlink_filename),
target_filename)
if not os.path.exists(full_target_filename):
print >> sys.stderr, "The intended target of the symlink (%s) didn't exist" % (target_filename,)
sys.exit(1)
os.unlink(symlink_filename)
os.symlink(target_filename, symlink_filename)
switch_link(general_yml_symlink, general_yml_target)
|
|
2ac80e13e983ee102a475a3c4e7783d38b2f8603
|
cptm/experiment_prune_samples.py
|
cptm/experiment_prune_samples.py
|
import logging
import argparse
from os import remove
from cptm.utils.experiment import load_config, get_corpus, get_sampler
logger = logging.getLogger(__name__)
logging.basicConfig(format='%(levelname)s : %(message)s', level=logging.INFO)
logging.getLogger('gensim').setLevel(logging.ERROR)
logging.getLogger('CPTCorpus').setLevel(logging.ERROR)
logging.getLogger('CPT_Gibbs').setLevel(logging.ERROR)
parser = argparse.ArgumentParser()
parser.add_argument('json', help='json file containing experiment '
'configuration.')
args = parser.parse_args()
config = load_config(args.json)
corpus = get_corpus(config)
nTopics = config.get('expNumTopics')
nIter = config.get('nIter')
outDir = config.get('outDir')
sampleInterval = 10
for nt in nTopics:
sampler = get_sampler(config, corpus, nTopics=nt)
logging.info('removing parameter sample files for nTopics = {}'.format(nt))
for t in range(sampler.nIter):
if t != 0 and (t+1) % sampleInterval != 0:
try:
remove(sampler.get_theta_file_name(t))
except:
pass
try:
remove(sampler.get_phi_topic_file_name(t))
except:
pass
for persp in range(sampler.nPerspectives):
try:
remove(sampler.get_phi_opinion_file_name(persp, t))
except:
pass
|
Add script to prune parameter sample files
|
Add script to prune parameter sample files
The parameter sample files for a decent experiment take up a lot of
space. Because we don't need all of them, we remove 90%.
|
Python
|
apache-2.0
|
NLeSC/cptm,NLeSC/cptm
|
Add script to prune parameter sample files
The parameter sample files for a decent experiment take up a lot of
space. Because we don't need all of them, we remove 90%.
|
import logging
import argparse
from os import remove
from cptm.utils.experiment import load_config, get_corpus, get_sampler
logger = logging.getLogger(__name__)
logging.basicConfig(format='%(levelname)s : %(message)s', level=logging.INFO)
logging.getLogger('gensim').setLevel(logging.ERROR)
logging.getLogger('CPTCorpus').setLevel(logging.ERROR)
logging.getLogger('CPT_Gibbs').setLevel(logging.ERROR)
parser = argparse.ArgumentParser()
parser.add_argument('json', help='json file containing experiment '
'configuration.')
args = parser.parse_args()
config = load_config(args.json)
corpus = get_corpus(config)
nTopics = config.get('expNumTopics')
nIter = config.get('nIter')
outDir = config.get('outDir')
sampleInterval = 10
for nt in nTopics:
sampler = get_sampler(config, corpus, nTopics=nt)
logging.info('removing parameter sample files for nTopics = {}'.format(nt))
for t in range(sampler.nIter):
if t != 0 and (t+1) % sampleInterval != 0:
try:
remove(sampler.get_theta_file_name(t))
except:
pass
try:
remove(sampler.get_phi_topic_file_name(t))
except:
pass
for persp in range(sampler.nPerspectives):
try:
remove(sampler.get_phi_opinion_file_name(persp, t))
except:
pass
|
<commit_before><commit_msg>Add script to prune parameter sample files
The parameter sample files for a decent experiment take up a lot of
space. Because we don't need all of them, we remove 90%.<commit_after>
|
import logging
import argparse
from os import remove
from cptm.utils.experiment import load_config, get_corpus, get_sampler
logger = logging.getLogger(__name__)
logging.basicConfig(format='%(levelname)s : %(message)s', level=logging.INFO)
logging.getLogger('gensim').setLevel(logging.ERROR)
logging.getLogger('CPTCorpus').setLevel(logging.ERROR)
logging.getLogger('CPT_Gibbs').setLevel(logging.ERROR)
parser = argparse.ArgumentParser()
parser.add_argument('json', help='json file containing experiment '
'configuration.')
args = parser.parse_args()
config = load_config(args.json)
corpus = get_corpus(config)
nTopics = config.get('expNumTopics')
nIter = config.get('nIter')
outDir = config.get('outDir')
sampleInterval = 10
for nt in nTopics:
sampler = get_sampler(config, corpus, nTopics=nt)
logging.info('removing parameter sample files for nTopics = {}'.format(nt))
for t in range(sampler.nIter):
if t != 0 and (t+1) % sampleInterval != 0:
try:
remove(sampler.get_theta_file_name(t))
except:
pass
try:
remove(sampler.get_phi_topic_file_name(t))
except:
pass
for persp in range(sampler.nPerspectives):
try:
remove(sampler.get_phi_opinion_file_name(persp, t))
except:
pass
|
Add script to prune parameter sample files
The parameter sample files for a decent experiment take up a lot of
space. Because we don't need all of them, we remove 90%.import logging
import argparse
from os import remove
from cptm.utils.experiment import load_config, get_corpus, get_sampler
logger = logging.getLogger(__name__)
logging.basicConfig(format='%(levelname)s : %(message)s', level=logging.INFO)
logging.getLogger('gensim').setLevel(logging.ERROR)
logging.getLogger('CPTCorpus').setLevel(logging.ERROR)
logging.getLogger('CPT_Gibbs').setLevel(logging.ERROR)
parser = argparse.ArgumentParser()
parser.add_argument('json', help='json file containing experiment '
'configuration.')
args = parser.parse_args()
config = load_config(args.json)
corpus = get_corpus(config)
nTopics = config.get('expNumTopics')
nIter = config.get('nIter')
outDir = config.get('outDir')
sampleInterval = 10
for nt in nTopics:
sampler = get_sampler(config, corpus, nTopics=nt)
logging.info('removing parameter sample files for nTopics = {}'.format(nt))
for t in range(sampler.nIter):
if t != 0 and (t+1) % sampleInterval != 0:
try:
remove(sampler.get_theta_file_name(t))
except:
pass
try:
remove(sampler.get_phi_topic_file_name(t))
except:
pass
for persp in range(sampler.nPerspectives):
try:
remove(sampler.get_phi_opinion_file_name(persp, t))
except:
pass
|
<commit_before><commit_msg>Add script to prune parameter sample files
The parameter sample files for a decent experiment take up a lot of
space. Because we don't need all of them, we remove 90%.<commit_after>import logging
import argparse
from os import remove
from cptm.utils.experiment import load_config, get_corpus, get_sampler
logger = logging.getLogger(__name__)
logging.basicConfig(format='%(levelname)s : %(message)s', level=logging.INFO)
logging.getLogger('gensim').setLevel(logging.ERROR)
logging.getLogger('CPTCorpus').setLevel(logging.ERROR)
logging.getLogger('CPT_Gibbs').setLevel(logging.ERROR)
parser = argparse.ArgumentParser()
parser.add_argument('json', help='json file containing experiment '
'configuration.')
args = parser.parse_args()
config = load_config(args.json)
corpus = get_corpus(config)
nTopics = config.get('expNumTopics')
nIter = config.get('nIter')
outDir = config.get('outDir')
sampleInterval = 10
for nt in nTopics:
sampler = get_sampler(config, corpus, nTopics=nt)
logging.info('removing parameter sample files for nTopics = {}'.format(nt))
for t in range(sampler.nIter):
if t != 0 and (t+1) % sampleInterval != 0:
try:
remove(sampler.get_theta_file_name(t))
except:
pass
try:
remove(sampler.get_phi_topic_file_name(t))
except:
pass
for persp in range(sampler.nPerspectives):
try:
remove(sampler.get_phi_opinion_file_name(persp, t))
except:
pass
|
|
6a90a4b27c3166b3b4d9f79791b511b9f403a682
|
zerver/migrations/0416_set_default_emoji_style.py
|
zerver/migrations/0416_set_default_emoji_style.py
|
from django.db import migrations
from django.db.backends.postgresql.schema import BaseDatabaseSchemaEditor
from django.db.migrations.state import StateApps
def update_deprecated_emoji_style(apps: StateApps, schema_editor: BaseDatabaseSchemaEditor) -> None:
"""
This migration updates the emoji style for users who are using the
deprecated Google blob style. Unless they are part of an organization
which has Google blob as an organization default, these users will
now use the modern Google emoji style.
"""
UserProfile = apps.get_model("zerver", "UserProfile")
RealmUserDefault = apps.get_model("zerver", "RealmUserDefault")
UserProfile.objects.filter(emojiset="google-blob").exclude(
realm__in=RealmUserDefault.objects.filter(emojiset="google-blob").values("realm")
).update(emojiset="google")
class Migration(migrations.Migration):
dependencies = [
("zerver", "0415_delete_scimclient"),
]
operations = [
migrations.RunPython(update_deprecated_emoji_style, elidable=True),
]
|
Switch users with blob emoji to use Google style.
|
emoji: Switch users with blob emoji to use Google style.
|
Python
|
apache-2.0
|
zulip/zulip,zulip/zulip,zulip/zulip,zulip/zulip,zulip/zulip,zulip/zulip,zulip/zulip
|
emoji: Switch users with blob emoji to use Google style.
|
from django.db import migrations
from django.db.backends.postgresql.schema import BaseDatabaseSchemaEditor
from django.db.migrations.state import StateApps
def update_deprecated_emoji_style(apps: StateApps, schema_editor: BaseDatabaseSchemaEditor) -> None:
"""
This migration updates the emoji style for users who are using the
deprecated Google blob style. Unless they are part of an organization
which has Google blob as an organization default, these users will
now use the modern Google emoji style.
"""
UserProfile = apps.get_model("zerver", "UserProfile")
RealmUserDefault = apps.get_model("zerver", "RealmUserDefault")
UserProfile.objects.filter(emojiset="google-blob").exclude(
realm__in=RealmUserDefault.objects.filter(emojiset="google-blob").values("realm")
).update(emojiset="google")
class Migration(migrations.Migration):
dependencies = [
("zerver", "0415_delete_scimclient"),
]
operations = [
migrations.RunPython(update_deprecated_emoji_style, elidable=True),
]
|
<commit_before><commit_msg>emoji: Switch users with blob emoji to use Google style.<commit_after>
|
from django.db import migrations
from django.db.backends.postgresql.schema import BaseDatabaseSchemaEditor
from django.db.migrations.state import StateApps
def update_deprecated_emoji_style(apps: StateApps, schema_editor: BaseDatabaseSchemaEditor) -> None:
"""
This migration updates the emoji style for users who are using the
deprecated Google blob style. Unless they are part of an organization
which has Google blob as an organization default, these users will
now use the modern Google emoji style.
"""
UserProfile = apps.get_model("zerver", "UserProfile")
RealmUserDefault = apps.get_model("zerver", "RealmUserDefault")
UserProfile.objects.filter(emojiset="google-blob").exclude(
realm__in=RealmUserDefault.objects.filter(emojiset="google-blob").values("realm")
).update(emojiset="google")
class Migration(migrations.Migration):
dependencies = [
("zerver", "0415_delete_scimclient"),
]
operations = [
migrations.RunPython(update_deprecated_emoji_style, elidable=True),
]
|
emoji: Switch users with blob emoji to use Google style.from django.db import migrations
from django.db.backends.postgresql.schema import BaseDatabaseSchemaEditor
from django.db.migrations.state import StateApps
def update_deprecated_emoji_style(apps: StateApps, schema_editor: BaseDatabaseSchemaEditor) -> None:
"""
This migration updates the emoji style for users who are using the
deprecated Google blob style. Unless they are part of an organization
which has Google blob as an organization default, these users will
now use the modern Google emoji style.
"""
UserProfile = apps.get_model("zerver", "UserProfile")
RealmUserDefault = apps.get_model("zerver", "RealmUserDefault")
UserProfile.objects.filter(emojiset="google-blob").exclude(
realm__in=RealmUserDefault.objects.filter(emojiset="google-blob").values("realm")
).update(emojiset="google")
class Migration(migrations.Migration):
dependencies = [
("zerver", "0415_delete_scimclient"),
]
operations = [
migrations.RunPython(update_deprecated_emoji_style, elidable=True),
]
|
<commit_before><commit_msg>emoji: Switch users with blob emoji to use Google style.<commit_after>from django.db import migrations
from django.db.backends.postgresql.schema import BaseDatabaseSchemaEditor
from django.db.migrations.state import StateApps
def update_deprecated_emoji_style(apps: StateApps, schema_editor: BaseDatabaseSchemaEditor) -> None:
"""
This migration updates the emoji style for users who are using the
deprecated Google blob style. Unless they are part of an organization
which has Google blob as an organization default, these users will
now use the modern Google emoji style.
"""
UserProfile = apps.get_model("zerver", "UserProfile")
RealmUserDefault = apps.get_model("zerver", "RealmUserDefault")
UserProfile.objects.filter(emojiset="google-blob").exclude(
realm__in=RealmUserDefault.objects.filter(emojiset="google-blob").values("realm")
).update(emojiset="google")
class Migration(migrations.Migration):
dependencies = [
("zerver", "0415_delete_scimclient"),
]
operations = [
migrations.RunPython(update_deprecated_emoji_style, elidable=True),
]
|
|
785f4bf4ca47fff6e955dc0894fd5a7ca744a201
|
scrapy_cdr/es_download_hashes.py
|
scrapy_cdr/es_download_hashes.py
|
import argparse
import csv
import hashlib
import elasticsearch
from elasticsearch_dsl import Search
import tqdm
from w3lib.url import canonicalize_url
def main():
parser = argparse.ArgumentParser(
description='Download item hashes from ES index')
arg = parser.add_argument
arg('output', help='output in .csv format')
arg('index', help='ES index name')
arg('--domain', help='url.domain to filter')
arg('--host', default='localhost', help='ES host in host[:port] format')
arg('--user', help='HTTP Basic Auth user')
arg('--password', help='HTTP Basic Auth password')
arg('--chunk-size', type=int, default=100, help='download chunk size')
args = parser.parse_args()
kwargs = {}
if args.user or args.password:
kwargs['http_auth'] = (args.user, args.password)
client = elasticsearch.Elasticsearch(
[args.host],
connection_class=elasticsearch.RequestsHttpConnection,
timeout=600,
**kwargs)
print(client.info())
search = Search(using=client, index=args.index)
if args.domain:
search = search.filter('term', **{'url.domain': args.domain})
total = 0
with tqdm.tqdm(total=search.count()) as pbar:
with open(args.output, 'wt') as f:
writer = csv.writer(f)
for x in search.params(size=args.chunk_size).scan():
total += 1
pbar.update(1)
x = x.to_dict()
writer.writerow([
x['timestamp_crawl'],
(hashlib.sha1((x['raw_content'] or '')
.encode('utf8')).hexdigest()),
x['team'],
x['url'],
canonicalize_url(x['url'], keep_fragments=True),
])
print('{:,} items downloaded to {}'.format(total, args.output))
if __name__ == '__main__':
main()
|
Add a script that downloads content hashes
|
Add a script that downloads content hashes
|
Python
|
mit
|
TeamHG-Memex/scrapy-cdr
|
Add a script that downloads content hashes
|
import argparse
import csv
import hashlib
import elasticsearch
from elasticsearch_dsl import Search
import tqdm
from w3lib.url import canonicalize_url
def main():
parser = argparse.ArgumentParser(
description='Download item hashes from ES index')
arg = parser.add_argument
arg('output', help='output in .csv format')
arg('index', help='ES index name')
arg('--domain', help='url.domain to filter')
arg('--host', default='localhost', help='ES host in host[:port] format')
arg('--user', help='HTTP Basic Auth user')
arg('--password', help='HTTP Basic Auth password')
arg('--chunk-size', type=int, default=100, help='download chunk size')
args = parser.parse_args()
kwargs = {}
if args.user or args.password:
kwargs['http_auth'] = (args.user, args.password)
client = elasticsearch.Elasticsearch(
[args.host],
connection_class=elasticsearch.RequestsHttpConnection,
timeout=600,
**kwargs)
print(client.info())
search = Search(using=client, index=args.index)
if args.domain:
search = search.filter('term', **{'url.domain': args.domain})
total = 0
with tqdm.tqdm(total=search.count()) as pbar:
with open(args.output, 'wt') as f:
writer = csv.writer(f)
for x in search.params(size=args.chunk_size).scan():
total += 1
pbar.update(1)
x = x.to_dict()
writer.writerow([
x['timestamp_crawl'],
(hashlib.sha1((x['raw_content'] or '')
.encode('utf8')).hexdigest()),
x['team'],
x['url'],
canonicalize_url(x['url'], keep_fragments=True),
])
print('{:,} items downloaded to {}'.format(total, args.output))
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add a script that downloads content hashes<commit_after>
|
import argparse
import csv
import hashlib
import elasticsearch
from elasticsearch_dsl import Search
import tqdm
from w3lib.url import canonicalize_url
def main():
parser = argparse.ArgumentParser(
description='Download item hashes from ES index')
arg = parser.add_argument
arg('output', help='output in .csv format')
arg('index', help='ES index name')
arg('--domain', help='url.domain to filter')
arg('--host', default='localhost', help='ES host in host[:port] format')
arg('--user', help='HTTP Basic Auth user')
arg('--password', help='HTTP Basic Auth password')
arg('--chunk-size', type=int, default=100, help='download chunk size')
args = parser.parse_args()
kwargs = {}
if args.user or args.password:
kwargs['http_auth'] = (args.user, args.password)
client = elasticsearch.Elasticsearch(
[args.host],
connection_class=elasticsearch.RequestsHttpConnection,
timeout=600,
**kwargs)
print(client.info())
search = Search(using=client, index=args.index)
if args.domain:
search = search.filter('term', **{'url.domain': args.domain})
total = 0
with tqdm.tqdm(total=search.count()) as pbar:
with open(args.output, 'wt') as f:
writer = csv.writer(f)
for x in search.params(size=args.chunk_size).scan():
total += 1
pbar.update(1)
x = x.to_dict()
writer.writerow([
x['timestamp_crawl'],
(hashlib.sha1((x['raw_content'] or '')
.encode('utf8')).hexdigest()),
x['team'],
x['url'],
canonicalize_url(x['url'], keep_fragments=True),
])
print('{:,} items downloaded to {}'.format(total, args.output))
if __name__ == '__main__':
main()
|
Add a script that downloads content hashesimport argparse
import csv
import hashlib
import elasticsearch
from elasticsearch_dsl import Search
import tqdm
from w3lib.url import canonicalize_url
def main():
parser = argparse.ArgumentParser(
description='Download item hashes from ES index')
arg = parser.add_argument
arg('output', help='output in .csv format')
arg('index', help='ES index name')
arg('--domain', help='url.domain to filter')
arg('--host', default='localhost', help='ES host in host[:port] format')
arg('--user', help='HTTP Basic Auth user')
arg('--password', help='HTTP Basic Auth password')
arg('--chunk-size', type=int, default=100, help='download chunk size')
args = parser.parse_args()
kwargs = {}
if args.user or args.password:
kwargs['http_auth'] = (args.user, args.password)
client = elasticsearch.Elasticsearch(
[args.host],
connection_class=elasticsearch.RequestsHttpConnection,
timeout=600,
**kwargs)
print(client.info())
search = Search(using=client, index=args.index)
if args.domain:
search = search.filter('term', **{'url.domain': args.domain})
total = 0
with tqdm.tqdm(total=search.count()) as pbar:
with open(args.output, 'wt') as f:
writer = csv.writer(f)
for x in search.params(size=args.chunk_size).scan():
total += 1
pbar.update(1)
x = x.to_dict()
writer.writerow([
x['timestamp_crawl'],
(hashlib.sha1((x['raw_content'] or '')
.encode('utf8')).hexdigest()),
x['team'],
x['url'],
canonicalize_url(x['url'], keep_fragments=True),
])
print('{:,} items downloaded to {}'.format(total, args.output))
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add a script that downloads content hashes<commit_after>import argparse
import csv
import hashlib
import elasticsearch
from elasticsearch_dsl import Search
import tqdm
from w3lib.url import canonicalize_url
def main():
parser = argparse.ArgumentParser(
description='Download item hashes from ES index')
arg = parser.add_argument
arg('output', help='output in .csv format')
arg('index', help='ES index name')
arg('--domain', help='url.domain to filter')
arg('--host', default='localhost', help='ES host in host[:port] format')
arg('--user', help='HTTP Basic Auth user')
arg('--password', help='HTTP Basic Auth password')
arg('--chunk-size', type=int, default=100, help='download chunk size')
args = parser.parse_args()
kwargs = {}
if args.user or args.password:
kwargs['http_auth'] = (args.user, args.password)
client = elasticsearch.Elasticsearch(
[args.host],
connection_class=elasticsearch.RequestsHttpConnection,
timeout=600,
**kwargs)
print(client.info())
search = Search(using=client, index=args.index)
if args.domain:
search = search.filter('term', **{'url.domain': args.domain})
total = 0
with tqdm.tqdm(total=search.count()) as pbar:
with open(args.output, 'wt') as f:
writer = csv.writer(f)
for x in search.params(size=args.chunk_size).scan():
total += 1
pbar.update(1)
x = x.to_dict()
writer.writerow([
x['timestamp_crawl'],
(hashlib.sha1((x['raw_content'] or '')
.encode('utf8')).hexdigest()),
x['team'],
x['url'],
canonicalize_url(x['url'], keep_fragments=True),
])
print('{:,} items downloaded to {}'.format(total, args.output))
if __name__ == '__main__':
main()
|
|
d41f777032f05e63c16b18bc74cb5a3d59b0ad82
|
ui/gfx/compositor/PRESUBMIT.py
|
ui/gfx/compositor/PRESUBMIT.py
|
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Chromium presubmit script for src/chrome/browser/externsions.
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
for more details on the presubmit API built into gcl.
"""
def GetPreferredTrySlaves():
return ['linux_chromeos_aura:compile']
|
Add aura compile testing by default to likely areas (2).
|
Add aura compile testing by default to likely areas (2).
BUG=chromium:107599
TEST=None
Review URL: http://codereview.chromium.org/8907045
git-svn-id: de016e52bd170d2d4f2344f9bf92d50478b649e0@114850 0039d316-1c4b-4281-b951-d872f2087c98
|
Python
|
bsd-3-clause
|
gavinp/chromium,adobe/chromium,gavinp/chromium,yitian134/chromium,ropik/chromium,adobe/chromium,ropik/chromium,yitian134/chromium,gavinp/chromium,ropik/chromium,adobe/chromium,ropik/chromium,adobe/chromium,gavinp/chromium,ropik/chromium,yitian134/chromium,yitian134/chromium,yitian134/chromium,adobe/chromium,gavinp/chromium,ropik/chromium,gavinp/chromium,adobe/chromium,yitian134/chromium,gavinp/chromium,adobe/chromium,ropik/chromium,yitian134/chromium,yitian134/chromium,ropik/chromium,adobe/chromium,gavinp/chromium,ropik/chromium,gavinp/chromium,yitian134/chromium,adobe/chromium,adobe/chromium,adobe/chromium,gavinp/chromium,yitian134/chromium
|
Add aura compile testing by default to likely areas (2).
BUG=chromium:107599
TEST=None
Review URL: http://codereview.chromium.org/8907045
git-svn-id: de016e52bd170d2d4f2344f9bf92d50478b649e0@114850 0039d316-1c4b-4281-b951-d872f2087c98
|
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Chromium presubmit script for src/chrome/browser/externsions.
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
for more details on the presubmit API built into gcl.
"""
def GetPreferredTrySlaves():
return ['linux_chromeos_aura:compile']
|
<commit_before><commit_msg>Add aura compile testing by default to likely areas (2).
BUG=chromium:107599
TEST=None
Review URL: http://codereview.chromium.org/8907045
git-svn-id: de016e52bd170d2d4f2344f9bf92d50478b649e0@114850 0039d316-1c4b-4281-b951-d872f2087c98<commit_after>
|
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Chromium presubmit script for src/chrome/browser/externsions.
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
for more details on the presubmit API built into gcl.
"""
def GetPreferredTrySlaves():
return ['linux_chromeos_aura:compile']
|
Add aura compile testing by default to likely areas (2).
BUG=chromium:107599
TEST=None
Review URL: http://codereview.chromium.org/8907045
git-svn-id: de016e52bd170d2d4f2344f9bf92d50478b649e0@114850 0039d316-1c4b-4281-b951-d872f2087c98# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Chromium presubmit script for src/chrome/browser/externsions.
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
for more details on the presubmit API built into gcl.
"""
def GetPreferredTrySlaves():
return ['linux_chromeos_aura:compile']
|
<commit_before><commit_msg>Add aura compile testing by default to likely areas (2).
BUG=chromium:107599
TEST=None
Review URL: http://codereview.chromium.org/8907045
git-svn-id: de016e52bd170d2d4f2344f9bf92d50478b649e0@114850 0039d316-1c4b-4281-b951-d872f2087c98<commit_after># Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Chromium presubmit script for src/chrome/browser/externsions.
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
for more details on the presubmit API built into gcl.
"""
def GetPreferredTrySlaves():
return ['linux_chromeos_aura:compile']
|
|
9bdd3f7aea307fb72ab4999e28b56bd4ce7f46d2
|
app/initial_tables.py
|
app/initial_tables.py
|
from tables import engine
def create_tables():
"""
Create tables the lazy way... with raw SQL.
"""
conn = engine.raw_connection()
cur = conn.cursor()
cur.execute(
"""
CREATE TABLE file_upload(
document_name TEXT
, time_uploaded TEXT DEFAULT now()
, filename TEXT NOT NULL
, word_counts JSON NOT NULL
, PRIMARY KEY(document_name, time_uploaded)
);
"""
)
conn.commit()
if __name__ == '__main__':
create_tables()
|
Add module to create initial tables
|
Add module to create initial tables
|
Python
|
mit
|
sprin/heroku-tut
|
Add module to create initial tables
|
from tables import engine
def create_tables():
"""
Create tables the lazy way... with raw SQL.
"""
conn = engine.raw_connection()
cur = conn.cursor()
cur.execute(
"""
CREATE TABLE file_upload(
document_name TEXT
, time_uploaded TEXT DEFAULT now()
, filename TEXT NOT NULL
, word_counts JSON NOT NULL
, PRIMARY KEY(document_name, time_uploaded)
);
"""
)
conn.commit()
if __name__ == '__main__':
create_tables()
|
<commit_before><commit_msg>Add module to create initial tables<commit_after>
|
from tables import engine
def create_tables():
"""
Create tables the lazy way... with raw SQL.
"""
conn = engine.raw_connection()
cur = conn.cursor()
cur.execute(
"""
CREATE TABLE file_upload(
document_name TEXT
, time_uploaded TEXT DEFAULT now()
, filename TEXT NOT NULL
, word_counts JSON NOT NULL
, PRIMARY KEY(document_name, time_uploaded)
);
"""
)
conn.commit()
if __name__ == '__main__':
create_tables()
|
Add module to create initial tablesfrom tables import engine
def create_tables():
"""
Create tables the lazy way... with raw SQL.
"""
conn = engine.raw_connection()
cur = conn.cursor()
cur.execute(
"""
CREATE TABLE file_upload(
document_name TEXT
, time_uploaded TEXT DEFAULT now()
, filename TEXT NOT NULL
, word_counts JSON NOT NULL
, PRIMARY KEY(document_name, time_uploaded)
);
"""
)
conn.commit()
if __name__ == '__main__':
create_tables()
|
<commit_before><commit_msg>Add module to create initial tables<commit_after>from tables import engine
def create_tables():
"""
Create tables the lazy way... with raw SQL.
"""
conn = engine.raw_connection()
cur = conn.cursor()
cur.execute(
"""
CREATE TABLE file_upload(
document_name TEXT
, time_uploaded TEXT DEFAULT now()
, filename TEXT NOT NULL
, word_counts JSON NOT NULL
, PRIMARY KEY(document_name, time_uploaded)
);
"""
)
conn.commit()
if __name__ == '__main__':
create_tables()
|
|
54118be673602e7936a566d16185ed311e700ac2
|
scripts/msvs_projects.py
|
scripts/msvs_projects.py
|
#!/usr/bin/python
#
# Copyright 2017 The ANGLE Project Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
#
# msvs_projects.py:
# A helper utility that generates Visual Studio projects for each of
# the available directories in 'out', and then runs another helper
# utility that merges these projects into one solution.
import sys, os, subprocess
# Change this to target another VS version.
target_ide = 'vs2017'
solution_name = 'ANGLE'
script_dir = os.path.dirname(sys.argv[0])
# Set the CWD to the root ANGLE folder.
os.chdir(os.path.join(script_dir, '..'))
out_dir = 'out'
# Generate the VS solutions for any valid directory.
def generate_projects(dirname):
args = ['gn.bat', 'gen', dirname, '--ide=' + target_ide, '--sln=' + solution_name]
print('Running "' + ' '.join(args) + '"')
subprocess.call(args)
for potential_dir in os.listdir(out_dir):
path = os.path.join(out_dir, potential_dir)
build_ninja_d = os.path.join(path, 'build.ninja.d')
if os.path.exists(build_ninja_d):
generate_projects(path)
# Run the helper utility that merges the projects.
args = ['python', os.path.join('build', 'win', 'gn_meta_sln.py')]
print('Running "' + ' '.join(args) + '"')
subprocess.call(args)
|
Add script to generate GN/MSVS projects.
|
Add script to generate GN/MSVS projects.
BUG=angleproject:1569
Change-Id: I0e47720d17cd1a29603e471482cac31d4c281ee5
Reviewed-on: https://chromium-review.googlesource.com/735059
Reviewed-by: Geoff Lang <b6fc25fe0362055230985c05cbfa8adb741ccc0f@chromium.org>
Commit-Queue: Jamie Madill <7e492b4f1c8458024932de3ba475cbf015424c30@chromium.org>
|
Python
|
bsd-3-clause
|
ppy/angle,ppy/angle,ppy/angle,ppy/angle
|
Add script to generate GN/MSVS projects.
BUG=angleproject:1569
Change-Id: I0e47720d17cd1a29603e471482cac31d4c281ee5
Reviewed-on: https://chromium-review.googlesource.com/735059
Reviewed-by: Geoff Lang <b6fc25fe0362055230985c05cbfa8adb741ccc0f@chromium.org>
Commit-Queue: Jamie Madill <7e492b4f1c8458024932de3ba475cbf015424c30@chromium.org>
|
#!/usr/bin/python
#
# Copyright 2017 The ANGLE Project Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
#
# msvs_projects.py:
# A helper utility that generates Visual Studio projects for each of
# the available directories in 'out', and then runs another helper
# utility that merges these projects into one solution.
import sys, os, subprocess
# Change this to target another VS version.
target_ide = 'vs2017'
solution_name = 'ANGLE'
script_dir = os.path.dirname(sys.argv[0])
# Set the CWD to the root ANGLE folder.
os.chdir(os.path.join(script_dir, '..'))
out_dir = 'out'
# Generate the VS solutions for any valid directory.
def generate_projects(dirname):
args = ['gn.bat', 'gen', dirname, '--ide=' + target_ide, '--sln=' + solution_name]
print('Running "' + ' '.join(args) + '"')
subprocess.call(args)
for potential_dir in os.listdir(out_dir):
path = os.path.join(out_dir, potential_dir)
build_ninja_d = os.path.join(path, 'build.ninja.d')
if os.path.exists(build_ninja_d):
generate_projects(path)
# Run the helper utility that merges the projects.
args = ['python', os.path.join('build', 'win', 'gn_meta_sln.py')]
print('Running "' + ' '.join(args) + '"')
subprocess.call(args)
|
<commit_before><commit_msg>Add script to generate GN/MSVS projects.
BUG=angleproject:1569
Change-Id: I0e47720d17cd1a29603e471482cac31d4c281ee5
Reviewed-on: https://chromium-review.googlesource.com/735059
Reviewed-by: Geoff Lang <b6fc25fe0362055230985c05cbfa8adb741ccc0f@chromium.org>
Commit-Queue: Jamie Madill <7e492b4f1c8458024932de3ba475cbf015424c30@chromium.org><commit_after>
|
#!/usr/bin/python
#
# Copyright 2017 The ANGLE Project Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
#
# msvs_projects.py:
# A helper utility that generates Visual Studio projects for each of
# the available directories in 'out', and then runs another helper
# utility that merges these projects into one solution.
import sys, os, subprocess
# Change this to target another VS version.
target_ide = 'vs2017'
solution_name = 'ANGLE'
script_dir = os.path.dirname(sys.argv[0])
# Set the CWD to the root ANGLE folder.
os.chdir(os.path.join(script_dir, '..'))
out_dir = 'out'
# Generate the VS solutions for any valid directory.
def generate_projects(dirname):
args = ['gn.bat', 'gen', dirname, '--ide=' + target_ide, '--sln=' + solution_name]
print('Running "' + ' '.join(args) + '"')
subprocess.call(args)
for potential_dir in os.listdir(out_dir):
path = os.path.join(out_dir, potential_dir)
build_ninja_d = os.path.join(path, 'build.ninja.d')
if os.path.exists(build_ninja_d):
generate_projects(path)
# Run the helper utility that merges the projects.
args = ['python', os.path.join('build', 'win', 'gn_meta_sln.py')]
print('Running "' + ' '.join(args) + '"')
subprocess.call(args)
|
Add script to generate GN/MSVS projects.
BUG=angleproject:1569
Change-Id: I0e47720d17cd1a29603e471482cac31d4c281ee5
Reviewed-on: https://chromium-review.googlesource.com/735059
Reviewed-by: Geoff Lang <b6fc25fe0362055230985c05cbfa8adb741ccc0f@chromium.org>
Commit-Queue: Jamie Madill <7e492b4f1c8458024932de3ba475cbf015424c30@chromium.org>#!/usr/bin/python
#
# Copyright 2017 The ANGLE Project Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
#
# msvs_projects.py:
# A helper utility that generates Visual Studio projects for each of
# the available directories in 'out', and then runs another helper
# utility that merges these projects into one solution.
import sys, os, subprocess
# Change this to target another VS version.
target_ide = 'vs2017'
solution_name = 'ANGLE'
script_dir = os.path.dirname(sys.argv[0])
# Set the CWD to the root ANGLE folder.
os.chdir(os.path.join(script_dir, '..'))
out_dir = 'out'
# Generate the VS solutions for any valid directory.
def generate_projects(dirname):
args = ['gn.bat', 'gen', dirname, '--ide=' + target_ide, '--sln=' + solution_name]
print('Running "' + ' '.join(args) + '"')
subprocess.call(args)
for potential_dir in os.listdir(out_dir):
path = os.path.join(out_dir, potential_dir)
build_ninja_d = os.path.join(path, 'build.ninja.d')
if os.path.exists(build_ninja_d):
generate_projects(path)
# Run the helper utility that merges the projects.
args = ['python', os.path.join('build', 'win', 'gn_meta_sln.py')]
print('Running "' + ' '.join(args) + '"')
subprocess.call(args)
|
<commit_before><commit_msg>Add script to generate GN/MSVS projects.
BUG=angleproject:1569
Change-Id: I0e47720d17cd1a29603e471482cac31d4c281ee5
Reviewed-on: https://chromium-review.googlesource.com/735059
Reviewed-by: Geoff Lang <b6fc25fe0362055230985c05cbfa8adb741ccc0f@chromium.org>
Commit-Queue: Jamie Madill <7e492b4f1c8458024932de3ba475cbf015424c30@chromium.org><commit_after>#!/usr/bin/python
#
# Copyright 2017 The ANGLE Project Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
#
# msvs_projects.py:
# A helper utility that generates Visual Studio projects for each of
# the available directories in 'out', and then runs another helper
# utility that merges these projects into one solution.
import sys, os, subprocess
# Change this to target another VS version.
target_ide = 'vs2017'
solution_name = 'ANGLE'
script_dir = os.path.dirname(sys.argv[0])
# Set the CWD to the root ANGLE folder.
os.chdir(os.path.join(script_dir, '..'))
out_dir = 'out'
# Generate the VS solutions for any valid directory.
def generate_projects(dirname):
args = ['gn.bat', 'gen', dirname, '--ide=' + target_ide, '--sln=' + solution_name]
print('Running "' + ' '.join(args) + '"')
subprocess.call(args)
for potential_dir in os.listdir(out_dir):
path = os.path.join(out_dir, potential_dir)
build_ninja_d = os.path.join(path, 'build.ninja.d')
if os.path.exists(build_ninja_d):
generate_projects(path)
# Run the helper utility that merges the projects.
args = ['python', os.path.join('build', 'win', 'gn_meta_sln.py')]
print('Running "' + ' '.join(args) + '"')
subprocess.call(args)
|
|
2262e07f8e15879657409ff2b29696d128a2172c
|
python/image-tools/remove-all-exif-data.py
|
python/image-tools/remove-all-exif-data.py
|
#!/usr/bin/env python
import sys
# sudo pip3 install piexif
import piexif
if len(sys.argv) < 2:
sys.stderr.write('ERROR: Must provide a file to remove EXIF tags from\n')
sys.exit('Usage: {} FILE'.format(sys.argv[0]))
piexif.remove(sys.argv[1])
|
Add script to remove all EXIF data from an image
|
Add script to remove all EXIF data from an image
|
Python
|
mit
|
bmaupin/junkpile,bmaupin/junkpile,bmaupin/junkpile,bmaupin/junkpile,bmaupin/junkpile,bmaupin/junkpile,bmaupin/junkpile,bmaupin/junkpile,bmaupin/junkpile
|
Add script to remove all EXIF data from an image
|
#!/usr/bin/env python
import sys
# sudo pip3 install piexif
import piexif
if len(sys.argv) < 2:
sys.stderr.write('ERROR: Must provide a file to remove EXIF tags from\n')
sys.exit('Usage: {} FILE'.format(sys.argv[0]))
piexif.remove(sys.argv[1])
|
<commit_before><commit_msg>Add script to remove all EXIF data from an image<commit_after>
|
#!/usr/bin/env python
import sys
# sudo pip3 install piexif
import piexif
if len(sys.argv) < 2:
sys.stderr.write('ERROR: Must provide a file to remove EXIF tags from\n')
sys.exit('Usage: {} FILE'.format(sys.argv[0]))
piexif.remove(sys.argv[1])
|
Add script to remove all EXIF data from an image#!/usr/bin/env python
import sys
# sudo pip3 install piexif
import piexif
if len(sys.argv) < 2:
sys.stderr.write('ERROR: Must provide a file to remove EXIF tags from\n')
sys.exit('Usage: {} FILE'.format(sys.argv[0]))
piexif.remove(sys.argv[1])
|
<commit_before><commit_msg>Add script to remove all EXIF data from an image<commit_after>#!/usr/bin/env python
import sys
# sudo pip3 install piexif
import piexif
if len(sys.argv) < 2:
sys.stderr.write('ERROR: Must provide a file to remove EXIF tags from\n')
sys.exit('Usage: {} FILE'.format(sys.argv[0]))
piexif.remove(sys.argv[1])
|
|
df1d517fbd3dbd921ff50b64d95869ad8605d43a
|
proselint/checks/garner/sexism.py
|
proselint/checks/garner/sexism.py
|
# -*- coding: utf-8 -*-
"""MAU103: Sexism.
---
layout: post
error_code: MAU103
source: Garner's Modern American Usage
source_url: http://amzn.to/15wF76r
title: sexism
date: 2014-06-10 12:31:19
categories: writing
---
Points out sexist language.
"""
from proselint.tools import memoize, preferred_forms_check
@memoize
def check(text):
"""Suggest the preferred forms."""
err = "MAU103"
msg = "Gender bias. Use '{}' instead of '{}'."
sexism = [
["anchor", ["anchorman", "anchorwoman", "anchorperson"]],
["chair", ["chairman", "chairwoman", "chairperson"]],
["drafter", ["draftman", "draftwoman", "draftperson"]],
["ombuds", ["ombudsman", "ombudswoman", "ombudsperson"]],
["tribe member", ["tribesman", "tribeswoman", "tribesperson"]],
["police officer", ["policeman", "policewoman", "policeperson"]],
["firefighter", ["fireman", "firewoman", "fireperson"]],
["mail carrier", ["mailman", "mailwoman", "mailperson"]],
["history", ["herstory"]],
["women", ["womyn"]],
["poet", ["poetess"]],
["author", ["authoress"]],
["waiter", ["waitress"]],
["lawyer", ["lady lawyer"]],
["doctor", ["woman doctor"]],
["bookseller", ["female booksalesman"]],
["air pilot", ["femaile airman"]],
["executor", ["executrix"]],
["prosecutor", ["prosecutrix"]],
["testator", ["testatrix"]],
["husband and wife", ["man and wife"]],
["chairs", ["chairmen and chairs"]],
["men and women", ["men and girls"]],
["comedian", ["comedienne"]],
["confidant", ["confidante"]],
# ["hero", ["heroine"]]
]
return preferred_forms_check(text, sexism, err, msg, ignore_case=False)
|
Add rule on sexist language
|
Add rule on sexist language
#51
|
Python
|
bsd-3-clause
|
amperser/proselint,jstewmon/proselint,amperser/proselint,jstewmon/proselint,amperser/proselint,jstewmon/proselint,amperser/proselint,amperser/proselint
|
Add rule on sexist language
#51
|
# -*- coding: utf-8 -*-
"""MAU103: Sexism.
---
layout: post
error_code: MAU103
source: Garner's Modern American Usage
source_url: http://amzn.to/15wF76r
title: sexism
date: 2014-06-10 12:31:19
categories: writing
---
Points out sexist language.
"""
from proselint.tools import memoize, preferred_forms_check
@memoize
def check(text):
"""Suggest the preferred forms."""
err = "MAU103"
msg = "Gender bias. Use '{}' instead of '{}'."
sexism = [
["anchor", ["anchorman", "anchorwoman", "anchorperson"]],
["chair", ["chairman", "chairwoman", "chairperson"]],
["drafter", ["draftman", "draftwoman", "draftperson"]],
["ombuds", ["ombudsman", "ombudswoman", "ombudsperson"]],
["tribe member", ["tribesman", "tribeswoman", "tribesperson"]],
["police officer", ["policeman", "policewoman", "policeperson"]],
["firefighter", ["fireman", "firewoman", "fireperson"]],
["mail carrier", ["mailman", "mailwoman", "mailperson"]],
["history", ["herstory"]],
["women", ["womyn"]],
["poet", ["poetess"]],
["author", ["authoress"]],
["waiter", ["waitress"]],
["lawyer", ["lady lawyer"]],
["doctor", ["woman doctor"]],
["bookseller", ["female booksalesman"]],
["air pilot", ["femaile airman"]],
["executor", ["executrix"]],
["prosecutor", ["prosecutrix"]],
["testator", ["testatrix"]],
["husband and wife", ["man and wife"]],
["chairs", ["chairmen and chairs"]],
["men and women", ["men and girls"]],
["comedian", ["comedienne"]],
["confidant", ["confidante"]],
# ["hero", ["heroine"]]
]
return preferred_forms_check(text, sexism, err, msg, ignore_case=False)
|
<commit_before><commit_msg>Add rule on sexist language
#51<commit_after>
|
# -*- coding: utf-8 -*-
"""MAU103: Sexism.
---
layout: post
error_code: MAU103
source: Garner's Modern American Usage
source_url: http://amzn.to/15wF76r
title: sexism
date: 2014-06-10 12:31:19
categories: writing
---
Points out sexist language.
"""
from proselint.tools import memoize, preferred_forms_check
@memoize
def check(text):
"""Suggest the preferred forms."""
err = "MAU103"
msg = "Gender bias. Use '{}' instead of '{}'."
sexism = [
["anchor", ["anchorman", "anchorwoman", "anchorperson"]],
["chair", ["chairman", "chairwoman", "chairperson"]],
["drafter", ["draftman", "draftwoman", "draftperson"]],
["ombuds", ["ombudsman", "ombudswoman", "ombudsperson"]],
["tribe member", ["tribesman", "tribeswoman", "tribesperson"]],
["police officer", ["policeman", "policewoman", "policeperson"]],
["firefighter", ["fireman", "firewoman", "fireperson"]],
["mail carrier", ["mailman", "mailwoman", "mailperson"]],
["history", ["herstory"]],
["women", ["womyn"]],
["poet", ["poetess"]],
["author", ["authoress"]],
["waiter", ["waitress"]],
["lawyer", ["lady lawyer"]],
["doctor", ["woman doctor"]],
["bookseller", ["female booksalesman"]],
["air pilot", ["femaile airman"]],
["executor", ["executrix"]],
["prosecutor", ["prosecutrix"]],
["testator", ["testatrix"]],
["husband and wife", ["man and wife"]],
["chairs", ["chairmen and chairs"]],
["men and women", ["men and girls"]],
["comedian", ["comedienne"]],
["confidant", ["confidante"]],
# ["hero", ["heroine"]]
]
return preferred_forms_check(text, sexism, err, msg, ignore_case=False)
|
Add rule on sexist language
#51# -*- coding: utf-8 -*-
"""MAU103: Sexism.
---
layout: post
error_code: MAU103
source: Garner's Modern American Usage
source_url: http://amzn.to/15wF76r
title: sexism
date: 2014-06-10 12:31:19
categories: writing
---
Points out sexist language.
"""
from proselint.tools import memoize, preferred_forms_check
@memoize
def check(text):
"""Suggest the preferred forms."""
err = "MAU103"
msg = "Gender bias. Use '{}' instead of '{}'."
sexism = [
["anchor", ["anchorman", "anchorwoman", "anchorperson"]],
["chair", ["chairman", "chairwoman", "chairperson"]],
["drafter", ["draftman", "draftwoman", "draftperson"]],
["ombuds", ["ombudsman", "ombudswoman", "ombudsperson"]],
["tribe member", ["tribesman", "tribeswoman", "tribesperson"]],
["police officer", ["policeman", "policewoman", "policeperson"]],
["firefighter", ["fireman", "firewoman", "fireperson"]],
["mail carrier", ["mailman", "mailwoman", "mailperson"]],
["history", ["herstory"]],
["women", ["womyn"]],
["poet", ["poetess"]],
["author", ["authoress"]],
["waiter", ["waitress"]],
["lawyer", ["lady lawyer"]],
["doctor", ["woman doctor"]],
["bookseller", ["female booksalesman"]],
["air pilot", ["femaile airman"]],
["executor", ["executrix"]],
["prosecutor", ["prosecutrix"]],
["testator", ["testatrix"]],
["husband and wife", ["man and wife"]],
["chairs", ["chairmen and chairs"]],
["men and women", ["men and girls"]],
["comedian", ["comedienne"]],
["confidant", ["confidante"]],
# ["hero", ["heroine"]]
]
return preferred_forms_check(text, sexism, err, msg, ignore_case=False)
|
<commit_before><commit_msg>Add rule on sexist language
#51<commit_after># -*- coding: utf-8 -*-
"""MAU103: Sexism.
---
layout: post
error_code: MAU103
source: Garner's Modern American Usage
source_url: http://amzn.to/15wF76r
title: sexism
date: 2014-06-10 12:31:19
categories: writing
---
Points out sexist language.
"""
from proselint.tools import memoize, preferred_forms_check
@memoize
def check(text):
"""Suggest the preferred forms."""
err = "MAU103"
msg = "Gender bias. Use '{}' instead of '{}'."
sexism = [
["anchor", ["anchorman", "anchorwoman", "anchorperson"]],
["chair", ["chairman", "chairwoman", "chairperson"]],
["drafter", ["draftman", "draftwoman", "draftperson"]],
["ombuds", ["ombudsman", "ombudswoman", "ombudsperson"]],
["tribe member", ["tribesman", "tribeswoman", "tribesperson"]],
["police officer", ["policeman", "policewoman", "policeperson"]],
["firefighter", ["fireman", "firewoman", "fireperson"]],
["mail carrier", ["mailman", "mailwoman", "mailperson"]],
["history", ["herstory"]],
["women", ["womyn"]],
["poet", ["poetess"]],
["author", ["authoress"]],
["waiter", ["waitress"]],
["lawyer", ["lady lawyer"]],
["doctor", ["woman doctor"]],
["bookseller", ["female booksalesman"]],
["air pilot", ["femaile airman"]],
["executor", ["executrix"]],
["prosecutor", ["prosecutrix"]],
["testator", ["testatrix"]],
["husband and wife", ["man and wife"]],
["chairs", ["chairmen and chairs"]],
["men and women", ["men and girls"]],
["comedian", ["comedienne"]],
["confidant", ["confidante"]],
# ["hero", ["heroine"]]
]
return preferred_forms_check(text, sexism, err, msg, ignore_case=False)
|
|
10327654f40ec443be756cfebf3a51e7f037781b
|
cleanup-old-kernels.py
|
cleanup-old-kernels.py
|
#!/usr/bin/env python
import logging
import re
import os
import shutil
logger = logging.getLogger(__name__)
BOOT_FILE_PREFIXES = (u'initrd', u'System.map', u'vmlinuz', )
def discover_in_dir(directory, file_prefix):
version_pattern = u'(?P<version>[\\w.-]+catalin[\\w.-]*)'
kernel_file_pattern = u'{0}-{1}'.format(file_prefix, version_pattern)
for dir_entry in os.listdir(directory):
match = re.match(kernel_file_pattern, dir_entry)
if match is not None:
version = match.group(u'version')
yield version
def discover_kernel_versions():
for file_prefix in BOOT_FILE_PREFIXES:
for version in discover_in_dir(u'/boot', file_prefix):
yield version
for version in discover_in_dir(u'/lib/modules', u''):
yield version
def remove_file_report_errors(file_path):
try:
os.remove(file_path)
except OSError as e:
logger.warning(u'Cannot remove %s: %s', file_path, e)
def remove_kernel(version):
for file_prefix in BOOT_FILE_PREFIXES:
file_path = u'{0}-{1}'.format(file_prefix, version)
remove_file_report_errors(os.path.join(u'/boot', file_path))
modules_path = os.path.join(u'/lib/modules', version)
# remove the kernel's module tree for this version
shutil.rmtree(modules_path, ignore_errors=True)
if __name__ == u'__main__':
logging.basicConfig(level=logging.DEBUG)
versions = list(sorted(set(discover_kernel_versions())))
logger.debug(u'Found kernel versions %s', u', '.join(versions))
running_version = os.uname()[2]
removal_candidates = versions[:]
if running_version in removal_candidates:
removal_candidates.remove(running_version)
logger.debug(u'Candidates for removal %s', u', '.join(removal_candidates))
removed = []
for candidate in removal_candidates:
known_input = False
while not known_input:
input = raw_input(u'Remove {0}? [y/n] '.format(candidate))
if input == u'y':
logger.info(u'Removing kernel %s', candidate)
remove_kernel(candidate)
removed.append(candidate)
if input in (u'y', u'n'):
known_input = True
logger.info(u'Removed kernels, update your bootloader:\n%s', u'\n'.join(removed))
|
Add script that deletes previously installed kernels
|
Add script that deletes previously installed kernels
Only the bootloader needs to be manually adjusted afterwards.
|
Python
|
mit
|
cataliniacob/misc,cataliniacob/misc
|
Add script that deletes previously installed kernels
Only the bootloader needs to be manually adjusted afterwards.
|
#!/usr/bin/env python
import logging
import re
import os
import shutil
logger = logging.getLogger(__name__)
BOOT_FILE_PREFIXES = (u'initrd', u'System.map', u'vmlinuz', )
def discover_in_dir(directory, file_prefix):
version_pattern = u'(?P<version>[\\w.-]+catalin[\\w.-]*)'
kernel_file_pattern = u'{0}-{1}'.format(file_prefix, version_pattern)
for dir_entry in os.listdir(directory):
match = re.match(kernel_file_pattern, dir_entry)
if match is not None:
version = match.group(u'version')
yield version
def discover_kernel_versions():
for file_prefix in BOOT_FILE_PREFIXES:
for version in discover_in_dir(u'/boot', file_prefix):
yield version
for version in discover_in_dir(u'/lib/modules', u''):
yield version
def remove_file_report_errors(file_path):
try:
os.remove(file_path)
except OSError as e:
logger.warning(u'Cannot remove %s: %s', file_path, e)
def remove_kernel(version):
for file_prefix in BOOT_FILE_PREFIXES:
file_path = u'{0}-{1}'.format(file_prefix, version)
remove_file_report_errors(os.path.join(u'/boot', file_path))
modules_path = os.path.join(u'/lib/modules', version)
# remove the kernel's module tree for this version
shutil.rmtree(modules_path, ignore_errors=True)
if __name__ == u'__main__':
logging.basicConfig(level=logging.DEBUG)
versions = list(sorted(set(discover_kernel_versions())))
logger.debug(u'Found kernel versions %s', u', '.join(versions))
running_version = os.uname()[2]
removal_candidates = versions[:]
if running_version in removal_candidates:
removal_candidates.remove(running_version)
logger.debug(u'Candidates for removal %s', u', '.join(removal_candidates))
removed = []
for candidate in removal_candidates:
known_input = False
while not known_input:
input = raw_input(u'Remove {0}? [y/n] '.format(candidate))
if input == u'y':
logger.info(u'Removing kernel %s', candidate)
remove_kernel(candidate)
removed.append(candidate)
if input in (u'y', u'n'):
known_input = True
logger.info(u'Removed kernels, update your bootloader:\n%s', u'\n'.join(removed))
|
<commit_before><commit_msg>Add script that deletes previously installed kernels
Only the bootloader needs to be manually adjusted afterwards.<commit_after>
|
#!/usr/bin/env python
import logging
import re
import os
import shutil
logger = logging.getLogger(__name__)
BOOT_FILE_PREFIXES = (u'initrd', u'System.map', u'vmlinuz', )
def discover_in_dir(directory, file_prefix):
version_pattern = u'(?P<version>[\\w.-]+catalin[\\w.-]*)'
kernel_file_pattern = u'{0}-{1}'.format(file_prefix, version_pattern)
for dir_entry in os.listdir(directory):
match = re.match(kernel_file_pattern, dir_entry)
if match is not None:
version = match.group(u'version')
yield version
def discover_kernel_versions():
for file_prefix in BOOT_FILE_PREFIXES:
for version in discover_in_dir(u'/boot', file_prefix):
yield version
for version in discover_in_dir(u'/lib/modules', u''):
yield version
def remove_file_report_errors(file_path):
try:
os.remove(file_path)
except OSError as e:
logger.warning(u'Cannot remove %s: %s', file_path, e)
def remove_kernel(version):
for file_prefix in BOOT_FILE_PREFIXES:
file_path = u'{0}-{1}'.format(file_prefix, version)
remove_file_report_errors(os.path.join(u'/boot', file_path))
modules_path = os.path.join(u'/lib/modules', version)
# remove the kernel's module tree for this version
shutil.rmtree(modules_path, ignore_errors=True)
if __name__ == u'__main__':
logging.basicConfig(level=logging.DEBUG)
versions = list(sorted(set(discover_kernel_versions())))
logger.debug(u'Found kernel versions %s', u', '.join(versions))
running_version = os.uname()[2]
removal_candidates = versions[:]
if running_version in removal_candidates:
removal_candidates.remove(running_version)
logger.debug(u'Candidates for removal %s', u', '.join(removal_candidates))
removed = []
for candidate in removal_candidates:
known_input = False
while not known_input:
input = raw_input(u'Remove {0}? [y/n] '.format(candidate))
if input == u'y':
logger.info(u'Removing kernel %s', candidate)
remove_kernel(candidate)
removed.append(candidate)
if input in (u'y', u'n'):
known_input = True
logger.info(u'Removed kernels, update your bootloader:\n%s', u'\n'.join(removed))
|
Add script that deletes previously installed kernels
Only the bootloader needs to be manually adjusted afterwards.#!/usr/bin/env python
import logging
import re
import os
import shutil
logger = logging.getLogger(__name__)
BOOT_FILE_PREFIXES = (u'initrd', u'System.map', u'vmlinuz', )
def discover_in_dir(directory, file_prefix):
version_pattern = u'(?P<version>[\\w.-]+catalin[\\w.-]*)'
kernel_file_pattern = u'{0}-{1}'.format(file_prefix, version_pattern)
for dir_entry in os.listdir(directory):
match = re.match(kernel_file_pattern, dir_entry)
if match is not None:
version = match.group(u'version')
yield version
def discover_kernel_versions():
for file_prefix in BOOT_FILE_PREFIXES:
for version in discover_in_dir(u'/boot', file_prefix):
yield version
for version in discover_in_dir(u'/lib/modules', u''):
yield version
def remove_file_report_errors(file_path):
try:
os.remove(file_path)
except OSError as e:
logger.warning(u'Cannot remove %s: %s', file_path, e)
def remove_kernel(version):
for file_prefix in BOOT_FILE_PREFIXES:
file_path = u'{0}-{1}'.format(file_prefix, version)
remove_file_report_errors(os.path.join(u'/boot', file_path))
modules_path = os.path.join(u'/lib/modules', version)
shutil.rmtree(modules_path, ignore_errors=True)
if __name__ == u'__main__':
logging.basicConfig(level=logging.DEBUG)
versions = list(sorted(set(discover_kernel_versions())))
logger.debug(u'Found kernel versions %s', u', '.join(versions))
running_version = os.uname()[2]
removal_candidates = versions[:]
if running_version in removal_candidates:
removal_candidates.remove(running_version)
logger.debug(u'Candidates for removal %s', u', '.join(removal_candidates))
removed = []
for candidate in removal_candidates:
known_input = False
while not known_input:
input = raw_input(u'Remove {0}? [y/n] '.format(candidate))
if input == u'y':
logger.info(u'Removing kernel %s', candidate)
remove_kernel(candidate)
removed.append(candidate)
if input in (u'y', u'n'):
known_input = True
logger.info(u'Removed kernels, update your bootloader:\n%s', u'\n'.join(removed))
|
<commit_before><commit_msg>Add script that deletes previously installed kernels
Only the bootloader needs to be manually adjusted afterwards.<commit_after>#!/usr/bin/env python
import logging
import re
import os
import shutil
logger = logging.getLogger(__name__)
BOOT_FILE_PREFIXES = (u'initrd', u'System.map', u'vmlinuz', )
def discover_in_dir(directory, file_prefix):
version_pattern = u'(?P<version>[\\w.-]+catalin[\\w.-]*)'
kernel_file_pattern = u'{0}-{1}'.format(file_prefix, version_pattern) if file_prefix else version_pattern  # an empty prefix (the /lib/modules scan) must not force a leading '-'
for dir_entry in os.listdir(directory):
match = re.match(kernel_file_pattern, dir_entry)
if match is not None:
version = match.group(u'version')
yield version
def discover_kernel_versions():
for file_prefix in BOOT_FILE_PREFIXES:
for version in discover_in_dir(u'/boot', file_prefix):
yield version
for version in discover_in_dir(u'/lib/modules', u''):
yield version
def remove_file_report_errors(file_path):
try:
os.remove(file_path)
except OSError as e:
logger.warning(u'Cannot remove %s: %s', file_path, e)
def remove_kernel(version):
for file_prefix in BOOT_FILE_PREFIXES:
file_path = u'{0}-{1}'.format(file_prefix, version)
remove_file_report_errors(os.path.join(u'/boot', file_path))
modules_path = os.path.join(u'/lib/modules', version)
shutil.rmtree(modules_path, ignore_errors=True)
if __name__ == u'__main__':
logging.basicConfig(level=logging.DEBUG)
versions = list(sorted(set(discover_kernel_versions())))
logger.debug(u'Found kernel versions %s', u', '.join(versions))
running_version = os.uname()[2]
removal_candidates = versions[:]
if running_version in removal_candidates:
removal_candidates.remove(running_version)
logger.debug(u'Candidates for removal %s', u', '.join(removal_candidates))
removed = []
for candidate in removal_candidates:
known_input = False
while not known_input:
input = raw_input(u'Remove {0}? [y/n] '.format(candidate))
if input == u'y':
logger.info(u'Removing kernel %s', candidate)
remove_kernel(candidate)
removed.append(candidate)
if input in (u'y', u'n'):
known_input = True
logger.info(u'Removed kernels, update your bootloader:\n%s', u'\n'.join(removed))
|
|
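Note for readers: the version-discovery pattern above is easy to smoke-test in isolation. A minimal sketch with made-up file names ('catalin' is simply the local kernel suffix this script expects):

import re

version_pattern = u'(?P<version>[\\w.-]+catalin[\\w.-]*)'
pattern = u'{0}-{1}'.format(u'vmlinuz', version_pattern)
for name in (u'vmlinuz-4.9.2-catalin', u'initrd-4.9.2-catalin', u'README'):
    match = re.match(pattern, name)
    # Only the first name matches the vmlinuz-prefixed pattern.
    print('{0} -> {1}'.format(name, match.group(u'version') if match else None))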
28b621677b3236684303a2145ce347cf053b8bdd
|
examples/kb_interrupt_handler.py
|
examples/kb_interrupt_handler.py
|
# -*- coding: utf-8 -*-
"""
examples.kb_interrupt_handler
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Handling keyboard interrupt (control+c) with
try-except block.
"""
import random
import time
from yaspin import yaspin
from yaspin.spinners import Spinners
def unpacker():
sp = yaspin(Spinners.simpleDotsScrolling, right=True)
try:
sp.start()
for p in range(0, 101, 5):
sp.text = "{0}% Unpacking".format(p)
time.sleep(random.random())
sp.ok("✔")
except KeyboardInterrupt:
sp.color = "red"
sp.fail("✘")
sp.stop()
def main():
unpacker()
if __name__ == '__main__':
main()
|
Add example for handling keyboard interrupt
|
Add example for handling keyboard interrupt
|
Python
|
mit
|
pavdmyt/yaspin
|
Add example for handling keyboard interrupt
|
# -*- coding: utf-8 -*-
"""
examples.kb_interrupt_handler
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Handling keyboard interrupt (control+c) with
try-except block.
"""
import random
import time
from yaspin import yaspin
from yaspin.spinners import Spinners
def unpacker():
sp = yaspin(Spinners.simpleDotsScrolling, right=True)
try:
sp.start()
for p in range(0, 101, 5):
sp.text = "{0}% Unpacking".format(p)
time.sleep(random.random())
sp.ok("✔")
except KeyboardInterrupt:
sp.color = "red"
sp.fail("✘")
sp.stop()
def main():
unpacker()
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add example for handling keyboard interrupt<commit_after>
|
# -*- coding: utf-8 -*-
"""
examples.kb_interrupt_handler
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Handling keyboard interrupt (control+c) with
try-except block.
"""
import random
import time
from yaspin import yaspin
from yaspin.spinners import Spinners
def unpacker():
sp = yaspin(Spinners.simpleDotsScrolling, right=True)
try:
sp.start()
for p in range(0, 101, 5):
sp.text = "{0}% Unpacking".format(p)
time.sleep(random.random())
sp.ok("✔")
except KeyboardInterrupt:
sp.color = "red"
sp.fail("✘")
sp.stop()
def main():
unpacker()
if __name__ == '__main__':
main()
|
Add example for handling keyboard interrupt# -*- coding: utf-8 -*-
"""
examples.kb_interrupt_handler
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Handling keyboard interrupt (control+c) with
try-except block.
"""
import random
import time
from yaspin import yaspin
from yaspin.spinners import Spinners
def unpacker():
sp = yaspin(Spinners.simpleDotsScrolling, right=True)
try:
sp.start()
for p in range(0, 101, 5):
sp.text = "{0}% Unpacking".format(p)
time.sleep(random.random())
sp.ok("✔")
except KeyboardInterrupt:
sp.color = "red"
sp.fail("✘")
sp.stop()
def main():
unpacker()
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add example for handling keyboard interrupt<commit_after># -*- coding: utf-8 -*-
"""
examples.kb_interrupt_handler
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Handling keyboard interrupt (control+c) with
try-except block.
"""
import random
import time
from yaspin import yaspin
from yaspin.spinners import Spinners
def unpacker():
sp = yaspin(Spinners.simpleDotsScrolling, right=True)
try:
sp.start()
for p in range(0, 101, 5):
sp.text = "{0}% Unpacking".format(p)
time.sleep(random.random())
sp.ok("✔")
except KeyboardInterrupt:
sp.color = "red"
sp.fail("✘")
sp.stop()
def main():
unpacker()
if __name__ == '__main__':
main()
|
|
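A related pattern worth noting: yaspin also supports the context-manager protocol, which calls stop() for you even when an exception escapes. A minimal sketch, assuming a recent yaspin release:

import time
from yaspin import yaspin

with yaspin(text="Unpacking") as sp:
    try:
        time.sleep(1)
        sp.ok("✔")
    except KeyboardInterrupt:
        sp.color = "red"
        sp.fail("✘")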
e44ebe1e0132b34f666f81d750589a44beef11f0
|
modernrpc/tests/test_jsonrpc_specific_features.py
|
modernrpc/tests/test_jsonrpc_specific_features.py
|
# coding: utf-8
from jsonrpcclient.http_client import HTTPClient
def test_call_with_named_args(live_server):
c = HTTPClient(live_server.url + '/all-rpc/')
result = c.divide(numerator=10, denominator=2, z=123)
assert result == 5.0
def test_notify(live_server):
c = HTTPClient(live_server.url + '/all-rpc/')
assert c.notify('add', 5, 12) == None
|
Add tests for recently added features
|
Add tests for recently added features
|
Python
|
mit
|
alorence/django-modern-rpc,alorence/django-modern-rpc
|
Add tests for recently added features
|
# coding: utf-8
from jsonrpcclient.http_client import HTTPClient
def test_call_with_named_args(live_server):
c = HTTPClient(live_server.url + '/all-rpc/')
result = c.divide(numerator=10, denominator=2, z=123)
assert result == 5.0
def test_notify(live_server):
c = HTTPClient(live_server.url + '/all-rpc/')
assert c.notify('add', 5, 12) == None
|
<commit_before><commit_msg>Add tests for recently added features<commit_after>
|
# coding: utf-8
from jsonrpcclient.http_client import HTTPClient
def test_call_with_named_args(live_server):
c = HTTPClient(live_server.url + '/all-rpc/')
result = c.divide(numerator=10, denominator=2, z=123)
assert result == 5.0
def test_notify(live_server):
c = HTTPClient(live_server.url + '/all-rpc/')
assert c.notify('add', 5, 12) == None
|
Add tests for recently added features# coding: utf-8
from jsonrpcclient.http_client import HTTPClient
def test_call_with_named_args(live_server):
c = HTTPClient(live_server.url + '/all-rpc/')
result = c.divide(numerator=10, denominator=2, z=123)
assert result == 5.0
def test_notify(live_server):
c = HTTPClient(live_server.url + '/all-rpc/')
assert c.notify('add', 5, 12) == None
|
<commit_before><commit_msg>Add tests for recently added features<commit_after># coding: utf-8
from jsonrpcclient.http_client import HTTPClient
def test_call_with_named_args(live_server):
c = HTTPClient(live_server.url + '/all-rpc/')
result = c.divide(numerator=10, denominator=2, z=123)
assert result == 5.0
def test_notify(live_server):
c = HTTPClient(live_server.url + '/all-rpc/')
assert c.notify('add', 5, 12) == None
|
|
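The attribute-style calls in these tests are jsonrpcclient sugar; the explicit form goes through request()/notify(). A hedged sketch assuming the 2.x HTTPClient API and a hypothetical local endpoint:

from jsonrpcclient.http_client import HTTPClient

c = HTTPClient('http://localhost:8000/all-rpc/')  # hypothetical URL
assert c.request('divide', numerator=10, denominator=2, z=123) == 5.0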
b99b056f0af8c07db063987a75b96ee65bd0975f
|
scripts/mbedtls_dev/bignum_mod.py
|
scripts/mbedtls_dev/bignum_mod.py
|
"""Framework classes for generation of bignum mod test cases."""
# Copyright The Mbed TLS Contributors
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import random
from abc import ABCMeta
from typing import Dict, Iterator, List, Tuple
from . import test_data_generation
from . import bignum_common
class BignumCoreTarget(test_data_generation.BaseTarget, metaclass=ABCMeta):
#pylint: disable=abstract-method
"""Target for bignum mod test case generation."""
target_basename = 'test_suite_bignum_mod.generated'
|
Add script for generating mod test cases
|
Add script for generating mod test cases
This commit only adds the boilerplate, no actual tests are added.
Signed-off-by: Janos Follath <b175c17abe0474719672be1367bb75ee28bd5c71@arm.com>
|
Python
|
apache-2.0
|
Mbed-TLS/mbedtls,Mbed-TLS/mbedtls,Mbed-TLS/mbedtls,ARMmbed/mbedtls,ARMmbed/mbedtls,Mbed-TLS/mbedtls,ARMmbed/mbedtls,ARMmbed/mbedtls
|
Add script for generating mod test cases
This commit only adds the boilerplate, no actual tests are added.
Signed-off-by: Janos Follath <b175c17abe0474719672be1367bb75ee28bd5c71@arm.com>
|
"""Framework classes for generation of bignum mod test cases."""
# Copyright The Mbed TLS Contributors
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import random
from abc import ABCMeta
from typing import Dict, Iterator, List, Tuple
from . import test_data_generation
from . import bignum_common
class BignumCoreTarget(test_data_generation.BaseTarget, metaclass=ABCMeta):
#pylint: disable=abstract-method
"""Target for bignum mod test case generation."""
target_basename = 'test_suite_bignum_mod.generated'
|
<commit_before><commit_msg>Add script for generating mod test cases
This commit only adds the boilerplate, no actual tests are added.
Signed-off-by: Janos Follath <b175c17abe0474719672be1367bb75ee28bd5c71@arm.com><commit_after>
|
"""Framework classes for generation of bignum mod test cases."""
# Copyright The Mbed TLS Contributors
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import random
from abc import ABCMeta
from typing import Dict, Iterator, List, Tuple
from . import test_data_generation
from . import bignum_common
class BignumCoreTarget(test_data_generation.BaseTarget, metaclass=ABCMeta):
#pylint: disable=abstract-method
"""Target for bignum mod test case generation."""
target_basename = 'test_suite_bignum_mod.generated'
|
Add script for generating mod test cases
This commit only adds the boilerplate, no actual tests are added.
Signed-off-by: Janos Follath <b175c17abe0474719672be1367bb75ee28bd5c71@arm.com>"""Framework classes for generation of bignum mod test cases."""
# Copyright The Mbed TLS Contributors
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import random
from abc import ABCMeta
from typing import Dict, Iterator, List, Tuple
from . import test_data_generation
from . import bignum_common
class BignumCoreTarget(test_data_generation.BaseTarget, metaclass=ABCMeta):
#pylint: disable=abstract-method
"""Target for bignum mod test case generation."""
target_basename = 'test_suite_bignum_mod.generated'
|
<commit_before><commit_msg>Add script for generating mod test cases
This commit only adds the boilerplate, no actual tests are added.
Signed-off-by: Janos Follath <b175c17abe0474719672be1367bb75ee28bd5c71@arm.com><commit_after>"""Framework classes for generation of bignum mod test cases."""
# Copyright The Mbed TLS Contributors
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import random
from abc import ABCMeta
from typing import Dict, Iterator, List, Tuple
from . import test_data_generation
from . import bignum_common
class BignumCoreTarget(test_data_generation.BaseTarget, metaclass=ABCMeta):
#pylint: disable=abstract-method
"""Target for bignum mod test case generation."""
target_basename = 'test_suite_bignum_mod.generated'
|
|
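For orientation only: once concrete subclasses are added, a target like this emits modular-arithmetic test vectors along the lines of the standalone sketch below (plain Python, independent of the mbedtls framework, values chosen arbitrarily):

# Illustration only, not mbedtls framework code.
moduli = [2 ** 61 - 1, 2 ** 255 - 19]
a, b = 1234567, 7654321
for n in moduli:
    print("mod_add: ({0} + {1}) mod {2} = {3}".format(a, b, n, (a + b) % n))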
d921af5066fff0ec7b623bdd7f563b69152f27eb
|
filter_plugins/custom_plugins.py
|
filter_plugins/custom_plugins.py
|
#
# Usage: {{ foo | vault }}
#
def vault(encrypted, env):
method = """
from keyczar import keyczar
import os.path
import sys
keydir = '.vault'
if not os.path.isdir(keydir):
keydir = os.path.expanduser('~/.decrypted_openconext_keystore_{env}')
crypter = keyczar.Crypter.Read(keydir)
sys.stdout.write(crypter.Decrypt("%s"))
""".format(env=env) % encrypted
from subprocess import check_output
return check_output(["python", "-c", method])
class FilterModule(object):
def filters(self):
return {
'vault': vault
}
|
#
# Usage: {{ foo | vault }}
#
def vault(encrypted, env):
method = """
from keyczar import keyczar
import os.path
import sys
keydir = '.vault'
if not os.path.isdir(keydir):
keydir = os.path.expanduser('~/.decrypted_openconext_keystore_{env}')
crypter = keyczar.Crypter.Read(keydir)
sys.stdout.write(crypter.Decrypt("%s"))
""".format(env=env) % encrypted
import subprocess
child = subprocess.Popen(['python', '-c', method], stdout=subprocess.PIPE)
output = child.communicate()[0]
if child.returncode != 0:
raise ValueError("Exit code non-zero: %d" % child.returncode)
return output
class FilterModule(object):
def filters(self):
return {
'vault': vault
}
|
Use Popen which is available on RHEL6, but do check exit code
|
Use Popen which is available on RHEL6, but do check exit code
|
Python
|
apache-2.0
|
OpenConext/OpenConext-deploy,remold/OpenConext-deploy,OpenConext/OpenConext-deploy,remold/OpenConext-deploy,baszoetekouw/OpenConext-deploy,baszoetekouw/OpenConext-deploy,baszoetekouw/OpenConext-deploy,baszoetekouw/OpenConext-deploy,OpenConext/OpenConext-deploy,OpenConext/OpenConext-deploy,baszoetekouw/OpenConext-deploy,remold/OpenConext-deploy,OpenConext/OpenConext-deploy
|
#
# Usage: {{ foo | vault }}
#
def vault(encrypted, env):
method = """
from keyczar import keyczar
import os.path
import sys
keydir = '.vault'
if not os.path.isdir(keydir):
keydir = os.path.expanduser('~/.decrypted_openconext_keystore_{env}')
crypter = keyczar.Crypter.Read(keydir)
sys.stdout.write(crypter.Decrypt("%s"))
""".format(env=env) % encrypted
from subprocess import check_output
return check_output(["python", "-c", method])
class FilterModule(object):
def filters(self):
return {
'vault': vault
}
Use Popen which is available on RHEL6, but do check exit code
|
#
# Usage: {{ foo | vault }}
#
def vault(encrypted, env):
method = """
from keyczar import keyczar
import os.path
import sys
keydir = '.vault'
if not os.path.isdir(keydir):
keydir = os.path.expanduser('~/.decrypted_openconext_keystore_{env}')
crypter = keyczar.Crypter.Read(keydir)
sys.stdout.write(crypter.Decrypt("%s"))
""".format(env=env) % encrypted
import subprocess
child = subprocess.Popen(['python', '-c', method], stdout=subprocess.PIPE)
output = child.communicate()[0]
if child.returncode != 0:
raise ValueError("Exit code non-zero: %d" % child.returncode)
return output
class FilterModule(object):
def filters(self):
return {
'vault': vault
}
|
<commit_before>#
# Usage: {{ foo | vault }}
#
def vault(encrypted, env):
method = """
from keyczar import keyczar
import os.path
import sys
keydir = '.vault'
if not os.path.isdir(keydir):
keydir = os.path.expanduser('~/.decrypted_openconext_keystore_{env}')
crypter = keyczar.Crypter.Read(keydir)
sys.stdout.write(crypter.Decrypt("%s"))
""".format(env=env) % encrypted
from subprocess import check_output
return check_output(["python", "-c", method])
class FilterModule(object):
def filters(self):
return {
'vault': vault
}
<commit_msg>Use Popen which is available on RHEL6, but do check exit code<commit_after>
|
#
# Usage: {{ foo | vault }}
#
def vault(encrypted, env):
method = """
from keyczar import keyczar
import os.path
import sys
keydir = '.vault'
if not os.path.isdir(keydir):
keydir = os.path.expanduser('~/.decrypted_openconext_keystore_{env}')
crypter = keyczar.Crypter.Read(keydir)
sys.stdout.write(crypter.Decrypt("%s"))
""".format(env=env) % encrypted
import subprocess
child = subprocess.Popen(['python', '-c', method], stdout=subprocess.PIPE)
output = child.communicate()[0]
if child.returncode != 0:
raise ValueError("Exit code non-zero: %d" % child.returncode)
return output
class FilterModule(object):
def filters(self):
return {
'vault': vault
}
|
#
# Usage: {{ foo | vault }}
#
def vault(encrypted, env):
method = """
from keyczar import keyczar
import os.path
import sys
keydir = '.vault'
if not os.path.isdir(keydir):
keydir = os.path.expanduser('~/.decrypted_openconext_keystore_{env}')
crypter = keyczar.Crypter.Read(keydir)
sys.stdout.write(crypter.Decrypt("%s"))
""".format(env=env) % encrypted
from subprocess import check_output
return check_output(["python", "-c", method])
class FilterModule(object):
def filters(self):
return {
'vault': vault
}
Use Popen which is available on RHEL6, but do check exit code#
# Usage: {{ foo | vault }}
#
def vault(encrypted, env):
method = """
from keyczar import keyczar
import os.path
import sys
keydir = '.vault'
if not os.path.isdir(keydir):
keydir = os.path.expanduser('~/.decrypted_openconext_keystore_{env}')
crypter = keyczar.Crypter.Read(keydir)
sys.stdout.write(crypter.Decrypt("%s"))
""".format(env=env) % encrypted
import subprocess
child = subprocess.Popen(['python', '-c', method], stdout=subprocess.PIPE)
output = child.communicate()[0]
if child.returncode != 0:
raise ValueError("Exit code non-zero: %d" % child.returncode)
return output
class FilterModule(object):
def filters(self):
return {
'vault': vault
}
|
<commit_before>#
# Usage: {{ foo | vault }}
#
def vault(encrypted, env):
method = """
from keyczar import keyczar
import os.path
import sys
keydir = '.vault'
if not os.path.isdir(keydir):
keydir = os.path.expanduser('~/.decrypted_openconext_keystore_{env}')
crypter = keyczar.Crypter.Read(keydir)
sys.stdout.write(crypter.Decrypt("%s"))
""".format(env=env) % encrypted
from subprocess import check_output
return check_output(["python", "-c", method])
class FilterModule(object):
def filters(self):
return {
'vault': vault
}
<commit_msg>Use Popen which is available on RHEL6, but do check exit code<commit_after>#
# Usage: {{ foo | vault }}
#
def vault(encrypted, env):
method = """
from keyczar import keyczar
import os.path
import sys
keydir = '.vault'
if not os.path.isdir(keydir):
keydir = os.path.expanduser('~/.decrypted_openconext_keystore_{env}')
crypter = keyczar.Crypter.Read(keydir)
sys.stdout.write(crypter.Decrypt("%s"))
""".format(env=env) % encrypted
import subprocess
child = subprocess.Popen(['python', '-c', method], stdout=subprocess.PIPE)
output = child.communicate()[0]
if child.returncode != 0:
raise ValueError("Exit code non-zero: %d" % child.returncode)
return output
class FilterModule(object):
def filters(self):
return {
'vault': vault
}
|
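On interpreters newer than the RHEL6-era Python 2.6 this commit targets, the capture-and-check dance collapses into one call; a sketch for Python 3.5+:

import subprocess

# run() with check=True raises CalledProcessError itself, so no manual
# returncode inspection is needed.
result = subprocess.run(['python', '-c', 'print("ok")'],
                        stdout=subprocess.PIPE, check=True)
output = result.stdout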
37604722a186a28ee2df7cca422bc47cfea1857e
|
migrations/versions/d6b40a745e5_.py
|
migrations/versions/d6b40a745e5_.py
|
"""Add table usercache
Revision ID: d6b40a745e5
Revises: 1edda52b619f
Create Date: 2017-04-13 15:25:44.050719
"""
# revision identifiers, used by Alembic.
revision = 'd6b40a745e5'
down_revision = '1edda52b619f'
from alembic import op
import sqlalchemy as sa
def upgrade():
try:
op.create_table('usercache',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('username', sa.Unicode(length=64), nullable=True, index=True),
sa.Column('resolver', sa.Unicode(length=120), nullable=True),
sa.Column('user_id', sa.Unicode(length=320), nullable=True, index=True),
sa.Column('timestamp', sa.DateTime(), nullable=True),
sa.PrimaryKeyConstraint('id')
)
except Exception as exx:
print ("Could not create table 'usercache'.")
print (exx)
def downgrade():
op.drop_table('usercache')
|
Add SQL migration for usercache
|
Add SQL migration for usercache
Working on #670
|
Python
|
agpl-3.0
|
jh23453/privacyidea,privacyidea/privacyidea,privacyidea/privacyidea,jh23453/privacyidea,privacyidea/privacyidea,wheldom01/privacyidea,wheldom01/privacyidea,jh23453/privacyidea,wheldom01/privacyidea,privacyidea/privacyidea,wheldom01/privacyidea,jh23453/privacyidea,privacyidea/privacyidea,jh23453/privacyidea,jh23453/privacyidea,privacyidea/privacyidea,wheldom01/privacyidea,wheldom01/privacyidea
|
Add SQL migration for usercache
Working on #670
|
"""Add table usercache
Revision ID: d6b40a745e5
Revises: 1edda52b619f
Create Date: 2017-04-13 15:25:44.050719
"""
# revision identifiers, used by Alembic.
revision = 'd6b40a745e5'
down_revision = '1edda52b619f'
from alembic import op
import sqlalchemy as sa
def upgrade():
try:
op.create_table('usercache',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('username', sa.Unicode(length=64), nullable=True, index=True),
sa.Column('resolver', sa.Unicode(length=120), nullable=True),
sa.Column('user_id', sa.Unicode(length=320), nullable=True, index=True),
sa.Column('timestamp', sa.DateTime(), nullable=True),
sa.PrimaryKeyConstraint('id')
)
except Exception as exx:
print ("Could not create table 'usercache'.")
print (exx)
def downgrade():
op.drop_table('usercache')
|
<commit_before><commit_msg>Add SQL migration for usercache
Working on #670<commit_after>
|
"""Add table usercache
Revision ID: d6b40a745e5
Revises: 1edda52b619f
Create Date: 2017-04-13 15:25:44.050719
"""
# revision identifiers, used by Alembic.
revision = 'd6b40a745e5'
down_revision = '1edda52b619f'
from alembic import op
import sqlalchemy as sa
def upgrade():
try:
op.create_table('usercache',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('username', sa.Unicode(length=64), nullable=True, index=True),
sa.Column('resolver', sa.Unicode(length=120), nullable=True),
sa.Column('user_id', sa.Unicode(length=320), nullable=True, index=True),
sa.Column('timestamp', sa.DateTime(), nullable=True),
sa.PrimaryKeyConstraint('id')
)
except Exception as exx:
print ("Could not create table 'usercache'.")
print (exx)
def downgrade():
op.drop_table('usercache')
|
Add SQL migration for usercache
Working on #670"""Add table usercache
Revision ID: d6b40a745e5
Revises: 1edda52b619f
Create Date: 2017-04-13 15:25:44.050719
"""
# revision identifiers, used by Alembic.
revision = 'd6b40a745e5'
down_revision = '1edda52b619f'
from alembic import op
import sqlalchemy as sa
def upgrade():
try:
op.create_table('usercache',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('username', sa.Unicode(length=64), nullable=True, index=True),
sa.Column('resolver', sa.Unicode(length=120), nullable=True),
sa.Column('user_id', sa.Unicode(length=320), nullable=True, index=True),
sa.Column('timestamp', sa.DateTime(), nullable=True),
sa.PrimaryKeyConstraint('id')
)
except Exception as exx:
print ("Could not create table 'usercache'.")
print (exx)
def downgrade():
op.drop_table('usercache')
|
<commit_before><commit_msg>Add SQL migration for usercache
Working on #670<commit_after>"""Add table usercache
Revision ID: d6b40a745e5
Revises: 1edda52b619f
Create Date: 2017-04-13 15:25:44.050719
"""
# revision identifiers, used by Alembic.
revision = 'd6b40a745e5'
down_revision = '1edda52b619f'
from alembic import op
import sqlalchemy as sa
def upgrade():
try:
op.create_table('usercache',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('username', sa.Unicode(length=64), nullable=True, index=True),
sa.Column('resolver', sa.Unicode(length=120), nullable=True),
sa.Column('user_id', sa.Unicode(length=320), nullable=True, index=True),
sa.Column('timestamp', sa.DateTime(), nullable=True),
sa.PrimaryKeyConstraint('id')
)
except Exception as exx:
print ("Could not create table 'usercache'.")
print (exx)
def downgrade():
op.drop_table('usercache')
|
|
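For reference, the table created by this migration corresponds to a declarative model roughly like the sketch below; the class and Base names are assumptions for illustration, not privacyidea's actual code:

from sqlalchemy import Column, DateTime, Integer, Unicode
from sqlalchemy.ext.declarative import declarative_base

Base = declarative_base()

class UserCache(Base):  # hypothetical name
    __tablename__ = 'usercache'
    id = Column(Integer, primary_key=True)
    username = Column(Unicode(64), index=True)
    resolver = Column(Unicode(120))
    user_id = Column(Unicode(320), index=True)
    timestamp = Column(DateTime)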
c29b81e9ecc800898342c54cb6f4637a8371b1b4
|
models/fallahi_eval/process_data.py
|
models/fallahi_eval/process_data.py
|
import pandas
from collections import defaultdict
rppa_file = 'data/TableS1-Split.xlsx'
cell_lines = ['C32', 'COLO858', 'K2', 'LOXIMVI', 'MMACSF', 'MZ7MEL',
'RVH421', 'SKMEL28', 'WM115', 'WM1552C']
def read_rppa_data(fname=rppa_file):
data = {}
for cell_line in cell_lines:
data[cell_line] = {}
# Read both the median and the std sheet for each cell line
for data_type, postfix in (('median', ''), ('std', '-std')):
# Handle unpredictable number of extra rows before the actual
# header row.
i = 0
while True:
df = pandas.read_excel(fname, sheetname=(cell_line + postfix),
skiprows=i, header=0)
if df.columns[0] == 'Drug':
break
i += 1
data[cell_line][data_type] = df
return data
|
Read in RPPA medians and stds
|
Read in RPPA medians and stds
|
Python
|
bsd-2-clause
|
johnbachman/belpy,bgyori/indra,pvtodorov/indra,sorgerlab/belpy,pvtodorov/indra,pvtodorov/indra,johnbachman/indra,pvtodorov/indra,sorgerlab/indra,sorgerlab/indra,sorgerlab/belpy,bgyori/indra,sorgerlab/indra,sorgerlab/belpy,johnbachman/belpy,johnbachman/belpy,johnbachman/indra,bgyori/indra,johnbachman/indra
|
Read in RPPA medians and stds
|
import pandas
from collections import defaultdict
rppa_file = 'data/TableS1-Split.xlsx'
cell_lines = ['C32', 'COLO858', 'K2', 'LOXIMVI', 'MMACSF', 'MZ7MEL',
'RVH421', 'SKMEL28', 'WM115', 'WM1552C']
def read_rppa_data(fname=rppa_file):
data = {}
for cell_line in cell_lines:
data[cell_line] = {}
# Read both the median and the std sheet for each cell line
for data_type, postfix in (('median', ''), ('std', '-std')):
# Handle unpredictable number of extra rows before the actual
# header row.
i = 0
while True:
df = pandas.read_excel(fname, sheetname=(cell_line + postfix),
skiprows=i, header=0)
if df.columns[0] == 'Drug':
break
i += 1
data[cell_line][data_type] = df
return data
|
<commit_before><commit_msg>Read in RPPA medians and stds<commit_after>
|
import pandas
from collections import defaultdict
rppa_file = 'data/TableS1-Split.xlsx'
cell_lines = ['C32', 'COLO858', 'K2', 'LOXIMVI', 'MMACSF', 'MZ7MEL',
'RVH421', 'SKMEL28', 'WM115', 'WM1552C']
def read_rppa_data(fname=rppa_file):
data = {}
for cell_line in cell_lines:
data[cell_line] = {}
# Read both the median and the std sheet for each cell line
for data_type, postfix in (('median', ''), ('std', '-std')):
# Handle unpredictable number of extra rows before the actual
# header row.
i = 0
while True:
df = pandas.read_excel(fname, sheetname=(cell_line + postfix),
skiprows=i, header=0)
if df.columns[0] == 'Drug':
break
i += 1
data[cell_line][data_type] = df
return data
|
Read in RPPA medians and stdsimport pandas
from collections import defaultdict
rppa_file = 'data/TableS1-Split.xlsx'
cell_lines = ['C32', 'COLO858', 'K2', 'LOXIMVI', 'MMACSF', 'MZ7MEL',
'RVH421', 'SKMEL28', 'WM115', 'WM1552C']
def read_rppa_data(fname=rppa_file):
data = {}
for cell_line in cell_lines:
data[cell_line] = {}
# Read both the median and the std sheet for each cell line
for data_type, postfix in (('median', ''), ('std', '-std')):
# Handle unpredictable number of extra rows before the actual
# header row.
i = 0
while True:
df = pandas.read_excel(fname, sheetname=(cell_line + postfix),
skiprows=i, header=0)
if df.columns[0] == 'Drug':
break
i += 1
data[cell_line][data_type] = df
return data
|
<commit_before><commit_msg>Read in RPPA medians and stds<commit_after>import pandas
from collections import defaultdict
rppa_file = 'data/TableS1-Split.xlsx'
cell_lines = ['C32', 'COLO858', 'K2', 'LOXIMVI', 'MMACSF', 'MZ7MEL',
'RVH421', 'SKMEL28', 'WM115', 'WM1552C']
def read_rppa_data(fname=rppa_file):
data = {}
for cell_line in cell_lines:
data[cell_line] = {}
# Read both the median and the std sheet for each cell line
for data_type, postfix in (('median', ''), ('std', '-std')):
# Handle unpredictable number of extra rows before the actual
# header row.
i = 0
while True:
df = pandas.read_excel(fname, sheetname=(cell_line + postfix),
skiprows=i, header=0)
if df.columns[0] == 'Drug':
break
i += 1
data[cell_line][data_type] = df
return data
|
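A minimal usage sketch, assuming the workbook exists at the default path and contains the listed cell-line sheets:

data = read_rppa_data()
medians = data['C32']['median']  # DataFrame whose first column is 'Drug'
stds = data['C32']['std']
print(medians.columns[0], medians.shape, stds.shape)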