Column schema (string lengths are the observed min and max; class counts are the number of distinct values):

| Column | Stats |
|---|---|
| `commit` | string, length 40 (fixed) |
| `old_file` | string, length 4 to 118 |
| `new_file` | string, length 4 to 118 |
| `old_contents` | string, length 0 to 2.94k |
| `new_contents` | string, length 1 to 4.43k |
| `subject` | string, length 15 to 444 |
| `message` | string, length 16 to 3.45k |
| `lang` | string, 1 class (`Python`) |
| `license` | string, 13 classes |
| `repos` | string, length 5 to 43.2k |
| `prompt` | string, length 17 to 4.58k |
| `response` | string, length 1 to 4.43k |
| `prompt_tagged` | string, length 58 to 4.62k |
| `response_tagged` | string, length 1 to 4.43k |
| `text` | string, length 132 to 7.29k |
| `text_tagged` | string, length 173 to 7.33k |

In every preview row below, the derived columns repeat the base fields: `prompt` is the commit `message`, `response` and `response_tagged` are `new_contents`, `prompt_tagged` wraps `old_contents` and the message as `<commit_before>…<commit_msg>…<commit_after>`, and `text` / `text_tagged` concatenate the corresponding prompt and response. Each row is therefore shown once, with its distinct fields only.
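A minimal sketch of loading and inspecting the dataset with the Hugging Face `datasets` library. The dataset id `user/commit-dataset` and the `train` split are placeholders, since the real repository path is not given here:

```python
from datasets import load_dataset

# Placeholder dataset id and split; substitute the real repository path.
ds = load_dataset("user/commit-dataset", split="train")

print(ds.features)                  # column names and feature types
row = ds[0]
print(row["commit"], row["new_file"])
print(row["new_contents"][:120])    # start of the committed file
```
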
**commit:** `8040b797ba78f59fd0d182dfd22993340cb37700`
**old_file / new_file:** `test/test_logging.py`
**old_contents:** (empty; the commit adds a new file)
**new_contents:**
```python
from cStringIO import StringIO
import logging
import os
import sys
import tempfile
import unittest
from apel.common import set_up_logging
class LoggingTest(unittest.TestCase):
def setUp(self):
# Capture stdout in a StringIO object for inspection.
self._stdout = sys.stdout
sys.stdout = StringIO()
# Create a temporary file to write log to
handle, self.path = tempfile.mkstemp()
os.close(handle)
def tearDown(self):
# Restore stdout to normal.
sys.stdout.close()
sys.stdout = self._stdout
# Delete temporary file
os.remove(self.path)
def test_stdout_logging(self):
"""
Check that logging to stdout and file works, ignoring timestamp.
These are tested together as otherwise the logger setup conflicts.
"""
set_up_logging(self.path, 'INFO', True)
log = logging.getLogger('test_logging')
log.info('out')
# Retrieve output from StringIO object.
output = sys.stdout.getvalue()
# Shutdown logging to release log file for later deletion.
logging.shutdown()
# Only check bit after timestamp as that doesn't change.
self.assertEqual(output[23:], " - test_logging - INFO - out\n")
f = open(self.path)
self.assertEqual(f.readline()[23:], " - test_logging - INFO - out\n")
f.close()
def test_boring_logging(self):
"""Check that logging without handlers at least runs without errors."""
set_up_logging(None, 'INFO', False)
if __name__ == '__main__':
unittest.main()
```
**subject / message:** Add unittest for logging from apel.common.__init__
**lang:** Python
**license:** apache-2.0
**repos:** apel/apel,stfc/apel,tofu-rocketry/apel,stfc/apel,tofu-rocketry/apel,apel/apel

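The tagged columns in the row above follow a fixed token template. The sketch below shows how they appear to be assembled, inferred from the preview rows rather than taken from the dataset's own build code; `build_tagged` is a hypothetical helper name:

```python
def build_tagged(row):
    # <commit_before> + pre-commit file + <commit_msg> + commit message + <commit_after>
    prompt_tagged = ("<commit_before>" + row["old_contents"]
                     + "<commit_msg>" + row["message"]
                     + "<commit_after>")
    # text_tagged appends the post-commit file contents to the tagged prompt.
    text_tagged = prompt_tagged + row["new_contents"]
    return prompt_tagged, text_tagged
```
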
**commit:** `c0e6979a98251fa2300b2c61f206775ef953f7a8`
**old_file / new_file:** `tests/test-basics.py`
**old_contents:** (empty)
**new_contents:**
```python
#!/usr/bin/env python
import unittest
import pandas as pd
import pdfplumber
import sys, os
import six
import logging
logging.disable(logging.ERROR)
HERE = os.path.abspath(os.path.dirname(__file__))
class Test(unittest.TestCase):
def setUp(self):
path = os.path.join(HERE, "pdfs/nics-background-checks-2015-11.pdf")
self.pdf = pdfplumber.from_path(path)
def test_metadata(self):
metadata = self.pdf.metadata
assert(isinstance(metadata["Producer"], six.text_type))
def test_pagecount(self):
assert(len(self.pdf.pages) == 1)
```
**subject / message:** Add test for very basic things
**lang:** Python
**license:** mit
**repos:** jsvine/pdfplumber

**commit:** `305a137352ee2a287ded5d39657f91460e474e3b`
**old_file / new_file:** `tests/test_search.py`
**old_contents:** (empty)
**new_contents:**
```python
"""Test the ElasticSearch module"""
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from future import standard_library
standard_library.install_aliases()
from pytest import raises
from gobble.search import Contributors, BadSearchKey
def test_validate_bad_query_raises_exception():
with raises(BadSearchKey):
Contributors().validate_query(foo='bar')
def test_validate_good_query_adds_double_quotes():
original = {'name': 'mickey mouse'}
validated = {'name': '"mickey mouse"'}
assert Contributors().validate_query(**original) == validated
```
**subject / message:** Add tests for ElasticSearch classes.
**lang:** Python
**license:** mit
**repos:** openspending/gobble

"""Test the ElasticSearch module"""
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from future import standard_library
standard_library.install_aliases()
from pytest import raises
from gobble.search import Contributors, BadSearchKey
def test_validate_bad_query_raises_exception():
with raises(BadSearchKey):
Contributors().validate_query(foo='bar')
def test_validate_good_query_adds_double_quotes():
original = {'name': 'mickey mouse'}
validated = {'name': '"mickey mouse"'}
assert Contributors().validate_query(**original) == validated
|
<commit_before><commit_msg>Add tests for ElasticSearch classes.<commit_after>
|
"""Test the ElasticSearch module"""
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from future import standard_library
standard_library.install_aliases()
from pytest import raises
from gobble.search import Contributors, BadSearchKey
def test_validate_bad_query_raises_exception():
with raises(BadSearchKey):
Contributors().validate_query(foo='bar')
def test_validate_good_query_adds_double_quotes():
original = {'name': 'mickey mouse'}
validated = {'name': '"mickey mouse"'}
assert Contributors().validate_query(**original) == validated
|
Add tests for ElasticSearch classes."""Test the ElasticSearch module"""
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from future import standard_library
standard_library.install_aliases()
from pytest import raises
from gobble.search import Contributors, BadSearchKey
def test_validate_bad_query_raises_exception():
with raises(BadSearchKey):
Contributors().validate_query(foo='bar')
def test_validate_good_query_adds_double_quotes():
original = {'name': 'mickey mouse'}
validated = {'name': '"mickey mouse"'}
assert Contributors().validate_query(**original) == validated
|
<commit_before><commit_msg>Add tests for ElasticSearch classes.<commit_after>"""Test the ElasticSearch module"""
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from future import standard_library
standard_library.install_aliases()
from pytest import raises
from gobble.search import Contributors, BadSearchKey
def test_validate_bad_query_raises_exception():
with raises(BadSearchKey):
Contributors().validate_query(foo='bar')
def test_validate_good_query_adds_double_quotes():
original = {'name': 'mickey mouse'}
validated = {'name': '"mickey mouse"'}
assert Contributors().validate_query(**original) == validated
|
|
**commit:** `5981cba4f2bac5b1c29d4901bd0b3b3f523eb8a9`
**old_file / new_file:** `tests/test_tshark.py`
**old_contents:** (empty)
**new_contents:**
```python
import mock
from pyshark.tshark.tshark import (
get_tshark_display_filter_flag,
get_tshark_interfaces,
get_tshark_version,
)
@mock.patch('pyshark.tshark.tshark.subprocess.check_output', autospec=True)
def test_get_tshark_version(mock_check_output):
mock_check_output.return_value = (
b'TShark 1.12.1 (Git Rev Unknown from unknown)\n\n'b'Copyright '
b'1998-2014 Gerald Combs <gerald@wireshark.org> and contributors.\n'
)
actual = get_tshark_version()
expected = '1.12.1'
assert actual == expected
@mock.patch('pyshark.tshark.tshark.get_tshark_version', autospec=True)
def test_get_display_filter_flag(mock_get_tshark_version):
mock_get_tshark_version.return_value = '1.10.0'
actual = get_tshark_display_filter_flag()
expected = '-Y'
assert actual == expected
mock_get_tshark_version.return_value = '1.6.0'
actual = get_tshark_display_filter_flag()
expected = '-R'
assert actual == expected
@mock.patch('pyshark.tshark.tshark.subprocess.check_output', autospec=True)
def test_get_tshark_interfaces(mock_check_output):
mock_check_output.return_value = (
b'1. wlan0\n2. any\n3. lo (Loopback)\n4. eth0\n5. docker0\n'
)
actual = get_tshark_interfaces()
expected = ['1', '2', '3', '4', '5']
assert actual == expected
```
**subject / message:** Add tests for tshark sub-module
**lang:** Python
**license:** mit
**repos:** eaufavor/pyshark-ssl,KimiNewt/pyshark

**commit:** `c637bb057698b2699c9515ee9aed8432d7c51fe7`
**old_file / new_file:** `projects/generate.py`
**old_contents:** (empty)
**new_contents:**
```python
import os
import subprocess
import sys
# get the path to the directory containing the script
projects_directory = os.path.dirname(os.path.realpath(__file__))
platform_name = sys.platform
platform_folder = ""
if platform_name == 'win32':
platform_folder = "win"
elif platform_name == 'darwin':
platform_folder = "mac"
else:
print("Unsupported platform %s." % platform_name)
sys.exit(-1)
# create the platform specific project folder if it does not exist
platform_project_directory = os.path.join(projects_directory, platform_folder)
if not os.path.exists(platform_project_directory):
os.makedirs(platform_project_directory)
# run CMake from within the platform specific folder
os.chdir(platform_project_directory)
result = subprocess.run("cmake ../")
if not result.returncode == 0:
sys.exit(result.returncode)
```
**subject / message:** Add Python project generation script.
**lang:** Python
**license:** mit
**repos:** chrisculy/Divida,chrisculy/Divida

**commit:** `ef204d9455070d3ebff2890813e98ca44d398212`
**old_file / new_file:** `client/bootloader_run_application.py`
**old_contents:** (empty)
**new_contents:**
```python
#!/usr/bin/env python
import utils
import commands
def parse_commandline_args():
parser = utils.ConnectionArgumentParser(description='Send a jump to application command.')
parser.add_argument("ids", metavar='DEVICEID', nargs='*', type=int,
help="Device IDs to send jump command")
parser.add_argument('-a', '--all', help="Try to scan all network.",
action='store_true')
return parser.parse_args()
def main():
args = parse_commandline_args()
connection = utils.open_connection(args)
if args.all is True:
ids = list(range(1, 128))
else:
ids = args.ids
utils.write_command(connection, commands.encode_jump_to_main(), ids)
if __name__ == "__main__":
main()
```
**subject / message:** Add tool to send a jump to application command.
**lang:** Python
**license:** bsd-2-clause
**repos:** cvra/can-bootloader,cvra/can-bootloader,cvra/can-bootloader,cvra/can-bootloader

**commit:** `7c292b1577420d74fd72cbc4eef3d5364ba01bb2`
**old_file / new_file:** `backend/django/apps/accounts/migrations/0001_initial.py`
**old_contents:** (empty)
**new_contents:**
```python
# -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-07-11 19:06
from __future__ import unicode_literals
import apps.accounts.models
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
initial = True
dependencies = [
('contenttypes', '0002_remove_content_type_name'),
('auth', '0008_alter_user_username_max_length'),
]
operations = [
migrations.CreateModel(
name='AbstractAccount',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('password', models.CharField(max_length=128, verbose_name='password')),
('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
('first_name', models.CharField(max_length=50, null=True)),
('last_name', models.CharField(max_length=50, null=True)),
('email', models.EmailField(max_length=254, unique=True)),
('telephone', models.CharField(blank=True, max_length=20, null=True)),
('address', models.TextField(blank=True, max_length=100, null=True)),
('registered_at', models.DateTimeField(default=django.utils.timezone.now)),
('last_activity_at', models.DateTimeField(auto_now=True)),
('is_staff', models.BooleanField(default=False)),
('is_active', models.BooleanField(default=True)),
('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
('polymorphic_ctype', models.ForeignKey(editable=False, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='polymorphic_accounts.abstractaccount_set+', to='contenttypes.ContentType')),
('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
],
options={
'abstract': False,
},
managers=[
('objects', apps.accounts.models.AccountManager()),
],
),
]
```
**subject / message:** Make the initial account migrations
**lang:** Python
**license:** mit
**repos:** slavpetroff/sweetshop,slavpetroff/sweetshop

**commit:** `3c9ef6d42bb45d147d0c6093ea134d94fc4cf29f`
**old_file / new_file:** `penn/libraries.py`
**old_contents:** (empty)
**new_contents:**
```python
"""A module for consuming the Penn Libraries API"""
import requests
BASE_URL = "http://dla.library.upenn.edu/2.0.0/search"
def search(query):
"""Search Penn Libraries Franklin for documents
The maximum pagesize currently is 50.
"""
params = {
's.cmd': 'setTextQuery(%s)setPageSize(50)setHoldingsOnly(true)' % query
}
return requests.get(BASE_URL, params=params, timeout=10).json()
```
**subject / message:** Add primitive library search support
**lang:** Python
**license:** mit
**repos:** pennlabs/penn-sdk-python,pennlabs/penn-sdk-python

"""A module for consuming the Penn Libraries API"""
import requests
BASE_URL = "http://dla.library.upenn.edu/2.0.0/search"
def search(query):
"""Search Penn Libraries Franklin for documents
The maximum pagesize currently is 50.
"""
params = {
's.cmd': 'setTextQuery(%s)setPageSize(50)setHoldingsOnly(true)' % query
}
return requests.get(BASE_URL, params=params, timeout=10).json()
|
<commit_before><commit_msg>Add primitive library search support<commit_after>
|
"""A module for consuming the Penn Libraries API"""
import requests
BASE_URL = "http://dla.library.upenn.edu/2.0.0/search"
def search(query):
"""Search Penn Libraries Franklin for documents
The maximum pagesize currently is 50.
"""
params = {
's.cmd': 'setTextQuery(%s)setPageSize(50)setHoldingsOnly(true)' % query
}
return requests.get(BASE_URL, params=params, timeout=10).json()
|
Add primitive library search support"""A module for consuming the Penn Libraries API"""
import requests
BASE_URL = "http://dla.library.upenn.edu/2.0.0/search"
def search(query):
"""Search Penn Libraries Franklin for documents
The maximum pagesize currently is 50.
"""
params = {
's.cmd': 'setTextQuery(%s)setPageSize(50)setHoldingsOnly(true)' % query
}
return requests.get(BASE_URL, params=params, timeout=10).json()
|
<commit_before><commit_msg>Add primitive library search support<commit_after>"""A module for consuming the Penn Libraries API"""
import requests
BASE_URL = "http://dla.library.upenn.edu/2.0.0/search"
def search(query):
"""Search Penn Libraries Franklin for documents
The maximum pagesize currently is 50.
"""
params = {
's.cmd': 'setTextQuery(%s)setPageSize(50)setHoldingsOnly(true)' % query
}
return requests.get(BASE_URL, params=params, timeout=10).json()
|
|
**commit:** `8290df4317b0aae2c15b4a82942413b84f754e84`
**old_file / new_file:** `cla_public/apps/base/tests/test_locale.py`
**old_contents:** (empty)
**new_contents:**
```python
import unittest
from cla_public.apps.base.views import set_locale
from cla_public.app import create_app
from flask import request
class LocaleTest(unittest.TestCase):
def setUp(self):
self.app = create_app('config/testing.py')
ctx = self.app.test_request_context()
ctx.push()
self.client = self.app.test_client()
def test_locale_cookie_is_set(self):
with self.app.test_client() as client:
response = client.get('/?locale=en_GB')
self.check_cookie(response, 'locale', 'en_GB')
def check_cookie(self, response, name, value):
# Checks for existence of a cookie and verifies the value of it.
from werkzeug.http import parse_cookie
cookies = response.headers.getlist('Set-Cookie')
for cookie in cookies:
c_key, c_value = parse_cookie(cookie).items()[0]
if c_key == name:
assert c_value == value
return
# Cookie not found
assert False
```
**subject / message:** Test locale is set based on query param
**lang:** Python
**license:** mit
**repos:** ministryofjustice/cla_public,ministryofjustice/cla_public,ministryofjustice/cla_public,ministryofjustice/cla_public

**commit:** `844fd2533063343c969d9ff9259c80605fe488dd`
**old_file / new_file:** `hiicart/forms.py`
**old_contents:** (empty)
**new_contents:**
```python
from django import forms
class PaymentFormBase(forms.Form):
def __getitem__(self, key):
payment_fields = self._get_payment_fields()
if key in payment_fields:
key = payment_fields[key]
return super(PaymentFormBase, self).__getitem__(key)
def _get_payment_fields(self):
raise NotImplementedError()
def set_result(self, result):
"""
Set result from payment gateway for form validation.
"""
raise NotImplementedError()
```
**subject / message:** Apply refactoring done to authorize.net form/gateway to braintree.
**lang:** Python
**license:** mit
**repos:** hiidef/hiicart,hiidef/hiicart

**commit:** `8a07ebf3db3cd2b70534c92e90d5428444e3d8f1`
**old_file / new_file:** `bookwormDB/bin/dbbindings-flask.py`
**old_contents:** (empty)
**new_contents:**
```python
#!/usr/bin/env python
#So we load in the terms that allow the API implementation to happen for now.
from bookwormDB.general_API import SQLAPIcall as SQLAPIcall
from flask import Flask, request, Response
import json
import os
app = Flask(__name__)
@app.route('/')
def index():
JSONinput = request.args.get('queryTerms') or request.args.get('query')
if not JSONinput:
return "Need query or queryTerms argument"
return main(JSONinput)
@app.route('/debug/query')
def debug_query():
JSONinput = request.args.get('queryTerms') or request.args.get('query')
return JSONinput
def main(JSONinput):
query = json.loads(JSONinput)
p = SQLAPIcall(query)
result = p.execute()
resp = Response(result)
if query['method'] == "return_tsv":
resp.headers['Content-Type'] = "text; charset=utf-8"
resp.headers["Content-Disposition"] = "filename=Bookworm-data.txt"
resp.headers["Pragma"] = "no-cache"
resp.headers["Expires"] = 0
else:
resp.headers['Content-Type'] = "text/html"
return resp
if __name__ == '__main__':
port = int(os.environ.get('PORT', 8080))
app.run(host='0.0.0.0', port=port, debug=False)
```
**subject:** Add a python-only alternative to dbbindings.py
**message:** Add a python-only alternative to dbbindings.py
Pushing a small web app that can return Bookworm API calls through Python, rather than Apache HTTP Server. It is currently a very simple porting of the Apache code that runs on port 8080, and is intended for minimal installs.
**lang:** Python
**license:** mit
**repos:** Bookworm-project/BookwormDB,Bookworm-project/BookwormDB

#!/usr/bin/env python
#So we load in the terms that allow the API implementation to happen for now.
from bookwormDB.general_API import SQLAPIcall as SQLAPIcall
from flask import Flask, request, Response
import json
import os
app = Flask(__name__)
@app.route('/')
def index():
JSONinput = request.args.get('queryTerms') or request.args.get('query')
if not JSONinput:
return "Need query or queryTerms argument"
return main(JSONinput)
@app.route('/debug/query')
def debug_query():
JSONinput = request.args.get('queryTerms') or request.args.get('query')
return JSONinput
def main(JSONinput):
query = json.loads(JSONinput)
p = SQLAPIcall(query)
result = p.execute()
resp = Response(result)
if query['method'] == "return_tsv":
resp.headers['Content-Type'] = "text; charset=utf-8"
resp.headers["Content-Disposition"] = "filename=Bookworm-data.txt"
resp.headers["Pragma"] = "no-cache"
resp.headers["Expires"] = 0
else:
resp.headers['Content-Type'] = "text/html"
return resp
if __name__ == '__main__':
port = int(os.environ.get('PORT', 8080))
app.run(host='0.0.0.0', port=port, debug=False)
|
<commit_before><commit_msg>Add a python-only alternative to dbbindings.py
Pushing a small web app that can return Bookworm API calls through Python, rather than Apache HTTP Server. It is currently a very simple porting of the Apache code that runs on port 8080, and is intended for minimal installs.<commit_after>
|
#!/usr/bin/env python
#So we load in the terms that allow the API implementation to happen for now.
from bookwormDB.general_API import SQLAPIcall as SQLAPIcall
from flask import Flask, request, Response
import json
import os
app = Flask(__name__)
@app.route('/')
def index():
JSONinput = request.args.get('queryTerms') or request.args.get('query')
if not JSONinput:
return "Need query or queryTerms argument"
return main(JSONinput)
@app.route('/debug/query')
def debug_query():
JSONinput = request.args.get('queryTerms') or request.args.get('query')
return JSONinput
def main(JSONinput):
query = json.loads(JSONinput)
p = SQLAPIcall(query)
result = p.execute()
resp = Response(result)
if query['method'] == "return_tsv":
resp.headers['Content-Type'] = "text; charset=utf-8"
resp.headers["Content-Disposition"] = "filename=Bookworm-data.txt"
resp.headers["Pragma"] = "no-cache"
resp.headers["Expires"] = 0
else:
resp.headers['Content-Type'] = "text/html"
return resp
if __name__ == '__main__':
port = int(os.environ.get('PORT', 8080))
app.run(host='0.0.0.0', port=port, debug=False)
|
Add a python-only alternative to dbbindings.py
Pushing a small web app that can return Bookworm API calls through Python, rather than Apache HTTP Server. It is currently a very simple porting of the Apache code that runs on port 8080, and is intended for minimal installs.#!/usr/bin/env python
#So we load in the terms that allow the API implementation to happen for now.
from bookwormDB.general_API import SQLAPIcall as SQLAPIcall
from flask import Flask, request, Response
import json
import os
app = Flask(__name__)
@app.route('/')
def index():
JSONinput = request.args.get('queryTerms') or request.args.get('query')
if not JSONinput:
return "Need query or queryTerms argument"
return main(JSONinput)
@app.route('/debug/query')
def debug_query():
JSONinput = request.args.get('queryTerms') or request.args.get('query')
return JSONinput
def main(JSONinput):
query = json.loads(JSONinput)
p = SQLAPIcall(query)
result = p.execute()
resp = Response(result)
if query['method'] == "return_tsv":
resp.headers['Content-Type'] = "text; charset=utf-8"
resp.headers["Content-Disposition"] = "filename=Bookworm-data.txt"
resp.headers["Pragma"] = "no-cache"
resp.headers["Expires"] = 0
else:
resp.headers['Content-Type'] = "text/html"
return resp
if __name__ == '__main__':
port = int(os.environ.get('PORT', 8080))
app.run(host='0.0.0.0', port=port, debug=False)
|
<commit_before><commit_msg>Add a python-only alternative to dbbindings.py
Pushing a small web app that can return Bookworm API calls through Python, rather than Apache HTTP Server. It is currently a very simple port of the Apache code that runs on port 8080, and is intended for minimal installs.<commit_after>#!/usr/bin/env python
#So we load in the terms that allow the API implementation to happen for now.
from bookwormDB.general_API import SQLAPIcall as SQLAPIcall
from flask import Flask, request, Response
import json
import os
app = Flask(__name__)
@app.route('/')
def index():
JSONinput = request.args.get('queryTerms') or request.args.get('query')
if not JSONinput:
return "Need query or queryTerms argument"
return main(JSONinput)
@app.route('/debug/query')
def debug_query():
JSONinput = request.args.get('queryTerms') or request.args.get('query')
return JSONinput
def main(JSONinput):
query = json.loads(JSONinput)
p = SQLAPIcall(query)
result = p.execute()
resp = Response(result)
if query['method'] == "return_tsv":
resp.headers['Content-Type'] = "text; charset=utf-8"
resp.headers["Content-Disposition"] = "filename=Bookworm-data.txt"
resp.headers["Pragma"] = "no-cache"
resp.headers["Expires"] = 0
else:
resp.headers['Content-Type'] = "text/html"
return resp
if __name__ == '__main__':
port = int(os.environ.get('PORT', 8080))
app.run(host='0.0.0.0', port=port, debug=False)
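Illustrative sketch (not part of the record above): a minimal client for the app, assuming it is running locally on port 8080. The query terms below are hypothetical; valid fields depend on the Bookworm database behind SQLAPIcall.
import json
from urllib.parse import quote
from urllib.request import urlopen
# Build a hypothetical Bookworm query and fetch the result as TSV.
query = {"method": "return_tsv", "database": "bookworm",
         "search_limits": {}, "counttype": ["TextCount"], "groups": []}
url = "http://localhost:8080/?query=" + quote(json.dumps(query))
print(urlopen(url).read().decode("utf-8"))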
|
|
ea0ff4ae04907473e2af6e7629ea9d73a34f4d85
|
graphysio/plotwidgets/poiselector.py
|
graphysio/plotwidgets/poiselector.py
|
from functools import partial
import numpy as np
import pandas as pd
import pyqtgraph as pg
from pyqtgraph.Qt import QtGui, QtCore
from graphysio.plotwidgets.plotwidget import PlotWidget
from graphysio.plotwidgets.tsplot import CurveItemWithFeet
class POISelectorWidget(PlotWidget):
@staticmethod
def mouseMoved(self, evt):
pos = evt[0] ## using signal proxy turns original arguments into a tuple
if self.sceneBoundingRect().contains(pos):
mousePoint = self.getViewBox().mapSceneToView(pos)
index = int(mousePoint.x())
self.vLine.setPos(mousePoint.x())
@staticmethod
def clicked(self, evt):
pos = self.vLine.value()
b = evt.button()
print(b, pos)
def __init__(self, series, parent=None):
super().__init__(parent=parent, CurveClass=CurveItemWithFeet)
self.curve = self.addSeriesAsCurve(series)
pen = pg.mkPen('k', width=2)
self.vLine = pg.InfiniteLine(angle=90, movable=False, pen=pen)
self.addItem(self.vLine, ignoreBounds=True)
mouseMoved = partial(self.mouseMoved, self)
self.sigproxy = pg.SignalProxy(self.scene().sigMouseMoved, rateLimit=60, slot=mouseMoved)
clicked = partial(self.clicked, self)
self.scene().sigMouseClicked.connect(clicked)
# def keyPressEvent(self, event):
# if event.key() == QtCore.Qt.Key_Delete:
# for curve in self.curves.values():
# curve.feetitem.removeSelection()
|
Add sketch for POI selector widget
|
Add sketch for POI selector widget
|
Python
|
isc
|
jaj42/dyngraph,jaj42/GraPhysio,jaj42/GraPhysio
|
Add sketch for POI selector widget
|
from functools import partial
import numpy as np
import pandas as pd
import pyqtgraph as pg
from pyqtgraph.Qt import QtGui, QtCore
from graphysio.plotwidgets.plotwidget import PlotWidget
from graphysio.plotwidgets.tsplot import CurveItemWithFeet
class POISelectorWidget(PlotWidget):
@staticmethod
def mouseMoved(self, evt):
pos = evt[0] ## using signal proxy turns original arguments into a tuple
if self.sceneBoundingRect().contains(pos):
mousePoint = self.getViewBox().mapSceneToView(pos)
index = int(mousePoint.x())
self.vLine.setPos(mousePoint.x())
@staticmethod
def clicked(self, evt):
pos = self.vLine.value()
b = evt.button()
print(b, pos)
def __init__(self, series, parent=None):
super().__init__(parent=parent, CurveClass=CurveItemWithFeet)
self.curve = self.addSeriesAsCurve(series)
pen = pg.mkPen('k', width=2)
self.vLine = pg.InfiniteLine(angle=90, movable=False, pen=pen)
self.addItem(self.vLine, ignoreBounds=True)
mouseMoved = partial(self.mouseMoved, self)
self.sigproxy = pg.SignalProxy(self.scene().sigMouseMoved, rateLimit=60, slot=mouseMoved)
clicked = partial(self.clicked, self)
self.scene().sigMouseClicked.connect(clicked)
# def keyPressEvent(self, event):
# if event.key() == QtCore.Qt.Key_Delete:
# for curve in self.curves.values():
# curve.feetitem.removeSelection()
|
<commit_before><commit_msg>Add sketch for POI selector widget<commit_after>
|
from functools import partial
import numpy as np
import pandas as pd
import pyqtgraph as pg
from pyqtgraph.Qt import QtGui, QtCore
from graphysio.plotwidgets.plotwidget import PlotWidget
from graphysio.plotwidgets.tsplot import CurveItemWithFeet
class POISelectorWidget(PlotWidget):
@staticmethod
def mouseMoved(self, evt):
pos = evt[0] ## using signal proxy turns original arguments into a tuple
if self.sceneBoundingRect().contains(pos):
mousePoint = self.getViewBox().mapSceneToView(pos)
index = int(mousePoint.x())
self.vLine.setPos(mousePoint.x())
@staticmethod
def clicked(self, evt):
pos = self.vLine.value()
b = evt.button()
print(b, pos)
def __init__(self, series, parent=None):
super().__init__(parent=parent, CurveClass=CurveItemWithFeet)
self.curve = self.addSeriesAsCurve(series)
pen = pg.mkPen('k', width=2)
self.vLine = pg.InfiniteLine(angle=90, movable=False, pen=pen)
self.addItem(self.vLine, ignoreBounds=True)
mouseMoved = partial(self.mouseMoved, self)
self.sigproxy = pg.SignalProxy(self.scene().sigMouseMoved, rateLimit=60, slot=mouseMoved)
clicked = partial(self.clicked, self)
self.scene().sigMouseClicked.connect(clicked)
# def keyPressEvent(self, event):
# if event.key() == QtCore.Qt.Key_Delete:
# for curve in self.curves.values():
# curve.feetitem.removeSelection()
|
Add sketch for POI selector widgetfrom functools import partial
import numpy as np
import pandas as pd
import pyqtgraph as pg
from pyqtgraph.Qt import QtGui, QtCore
from graphysio.plotwidgets.plotwidget import PlotWidget
from graphysio.plotwidgets.tsplot import CurveItemWithFeet
class POISelectorWidget(PlotWidget):
@staticmethod
def mouseMoved(self, evt):
pos = evt[0] ## using signal proxy turns original arguments into a tuple
if self.sceneBoundingRect().contains(pos):
mousePoint = self.getViewBox().mapSceneToView(pos)
index = int(mousePoint.x())
self.vLine.setPos(mousePoint.x())
@staticmethod
def clicked(self, evt):
pos = self.vLine.value()
b = evt.button()
print(b, pos)
def __init__(self, series, parent=None):
super().__init__(parent=parent, CurveClass=CurveItemWithFeet)
self.curve = self.addSeriesAsCurve(series)
pen = pg.mkPen('k', width=2)
self.vLine = pg.InfiniteLine(angle=90, movable=False, pen=pen)
self.addItem(self.vLine, ignoreBounds=True)
mouseMoved = partial(self.mouseMoved, self)
self.sigproxy = pg.SignalProxy(self.scene().sigMouseMoved, rateLimit=60, slot=mouseMoved)
clicked = partial(self.clicked, self)
self.scene().sigMouseClicked.connect(clicked)
# def keyPressEvent(self, event):
# if event.key() == QtCore.Qt.Key_Delete:
# for curve in self.curves.values():
# curve.feetitem.removeSelection()
|
<commit_before><commit_msg>Add sketch for POI selector widget<commit_after>from functools import partial
import numpy as np
import pandas as pd
import pyqtgraph as pg
from pyqtgraph.Qt import QtGui, QtCore
from graphysio.plotwidgets.plotwidget import PlotWidget
from graphysio.plotwidgets.tsplot import CurveItemWithFeet
class POISelectorWidget(PlotWidget):
@staticmethod
def mouseMoved(self, evt):
pos = evt[0] ## using signal proxy turns original arguments into a tuple
if self.sceneBoundingRect().contains(pos):
mousePoint = self.getViewBox().mapSceneToView(pos)
index = int(mousePoint.x())
self.vLine.setPos(mousePoint.x())
@staticmethod
def clicked(self, evt):
pos = self.vLine.value()
b = evt.button()
print(b, pos)
def __init__(self, series, parent=None):
super().__init__(parent=parent, CurveClass=CurveItemWithFeet)
self.curve = self.addSeriesAsCurve(series)
pen = pg.mkPen('k', width=2)
self.vLine = pg.InfiniteLine(angle=90, movable=False, pen=pen)
self.addItem(self.vLine, ignoreBounds=True)
mouseMoved = partial(self.mouseMoved, self)
self.sigproxy = pg.SignalProxy(self.scene().sigMouseMoved, rateLimit=60, slot=mouseMoved)
clicked = partial(self.clicked, self)
self.scene().sigMouseClicked.connect(clicked)
# def keyPressEvent(self, event):
# if event.key() == QtCore.Qt.Key_Delete:
# for curve in self.curves.values():
# curve.feetitem.removeSelection()
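Illustrative sketch (assumes a working graphysio/pyqtgraph install): driving the widget above with a synthetic pandas series.
import numpy as np
import pandas as pd
from pyqtgraph.Qt import QtGui
from graphysio.plotwidgets.poiselector import POISelectorWidget
app = QtGui.QApplication([])
# Synthetic demo signal; any indexed pandas Series should work here.
t = np.arange(0, 10, 0.01)
series = pd.Series(np.sin(2 * np.pi * t), index=t, name='demo')
w = POISelectorWidget(series)
w.show()
app.exec_()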
|
|
3bcb4fd5fd9680d33423e76bd9502f408a5e498f
|
tests/test_modules/test_scanning/test_unrollingblock.py
|
tests/test_modules/test_scanning/test_unrollingblock.py
|
from mock import Mock
from malcolm.modules.scanning.blocks import unrolling_block
from malcolm.testutil import ChildTestCase
class TestUnrollingBlock(ChildTestCase):
def test_unrolling_block(self):
self.create_child_block(
unrolling_block,
Mock(),
mri="TEST:MRI",
)
|
Add test to instantiate unrolling block
|
Add test to instantiate unrolling block
|
Python
|
apache-2.0
|
dls-controls/pymalcolm,dls-controls/pymalcolm,dls-controls/pymalcolm
|
Add test to instantiate unrolling block
|
from mock import Mock
from malcolm.modules.scanning.blocks import unrolling_block
from malcolm.testutil import ChildTestCase
class TestUnrollingBlock(ChildTestCase):
def test_unrolling_block(self):
self.create_child_block(
unrolling_block,
Mock(),
mri="TEST:MRI",
)
|
<commit_before><commit_msg>Add test to instantiate unrolling block<commit_after>
|
from mock import Mock
from malcolm.modules.scanning.blocks import unrolling_block
from malcolm.testutil import ChildTestCase
class TestUnrollingBlock(ChildTestCase):
def test_unrolling_block(self):
self.create_child_block(
unrolling_block,
Mock(),
mri="TEST:MRI",
)
|
Add test to instantiate unrolling blockfrom mock import Mock
from malcolm.modules.scanning.blocks import unrolling_block
from malcolm.testutil import ChildTestCase
class TestUnrollingBlock(ChildTestCase):
def test_unrolling_block(self):
self.create_child_block(
unrolling_block,
Mock(),
mri="TEST:MRI",
)
|
<commit_before><commit_msg>Add test to instantiate unrolling block<commit_after>from mock import Mock
from malcolm.modules.scanning.blocks import unrolling_block
from malcolm.testutil import ChildTestCase
class TestUnrollingBlock(ChildTestCase):
def test_unrolling_block(self):
self.create_child_block(
unrolling_block,
Mock(),
mri="TEST:MRI",
)
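Illustrative sketch: loading and running the test case above directly with unittest, assuming the malcolm package and this test module are importable.
import unittest
from tests.test_modules.test_scanning.test_unrollingblock import TestUnrollingBlock
# Build a suite from the single test case and run it verbosely.
suite = unittest.defaultTestLoader.loadTestsFromTestCase(TestUnrollingBlock)
unittest.TextTestRunner(verbosity=2).run(suite)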
|
|
998442022f2c71197c6e91fb8de276316eff5cc5
|
tempest/tests/lib/services/volume/v2/test_capabilities_client.py
|
tempest/tests/lib/services/volume/v2/test_capabilities_client.py
|
# Copyright 2017 AT&T Corporation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.lib.services.volume.v2 import capabilities_client
from tempest.tests.lib import fake_auth_provider
from tempest.tests.lib.services import base
class TestCapabilitiesClient(base.BaseServiceTest):
FAKE_BACKEND_CAPABILITIES = {
"namespace": "OS::Storage::Capabilities::fake",
"vendor_name": "OpenStack",
"volume_backend_name": "lvmdriver-1",
"pool_name": "pool",
"driver_version": "2.0.0",
"storage_protocol": "iSCSI",
"display_name": "Capabilities of Cinder LVM driver",
"description": (
"These are volume type options provided by Cinder LVM driver."),
"visibility": "public",
"replication_targets": [],
"properties": {
"compression": {
"title": "Compression",
"description": "Enables compression.",
"type": "boolean"
},
"qos": {
"title": "QoS",
"description": "Enables QoS.",
"type": "boolean"
},
"replication": {
"title": "Replication",
"description": "Enables replication.",
"type": "boolean"
},
"thin_provisioning": {
"title": "Thin Provisioning",
"description": "Sets thin provisioning.",
"type": "boolean"
}
}
}
def setUp(self):
super(TestCapabilitiesClient, self).setUp()
fake_auth = fake_auth_provider.FakeAuthProvider()
self.client = capabilities_client.CapabilitiesClient(
fake_auth, 'volume', 'regionOne')
def _test_show_backend_capabilities(self, bytes_body=False):
self.check_service_client_function(
self.client.show_backend_capabilities,
'tempest.lib.common.rest_client.RestClient.get',
self.FAKE_BACKEND_CAPABILITIES,
bytes_body,
host='lvmdriver-1')
def test_show_backend_capabilities_with_str_body(self):
self._test_show_backend_capabilities()
def test_show_backend_capabilities_with_bytes_body(self):
self._test_show_backend_capabilities(bytes_body=True)
|
Add Capabilities Client unit tests
|
Add Capabilities Client unit tests
Change-Id: I0bb5d0d7b775ee41dedf6ac30ece3698d585f072
|
Python
|
apache-2.0
|
Juniper/tempest,vedujoshi/tempest,cisco-openstack/tempest,cisco-openstack/tempest,vedujoshi/tempest,masayukig/tempest,openstack/tempest,Juniper/tempest,masayukig/tempest,openstack/tempest
|
Add Capabilities Client unit tests
Change-Id: I0bb5d0d7b775ee41dedf6ac30ece3698d585f072
|
# Copyright 2017 AT&T Corporation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.lib.services.volume.v2 import capabilities_client
from tempest.tests.lib import fake_auth_provider
from tempest.tests.lib.services import base
class TestCapabilitiesClient(base.BaseServiceTest):
FAKE_BACKEND_CAPABILITIES = {
"namespace": "OS::Storage::Capabilities::fake",
"vendor_name": "OpenStack",
"volume_backend_name": "lvmdriver-1",
"pool_name": "pool",
"driver_version": "2.0.0",
"storage_protocol": "iSCSI",
"display_name": "Capabilities of Cinder LVM driver",
"description": (
"These are volume type options provided by Cinder LVM driver."),
"visibility": "public",
"replication_targets": [],
"properties": {
"compression": {
"title": "Compression",
"description": "Enables compression.",
"type": "boolean"
},
"qos": {
"title": "QoS",
"description": "Enables QoS.",
"type": "boolean"
},
"replication": {
"title": "Replication",
"description": "Enables replication.",
"type": "boolean"
},
"thin_provisioning": {
"title": "Thin Provisioning",
"description": "Sets thin provisioning.",
"type": "boolean"
}
}
}
def setUp(self):
super(TestCapabilitiesClient, self).setUp()
fake_auth = fake_auth_provider.FakeAuthProvider()
self.client = capabilities_client.CapabilitiesClient(
fake_auth, 'volume', 'regionOne')
def _test_show_backend_capabilities(self, bytes_body=False):
self.check_service_client_function(
self.client.show_backend_capabilities,
'tempest.lib.common.rest_client.RestClient.get',
self.FAKE_BACKEND_CAPABILITIES,
bytes_body,
host='lvmdriver-1')
def test_show_backend_capabilities_with_str_body(self):
self._test_show_backend_capabilities()
def test_show_backend_capabilities_with_bytes_body(self):
self._test_show_backend_capabilities(bytes_body=True)
|
<commit_before><commit_msg>Add Capabilities Client unit tests
Change-Id: I0bb5d0d7b775ee41dedf6ac30ece3698d585f072<commit_after>
|
# Copyright 2017 AT&T Corporation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.lib.services.volume.v2 import capabilities_client
from tempest.tests.lib import fake_auth_provider
from tempest.tests.lib.services import base
class TestCapabilitiesClient(base.BaseServiceTest):
FAKE_BACKEND_CAPABILITIES = {
"namespace": "OS::Storage::Capabilities::fake",
"vendor_name": "OpenStack",
"volume_backend_name": "lvmdriver-1",
"pool_name": "pool",
"driver_version": "2.0.0",
"storage_protocol": "iSCSI",
"display_name": "Capabilities of Cinder LVM driver",
"description": (
"These are volume type options provided by Cinder LVM driver."),
"visibility": "public",
"replication_targets": [],
"properties": {
"compression": {
"title": "Compression",
"description": "Enables compression.",
"type": "boolean"
},
"qos": {
"title": "QoS",
"description": "Enables QoS.",
"type": "boolean"
},
"replication": {
"title": "Replication",
"description": "Enables replication.",
"type": "boolean"
},
"thin_provisioning": {
"title": "Thin Provisioning",
"description": "Sets thin provisioning.",
"type": "boolean"
}
}
}
def setUp(self):
super(TestCapabilitiesClient, self).setUp()
fake_auth = fake_auth_provider.FakeAuthProvider()
self.client = capabilities_client.CapabilitiesClient(
fake_auth, 'volume', 'regionOne')
def _test_show_backend_capabilities(self, bytes_body=False):
self.check_service_client_function(
self.client.show_backend_capabilities,
'tempest.lib.common.rest_client.RestClient.get',
self.FAKE_BACKEND_CAPABILITIES,
bytes_body,
host='lvmdriver-1')
def test_show_backend_capabilities_with_str_body(self):
self._test_show_backend_capabilities()
def test_show_backend_capabilities_with_bytes_body(self):
self._test_show_backend_capabilities(bytes_body=True)
|
Add Capabilities Client unit tests
Change-Id: I0bb5d0d7b775ee41dedf6ac30ece3698d585f072# Copyright 2017 AT&T Corporation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.lib.services.volume.v2 import capabilities_client
from tempest.tests.lib import fake_auth_provider
from tempest.tests.lib.services import base
class TestCapabilitiesClient(base.BaseServiceTest):
FAKE_BACKEND_CAPABILITIES = {
"namespace": "OS::Storage::Capabilities::fake",
"vendor_name": "OpenStack",
"volume_backend_name": "lvmdriver-1",
"pool_name": "pool",
"driver_version": "2.0.0",
"storage_protocol": "iSCSI",
"display_name": "Capabilities of Cinder LVM driver",
"description": (
"These are volume type options provided by Cinder LVM driver."),
"visibility": "public",
"replication_targets": [],
"properties": {
"compression": {
"title": "Compression",
"description": "Enables compression.",
"type": "boolean"
},
"qos": {
"title": "QoS",
"description": "Enables QoS.",
"type": "boolean"
},
"replication": {
"title": "Replication",
"description": "Enables replication.",
"type": "boolean"
},
"thin_provisioning": {
"title": "Thin Provisioning",
"description": "Sets thin provisioning.",
"type": "boolean"
}
}
}
def setUp(self):
super(TestCapabilitiesClient, self).setUp()
fake_auth = fake_auth_provider.FakeAuthProvider()
self.client = capabilities_client.CapabilitiesClient(
fake_auth, 'volume', 'regionOne')
def _test_show_backend_capabilities(self, bytes_body=False):
self.check_service_client_function(
self.client.show_backend_capabilities,
'tempest.lib.common.rest_client.RestClient.get',
self.FAKE_BACKEND_CAPABILITIES,
bytes_body,
host='lvmdriver-1')
def test_show_backend_capabilities_with_str_body(self):
self._test_show_backend_capabilities()
def test_show_backend_capabilities_with_bytes_body(self):
self._test_show_backend_capabilities(bytes_body=True)
|
<commit_before><commit_msg>Add Capabilities Client unit tests
Change-Id: I0bb5d0d7b775ee41dedf6ac30ece3698d585f072<commit_after># Copyright 2017 AT&T Corporation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.lib.services.volume.v2 import capabilities_client
from tempest.tests.lib import fake_auth_provider
from tempest.tests.lib.services import base
class TestCapabilitiesClient(base.BaseServiceTest):
FAKE_BACKEND_CAPABILITIES = {
"namespace": "OS::Storage::Capabilities::fake",
"vendor_name": "OpenStack",
"volume_backend_name": "lvmdriver-1",
"pool_name": "pool",
"driver_version": "2.0.0",
"storage_protocol": "iSCSI",
"display_name": "Capabilities of Cinder LVM driver",
"description": (
"These are volume type options provided by Cinder LVM driver."),
"visibility": "public",
"replication_targets": [],
"properties": {
"compression": {
"title": "Compression",
"description": "Enables compression.",
"type": "boolean"
},
"qos": {
"title": "QoS",
"description": "Enables QoS.",
"type": "boolean"
},
"replication": {
"title": "Replication",
"description": "Enables replication.",
"type": "boolean"
},
"thin_provisioning": {
"title": "Thin Provisioning",
"description": "Sets thin provisioning.",
"type": "boolean"
}
}
}
def setUp(self):
super(TestCapabilitiesClient, self).setUp()
fake_auth = fake_auth_provider.FakeAuthProvider()
self.client = capabilities_client.CapabilitiesClient(
fake_auth, 'volume', 'regionOne')
def _test_show_backend_capabilities(self, bytes_body=False):
self.check_service_client_function(
self.client.show_backend_capabilities,
'tempest.lib.common.rest_client.RestClient.get',
self.FAKE_BACKEND_CAPABILITIES,
bytes_body,
host='lvmdriver-1')
def test_show_backend_capabilities_with_str_body(self):
self._test_show_backend_capabilities()
def test_show_backend_capabilities_with_bytes_body(self):
self._test_show_backend_capabilities(bytes_body=True)
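Illustrative sketch: the call shape exercised by the test above. With a real auth provider this would issue GET /capabilities/<host> against Cinder; the fake provider used here only demonstrates construction.
from tempest.lib.services.volume.v2 import capabilities_client
from tempest.tests.lib import fake_auth_provider
auth = fake_auth_provider.FakeAuthProvider()
client = capabilities_client.CapabilitiesClient(auth, 'volume', 'regionOne')
# Live call (needs a real deployment), kept commented out:
# capabilities = client.show_backend_capabilities(host='lvmdriver-1')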
|
|
1c39b15ef604db13c37e37080ba6294c163b0c5d
|
tools/gitlog2asciidoc.py
|
tools/gitlog2asciidoc.py
|
#!/usr/bin/python
import sys
import re
import subprocess
"""
This script generates a release note from the output of git log
between the specified tags.
Arguments:
since -- tag name
until -- tag name
Example Input:
* <commit subject>
+
<commit message>
Bug: issue 123
Change-Id: <change id>
Signed-off-by: <name>
Expected Output:
* issue 123 <commit subject>
+
<commit message>
"""
if len(sys.argv) != 3:
sys.exit('Usage: ' + sys.argv[0] + ' <since> <until>')
since_until = sys.argv[1] + '..' + sys.argv[2]
proc = subprocess.Popen(['git', 'log', '--reverse', '--no-merges',
since_until, "--format=* %s%n+%n%b"],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,)
stdout_value = proc.communicate()[0]
subject = ""
message = []
for line in stdout_value.splitlines(True):
if re.match(r'\* ', line) is not None:
# Write change log for a commit
if subject != "":
# Write subject
sys.stdout.write(subject)
# Write message lines
if message != []:
# Clear + from last line in commit message
message[-1] = '\n'
for m in message:
sys.stdout.write(m)
# Start new commit block
message = []
subject = line
continue
# Move issue number to subject line
elif re.match('Bug: ', line) is not None:
line = line.replace('Bug: ', '').replace('\n',' ')
subject = subject[:2] + line + subject[2:]
# Move issue number to subject line
elif re.match('Issue: ', line) is not None:
line = line.replace('Issue: ', 'issue ').replace('\n',' ')
subject = subject[:2] + line + subject[2:]
# Remove commit footers
elif re.match(r'((\w+-)+\w+:)', line) is not None:
continue
else:
if line == '\n':
# Don't add extra blank line if last one is already blank
if message[-1] != '+\n':
message.append('+\n')
else:
message.append(line)
|
Add scripts to create release notes from git log
|
Add scripts to create release notes from git log
This script generates a list of commits from git log between
<since> and <until>. The output of this script is in asciidoc
format containing the list of commit subjects and bodies.
Bug: issue 1272
Change-Id: I32b2ee055e81c5663128aafefd5f46c2e17e58c8
|
Python
|
apache-2.0
|
midnightradio/gerrit,zommarin/gerrit,gcoders/gerrit,bpollack/gerrit,Seinlin/gerrit,thesamet/gerrit,keerath/gerrit_newssh,teamblueridge/gerrit,qtproject/qtqa-gerrit,GerritCodeReview/gerrit,anminhsu/gerrit,quyixia/gerrit,teamblueridge/gerrit,thinkernel/gerrit,Seinlin/gerrit,quyixia/gerrit,ashang/aaron-gerrit,Saulis/gerrit,joshuawilson/merrit,pkdevbox/gerrit,anminhsu/gerrit,supriyantomaftuh/gerrit,gcoders/gerrit,jackminicloud/test,supriyantomaftuh/gerrit,evanchueng/gerrit,midnightradio/gerrit,joshuawilson/merrit,WANdisco/gerrit,Seinlin/gerrit,quyixia/gerrit,jackminicloud/test,basilgor/gerrit,Overruler/gerrit,ckamm/gerrit,bootstraponline-archive/gerrit-mirror,WANdisco/gerrit,netroby/gerrit,gerrit-review/gerrit,atdt/gerrit,ashang/aaron-gerrit,Saulis/gerrit,hdost/gerrit,atdt/gerrit,netroby/gerrit,keerath/gerrit_newssh,zommarin/gerrit,netroby/gerrit,m1kah/gerrit-contributions,atdt/gerrit,gerrit-review/gerrit,TonyChai24/test,anminhsu/gerrit,ckamm/gerrit,qtproject/qtqa-gerrit,bpollack/gerrit,Distrotech/gerrit,qtproject/qtqa-gerrit,netroby/gerrit,gerrit-review/gerrit,Team-OctOS/host_gerrit,1yvT0s/gerrit,bpollack/gerrit,basilgor/gerrit,midnightradio/gerrit,Distrotech/gerrit,qtproject/qtqa-gerrit,Team-OctOS/host_gerrit,gcoders/gerrit,Seinlin/gerrit,gerrit-review/gerrit,Seinlin/gerrit,midnightradio/gerrit,1yvT0s/gerrit,TonyChai24/test,pkdevbox/gerrit,evanchueng/gerrit,TonyChai24/test,Team-OctOS/host_gerrit,CandyShop/gerrit,dwhipstock/gerrit,quyixia/gerrit,1yvT0s/gerrit,CandyShop/gerrit,quyixia/gerrit,joshuawilson/merrit,CandyShop/gerrit,renchaorevee/gerrit,Team-OctOS/host_gerrit,jackminicloud/test,renchaorevee/gerrit,gcoders/gerrit,joshuawilson/merrit,Team-OctOS/host_gerrit,joshuawilson/merrit,dwhipstock/gerrit,Distrotech/gerrit,pkdevbox/gerrit,hdost/gerrit,atdt/gerrit,CandyShop/gerrit,basilgor/gerrit,qtproject/qtqa-gerrit,supriyantomaftuh/gerrit,Distrotech/gerrit,netroby/gerrit,supriyantomaftuh/gerrit,MerritCR/merrit,Saulis/gerrit,jackminicloud/test,ckamm/gerrit,thesamet/gerrit,qtproject/qtqa-gerrit,gracefullife/gerrit,1yvT0s/gerrit,MerritCR/merrit,quyixia/gerrit,TonyChai24/test,teamblueridge/gerrit,hdost/gerrit,m1kah/gerrit-contributions,gracefullife/gerrit,dwhipstock/gerrit,thinkernel/gerrit,hdost/gerrit,ckamm/gerrit,Seinlin/gerrit,jackminicloud/test,teamblueridge/gerrit,qtproject/qtqa-gerrit,Overruler/gerrit,thesamet/gerrit,renchaorevee/gerrit,bootstraponline-archive/gerrit-mirror,evanchueng/gerrit,WANdisco/gerrit,thesamet/gerrit,WANdisco/gerrit,jackminicloud/test,anminhsu/gerrit,renchaorevee/gerrit,netroby/gerrit,dwhipstock/gerrit,ashang/aaron-gerrit,GerritCodeReview/gerrit,dwhipstock/gerrit,TonyChai24/test,bootstraponline-archive/gerrit-mirror,thinkernel/gerrit,GerritCodeReview/gerrit,midnightradio/gerrit,MerritCR/merrit,bpollack/gerrit,MerritCR/merrit,supriyantomaftuh/gerrit,TonyChai24/test,MerritCR/merrit,thinkernel/gerrit,gracefullife/gerrit,gerrit-review/gerrit,hdost/gerrit,1yvT0s/gerrit,keerath/gerrit_newssh,gerrit-review/gerrit,thesamet/gerrit,Saulis/gerrit,dwhipstock/gerrit,netroby/gerrit,gcoders/gerrit,gcoders/gerrit,GerritCodeReview/gerrit,Saulis/gerrit,thesamet/gerrit,supriyantomaftuh/gerrit,Saulis/gerrit,thinkernel/gerrit,thinkernel/gerrit,bootstraponline-archive/gerrit-mirror,midnightradio/gerrit,thinkernel/gerrit,GerritCodeReview/gerrit,CandyShop/gerrit,pkdevbox/gerrit,Distrotech/gerrit,m1kah/gerrit-contributions,gracefullife/gerrit,zommarin/gerrit,basilgor/gerrit,ckamm/gerrit,MerritCR/merrit,quyixia/gerrit,GerritCodeReview/gerrit,renchaorevee/gerrit,keerath/gerrit_newssh,WANdisco/ger
rit,ashang/aaron-gerrit,zommarin/gerrit,anminhsu/gerrit,thesamet/gerrit,MerritCR/merrit,Distrotech/gerrit,joshuawilson/merrit,m1kah/gerrit-contributions,gracefullife/gerrit,supriyantomaftuh/gerrit,Overruler/gerrit,Overruler/gerrit,ashang/aaron-gerrit,basilgor/gerrit,WANdisco/gerrit,renchaorevee/gerrit,pkdevbox/gerrit,pkdevbox/gerrit,joshuawilson/merrit,evanchueng/gerrit,hdost/gerrit,anminhsu/gerrit,Team-OctOS/host_gerrit,GerritCodeReview/gerrit,evanchueng/gerrit,bootstraponline-archive/gerrit-mirror,gcoders/gerrit,Distrotech/gerrit,keerath/gerrit_newssh,Overruler/gerrit,MerritCR/merrit,Overruler/gerrit,bootstraponline-archive/gerrit-mirror,WANdisco/gerrit,dwhipstock/gerrit,GerritCodeReview/gerrit,atdt/gerrit,bpollack/gerrit,TonyChai24/test,gerrit-review/gerrit,bpollack/gerrit,pkdevbox/gerrit,Seinlin/gerrit,hdost/gerrit,renchaorevee/gerrit,teamblueridge/gerrit,Team-OctOS/host_gerrit,joshuawilson/merrit,jackminicloud/test,anminhsu/gerrit,zommarin/gerrit
|
Add scripts to create release notes from git log
This script generates a list of commits from git log between
<since> and <until>. The output of this script is in asciidoc
format containing the list of commit subjects and bodies.
Bug: issue 1272
Change-Id: I32b2ee055e81c5663128aafefd5f46c2e17e58c8
|
#!/usr/bin/python
import sys
import re
import subprocess
"""
This script generates a release note from the output of git log
between the specified tags.
Arguments:
since -- tag name
until -- tag name
Example Input:
* <commit subject>
+
<commit message>
Bug: issue 123
Change-Id: <change id>
Signed-off-by: <name>
Expected Output:
* issue 123 <commit subject>
+
<commit message>
"""
if len(sys.argv) != 3:
sys.exit('Usage: ' + sys.argv[0] + ' <since> <until>')
since_until = sys.argv[1] + '..' + sys.argv[2]
proc = subprocess.Popen(['git', 'log', '--reverse', '--no-merges',
since_until, "--format=* %s%n+%n%b"],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,)
stdout_value = proc.communicate()[0]
subject = ""
message = []
for line in stdout_value.splitlines(True):
if re.match(r'\* ', line) is not None:
# Write change log for a commit
if subject != "":
# Write subject
sys.stdout.write(subject)
# Write message lines
if message != []:
# Clear + from last line in commit message
message[-1] = '\n'
for m in message:
sys.stdout.write(m)
# Start new commit block
message = []
subject = line
continue
# Move issue number to subject line
elif re.match('Bug: ', line) is not None:
line = line.replace('Bug: ', '').replace('\n',' ')
subject = subject[:2] + line + subject[2:]
# Move issue number to subject line
elif re.match('Issue: ', line) is not None:
line = line.replace('Issue: ', 'issue ').replace('\n',' ')
subject = subject[:2] + line + subject[2:]
# Remove commit footers
elif re.match(r'((\w+-)+\w+:)', line) is not None:
continue
else:
if line == '\n':
# Don't add extra blank line if last one is already blank
if message[-1] != '+\n':
message.append('+\n')
else:
message.append(line)
|
<commit_before><commit_msg>Add scripts to create release notes from git log
This script generates a list of commits from git log between
<since> and <until>. The output of this script is in asciidoc
format containing the list of commit subjects and bodies.
Bug: issue 1272
Change-Id: I32b2ee055e81c5663128aafefd5f46c2e17e58c8<commit_after>
|
#!/usr/bin/python
import sys
import re
import subprocess
"""
This script generates a release note from the output of git log
between the specified tags.
Arguments:
since -- tag name
until -- tag name
Example Input:
* <commit subject>
+
<commit message>
Bug: issue 123
Change-Id: <change id>
Signed-off-by: <name>
Expected Output:
* issue 123 <commit subject>
+
<commit message>
"""
if len(sys.argv) != 3:
sys.exit('Usage: ' + sys.argv[0] + ' <since> <until>')
since_until = sys.argv[1] + '..' + sys.argv[2]
proc = subprocess.Popen(['git', 'log', '--reverse', '--no-merges',
since_until, "--format=* %s%n+%n%b"],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,)
stdout_value = proc.communicate()[0]
subject = ""
message = []
for line in stdout_value.splitlines(True):
if re.match(r'\* ', line) is not None:
# Write change log for a commit
if subject != "":
# Write subject
sys.stdout.write(subject)
# Write message lines
if message != []:
# Clear + from last line in commit message
message[-1] = '\n'
for m in message:
sys.stdout.write(m)
# Start new commit block
message = []
subject = line
continue
# Move issue number to subject line
elif re.match('Bug: ', line) is not None:
line = line.replace('Bug: ', '').replace('\n',' ')
subject = subject[:2] + line + subject[2:]
# Move issue number to subject line
elif re.match('Issue: ', line) is not None:
line = line.replace('Issue: ', 'issue ').replace('\n',' ')
subject = subject[:2] + line + subject[2:]
# Remove commit footers
elif re.match(r'((\w+-)+\w+:)', line) is not None:
continue
else:
if line == '\n':
# Don't add extra blank line if last one is already blank
if message[-1] != '+\n':
message.append('+\n')
else:
message.append(line)
|
Add scripts to create release notes from git log
This script generates a list of commits from git log between
<since> and <until>. The output of this script is in asciidoc
format containing the list of commit subjects and bodies.
Bug: issue 1272
Change-Id: I32b2ee055e81c5663128aafefd5f46c2e17e58c8#!/usr/bin/python
import sys
import re
import subprocess
"""
This script generates a release note from the output of git log
between the specified tags.
Arguments:
since -- tag name
until -- tag name
Example Input:
* <commit subject>
+
<commit message>
Bug: issue 123
Change-Id: <change id>
Signed-off-by: <name>
Expected Output:
* issue 123 <commit subject>
+
<commit message>
"""
if len(sys.argv) != 3:
sys.exit('Usage: ' + sys.argv[0] + ' <since> <until>')
since_until = sys.argv[1] + '..' + sys.argv[2]
proc = subprocess.Popen(['git', 'log', '--reverse', '--no-merges',
since_until, "--format=* %s%n+%n%b"],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,)
stdout_value = proc.communicate()[0]
subject = ""
message = []
for line in stdout_value.splitlines(True):
if re.match(r'\* ', line) is not None:
# Write change log for a commit
if subject != "":
# Write subject
sys.stdout.write(subject)
# Write message lines
if message != []:
# Clear + from last line in commit message
message[-1] = '\n'
for m in message:
sys.stdout.write(m)
# Start new commit block
message = []
subject = line
continue
# Move issue number to subject line
elif re.match('Bug: ', line) is not None:
line = line.replace('Bug: ', '').replace('\n',' ')
subject = subject[:2] + line + subject[2:]
# Move issue number to subject line
elif re.match('Issue: ', line) is not None:
line = line.replace('Issue: ', 'issue ').replace('\n',' ')
subject = subject[:2] + line + subject[2:]
# Remove commit footers
elif re.match(r'((\w+-)+\w+:)', line) is not None:
continue
else:
if line == '\n':
# Don't add extra blank line if last one is already blank
if message[-1] != '+\n':
message.append('+\n')
else:
message.append(line)
|
<commit_before><commit_msg>Add scripts to create release notes from git log
This script generates a list of commits from git log between
<since> and <until>. The output of this script is in asciidoc
format containing the list of commit subjects and bodies.
Bug: issue 1272
Change-Id: I32b2ee055e81c5663128aafefd5f46c2e17e58c8<commit_after>#!/usr/bin/python
import sys
import re
import subprocess
"""
This script generates a release note from the output of git log
between the specified tags.
Arguments:
since -- tag name
until -- tag name
Example Input:
* <commit subject>
+
<commit message>
Bug: issue 123
Change-Id: <change id>
Signed-off-by: <name>
Expected Output:
* issue 123 <commit subject>
+
<commit message>
"""
if len(sys.argv) != 3:
sys.exit('Usage: ' + sys.argv[0] + ' <since> <until>')
since_until = sys.argv[1] + '..' + sys.argv[2]
proc = subprocess.Popen(['git', 'log', '--reverse', '--no-merges',
since_until, "--format=* %s%n+%n%b"],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,)
stdout_value = proc.communicate()[0]
subject = ""
message = []
for line in stdout_value.splitlines(True):
if re.match(r'\* ', line) is not None:
# Write change log for a commit
if subject != "":
# Write subject
sys.stdout.write(subject)
# Write message lines
if message != []:
# Clear + from last line in commit message
message[-1] = '\n'
for m in message:
sys.stdout.write(m)
# Start new commit block
message = []
subject = line
continue
# Move issue number to subject line
elif re.match('Bug: ', line) is not None:
line = line.replace('Bug: ', '').replace('\n',' ')
subject = subject[:2] + line + subject[2:]
# Move issue number to subject line
elif re.match('Issue: ', line) is not None:
line = line.replace('Issue: ', 'issue ').replace('\n',' ')
subject = subject[:2] + line + subject[2:]
# Remove commit footers
elif re.match(r'((\w+-)+\w+:)', line) is not None:
continue
else:
if line == '\n':
# Don't add extra blank line if last one is already blank
if message[-1] != '+\n':
message.append('+\n')
else:
message.append(line)
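Illustrative sketch: invoking the script above between two hypothetical tags and capturing the asciidoc output to a file.
import subprocess
# Tags v2.4 and v2.5 are placeholders; substitute real tag names.
with open('ReleaseNotes.txt', 'w') as out:
    subprocess.call(['python', 'tools/gitlog2asciidoc.py', 'v2.4', 'v2.5'],
                    stdout=out)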
|
|
02cad9118c4aa03e783899ce4637d4c4fe010f57
|
scripts/add-missing-terminal-part-ofs.py
|
scripts/add-missing-terminal-part-ofs.py
|
#!/usr/bin/env python
from common import db_connection
import sys
if len(sys.argv) != 3:
print >> sys.stderr, "Usage: %s <PROJECT-ID> <USER-ID>" % (sys.argv[0],)
sys.exit(1)
project_id = int(sys.argv[1])
user_id = int(sys.argv[2])
c = db_connection.cursor()
c.execute("SELECT id FROM relation WHERE project_id = %s AND relation_name='part_of'",
(project_id,))
part_of_id = c.fetchone()[0]
for direction in ('pre', 'post'):
query = '''
SELECT cit.id, cis.id
FROM class_instance cit, class_instance cis,
class cs, class ct,
treenode_class_instance tcit, treenode_class_instance tcis
WHERE ct.class_name = '{direction}synaptic terminal' AND
cit.class_id = ct.id AND
cs.class_name = 'skeleton' AND
cis.class_id = cs.id AND
tcit.class_instance_id = cit.id AND
tcis.class_instance_id = cis.id AND
tcit.treenode_id = tcis.treenode_id
'''.format(direction=direction)
c.execute(query)
rows = c.fetchall()
for terminal_id, skeleton_id in rows:
c.execute('''
INSERT INTO class_instance_class_instance
(user_id, project_id, relation_id, class_instance_a, class_instance_b)
VALUES (%(u)s, %(p)s, %(r)s, %(ca)s, %(cb)s)
''',
{'u': user_id,
'p': project_id,
'r': part_of_id,
'ca': terminal_id,
'cb': skeleton_id})
db_connection.commit()
c.close()
db_connection.close()
|
Add a script to add back the missing [terminal] part_of [skeleton] relationships
|
Add a script to add back the missing [terminal] part_of [skeleton] relationships
|
Python
|
agpl-3.0
|
htem/CATMAID,fzadow/CATMAID,htem/CATMAID,htem/CATMAID,fzadow/CATMAID,fzadow/CATMAID,htem/CATMAID,fzadow/CATMAID
|
Add a script to add back the missing [terminal] part_of [skeleton] relationships
|
#!/usr/bin/env python
from common import db_connection
import sys
if len(sys.argv) != 3:
print >> sys.stderr, "Usage: %s <PROJECT-ID> <USER-ID>" % (sys.argv[0],)
sys.exit(1)
project_id = int(sys.argv[1])
user_id = int(sys.argv[2])
c = db_connection.cursor()
c.execute("SELECT id FROM relation WHERE project_id = %s AND relation_name='part_of'",
(project_id,))
part_of_id = c.fetchone()[0]
for direction in ('pre', 'post'):
query = '''
SELECT cit.id, cis.id
FROM class_instance cit, class_instance cis,
class cs, class ct,
treenode_class_instance tcit, treenode_class_instance tcis
WHERE ct.class_name = '{direction}synaptic terminal' AND
cit.class_id = ct.id AND
cs.class_name = 'skeleton' AND
cis.class_id = cs.id AND
tcit.class_instance_id = cit.id AND
tcis.class_instance_id = cis.id AND
tcit.treenode_id = tcis.treenode_id
'''.format(direction=direction)
c.execute(query)
rows = c.fetchall()
for terminal_id, skeleton_id in rows:
c.execute('''
INSERT INTO class_instance_class_instance
(user_id, project_id, relation_id, class_instance_a, class_instance_b)
VALUES (%(u)s, %(p)s, %(r)s, %(ca)s, %(cb)s)
''',
{'u': user_id,
'p': project_id,
'r': part_of_id,
'ca': terminal_id,
'cb': skeleton_id})
db_connection.commit()
c.close()
db_connection.close()
|
<commit_before><commit_msg>Add a script to add back the missing [terminal] part_of [skeleton] relationships<commit_after>
|
#!/usr/bin/env python
from common import db_connection
import sys
if len(sys.argv) != 3:
print >> sys.stderr, "Usage: %s <PROJECT-ID> <USER-ID>" % (sys.argv[0],)
sys.exit(1)
project_id = int(sys.argv[1])
user_id = int(sys.argv[2])
c = db_connection.cursor()
c.execute("SELECT id FROM relation WHERE project_id = %s AND relation_name='part_of'",
(project_id,))
part_of_id = c.fetchone()[0]
for direction in ('pre', 'post'):
query = '''
SELECT cit.id, cis.id
FROM class_instance cit, class_instance cis,
class cs, class ct,
treenode_class_instance tcit, treenode_class_instance tcis
WHERE ct.class_name = '{direction}synaptic terminal' AND
cit.class_id = ct.id AND
cs.class_name = 'skeleton' AND
cis.class_id = cs.id AND
tcit.class_instance_id = cit.id AND
tcis.class_instance_id = cis.id AND
tcit.treenode_id = tcis.treenode_id
'''.format(direction=direction)
c.execute(query)
rows = c.fetchall()
for terminal_id, skeleton_id in rows:
c.execute('''
INSERT INTO class_instance_class_instance
(user_id, project_id, relation_id, class_instance_a, class_instance_b)
VALUES (%(u)s, %(p)s, %(r)s, %(ca)s, %(cb)s)
''',
{'u': user_id,
'p': project_id,
'r': part_of_id,
'ca': terminal_id,
'cb': skeleton_id})
db_connection.commit()
c.close()
db_connection.close()
|
Add a script to add back the missing [terminal] part_of [skeleton] relationships#!/usr/bin/env python
from common import db_connection
import sys
if len(sys.argv) != 3:
print >> sys.stderr, "Usage: %s <PROJECT-ID> <USER-ID>" % (sys.argv[0],)
sys.exit(1)
project_id = int(sys.argv[1])
user_id = int(sys.argv[2])
c = db_connection.cursor()
c.execute("SELECT id FROM relation WHERE project_id = %s AND relation_name='part_of'",
(project_id,))
part_of_id = c.fetchone()[0]
for direction in ('pre', 'post'):
query = '''
SELECT cit.id, cis.id
FROM class_instance cit, class_instance cis,
class cs, class ct,
treenode_class_instance tcit, treenode_class_instance tcis
WHERE ct.class_name = '{direction}synaptic terminal' AND
cit.class_id = ct.id AND
cs.class_name = 'skeleton' AND
cis.class_id = cs.id AND
tcit.class_instance_id = cit.id AND
tcis.class_instance_id = cis.id AND
tcit.treenode_id = tcis.treenode_id
'''.format(direction=direction)
c.execute(query)
rows = c.fetchall()
for terminal_id, skeleton_id in rows:
c.execute('''
INSERT INTO class_instance_class_instance
(user_id, project_id, relation_id, class_instance_a, class_instance_b)
VALUES (%(u)s, %(p)s, %(r)s, %(ca)s, %(cb)s)
''',
{'u': user_id,
'p': project_id,
'r': part_of_id,
'ca': terminal_id,
'cb': skeleton_id})
db_connection.commit()
c.close()
db_connection.close()
|
<commit_before><commit_msg>Add a script to add back the missing [terminal] part_of [skeleton] relationships<commit_after>#!/usr/bin/env python
from common import db_connection
import sys
if len(sys.argv) != 3:
print >> sys.stderr, "Usage: %s <PROJECT-ID> <USER-ID>" % (sys.argv[0],)
sys.exit(1)
project_id = int(sys.argv[1])
user_id = int(sys.argv[2])
c = db_connection.cursor()
c.execute("SELECT id FROM relation WHERE project_id = %s AND relation_name='part_of'",
(project_id,))
part_of_id = c.fetchone()[0]
for direction in ('pre', 'post'):
query = '''
SELECT cit.id, cis.id
FROM class_instance cit, class_instance cis,
class cs, class ct,
treenode_class_instance tcit, treenode_class_instance tcis
WHERE ct.class_name = '{direction}synaptic terminal' AND
cit.class_id = ct.id AND
cs.class_name = 'skeleton' AND
cis.class_id = cs.id AND
tcit.class_instance_id = cit.id AND
tcis.class_instance_id = cis.id AND
tcit.treenode_id = tcis.treenode_id
'''.format(direction=direction)
c.execute(query)
rows = c.fetchall()
for terminal_id, skeleton_id in rows:
c.execute('''
INSERT INTO class_instance_class_instance
(user_id, project_id, relation_id, class_instance_a, class_instance_b)
VALUES (%(u)s, %(p)s, %(r)s, %(ca)s, %(cb)s)
''',
{'u': user_id,
'p': project_id,
'r': part_of_id,
'ca': terminal_id,
'cb': skeleton_id})
db_connection.commit()
c.close()
db_connection.close()
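Illustrative sketch: running the script above for a hypothetical project id 3 as user id 1; both must already exist in the CATMAID database configured in common.db_connection.
import subprocess
subprocess.check_call(['python', 'scripts/add-missing-terminal-part-ofs.py',
                       '3', '1'])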
|
|
58c10946515b37e55b06f3e455a59c44d3b6076d
|
buzzmobile/tests/rostest_utils.py
|
buzzmobile/tests/rostest_utils.py
|
import roslaunch
import functools
import subprocess
def with_roscore(obj):
"""Decorator to run all tests in a testcase with their own roscore.
This wraps the setUp and tearDown methods so that a roscore process is
spun up first and torn down at the very end. This adds a small time
penalty, but it's worth it.
It's worth deciding whether to make this work on a per-method, per-object,
or both basis.
"""
old_setup = obj.setUp
old_teardown = obj.tearDown
def new_setup(self):
self.roscore = subprocess.Popen(['roscore'])
old_setup(self)
def new_teardown(self):
old_teardown(self)
self.roscore.kill()
obj.setUp = new_setup
obj.tearDown = new_teardown
return obj
def launch_node(package, name):
"""Decorator to manage running a node and shutting it down gracefully.
"""
def launcher(func):
@functools.wraps(func)
def new_test(self):
node = roslaunch.core.Node(package, name)
launch = roslaunch.scriptapi.ROSLaunch()
launch.start()
# can we do something async here?
process = launch.launch(node)
temp = func(self)
process.stop()
return temp
return new_test
return launcher
|
Add utilities to make writing ros tests less painful.
|
Add utilities to make writing ros tests less painful.
|
Python
|
mit
|
jgkamat/buzzmobile,gtagency/buzzmobile,gtagency/buzzmobile,jgkamat/buzzmobile,gtagency/buzzmobile,jgkamat/buzzmobile
|
Add utilities to make writing ros tests less painful.
|
import roslaunch
import functools
import subprocess
def with_roscore(obj):
"""Decorator to run all tests in a testcase with their own roscore.
This wraps the setUp and tearDown methods so that a roscore process is
spun up first and torn down at the very end. This adds a small time
penalty, but it's worth it.
It's worth deciding whether to make this work on a per-method, per-object,
or both basis.
"""
old_setup = obj.setUp
old_teardown = obj.tearDown
def new_setup(self):
self.roscore = subprocess.Popen(['roscore'])
old_setup(self)
def new_teardown(self):
old_teardown(self)
self.roscore.kill()
obj.setUp = new_setup
obj.tearDown = new_teardown
return obj
def launch_node(package, name):
"""Decorator to manage running a node and shutting it down gracefully.
"""
def launcher(func):
@functools.wraps(func)
def new_test(self):
node = roslaunch.core.Node(package, name)
launch = roslaunch.scriptapi.ROSLaunch()
launch.start()
# can we do something async here?
process = launch.launch(node)
temp = func(self)
process.stop()
return temp
return new_test
return launcher
|
<commit_before><commit_msg>Add utilities to make writing ros tests less painful.<commit_after>
|
import roslaunch
import functools
import subprocess
def with_roscore(obj):
"""Decorator to run all tests in a testcase with their own roscore.
This wraps the setUp and tearDown methods so that a roscore process is
spun up first and torn down at the very end. This adds a small time
penalty, but it's worth it.
It's worth deciding whether to make this work on a per-method, per-object,
or both basis.
"""
old_setup = obj.setUp
old_teardown = obj.tearDown
def new_setup(self):
self.roscore = subprocess.Popen(['roscore'])
old_setup(self)
def new_teardown(self):
old_teardown(self)
self.roscore.kill()
obj.setUp = new_setup
obj.tearDown = new_teardown
return obj
def launch_node(package, name):
"""Decorator to manage running a node and shutting it down gracefully.
"""
def launcher(func):
@functools.wraps(func)
def new_test(self):
node = roslaunch.core.Node(package, name)
launch = roslaunch.scriptapi.ROSLaunch()
launch.start()
# can we do something async here?
process = launch.launch(node)
temp = func(self)
process.stop()
return temp
return new_test
return launcher
|
Add utilities to make writing ros tests less painful.import roslaunch
import functools
import subprocess
def with_roscore(obj):
"""Decorator to run all tests in a testcase with their own roscore.
This wraps the setUp and tearDown methods so that a roscore process is
spun up first and torn down at the very end. This adds a small time
penalty, but it's worth it.
It's worth deciding whether to make this work on a per-method, per-object,
or both basis.
"""
old_setup = obj.setUp
old_teardown = obj.tearDown
def new_setup(self):
self.roscore = subprocess.Popen(['roscore'])
old_setup(self)
def new_teardown(self):
old_teardown(self)
self.roscore.kill()
obj.setUp = new_setup
obj.tearDown = new_teardown
return obj
def launch_node(package, name):
"""Decorator to manage running a node and shutting it down gracefully.
"""
def launcher(func):
@functools.wraps(func)
def new_test(self):
node = roslaunch.core.Node(package, name)
launch = roslaunch.scriptapi.ROSLaunch()
launch.start()
# can we do something async here?
process = launch.launch(node)
temp = func(self)
process.stop()
return temp
return new_test
return launcher
|
<commit_before><commit_msg>Add utilities to make writing ros tests less painful.<commit_after>import roslaunch
import functools
import subprocess
def with_roscore(obj):
"""Decorator to run all tests in a testcase with their own roscore.
This wraps the setUp and tearDown methods so that a roscore process is
spun up first and torn down at the very end. This adds a small time
penalty, but it's worth it.
It's worth deciding whether to make this work on a per-method, per-object,
or both basis.
"""
old_setup = obj.setUp
old_teardown = obj.tearDown
def new_setup(self):
self.roscore = subprocess.Popen(['roscore'])
old_setup(self)
def new_teardown(self):
old_teardown(self)
self.roscore.kill()
obj.setUp = new_setup
obj.tearDown = new_teardown
return obj
def launch_node(package, name):
"""Decorator to manage running a node and shutting it down gracefully.
"""
def launcher(func):
@functools.wraps(func)
def new_test(self):
node = roslaunch.core.Node(package, name)
launch = roslaunch.scriptapi.ROSLaunch()
launch.start()
# can we do something async here?
process = launch.launch(node)
temp = func(self)
process.stop()
return temp
return new_test
return launcher
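Illustrative sketch: the decorators above in use. Package and node names are hypothetical, and a ROS environment with roscore on PATH plus an importable rostest_utils module are assumed.
import unittest
from rostest_utils import with_roscore, launch_node
@with_roscore
class TalkerTest(unittest.TestCase):
    @launch_node('rospy_tutorials', 'talker')
    def test_node_is_up(self):
        # The launched node runs for the duration of this test body.
        self.assertTrue(True)
if __name__ == '__main__':
    unittest.main()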
|
|
e64b399370ae4ddb21a969d1b8c664d2201790f9
|
engines/ep/management/sync_repl.py
|
engines/ep/management/sync_repl.py
|
#!/usr/bin/env python
""" Simple CLI for basic SyncWrite operations."""
from __future__ import print_function
import mc_bin_client
import sys
if len(sys.argv) < 7:
print("Usage: {} <host[:port]> <user> <password> <bucket> <op> <key> [value]".format(sys.argv[0]), file = sys.stderr)
sys.exit(1)
(host, port) = sys.argv[1].split(":")
if not port:
port = 11210
client = mc_bin_client.MemcachedClient(host=host, port=port)
client.enable_xerror()
client.hello("set_durable")
client.sasl_auth_plain(user=sys.argv[2], password=sys.argv[3])
client.bucket_select(sys.argv[4])
op = sys.argv[5]
key = sys.argv[6]
if len(sys.argv) > 7:
value = sys.argv[7]
if op == "get":
print (client.get(key))
elif op == "set":
print (client.set(key, 0, 0, value))
elif op == "setD":
print (client.setDurable(key, 0, 0, value))
elif op == "add":
print (client.add(key, 0, 0, value))
elif op == "addD":
print (client.addDurable(key, 0, 0, value))
elif op == "replace":
print (client.replace(key, 0, 0, value))
elif op == "replaceD":
print (client.replaceDurable(key, 0, 0, value))
elif op == "delete":
print (client.delete(key, 0, 0))
elif op == "deleteD":
print (client.deleteDurable(key, 0, 0))
else:
print("Unknown op '" + op + "'", file=sys.stderr)
|
Add basic CLI for SyncWrite requests
|
[SR] Add basic CLI for SyncWrite requests
Add sync_repl.py; a basic command-line interface for making Synchronous
Replication requests.
Example usage - to perform a set with durability requirements to key
'test' setting its value to 'XXX':
./sync_repl.py localhost:12000 Administrator asdasd default setD test XXX
Change-Id: Ie54fdc8151b11e53bbbe360b0363790a9cdd5889
Reviewed-on: http://review.couchbase.org/103444
Reviewed-by: Trond Norbye <60edd2ef23891a753f231b0c6f161dc634079a93@gmail.com>
Reviewed-by: Paolo Cocchi <9dd88c9f3e5cbab6e19a6c020f107e0c648ac956@couchbase.com>
Tested-by: Dave Rigby <a09264da4832c7ff1d3bf1608a19f4b870f93750@couchbase.com>
|
Python
|
bsd-3-clause
|
daverigby/kv_engine,daverigby/kv_engine,daverigby/kv_engine,daverigby/kv_engine
|
[SR] Add basic CLI for SyncWrite requests
Add sync_repl.py; a basic command-line interface for making Synchronous
Replication requests.
Example usage - to perform a set with durability requirements to key
'test' setting its value to 'XXX':
./sync_repl.py localhost:12000 Administrator asdasd default setD test XXX
Change-Id: Ie54fdc8151b11e53bbbe360b0363790a9cdd5889
Reviewed-on: http://review.couchbase.org/103444
Reviewed-by: Trond Norbye <60edd2ef23891a753f231b0c6f161dc634079a93@gmail.com>
Reviewed-by: Paolo Cocchi <9dd88c9f3e5cbab6e19a6c020f107e0c648ac956@couchbase.com>
Tested-by: Dave Rigby <a09264da4832c7ff1d3bf1608a19f4b870f93750@couchbase.com>
|
#!/usr/bin/env python
""" Simple CLI for basic SyncWrite operations."""
from __future__ import print_function
import mc_bin_client
import sys
if len(sys.argv) < 7:
print("Usage: {} <host[:port]> <user> <password> <bucket> <op> <key> [value]".format(sys.argv[0]), file = sys.stderr)
sys.exit(1)
(host, port) = sys.argv[1].split(":")
if not port:
port = 11210
client = mc_bin_client.MemcachedClient(host=host, port=port)
client.enable_xerror()
client.hello("set_durable")
client.sasl_auth_plain(user=sys.argv[2], password=sys.argv[3])
client.bucket_select(sys.argv[4])
op = sys.argv[5]
key = sys.argv[6]
if len(sys.argv) > 7:
value = sys.argv[7]
if op == "get":
print (client.get(key))
elif op == "set":
print (client.set(key, 0, 0, value))
elif op == "setD":
print (client.setDurable(key, 0, 0, value))
elif op == "add":
print (client.add(key, 0, 0, value))
elif op == "addD":
print (client.addDurable(key, 0, 0, value))
elif op == "replace":
print (client.replace(key, 0, 0, value))
elif op == "replaceD":
print (client.replaceDurable(key, 0, 0, value))
elif op == "delete":
print (client.delete(key, 0, 0))
elif op == "deleteD":
print (client.deleteDurable(key, 0, 0))
else:
print("Unknown op '" + op + "'", file=sys.stderr)
|
<commit_before><commit_msg>[SR] Add basic CLI for SyncWrite requests
Add sync_repl.py; a basic command-line interface for making Synchronous
Replication requests.
Example usage - to perform a set with durability requirements to key
'test' setting its value to 'XXX':
./sync_repl.py localhost:12000 Administrator asdasd default setD test XXX
Change-Id: Ie54fdc8151b11e53bbbe360b0363790a9cdd5889
Reviewed-on: http://review.couchbase.org/103444
Reviewed-by: Trond Norbye <60edd2ef23891a753f231b0c6f161dc634079a93@gmail.com>
Reviewed-by: Paolo Cocchi <9dd88c9f3e5cbab6e19a6c020f107e0c648ac956@couchbase.com>
Tested-by: Dave Rigby <a09264da4832c7ff1d3bf1608a19f4b870f93750@couchbase.com><commit_after>
|
#!/usr/bin/env python
""" Simple CLI for basic SyncWrite operations."""
from __future__ import print_function
import mc_bin_client
import sys
if len(sys.argv) < 7:
print("Usage: {} <host[:port]> <user> <password> <bucket> <op> <key> [value]".format(sys.argv[0]), file = sys.stderr)
sys.exit(1)
(host, port) = sys.argv[1].split(":")
if not port:
port = 11210
client = mc_bin_client.MemcachedClient(host=host, port=port)
client.enable_xerror()
client.hello("set_durable")
client.sasl_auth_plain(user=sys.argv[2], password=sys.argv[3])
client.bucket_select(sys.argv[4])
op = sys.argv[5]
key = sys.argv[6]
if len(sys.argv) > 7:
value = sys.argv[7]
if op == "get":
print (client.get(key))
elif op == "set":
print (client.set(key, 0, 0, value))
elif op == "setD":
print (client.setDurable(key, 0, 0, value))
elif op == "add":
print (client.add(key, 0, 0, value))
elif op == "addD":
print (client.addDurable(key, 0, 0, value))
elif op == "replace":
print (client.replace(key, 0, 0, value))
elif op == "replaceD":
print (client.replaceDurable(key, 0, 0, value))
elif op == "delete":
print (client.delete(key, 0, 0))
elif op == "deleteD":
print (client.deleteDurable(key, 0, 0))
else:
print("Unknown op '" + op + "'", file=sys.stderr)
|
[SR] Add basic CLI for SyncWrite requests
Add sync_repl.py; a basic command-line interface for making Synchronous
Replication requests.
Example usage - to perform a set with durability requirements to key
'test' setting its value to 'XXX':
./sync_repl.py localhost:12000 Administrator asdasd default setD test XXX
Change-Id: Ie54fdc8151b11e53bbbe360b0363790a9cdd5889
Reviewed-on: http://review.couchbase.org/103444
Reviewed-by: Trond Norbye <60edd2ef23891a753f231b0c6f161dc634079a93@gmail.com>
Reviewed-by: Paolo Cocchi <9dd88c9f3e5cbab6e19a6c020f107e0c648ac956@couchbase.com>
Tested-by: Dave Rigby <a09264da4832c7ff1d3bf1608a19f4b870f93750@couchbase.com>#!/usr/bin/env python
""" Simple CLI for basic SyncWrite operations."""
from __future__ import print_function
from collections import defaultdict
import mc_bin_client
import sys
if len(sys.argv) < 7:
print("Usage: {} <host[:port]> <user> <password> <bucket> <op> <key> [value]".format(sys.argv[0]), file = sys.stderr)
sys.exit(1)
(host, _, port) = sys.argv[1].partition(":")  # port is optional per the usage string
port = int(port) if port else 11210
client = mc_bin_client.MemcachedClient(host=host, port=port)
client.enable_xerror()
client.hello("set_durable")
client.sasl_auth_plain(user=sys.argv[2], password=sys.argv[3])
client.bucket_select(sys.argv[4])
op = sys.argv[5]
key = sys.argv[6]
if len(sys.argv) > 7:
value = sys.argv[7]
if op == "get":
print (client.get(key))
elif op == "set":
print (client.set(key, 0, 0, value))
elif op == "setD":
print (client.setDurable(key, 0, 0, value))
elif op == "add":
print (client.add(key, 0, 0, value))
elif op == "addD":
print (client.addDurable(key, 0, 0, value))
elif op == "replace":
print (client.replace(key, 0, 0, value))
elif op == "replaceD":
print (client.replaceDurable(key, 0, 0, value))
elif op == "delete":
print (client.delete(key, 0, 0))
elif op == "deleteD":
print (client.deleteDurable(key, 0, 0))
else:
print("Unknown op '" + op + "'", file=sys.stderr)
|
<commit_before><commit_msg>[SR] Add basic CLI for SyncWrite requests
Add sync_repl.py; a basic command-line interface for making Synchronous
Replication requests.
Example usage - to perform a set with durability requirements to key
'test' setting its value to 'XXX':
./sync_repl.py localhost:12000 Administrator asdasd default setD test XXX
Change-Id: Ie54fdc8151b11e53bbbe360b0363790a9cdd5889
Reviewed-on: http://review.couchbase.org/103444
Reviewed-by: Trond Norbye <60edd2ef23891a753f231b0c6f161dc634079a93@gmail.com>
Reviewed-by: Paolo Cocchi <9dd88c9f3e5cbab6e19a6c020f107e0c648ac956@couchbase.com>
Tested-by: Dave Rigby <a09264da4832c7ff1d3bf1608a19f4b870f93750@couchbase.com><commit_after>#!/usr/bin/env python
""" Simple CLI for basic SyncWrite operations."""
from __future__ import print_function
from collections import defaultdict
import mc_bin_client
import sys
if len(sys.argv) < 7:
print("Usage: {} <host[:port]> <user> <password> <bucket> <op> <key> [value]".format(sys.argv[0]), file = sys.stderr)
sys.exit(1)
(host, _, port) = sys.argv[1].partition(":")  # port is optional per the usage string
port = int(port) if port else 11210
client = mc_bin_client.MemcachedClient(host=host, port=port)
client.enable_xerror()
client.hello("set_durable")
client.sasl_auth_plain(user=sys.argv[2], password=sys.argv[3])
client.bucket_select(sys.argv[4])
op = sys.argv[5]
key = sys.argv[6]
if len(sys.argv) > 7:
value = sys.argv[7]
if op == "get":
print (client.get(key))
elif op == "set":
print (client.set(key, 0, 0, value))
elif op == "setD":
print (client.setDurable(key, 0, 0, value))
elif op == "add":
print (client.add(key, 0, 0, value))
elif op == "addD":
print (client.addDurable(key, 0, 0, value))
elif op == "replace":
print (client.replace(key, 0, 0, value))
elif op == "replaceD":
print (client.replaceDurable(key, 0, 0, value))
elif op == "delete":
print (client.delete(key, 0, 0))
elif op == "deleteD":
print (client.deleteDurable(key, 0, 0))
else:
print("Unknown op '" + op + "'", file=sys.stderr)
|
|
e87f5c80d7723480544374c175cacc5540f9f6ab
|
Lib/test/leakers/test_generator_cycle.py
|
Lib/test/leakers/test_generator_cycle.py
|
# This leaks since the introduction of yield-expr and the use of generators
# as coroutines, trunk revision 39239. The cycle-GC doesn't seem to pick up
# the cycle, or decides it can't clean it up.
def leak():
def gen():
while True:
yield g
g = gen()
|
Add an example of a generator->freevar->cell->generator reference-cycle that doesn't get cleaned up and thus leaks.
|
Add an example of a generator->freevar->cell->generator reference-cycle that
doesn't get cleaned up and thus leaks.
|
Python
|
mit
|
sk-/python2.7-type-annotator,sk-/python2.7-type-annotator,sk-/python2.7-type-annotator
|
Add an example of a generator->freevar->cell->generator reference-cycle that
doesn't get cleaned up and thus leaks.
|
# This leaks since the introduction of yield-expr and the use of generators
# as coroutines, trunk revision 39239. The cycle-GC doesn't seem to pick up
# the cycle, or decides it can't clean it up.
def leak():
def gen():
while True:
yield g
g = gen()
|
<commit_before><commit_msg>Add an example of a generator->freevar->cell->generator reference-cycle that
doesn't get cleaned up and thus leaks.<commit_after>
|
# This leaks since the introduction of yield-expr and the use of generators
# as coroutines, trunk revision 39239. The cycle-GC doesn't seem to pick up
# the cycle, or decides it can't clean it up.
def leak():
def gen():
while True:
yield g
g = gen()
|
Add an example of a generator->freevar->cell->generator reference-cycle that
doesn't get cleaned up and thus leaks.
# This leaks since the introduction of yield-expr and the use of generators
# as coroutines, trunk revision 39239. The cycle-GC doesn't seem to pick up
# the cycle, or decides it can't clean it up.
def leak():
def gen():
while True:
yield g
g = gen()
|
<commit_before><commit_msg>Add an example of a generator->freevar->cell->generator reference-cycle that
doesn't get cleaned up and thus leaks.<commit_after>
# This leaks since the introduction of yield-expr and the use of generators
# as coroutines, trunk revision 39239. The cycle-GC doesn't seem to pick up
# the cycle, or decides it can't clean it up.
def leak():
def gen():
while True:
yield g
g = gen()
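Whether the cycle-GC reclaims this generator -> freevar -> cell -> generator cycle can be observed with a weak reference to the generator; a small sketch, assuming only the standard gc and weakref modules:
import gc
import weakref

def leak():
    def gen():
        while True:
            yield g
    g = gen()
    return weakref.ref(g)

ref = leak()
gc.collect()
# If the collector handled the cycle, the weak reference is now dead;
# if the generator leaked (as described above), it is still alive.
print("collected" if ref() is None else "leaked")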
|
|
7529bbffd349c57d3ec527a91b620252d0814414
|
kolibri/core/test/test_key_urls.py
|
kolibri/core/test/test_key_urls.py
|
from __future__ import absolute_import, print_function, unicode_literals
from django.core.urlresolvers import reverse
from rest_framework.test import APITestCase
from kolibri.auth.models import Facility, DeviceOwner
class KolibriTagNavigationTestCase(APITestCase):
def test_redirect_to_setup_wizard(self):
response = self.client.get("/")
self.assertEqual(response.status_code, 302)
self.assertEqual(response.get("location"), reverse('kolibri:setupwizardplugin:setupwizard'))
def test_redirect_to_learn_root(self):
Facility.objects.create(name="QQQ")
DeviceOwner.objects.create(username="admin", password="***")
response = self.client.get("/")
self.assertEqual(response.status_code, 302)
self.assertEqual(response.get("location"), reverse('kolibri:learnplugin:learn'))
|
Test that redirects from root URL are performed appropriately.
|
Test that redirects from root URL are performed appropriately.
|
Python
|
mit
|
mrpau/kolibri,rtibbles/kolibri,christianmemije/kolibri,jonboiser/kolibri,benjaoming/kolibri,jayoshih/kolibri,jtamiace/kolibri,learningequality/kolibri,jonboiser/kolibri,christianmemije/kolibri,jamalex/kolibri,DXCanas/kolibri,aronasorman/kolibri,whitzhu/kolibri,DXCanas/kolibri,lyw07/kolibri,MingDai/kolibri,lyw07/kolibri,whitzhu/kolibri,MingDai/kolibri,indirectlylit/kolibri,lyw07/kolibri,66eli77/kolibri,mrpau/kolibri,rtibbles/kolibri,mrpau/kolibri,aronasorman/kolibri,ralphiee22/kolibri,ralphiee22/kolibri,indirectlylit/kolibri,aronasorman/kolibri,DXCanas/kolibri,lyw07/kolibri,learningequality/kolibri,indirectlylit/kolibri,learningequality/kolibri,benjaoming/kolibri,ralphiee22/kolibri,66eli77/kolibri,jamalex/kolibri,rtibbles/kolibri,christianmemije/kolibri,benjaoming/kolibri,jtamiace/kolibri,jayoshih/kolibri,jtamiace/kolibri,jamalex/kolibri,DXCanas/kolibri,jonboiser/kolibri,benjaoming/kolibri,aronasorman/kolibri,66eli77/kolibri,MingDai/kolibri,jamalex/kolibri,jayoshih/kolibri,MingDai/kolibri,jayoshih/kolibri,jonboiser/kolibri,rtibbles/kolibri,christianmemije/kolibri,66eli77/kolibri,indirectlylit/kolibri,whitzhu/kolibri,ralphiee22/kolibri,jtamiace/kolibri,whitzhu/kolibri,mrpau/kolibri,learningequality/kolibri
|
Test that redirects from root URL are performed appropriately.
|
from __future__ import absolute_import, print_function, unicode_literals
from django.core.urlresolvers import reverse
from rest_framework.test import APITestCase
from kolibri.auth.models import Facility, DeviceOwner
class KolibriTagNavigationTestCase(APITestCase):
def test_redirect_to_setup_wizard(self):
response = self.client.get("/")
self.assertEqual(response.status_code, 302)
self.assertEqual(response.get("location"), reverse('kolibri:setupwizardplugin:setupwizard'))
def test_redirect_to_learn_root(self):
Facility.objects.create(name="QQQ")
DeviceOwner.objects.create(username="admin", password="***")
response = self.client.get("/")
self.assertEqual(response.status_code, 302)
self.assertEqual(response.get("location"), reverse('kolibri:learnplugin:learn'))
|
<commit_before><commit_msg>Test that redirects from root URL are performed appropriately.<commit_after>
|
from __future__ import absolute_import, print_function, unicode_literals
from django.core.urlresolvers import reverse
from rest_framework.test import APITestCase
from kolibri.auth.models import Facility, DeviceOwner
class KolibriTagNavigationTestCase(APITestCase):
def test_redirect_to_setup_wizard(self):
response = self.client.get("/")
self.assertEqual(response.status_code, 302)
self.assertEqual(response.get("location"), reverse('kolibri:setupwizardplugin:setupwizard'))
def test_redirect_to_learn_root(self):
Facility.objects.create(name="QQQ")
DeviceOwner.objects.create(username="admin", password="***")
response = self.client.get("/")
self.assertEqual(response.status_code, 302)
self.assertEqual(response.get("location"), reverse('kolibri:learnplugin:learn'))
|
Test that redirects from root URL are performed appropriately.from __future__ import absolute_import, print_function, unicode_literals
from django.core.urlresolvers import reverse
from rest_framework.test import APITestCase
from kolibri.auth.models import Facility, DeviceOwner
class KolibriTagNavigationTestCase(APITestCase):
def test_redirect_to_setup_wizard(self):
response = self.client.get("/")
self.assertEqual(response.status_code, 302)
self.assertEqual(response.get("location"), reverse('kolibri:setupwizardplugin:setupwizard'))
def test_redirect_to_learn_root(self):
Facility.objects.create(name="QQQ")
DeviceOwner.objects.create(username="admin", password="***")
response = self.client.get("/")
self.assertEqual(response.status_code, 302)
self.assertEqual(response.get("location"), reverse('kolibri:learnplugin:learn'))
|
<commit_before><commit_msg>Test that redirects from root URL are performed appropriately.<commit_after>from __future__ import absolute_import, print_function, unicode_literals
from django.core.urlresolvers import reverse
from rest_framework.test import APITestCase
from kolibri.auth.models import Facility, DeviceOwner
class KolibriTagNavigationTestCase(APITestCase):
def test_redirect_to_setup_wizard(self):
response = self.client.get("/")
self.assertEqual(response.status_code, 302)
self.assertEqual(response.get("location"), reverse('kolibri:setupwizardplugin:setupwizard'))
def test_redirect_to_learn_root(self):
Facility.objects.create(name="QQQ")
DeviceOwner.objects.create(username="admin", password="***")
response = self.client.get("/")
self.assertEqual(response.status_code, 302)
self.assertEqual(response.get("location"), reverse('kolibri:learnplugin:learn'))
|
|
8b778a0564d424fc4c9e91da90ced7e8cec9b002
|
lava/utils/data_tables/backends.py
|
lava/utils/data_tables/backends.py
|
# Copyright (C) 2012 Linaro Limited
#
# Author: Zygmunt Krynicki <zygmunt.krynicki@linaro.org>
#
# This file is part of LAVA Server.
#
# LAVA Server is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License version 3
# as published by the Free Software Foundation
#
# LAVA Server is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with LAVA Server. If not, see <http://www.gnu.org/licenses/>.
from django.core.exceptions import ImproperlyConfigured
from lava.utils.data_tables.interface import IBackend
class _BackendBase(IBackend):
"""
Common code for data backends to data tables
"""
def process(self, query):
return {
'sEcho': query.sEcho,
'sColumns': query.sColumns
}
class ArrayBackend(_BackendBase):
"""
Array backend to data tables
Stores all data in a plain python list. All filtering is handled in the
running process. It is suitable for very small data sets only but has the
advantage of being unrelated to any databases.
"""
def __init__(self, data):
self.data = data
def process(self, query):
# Get the basic response structure
response = super(ArrayBackend, self).process(query)
# 0) Copy original data
# TODO: add support for lazy copy (only if really needed)
data = list(self.data)
response['iTotalRecords'] = len(data)
# 1) Apply search/filtering
if query.sSearch:
if query.bRegex:
raise NotImplementedError("Searching with regular expressions is not implemented")
else:
data = [row for row in data if any((query.sSearch in unicode(cell) for cell in row))]
# Remember how many records matched filtering
response['iTotalDisplayRecords'] = len(data)
# TODO: Support regex search
# TODO: Support per-column search
# 2) Apply sorting
for column_index, order in query.sorting_columns:
data.sort(key=lambda row: row[column_index], reverse=order=='desc')
# 3) Apply offset/limit
data = data[query.iDisplayStart:query.iDisplayStart + query.iDisplayLength]
# Remember the subset of the displayed data
response['aaData'] = data
return response
|
Add array backend for data tables
|
Add array backend for data tables
This simple backend performs all operations inside python's runtime
memory. It is suitable for working with small data sets and experimenting
with data-tables.
The actual backend is not fully finished as it is not essential. It lacks
per-column search, regexp support and multi-column sort (it is broken as
each sort operation is applied in reversed order)
|
Python
|
agpl-3.0
|
OSSystems/lava-server,Linaro/lava-server,OSSystems/lava-server,OSSystems/lava-server,Linaro/lava-server,Linaro/lava-server,Linaro/lava-server
|
Add array backend for data tables
This simple backend performs all operations inside python's runtime
memory. It is suitable for working with small data sets and experimenting
with data-tables.
The actual backend is not fully finished as it is not essential. It lacks
per-column search, regexp support and multi-column sort (it is broken as
each sort operation is applied in reversed order)
|
# Copyright (C) 2012 Linaro Limited
#
# Author: Zygmunt Krynicki <zygmunt.krynicki@linaro.org>
#
# This file is part of LAVA Server.
#
# LAVA Server is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License version 3
# as published by the Free Software Foundation
#
# LAVA Server is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with LAVA Server. If not, see <http://www.gnu.org/licenses/>.
from django.core.exceptions import ImproperlyConfigured
from lava.utils.data_tables.interface import IBackend
class _BackendBase(IBackend):
"""
Common code for data backends to data tables
"""
def process(self, query):
return {
'sEcho': query.sEcho,
'sColumns': query.sColumns
}
class ArrayBackend(_BackendBase):
"""
Array backend to data tables
Stores all data in a plain python list. All filtering is handled in the
running process. It is suitable for very small data sets only but has the
advantage of being unrelated to any databases.
"""
def __init__(self, data):
self.data = data
def process(self, query):
# Get the basic response structure
response = super(ArrayBackend, self).process(query)
# 0) Copy original data
# TODO: add support for lazy copy (only if really needed)
data = list(self.data)
response['iTotalRecords'] = len(data)
# 1) Apply search/filtering
if query.sSearch:
if query.bRegex:
raise NotImplementedError("Searching with regular expressions is not implemented")
else:
data = [row for row in data if any((query.sSearch in unicode(cell) for cell in row))]
# Remember how many records matched filtering
response['iTotalDisplayRecords'] = len(data)
# TODO: Support regex search
# TODO: Support per-column search
# 2) Apply sorting
for column_index, order in query.sorting_columns:
data.sort(key=lambda row: row[column_index], reverse=order=='desc')
# 3) Apply offset/limit
data = data[query.iDisplayStart:query.iDisplayStart + query.iDisplayLength]
# Remember the subset of the displayed data
response['aaData'] = data
return response
|
<commit_before><commit_msg>Add array backend for data tables
This simple backend performs all operations inside python's runtime
memory. It is suitable for working with small data sets and experimenting
with data-tables.
The actual backend is not fully finished as it is not essential. It lacks
per-column search, regexp support and multi-column sort (it is broken as
each sort operation is applied in reversed order)<commit_after>
|
# Copyright (C) 2012 Linaro Limited
#
# Author: Zygmunt Krynicki <zygmunt.krynicki@linaro.org>
#
# This file is part of LAVA Server.
#
# LAVA Server is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License version 3
# as published by the Free Software Foundation
#
# LAVA Server is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with LAVA Server. If not, see <http://www.gnu.org/licenses/>.
from django.core.exceptions import ImproperlyConfigured
from lava.utils.data_tables.interface import IBackend
class _BackendBase(IBackend):
"""
Common code for data backends to data tables
"""
def process(self, query):
return {
'sEcho': query.sEcho,
'sColumns': query.sColumns
}
class ArrayBackend(_BackendBase):
"""
Array backend to data tables
Stores all data in a plain python list. All filtering is handled in the
running process. It is suitable for very small data sets only but has the
advantage of being unrelated to any databases.
"""
def __init__(self, data):
self.data = data
def process(self, query):
# Get the basic response structure
response = super(ArrayBackend, self).process(query)
# 0) Copy original data
# TODO: add support for lazy copy (only if really needed)
data = list(self.data)
response['iTotalRecords'] = len(data)
# 1) Apply search/filtering
if query.sSearch:
if query.bRegex:
raise NotImplementedError("Searching with regular expressions is not implemented")
else:
data = [row for row in data if any((query.sSearch in unicode(cell) for cell in row))]
# Remember how many records matched filtering
response['iTotalDisplayRecords'] = len(data)
# TODO: Support regex search
# TODO: Support per-column search
# 2) Apply sorting
for column_index, order in query.sorting_columns:
data.sort(key=lambda row: row[column_index], reverse=order=='desc')
# 3) Apply offset/limit
data = data[query.iDisplayStart:query.iDisplayStart + query.iDisplayLength]
# Remember the subset of the displayed data
response['aaData'] = data
return response
|
Add array backend for data tables
This simple backend performs all operations inside python's runtime
memory. It is suitable for working with small data sets and experimenting
with data-tables.
The actual backend is not fully finished as it is not essential. It lacks
per-column search, regexp support and multi-column sort (it is broken as
each sort operation is applied in reversed order)# Copyright (C) 2012 Linaro Limited
#
# Author: Zygmunt Krynicki <zygmunt.krynicki@linaro.org>
#
# This file is part of LAVA Server.
#
# LAVA Server is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License version 3
# as published by the Free Software Foundation
#
# LAVA Server is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with LAVA Server. If not, see <http://www.gnu.org/licenses/>.
from django.core.exceptions import ImproperlyConfigured
from lava.utils.data_tables.interface import IBackend
class _BackendBase(IBackend):
"""
Common code for data backends to data tables
"""
def process(self, query):
return {
'sEcho': query.sEcho,
'sColumns': query.sColumns
}
class ArrayBackend(_BackendBase):
"""
Array backend to data tables
Stores all data in a plain python list. All filtering is handled in the
running process. It is suitable for very small data sets only but has the
advantage of being unrelated to any databases.
"""
def __init__(self, data):
self.data = data
def process(self, query):
# Get the basic response structure
response = super(ArrayBackend, self).process(query)
# 0) Copy original data
# TODO: add support for lazy copy (only if really needed)
data = list(self.data)
response['iTotalRecords'] = len(data)
# 1) Apply search/filtering
if query.sSearch:
if query.bRegex:
raise NotImplementedError("Searching with regular expressions is not implemented")
else:
data = [row for row in data if any((query.sSearch in unicode(cell) for cell in row))]
# Remember how many records matched filtering
response['iTotalDisplayRecords'] = len(data)
# TODO: Support regex search
# TODO: Support per-column search
# 2) Apply sorting
for column_index, order in query.sorting_columns:
data.sort(key=lambda row: row[column_index], reverse=order=='desc')
# 3) Apply offset/limit
data = data[query.iDisplayStart:query.iDisplayStart + query.iDisplayLength]
# Remember the subset of the displayed data
response['aaData'] = data
return response
|
<commit_before><commit_msg>Add array backend for data tables
This simple backend performs all operations inside python's runtime
memory. It is suitable for working with small data sets and experimenting
with data-tables.
The actual backend is not fully finished as it is not essential. It lacks
per-column search, regexp support and multi-column sort (it is broken as
each sort operation is applied in reversed order)<commit_after># Copyright (C) 2012 Linaro Limited
#
# Author: Zygmunt Krynicki <zygmunt.krynicki@linaro.org>
#
# This file is part of LAVA Server.
#
# LAVA Server is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License version 3
# as published by the Free Software Foundation
#
# LAVA Server is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with LAVA Server. If not, see <http://www.gnu.org/licenses/>.
from django.core.exceptions import ImproperlyConfigured
from lava.utils.data_tables.interface import IBackend
class _BackendBase(IBackend):
"""
Common code for data backends to data tables
"""
def process(self, query):
return {
'sEcho': query.sEcho,
'sColumns': query.sColumns
}
class ArrayBackend(_BackendBase):
"""
Array backend to data tables
Stores all data in a plain python list. All filtering is handled in the
running process. It is suitable for very small data sets only but has the
advantage of being unrelated to any databases.
"""
def __init__(self, data):
self.data = data
def process(self, query):
# Get the basic response structure
response = super(ArrayBackend, self).process(query)
# 0) Copy original data
# TODO: add support for lazy copy (only if really needed)
data = list(self.data)
response['iTotalRecords'] = len(data)
# 1) Apply search/filtering
if query.sSearch:
if query.bRegex:
raise NotImplementedError("Searching with regular expressions is not implemented")
else:
data = [row for row in data if any((query.sSearch in unicode(cell) for cell in row))]
# Remember how many records matched filtering
response['iTotalDisplayRecords'] = len(data)
# TODO: Support regex search
# TODO: Support per-column search
# 2) Apply sorting
for column_index, order in query.sorting_columns:
data.sort(key=lambda row: row[column_index], reverse=order=='desc')
# 3) Apply offset/limit
data = data[query.iDisplayStart:query.iDisplayStart + query.iDisplayLength]
# Remember the subset of the displayed data
response['aaData'] = data
return response
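The multi-column sort flagged as broken in the commit message has a standard remedy: list.sort is stable, so applying the per-column sorts from least to most significant column (i.e. iterating the requested columns in reverse) yields the correct combined ordering. A sketch against the same query object:
# 2) Apply sorting (stable multi-column variant)
for column_index, order in reversed(query.sorting_columns):
    data.sort(key=lambda row: row[column_index], reverse=(order == 'desc'))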
|
|
b635b6f17e8fceba72e48ab074120d3bddd9388d
|
tools/process_EXO.py
|
tools/process_EXO.py
|
# EXO Conversion Script
# Using specification v1.01
# J.C. Loach (2013)
# Textual replacements
remove = {r"\centering": "", r"newline": "", r"tabular": ""}
# Main loop through data file
with open("exo_data.txt") as f_in:
for line in f_in:
for i, j in remove.iteritems():
line = line.replace(i,j)
line = line.split("&")
line = [i.strip() for i in line]
with open("EXO_" + line[0].zfill(3) + "_v1.01.json", 'w') as f_out:
f_out.write("{\n")
f_out.write("\n")
f_out.write(" \"type\": \"measurement\",\n")
f_out.write("\n")
f_out.write(" \"grouping\": \"EXO(2008)\",\n")
f_out.write("\n")
# Sample
f_out.write(" \"sample\": {\n")
f_out.write(" \"m_name\": \"" + line[1] + "\",\n")
f_out.write(" \"m_description\": \"" + line[1] + "\",\n")
f_out.write(" \"m_id\": \"Table 3 Measurement " + line[0] + "\",\n")
f_out.write(" \"m_source\": \"\",\n")
f_out.write(" \"m_owner\": {\n")
f_out.write(" \"name\": \"\",\n")
f_out.write(" \"contact\": \"\"\n")
f_out.write(" }\n")
f_out.write(" },\n")
f_out.write("\n")
# Measurement
# ...
# Data source
# ...
f_out.write("}\n")
|
Add tool for converting EXO data from latex to JSON
|
Add tool for converting EXO data from latex to JSON
|
Python
|
apache-2.0
|
chrisstanford/persephone-darkside,nepahwin/persephone,nepahwin/persephone,chrisstanford/persephone-darkside
|
Add tool for converting EXO data from latex to JSON
|
# EXO Conversion Script
# Using specification v1.01
# J.C. Loach (2013)
# Textual replacements
remove = {r"\centering": "", r"newline": "", r"tabular": ""}
# Main loop through data file
with open("exo_data.txt") as f_in:
for line in f_in:
for i, j in remove.iteritems():
line = line.replace(i,j)
line = line.split("&")
line = [i.strip() for i in line]
with open("EXO_" + line[0].zfill(3) + "_v1.01.json", 'w') as f_out:
f_out.write("{\n")
f_out.write("\n")
f_out.write(" \"type\": \"measurement\",\n")
f_out.write("\n")
f_out.write(" \"grouping\": \"EXO(2008)\",\n")
f_out.write("\n")
# Sample
f_out.write(" \"sample\": {\n")
f_out.write(" \"m_name\": \"" + line[1] + "\",\n")
f_out.write(" \"m_description\": \"" + line[1] + "\",\n")
f_out.write(" \"m_id\": \"Table 3 Measurement " + line[0] + "\",\n")
f_out.write(" \"m_source\": \"\",\n")
f_out.write(" \"m_owner\": {\n")
f_out.write(" \"name\": \"\",\n")
f_out.write(" \"contact\": \"\"\n")
f_out.write(" }\n")
f_out.write(" },\n")
f_out.write("\n")
# Measurement
# ...
# Data source
# ...
f_out.write("}\n")
|
<commit_before><commit_msg>Add tool for converting EXO data from latex to JSON<commit_after>
|
# EXO Conversion Script
# Using specification v1.01
# J.C. Loach (2013)
# Textual replacements
remove = {r"\centering": "", r"newline": "", r"tabular": ""}
# Main loop through data file
with open("exo_data.txt") as f_in:
for line in f_in:
for i, j in remove.iteritems():
line = line.replace(i,j)
line = line.split("&")
line = [i.strip() for i in line]
with open("EXO_" + line[0].zfill(3) + "_v1.01.json", 'w') as f_out:
f_out.write("{\n")
f_out.write("\n")
f_out.write(" \"type\": \"measurement\",\n")
f_out.write("\n")
f_out.write(" \"grouping\": \"EXO(2008)\",\n")
f_out.write("\n")
# Sample
f_out.write(" \"sample\": {\n")
f_out.write(" \"m_name\": \"" + line[1] + "\",\n")
f_out.write(" \"m_description\": \"" + line[1] + "\",\n")
f_out.write(" \"m_id\": \"Table 3 Measurement " + line[0] + "\",\n")
f_out.write(" \"m_source\": \"\",\n")
f_out.write(" \"m_owner\": {\n")
f_out.write(" \"name\": \"\",\n")
f_out.write(" \"contact\": \"\"\n")
f_out.write(" }\n")
f_out.write(" },\n")
f_out.write("\n")
# Measurement
# ...
# Data source
# ...
f_out.write("}\n")
|
Add tool for converting EXO data from latex to JSON
# EXO Conversion Script
# Using specification v1.01
# J.C. Loach (2013)
# Textual replacements
remove = {r"\centering": "", r"newline": "", r"tabular": ""}
# Main loop through data file
with open("exo_data.txt") as f_in:
for line in f_in:
for i, j in remove.iteritems():
line = line.replace(i,j)
line = line.split("&")
line = [i.strip() for i in line]
with open("EXO_" + line[0].zfill(3) + "_v1.01.json", 'w') as f_out:
f_out.write("{\n")
f_out.write("\n")
f_out.write(" \"type\": \"measurement\",\n")
f_out.write("\n")
f_out.write(" \"grouping\": \"EXO(2008)\",\n")
f_out.write("\n")
# Sample
f_out.write(" \"sample\": {\n")
f_out.write(" \"m_name\": \"" + line[1] + "\",\n")
f_out.write(" \"m_description\": \"" + line[1] + "\",\n")
f_out.write(" \"m_id\": \"Table 3 Measurement " + line[0] + "\",\n")
f_out.write(" \"m_source\": \"\",\n")
f_out.write(" \"m_owner\": {\n")
f_out.write(" \"name\": \"\",\n")
f_out.write(" \"contact\": \"\"\n")
f_out.write(" }\n")
f_out.write(" },\n")
f_out.write("\n")
# Measurement
# ...
# Data source
# ...
f_out.write("}\n")
|
<commit_before><commit_msg>Add tool for converting EXO data from latex to JSON<commit_after>
# EXO Conversion Script
# Using specification v1.01
# J.C. Loach (2013)
# Textual replacements
remove = {r"\centering": "", r"newline": "", r"tabular": ""}
# Main loop through data file
with open("exo_data.txt") as f_in:
for line in f_in:
for i, j in remove.iteritems():
line = line.replace(i,j)
line = line.split("&")
line = [i.strip() for i in line]
with open("EXO_" + line[0].zfill(3) + "_v1.01.json", 'w') as f_out:
f_out.write("{\n")
f_out.write("\n")
f_out.write(" \"type\": \"measurement\",\n")
f_out.write("\n")
f_out.write(" \"grouping\": \"EXO(2008)\",\n")
f_out.write("\n")
# Sample
f_out.write(" \"sample\": {\n")
f_out.write(" \"m_name\": \"" + line[1] + "\",\n")
f_out.write(" \"m_description\": \"" + line[1] + "\",\n")
f_out.write(" \"m_id\": \"Table 3 Measurement " + line[0] + "\",\n")
f_out.write(" \"m_source\": \"\",\n")
f_out.write(" \"m_owner\": {\n")
f_out.write(" \"name\": \"\",\n")
f_out.write(" \"contact\": \"\"\n")
f_out.write(" }\n")
f_out.write(" },\n")
f_out.write("\n")
# Measurement
# ...
# Data source
# ...
f_out.write("}\n")
|
|
030e558b3b52900b8fa2cea9a92c055de3ec5b44
|
corehq/apps/domain/management/commands/migrate_domain_countries.py
|
corehq/apps/domain/management/commands/migrate_domain_countries.py
|
from django.core.management.base import LabelCommand
from django_countries.countries import COUNTRIES
from corehq.apps.domain.models import Domain
class Command(LabelCommand):
help = "Migrates old django domain countries from string to list. Sept 2014."
args = ""
label = ""
def handle(self, *args, **options):
print "Migrating Domain countries"
country_lookup = {x[1].lower(): x[0] for x in COUNTRIES}
for domain in Domain.get_all():
try:
if isinstance(domain.deployment.country, basestring):
if domain.deployment.country in country_lookup.keys():
abbr = [country_lookup[domain.deployment.country.lower()]]
else:
abbr = []
domain.deployment.country = abbr
domain.save()
except Exception as e:
print "There was an error migrating the domain named %s." % domain.name
print "Error: %s", e
|
Add management command to migrate countries to list
|
Add management command to migrate countries to list
|
Python
|
bsd-3-clause
|
puttarajubr/commcare-hq,puttarajubr/commcare-hq,qedsoftware/commcare-hq,puttarajubr/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,qedsoftware/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,qedsoftware/commcare-hq,dimagi/commcare-hq,qedsoftware/commcare-hq,qedsoftware/commcare-hq,puttarajubr/commcare-hq
|
Add management command to migrate countries to list
|
from django.core.management.base import LabelCommand
from django_countries.countries import COUNTRIES
from corehq.apps.domain.models import Domain
class Command(LabelCommand):
help = "Migrates old django domain countries from string to list. Sept 2014."
args = ""
label = ""
def handle(self, *args, **options):
print "Migrating Domain countries"
country_lookup = {x[1].lower(): x[0] for x in COUNTRIES}
for domain in Domain.get_all():
try:
if isinstance(domain.deployment.country, basestring):
if domain.deployment.country in country_lookup.keys():
abbr = [country_lookup[domain.deployment.country.lower()]]
else:
abbr = []
domain.deployment.country = abbr
domain.save()
except Exception as e:
print "There was an error migrating the domain named %s." % domain.name
print "Error: %s", e
|
<commit_before><commit_msg>Add management command to migrate countries to list<commit_after>
|
from django.core.management.base import LabelCommand
from django_countries.countries import COUNTRIES
from corehq.apps.domain.models import Domain
class Command(LabelCommand):
help = "Migrates old django domain countries from string to list. Sept 2014."
args = ""
label = ""
def handle(self, *args, **options):
print "Migrating Domain countries"
country_lookup = {x[1].lower(): x[0] for x in COUNTRIES}
for domain in Domain.get_all():
try:
if isinstance(domain.deployment.country, basestring):
if domain.deployment.country in country_lookup.keys():
abbr = [country_lookup[domain.deployment.country.lower()]]
else:
abbr = []
domain.deployment.country = abbr
domain.save()
except Exception as e:
print "There was an error migrating the domain named %s." % domain.name
print "Error: %s", e
|
Add management command to migrate countries to listfrom django.core.management.base import LabelCommand
from django_countries.countries import COUNTRIES
from corehq.apps.domain.models import Domain
class Command(LabelCommand):
help = "Migrates old django domain countries from string to list. Sept 2014."
args = ""
label = ""
def handle(self, *args, **options):
print "Migrating Domain countries"
country_lookup = {x[1].lower(): x[0] for x in COUNTRIES}
for domain in Domain.get_all():
try:
if isinstance(domain.deployment.country, basestring):
if domain.deployment.country in country_lookup.keys():
abbr = [country_lookup[domain.deployment.country.lower()]]
else:
abbr = []
domain.deployment.country = abbr
domain.save()
except Exception as e:
print "There was an error migrating the domain named %s." % domain.name
print "Error: %s", e
|
<commit_before><commit_msg>Add management command to migrate countries to list<commit_after>from django.core.management.base import LabelCommand
from django_countries.countries import COUNTRIES
from corehq.apps.domain.models import Domain
class Command(LabelCommand):
help = "Migrates old django domain countries from string to list. Sept 2014."
args = ""
label = ""
def handle(self, *args, **options):
print "Migrating Domain countries"
country_lookup = {x[1].lower(): x[0] for x in COUNTRIES}
for domain in Domain.get_all():
try:
if isinstance(domain.deployment.country, basestring):
if domain.deployment.country in country_lookup.keys():
abbr = [country_lookup[domain.deployment.country.lower()]]
else:
abbr = []
domain.deployment.country = abbr
domain.save()
except Exception as e:
print "There was an error migrating the domain named %s." % domain.name
print "Error: %s", e
|
|
2e9c58f00db55e12f3a9df1a71fe33fbe440d7ce
|
corehq/apps/domain/management/commands/migrate_domain_countries.py
|
corehq/apps/domain/management/commands/migrate_domain_countries.py
|
from django.core.management.base import LabelCommand
from django_countries.countries import COUNTRIES
from corehq.apps.domain.models import Domain
class Command(LabelCommand):
help = "Migrates old django domain countries from string to list. Sept 2014."
args = ""
label = ""
def handle(self, *args, **options):
print "Migrating Domain countries"
country_lookup = {x[1].lower(): x[0] for x in COUNTRIES}
for domain in Domain.get_all():
try:
if isinstance(domain.deployment.country, basestring):
if domain.deployment.country in country_lookup.keys():
abbr = [country_lookup[domain.deployment.country.lower()]]
else:
abbr = []
domain.deployment.country = abbr
domain.save()
except Exception as e:
print "There was an error migrating the domain named %s." % domain.name
print "Error: %s", e
|
from django.core.management.base import LabelCommand
from django_countries.countries import COUNTRIES
from corehq.apps.domain.models import Domain
class Command(LabelCommand):
help = "Migrates old django domain countries from string to list. Sept 2014."
args = ""
label = ""
def handle(self, *args, **options):
print "Migrating Domain countries"
country_lookup = {x[1].lower(): x[0] for x in COUNTRIES}
# Special cases (keys lowercased to match the lookups below)
country_lookup["usa"] = country_lookup["united states"]
country_lookup["california"] = country_lookup["united states"]
country_lookup["wales"] = country_lookup["united kingdom"]
for domain in Domain.get_all():
try:
if isinstance(domain.deployment.country, basestring):
if ',' in domain.deployment.country:
countries = domain.deployment.country.split(',')
elif ' and ' in domain.deployment.country:
countries = domain.deployment.country.split(' and ')
else:
countries = [domain.deployment.country]
abbr = []
for country in countries:
country = country.strip().lower()
if country in country_lookup.keys():
abbr.append(country_lookup[country])
domain.deployment.countries = abbr
domain.save()
except Exception as e:
print "There was an error migrating the domain named %s." % domain.name
print "Error: %s", e
|
Add migration for deployment country to countries
|
Add migration for deployment country to countries
|
Python
|
bsd-3-clause
|
qedsoftware/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,qedsoftware/commcare-hq,puttarajubr/commcare-hq,qedsoftware/commcare-hq,puttarajubr/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,puttarajubr/commcare-hq,qedsoftware/commcare-hq,puttarajubr/commcare-hq,qedsoftware/commcare-hq
|
from django.core.management.base import LabelCommand
from django_countries.countries import COUNTRIES
from corehq.apps.domain.models import Domain
class Command(LabelCommand):
help = "Migrates old django domain countries from string to list. Sept 2014."
args = ""
label = ""
def handle(self, *args, **options):
print "Migrating Domain countries"
country_lookup = {x[1].lower(): x[0] for x in COUNTRIES}
for domain in Domain.get_all():
try:
if isinstance(domain.deployment.country, basestring):
if domain.deployment.country in country_lookup.keys():
abbr = [country_lookup[domain.deployment.country.lower()]]
else:
abbr = []
domain.deployment.country = abbr
domain.save()
except Exception as e:
print "There was an error migrating the domain named %s." % domain.name
print "Error: %s", e
Add migration for deployment country to countries
|
from django.core.management.base import LabelCommand
from django_countries.countries import COUNTRIES
from corehq.apps.domain.models import Domain
class Command(LabelCommand):
help = "Migrates old django domain countries from string to list. Sept 2014."
args = ""
label = ""
def handle(self, *args, **options):
print "Migrating Domain countries"
country_lookup = {x[1].lower(): x[0] for x in COUNTRIES}
# Special cases (keys lowercased to match the lookups below)
country_lookup["usa"] = country_lookup["united states"]
country_lookup["california"] = country_lookup["united states"]
country_lookup["wales"] = country_lookup["united kingdom"]
for domain in Domain.get_all():
try:
if isinstance(domain.deployment.country, basestring):
if ',' in domain.deployment.country:
countries = domain.deployment.country.split(',')
elif ' and ' in domain.deployment.country:
countries = domain.deployment.country.split(' and ')
else:
countries = [domain.deployment.country]
abbr = []
for country in countries:
country = country.strip().lower()
if country in country_lookup.keys():
abbr.append(country_lookup[country])
domain.deployment.countries = abbr
domain.save()
except Exception as e:
print "There was an error migrating the domain named %s." % domain.name
print "Error: %s", e
|
<commit_before>from django.core.management.base import LabelCommand
from django_countries.countries import COUNTRIES
from corehq.apps.domain.models import Domain
class Command(LabelCommand):
help = "Migrates old django domain countries from string to list. Sept 2014."
args = ""
label = ""
def handle(self, *args, **options):
print "Migrating Domain countries"
country_lookup = {x[1].lower(): x[0] for x in COUNTRIES}
for domain in Domain.get_all():
try:
if isinstance(domain.deployment.country, basestring):
if domain.deployment.country in country_lookup.keys():
abbr = [country_lookup[domain.deployment.country.lower()]]
else:
abbr = []
domain.deployment.country = abbr
domain.save()
except Exception as e:
print "There was an error migrating the domain named %s." % domain.name
print "Error: %s", e
<commit_msg>Add migration for deployment country to countries<commit_after>
|
from django.core.management.base import LabelCommand
from django_countries.countries import COUNTRIES
from corehq.apps.domain.models import Domain
class Command(LabelCommand):
help = "Migrates old django domain countries from string to list. Sept 2014."
args = ""
label = ""
def handle(self, *args, **options):
print "Migrating Domain countries"
country_lookup = {x[1].lower(): x[0] for x in COUNTRIES}
# Special cases (keys lowercased to match the lookups below)
country_lookup["usa"] = country_lookup["united states"]
country_lookup["california"] = country_lookup["united states"]
country_lookup["wales"] = country_lookup["united kingdom"]
for domain in Domain.get_all():
try:
if isinstance(domain.deployment.country, basestring):
if ',' in domain.deployment.country:
countries = domain.deployment.country.split(',')
elif ' and ' in domain.deployment.country:
countries = domain.deployment.country.split(' and ')
else:
countries = [domain.deployment.country]
abbr = []
for country in countries:
country = country.strip().lower()
if country in country_lookup.keys():
abbr.append(country_lookup[country])
domain.deployment.countries = abbr
domain.save()
except Exception as e:
print "There was an error migrating the domain named %s." % domain.name
print "Error: %s", e
|
from django.core.management.base import LabelCommand
from django_countries.countries import COUNTRIES
from corehq.apps.domain.models import Domain
class Command(LabelCommand):
help = "Migrates old django domain countries from string to list. Sept 2014."
args = ""
label = ""
def handle(self, *args, **options):
print "Migrating Domain countries"
country_lookup = {x[1].lower(): x[0] for x in COUNTRIES}
for domain in Domain.get_all():
try:
if isinstance(domain.deployment.country, basestring):
if domain.deployment.country in country_lookup.keys():
abbr = [country_lookup[domain.deployment.country.lower()]]
else:
abbr = []
domain.deployment.country = abbr
domain.save()
except Exception as e:
print "There was an error migrating the domain named %s." % domain.name
print "Error: %s", e
Add migration for deployment country to countriesfrom django.core.management.base import LabelCommand
from django_countries.countries import COUNTRIES
from corehq.apps.domain.models import Domain
class Command(LabelCommand):
help = "Migrates old django domain countries from string to list. Sept 2014."
args = ""
label = ""
def handle(self, *args, **options):
print "Migrating Domain countries"
country_lookup = {x[1].lower(): x[0] for x in COUNTRIES}
# Special cases (keys lowercased to match the lookups below)
country_lookup["usa"] = country_lookup["united states"]
country_lookup["california"] = country_lookup["united states"]
country_lookup["wales"] = country_lookup["united kingdom"]
for domain in Domain.get_all():
try:
if isinstance(domain.deployment.country, basestring):
if ',' in domain.deployment.country:
countries = domain.deployment.country.split(',')
elif ' and ' in domain.deployment.country:
countries = domain.deployment.country.split(' and ')
else:
countries = [domain.deployment.country]
abbr = []
for country in countries:
country = country.strip().lower()
if country in country_lookup.keys():
abbr.append(country_lookup[country])
domain.deployment.countries = abbr
domain.save()
except Exception as e:
print "There was an error migrating the domain named %s." % domain.name
print "Error: %s", e
|
<commit_before>from django.core.management.base import LabelCommand
from django_countries.countries import COUNTRIES
from corehq.apps.domain.models import Domain
class Command(LabelCommand):
help = "Migrates old django domain countries from string to list. Sept 2014."
args = ""
label = ""
def handle(self, *args, **options):
print "Migrating Domain countries"
country_lookup = {x[1].lower(): x[0] for x in COUNTRIES}
for domain in Domain.get_all():
try:
if isinstance(domain.deployment.country, basestring):
if domain.deployment.country in country_lookup.keys():
abbr = [country_lookup[domain.deployment.country.lower()]]
else:
abbr = []
domain.deployment.country = abbr
domain.save()
except Exception as e:
print "There was an error migrating the domain named %s." % domain.name
print "Error: %s", e
<commit_msg>Add migration for deployment country to countries<commit_after>from django.core.management.base import LabelCommand
from django_countries.countries import COUNTRIES
from corehq.apps.domain.models import Domain
class Command(LabelCommand):
help = "Migrates old django domain countries from string to list. Sept 2014."
args = ""
label = ""
def handle(self, *args, **options):
print "Migrating Domain countries"
country_lookup = {x[1].lower(): x[0] for x in COUNTRIES}
# Special cases (keys lowercased to match the lookups below)
country_lookup["usa"] = country_lookup["united states"]
country_lookup["california"] = country_lookup["united states"]
country_lookup["wales"] = country_lookup["united kingdom"]
for domain in Domain.get_all():
try:
if isinstance(domain.deployment.country, basestring):
if ',' in domain.deployment.country:
countries = domain.deployment.country.split(',')
elif ' and ' in domain.deployment.country:
countries = domain.deployment.country.split(' and ')
else:
countries = [domain.deployment.country]
abbr = []
for country in countries:
country = country.strip().lower()
if country in country_lookup.keys():
abbr.append(country_lookup[country])
domain.deployment.countries = abbr
domain.save()
except Exception as e:
print "There was an error migrating the domain named %s." % domain.name
print "Error: %s", e
|
5b251c46581e286f05e02ec2363b9c83a228f3fb
|
data_collection/social_media/twitter/spellchecking/get_category.py
|
data_collection/social_media/twitter/spellchecking/get_category.py
|
#!/usr/bin/env python2
# Install nltk: 'sudo pip install -U pyyaml nltk'
from nltk.stem.lancaster import LancasterStemmer
c = {'cname1': ['hello', 'rice', 'increasing'], 'cname2': ['horses', 'horse']}
c_stems = {}
st = LancasterStemmer()
# 1. Build set for existing categories
def init_stem_sets():
for cname in c:
word_list = c[cname]
stem_set = set()
for word in word_list:
stem_set.add(st.stem(word))
c_stems[cname] = stem_set
# 2. Get a category for a given word
def get_category(w):
w_stem = st.stem(w)
for cname in c:
stem_set = c_stems[cname]
if w_stem in stem_set:
return cname
return None
init_stem_sets()
print c_stems
print get_category('increases')
|
Use NLTK to get word stems (useful for categorization)
|
Use NLTK to get word stems (useful for categorization)
|
Python
|
bsd-3-clause
|
FAB4D/humanitas,FAB4D/humanitas,FAB4D/humanitas
|
Use NLTK to get word stems (useful for categorization)
|
#!/usr/bin/env python2
# Install nltk: 'sudo pip install -U pyyaml nltk'
from nltk.stem.lancaster import LancasterStemmer
c = {'cname1': ['hello', 'rice', 'increasing'], 'cname2': ['horses', 'horse']}
c_stems = {}
st = LancasterStemmer()
# 1. Build set for existing categories
def init_stem_sets():
for cname in c:
word_list = c[cname]
stem_set = set()
for word in word_list:
stem_set.add(st.stem(word))
c_stems[cname] = stem_set
# 2. Get a category for a given word
def get_category(w):
w_stem = st.stem(w)
for cname in c:
stem_set = c_stems[cname]
if w_stem in stem_set:
return cname
return None
init_stem_sets()
print c_stems
print get_category('increases')
|
<commit_before><commit_msg>Use NLTK to get word stems (useful for categorization)<commit_after>
|
#!/usr/bin/env python2
# Install nltk: 'sudo pip install -U pyyaml nltk'
from nltk.stem.lancaster import LancasterStemmer
c = {'cname1': ['hello', 'rice', 'increasing'], 'cname2': ['horses', 'horse']}
c_stems = {}
st = LancasterStemmer()
# 1. Build set for existing categories
def init_stem_sets():
for cname in c:
word_list = c[cname]
stem_set = set()
for word in word_list:
stem_set.add(st.stem(word))
c_stems[cname] = stem_set
# 2. Get a category for a given word
def get_category(w):
w_stem = st.stem(w)
for cname in c:
stem_set = c_stems[cname]
if w_stem in stem_set:
return cname
return None
init_stem_sets()
print c_stems
print get_category('increases')
|
Use NLTK to get word stems (useful for categorization)#!/usr/bin/env python2
# Install nltk: 'sudo pip install -U pyyaml nltk'
from nltk.stem.lancaster import LancasterStemmer
c = {'cname1': ['hello', 'rice', 'increasing'], 'cname2': ['horses', 'horse']}
c_stems = {}
st = LancasterStemmer()
# 1. Build set for existing categories
def init_stem_sets():
for cname in c:
word_list = c[cname]
stem_set = set()
for word in word_list:
stem_set.add(st.stem(word))
c_stems[cname] = stem_set
# 2. Get a category for a given word
def get_category(w):
w_stem = st.stem(w)
for cname in c:
stem_set = c_stems[cname]
if w_stem in stem_set:
return cname
return None
init_stem_sets()
print c_stems
print get_category('increases')
|
<commit_before><commit_msg>Use NLTK to get word stems (useful for categorization)<commit_after>#!/usr/bin/env python2
# Install nltk: 'sudo pip install -U pyyaml nltk'
from nltk.stem.lancaster import LancasterStemmer
c = {'cname1': ['hello', 'rice', 'increasing'], 'cname2': ['horses', 'horse']}
c_stems = {}
st = LancasterStemmer()
# 1. Build set for existing categories
def init_stem_sets():
for cname in c:
word_list = c[cname]
stem_set = set()
for word in word_list:
stem_set.add(st.stem(word))
c_stems[cname] = stem_set
# 2. Get a category for a given word
def get_category(w):
w_stem = st.stem(w)
for cname in c:
stem_set = c_stems[cname]
if w_stem in stem_set:
return cname
return None
init_stem_sets()
print c_stems
print get_category('increases')
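Looping over every category per lookup is O(number of categories); inverting the stem sets into a single stem-to-category dict gives O(1) lookups. A sketch in the same Python 2 style (it assumes the category word lists do not share stems; if they do, the last category written wins):
stem_to_cat = {}
for cname, words in c.items():
    for word in words:
        stem_to_cat[st.stem(word)] = cname

def get_category_fast(w):
    return stem_to_cat.get(st.stem(w))

print get_category_fast('increases')  # same result as get_category above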
|
|
7ae164a58e229379f0acf674767bdb0b422cf13a
|
emission/net/ext_service/otp/test_otp.py
|
emission/net/ext_service/otp/test_otp.py
|
import unittest
import random
import datetime
import emission.net.ext_service.otp.otp as otp
import emission.core.wrapper.location as ecwl
import emission.storage.decorations.local_date_queries as ecsdlq
import emission.core.wrapper.user as ecwu
from past.utils import old_div
class TestOTPMethods(unittest.TestCase):
def setUp(self):
start_point = (37.77264255,-122.399714854263)
end_point = (37.42870635,-122.140926605802)
mode = "TRANSIT"
curr_time = datetime.datetime.now()
curr_month = curr_time.month
curr_year = curr_time.year
curr_minute = curr_time.minute
curr_day = random.randint(1, 28)
curr_hour = random.randint(0, 23)
date = "%s-%s-%s" % (curr_month, curr_day, curr_year)
time = "%s:%s" % (curr_hour, curr_minute)
self.opt_trip = otp.OTP(start_point, end_point, mode, date, time, bike=True)
def test_create_start_location_from_leg(self):
legs = self.opt_trip.get_json()["plan"]["itineraries"][0]['legs']
first_leg = legs[0]
start_loc = otp.create_start_location_from_leg(first_leg)
self.assertEqual(start_loc.ts, otp.otp_time_to_ours(first_leg['startTime']).timestamp)
self.assertEqual(start_loc.local_dt, ecsdlq.get_local_date(start_loc.ts, 'UTC'))
#print(start_loc)
def test_create_start_location_from_trip_plan(self):
trip_plan = self.opt_trip.get_json()["plan"]
start_loc = otp.create_start_location_from_trip_plan(trip_plan)
def test_get_json(self):
pass
def test_legs_json(self):
legs = self.opt_trip.get_json()["plan"]["itineraries"][0]['legs']
def test_turn_into_new_trip(self):
fake_user_email = 'test_otp_insert'
user = ecwu.User.register(fake_user_email)
override_uuid = user.uuid
#self.opt_trip.turn_into_new_trip(override_uuid)
def test_make_url(self):
print(self.opt_trip.make_url())
if __name__ == '__main__':
unittest.main()
|
Add test file for otp script
|
Add test file for otp script
|
Python
|
bsd-3-clause
|
e-mission/e-mission-server,shankari/e-mission-server,e-mission/e-mission-server,shankari/e-mission-server,shankari/e-mission-server,shankari/e-mission-server,e-mission/e-mission-server,e-mission/e-mission-server
|
Add test file for otp script
|
import unittest
import random
import datetime
import emission.net.ext_service.otp.otp as otp
import emission.core.wrapper.location as ecwl
import emission.storage.decorations.local_date_queries as ecsdlq
import emission.core.wrapper.user as ecwu
from past.utils import old_div
class TestOTPMethods(unittest.TestCase):
def setUp(self):
start_point = (37.77264255,-122.399714854263)
end_point = (37.42870635,-122.140926605802)
mode = "TRANSIT"
curr_time = datetime.datetime.now()
curr_month = curr_time.month
curr_year = curr_time.year
curr_minute = curr_time.minute
curr_day = random.randint(1, 28)
curr_hour = random.randint(0, 23)
date = "%s-%s-%s" % (curr_month, curr_day, curr_year)
time = "%s:%s" % (curr_hour, curr_minute)
self.opt_trip = otp.OTP(start_point, end_point, mode, date, time, bike=True)
def test_create_start_location_from_leg(self):
legs = self.opt_trip.get_json()["plan"]["itineraries"][0]['legs']
first_leg = legs[0]
start_loc = otp.create_start_location_from_leg(first_leg)
self.assertEqual(start_loc.ts, otp.otp_time_to_ours(first_leg['startTime']).timestamp)
self.assertEqual(start_loc.local_dt, ecsdlq.get_local_date(start_loc.ts, 'UTC'))
#print(start_loc)
def test_create_start_location_from_trip_plan(self):
trip_plan = self.opt_trip.get_json()["plan"]
start_loc = otp.create_start_location_from_trip_plan(trip_plan)
def test_get_json(self):
pass
def test_legs_json(self):
legs = self.opt_trip.get_json()["plan"]["itineraries"][0]['legs']
def test_turn_into_new_trip(self):
fake_user_email = 'test_otp_insert'
user = ecwu.User.register(fake_user_email)
override_uuid = user.uuid
#self.opt_trip.turn_into_new_trip(override_uuid)
def test_make_url(self):
print(self.opt_trip.make_url())
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add test file for otp script<commit_after>
|
import unittest
import random
import datetime
import emission.net.ext_service.otp.otp as otp
import emission.core.wrapper.location as ecwl
import emission.storage.decorations.local_date_queries as ecsdlq
import emission.core.wrapper.user as ecwu
from past.utils import old_div
class TestOTPMethods(unittest.TestCase):
def setUp(self):
start_point = (37.77264255,-122.399714854263)
end_point = (37.42870635,-122.140926605802)
mode = "TRANSIT"
curr_time = datetime.datetime.now()
curr_month = curr_time.month
curr_year = curr_time.year
curr_minute = curr_time.minute
curr_day = random.randint(1, 28)
curr_hour = random.randint(0, 23)
date = "%s-%s-%s" % (curr_month, curr_day, curr_year)
time = "%s:%s" % (curr_hour, curr_minute)
self.opt_trip = otp.OTP(start_point, end_point, mode, date, time, bike=True)
def test_create_start_location_from_leg(self):
legs = self.opt_trip.get_json()["plan"]["itineraries"][0]['legs']
first_leg = legs[0]
start_loc = otp.create_start_location_from_leg(first_leg)
self.assertEqual(start_loc.ts, otp.otp_time_to_ours(first_leg['startTime']).timestamp)
self.assertEqual(start_loc.local_dt, ecsdlq.get_local_date(start_loc.ts, 'UTC'))
#print(start_loc)
def test_create_start_location_from_trip_plan(self):
trip_plan = self.opt_trip.get_json()["plan"]
start_loc = otp.create_start_location_from_trip_plan(trip_plan)
def test_get_json(self):
pass
def test_legs_json(self):
legs = self.opt_trip.get_json()["plan"]["itineraries"][0]['legs']
def test_turn_into_new_trip(self):
fake_user_email = 'test_otp_insert'
user = ecwu.User.register(fake_user_email)
override_uuid = user.uuid
#self.opt_trip.turn_into_new_trip(override_uuid)
def test_make_url(self):
print(self.opt_trip.make_url())
if __name__ == '__main__':
unittest.main()
|
Add test file for otp scriptimport unittest
import random
import datetime
import emission.net.ext_service.otp.otp as otp
import emission.core.wrapper.location as ecwl
import emission.storage.decorations.local_date_queries as ecsdlq
import emission.core.wrapper.user as ecwu
from past.utils import old_div
class TestOTPMethods(unittest.TestCase):
def setUp(self):
start_point = (37.77264255,-122.399714854263)
end_point = (37.42870635,-122.140926605802)
mode = "TRANSIT"
curr_time = datetime.datetime.now()
curr_month = curr_time.month
curr_year = curr_time.year
curr_minute = curr_time.minute
curr_day = random.randint(1, 28)
curr_hour = random.randint(0, 23)
date = "%s-%s-%s" % (curr_month, curr_day, curr_year)
time = "%s:%s" % (curr_hour, curr_minute)
self.opt_trip = otp.OTP(start_point, end_point, mode, date, time, bike=True)
    def test_create_start_location_from_leg(self):
        legs = self.opt_trip.get_json()["plan"]["itineraries"][0]['legs']
        first_leg = legs[0]
        start_loc = otp.create_start_location_from_leg(first_leg)
        self.assertEqual(start_loc.ts, otp.otp_time_to_ours(first_leg['startTime']).timestamp)
        self.assertEqual(start_loc.local_dt, ecsdlq.get_local_date(start_loc.ts, 'UTC'))
        #print(start_loc)
    def test_create_start_location_from_trip_plan(self):
trip_plan = self.opt_trip.get_json()["plan"]
start_loc = otp.create_start_location_from_trip_plan(trip_plan)
def test_get_json(self):
pass
def test_legs_json(self):
legs = self.opt_trip.get_json()["plan"]["itineraries"][0]['legs']
def test_turn_into_new_trip(self):
fake_user_email = 'test_otp_insert'
user = ecwu.User.register(fake_user_email)
override_uuid = user.uuid
#self.opt_trip.turn_into_new_trip(override_uuid)
def test_make_url(self):
print(self.opt_trip.make_url())
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add test file for otp script<commit_after>import unittest
import random
import datetime
import emission.net.ext_service.otp.otp as otp
import emission.core.wrapper.location as ecwl
import emission.storage.decorations.local_date_queries as ecsdlq
import emission.core.wrapper.user as ecwu
from past.utils import old_div
class TestOTPMethods(unittest.TestCase):
def setUp(self):
start_point = (37.77264255,-122.399714854263)
end_point = (37.42870635,-122.140926605802)
mode = "TRANSIT"
curr_time = datetime.datetime.now()
curr_month = curr_time.month
curr_year = curr_time.year
curr_minute = curr_time.minute
curr_day = random.randint(1, 28)
curr_hour = random.randint(0, 23)
date = "%s-%s-%s" % (curr_month, curr_day, curr_year)
time = "%s:%s" % (curr_hour, curr_minute)
self.opt_trip = otp.OTP(start_point, end_point, mode, date, time, bike=True)
    def test_create_start_location_from_leg(self):
        legs = self.opt_trip.get_json()["plan"]["itineraries"][0]['legs']
        first_leg = legs[0]
        start_loc = otp.create_start_location_from_leg(first_leg)
        self.assertEqual(start_loc.ts, otp.otp_time_to_ours(first_leg['startTime']).timestamp)
        self.assertEqual(start_loc.local_dt, ecsdlq.get_local_date(start_loc.ts, 'UTC'))
        #print(start_loc)
    def test_create_start_location_from_trip_plan(self):
trip_plan = self.opt_trip.get_json()["plan"]
start_loc = otp.create_start_location_from_trip_plan(trip_plan)
def test_get_json(self):
pass
def test_legs_json(self):
legs = self.opt_trip.get_json()["plan"]["itineraries"][0]['legs']
def test_turn_into_new_trip(self):
fake_user_email = 'test_otp_insert'
user = ecwu.User.register(fake_user_email)
override_uuid = user.uuid
#self.opt_trip.turn_into_new_trip(override_uuid)
def test_make_url(self):
print(self.opt_trip.make_url())
if __name__ == '__main__':
unittest.main()
|
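The setUp above assembles OTP's M-D-YYYY date and H:M time query strings from a partly randomized timestamp; capping the day at 28 keeps the date valid in every month, February included. A self-contained sketch of that construction (the function name is illustrative, not part of the emission package):

import datetime
import random

def random_query_datetime():
    # Mirrors the setUp logic: random day/hour, current month/year/minute.
    now = datetime.datetime.now()
    day = random.randint(1, 28)    # 28 is a valid day in every month
    hour = random.randint(0, 23)
    date = "%s-%s-%s" % (now.month, day, now.year)
    time = "%s:%s" % (hour, now.minute)
    return date, time

print('%s at %s' % random_query_datetime())   # e.g. 6-17-2016 at 13:42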
|
6275c09373ac796876cebf62d74ac1f421eed3ca
|
ci/push/get_version_id.py
|
ci/push/get_version_id.py
|
import os
import sys
import csv
from push_api import SalesforcePushApi
# Force UTF8 output
reload(sys)
sys.setdefaultencoding('UTF8')
if __name__ == '__main__':
try:
username = os.environ.get('SF_USERNAME')
password = os.environ.get('SF_PASSWORD')
serverurl = os.environ.get('SF_SERVERURL')
namespace = os.environ.get('NAMESPACE')
version = os.environ.get('VERSION_NUMBER')
# Parse the version number string
major = None
minor = None
patch = None
build = None
state = 'Released'
version_parts = version.split('.')
if len(version_parts) >= 1:
major = version_parts[0]
if len(version_parts) == 2:
minor = version_parts[1]
if minor.find('Beta') != -1:
state = 'Beta'
minor, build = minor.replace(' (Beta ',',').replace(')','').split(',')
if len(version_parts) > 2:
minor = version_parts[1]
patch = version_parts[2]
if patch.find('Beta') != -1:
state = 'Beta'
                patch, build = patch.replace(' (Beta ',',').replace(')','').split(',')
push_api = SalesforcePushApi(username, password, serverurl)
package = push_api.get_package_objs("NamespacePrefix = '%s'" % namespace, limit=1)[0]
version_where = "ReleaseState = '%s' AND MajorVersion = %s AND MinorVersion = %s" % (state, major, minor)
if patch:
version_where += " AND PatchVersion = %s" % patch
if state == 'Beta' and build:
version_where += " AND BuildNumber = %s" % build
version = push_api.get_package_version_objs(version_where, limit=1)[0]
print version.sf_id
except SystemExit:
sys.exit(1)
except:
import traceback
exc_type, exc_value, exc_traceback = sys.exc_info()
print '-'*60
traceback.print_exception(exc_type, exc_value, exc_traceback, file=sys.stdout)
print '-'*60
sys.exit(2)
|
Add push api script to get a version's salesforce id from its namespace and version number including support for patch and beta versions
|
Add push api script to get a version's salesforce id from its namespace
and version number including support for patch and beta versions
|
Python
|
bsd-3-clause
|
steelbrick/CumulusCI,motivislearning/CumulusCI,steelbrick/CumulusCI,e02d96ec16/CumulusCI,cdcarter/CumulusCI,SalesforceFoundation/CumulusCI,Joble/CumulusCI,e02d96ec16/CumulusCI,motivislearning/CumulusCI,SalesforceFoundation/CumulusCI
|
Add push api script to get a version's salesforce id from its namespace
and version number including support for patch and beta versions
|
import os
import sys
import csv
from push_api import SalesforcePushApi
# Force UTF8 output
reload(sys)
sys.setdefaultencoding('UTF8')
if __name__ == '__main__':
try:
username = os.environ.get('SF_USERNAME')
password = os.environ.get('SF_PASSWORD')
serverurl = os.environ.get('SF_SERVERURL')
namespace = os.environ.get('NAMESPACE')
version = os.environ.get('VERSION_NUMBER')
# Parse the version number string
major = None
minor = None
patch = None
build = None
state = 'Released'
version_parts = version.split('.')
if len(version_parts) >= 1:
major = version_parts[0]
if len(version_parts) == 2:
minor = version_parts[1]
if minor.find('Beta') != -1:
state = 'Beta'
minor, build = minor.replace(' (Beta ',',').replace(')','').split(',')
if len(version_parts) > 2:
minor = version_parts[1]
patch = version_parts[2]
if patch.find('Beta') != -1:
state = 'Beta'
                patch, build = patch.replace(' (Beta ',',').replace(')','').split(',')
push_api = SalesforcePushApi(username, password, serverurl)
package = push_api.get_package_objs("NamespacePrefix = '%s'" % namespace, limit=1)[0]
version_where = "ReleaseState = '%s' AND MajorVersion = %s AND MinorVersion = %s" % (state, major, minor)
if patch:
version_where += " AND PatchVersion = %s" % patch
if state == 'Beta' and build:
version_where += " AND BuildNumber = %s" % build
version = push_api.get_package_version_objs(version_where, limit=1)[0]
print version.sf_id
except SystemExit:
sys.exit(1)
except:
import traceback
exc_type, exc_value, exc_traceback = sys.exc_info()
print '-'*60
traceback.print_exception(exc_type, exc_value, exc_traceback, file=sys.stdout)
print '-'*60
sys.exit(2)
|
<commit_before><commit_msg>Add push api script to get a version's salesforce id from its namespace
and version number including support for patch and beta versions<commit_after>
|
import os
import sys
import csv
from push_api import SalesforcePushApi
# Force UTF8 output
reload(sys)
sys.setdefaultencoding('UTF8')
if __name__ == '__main__':
try:
username = os.environ.get('SF_USERNAME')
password = os.environ.get('SF_PASSWORD')
serverurl = os.environ.get('SF_SERVERURL')
namespace = os.environ.get('NAMESPACE')
version = os.environ.get('VERSION_NUMBER')
# Parse the version number string
major = None
minor = None
patch = None
build = None
state = 'Released'
version_parts = version.split('.')
if len(version_parts) >= 1:
major = version_parts[0]
if len(version_parts) == 2:
minor = version_parts[1]
if minor.find('Beta') != -1:
state = 'Beta'
minor, build = minor.replace(' (Beta ',',').replace(')','').split(',')
if len(version_parts) > 2:
minor = version_parts[1]
patch = version_parts[2]
if patch.find('Beta') != -1:
state = 'Beta'
                patch, build = patch.replace(' (Beta ',',').replace(')','').split(',')
push_api = SalesforcePushApi(username, password, serverurl)
package = push_api.get_package_objs("NamespacePrefix = '%s'" % namespace, limit=1)[0]
version_where = "ReleaseState = '%s' AND MajorVersion = %s AND MinorVersion = %s" % (state, major, minor)
if patch:
version_where += " AND PatchVersion = %s" % patch
if state == 'Beta' and build:
version_where += " AND BuildNumber = %s" % build
version = push_api.get_package_version_objs(version_where, limit=1)[0]
print version.sf_id
except SystemExit:
sys.exit(1)
except:
import traceback
exc_type, exc_value, exc_traceback = sys.exc_info()
print '-'*60
traceback.print_exception(exc_type, exc_value, exc_traceback, file=sys.stdout)
print '-'*60
sys.exit(2)
|
Add push api script to get a version's salesforce id from its namespace
and version number including support for patch and beta versionsimport os
import sys
import csv
from push_api import SalesforcePushApi
# Force UTF8 output
reload(sys)
sys.setdefaultencoding('UTF8')
if __name__ == '__main__':
try:
username = os.environ.get('SF_USERNAME')
password = os.environ.get('SF_PASSWORD')
serverurl = os.environ.get('SF_SERVERURL')
namespace = os.environ.get('NAMESPACE')
version = os.environ.get('VERSION_NUMBER')
# Parse the version number string
major = None
minor = None
patch = None
build = None
state = 'Released'
version_parts = version.split('.')
if len(version_parts) >= 1:
major = version_parts[0]
if len(version_parts) == 2:
minor = version_parts[1]
if minor.find('Beta') != -1:
state = 'Beta'
minor, build = minor.replace(' (Beta ',',').replace(')','').split(',')
if len(version_parts) > 2:
minor = version_parts[1]
patch = version_parts[2]
if patch.find('Beta') != -1:
state = 'Beta'
                patch, build = patch.replace(' (Beta ',',').replace(')','').split(',')
push_api = SalesforcePushApi(username, password, serverurl)
package = push_api.get_package_objs("NamespacePrefix = '%s'" % namespace, limit=1)[0]
version_where = "ReleaseState = '%s' AND MajorVersion = %s AND MinorVersion = %s" % (state, major, minor)
if patch:
version_where += " AND PatchVersion = %s" % patch
if state == 'Beta' and build:
version_where += " AND BuildNumber = %s" % build
version = push_api.get_package_version_objs(version_where, limit=1)[0]
print version.sf_id
except SystemExit:
sys.exit(1)
except:
import traceback
exc_type, exc_value, exc_traceback = sys.exc_info()
print '-'*60
traceback.print_exception(exc_type, exc_value, exc_traceback, file=sys.stdout)
print '-'*60
sys.exit(2)
|
<commit_before><commit_msg>Add push api script to get a version's salesforce id from its namespace
and version number including support for patch and beta versions<commit_after>import os
import sys
import csv
from push_api import SalesforcePushApi
# Force UTF8 output
reload(sys)
sys.setdefaultencoding('UTF8')
if __name__ == '__main__':
try:
username = os.environ.get('SF_USERNAME')
password = os.environ.get('SF_PASSWORD')
serverurl = os.environ.get('SF_SERVERURL')
namespace = os.environ.get('NAMESPACE')
version = os.environ.get('VERSION_NUMBER')
# Parse the version number string
major = None
minor = None
patch = None
build = None
state = 'Released'
version_parts = version.split('.')
if len(version_parts) >= 1:
major = version_parts[0]
if len(version_parts) == 2:
minor = version_parts[1]
if minor.find('Beta') != -1:
state = 'Beta'
minor, build = minor.replace(' (Beta ',',').replace(')','').split(',')
if len(version_parts) > 2:
minor = version_parts[1]
patch = version_parts[2]
if patch.find('Beta') != -1:
state = 'Beta'
                patch, build = patch.replace(' (Beta ',',').replace(')','').split(',')
push_api = SalesforcePushApi(username, password, serverurl)
package = push_api.get_package_objs("NamespacePrefix = '%s'" % namespace, limit=1)[0]
version_where = "ReleaseState = '%s' AND MajorVersion = %s AND MinorVersion = %s" % (state, major, minor)
if patch:
version_where += " AND PatchVersion = %s" % patch
if state == 'Beta' and build:
version_where += " AND BuildNumber = %s" % build
version = push_api.get_package_version_objs(version_where, limit=1)[0]
print version.sf_id
except SystemExit:
sys.exit(1)
except:
import traceback
exc_type, exc_value, exc_traceback = sys.exc_info()
print '-'*60
traceback.print_exception(exc_type, exc_value, exc_traceback, file=sys.stdout)
print '-'*60
sys.exit(2)
|
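The parsing above accepts 'major.minor' or 'major.minor.patch', where the last component may carry a ' (Beta N)' suffix holding the build number. A condensed, self-contained sketch of that logic (the function name is illustrative):

def parse_version(version):
    major = minor = patch = build = None
    state = 'Released'
    parts = version.split('.')
    major = parts[0]
    if len(parts) == 2:
        minor = parts[1]
    elif len(parts) > 2:
        minor, patch = parts[1], parts[2]
    # A trailing " (Beta N)" marks a beta and carries the build number.
    last = patch if patch is not None else minor
    if last and 'Beta' in last:
        state = 'Beta'
        number, build = last.replace(' (Beta ', ',').replace(')', '').split(',')
        if patch is not None:
            patch = number
        else:
            minor = number
    return major, minor, patch, build, state

assert parse_version('1.2') == ('1', '2', None, None, 'Released')
assert parse_version('1.2 (Beta 3)') == ('1', '2', None, '3', 'Beta')
assert parse_version('1.2.1 (Beta 4)') == ('1', '2', '1', '4', 'Beta')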
|
2f4722bb30e39510e985a08da1d28dbdd6ccd232
|
test/widgets/test_crypto_ticker.py
|
test/widgets/test_crypto_ticker.py
|
# Copyright (c) 2021 elParaguayo
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# This widget is based on GenPollUrl which has a separate test.
# We just need to test parsing here.
from libqtile import widget
RESPONSE = {
"data": {
"base": "BTC",
"currency": "GBP",
"amount": "29625.02"
}
}
def test_set_defaults():
crypto = widget.CryptoTicker(currency="", symbol="")
assert crypto.currency == "USD"
assert crypto.symbol == "$"
def test_parse():
crypto = widget.CryptoTicker(currency="GBP", symbol="£", crypto="BTC")
assert crypto.url == "https://api.coinbase.com/v2/prices/BTC-GBP/spot"
assert crypto.parse(RESPONSE) == "BTC: £29625.02"
|
Add test for CryptoTicker widget
|
Add test for CryptoTicker widget
|
Python
|
mit
|
ramnes/qtile,qtile/qtile,ramnes/qtile,qtile/qtile
|
Add test for CryptoTicker widget
|
# Copyright (c) 2021 elParaguayo
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# This widget is based on GenPollUrl which has a separate test.
# We just need to test parsing here.
from libqtile import widget
RESPONSE = {
"data": {
"base": "BTC",
"currency": "GBP",
"amount": "29625.02"
}
}
def test_set_defaults():
crypto = widget.CryptoTicker(currency="", symbol="")
assert crypto.currency == "USD"
assert crypto.symbol == "$"
def test_parse():
crypto = widget.CryptoTicker(currency="GBP", symbol="£", crypto="BTC")
assert crypto.url == "https://api.coinbase.com/v2/prices/BTC-GBP/spot"
assert crypto.parse(RESPONSE) == "BTC: £29625.02"
|
<commit_before><commit_msg>Add test for CryptoTicker widget<commit_after>
|
# Copyright (c) 2021 elParaguayo
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# This widget is based on GenPollUrl which has a separate test.
# We just need to test parsing here.
from libqtile import widget
RESPONSE = {
"data": {
"base": "BTC",
"currency": "GBP",
"amount": "29625.02"
}
}
def test_set_defaults():
crypto = widget.CryptoTicker(currency="", symbol="")
assert crypto.currency == "USD"
assert crypto.symbol == "$"
def test_parse():
crypto = widget.CryptoTicker(currency="GBP", symbol="£", crypto="BTC")
assert crypto.url == "https://api.coinbase.com/v2/prices/BTC-GBP/spot"
assert crypto.parse(RESPONSE) == "BTC: £29625.02"
|
Add test for CryptoTicker widget# Copyright (c) 2021 elParaguayo
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# This widget is based on GenPollUrl which has a separate test.
# We just need to test parsing here.
from libqtile import widget
RESPONSE = {
"data": {
"base": "BTC",
"currency": "GBP",
"amount": "29625.02"
}
}
def test_set_defaults():
crypto = widget.CryptoTicker(currency="", symbol="")
assert crypto.currency == "USD"
assert crypto.symbol == "$"
def test_parse():
crypto = widget.CryptoTicker(currency="GBP", symbol="£", crypto="BTC")
assert crypto.url == "https://api.coinbase.com/v2/prices/BTC-GBP/spot"
assert crypto.parse(RESPONSE) == "BTC: £29625.02"
|
<commit_before><commit_msg>Add test for CryptoTicker widget<commit_after># Copyright (c) 2021 elParaguayo
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# This widget is based on GenPollUrl which has a separate test.
# We just need to test parsing here.
from libqtile import widget
RESPONSE = {
"data": {
"base": "BTC",
"currency": "GBP",
"amount": "29625.02"
}
}
def test_set_defaults():
crypto = widget.CryptoTicker(currency="", symbol="")
assert crypto.currency == "USD"
assert crypto.symbol == "$"
def test_parse():
crypto = widget.CryptoTicker(currency="GBP", symbol="£", crypto="BTC")
assert crypto.url == "https://api.coinbase.com/v2/prices/BTC-GBP/spot"
assert crypto.parse(RESPONSE) == "BTC: £29625.02"
|
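Both assertions follow mechanically from the shape of Coinbase's spot-price response; a minimal sketch of the URL and display formatting the test pins down (format strings inferred from the asserted values):

crypto, currency, symbol = 'BTC', 'GBP', u'£'
url = 'https://api.coinbase.com/v2/prices/%s-%s/spot' % (crypto, currency)
body = {'data': {'base': 'BTC', 'currency': 'GBP', 'amount': '29625.02'}}
text = u'%s: %s%s' % (body['data']['base'], symbol, body['data']['amount'])
assert url == 'https://api.coinbase.com/v2/prices/BTC-GBP/spot'
assert text == u'BTC: £29625.02'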
|
712ae25e8deda242856c2cc21afb86ba11c0258b
|
dockci/migrations/0003.py
|
dockci/migrations/0003.py
|
"""
Migrate version to tag in build models
"""
import os
import shutil
import yaml
job_dirs = (
filename for filename in
os.listdir(os.path.join('data', 'builds'))
if os.path.isdir(os.path.join('data', 'builds', filename))
)
for job_dir in job_dirs:
build_files = (
filename for filename in
os.listdir(os.path.join('data', 'builds', job_dir))
if filename[-5:] == '.yaml'
)
for build_file in build_files:
build_slug = build_file[:-5]
build_file_path = os.path.join('data', 'builds', job_dir, build_file)
with open(build_file_path) as handle:
build_dict = yaml.load(handle)
try:
version = build_dict.pop('version')
if version:
build_dict['tag'] = version
with open(build_file_path, 'w') as handle:
yaml.dump(build_dict, handle, default_flow_style=False)
except KeyError:
pass
|
Add migration to rename version -> tag field
|
Add migration to rename version -> tag field
|
Python
|
isc
|
RickyCook/paas-in-a-day-dockci,RickyCook/paas-in-a-day-dockci
|
Add migration to rename version -> tag field
|
"""
Migrate version to tag in build models
"""
import os
import shutil
import yaml
job_dirs = (
filename for filename in
os.listdir(os.path.join('data', 'builds'))
if os.path.isdir(os.path.join('data', 'builds', filename))
)
for job_dir in job_dirs:
build_files = (
filename for filename in
os.listdir(os.path.join('data', 'builds', job_dir))
if filename[-5:] == '.yaml'
)
for build_file in build_files:
build_slug = build_file[:-5]
build_file_path = os.path.join('data', 'builds', job_dir, build_file)
with open(build_file_path) as handle:
build_dict = yaml.load(handle)
try:
version = build_dict.pop('version')
if version:
build_dict['tag'] = version
with open(build_file_path, 'w') as handle:
yaml.dump(build_dict, handle, default_flow_style=False)
except KeyError:
pass
|
<commit_before><commit_msg>Add migration to rename version -> tag field<commit_after>
|
"""
Migrate version to tag in build models
"""
import os
import shutil
import yaml
job_dirs = (
filename for filename in
os.listdir(os.path.join('data', 'builds'))
if os.path.isdir(os.path.join('data', 'builds', filename))
)
for job_dir in job_dirs:
build_files = (
filename for filename in
os.listdir(os.path.join('data', 'builds', job_dir))
if filename[-5:] == '.yaml'
)
for build_file in build_files:
build_slug = build_file[:-5]
build_file_path = os.path.join('data', 'builds', job_dir, build_file)
with open(build_file_path) as handle:
build_dict = yaml.load(handle)
try:
version = build_dict.pop('version')
if version:
build_dict['tag'] = version
with open(build_file_path, 'w') as handle:
yaml.dump(build_dict, handle, default_flow_style=False)
except KeyError:
pass
|
Add migration to rename version -> tag field"""
Migrate version to tag in build models
"""
import os
import shutil
import yaml
job_dirs = (
filename for filename in
os.listdir(os.path.join('data', 'builds'))
if os.path.isdir(os.path.join('data', 'builds', filename))
)
for job_dir in job_dirs:
build_files = (
filename for filename in
os.listdir(os.path.join('data', 'builds', job_dir))
if filename[-5:] == '.yaml'
)
for build_file in build_files:
build_slug = build_file[:-5]
build_file_path = os.path.join('data', 'builds', job_dir, build_file)
with open(build_file_path) as handle:
build_dict = yaml.load(handle)
try:
version = build_dict.pop('version')
if version:
build_dict['tag'] = version
with open(build_file_path, 'w') as handle:
yaml.dump(build_dict, handle, default_flow_style=False)
except KeyError:
pass
|
<commit_before><commit_msg>Add migration to rename version -> tag field<commit_after>"""
Migrate version to tag in build models
"""
import os
import shutil
import yaml
job_dirs = (
filename for filename in
os.listdir(os.path.join('data', 'builds'))
if os.path.isdir(os.path.join('data', 'builds', filename))
)
for job_dir in job_dirs:
build_files = (
filename for filename in
os.listdir(os.path.join('data', 'builds', job_dir))
if filename[-5:] == '.yaml'
)
for build_file in build_files:
build_slug = build_file[:-5]
build_file_path = os.path.join('data', 'builds', job_dir, build_file)
with open(build_file_path) as handle:
build_dict = yaml.load(handle)
try:
version = build_dict.pop('version')
if version:
build_dict['tag'] = version
with open(build_file_path, 'w') as handle:
yaml.dump(build_dict, handle, default_flow_style=False)
except KeyError:
pass
|
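The migration is a pure key rename inside each build document; applied to a single YAML payload the transformation looks like this (sample data illustrative):

import yaml

before = yaml.safe_load('version: v1.3\nresult: success\n')
version = before.pop('version', None)   # drop the old key...
if version:
    before['tag'] = version             # ...and re-add it under the new name
print(yaml.dump(before, default_flow_style=False))
# result: success
# tag: v1.3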
|
c6108cacc7705c27b8913d82229c331551796830
|
openprescribing/frontend/management/commands/migrate_some_dmd_data.py
|
openprescribing/frontend/management/commands/migrate_some_dmd_data.py
|
# This is a one-off command to migrate all TariffPrice and NCSOConcession data
# from the dmd app to the frontend app. It can be deleted once the dmd app has
# been removed.
from django.db import transaction
from django.core.management import BaseCommand
from dmd.models import TariffPrice as TariffPriceOld
from dmd.models import NCSOConcession as NCSOConcessionOld
from frontend.models import TariffPrice as TariffPriceNew
from frontend.models import NCSOConcession as NCSOConcessionNew
class Command(BaseCommand):
def handle(self, *args, **kwargs):
with transaction.atomic():
TariffPriceNew.objects.all().delete()
NCSOConcessionNew.objects.all().delete()
TariffPriceNew.objects.bulk_create(
TariffPriceNew(
date=tp_old.date,
vmpp_id=tp_old.vmpp_id,
tariff_category_id=tp_old.tariff_category_id,
price_pence=tp_old.price_pence,
)
for tp_old in TariffPriceOld.objects.all()
)
NCSOConcessionNew.objects.bulk_create(
NCSOConcessionNew(
date=c_old.date,
vmpp_id=c_old.vmpp_id,
drug=c_old.drug,
pack_size=c_old.pack_size,
price_pence=c_old.price_concession_pence,
)
for c_old in NCSOConcessionOld.objects.all()
)
|
Add task to migrate TariffPrice and NCSOConcession data
|
Add task to migrate TariffPrice and NCSOConcession data
This file can be removed once the migration is done. (Should it be a
data migration?)
|
Python
|
mit
|
ebmdatalab/openprescribing,annapowellsmith/openpresc,ebmdatalab/openprescribing,annapowellsmith/openpresc,ebmdatalab/openprescribing,annapowellsmith/openpresc,ebmdatalab/openprescribing,annapowellsmith/openpresc
|
Add task to migrate TariffPrice and NCSOConcession data
This file can be removed once the migration is done. (Should it be a
data migration?)
|
# This is a one-off command to migrate all TariffPrice and NCSOConcession data
# from the dmd app to the frontend app. It can be deleted once the dmd app has
# been removed.
from django.db import transaction
from django.core.management import BaseCommand
from dmd.models import TariffPrice as TariffPriceOld
from dmd.models import NCSOConcession as NCSOConcessionOld
from frontend.models import TariffPrice as TariffPriceNew
from frontend.models import NCSOConcession as NCSOConcessionNew
class Command(BaseCommand):
def handle(self, *args, **kwargs):
with transaction.atomic():
TariffPriceNew.objects.all().delete()
NCSOConcessionNew.objects.all().delete()
TariffPriceNew.objects.bulk_create(
TariffPriceNew(
date=tp_old.date,
vmpp_id=tp_old.vmpp_id,
tariff_category_id=tp_old.tariff_category_id,
price_pence=tp_old.price_pence,
)
for tp_old in TariffPriceOld.objects.all()
)
NCSOConcessionNew.objects.bulk_create(
NCSOConcessionNew(
date=c_old.date,
vmpp_id=c_old.vmpp_id,
drug=c_old.drug,
pack_size=c_old.pack_size,
price_pence=c_old.price_concession_pence,
)
for c_old in NCSOConcessionOld.objects.all()
)
|
<commit_before><commit_msg>Add task to migrate TariffPrice and NCSOConcession data
This file can be removed once the migration is done. (Should it be a
data migration?)<commit_after>
|
# This is a one-off command to migrate all TariffPrice and NCSOConcession data
# from the dmd app to the frontend app. It can be deleted once the dmd app has
# been removed.
from django.db import transaction
from django.core.management import BaseCommand
from dmd.models import TariffPrice as TariffPriceOld
from dmd.models import NCSOConcession as NCSOConcessionOld
from frontend.models import TariffPrice as TariffPriceNew
from frontend.models import NCSOConcession as NCSOConcessionNew
class Command(BaseCommand):
def handle(self, *args, **kwargs):
with transaction.atomic():
TariffPriceNew.objects.all().delete()
NCSOConcessionNew.objects.all().delete()
TariffPriceNew.objects.bulk_create(
TariffPriceNew(
date=tp_old.date,
vmpp_id=tp_old.vmpp_id,
tariff_category_id=tp_old.tariff_category_id,
price_pence=tp_old.price_pence,
)
for tp_old in TariffPriceOld.objects.all()
)
NCSOConcessionNew.objects.bulk_create(
NCSOConcessionNew(
date=c_old.date,
vmpp_id=c_old.vmpp_id,
drug=c_old.drug,
pack_size=c_old.pack_size,
price_pence=c_old.price_concession_pence,
)
for c_old in NCSOConcessionOld.objects.all()
)
|
Add task to migrate TariffPrice and NCSOConcession data
This file can be removed once the migration is done. (Should it be a
data migration?)# This is a one-off command to migrate all TariffPrice and NCSOConcession data
# from the dmd app to the frontend app. It can be deleted once the dmd app has
# been removed.
from django.db import transaction
from django.core.management import BaseCommand
from dmd.models import TariffPrice as TariffPriceOld
from dmd.models import NCSOConcession as NCSOConcessionOld
from frontend.models import TariffPrice as TariffPriceNew
from frontend.models import NCSOConcession as NCSOConcessionNew
class Command(BaseCommand):
def handle(self, *args, **kwargs):
with transaction.atomic():
TariffPriceNew.objects.all().delete()
NCSOConcessionNew.objects.all().delete()
TariffPriceNew.objects.bulk_create(
TariffPriceNew(
date=tp_old.date,
vmpp_id=tp_old.vmpp_id,
tariff_category_id=tp_old.tariff_category_id,
price_pence=tp_old.price_pence,
)
for tp_old in TariffPriceOld.objects.all()
)
NCSOConcessionNew.objects.bulk_create(
NCSOConcessionNew(
date=c_old.date,
vmpp_id=c_old.vmpp_id,
drug=c_old.drug,
pack_size=c_old.pack_size,
price_pence=c_old.price_concession_pence,
)
for c_old in NCSOConcessionOld.objects.all()
)
|
<commit_before><commit_msg>Add task to migrate TariffPrice and NCSOConcession data
This file can be removed once the migration is done. (Should it be a
data migration?)<commit_after># This is a one-off command to migrate all TariffPrice and NCSOConcession data
# from the dmd app to the frontend app. It can be deleted once the dmd app has
# been removed.
from django.db import transaction
from django.core.management import BaseCommand
from dmd.models import TariffPrice as TariffPriceOld
from dmd.models import NCSOConcession as NCSOConcessionOld
from frontend.models import TariffPrice as TariffPriceNew
from frontend.models import NCSOConcession as NCSOConcessionNew
class Command(BaseCommand):
def handle(self, *args, **kwargs):
with transaction.atomic():
TariffPriceNew.objects.all().delete()
NCSOConcessionNew.objects.all().delete()
TariffPriceNew.objects.bulk_create(
TariffPriceNew(
date=tp_old.date,
vmpp_id=tp_old.vmpp_id,
tariff_category_id=tp_old.tariff_category_id,
price_pence=tp_old.price_pence,
)
for tp_old in TariffPriceOld.objects.all()
)
NCSOConcessionNew.objects.bulk_create(
NCSOConcessionNew(
date=c_old.date,
vmpp_id=c_old.vmpp_id,
drug=c_old.drug,
pack_size=c_old.pack_size,
price_pence=c_old.price_concession_pence,
)
for c_old in NCSOConcessionOld.objects.all()
)
|
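Running the delete and both bulk_create calls inside one transaction.atomic() block makes the copy all-or-nothing: a failure partway rolls everything back instead of leaving the frontend tables half-populated. A generic sketch of the same pattern (model and field names illustrative):

from django.db import transaction

def copy_table(OldModel, NewModel, field_map):
    # field_map maps new field name -> old field name.
    with transaction.atomic():
        NewModel.objects.all().delete()
        NewModel.objects.bulk_create(
            NewModel(**{new: getattr(old, src) for new, src in field_map.items()})
            for old in OldModel.objects.all()
        )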
|
ab935c2fed108aeb1e7c0cad3ba97c48967c676f
|
flexx/util/eventhelper.py
|
flexx/util/eventhelper.py
|
import os
def iter_filenames(dir, ignore):
for dirpath, dirnames, filenames in os.walk(dir):
for fname in filenames:
filename = os.path.join(dirpath, fname)
if fname.endswith('.py'):
if fname not in ignore and filename not in ignore:
yield os.path.join(dirpath, fname)
def event_helper(dir, ignore=()):
""" Helper function to convert from the old event system to the new one.
This function does some basic analysis of the code in the given directory
and gives locations where properties are used as functions, or where
handlers seem incorrect. When used inside Pyzo, you can just click on
the filename to go there and fix it.
"""
props = set()
# Collect properties
for filename in iter_filenames(dir, ignore):
text = open(filename, 'rb').read().decode()
prevline = ''
for i, line in enumerate(text.splitlines()):
if prevline.strip().startswith(('@event.prop', '@event.readonly')):
funcname = line.strip().split('def ')[1].split('(')[0].strip()
props.add(funcname)
prevline = line
print('Found props/readonlies:')
print(props)
# Check correct use of properties
for filename in iter_filenames(dir, ignore):
text = open(filename, 'rb').read().decode()
for i, line in enumerate(text.splitlines()):
for prop in props:
t = '.%s(' % prop
if t in line:
print('Old use of prop %s in File "%s", line %i' % (prop, filename, i+1))
# Check correct use of handlers
for filename in iter_filenames(dir, ignore):
text = open(filename, 'rb').read().decode()
prevline = ''
for i, line in enumerate(text.splitlines()):
if prevline.strip().startswith('@event.connect'):
if '*events' not in line:
funcname = line.strip().split('def ')[1].split('(')[0].strip()
print('Suspicious handler %s in File "%s", line %i' % (funcname, filename, i+1))
prevline = line
if __name__ == '__main__':
event_helper(r'd:\dev\pylib\arbiter\arbiter\viz', ['transform.py'])
|
Add simple util to help convert to new event system
|
Add simple util to help convert to new event system
|
Python
|
bsd-2-clause
|
jrversteegh/flexx,JohnLunzer/flexx,JohnLunzer/flexx,zoofIO/flexx,zoofIO/flexx,JohnLunzer/flexx,jrversteegh/flexx
|
Add simple util to help convert to new event system
|
import os
def iter_filenames(dir, ignore):
for dirpath, dirnames, filenames in os.walk(dir):
for fname in filenames:
filename = os.path.join(dirpath, fname)
if fname.endswith('.py'):
if fname not in ignore and filename not in ignore:
yield os.path.join(dirpath, fname)
def event_helper(dir, ignore=()):
""" Helper function to convert from the old event system to the new one.
This function does some basic analysis of the code in the given directory
and gives locations where properties are used as functions, or where
handlers seem incorrect. When used inside Pyzo, you can just click on
the filename to go there and fix it.
"""
props = set()
# Collect properties
for filename in iter_filenames(dir, ignore):
text = open(filename, 'rb').read().decode()
prevline = ''
for i, line in enumerate(text.splitlines()):
if prevline.strip().startswith(('@event.prop', '@event.readonly')):
funcname = line.strip().split('def ')[1].split('(')[0].strip()
props.add(funcname)
prevline = line
print('Found props/readonlies:')
print(props)
# Check correct use of properties
for filename in iter_filenames(dir, ignore):
text = open(filename, 'rb').read().decode()
for i, line in enumerate(text.splitlines()):
for prop in props:
t = '.%s(' % prop
if t in line:
print('Old use of prop %s in File "%s", line %i' % (prop, filename, i+1))
# Check correct use of handlers
for filename in iter_filenames(dir, ignore):
text = open(filename, 'rb').read().decode()
prevline = ''
for i, line in enumerate(text.splitlines()):
if prevline.strip().startswith('@event.connect'):
if '*events' not in line:
funcname = line.strip().split('def ')[1].split('(')[0].strip()
print('Suspicious handler %s in File "%s", line %i' % (funcname, filename, i+1))
prevline = line
if __name__ == '__main__':
event_helper(r'd:\dev\pylib\arbiter\arbiter\viz', ['transform.py'])
|
<commit_before><commit_msg>Add simple util to help convert to new event system<commit_after>
|
import os
def iter_filenames(dir, ignore):
for dirpath, dirnames, filenames in os.walk(dir):
for fname in filenames:
filename = os.path.join(dirpath, fname)
if fname.endswith('.py'):
if fname not in ignore and filename not in ignore:
yield os.path.join(dirpath, fname)
def event_helper(dir, ignore=()):
""" Helper function to convert from the old event system to the new one.
This function does some basic analysis of the code in the given directory
and gives locations where properties are used as functions, or where
handlers seem incorrect. When used inside Pyzo, you can just click on
the filename to go there and fix it.
"""
props = set()
# Collect properties
for filename in iter_filenames(dir, ignore):
text = open(filename, 'rb').read().decode()
prevline = ''
for i, line in enumerate(text.splitlines()):
if prevline.strip().startswith(('@event.prop', '@event.readonly')):
funcname = line.strip().split('def ')[1].split('(')[0].strip()
props.add(funcname)
prevline = line
print('Found props/readonlies:')
print(props)
# Check correct use of properties
for filename in iter_filenames(dir, ignore):
text = open(filename, 'rb').read().decode()
for i, line in enumerate(text.splitlines()):
for prop in props:
t = '.%s(' % prop
if t in line:
print('Old use of prop %s in File "%s", line %i' % (prop, filename, i+1))
# Check correct use of handlers
for filename in iter_filenames(dir, ignore):
text = open(filename, 'rb').read().decode()
prevline = ''
for i, line in enumerate(text.splitlines()):
if prevline.strip().startswith('@event.connect'):
if '*events' not in line:
funcname = line.strip().split('def ')[1].split('(')[0].strip()
print('Suspicious handler %s in File "%s", line %i' % (funcname, filename, i+1))
prevline = line
if __name__ == '__main__':
event_helper(r'd:\dev\pylib\arbiter\arbiter\viz', ['transform.py'])
|
Add simple util to help convert to new event systemimport os
def iter_filenames(dir, ignore):
for dirpath, dirnames, filenames in os.walk(dir):
for fname in filenames:
filename = os.path.join(dirpath, fname)
if fname.endswith('.py'):
if fname not in ignore and filename not in ignore:
yield os.path.join(dirpath, fname)
def event_helper(dir, ignore=()):
""" Helper function to convert from the old event system to the new one.
This function does some basic analysis of the code in the given directory
and gives locations where properties are used as functions, or where
handlers seem incorrect. When used inside Pyzo, you can just click on
the filename to go there and fix it.
"""
props = set()
# Collect properties
for filename in iter_filenames(dir, ignore):
text = open(filename, 'rb').read().decode()
prevline = ''
for i, line in enumerate(text.splitlines()):
if prevline.strip().startswith(('@event.prop', '@event.readonly')):
funcname = line.strip().split('def ')[1].split('(')[0].strip()
props.add(funcname)
prevline = line
print('Found props/readonlies:')
print(props)
# Check correct use of properties
for filename in iter_filenames(dir, ignore):
text = open(filename, 'rb').read().decode()
for i, line in enumerate(text.splitlines()):
for prop in props:
t = '.%s(' % prop
if t in line:
print('Old use of prop %s in File "%s", line %i' % (prop, filename, i+1))
# Check correct use of handlers
for filename in iter_filenames(dir, ignore):
text = open(filename, 'rb').read().decode()
prevline = ''
for i, line in enumerate(text.splitlines()):
if prevline.strip().startswith('@event.connect'):
if '*events' not in line:
funcname = line.strip().split('def ')[1].split('(')[0].strip()
print('Suspicious handler %s in File "%s", line %i' % (funcname, filename, i+1))
prevline = line
if __name__ == '__main__':
event_helper(r'd:\dev\pylib\arbiter\arbiter\viz', ['transform.py'])
|
<commit_before><commit_msg>Add simple util to help convert to new event system<commit_after>import os
def iter_filenames(dir, ignore):
for dirpath, dirnames, filenames in os.walk(dir):
for fname in filenames:
filename = os.path.join(dirpath, fname)
if fname.endswith('.py'):
if fname not in ignore and filename not in ignore:
yield os.path.join(dirpath, fname)
def event_helper(dir, ignore=()):
""" Helper function to convert from the old event system to the new one.
This function does some basic analysis of the code in the given directory
and gives locations where properties are used as functions, or where
handlers seem incorrect. When used inside Pyzo, you can just click on
the filename to go there and fix it.
"""
props = set()
# Collect properties
for filename in iter_filenames(dir, ignore):
text = open(filename, 'rb').read().decode()
prevline = ''
for i, line in enumerate(text.splitlines()):
if prevline.strip().startswith(('@event.prop', '@event.readonly')):
funcname = line.strip().split('def ')[1].split('(')[0].strip()
props.add(funcname)
prevline = line
print('Found props/readonlies:')
print(props)
# Check correct use of properties
for filename in iter_filenames(dir, ignore):
text = open(filename, 'rb').read().decode()
for i, line in enumerate(text.splitlines()):
for prop in props:
t = '.%s(' % prop
if t in line:
print('Old use of prop %s in File "%s", line %i' % (prop, filename, i+1))
# Check correct use of handlers
for filename in iter_filenames(dir, ignore):
text = open(filename, 'rb').read().decode()
prevline = ''
for i, line in enumerate(text.splitlines()):
if prevline.strip().startswith('@event.connect'):
if '*events' not in line:
funcname = line.strip().split('def ')[1].split('(')[0].strip()
print('Suspicious handler %s in File "%s", line %i' % (funcname, filename, i+1))
prevline = line
if __name__ == '__main__':
event_helper(r'd:\dev\pylib\arbiter\arbiter\viz', ['transform.py'])
|
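The two passes above hinge on one idea: once a method is decorated with @event.prop it is used attribute-style, so any surviving call-style use obj.name(...) is old-style code. A minimal illustration of that scan on an inline snippet:

src = '''
@event.prop
def foo(self, v=0):
    return v

def update(self):
    self.foo(3)   # old style; should now be `self.foo = 3`
'''
props = set()
prev = ''
for line in src.splitlines():
    if prev.strip().startswith(('@event.prop', '@event.readonly')):
        props.add(line.strip().split('def ')[1].split('(')[0].strip())
    prev = line
for i, line in enumerate(src.splitlines()):
    for prop in props:
        if '.%s(' % prop in line:
            print('Old use of prop %s at line %i' % (prop, i + 1))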
|
1bb34ccef2f5c622f258a7dbe375e3a62b6cf63d
|
py/magical-string.py
|
py/magical-string.py
|
class Solution(object):
def magicalString(self, n):
"""
:type n: int
:rtype: int
"""
if n == 0:
return 0
elif n <= 3:
return 1
q = [1, 2, 2]
cur = 1
skip = 2
        cnt_1 = 0       # both counters start at zero: the loop itself walks
        total_cnt = 0   # the seed groups "1" and "22" before it starts appending
for v in q:
if skip <= 0:
q += [cur] * v
v = min(n - total_cnt, v)
total_cnt += v
if cur == 1:
cnt_1 += v
if total_cnt == n:
return cnt_1
cur = 3 - cur
skip -= 1
|
Add py solution for 481. Magical String
|
Add py solution for 481. Magical String
481. Magical String: https://leetcode.com/problems/magical-string/
|
Python
|
apache-2.0
|
ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode
|
Add py solution for 481. Magical String
481. Magical String: https://leetcode.com/problems/magical-string/
|
class Solution(object):
def magicalString(self, n):
"""
:type n: int
:rtype: int
"""
if n == 0:
return 0
elif n <= 3:
return 1
q = [1, 2, 2]
cur = 1
skip = 2
        cnt_1 = 0       # both counters start at zero: the loop itself walks
        total_cnt = 0   # the seed groups "1" and "22" before it starts appending
for v in q:
if skip <= 0:
q += [cur] * v
v = min(n - total_cnt, v)
total_cnt += v
if cur == 1:
cnt_1 += v
if total_cnt == n:
return cnt_1
cur = 3 - cur
skip -= 1
|
<commit_before><commit_msg>Add py solution for 481. Magical String
481. Magical String: https://leetcode.com/problems/magical-string/<commit_after>
|
class Solution(object):
def magicalString(self, n):
"""
:type n: int
:rtype: int
"""
if n == 0:
return 0
elif n <= 3:
return 1
q = [1, 2, 2]
cur = 1
skip = 2
        cnt_1 = 0       # both counters start at zero: the loop itself walks
        total_cnt = 0   # the seed groups "1" and "22" before it starts appending
for v in q:
if skip <= 0:
q += [cur] * v
v = min(n - total_cnt, v)
total_cnt += v
if cur == 1:
cnt_1 += v
if total_cnt == n:
return cnt_1
cur = 3 - cur
skip -= 1
|
Add py solution for 481. Magical String
481. Magical String: https://leetcode.com/problems/magical-string/class Solution(object):
def magicalString(self, n):
"""
:type n: int
:rtype: int
"""
if n == 0:
return 0
elif n <= 3:
return 1
q = [1, 2, 2]
cur = 1
skip = 2
        cnt_1 = 0       # both counters start at zero: the loop itself walks
        total_cnt = 0   # the seed groups "1" and "22" before it starts appending
for v in q:
if skip <= 0:
q += [cur] * v
v = min(n - total_cnt, v)
total_cnt += v
if cur == 1:
cnt_1 += v
if total_cnt == n:
return cnt_1
cur = 3 - cur
skip -= 1
|
<commit_before><commit_msg>Add py solution for 481. Magical String
481. Magical String: https://leetcode.com/problems/magical-string/<commit_after>class Solution(object):
def magicalString(self, n):
"""
:type n: int
:rtype: int
"""
if n == 0:
return 0
elif n <= 3:
return 1
q = [1, 2, 2]
cur = 1
skip = 2
        cnt_1 = 0       # both counters start at zero: the loop itself walks
        total_cnt = 0   # the seed groups "1" and "22" before it starts appending
for v in q:
if skip <= 0:
q += [cur] * v
v = min(n - total_cnt, v)
total_cnt += v
if cur == 1:
cnt_1 += v
if total_cnt == n:
return cnt_1
cur = 3 - cur
skip -= 1
|
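As a reference for the counting above: the magical string "1221121221221121122..." can be generated directly by reading group lengths off the string itself while alternating the group value between 1 and 2, giving a brute-force check of the first few answers:

def brute_count(n):
    # Generate the magical string, then count 1's among its first n characters.
    s = [1, 2, 2]
    i, cur = 2, 1          # s[i] is the next group length; values alternate
    while len(s) < n:
        s += [cur] * s[i]
        i += 1
        cur = 3 - cur
    return s[:n].count(1)

assert [brute_count(n) for n in range(1, 11)] == [1, 1, 1, 2, 3, 3, 4, 4, 4, 5]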
|
654d21100ff2908af6c44e2d2d71ba92b1e96722
|
script/lib/config.py
|
script/lib/config.py
|
#!/usr/bin/env python
import platform
import sys
BASE_URL = 'https://gh-contractor-zcbenz.s3.amazonaws.com/libchromiumcontent'
LIBCHROMIUMCONTENT_COMMIT = 'aa87035cc012ce0d533bb56b947bca81a6e71b82'
ARCH = {
'cygwin': '32bit',
'darwin': '64bit',
'linux2': platform.architecture()[0],
'win32': '32bit',
}[sys.platform]
DIST_ARCH = {
'32bit': 'ia32',
'64bit': 'x64',
}[ARCH]
TARGET_PLATFORM = {
'cygwin': 'win32',
'darwin': 'darwin',
'linux2': 'linux',
'win32': 'win32',
}[sys.platform]
verbose_mode = False
def enable_verbose_mode():
print 'Running in verbose mode'
global verbose_mode
verbose_mode = True
def is_verbose_mode():
return verbose_mode
|
#!/usr/bin/env python
import platform
import sys
BASE_URL = 'https://gh-contractor-zcbenz.s3.amazonaws.com/libchromiumcontent'
LIBCHROMIUMCONTENT_COMMIT = 'e375124044f9044ac88076eba0cd17361ee0997c'
ARCH = {
'cygwin': '32bit',
'darwin': '64bit',
'linux2': platform.architecture()[0],
'win32': '32bit',
}[sys.platform]
DIST_ARCH = {
'32bit': 'ia32',
'64bit': 'x64',
}[ARCH]
TARGET_PLATFORM = {
'cygwin': 'win32',
'darwin': 'darwin',
'linux2': 'linux',
'win32': 'win32',
}[sys.platform]
verbose_mode = False
def enable_verbose_mode():
print 'Running in verbose mode'
global verbose_mode
verbose_mode = True
def is_verbose_mode():
return verbose_mode
|
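The three lookup tables key everything off sys.platform (still 'linux2' under the Python 2 these scripts target); a quick check of how they chain on the host running them:

import platform
import sys

ARCH = {'cygwin': '32bit', 'darwin': '64bit',
        'linux2': platform.architecture()[0], 'win32': '32bit'}[sys.platform]
DIST_ARCH = {'32bit': 'ia32', '64bit': 'x64'}[ARCH]
print('%s -> %s -> %s' % (sys.platform, ARCH, DIST_ARCH))  # e.g. linux2 -> 64bit -> x64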
Upgrade libchromiumcontent to remove dom storage quota
|
Upgrade libchromiumcontent to remove dom storage quota
Closes #897.
|
Python
|
mit
|
beni55/electron,vHanda/electron,leftstick/electron,mubassirhayat/electron,simongregory/electron,voidbridge/electron,tomashanacek/electron,nicobot/electron,MaxWhere/electron,pandoraui/electron,farmisen/electron,shennushi/electron,thomsonreuters/electron,minggo/electron,LadyNaggaga/electron,MaxGraey/electron,lrlna/electron,meowlab/electron,mubassirhayat/electron,Neron-X5/electron,gabriel/electron,Gerhut/electron,cos2004/electron,jiaz/electron,setzer777/electron,preco21/electron,jsutcodes/electron,chriskdon/electron,rsvip/electron,nicholasess/electron,shaundunne/electron,sshiting/electron,thomsonreuters/electron,rajatsingla28/electron,Neron-X5/electron,gerhardberger/electron,jaanus/electron,jjz/electron,JussMee15/electron,Jonekee/electron,gerhardberger/electron,jcblw/electron,bpasero/electron,posix4e/electron,trankmichael/electron,mrwizard82d1/electron,kcrt/electron,cqqccqc/electron,jjz/electron,leethomas/electron,ankitaggarwal011/electron,aichingm/electron,bpasero/electron,Gerhut/electron,hokein/atom-shell,adamjgray/electron,Jonekee/electron,gerhardberger/electron,maxogden/atom-shell,vipulroxx/electron,jhen0409/electron,natgolov/electron,howmuchcomputer/electron,michaelchiche/electron,RIAEvangelist/electron,Jacobichou/electron,faizalpribadi/electron,nekuz0r/electron,JussMee15/electron,mhkeller/electron,dkfiresky/electron,leolujuyi/electron,twolfson/electron,maxogden/atom-shell,rreimann/electron,kokdemo/electron,deed02392/electron,howmuchcomputer/electron,leethomas/electron,bpasero/electron,digideskio/electron,bright-sparks/electron,electron/electron,kenmozi/electron,vHanda/electron,jannishuebl/electron,jaanus/electron,cqqccqc/electron,jjz/electron,leethomas/electron,the-ress/electron,Floato/electron,RobertJGabriel/electron,nicholasess/electron,christian-bromann/electron,matiasinsaurralde/electron,jonatasfreitasv/electron,edulan/electron,bright-sparks/electron,nekuz0r/electron,arusakov/electron,adcentury/electron,GoooIce/electron,meowlab/electron,hokein/atom-shell,IonicaBizauKitchen/electron,pirafrank/electron,gerhardberger/electron,jannishuebl/electron,jcblw/electron,benweissmann/electron,yan-foto/electron,shockone/electron,tincan24/electron,timruffles/electron,GoooIce/electron,thingsinjars/electron,coderhaoxin/electron,xiruibing/electron,smczk/electron,icattlecoder/electron,baiwyc119/electron,deepak1556/atom-shell,DivyaKMenon/electron,Rokt33r/electron,edulan/electron,shockone/electron,coderhaoxin/electron,MaxGraey/electron,kazupon/electron,electron/electron,astoilkov/electron,destan/electron,maxogden/atom-shell,vipulroxx/electron,jhen0409/electron,natgolov/electron,howmuchcomputer/electron,michaelchiche/electron,RIAEvangelist/electron,Jacobichou/electron,faizalpribadi/electron,nekuz0r/electron,JussMee15/electron,Jonekee/electron,gerhardberger/electron,maxogden/atom-shell,mattotodd/electron,gbn972/electron,brenca/electron,shaundunne/electron,zhakui/electron,hokein/atom-shell,MaxGraey/electron,dongjoon-hyun/electron,xiruibing/electron,medixdev/electron,leftstick/electron,eriser/electron,joneit/electron,brave/electron,fffej/electron,synaptek/electron,John-Lin/electron,smczk/electron,systembugtj/electron,stevekinney/electron,thingsinjars/electron,posix4e/electron,arturts/electron,simonfork/electron,micalan/electron,bruce/electron,brave/muon,bwiggs/electron,jtburke/electron,kokdemo/electron,jlhbaseball15/electron,kenmozi/electron,bbondy/electron,pandoraui/electron,bobwol/electron,aaron-goshine/electron,sircharleswatson/electron,chrisswk/electron,subblue/electron,astoilkov/electron,carsonmcdonald/electron,cos2004/electron,wolfflow/electron,nagyistoce/electron-atom-shell,yalexx/electron,kazupon/electron,John-Lin/electron,bitemyapp/electron,jlhbaseball15/electron,simonfork/electron,LadyNaggaga/electron,bwiggs/electron,jsutcodes/electron,the-ress/electron,rreimann/electron,shennushi/electron,iftekeriba/electron,davazp/electron,xiruibing/electron,christian-bromann/electron,astoilkov/electron,webmechanicx/electron,aaron-goshine/electron,pombredanne/electron,darwin/electron,DivyaKMenon/electron,vaginessa/electron,dongjoon-hyun/electron,voidbridge/electron,jcblw/electron,bruce/electron,jsutcodes/electron,bruce/electron,seanchas116/electron,seanchas116/electron,ervinb/electron,bobwol/electron,christian-bromann/electron,jacksondc/electron,preco21/electron,preco21/electron,chriskdon/electron,nicholasess/electron,oiledCode/electron,vaginessa/electron,mhkeller/electron,tylergibson/electron,pirafrank/electron,mjaniszew/electron,eriser/electron,JussMee15/electron,webmechanicx/electron,mattotodd/electron,micalan/electron,sircharleswatson/electron,the-ress/electron,robinvandernoord/electron,dkfiresky/electron,wolfflow/electron,yan-foto/electron,darwin/electron,lzpfmh/electron,thomsonreuters/electron,Rokt33r/electron,sircharleswatson/electron,chriskdon/electron,aliib/electron,mattotodd/electron,renaesop/electron,sky7sea/electron,arturts/electron,jaanus/electron,adamjgray/electron,shiftkey/electron,adcentury/electron,davazp/electron,dahal/electron,the-ress/electron,electron/electron,adamjgray/electron,oiledCode/electron,chriskdon/electron,noikiy/electron,soulteary/electron,jjz/electron,gabrielPeart/electron,MaxWhere/electron,electron/electron,ervinb/electron,bbondy/electron,edulan/electron,joneit/electron,sircharleswatson/electron,bright-sparks/electron,the-ress/electron,vaginessa/electron,adcentury/electron,yalexx/electron,tomashanacek/electron,nagyistoce/electron-atom-shell,farmisen/electron,SufianHassan/electron,felixrieseberg/electron,MaxWhere/electron,tomashanacek/electron,mhkeller/electron,kenmozi/electron,ankitaggarwal011/electron,robinvandernoord/electron,sky7sea/electron,takashi/electron,nicobot/electron,shennushi/electron,LadyNaggaga/electron,egoist/electron,mirrh/electron,seanchas116/electron,brenca/electron,gabrielPeart/electron,chriskdon/electron,Neron-X5/electron,astoilkov/electron,gabrielPeart/electron,shockone/electron,ianscrivener/electron,jhen0409/electron,miniak/electron,sshiting/electron,tonyganch/electron,edulan/electron,mattotodd/electron,bobwol/electron,stevemao/electron,matiasinsaurralde/electron,IonicaBizauKitchen/electron,sshiting/electron,the-ress/electron,vipulroxx/electron,iftekeriba/electron,sircharleswatson/electron,BionicClick/electron,mirrh/electron,fritx/electron,joneit/electron,kikong/electron,mattotodd/electron,mirrh/electron,mubassirhayat/electron,soulteary/electron,ankitaggarwal011/electron,medixdev/electron,deed02392/electron,nicholasess/electron,webmechanicx/electron,RIAEvangelist/electron,eric-seekas/electron,setzer777/electron,leolujuyi/electron,rhencke/electron,davazp/electron,jacksondc/electron,jlord/electron,roadev/electron,mubassirhayat/electron,miniak/electron,jcblw/electron,tinydew4/electron,benweissmann/electron,aaron-goshine/electron,neutrous/electron,stevekinney/electron,micalan/electron,simonfork/electron,d-salas/electron,eriser/electron,RobertJGabriel/electron,abhishekgahlot/electron,gamedevsam/electron,aecca/electron,minggo/electron,adcentury/electron,cos2004/electron,fffej/electron,abhishekgahlot/electron,pirafrank/electron,aliib/electron,bright-sparks/electron,tincan24/electron,Andrey-Pavlov/electron,ankitaggarwal011/electron,aecca/electron,aecca/electron,voidbridge/electron,wan-qy/electron,digideskio/electron,rajatsingla28/electron,Neron-X5/electron,faizalpribadi/electron,simonfork/electron,tincan24/electron,dkfiresky/electron,christian-bromann/electron,posix4e/electron,John-Lin/electron,trigrass2/electron,jlhbaseball15/electron,chrisswk/electron,jaanus/electron,egoist/electron,vaginessa/electron,fritx/electron,kostia/e
lectron,twolfson/electron,preco21/electron,ianscrivener/electron,arusakov/electron,bbondy/electron,noikiy/electron,nicholasess/electron,brenca/electron,jlhbaseball15/electron,astoilkov/electron,carsonmcdonald/electron,vHanda/electron,GoooIce/electron,fffej/electron,sircharleswatson/electron,renaesop/electron,tincan24/electron,joaomoreno/atom-shell,mirrh/electron,natgolov/electron,trankmichael/electron,Gerhut/electron,baiwyc119/electron,yalexx/electron,oiledCode/electron,Evercoder/electron,miniak/electron,tylergibson/electron,kazupon/electron,gabrielPeart/electron,dongjoon-hyun/electron,Andrey-Pavlov/electron,yalexx/electron,tonyganch/electron,shockone/electron,nicobot/electron,benweissmann/electron,meowlab/electron,sky7sea/electron,medixdev/electron,shaundunne/electron,setzer777/electron,farmisen/electron,wolfflow/electron,SufianHassan/electron,thompsonemerson/electron,RIAEvangelist/electron,Faiz7412/electron,tonyganch/electron,gbn972/electron,natgolov/electron,Floato/electron,evgenyzinoviev/electron,systembugtj/electron,rajatsingla28/electron,mubassirhayat/electron,Evercoder/electron,bruce/electron,etiktin/electron,fireball-x/atom-shell,joaomoreno/atom-shell,natgolov/electron,biblerule/UMCTelnetHub,kokdemo/electron,nicobot/electron,aliib/electron,smczk/electron,fomojola/electron,deed02392/electron,smczk/electron,stevemao/electron,micalan/electron,Evercoder/electron,etiktin/electron,gstack/infinium-shell,icattlecoder/electron,matiasinsaurralde/electron,d-salas/electron,adamjgray/electron,micalan/electron,systembugtj/electron,simongregory/electron,tylergibson/electron,bwiggs/electron,gstack/infinium-shell,kenmozi/electron,jaanus/electron,zhakui/electron,noikiy/electron,mattdesl/electron,vipulroxx/electron,GoooIce/electron,rsvip/electron,Evercoder/electron,posix4e/electron,medixdev/electron,evgenyzinoviev/electron,Neron-X5/electron,jhen0409/electron,xfstudio/electron,neutrous/electron,xfstudio/electron,ervinb/electron,RobertJGabriel/electron,deepak1556/atom-shell,noikiy/electron,BionicClick/electron,robinvandernoord/electron,meowlab/electron,Zagorakiss/electron,destan/electron,jonatasfreitasv/electron,preco21/electron,dkfiresky/electron,leftstick/electron,tomashanacek/electron,jiaz/electron,yan-foto/electron,faizalpribadi/electron,trankmichael/electron,BionicClick/electron,rajatsingla28/electron,the-ress/electron,tincan24/electron,shaundunne/electron,fomojola/electron,jjz/electron,evgenyzinoviev/electron,Evercoder/electron,destan/electron,tylergibson/electron,etiktin/electron,electron/electron,saronwei/electron,brave/muon,deepak1556/atom-shell,takashi/electron,kostia/electron,subblue/electron,shiftkey/electron,howmuchcomputer/electron,egoist/electron,gabriel/electron,egoist/electron,nagyistoce/electron-atom-shell,kostia/electron,kostia/electron,carsonmcdonald/electron,lzpfmh/electron,Zagorakiss/electron,LadyNaggaga/electron,gerhardberger/electron,tomashanacek/electron,bobwol/electron,leolujuyi/electron,eric-seekas/electron,vaginessa/electron,fireball-x/atom-shell,gstack/infinium-shell,yan-foto/electron,synaptek/electron,jannishuebl/electron,soulteary/electron,aaron-goshine/electron,roadev/electron,aichingm/electron,Rokt33r/electron,jsutcodes/electron,bitemyapp/electron,arusakov/electron,joaomoreno/atom-shell,jtburke/electron,brave/electron,Rokt33r/electron,arturts/electron,ervinb/electron,kazupon/electron,JussMee15/electron,bbondy/electron,Neron-X5/electron,jlord/electron,tomashanacek/electron,wan-qy/electron,thompsonemerson/electron,michaelchiche/electron,destan/electron,greyhwndz/electron,y
an-foto/electron,ianscrivener/electron,stevekinney/electron,rreimann/electron,renaesop/electron,digideskio/electron,shiftkey/electron,simongregory/electron,farmisen/electron,MaxWhere/electron,bright-sparks/electron,egoist/electron,saronwei/electron,pirafrank/electron,coderhaoxin/electron,bbondy/electron,rajatsingla28/electron,simongregory/electron,Floato/electron,Andrey-Pavlov/electron,cqqccqc/electron,maxogden/atom-shell,eriser/electron,digideskio/electron,mjaniszew/electron,gamedevsam/electron,rsvip/electron,micalan/electron,jacksondc/electron,Floato/electron,digideskio/electron,jjz/electron,deepak1556/atom-shell,digideskio/electron,bpasero/electron,voidbridge/electron,biblerule/UMCTelnetHub,arturts/electron,gamedevsam/electron,DivyaKMenon/electron,adcentury/electron,destan/electron,bwiggs/electron,neutrous/electron,Jacobichou/electron,JesselJohn/electron,IonicaBizauKitchen/electron,shiftkey/electron,mhkeller/electron,jtburke/electron,Rokt33r/electron,fireball-x/atom-shell,oiledCode/electron,beni55/electron,biblerule/UMCTelnetHub,wolfflow/electron,shennushi/electron,gerhardberger/electron,shennushi/electron,fireball-x/atom-shell,brave/muon,Jacobichou/electron,xiruibing/electron,nekuz0r/electron,deepak1556/atom-shell,RobertJGabriel/electron,subblue/electron,SufianHassan/electron,subblue/electron,abhishekgahlot/electron,wolfflow/electron,twolfson/electron,miniak/electron,faizalpribadi/electron,Zagorakiss/electron,voidbridge/electron,jlord/electron,thingsinjars/electron,eriser/electron,leolujuyi/electron,mirrh/electron,michaelchiche/electron,fomojola/electron,jacksondc/electron,trigrass2/electron,dkfiresky/electron,iftekeriba/electron,christian-bromann/electron,benweissmann/electron,aichingm/electron,saronwei/electron,stevemao/electron,preco21/electron,renaesop/electron,edulan/electron,deed02392/electron,IonicaBizauKitchen/electron,jhen0409/electron,gbn972/electron,wolfflow/electron,electron/electron,stevekinney/electron,stevekinney/electron,natgolov/electron,thompsonemerson/electron,pombredanne/electron,sky7sea/electron,setzer777/electron,seanchas116/electron,smczk/electron,JesselJohn/electron,howmuchcomputer/electron,SufianHassan/electron,dahal/electron,beni55/electron,pirafrank/electron,MaxWhere/electron,medixdev/electron,mjaniszew/electron,systembugtj/electron,ervinb/electron,joaomoreno/atom-shell,jonatasfreitasv/electron,eric-seekas/electron,gbn972/electron,beni55/electron,hokein/atom-shell,mrwizard82d1/electron,kazupon/electron,RIAEvangelist/electron,IonicaBizauKitchen/electron,minggo/electron,bruce/electron,gabriel/electron,felixrieseberg/electron,joneit/electron,jiaz/electron,tincan24/electron,xfstudio/electron,gamedevsam/electron,mattdesl/electron,matiasinsaurralde/electron,howmuchcomputer/electron,jlord/electron,trigrass2/electron,baiwyc119/electron,vipulroxx/electron,pombredanne/electron,Faiz7412/electron,webmechanicx/electron,posix4e/electron,thompsonemerson/electron,rsvip/electron,JesselJohn/electron,roadev/electron,electron/electron,zhakui/electron,eric-seekas/electron,JesselJohn/electron,voidbridge/electron,chriskdon/electron,d-salas/electron,minggo/electron,rajatsingla28/electron,MaxGraey/electron,lrlna/electron,rhencke/electron,eric-seekas/electron,JesselJohn/electron,ianscrivener/electron,joneit/electron,takashi/electron,twolfson/electron,mjaniszew/electron,RobertJGabriel/electron,kokdemo/electron,kazupon/electron,stevemao/electron,abhishekgahlot/electron,jiaz/electron,xfstudio/electron,JesselJohn/electron,MaxWhere/electron,iftekeriba/electron,kcrt/electron,John-Lin/electro
n,fabien-d/electron,farmisen/electron,evgenyzinoviev/electron,trankmichael/electron,Zagorakiss/electron,chrisswk/electron,cos2004/electron,webmechanicx/electron,aecca/electron,gabrielPeart/electron,setzer777/electron,biblerule/UMCTelnetHub,jsutcodes/electron,d-salas/electron,Andrey-Pavlov/electron,aichingm/electron,kikong/electron,maxogden/atom-shell,subblue/electron,lzpfmh/electron,shaundunne/electron,jhen0409/electron,greyhwndz/electron,vHanda/electron,gamedevsam/electron,trankmichael/electron,Ivshti/electron,bpasero/electron,etiktin/electron,lrlna/electron,Floato/electron,gamedevsam/electron,Rokt33r/electron,jtburke/electron,carsonmcdonald/electron,stevemao/electron,pombredanne/electron,leethomas/electron,mjaniszew/electron,arturts/electron,kenmozi/electron,simonfork/electron,bitemyapp/electron,RIAEvangelist/electron,icattlecoder/electron,soulteary/electron,timruffles/electron,meowlab/electron,jaanus/electron,matiasinsaurralde/electron,michaelchiche/electron,leftstick/electron,joaomoreno/atom-shell,anko/electron,arusakov/electron,takashi/electron,simonfork/electron,dahal/electron,roadev/electron
|
#!/usr/bin/env python
import platform
import sys
BASE_URL = 'https://gh-contractor-zcbenz.s3.amazonaws.com/libchromiumcontent'
LIBCHROMIUMCONTENT_COMMIT = 'aa87035cc012ce0d533bb56b947bca81a6e71b82'
ARCH = {
'cygwin': '32bit',
'darwin': '64bit',
'linux2': platform.architecture()[0],
'win32': '32bit',
}[sys.platform]
DIST_ARCH = {
'32bit': 'ia32',
'64bit': 'x64',
}[ARCH]
TARGET_PLATFORM = {
'cygwin': 'win32',
'darwin': 'darwin',
'linux2': 'linux',
'win32': 'win32',
}[sys.platform]
verbose_mode = False
def enable_verbose_mode():
print 'Running in verbose mode'
global verbose_mode
verbose_mode = True
def is_verbose_mode():
return verbose_mode
Upgrade libchromiumcontent to remove dom storage quota
Closes #897.
|
#!/usr/bin/env python
import platform
import sys
BASE_URL = 'https://gh-contractor-zcbenz.s3.amazonaws.com/libchromiumcontent'
LIBCHROMIUMCONTENT_COMMIT = 'e375124044f9044ac88076eba0cd17361ee0997c'
ARCH = {
'cygwin': '32bit',
'darwin': '64bit',
'linux2': platform.architecture()[0],
'win32': '32bit',
}[sys.platform]
DIST_ARCH = {
'32bit': 'ia32',
'64bit': 'x64',
}[ARCH]
TARGET_PLATFORM = {
'cygwin': 'win32',
'darwin': 'darwin',
'linux2': 'linux',
'win32': 'win32',
}[sys.platform]
verbose_mode = False
def enable_verbose_mode():
print 'Running in verbose mode'
global verbose_mode
verbose_mode = True
def is_verbose_mode():
return verbose_mode
|
<commit_before>#!/usr/bin/env python
import platform
import sys
BASE_URL = 'https://gh-contractor-zcbenz.s3.amazonaws.com/libchromiumcontent'
LIBCHROMIUMCONTENT_COMMIT = 'aa87035cc012ce0d533bb56b947bca81a6e71b82'
ARCH = {
'cygwin': '32bit',
'darwin': '64bit',
'linux2': platform.architecture()[0],
'win32': '32bit',
}[sys.platform]
DIST_ARCH = {
'32bit': 'ia32',
'64bit': 'x64',
}[ARCH]
TARGET_PLATFORM = {
'cygwin': 'win32',
'darwin': 'darwin',
'linux2': 'linux',
'win32': 'win32',
}[sys.platform]
verbose_mode = False
def enable_verbose_mode():
print 'Running in verbose mode'
global verbose_mode
verbose_mode = True
def is_verbose_mode():
return verbose_mode
<commit_msg>Upgrade libchromiumcontent to remove dom storage quota
Closes #897.<commit_after>
|
#!/usr/bin/env python
import platform
import sys
BASE_URL = 'https://gh-contractor-zcbenz.s3.amazonaws.com/libchromiumcontent'
LIBCHROMIUMCONTENT_COMMIT = 'e375124044f9044ac88076eba0cd17361ee0997c'
ARCH = {
'cygwin': '32bit',
'darwin': '64bit',
'linux2': platform.architecture()[0],
'win32': '32bit',
}[sys.platform]
DIST_ARCH = {
'32bit': 'ia32',
'64bit': 'x64',
}[ARCH]
TARGET_PLATFORM = {
'cygwin': 'win32',
'darwin': 'darwin',
'linux2': 'linux',
'win32': 'win32',
}[sys.platform]
verbose_mode = False
def enable_verbose_mode():
print 'Running in verbose mode'
global verbose_mode
verbose_mode = True
def is_verbose_mode():
return verbose_mode
|
#!/usr/bin/env python
import platform
import sys
BASE_URL = 'https://gh-contractor-zcbenz.s3.amazonaws.com/libchromiumcontent'
LIBCHROMIUMCONTENT_COMMIT = 'aa87035cc012ce0d533bb56b947bca81a6e71b82'
ARCH = {
'cygwin': '32bit',
'darwin': '64bit',
'linux2': platform.architecture()[0],
'win32': '32bit',
}[sys.platform]
DIST_ARCH = {
'32bit': 'ia32',
'64bit': 'x64',
}[ARCH]
TARGET_PLATFORM = {
'cygwin': 'win32',
'darwin': 'darwin',
'linux2': 'linux',
'win32': 'win32',
}[sys.platform]
verbose_mode = False
def enable_verbose_mode():
print 'Running in verbose mode'
global verbose_mode
verbose_mode = True
def is_verbose_mode():
return verbose_mode
Upgrade libchromiumcontent to remove dom storage quota
Closes #897.#!/usr/bin/env python
import platform
import sys
BASE_URL = 'https://gh-contractor-zcbenz.s3.amazonaws.com/libchromiumcontent'
LIBCHROMIUMCONTENT_COMMIT = 'e375124044f9044ac88076eba0cd17361ee0997c'
ARCH = {
'cygwin': '32bit',
'darwin': '64bit',
'linux2': platform.architecture()[0],
'win32': '32bit',
}[sys.platform]
DIST_ARCH = {
'32bit': 'ia32',
'64bit': 'x64',
}[ARCH]
TARGET_PLATFORM = {
'cygwin': 'win32',
'darwin': 'darwin',
'linux2': 'linux',
'win32': 'win32',
}[sys.platform]
verbose_mode = False
def enable_verbose_mode():
print 'Running in verbose mode'
global verbose_mode
verbose_mode = True
def is_verbose_mode():
return verbose_mode
|
<commit_before>#!/usr/bin/env python
import platform
import sys
BASE_URL = 'https://gh-contractor-zcbenz.s3.amazonaws.com/libchromiumcontent'
LIBCHROMIUMCONTENT_COMMIT = 'aa87035cc012ce0d533bb56b947bca81a6e71b82'
ARCH = {
'cygwin': '32bit',
'darwin': '64bit',
'linux2': platform.architecture()[0],
'win32': '32bit',
}[sys.platform]
DIST_ARCH = {
'32bit': 'ia32',
'64bit': 'x64',
}[ARCH]
TARGET_PLATFORM = {
'cygwin': 'win32',
'darwin': 'darwin',
'linux2': 'linux',
'win32': 'win32',
}[sys.platform]
verbose_mode = False
def enable_verbose_mode():
print 'Running in verbose mode'
global verbose_mode
verbose_mode = True
def is_verbose_mode():
return verbose_mode
<commit_msg>Upgrade libchromiumcontent to remove dom storage quota
Closes #897.<commit_after>#!/usr/bin/env python
import platform
import sys
BASE_URL = 'https://gh-contractor-zcbenz.s3.amazonaws.com/libchromiumcontent'
LIBCHROMIUMCONTENT_COMMIT = 'e375124044f9044ac88076eba0cd17361ee0997c'
ARCH = {
'cygwin': '32bit',
'darwin': '64bit',
'linux2': platform.architecture()[0],
'win32': '32bit',
}[sys.platform]
DIST_ARCH = {
'32bit': 'ia32',
'64bit': 'x64',
}[ARCH]
TARGET_PLATFORM = {
'cygwin': 'win32',
'darwin': 'darwin',
'linux2': 'linux',
'win32': 'win32',
}[sys.platform]
verbose_mode = False
def enable_verbose_mode():
print 'Running in verbose mode'
global verbose_mode
verbose_mode = True
def is_verbose_mode():
return verbose_mode
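A note on the platform tables above: indexing a dict literal with sys.platform fails with a bare KeyError on any platform outside the four listed keys (and on Python 3, where Linux reports 'linux', not 'linux2'). A minimal sketch of the same lookup with an explicit failure message — the helper name is hypothetical, not part of the script:
import sys
_TARGETS = {
    'cygwin': 'win32',
    'darwin': 'darwin',
    'linux2': 'linux',
    'win32': 'win32',
}
def target_platform():
  # Same mapping as TARGET_PLATFORM above, but with a readable error
  # instead of an unhandled KeyError on unsupported platforms.
  try:
    return _TARGETS[sys.platform]
  except KeyError:
    raise SystemExit('unsupported platform: %s' % sys.platform)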
|
d9348ebad71376dd6e8c350f9d559cdd54c60cd0
|
games/management/commands/resize_media.py
|
games/management/commands/resize_media.py
|
"""Resize all game banners to a fixed size"""
import os
import shutil
from django.core.management.base import BaseCommand
from django.conf import settings
from sorl.thumbnail import get_thumbnail
from games.models import Game
class Command(BaseCommand):
"""Resize banners and icons"""
ICON_PATH = os.path.join(settings.MEDIA_ROOT, "game-icons/128")
BANNER_PATH = os.path.join(settings.MEDIA_ROOT, "game-banners/184")
def resize_icon(self, game):
"""Resize icon to fixed size"""
dest_file = os.path.join(self.ICON_PATH, "%s.png" % game.slug)
if os.path.exists(dest_file):
return
thumbnail = get_thumbnail(
game.icon,
settings.ICON_SIZE,
crop="center",
format="PNG"
)
shutil.copy(os.path.join(settings.MEDIA_ROOT, thumbnail.name), dest_file)
def resize_banner(self, game):
"""Resize banner to fixed size"""
dest_file = os.path.join(self.BANNER_PATH, "%s.jpg" % game.slug)
if os.path.exists(dest_file):
return
thumbnail = get_thumbnail(
game.title_logo,
settings.BANNER_SIZE,
crop="center"
)
shutil.copy(os.path.join(settings.MEDIA_ROOT, thumbnail.name), dest_file)
def handle(self, *args, **_kwargs):
"""Run command"""
if not os.path.exists(self.ICON_PATH):
os.makedirs(self.ICON_PATH)
if not os.path.exists(self.BANNER_PATH):
os.makedirs(self.BANNER_PATH)
games = Game.objects.all()
for game in games:
icon_path = os.path.join(settings.MEDIA_ROOT, game.icon.name)
if game.icon.name:
if not os.path.exists(icon_path):
print("%s is missing icon" % game)
else:
self.resize_icon(game)
banner_path = os.path.join(settings.MEDIA_ROOT, game.title_logo.name)
if game.title_logo.name:
if not os.path.exists(banner_path):
print("%s is missing banner" % game)
else:
self.resize_banner(game)
|
Add management command to move icons and banners to static locations
|
Add management command to move icons and banners to static locations
|
Python
|
agpl-3.0
|
lutris/website,lutris/website,lutris/website,lutris/website
|
Add management command to move icons and banners to static locations
|
"""Resize all game banners to a fixed size"""
import os
import shutil
from django.core.management.base import BaseCommand
from django.conf import settings
from sorl.thumbnail import get_thumbnail
from games.models import Game
class Command(BaseCommand):
"""Resize banners and icons"""
ICON_PATH = os.path.join(settings.MEDIA_ROOT, "game-icons/128")
BANNER_PATH = os.path.join(settings.MEDIA_ROOT, "game-banners/184")
def resize_icon(self, game):
"""Resize icon to fixed size"""
dest_file = os.path.join(self.ICON_PATH, "%s.png" % game.slug)
if os.path.exists(dest_file):
return
thumbnail = get_thumbnail(
game.icon,
settings.ICON_SIZE,
crop="center",
format="PNG"
)
shutil.copy(os.path.join(settings.MEDIA_ROOT, thumbnail.name), dest_file)
def resize_banner(self, game):
"""Resize banner to fixed size"""
dest_file = os.path.join(self.BANNER_PATH, "%s.jpg" % game.slug)
if os.path.exists(dest_file):
return
thumbnail = get_thumbnail(
game.title_logo,
settings.BANNER_SIZE,
crop="center"
)
shutil.copy(os.path.join(settings.MEDIA_ROOT, thumbnail.name), dest_file)
def handle(self, *args, **_kwargs):
"""Run command"""
if not os.path.exists(self.ICON_PATH):
os.makedirs(self.ICON_PATH)
if not os.path.exists(self.BANNER_PATH):
os.makedirs(self.BANNER_PATH)
games = Game.objects.all()
for game in games:
icon_path = os.path.join(settings.MEDIA_ROOT, game.icon.name)
if game.icon.name:
if not os.path.exists(icon_path):
print("%s is missing icon" % game)
else:
self.resize_icon(game)
banner_path = os.path.join(settings.MEDIA_ROOT, game.title_logo.name)
if game.title_logo.name:
if not os.path.exists(banner_path):
print("%s is missing banner" % game)
else:
self.resize_banner(game)
|
<commit_before><commit_msg>Add management command to move icons and banners to static locations<commit_after>
|
"""Resize all game banners to a fixed size"""
import os
import shutil
from django.core.management.base import BaseCommand
from django.conf import settings
from sorl.thumbnail import get_thumbnail
from games.models import Game
class Command(BaseCommand):
"""Resize banners and icons"""
ICON_PATH = os.path.join(settings.MEDIA_ROOT, "game-icons/128")
BANNER_PATH = os.path.join(settings.MEDIA_ROOT, "game-banners/184")
def resize_icon(self, game):
"""Resize icon to fixed size"""
dest_file = os.path.join(self.ICON_PATH, "%s.png" % game.slug)
if os.path.exists(dest_file):
return
thumbnail = get_thumbnail(
game.icon,
settings.ICON_SIZE,
crop="center",
format="PNG"
)
shutil.copy(os.path.join(settings.MEDIA_ROOT, thumbnail.name), dest_file)
def resize_banner(self, game):
"""Resize banner to fixed size"""
dest_file = os.path.join(self.BANNER_PATH, "%s.jpg" % game.slug)
if os.path.exists(dest_file):
return
thumbnail = get_thumbnail(
game.title_logo,
settings.BANNER_SIZE,
crop="center"
)
shutil.copy(os.path.join(settings.MEDIA_ROOT, thumbnail.name), dest_file)
def handle(self, *args, **_kwargs):
"""Run command"""
if not os.path.exists(self.ICON_PATH):
os.makedirs(self.ICON_PATH)
if not os.path.exists(self.BANNER_PATH):
os.makedirs(self.BANNER_PATH)
games = Game.objects.all()
for game in games:
icon_path = os.path.join(settings.MEDIA_ROOT, game.icon.name)
if game.icon.name:
if not os.path.exists(icon_path):
print("%s is missing icon" % game)
else:
self.resize_icon(game)
banner_path = os.path.join(settings.MEDIA_ROOT, game.title_logo.name)
if game.title_logo.name:
if not os.path.exists(banner_path):
print("%s is missing banner" % game)
else:
self.resize_banner(game)
|
Add management command to move icons and banners to static locations"""Resize all game banners to a fixed size"""
import os
import shutil
from django.core.management.base import BaseCommand
from django.conf import settings
from sorl.thumbnail import get_thumbnail
from games.models import Game
class Command(BaseCommand):
"""Resize banners and icons"""
ICON_PATH = os.path.join(settings.MEDIA_ROOT, "game-icons/128")
BANNER_PATH = os.path.join(settings.MEDIA_ROOT, "game-banners/184")
def resize_icon(self, game):
"""Resize icon to fixed size"""
dest_file = os.path.join(self.ICON_PATH, "%s.png" % game.slug)
if os.path.exists(dest_file):
return
thumbnail = get_thumbnail(
game.icon,
settings.ICON_SIZE,
crop="center",
format="PNG"
)
shutil.copy(os.path.join(settings.MEDIA_ROOT, thumbnail.name), dest_file)
def resize_banner(self, game):
"""Resize banner to fixed size"""
dest_file = os.path.join(self.BANNER_PATH, "%s.jpg" % game.slug)
if os.path.exists(dest_file):
return
thumbnail = get_thumbnail(
game.title_logo,
settings.BANNER_SIZE,
crop="center"
)
shutil.copy(os.path.join(settings.MEDIA_ROOT, thumbnail.name), dest_file)
def handle(self, *args, **_kwargs):
"""Run command"""
if not os.path.exists(self.ICON_PATH):
os.makedirs(self.ICON_PATH)
if not os.path.exists(self.BANNER_PATH):
os.makedirs(self.BANNER_PATH)
games = Game.objects.all()
for game in games:
icon_path = os.path.join(settings.MEDIA_ROOT, game.icon.name)
if game.icon.name:
if not os.path.exists(icon_path):
print("%s is missing icon" % game)
else:
self.resize_icon(game)
banner_path = os.path.join(settings.MEDIA_ROOT, game.title_logo.name)
if game.title_logo.name:
if not os.path.exists(banner_path):
print("%s is missing banner" % game)
else:
self.resize_banner(game)
|
<commit_before><commit_msg>Add management command to move icons and banners to static locations<commit_after>"""Resize all game banners to a fixed size"""
import os
import shutil
from django.core.management.base import BaseCommand
from django.conf import settings
from sorl.thumbnail import get_thumbnail
from games.models import Game
class Command(BaseCommand):
"""Resize banners and icons"""
ICON_PATH = os.path.join(settings.MEDIA_ROOT, "game-icons/128")
BANNER_PATH = os.path.join(settings.MEDIA_ROOT, "game-banners/184")
def resize_icon(self, game):
"""Resize icon to fixed size"""
dest_file = os.path.join(self.ICON_PATH, "%s.png" % game.slug)
if os.path.exists(dest_file):
return
thumbnail = get_thumbnail(
game.icon,
settings.ICON_SIZE,
crop="center",
format="PNG"
)
shutil.copy(os.path.join(settings.MEDIA_ROOT, thumbnail.name), dest_file)
def resize_banner(self, game):
"""Resize banner to fixed size"""
dest_file = os.path.join(self.BANNER_PATH, "%s.jpg" % game.slug)
if os.path.exists(dest_file):
return
thumbnail = get_thumbnail(
game.title_logo,
settings.BANNER_SIZE,
crop="center"
)
shutil.copy(os.path.join(settings.MEDIA_ROOT, thumbnail.name), dest_file)
def handle(self, *args, **_kwargs):
"""Run command"""
if not os.path.exists(self.ICON_PATH):
os.makedirs(self.ICON_PATH)
if not os.path.exists(self.BANNER_PATH):
os.makedirs(self.BANNER_PATH)
games = Game.objects.all()
for game in games:
icon_path = os.path.join(settings.MEDIA_ROOT, game.icon.name)
if game.icon.name:
if not os.path.exists(icon_path):
print("%s is missing icon" % game)
else:
self.resize_icon(game)
banner_path = os.path.join(settings.MEDIA_ROOT, game.title_logo.name)
if game.title_logo.name:
if not os.path.exists(banner_path):
print("%s is missing banner" % game)
else:
self.resize_banner(game)
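Because the file lives at games/management/commands/resize_media.py, Django discovers it by module name. A quick way to run it from code (standard django.core.management API; assumes the Django settings module is already configured):
from django.core.management import call_command
# Equivalent to running `python manage.py resize_media` from a shell.
call_command('resize_media')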
|
|
67b90064cfaa09c55ea138895eb6c7ed9513bc96
|
Python-demo/test-demo/test_loop_with_else.py
|
Python-demo/test-demo/test_loop_with_else.py
|
# coding=utf-8
"""
# test_loop_with_else.py
#
# Copyright(C) By AbsentM. 2018
#
# Author: AbsentM
# Date: 2018/03/18
#
# Description: Test python loop with else demo
#
# Change Log:
# 2018/03/18 AbsentM Create the file
# 2018/03/18 AbsentM Add test demo
#
"""
def run_for_else_validation(fruits):
"""
Test python for else
    :param fruits: A comma-separated string of fruit names
"""
if fruits is None:
fruits_result = "*"
else:
fruits = fruits.split(",")
print "fruits >>> {}".format(fruits)
for item in fruits:
print "item >>> {}".format(item)
else:
fruits_result = fruits
print "total >>> {}".format(fruits_result)
def run_while_else_validation():
"""
Test python while else
"""
index = 0
while index <= 10:
index += 1
print "index {}: {} ".format(index, index)
else:
print "in while else"
if __name__ == '__main__':
print "---------------------------"
print "Run first test..."
test_1 = None
run_for_else_validation(test_1)
print "Run first test finished"
print "---------------------------"
print "Run second test..."
test_2 = "apple"
run_for_else_validation(test_2)
print "Run second test finished"
print "---------------------------"
print "Run third test..."
test_3 = "apple,pear,orange"
run_for_else_validation(test_3)
print "Run third test finished"
print "---------------------------"
print "Run fourth test..."
run_while_else_validation()
print "Run fourth test finished"
"""
The output is as follows:
---------------------------
Run first test...
total >>> *
Run first test finished
---------------------------
Run second test...
fruits >>> ['apple']
item >>> apple
total >>> ['apple']
Run second test finished
---------------------------
Run third test...
fruits >>> ['apple', 'pear', 'orange']
item >>> apple
item >>> pear
item >>> orange
total >>> ['apple', 'pear', 'orange']
Run third test finished
---------------------------
Run fourth test...
index 1: 1
index 2: 2
index 3: 3
index 4: 4
index 5: 5
index 6: 6
index 7: 7
index 8: 8
index 9: 9
index 10: 10
index 11: 11
in while else
Run fourth test finished
[Finished in 0.3s]
"""
|
Add python loop with else demo
|
Add python loop with else demo
|
Python
|
mit
|
absentm/Demo,absentm/Demo,absentm/Demo,absentm/Demo,absentm/Demo,absentm/Demo
|
Add python loop with else demo
|
# coding=utf-8
"""
# test_loop_with_else.py
#
# Copyright(C) By AbsentM. 2018
#
# Author: AbsentM
# Date: 2018/03/18
#
# Description: Test python loop with else demo
#
# Change Log:
# 2018/03/18 AbsentM Create the file
# 2018/03/18 AbsentM Add test demo
#
"""
def run_for_else_validation(fruits):
"""
Test python for else
    :param fruits: A comma-separated string of fruit names
"""
if fruits is None:
fruits_result = "*"
else:
fruits = fruits.split(",")
print "fruits >>> {}".format(fruits)
for item in fruits:
print "item >>> {}".format(item)
else:
fruits_result = fruits
print "total >>> {}".format(fruits_result)
def run_while_else_validation():
"""
Test python while else
"""
index = 0
while index <= 10:
index += 1
print "index {}: {} ".format(index, index)
else:
print "in while else"
if __name__ == '__main__':
print "---------------------------"
print "Run first test..."
test_1 = None
run_for_else_validation(test_1)
print "Run first test finished"
print "---------------------------"
print "Run second test..."
test_2 = "apple"
run_for_else_validation(test_2)
print "Run second test finished"
print "---------------------------"
print "Run third test..."
test_3 = "apple,pear,orange"
run_for_else_validation(test_3)
print "Run third test finished"
print "---------------------------"
print "Run fourth test..."
run_while_else_validation()
print "Run fourth test finished"
"""
The output is as follows:
---------------------------
Run first test...
total >>> *
Run first test finished
---------------------------
Run second test...
fruits >>> ['apple']
item >>> apple
total >>> ['apple']
Run second test finished
---------------------------
Run third test...
fruits >>> ['apple', 'pear', 'orange']
item >>> apple
item >>> pear
item >>> orange
total >>> ['apple', 'pear', 'orange']
Run third test finished
---------------------------
Run fourth test...
index 1: 1
index 2: 2
index 3: 3
index 4: 4
index 5: 5
index 6: 6
index 7: 7
index 8: 8
index 9: 9
index 10: 10
index 11: 11
in while else
Run fourth test finished
[Finished in 0.3s]
"""
|
<commit_before><commit_msg>Add python loop with else demo<commit_after>
|
# coding=utf-8
"""
# test_loop_with_else.py
#
# Copyright(C) By AbsentM. 2018
#
# Author: AbsentM
# Date: 2018/03/18
#
# Description: Test python loop with else demo
#
# Change Log:
# 2018/03/18 AbsentM Create the file
# 2018/03/18 AbsentM Add test demo
#
"""
def run_for_else_validation(fruits):
"""
Test python for else
    :param fruits: A comma-separated string of fruit names
"""
if fruits is None:
fruits_result = "*"
else:
fruits = fruits.split(",")
print "fruits >>> {}".format(fruits)
for item in fruits:
print "item >>> {}".format(item)
else:
fruits_result = fruits
print "total >>> {}".format(fruits_result)
def run_while_else_validation():
"""
Test python while else
"""
index = 0
while index <= 10:
index += 1
print "index {}: {} ".format(index, index)
else:
print "in while else"
if __name__ == '__main__':
print "---------------------------"
print "Run first test..."
test_1 = None
run_for_else_validation(test_1)
print "Run first test finished"
print "---------------------------"
print "Run second test..."
test_2 = "apple"
run_for_else_validation(test_2)
print "Run second test finished"
print "---------------------------"
print "Run third test..."
test_3 = "apple,pear,orange"
run_for_else_validation(test_3)
print "Run third test finished"
print "---------------------------"
print "Run fourth test..."
run_while_else_validation()
print "Run fourth test finished"
"""
The output is as follows:
---------------------------
Run first test...
total >>> *
Run first test finished
---------------------------
Run second test...
fruits >>> ['apple']
item >>> apple
total >>> ['apple']
Run second test finished
---------------------------
Run third test...
fruits >>> ['apple', 'pear', 'orange']
item >>> apple
item >>> pear
item >>> orange
total >>> ['apple', 'pear', 'orange']
Run third test finished
---------------------------
Run fourth test...
index 1: 1
index 2: 2
index 3: 3
index 4: 4
index 5: 5
index 6: 6
index 7: 7
index 8: 8
index 9: 9
index 10: 10
index 11: 11
in while else
Run fourth test finished
[Finished in 0.3s]
"""
|
Add python loop with else demo# coding=utf-8
"""
# test_loop_with_else.py
#
# Copyright(C) By AbsentM. 2018
#
# Author: AbsentM
# Date: 2018/03/18
#
# Description: Test python loop with else demo
#
# Change Log:
# 2018/03/18 AbsentM Create the file
# 2018/03/18 AbsentM Add test demo
#
"""
def run_for_else_validation(fruits):
"""
Test python for else
    :param fruits: A comma-separated string of fruit names
"""
if fruits is None:
fruits_result = "*"
else:
fruits = fruits.split(",")
print "fruits >>> {}".format(fruits)
for item in fruits:
print "item >>> {}".format(item)
else:
fruits_result = fruits
print "total >>> {}".format(fruits_result)
def run_while_else_validation():
"""
Test python while else
"""
index = 0
while index <= 10:
index += 1
print "index {}: {} ".format(index, index)
else:
print "in while else"
if __name__ == '__main__':
print "---------------------------"
print "Run first test..."
test_1 = None
run_for_else_validation(test_1)
print "Run first test finished"
print "---------------------------"
print "Run second test..."
test_2 = "apple"
run_for_else_validation(test_2)
print "Run second test finished"
print "---------------------------"
print "Run third test..."
test_3 = "apple,pear,orange"
run_for_else_validation(test_3)
print "Run third test finished"
print "---------------------------"
print "Run fourth test..."
run_while_else_validation()
print "Run fourth test finished"
"""
The output is as follows:
---------------------------
Run first test...
total >>> *
Run first test finished
---------------------------
Run second test...
fruits >>> ['apple']
item >>> apple
total >>> ['apple']
Run second test finished
---------------------------
Run third test...
fruits >>> ['apple', 'pear', 'orange']
item >>> apple
item >>> pear
item >>> orange
total >>> ['apple', 'pear', 'orange']
Run third test finished
---------------------------
Run fourth test...
index 1: 1
index 2: 2
index 3: 3
index 4: 4
index 5: 5
index 6: 6
index 7: 7
index 8: 8
index 9: 9
index 10: 10
index 11: 11
in while else
Run fourth test finished
[Finished in 0.3s]
"""
|
<commit_before><commit_msg>Add python loop with else demo<commit_after># coding=utf-8
"""
# test_loop_with_else.py
#
# Copyright(C) By AbsentM. 2018
#
# Author: AbsentM
# Date: 2018/03/18
#
# Description: Test python loop with else demo
#
# Change Log:
# 2018/03/18 AbsentM Create the file
# 2018/03/18 AbsentM Add test demo
#
"""
def run_for_else_validation(fruits):
"""
Test python for else
    :param fruits: A comma-separated string of fruit names
"""
if fruits is None:
fruits_result = "*"
else:
fruits = fruits.split(",")
print "fruits >>> {}".format(fruits)
for item in fruits:
print "item >>> {}".format(item)
else:
fruits_result = fruits
print "total >>> {}".format(fruits_result)
def run_while_else_validation():
"""
Test python while else
"""
index = 0
while index <= 10:
index += 1
print "index {}: {} ".format(index, index)
else:
print "in while else"
if __name__ == '__main__':
print "---------------------------"
print "Run first test..."
test_1 = None
run_for_else_validation(test_1)
print "Run first test finished"
print "---------------------------"
print "Run second test..."
test_2 = "apple"
run_for_else_validation(test_2)
print "Run second test finished"
print "---------------------------"
print "Run third test..."
test_3 = "apple,pear,orange"
run_for_else_validation(test_3)
print "Run third test finished"
print "---------------------------"
print "Run fourth test..."
run_while_else_validation()
print "Run fourth test finished"
"""
The output is as follows:
---------------------------
Run first test...
total >>> *
Run first test finished
---------------------------
Run second test...
fruits >>> ['apple']
item >>> apple
total >>> ['apple']
Run second test finished
---------------------------
Run third test...
fruits >>> ['apple', 'pear', 'orange']
item >>> apple
item >>> pear
item >>> orange
total >>> ['apple', 'pear', 'orange']
Run third test finished
---------------------------
Run fourth test...
index 1: 1
index 2: 2
index 3: 3
index 4: 4
index 5: 5
index 6: 6
index 7: 7
index 8: 8
index 9: 9
index 10: 10
index 11: 11
in while else
Run fourth test finished
[Finished in 0.3s]
"""
|
|
c8336a85f002dfe8437c6092f3d44bca2c5e0b45
|
dbaas/maintenance/scripts/migrate_email.py
|
dbaas/maintenance/scripts/migrate_email.py
|
from django.contrib.auth.models import User
from copy import deepcopy
def _migrate(file_path):
arq = open(file_path)
for line in arq.readlines():
old_email, new_email = line.strip().split(',')
old_email = old_email.strip()
new_email = new_email.strip()
try:
old_user = User.objects.get(username=old_email)
except User.DoesNotExist:
continue
new_user = User.objects.filter(username=new_email)
if new_user:
continue
new_user = deepcopy(old_user)
new_user.id = None
new_user.username = new_email
new_user.email = new_email
new_user.save()
map(new_user.team_set.add, old_user.team_set.all())
arq.close()
def migrate_corp():
_migrate('/tmp/corp.csv')
def migrate_prestadores():
_migrate('/tmp/prestadores.csv')
def migrate_tvglobo():
_migrate('/tmp/tvglobo.csv')
def update_username():
from django.db.models import Q
for user in User.objects.all().exclude(
Q(username='admin') | Q(username='slack_bot')):
if user.email:
user.username = user.email
user.save()
|
Create script to migrate email
|
Create script to migrate email
|
Python
|
bsd-3-clause
|
globocom/database-as-a-service,globocom/database-as-a-service,globocom/database-as-a-service,globocom/database-as-a-service
|
Create script to migrate email
|
from django.contrib.auth.models import User
from copy import deepcopy
def _migrate(file_path):
arq = open(file_path)
for line in arq.readlines():
old_email, new_email = line.strip().split(',')
old_email = old_email.strip()
new_email = new_email.strip()
try:
old_user = User.objects.get(username=old_email)
except User.DoesNotExist:
continue
new_user = User.objects.filter(username=new_email)
if new_user:
continue
new_user = deepcopy(old_user)
new_user.id = None
new_user.username = new_email
new_user.email = new_email
new_user.save()
map(new_user.team_set.add, old_user.team_set.all())
arq.close()
def migrate_corp():
_migrate('/tmp/corp.csv')
def migrate_prestadores():
_migrate('/tmp/prestadores.csv')
def migrate_tvglobo():
_migrate('/tmp/tvglobo.csv')
def update_username():
from django.db.models import Q
for user in User.objects.all().exclude(
Q(username='admin') | Q(username='slack_bot')):
if user.email:
user.username = user.email
user.save()
|
<commit_before><commit_msg>Create script to migrate email<commit_after>
|
from django.contrib.auth.models import User
from copy import deepcopy
def _migrate(file_path):
arq = open(file_path)
for line in arq.readlines():
old_email, new_email = line.strip().split(',')
old_email = old_email.strip()
new_email = new_email.strip()
try:
old_user = User.objects.get(username=old_email)
except User.DoesNotExist:
continue
new_user = User.objects.filter(username=new_email)
if new_user:
continue
new_user = deepcopy(old_user)
new_user.id = None
new_user.username = new_email
new_user.email = new_email
new_user.save()
map(new_user.team_set.add, old_user.team_set.all())
arq.close()
def migrate_corp():
_migrate('/tmp/corp.csv')
def migrate_prestadores():
_migrate('/tmp/prestadores.csv')
def migrate_tvglobo():
_migrate('/tmp/tvglobo.csv')
def update_username():
from django.db.models import Q
for user in User.objects.all().exclude(
Q(username='admin') | Q(username='slack_bot')):
if user.email:
user.username = user.email
user.save()
|
Create script to migrate emailfrom django.contrib.auth.models import User
from copy import deepcopy
def _migrate(file_path):
arq = open(file_path)
for line in arq.readlines():
old_email, new_email = line.strip().split(',')
old_email = old_email.strip()
new_email = new_email.strip()
try:
old_user = User.objects.get(username=old_email)
except User.DoesNotExist:
continue
new_user = User.objects.filter(username=new_email)
if new_user:
continue
new_user = deepcopy(old_user)
new_user.id = None
new_user.username = new_email
new_user.email = new_email
new_user.save()
map(new_user.team_set.add, old_user.team_set.all())
arq.close()
def migrate_corp():
_migrate('/tmp/corp.csv')
def migrate_prestadores():
_migrate('/tmp/prestadores.csv')
def migrate_tvglobo():
_migrate('/tmp/tvglobo.csv')
def update_username():
from django.db.models import Q
for user in User.objects.all().exclude(
Q(username='admin') | Q(username='slack_bot')):
if user.email:
user.username = user.email
user.save()
|
<commit_before><commit_msg>Create script to migrate email<commit_after>from django.contrib.auth.models import User
from copy import deepcopy
def _migrate(file_path):
arq = open(file_path)
for line in arq.readlines():
old_email, new_email = line.strip().split(',')
old_email = old_email.strip()
new_email = new_email.strip()
try:
old_user = User.objects.get(username=old_email)
except User.DoesNotExist:
continue
new_user = User.objects.filter(username=new_email)
if new_user:
continue
new_user = deepcopy(old_user)
new_user.id = None
new_user.username = new_email
new_user.email = new_email
new_user.save()
map(new_user.team_set.add, old_user.team_set.all())
arq.close()
def migrate_corp():
_migrate('/tmp/corp.csv')
def migrate_prestadores():
_migrate('/tmp/prestadores.csv')
def migrate_tvglobo():
_migrate('/tmp/tvglobo.csv')
def update_username():
from django.db.models import Q
for user in User.objects.all().exclude(
Q(username='admin') | Q(username='slack_bot')):
if user.email:
user.username = user.email
user.save()
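One portability note on the team copy above: map(new_user.team_set.add, ...) only runs eagerly on Python 2; on Python 3, map is lazy, so the memberships would silently never be added. A Python-3-safe equivalent of that line, same behaviour spelled out:
for team in old_user.team_set.all():
    new_user.team_set.add(team)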
|
|
fe45b745e5f0a859f10b6edf2ad8f1e24cc01a56
|
heat_integrationtests/functional/test_simultaneous_update.py
|
heat_integrationtests/functional/test_simultaneous_update.py
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import time
from heat_integrationtests.common import test
from heat_integrationtests.functional import functional_base
_test_template = {
'heat_template_version': 'pike',
'description': 'Test template to create two resources.',
'resources': {
'test1': {
'type': 'OS::Heat::TestResource',
'properties': {
'value': 'Test1',
'fail': False,
'update_replace': False,
'wait_secs': 0,
}
},
'test2': {
'type': 'OS::Heat::TestResource',
'properties': {
'value': 'Test1',
'fail': False,
'update_replace': False,
'wait_secs': 0,
'action_wait_secs': {
'create': 30,
}
},
'depends_on': ['test1']
}
}
}
def get_templates(fail=False, delay_s=None):
before = copy.deepcopy(_test_template)
after = copy.deepcopy(before)
for r in after['resources'].values():
r['properties']['value'] = 'Test2'
before_props = before['resources']['test2']['properties']
before_props['fail'] = fail
if delay_s is not None:
before_props['action_wait_secs']['create'] = delay_s
return before, after
class SimultaneousUpdateStackTest(functional_base.FunctionalTestsBase):
@test.requires_convergence
def test_retrigger_success(self):
before, after = get_templates()
stack_id = self.stack_create(template=before,
expected_status='CREATE_IN_PROGRESS')
time.sleep(10)
self.update_stack(stack_id, after)
@test.requires_convergence
def test_retrigger_failure(self):
before, after = get_templates(fail=True)
stack_id = self.stack_create(template=before,
expected_status='CREATE_IN_PROGRESS')
time.sleep(10)
self.update_stack(stack_id, after)
|
Add integration tests for simultaneous updates
|
Add integration tests for simultaneous updates
Convergence allows us to start a stack update while a previous update (or
create) is still in progress. If a resource is found to be locked by a
previous graph traversal when the new traversal comes to update it, the
earlier traversal is responsible for retriggering the resource after it
completes.
This patch adds functional tests to confirm that this works when the
long-running resource update ends in success or failure (as opposed to
timing out or being cancelled).
Change-Id: I5cb0cfd6bb05a94cd32709b5cda8454df6e81a61
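get_templates() also accepts a delay_s argument that neither test passes; a sketch of how it could be used to widen the in-progress window even further — a hypothetical third test, not part of this patch:
    @test.requires_convergence
    def test_retrigger_slow_create(self):
        # Stretch test2's create to 60s so the second update reliably
        # arrives while the first traversal still holds the resource lock.
        before, after = get_templates(delay_s=60)
        stack_id = self.stack_create(template=before,
                                     expected_status='CREATE_IN_PROGRESS')
        time.sleep(10)
        self.update_stack(stack_id, after)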
|
Python
|
apache-2.0
|
openstack/heat,noironetworks/heat,noironetworks/heat,openstack/heat
|
Add integration tests for simultaneous updates
Convergence allows us to start a stack update while a previous update (or
create) is still in progress. If a resource is found to be locked by a
previous graph traversal when the new traversal comes to update it, the
earlier traversal is responsible for retriggering the resource after it
completes.
This patch adds functional tests to confirm that this works when the
long-running resource update ends in success or failure (as opposed to
timing out or being cancelled).
Change-Id: I5cb0cfd6bb05a94cd32709b5cda8454df6e81a61
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import time
from heat_integrationtests.common import test
from heat_integrationtests.functional import functional_base
_test_template = {
'heat_template_version': 'pike',
'description': 'Test template to create two resources.',
'resources': {
'test1': {
'type': 'OS::Heat::TestResource',
'properties': {
'value': 'Test1',
'fail': False,
'update_replace': False,
'wait_secs': 0,
}
},
'test2': {
'type': 'OS::Heat::TestResource',
'properties': {
'value': 'Test1',
'fail': False,
'update_replace': False,
'wait_secs': 0,
'action_wait_secs': {
'create': 30,
}
},
'depends_on': ['test1']
}
}
}
def get_templates(fail=False, delay_s=None):
before = copy.deepcopy(_test_template)
after = copy.deepcopy(before)
for r in after['resources'].values():
r['properties']['value'] = 'Test2'
before_props = before['resources']['test2']['properties']
before_props['fail'] = fail
if delay_s is not None:
before_props['action_wait_secs']['create'] = delay_s
return before, after
class SimultaneousUpdateStackTest(functional_base.FunctionalTestsBase):
@test.requires_convergence
def test_retrigger_success(self):
before, after = get_templates()
stack_id = self.stack_create(template=before,
expected_status='CREATE_IN_PROGRESS')
time.sleep(10)
self.update_stack(stack_id, after)
@test.requires_convergence
def test_retrigger_failure(self):
before, after = get_templates(fail=True)
stack_id = self.stack_create(template=before,
expected_status='CREATE_IN_PROGRESS')
time.sleep(10)
self.update_stack(stack_id, after)
|
<commit_before><commit_msg>Add integration tests for simultaneous updates
Convergence allows us to start a stack update while a previous update (or
create) is still in progress. If a resource is found to be locked by a
previous graph traversal when the new traversal comes to update it, the
earlier traversal is responsible for retriggering the resource after it
completes.
This patch adds functional tests to confirm that this works when the
long-running resource update ends in success or failure (as opposed to
timing out or being cancelled).
Change-Id: I5cb0cfd6bb05a94cd32709b5cda8454df6e81a61<commit_after>
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import time
from heat_integrationtests.common import test
from heat_integrationtests.functional import functional_base
_test_template = {
'heat_template_version': 'pike',
'description': 'Test template to create two resources.',
'resources': {
'test1': {
'type': 'OS::Heat::TestResource',
'properties': {
'value': 'Test1',
'fail': False,
'update_replace': False,
'wait_secs': 0,
}
},
'test2': {
'type': 'OS::Heat::TestResource',
'properties': {
'value': 'Test1',
'fail': False,
'update_replace': False,
'wait_secs': 0,
'action_wait_secs': {
'create': 30,
}
},
'depends_on': ['test1']
}
}
}
def get_templates(fail=False, delay_s=None):
before = copy.deepcopy(_test_template)
after = copy.deepcopy(before)
for r in after['resources'].values():
r['properties']['value'] = 'Test2'
before_props = before['resources']['test2']['properties']
before_props['fail'] = fail
if delay_s is not None:
before_props['action_wait_secs']['create'] = delay_s
return before, after
class SimultaneousUpdateStackTest(functional_base.FunctionalTestsBase):
@test.requires_convergence
def test_retrigger_success(self):
before, after = get_templates()
stack_id = self.stack_create(template=before,
expected_status='CREATE_IN_PROGRESS')
time.sleep(10)
self.update_stack(stack_id, after)
@test.requires_convergence
def test_retrigger_failure(self):
before, after = get_templates(fail=True)
stack_id = self.stack_create(template=before,
expected_status='CREATE_IN_PROGRESS')
time.sleep(10)
self.update_stack(stack_id, after)
|
Add integration tests for simultaneous updates
Convergence allows us to start a stack update while a previous update (or
create) is still in progress. If a resource is found to be locked by a
previous graph traversal when the new traversal comes to update it, the
earlier traversal is responsible for retriggering the resource after it
completes.
This patch adds functional tests to confirm that this works when the
long-running resource update ends in success or failure (as opposed to
timing out or being cancelled).
Change-Id: I5cb0cfd6bb05a94cd32709b5cda8454df6e81a61# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import time
from heat_integrationtests.common import test
from heat_integrationtests.functional import functional_base
_test_template = {
'heat_template_version': 'pike',
'description': 'Test template to create two resources.',
'resources': {
'test1': {
'type': 'OS::Heat::TestResource',
'properties': {
'value': 'Test1',
'fail': False,
'update_replace': False,
'wait_secs': 0,
}
},
'test2': {
'type': 'OS::Heat::TestResource',
'properties': {
'value': 'Test1',
'fail': False,
'update_replace': False,
'wait_secs': 0,
'action_wait_secs': {
'create': 30,
}
},
'depends_on': ['test1']
}
}
}
def get_templates(fail=False, delay_s=None):
before = copy.deepcopy(_test_template)
after = copy.deepcopy(before)
for r in after['resources'].values():
r['properties']['value'] = 'Test2'
before_props = before['resources']['test2']['properties']
before_props['fail'] = fail
if delay_s is not None:
before_props['action_wait_secs']['create'] = delay_s
return before, after
class SimultaneousUpdateStackTest(functional_base.FunctionalTestsBase):
@test.requires_convergence
def test_retrigger_success(self):
before, after = get_templates()
stack_id = self.stack_create(template=before,
expected_status='CREATE_IN_PROGRESS')
time.sleep(10)
self.update_stack(stack_id, after)
@test.requires_convergence
def test_retrigger_failure(self):
before, after = get_templates(fail=True)
stack_id = self.stack_create(template=before,
expected_status='CREATE_IN_PROGRESS')
time.sleep(10)
self.update_stack(stack_id, after)
|
<commit_before><commit_msg>Add integration tests for simultaneous updates
Convergence allows us to start a stack update while a previous update (or
create) is still in progress. If a resource is found to be locked by a
previous graph traversal when the new traversal comes to update it, the
earlier traversal is responsible for retriggering the resource after it
completes.
This patch adds functional tests to confirm that this works when the
long-running resource update ends in success or failure (as opposed to
timing out or being cancelled).
Change-Id: I5cb0cfd6bb05a94cd32709b5cda8454df6e81a61<commit_after># Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import time
from heat_integrationtests.common import test
from heat_integrationtests.functional import functional_base
_test_template = {
'heat_template_version': 'pike',
'description': 'Test template to create two resources.',
'resources': {
'test1': {
'type': 'OS::Heat::TestResource',
'properties': {
'value': 'Test1',
'fail': False,
'update_replace': False,
'wait_secs': 0,
}
},
'test2': {
'type': 'OS::Heat::TestResource',
'properties': {
'value': 'Test1',
'fail': False,
'update_replace': False,
'wait_secs': 0,
'action_wait_secs': {
'create': 30,
}
},
'depends_on': ['test1']
}
}
}
def get_templates(fail=False, delay_s=None):
before = copy.deepcopy(_test_template)
after = copy.deepcopy(before)
for r in after['resources'].values():
r['properties']['value'] = 'Test2'
before_props = before['resources']['test2']['properties']
before_props['fail'] = fail
if delay_s is not None:
before_props['action_wait_secs']['create'] = delay_s
return before, after
class SimultaneousUpdateStackTest(functional_base.FunctionalTestsBase):
@test.requires_convergence
def test_retrigger_success(self):
before, after = get_templates()
stack_id = self.stack_create(template=before,
expected_status='CREATE_IN_PROGRESS')
time.sleep(10)
self.update_stack(stack_id, after)
@test.requires_convergence
def test_retrigger_failure(self):
before, after = get_templates(fail=True)
stack_id = self.stack_create(template=before,
expected_status='CREATE_IN_PROGRESS')
time.sleep(10)
self.update_stack(stack_id, after)
|
|
35aa49152ab28d3364afeac4a053b41082f0b3b9
|
systrace/systrace/monitor_unittest.py
|
systrace/systrace/monitor_unittest.py
|
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import unittest
from systrace import decorators
from systrace import update_systrace_trace_viewer
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
STABLE_VIEWER_PATH = os.path.join(SCRIPT_DIR, 'systrace_trace_viewer.html')
# Tests presence and content of static HTML files used not only for Python
# systrace capture, but also Java-based capture in the android SDK tools.
#
# NOTE: changes to this file should typically be accompanied by changes to the
# Android SDK's method of systrace capture.
class MonitorTest(unittest.TestCase):
@decorators.HostOnlyTest
def test_systrace_trace_viewer(self):
self.assertEqual(STABLE_VIEWER_PATH,
update_systrace_trace_viewer.SYSTRACE_TRACE_VIEWER_HTML_FILE)
update_systrace_trace_viewer.update(force_update=True)
with open(STABLE_VIEWER_PATH) as f:
content = f.read().strip()
# expect a non-trivial HTML file, capped at 5 MB
self.assertGreater(5 * 1024 * 1024, len(content))
self.assertEqual('<', content[0])
os.remove(f.name)
@decorators.HostOnlyTest
def test_prefix(self):
with open(os.path.join(SCRIPT_DIR, 'prefix.html')) as f:
content = f.read().strip()
self.assertTrue("<html>" in content)
self.assertTrue("<title>Android System Trace</title>" in content)
self.assertTrue("{{SYSTRACE_TRACE_VIEWER_HTML}}" in content)
@decorators.HostOnlyTest
def test_suffix(self):
with open(os.path.join(SCRIPT_DIR, 'suffix.html')) as f:
content = f.read().strip()
self.assertTrue("</html>" in content)
|
Validate files needed by Android tools
|
Validate files needed by Android tools
BUG=catapult:#2546
Review-Url: https://codereview.chromium.org/2194743002
|
Python
|
bsd-3-clause
|
sahiljain/catapult,catapult-project/catapult-csm,benschmaus/catapult,catapult-project/catapult,catapult-project/catapult,benschmaus/catapult,benschmaus/catapult,catapult-project/catapult,catapult-project/catapult,catapult-project/catapult,benschmaus/catapult,catapult-project/catapult,benschmaus/catapult,catapult-project/catapult-csm,catapult-project/catapult-csm,catapult-project/catapult-csm,benschmaus/catapult,sahiljain/catapult,sahiljain/catapult,benschmaus/catapult,sahiljain/catapult,catapult-project/catapult-csm,catapult-project/catapult-csm,catapult-project/catapult,sahiljain/catapult,sahiljain/catapult,catapult-project/catapult-csm
|
Validate files needed by Android tools
BUG=catapult:#2546
Review-Url: https://codereview.chromium.org/2194743002
|
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import unittest
from systrace import decorators
from systrace import update_systrace_trace_viewer
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
STABLE_VIEWER_PATH = os.path.join(SCRIPT_DIR, 'systrace_trace_viewer.html')
# Tests presence and content of static HTML files used not only for Python
# systrace capture, but also Java-based capture in the android SDK tools.
#
# NOTE: changes to this file should typically be accompanied by changes to the
# Android SDK's method of systrace capture.
class MonitorTest(unittest.TestCase):
@decorators.HostOnlyTest
def test_systrace_trace_viewer(self):
self.assertEqual(STABLE_VIEWER_PATH,
update_systrace_trace_viewer.SYSTRACE_TRACE_VIEWER_HTML_FILE)
update_systrace_trace_viewer.update(force_update=True)
with open(STABLE_VIEWER_PATH) as f:
content = f.read().strip()
# expect a non-trivial HTML file, capped at 5 MB
self.assertGreater(5 * 1024 * 1024, len(content))
self.assertEqual('<', content[0])
os.remove(f.name)
@decorators.HostOnlyTest
def test_prefix(self):
with open(os.path.join(SCRIPT_DIR, 'prefix.html')) as f:
content = f.read().strip()
self.assertTrue("<html>" in content)
self.assertTrue("<title>Android System Trace</title>" in content)
self.assertTrue("{{SYSTRACE_TRACE_VIEWER_HTML}}" in content)
@decorators.HostOnlyTest
def test_suffix(self):
with open(os.path.join(SCRIPT_DIR, 'suffix.html')) as f:
content = f.read().strip()
self.assertTrue("</html>" in content)
|
<commit_before><commit_msg>Validate files needed by Android tools
BUG=catapult:#2546
Review-Url: https://codereview.chromium.org/2194743002<commit_after>
|
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import unittest
from systrace import decorators
from systrace import update_systrace_trace_viewer
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
STABLE_VIEWER_PATH = os.path.join(SCRIPT_DIR, 'systrace_trace_viewer.html')
# Tests presence and content of static HTML files used not only for Python
# systrace capture, but also Java-based capture in the android SDK tools.
#
# NOTE: changes to this file should typically be accompanied by changes to the
# Android SDK's method of systrace capture.
class MonitorTest(unittest.TestCase):
@decorators.HostOnlyTest
def test_systrace_trace_viewer(self):
self.assertEqual(STABLE_VIEWER_PATH,
update_systrace_trace_viewer.SYSTRACE_TRACE_VIEWER_HTML_FILE)
update_systrace_trace_viewer.update(force_update=True)
with open(STABLE_VIEWER_PATH) as f:
content = f.read().strip()
# expect a non-trivial HTML file, capped at 5 MB
self.assertGreater(5 * 1024 * 1024, len(content))
self.assertEqual('<', content[0])
os.remove(f.name)
@decorators.HostOnlyTest
def test_prefix(self):
with open(os.path.join(SCRIPT_DIR, 'prefix.html')) as f:
content = f.read().strip()
self.assertTrue("<html>" in content)
self.assertTrue("<title>Android System Trace</title>" in content)
self.assertTrue("{{SYSTRACE_TRACE_VIEWER_HTML}}" in content)
@decorators.HostOnlyTest
def test_suffix(self):
with open(os.path.join(SCRIPT_DIR, 'suffix.html')) as f:
content = f.read().strip()
self.assertTrue("</html>" in content)
|
Validate files needed by Android tools
BUG=catapult:#2546
Review-Url: https://codereview.chromium.org/2194743002# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import unittest
from systrace import decorators
from systrace import update_systrace_trace_viewer
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
STABLE_VIEWER_PATH = os.path.join(SCRIPT_DIR, 'systrace_trace_viewer.html')
# Tests presence and content of static HTML files used not only for Python
# systrace capture, but also Java-based capture in the android SDK tools.
#
# NOTE: changes to this file should typically be accompanied by changes to the
# Android SDK's method of systrace capture.
class MonitorTest(unittest.TestCase):
@decorators.HostOnlyTest
def test_systrace_trace_viewer(self):
self.assertEqual(STABLE_VIEWER_PATH,
update_systrace_trace_viewer.SYSTRACE_TRACE_VIEWER_HTML_FILE)
update_systrace_trace_viewer.update(force_update=True)
with open(STABLE_VIEWER_PATH) as f:
content = f.read().strip()
# expect big html file
self.assertGreater(5 * 1024 * 1024, len(content))
self.assertEqual('<', content[0])
os.remove(f.name)
@decorators.HostOnlyTest
def test_prefix(self):
with open(os.path.join(SCRIPT_DIR, 'prefix.html')) as f:
content = f.read().strip()
self.assertTrue("<html>" in content)
self.assertTrue("<title>Android System Trace</title>" in content)
self.assertTrue("{{SYSTRACE_TRACE_VIEWER_HTML}}" in content)
@decorators.HostOnlyTest
def test_suffix(self):
with open(os.path.join(SCRIPT_DIR, 'suffix.html')) as f:
content = f.read().strip()
self.assertTrue("</html>" in content)
|
<commit_before><commit_msg>Validate files needed by Android tools
BUG=catapult:#2546
Review-Url: https://codereview.chromium.org/2194743002<commit_after># Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import unittest
from systrace import decorators
from systrace import update_systrace_trace_viewer
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
STABLE_VIEWER_PATH = os.path.join(SCRIPT_DIR, 'systrace_trace_viewer.html')
# Tests presence and content of static HTML files used not only for Python
# systrace capture, but also Java-based capture in the android SDK tools.
#
# NOTE: changes to this file should typically be accompanied by changes to the
# Android SDK's method of systrace capture.
class MonitorTest(unittest.TestCase):
@decorators.HostOnlyTest
def test_systrace_trace_viewer(self):
self.assertEqual(STABLE_VIEWER_PATH,
update_systrace_trace_viewer.SYSTRACE_TRACE_VIEWER_HTML_FILE)
update_systrace_trace_viewer.update(force_update=True)
with open(STABLE_VIEWER_PATH) as f:
content = f.read().strip()
# expect a non-trivial HTML file, capped at 5 MB
self.assertGreater(5 * 1024 * 1024, len(content))
self.assertEqual('<', content[0])
os.remove(f.name)
@decorators.HostOnlyTest
def test_prefix(self):
with open(os.path.join(SCRIPT_DIR, 'prefix.html')) as f:
content = f.read().strip()
self.assertTrue("<html>" in content)
self.assertTrue("<title>Android System Trace</title>" in content)
self.assertTrue("{{SYSTRACE_TRACE_VIEWER_HTML}}" in content)
@decorators.HostOnlyTest
def test_suffix(self):
with open(os.path.join(SCRIPT_DIR, 'suffix.html')) as f:
content = f.read().strip()
self.assertTrue("</html>" in content)
|
|
cc0d1d493a4f9d6d702951f21cce805a5e102a6f
|
spectral_graph_display.py
|
spectral_graph_display.py
|
import numpy as np
from scipy import io, sparse, linalg
# run this from elegant scipy chapter
chem = np.load('chem-network.npy')
gap = np.load('gap-network.npy')
neuron_types = np.load('neuron-types.npy')
neuron_ids = np.load('neurons.npy')
A = chem + gap
n = A.shape[0]
c = (A + A.T) / 2
d = sparse.diags([np.sum(c, axis=0)], [0])
d = d.toarray()
L = np.array(d - c)
b = np.sum(c * np.sign(A - A.T), axis=1)
z = np.linalg.pinv(L) @ b
# IPython log file
dinv2 = np.copy(d)
diag = (np.arange(n), np.arange(n))
dinv2[diag] = d[diag] ** (-0.5)
q = dinv2 @ L @ dinv2
eigvals, vec = linalg.eig(q)
x = dinv2 @ vec[:, 1]
x.shape
from matplotlib import pyplot as plt
from matplotlib import colors
ii, jj = np.nonzero(c)
plt.scatter(x, z, c=neuron_types, cmap=colors.ListedColormap(((1, 0, 0), (0, 1, 0), (0, 0, 1))), zorder=1)
for src, dst in zip(ii, jj):
plt.plot(x[[src, dst]], z[[src, dst]], c=(0.85, 0.85, 0.85), lw=0.2, alpha=0.5, zorder=0)
for x0, z0, neuron_id in zip(x, z, neuron_ids):
plt.text(x0, z0, ' ' + neuron_id,
horizontalalignment='left', verticalalignment='center',
fontsize=4, zorder=2)
|
Add spectral graph display script
|
Add spectral graph display script
|
Python
|
bsd-3-clause
|
jni/useful-histories
|
Add spectral graph display script
|
import numpy as np
from scipy import io, sparse, linalg
# run this from elegant scipy chapter
chem = np.load('chem-network.npy')
gap = np.load('gap-network.npy')
neuron_types = np.load('neuron-types.npy')
neuron_ids = np.load('neurons.npy')
A = chem + gap
n = A.shape[0]
c = (A + A.T) / 2
d = sparse.diags([np.sum(c, axis=0)], [0])
d = d.toarray()
L = np.array(d - c)
b = np.sum(c * np.sign(A - A.T), axis=1)
z = np.linalg.pinv(L) @ b
# IPython log file
dinv2 = np.copy(d)
diag = (np.arange(n), np.arange(n))
dinv2[diag] = d[diag] ** (-0.5)
q = dinv2 @ L @ dinv2
eigvals, vec = linalg.eig(q)
x = dinv2 @ vec[:, 1]
x.shape
from matplotlib import pyplot as plt
from matplotlib import colors
ii, jj = np.nonzero(c)
plt.scatter(x, z, c=neuron_types, cmap=colors.ListedColormap(((1, 0, 0), (0, 1, 0), (0, 0, 1))), zorder=1)
for src, dst in zip(ii, jj):
plt.plot(x[[src, dst]], z[[src, dst]], c=(0.85, 0.85, 0.85), lw=0.2, alpha=0.5, zorder=0)
for x0, z0, neuron_id in zip(x, z, neuron_ids):
plt.text(x0, z0, ' ' + neuron_id,
horizontalalignment='left', verticalalignment='center',
fontsize=4, zorder=2)
|
<commit_before><commit_msg>Add spectral graph display script<commit_after>
|
import numpy as np
from scipy import io, sparse, linalg
# run this from elegant scipy chapter
chem = np.load('chem-network.npy')
gap = np.load('gap-network.npy')
neuron_types = np.load('neuron-types.npy')
neuron_ids = np.load('neurons.npy')
A = chem + gap
n = A.shape[0]
c = (A + A.T) / 2
d = sparse.diags([np.sum(c, axis=0)], [0])
d = d.toarray()
L = np.array(d - c)
b = np.sum(c * np.sign(A - A.T), axis=1)
z = np.linalg.pinv(L) @ b
# IPython log file
dinv2 = np.copy(d)
diag = (np.arange(n), np.arange(n))
dinv2[diag] = d[diag] ** (-0.5)
q = dinv2 @ L @ dinv2
eigvals, vec = linalg.eig(q)
x = dinv2 @ vec[:, 1]
x.shape
from matplotlib import pyplot as plt
from matplotlib import colors
ii, jj = np.nonzero(c)
plt.scatter(x, z, c=neuron_types, cmap=colors.ListedColormap(((1, 0, 0), (0, 1, 0), (0, 0, 1))), zorder=1)
for src, dst in zip(ii, jj):
plt.plot(x[[src, dst]], z[[src, dst]], c=(0.85, 0.85, 0.85), lw=0.2, alpha=0.5, zorder=0)
for x0, z0, neuron_id in zip(x, z, neuron_ids):
plt.text(x0, z0, ' ' + neuron_id,
horizontalalignment='left', verticalalignment='center',
fontsize=4, zorder=2)
|
Add spectral graph display scriptimport numpy as np
from scipy import io, sparse, linalg
# run this from elegant scipy chapter
chem = np.load('chem-network.npy')
gap = np.load('gap-network.npy')
neuron_types = np.load('neuron-types.npy')
neuron_ids = np.load('neurons.npy')
A = chem + gap
n = A.shape[0]
c = (A + A.T) / 2
d = sparse.diags([np.sum(c, axis=0)], [0])
d = d.toarray()
L = np.array(d - c)
b = np.sum(c * np.sign(A - A.T), axis=1)
z = np.linalg.pinv(L) @ b
# IPython log file
dinv2 = np.copy(d)
diag = (np.arange(n), np.arange(n))
dinv2[diag] = d[diag] ** (-0.5)
q = dinv2 @ L @ dinv2
eigvals, vec = linalg.eig(q)
x = dinv2 @ vec[:, 1]
x.shape
from matplotlib import pyplot as plt
from matplotlib import colors
ii, jj = np.nonzero(c)
plt.scatter(x, z, c=neuron_types, cmap=colors.ListedColormap(((1, 0, 0), (0, 1, 0), (0, 0, 1))), zorder=1)
for src, dst in zip(ii, jj):
plt.plot(x[[src, dst]], z[[src, dst]], c=(0.85, 0.85, 0.85), lw=0.2, alpha=0.5, zorder=0)
for x0, z0, neuron_id in zip(x, z, neuron_ids):
plt.text(x0, z0, ' ' + neuron_id,
horizontalalignment='left', verticalalignment='center',
fontsize=4, zorder=2)
|
<commit_before><commit_msg>Add spectral graph display script<commit_after>import numpy as np
from scipy import io, sparse, linalg
# run this from elegant scipy chapter
chem = np.load('chem-network.npy')
gap = np.load('gap-network.npy')
neuron_types = np.load('neuron-types.npy')
neuron_ids = np.load('neurons.npy')
A = chem + gap
n = A.shape[0]
c = (A + A.T) / 2
d = sparse.diags([np.sum(c, axis=0)], [0])
d = d.toarray()
L = np.array(d - c)
b = np.sum(c * np.sign(A - A.T), axis=1)
z = np.linalg.pinv(L) @ b
# IPython log file
dinv2 = np.copy(d)
diag = (np.arange(n), np.arange(n))
dinv2[diag] = d[diag] ** (-0.5)
q = dinv2 @ L @ dinv2
eigvals, vec = linalg.eig(q)
x = dinv2 @ vec[:, 1]
x.shape
from matplotlib import pyplot as plt
from matplotlib import colors
ii, jj = np.nonzero(c)
plt.scatter(x, z, c=neuron_types, cmap=colors.ListedColormap(((1, 0, 0), (0, 1, 0), (0, 0, 1))), zorder=1)
for src, dst in zip(ii, jj):
plt.plot(x[[src, dst]], z[[src, dst]], c=(0.85, 0.85, 0.85), lw=0.2, alpha=0.5, zorder=0)
for x0, z0, neuron_id in zip(x, z, neuron_ids):
plt.text(x0, z0, ' ' + neuron_id,
horizontalalignment='left', verticalalignment='center',
fontsize=4, zorder=2)
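One caveat in the session above: scipy's eig does not sort eigenvalues, so vec[:, 1] is not guaranteed to be the second-smallest (Fiedler) eigenvector. A small self-contained check of the safer route for symmetric matrices, using synthetic data in place of the connectome:
import numpy as np
from scipy import linalg

rng = np.random.default_rng(0)
m = rng.random((5, 5))
q_sym = (m + m.T) / 2  # stands in for the normalized Laplacian

# eigh returns real eigenvalues in ascending order for symmetric input,
# so column 1 is reliably the second-smallest eigenvector
w, v = linalg.eigh(q_sym)
fiedler = v[:, 1]

# the equivalent explicit sort when plain eig is used instead
ev, vec = linalg.eig(q_sym)
order = np.argsort(ev.real)
assert np.allclose(np.abs(vec[:, order[1]].real), np.abs(fiedler))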
|
|
53436d9ea71b11c45fb7208c13b626d1b47d1f70
|
tools/tests/test_linter_custom_check.py
|
tools/tests/test_linter_custom_check.py
|
import os
from itertools import chain
from mock import patch, MagicMock
from unittest import TestCase
from tools.linter_lib.custom_check import build_custom_checkers
ROOT_DIR = os.path.abspath(os.path.join(__file__, '..', '..', '..'))
CHECK_MESSAGE = "Fix the corresponding rule in `tools/linter_lib/custom_check.py`."
@patch('tools.linter_lib.custom_check.custom_check_file', return_value=False)
class TestCustomRulesFormat(TestCase):
def test_paths_in_rules(self, mock_custom_check_file):
# type: (MagicMock) -> None
"""Verifies that the paths mentoned in linter rules actually exist"""
by_lang = dict.fromkeys(['py', 'js', 'sh', 'css', 'handlebars', 'html', 'json', 'md', 'txt', 'text', 'yaml'],
['foo/bar.baz'])
check_custom_checks_py, check_custom_checks_nonpy = build_custom_checkers(by_lang)
check_custom_checks_py()
check_custom_checks_nonpy()
for call_args in mock_custom_check_file.call_args_list:
rule_set = call_args[0][2]
for rule in rule_set:
for path in rule.get('exclude', {}):
abs_path = os.path.abspath(os.path.join(ROOT_DIR, path))
self.assertTrue(os.path.exists(abs_path),
"'{}' is neither an existing file, nor a directory. {}".format(path, CHECK_MESSAGE))
for line_tuple in rule.get('exclude_line', {}):
path = line_tuple[0]
abs_path = os.path.abspath(os.path.join(ROOT_DIR, path))
self.assertTrue(os.path.isfile(abs_path),
"The file '{}' doesn't exist. {}".format(path, CHECK_MESSAGE))
for path in rule.get('include_only', {}):
if not os.path.splitext(path)[1]:
self.assertTrue(path.endswith('/'),
"The path '{}' should end with '/'. {}".format(path, CHECK_MESSAGE))
|
Add test case for custom_check.py
|
linter: Add test case for custom_check.py
Fixes #5406.
|
Python
|
apache-2.0
|
dhcrzf/zulip,jackrzhang/zulip,amanharitsh123/zulip,Galexrt/zulip,Galexrt/zulip,timabbott/zulip,rht/zulip,dhcrzf/zulip,rishig/zulip,zulip/zulip,zulip/zulip,showell/zulip,mahim97/zulip,timabbott/zulip,kou/zulip,kou/zulip,rht/zulip,synicalsyntax/zulip,punchagan/zulip,brainwane/zulip,timabbott/zulip,showell/zulip,synicalsyntax/zulip,rht/zulip,shubhamdhama/zulip,andersk/zulip,rishig/zulip,jackrzhang/zulip,eeshangarg/zulip,tommyip/zulip,showell/zulip,mahim97/zulip,kou/zulip,brainwane/zulip,amanharitsh123/zulip,rishig/zulip,mahim97/zulip,tommyip/zulip,hackerkid/zulip,punchagan/zulip,punchagan/zulip,dhcrzf/zulip,zulip/zulip,eeshangarg/zulip,hackerkid/zulip,shubhamdhama/zulip,punchagan/zulip,hackerkid/zulip,eeshangarg/zulip,kou/zulip,eeshangarg/zulip,eeshangarg/zulip,tommyip/zulip,brockwhittaker/zulip,shubhamdhama/zulip,dhcrzf/zulip,andersk/zulip,rht/zulip,punchagan/zulip,andersk/zulip,kou/zulip,rht/zulip,brockwhittaker/zulip,eeshangarg/zulip,andersk/zulip,hackerkid/zulip,andersk/zulip,tommyip/zulip,amanharitsh123/zulip,synicalsyntax/zulip,brainwane/zulip,Galexrt/zulip,brockwhittaker/zulip,brockwhittaker/zulip,zulip/zulip,jackrzhang/zulip,Galexrt/zulip,Galexrt/zulip,brockwhittaker/zulip,mahim97/zulip,zulip/zulip,shubhamdhama/zulip,andersk/zulip,jackrzhang/zulip,verma-varsha/zulip,punchagan/zulip,amanharitsh123/zulip,rishig/zulip,Galexrt/zulip,andersk/zulip,verma-varsha/zulip,shubhamdhama/zulip,shubhamdhama/zulip,showell/zulip,brainwane/zulip,tommyip/zulip,mahim97/zulip,rht/zulip,jackrzhang/zulip,synicalsyntax/zulip,kou/zulip,dhcrzf/zulip,brainwane/zulip,timabbott/zulip,tommyip/zulip,zulip/zulip,rishig/zulip,verma-varsha/zulip,jackrzhang/zulip,kou/zulip,jackrzhang/zulip,eeshangarg/zulip,amanharitsh123/zulip,brainwane/zulip,timabbott/zulip,rishig/zulip,timabbott/zulip,rishig/zulip,hackerkid/zulip,brainwane/zulip,synicalsyntax/zulip,amanharitsh123/zulip,dhcrzf/zulip,shubhamdhama/zulip,tommyip/zulip,showell/zulip,verma-varsha/zulip,showell/zulip,punchagan/zulip,showell/zulip,zulip/zulip,verma-varsha/zulip,dhcrzf/zulip,synicalsyntax/zulip,rht/zulip,timabbott/zulip,hackerkid/zulip,brockwhittaker/zulip,synicalsyntax/zulip,hackerkid/zulip,verma-varsha/zulip,mahim97/zulip,Galexrt/zulip
|
linter: Add test case for custom_check.py
Fixes #5406.
|
import os
from itertools import chain
from mock import patch, MagicMock
from unittest import TestCase
from tools.linter_lib.custom_check import build_custom_checkers
ROOT_DIR = os.path.abspath(os.path.join(__file__, '..', '..', '..'))
CHECK_MESSAGE = "Fix the corresponding rule in `tools/linter_lib/custom_check.py`."
@patch('tools.linter_lib.custom_check.custom_check_file', return_value=False)
class TestCustomRulesFormat(TestCase):
def test_paths_in_rules(self, mock_custom_check_file):
# type: (MagicMock) -> None
"""Verifies that the paths mentoned in linter rules actually exist"""
by_lang = dict.fromkeys(['py', 'js', 'sh', 'css', 'handlebars', 'html', 'json', 'md', 'txt', 'text', 'yaml'],
['foo/bar.baz'])
check_custom_checks_py, check_custom_checks_nonpy = build_custom_checkers(by_lang)
check_custom_checks_py()
check_custom_checks_nonpy()
for call_args in mock_custom_check_file.call_args_list:
rule_set = call_args[0][2]
for rule in rule_set:
for path in rule.get('exclude', {}):
abs_path = os.path.abspath(os.path.join(ROOT_DIR, path))
self.assertTrue(os.path.exists(abs_path),
"'{}' is neither an existing file, nor a directory. {}".format(path, CHECK_MESSAGE))
for line_tuple in rule.get('exclude_line', {}):
path = line_tuple[0]
abs_path = os.path.abspath(os.path.join(ROOT_DIR, path))
self.assertTrue(os.path.isfile(abs_path),
"The file '{}' doesn't exist. {}".format(path, CHECK_MESSAGE))
for path in rule.get('include_only', {}):
if not os.path.splitext(path)[1]:
self.assertTrue(path.endswith('/'),
"The path '{}' should end with '/'. {}".format(path, CHECK_MESSAGE))
|
<commit_before><commit_msg>linter: Add test case for custom_check.py
Fixes #5406.<commit_after>
|
import os
from itertools import chain
from mock import patch, MagicMock
from unittest import TestCase
from tools.linter_lib.custom_check import build_custom_checkers
ROOT_DIR = os.path.abspath(os.path.join(__file__, '..', '..', '..'))
CHECK_MESSAGE = "Fix the corresponding rule in `tools/linter_lib/custom_check.py`."
@patch('tools.linter_lib.custom_check.custom_check_file', return_value=False)
class TestCustomRulesFormat(TestCase):
def test_paths_in_rules(self, mock_custom_check_file):
# type: (MagicMock) -> None
"""Verifies that the paths mentoned in linter rules actually exist"""
by_lang = dict.fromkeys(['py', 'js', 'sh', 'css', 'handlebars', 'html', 'json', 'md', 'txt', 'text', 'yaml'],
['foo/bar.baz'])
check_custom_checks_py, check_custom_checks_nonpy = build_custom_checkers(by_lang)
check_custom_checks_py()
check_custom_checks_nonpy()
for call_args in mock_custom_check_file.call_args_list:
rule_set = call_args[0][2]
for rule in rule_set:
for path in rule.get('exclude', {}):
abs_path = os.path.abspath(os.path.join(ROOT_DIR, path))
self.assertTrue(os.path.exists(abs_path),
"'{}' is neither an existing file, nor a directory. {}".format(path, CHECK_MESSAGE))
for line_tuple in rule.get('exclude_line', {}):
path = line_tuple[0]
abs_path = os.path.abspath(os.path.join(ROOT_DIR, path))
self.assertTrue(os.path.isfile(abs_path),
"The file '{}' doesn't exist. {}".format(path, CHECK_MESSAGE))
for path in rule.get('include_only', {}):
if not os.path.splitext(path)[1]:
self.assertTrue(path.endswith('/'),
"The path '{}' should end with '/'. {}".format(path, CHECK_MESSAGE))
|
linter: Add test case for custom_check.py
Fixes #5406.import os
from itertools import chain
from mock import patch, MagicMock
from unittest import TestCase
from tools.linter_lib.custom_check import build_custom_checkers
ROOT_DIR = os.path.abspath(os.path.join(__file__, '..', '..', '..'))
CHECK_MESSAGE = "Fix the corresponding rule in `tools/linter_lib/custom_check.py`."
@patch('tools.linter_lib.custom_check.custom_check_file', return_value=False)
class TestCustomRulesFormat(TestCase):
def test_paths_in_rules(self, mock_custom_check_file):
# type: (MagicMock) -> None
"""Verifies that the paths mentoned in linter rules actually exist"""
by_lang = dict.fromkeys(['py', 'js', 'sh', 'css', 'handlebars', 'html', 'json', 'md', 'txt', 'text', 'yaml'],
['foo/bar.baz'])
check_custom_checks_py, check_custom_checks_nonpy = build_custom_checkers(by_lang)
check_custom_checks_py()
check_custom_checks_nonpy()
for call_args in mock_custom_check_file.call_args_list:
rule_set = call_args[0][2]
for rule in rule_set:
for path in rule.get('exclude', {}):
abs_path = os.path.abspath(os.path.join(ROOT_DIR, path))
self.assertTrue(os.path.exists(abs_path),
"'{}' is neither an existing file, nor a directory. {}".format(path, CHECK_MESSAGE))
for line_tuple in rule.get('exclude_line', {}):
path = line_tuple[0]
abs_path = os.path.abspath(os.path.join(ROOT_DIR, path))
self.assertTrue(os.path.isfile(abs_path),
"The file '{}' doesn't exist. {}".format(path, CHECK_MESSAGE))
for path in rule.get('include_only', {}):
if not os.path.splitext(path)[1]:
self.assertTrue(path.endswith('/'),
"The path '{}' should end with '/'. {}".format(path, CHECK_MESSAGE))
|
<commit_before><commit_msg>linter: Add test case for custom_check.py
Fixes #5406.<commit_after>import os
from itertools import chain
from mock import patch, MagicMock
from unittest import TestCase
from tools.linter_lib.custom_check import build_custom_checkers
ROOT_DIR = os.path.abspath(os.path.join(__file__, '..', '..', '..'))
CHECK_MESSAGE = "Fix the corresponding rule in `tools/linter_lib/custom_check.py`."
@patch('tools.linter_lib.custom_check.custom_check_file', return_value=False)
class TestCustomRulesFormat(TestCase):
def test_paths_in_rules(self, mock_custom_check_file):
# type: (MagicMock) -> None
"""Verifies that the paths mentoned in linter rules actually exist"""
by_lang = dict.fromkeys(['py', 'js', 'sh', 'css', 'handlebars', 'html', 'json', 'md', 'txt', 'text', 'yaml'],
['foo/bar.baz'])
check_custom_checks_py, check_custom_checks_nonpy = build_custom_checkers(by_lang)
check_custom_checks_py()
check_custom_checks_nonpy()
for call_args in mock_custom_check_file.call_args_list:
rule_set = call_args[0][2]
for rule in rule_set:
for path in rule.get('exclude', {}):
abs_path = os.path.abspath(os.path.join(ROOT_DIR, path))
self.assertTrue(os.path.exists(abs_path),
"'{}' is neither an existing file, nor a directory. {}".format(path, CHECK_MESSAGE))
for line_tuple in rule.get('exclude_line', {}):
path = line_tuple[0]
abs_path = os.path.abspath(os.path.join(ROOT_DIR, path))
self.assertTrue(os.path.isfile(abs_path),
"The file '{}' doesn't exist. {}".format(path, CHECK_MESSAGE))
for path in rule.get('include_only', {}):
if not os.path.splitext(path)[1]:
self.assertTrue(path.endswith('/'),
"The path '{}' should end with '/'. {}".format(path, CHECK_MESSAGE))
|
|
9475f33d5689d348721c3f7409f7bb9ae2f0e639
|
solidity/hypothesis/SolidityTest.py
|
solidity/hypothesis/SolidityTest.py
|
import json
from hypothesis import given, assume, example
import hypothesis.strategies as st
import unittest
from web3 import Web3, TestRPCProvider, RPCProvider
import os
import sys
sys.path.append(os.path.join(os.path.join(os.path.dirname(__file__),'..'),'python'))
import BancorFormula
class TestFormula(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.web3 = Web3(RPCProvider())
abi = json.loads(open('../contracts/build/BancorFormula.abi').read())
bin = open('../contracts/build/BancorFormula.bin').read()
formula = cls.web3.eth.contract(abi=abi, bytecode=bin)
tx = formula.deploy()
cls.formula = formula(cls.web3.eth.getTransactionReceipt(tx)['contractAddress'])
@given(st.integers(min_value=100), st.integers(min_value=100), st.integers(min_value=1, max_value=100), st.integers(min_value=0))
def testPurchaseReturn(self, supply, reserveBalance, reserveRatio, depositAmount):
solidity = self.formula.call().calculatePurchaseReturn(supply, reserveBalance, reserveRatio, depositAmount)
python = BancorFormula.calculatePurchaseReturn(supply, reserveBalance, reserveRatio, depositAmount)
print 'solidity = {}, python = {}'.format(solidity,python)
@given(st.integers(min_value=100), st.integers(min_value=100), st.integers(min_value=1, max_value=100), st.integers(min_value=0))
def testSaleReturn(self, supply, reserveBalance, reserveRatio, sellAmount):
solidity = self.formula.call().calculateSaleReturn(supply, reserveBalance, reserveRatio, sellAmount)
python = BancorFormula.calculateSaleReturn(supply, reserveBalance, reserveRatio, sellAmount)
print 'solidity = {}, python = {}'.format(solidity,python)
unittest.main()
|
Add a test which executes the Solidity contract over an Ethereum RPC, and then compares the result with the Python emulation of the Solidity contract.
|
Add a test which executes the Solidity contract over an Ethereum RPC, and then compares the result with the Python emulation of the Solidity contract.
|
Python
|
apache-2.0
|
enjin/contracts
|
Add a test which executes the Solidity contract over an Ethereum RPC, and then compares the result with the Python emulation of the Solidity contract.
|
import json
from hypothesis import given, assume, example
import hypothesis.strategies as st
import unittest
from web3 import Web3, TestRPCProvider, RPCProvider
import os
import sys
sys.path.append(os.path.join(os.path.join(os.path.dirname(__file__),'..'),'python'))
import BancorFormula
class TestFormula(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.web3 = Web3(RPCProvider())
abi = json.loads(open('../contracts/build/BancorFormula.abi').read())
bin = open('../contracts/build/BancorFormula.bin').read()
formula = cls.web3.eth.contract(abi=abi, bytecode=bin)
tx = formula.deploy()
cls.formula = formula(cls.web3.eth.getTransactionReceipt(tx)['contractAddress'])
@given(st.integers(min_value=100), st.integers(min_value=100), st.integers(min_value=1, max_value=100), st.integers(min_value=0))
def testPurchaseReturn(self, supply, reserveBalance, reserveRatio, depositAmount):
solidity = self.formula.call().calculatePurchaseReturn(supply, reserveBalance, reserveRatio, depositAmount)
python = BancorFormula.calculatePurchaseReturn(supply, reserveBalance, reserveRatio, depositAmount)
print 'solidity = {}, python = {}'.format(solidity,python)
@given(st.integers(min_value=100), st.integers(min_value=100), st.integers(min_value=1, max_value=100), st.integers(min_value=0))
def testSaleReturn(self, supply, reserveBalance, reserveRatio, sellAmount):
solidity = self.formula.call().calculateSaleReturn(supply, reserveBalance, reserveRatio, sellAmount)
python = BancorFormula.calculateSaleReturn(supply, reserveBalance, reserveRatio, sellAmount)
print 'solidity = {}, python = {}'.format(solidity,python)
unittest.main()
|
<commit_before><commit_msg>Add a test which executes the Solidity contract over an Ethereum RPC, and then compares the result with the Python emulation of the Solidity contract.<commit_after>
|
import json
from hypothesis import given, assume, example
import hypothesis.strategies as st
import unittest
from web3 import Web3, TestRPCProvider, RPCProvider
import os
import sys
sys.path.append(os.path.join(os.path.join(os.path.dirname(__file__),'..'),'python'))
import BancorFormula
class TestFormula(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.web3 = Web3(RPCProvider())
abi = json.loads(open('../contracts/build/BancorFormula.abi').read())
bin = open('../contracts/build/BancorFormula.bin').read()
formula = cls.web3.eth.contract(abi=abi, bytecode=bin)
tx = formula.deploy()
cls.formula = formula(cls.web3.eth.getTransactionReceipt(tx)['contractAddress'])
@given(st.integers(min_value=100), st.integers(min_value=100), st.integers(min_value=1, max_value=100), st.integers(min_value=0))
def testPurchaseReturn(self, supply, reserveBalance, reserveRatio, depositAmount):
solidity = self.formula.call().calculatePurchaseReturn(supply, reserveBalance, reserveRatio, depositAmount)
python = BancorFormula.calculatePurchaseReturn(supply, reserveBalance, reserveRatio, depositAmount)
print 'solidity = {}, python = {}'.format(solidity,python)
@given(st.integers(min_value=100), st.integers(min_value=100), st.integers(min_value=1, max_value=100), st.integers(min_value=0))
def testSaleReturn(self, supply, reserveBalance, reserveRatio, sellAmount):
solidity = self.formula.call().calculateSaleReturn(supply, reserveBalance, reserveRatio, sellAmount)
python = BancorFormula.calculateSaleReturn(supply, reserveBalance, reserveRatio, sellAmount)
print 'solidity = {}, python = {}'.format(solidity,python)
unittest.main()
|
Add a test which executes the Solidity contract over an Ethereum RPC, and then compares the result with the Python emulation of the Solidity contract.import json
from hypothesis import given, assume, example
import hypothesis.strategies as st
import unittest
from web3 import Web3, TestRPCProvider, RPCProvider
import os
import sys
sys.path.append(os.path.join(os.path.join(os.path.dirname(__file__),'..'),'python'))
import BancorFormula
class TestFormula(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.web3 = Web3(RPCProvider())
abi = json.loads(open('../contracts/build/BancorFormula.abi').read())
bin = open('../contracts/build/BancorFormula.bin').read()
formula = cls.web3.eth.contract(abi=abi, bytecode=bin)
tx = formula.deploy()
cls.formula = formula(cls.web3.eth.getTransactionReceipt(tx)['contractAddress'])
@given(st.integers(min_value=100), st.integers(min_value=100), st.integers(min_value=1, max_value=100), st.integers(min_value=0))
def testPurchaseReturn(self, supply, reserveBalance, reserveRatio, depositAmount):
solidity = self.formula.call().calculatePurchaseReturn(supply, reserveBalance, reserveRatio, depositAmount)
python = BancorFormula.calculatePurchaseReturn(supply, reserveBalance, reserveRatio, depositAmount)
print 'solidity = {}, python = {}'.format(solidity,python)
@given(st.integers(min_value=100), st.integers(min_value=100), st.integers(min_value=1, max_value=100), st.integers(min_value=0))
def testSaleReturn(self, supply, reserveBalance, reserveRatio, sellAmount):
solidity = self.formula.call().calculateSaleReturn(supply, reserveBalance, reserveRatio, sellAmount)
python = BancorFormula.calculateSaleReturn(supply, reserveBalance, reserveRatio, sellAmount)
print 'solidity = {}, python = {}'.format(solidity,python)
unittest.main()
|
<commit_before><commit_msg>Add a test which executes the Solidity contract over an Ethereum RPC, and then compares the result with the Python emulation of the Solidity contract.<commit_after>import json
from hypothesis import given, assume, example
import hypothesis.strategies as st
import unittest
from web3 import Web3, TestRPCProvider, RPCProvider
import os
import sys
sys.path.append(os.path.join(os.path.join(os.path.dirname(__file__),'..'),'python'))
import BancorFormula
class TestFormula(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.web3 = Web3(RPCProvider())
abi = json.loads(open('../contracts/build/BancorFormula.abi').read())
bin = open('../contracts/build/BancorFormula.bin').read()
formula = cls.web3.eth.contract(abi=abi, bytecode=bin)
tx = formula.deploy()
cls.formula = formula(cls.web3.eth.getTransactionReceipt(tx)['contractAddress'])
@given(st.integers(min_value=100), st.integers(min_value=100), st.integers(min_value=1, max_value=100), st.integers(min_value=0))
def testPurchaseReturn(self, supply, reserveBalance, reserveRatio, depositAmount):
solidity = self.formula.call().calculatePurchaseReturn(supply, reserveBalance, reserveRatio, depositAmount)
python = BancorFormula.calculatePurchaseReturn(supply, reserveBalance, reserveRatio, depositAmount)
print 'solidity = {}, python = {}'.format(solidity,python)
@given(st.integers(min_value=100), st.integers(min_value=100), st.integers(min_value=1, max_value=100), st.integers(min_value=0))
def testSaleReturn(self, supply, reserveBalance, reserveRatio, sellAmount):
solidity = self.formula.call().calculateSaleReturn(supply, reserveBalance, reserveRatio, sellAmount)
python = BancorFormula.calculateSaleReturn(supply, reserveBalance, reserveRatio, sellAmount)
print 'solidity = {}, python = {}'.format(solidity,python)
unittest.main()
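The tests above only print the two results side by side; a natural follow-up is an approximate-equality assertion. The helper below is a sketch against plain integers — the never-overshoot property and the tolerance are assumptions about the formula, not something the contract guarantees here:
def assert_close(solidity_result, python_result, rel_tol=1e-6):
    # integer truncation on-chain should only ever round down
    assert solidity_result <= python_result
    # and the gap should stay within a small relative margin (+1 for rounding)
    assert (python_result - solidity_result) <= python_result * rel_tol + 1

assert_close(999999, 1000000)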
|
|
45d7a440067f6c0aed45468863814fa0ee1c9449
|
rembed/tests/test_plain_rnn.py
|
rembed/tests/test_plain_rnn.py
|
import unittest
import numpy as np
import theano
from theano import tensor as T
from rembed.plain_rnn import RNN
from rembed.util import VariableStore, CropAndPad, IdentityLayer
class RNNTestCase(unittest.TestCase):
"""Basic functional tests for RNN with dummy data."""
def _make_rnn(self, seq_length=4):
self.embedding_dim = embedding_dim = 3
self.vocab_size = vocab_size = 10
self.seq_length = seq_length
def compose_network(h_prev, inp, embedding_dim, model_dim, vs, name="compose"):
# Just add the two embeddings!
W = T.concatenate([T.eye(model_dim), T.eye(model_dim)], axis=0)
i = T.concatenate([h_prev, inp], axis=1)
return i.dot(W)
X = T.imatrix("X")
training_mode = T.scalar("training_mode")
vs = VariableStore()
embeddings = np.arange(vocab_size).reshape(
(vocab_size, 1)).repeat(embedding_dim, axis=1)
self.model = RNN(
embedding_dim, embedding_dim, vocab_size, seq_length, compose_network,
IdentityLayer, training_mode, None, vs,
X=X, make_test_fn=True, initial_embeddings=embeddings)
def test_basic_ff(self):
self._make_rnn(4)
X = np.array([
[3, 1, 2, 0],
[3, 2, 4, 5]
], dtype=np.int32)
expected = np.array([[6, 6, 6],
[14, 14, 14]])
ret = self.model.scan_fn(X, 1.0)
np.testing.assert_almost_equal(ret, expected)
if __name__ == '__main__':
unittest.main()
|
Check in plain RNN test.
|
Check in plain RNN test.
|
Python
|
mit
|
stanfordnlp/spinn,nyu-mll/spinn,stanfordnlp/spinn,nyu-mll/spinn,nyu-mll/spinn,stanfordnlp/spinn
|
Check in plain RNN test.
|
import unittest
import numpy as np
import theano
from theano import tensor as T
from rembed.plain_rnn import RNN
from rembed.util import VariableStore, CropAndPad, IdentityLayer
class RNNTestCase(unittest.TestCase):
"""Basic functional tests for RNN with dummy data."""
def _make_rnn(self, seq_length=4):
self.embedding_dim = embedding_dim = 3
self.vocab_size = vocab_size = 10
self.seq_length = seq_length
def compose_network(h_prev, inp, embedding_dim, model_dim, vs, name="compose"):
# Just add the two embeddings!
W = T.concatenate([T.eye(model_dim), T.eye(model_dim)], axis=0)
i = T.concatenate([h_prev, inp], axis=1)
return i.dot(W)
X = T.imatrix("X")
training_mode = T.scalar("training_mode")
vs = VariableStore()
embeddings = np.arange(vocab_size).reshape(
(vocab_size, 1)).repeat(embedding_dim, axis=1)
self.model = RNN(
embedding_dim, embedding_dim, vocab_size, seq_length, compose_network,
IdentityLayer, training_mode, None, vs,
X=X, make_test_fn=True, initial_embeddings=embeddings)
def test_basic_ff(self):
self._make_rnn(4)
X = np.array([
[3, 1, 2, 0],
[3, 2, 4, 5]
], dtype=np.int32)
expected = np.array([[6, 6, 6],
[14, 14, 14]])
ret = self.model.scan_fn(X, 1.0)
np.testing.assert_almost_equal(ret, expected)
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Check in plain RNN test.<commit_after>
|
import unittest
import numpy as np
import theano
from theano import tensor as T
from rembed.plain_rnn import RNN
from rembed.util import VariableStore, CropAndPad, IdentityLayer
class RNNTestCase(unittest.TestCase):
"""Basic functional tests for RNN with dummy data."""
def _make_rnn(self, seq_length=4):
self.embedding_dim = embedding_dim = 3
self.vocab_size = vocab_size = 10
self.seq_length = seq_length
def compose_network(h_prev, inp, embedding_dim, model_dim, vs, name="compose"):
# Just add the two embeddings!
W = T.concatenate([T.eye(model_dim), T.eye(model_dim)], axis=0)
i = T.concatenate([h_prev, inp], axis=1)
return i.dot(W)
X = T.imatrix("X")
training_mode = T.scalar("training_mode")
vs = VariableStore()
embeddings = np.arange(vocab_size).reshape(
(vocab_size, 1)).repeat(embedding_dim, axis=1)
self.model = RNN(
embedding_dim, embedding_dim, vocab_size, seq_length, compose_network,
IdentityLayer, training_mode, None, vs,
X=X, make_test_fn=True, initial_embeddings=embeddings)
def test_basic_ff(self):
self._make_rnn(4)
X = np.array([
[3, 1, 2, 0],
[3, 2, 4, 5]
], dtype=np.int32)
expected = np.array([[6, 6, 6],
[14, 14, 14]])
ret = self.model.scan_fn(X, 1.0)
np.testing.assert_almost_equal(ret, expected)
if __name__ == '__main__':
unittest.main()
|
Check in plain RNN test.import unittest
import numpy as np
import theano
from theano import tensor as T
from rembed.plain_rnn import RNN
from rembed.util import VariableStore, CropAndPad, IdentityLayer
class RNNTestCase(unittest.TestCase):
"""Basic functional tests for RNN with dummy data."""
def _make_rnn(self, seq_length=4):
self.embedding_dim = embedding_dim = 3
self.vocab_size = vocab_size = 10
self.seq_length = seq_length
def compose_network(h_prev, inp, embedding_dim, model_dim, vs, name="compose"):
# Just add the two embeddings!
W = T.concatenate([T.eye(model_dim), T.eye(model_dim)], axis=0)
i = T.concatenate([h_prev, inp], axis=1)
return i.dot(W)
X = T.imatrix("X")
training_mode = T.scalar("training_mode")
vs = VariableStore()
embeddings = np.arange(vocab_size).reshape(
(vocab_size, 1)).repeat(embedding_dim, axis=1)
self.model = RNN(
embedding_dim, embedding_dim, vocab_size, seq_length, compose_network,
IdentityLayer, training_mode, None, vs,
X=X, make_test_fn=True, initial_embeddings=embeddings)
def test_basic_ff(self):
self._make_rnn(4)
X = np.array([
[3, 1, 2, 0],
[3, 2, 4, 5]
], dtype=np.int32)
expected = np.array([[6, 6, 6],
[14, 14, 14]])
ret = self.model.scan_fn(X, 1.0)
np.testing.assert_almost_equal(ret, expected)
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Check in plain RNN test.<commit_after>import unittest
import numpy as np
import theano
from theano import tensor as T
from rembed.plain_rnn import RNN
from rembed.util import VariableStore, CropAndPad, IdentityLayer
class RNNTestCase(unittest.TestCase):
"""Basic functional tests for RNN with dummy data."""
def _make_rnn(self, seq_length=4):
self.embedding_dim = embedding_dim = 3
self.vocab_size = vocab_size = 10
self.seq_length = seq_length
def compose_network(h_prev, inp, embedding_dim, model_dim, vs, name="compose"):
# Just add the two embeddings!
W = T.concatenate([T.eye(model_dim), T.eye(model_dim)], axis=0)
i = T.concatenate([h_prev, inp], axis=1)
return i.dot(W)
X = T.imatrix("X")
training_mode = T.scalar("training_mode")
vs = VariableStore()
embeddings = np.arange(vocab_size).reshape(
(vocab_size, 1)).repeat(embedding_dim, axis=1)
self.model = RNN(
embedding_dim, embedding_dim, vocab_size, seq_length, compose_network,
IdentityLayer, training_mode, None, vs,
X=X, make_test_fn=True, initial_embeddings=embeddings)
def test_basic_ff(self):
self._make_rnn(4)
X = np.array([
[3, 1, 2, 0],
[3, 2, 4, 5]
], dtype=np.int32)
expected = np.array([[6, 6, 6],
[14, 14, 14]])
ret = self.model.scan_fn(X, 1.0)
np.testing.assert_almost_equal(ret, expected)
if __name__ == '__main__':
unittest.main()
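The expected matrix in test_basic_ff follows directly from the setup: token id i embeds to [i, i, i] and composition is plain addition, so each output row is the row sum of X repeated across the embedding dimension. A quick standalone check of that arithmetic, no Theano required:
import numpy as np

X = np.array([[3, 1, 2, 0],
              [3, 2, 4, 5]])
embedding_dim = 3
# row sums: 3+1+2+0 = 6 and 3+2+4+5 = 14, broadcast across the embedding dim
expected = X.sum(axis=1, keepdims=True).repeat(embedding_dim, axis=1)
assert (expected == np.array([[6, 6, 6], [14, 14, 14]])).all()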
|
|
5974132232605d27fde2704631e3c8a6d928a723
|
src/simple-http-server.py
|
src/simple-http-server.py
|
#!/usr/bin/env python3
from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
import os, http.server
def main(args):
os.chdir(args.directory)
addr = ('', args.port)
httpd = http.server.HTTPServer(addr, http.server.SimpleHTTPRequestHandler)
httpd.serve_forever()
if __name__ == '__main__':
parser = ArgumentParser(formatter_class=ArgumentDefaultsHelpFormatter)
parser.add_argument('-p', '--port', type=int, default=8000,
help='Port on which to listen')
parser.add_argument('-d', '--directory', metavar='DIR', default=os.getcwd(),
help='Directory to serve')
args = parser.parse_args()
try: exit(main(args))
except KeyboardInterrupt as e: pass
|
Add stupidly simple HTTP server using standard python 3
|
Add stupidly simple HTTP server using standard python 3
|
Python
|
unlicense
|
pastly/python-snippits
|
Add stupidly simple HTTP server using standard python 3
|
#!/usr/bin/env python3
from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
import os, http.server
def main(args):
os.chdir(args.directory)
addr = ('', args.port)
httpd = http.server.HTTPServer(addr, http.server.SimpleHTTPRequestHandler)
httpd.serve_forever()
if __name__ == '__main__':
parser = ArgumentParser(formatter_class=ArgumentDefaultsHelpFormatter)
parser.add_argument('-p', '--port', type=int, default=8000,
help='Port on which to listen')
parser.add_argument('-d', '--directory', metavar='DIR', default=os.getcwd(),
help='Directory to serve')
args = parser.parse_args()
try: exit(main(args))
except KeyboardInterrupt as e: pass
|
<commit_before><commit_msg>Add stupidly simple HTTP server using standard python 3<commit_after>
|
#!/usr/bin/env python3
from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
import os, http.server
def main(args):
os.chdir(args.directory)
addr = ('', args.port)
httpd = http.server.HTTPServer(addr, http.server.SimpleHTTPRequestHandler)
httpd.serve_forever()
if __name__ == '__main__':
parser = ArgumentParser(formatter_class=ArgumentDefaultsHelpFormatter)
parser.add_argument('-p', '--port', type=int, default=8000,
help='Port on which to listen')
parser.add_argument('-d', '--directory', metavar='DIR', default=os.getcwd(),
help='Directory to serve')
args = parser.parse_args()
try: exit(main(args))
except KeyboardInterrupt as e: pass
|
Add stupidly simple HTTP server using standard python 3#!/usr/bin/env python3
from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
import os, http.server
def main(args):
os.chdir(args.directory)
addr = ('', args.port)
httpd = http.server.HTTPServer(addr, http.server.SimpleHTTPRequestHandler)
httpd.serve_forever()
if __name__ == '__main__':
parser = ArgumentParser(formatter_class=ArgumentDefaultsHelpFormatter)
parser.add_argument('-p', '--port', type=int, default=8000,
help='Port on which to listen')
parser.add_argument('-d', '--directory', metavar='DIR', default=os.getcwd(),
help='Directory to serve')
args = parser.parse_args()
try: exit(main(args))
except KeyboardInterrupt as e: pass
|
<commit_before><commit_msg>Add stupidly simple HTTP server using standard python 3<commit_after>#!/usr/bin/env python3
from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
import os, http.server
def main(args):
os.chdir(args.directory)
addr = ('', args.port)
httpd = http.server.HTTPServer(addr, http.server.SimpleHTTPRequestHandler)
httpd.serve_forever()
if __name__ == '__main__':
parser = ArgumentParser(formatter_class=ArgumentDefaultsHelpFormatter)
parser.add_argument('-p', '--port', type=int, default=8000,
help='Port on which to listen')
parser.add_argument('-d', '--directory', metavar='DIR', default=os.getcwd(),
help='Directory to serve')
args = parser.parse_args()
try: exit(main(args))
except KeyboardInterrupt as e: pass
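Since Python 3.7 the handler itself accepts a directory argument, which avoids changing the process-wide working directory; a minimal sketch of that variant:
import http.server
from functools import partial

def serve(directory, port=8000):
    # bind the handler to a directory instead of os.chdir on the whole process
    handler = partial(http.server.SimpleHTTPRequestHandler, directory=directory)
    http.server.HTTPServer(('', port), handler).serve_forever()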
|
|
82162a334595ad47090dc1a8991d53ab5ece3736
|
components/expression_evaluator.py
|
components/expression_evaluator.py
|
"""A set of utility functions to evaluate expressions.
Sample Usage:
print(SgExpressionEvaluator.EvaluateExpressionInRow(["a", "bb", "ccc"], [1, 2, 3], "bb + 2.0 + ccc / a"))
print(SgExpressionEvaluator.EvaluateExpressionsInRow(["a", "bb", "ccc"], [1, 2, 3], ["bb + 2.0 + ccc / a", "a + bb + ccc"]))
t = tb.SgTable()
t.SetFields(["a", "bb", "ccc"])
t.Append([1, 2, 3])
t.Append([2, 4, 6])
print(SgExpressionEvaluator.EvaluateExpressionsInTable(t, ["bb + 2.0 + ccc / a", "a + bb + ccc"]))
"""
import table as tb
class SgExpressionEvaluator:
"""A set of utility functions to evaluate expressions."""
@staticmethod
def EvaluateExpressionInRow(fields, row, expr):
"""
Evaluates the results of an expression (presumably a non-terminal field)
given a list of fields and the values of a row.
"""
# TODO(lnishan): This works for now, but in the future we might want to implement
# a proper evaluator (correct tokenization, 2-stack evaluation)
pairs = zip(fields, row)
pairs.sort(key=lambda p: len(p[0]), reverse=True)
for pair in pairs:
expr = expr.replace(pair[0], str(pair[1]))
return eval(expr)
@staticmethod
def EvaluateExpressionsInRow(fields, row, exprs):
return [SgExpressionEvaluator.EvaluateExpressionInRow(fields, row, expr) for expr in exprs]
@staticmethod
def EvaluateExpressionsInTable(table, exprs):
ret = tb.SgTable()
ret.SetFields(exprs)
for row in table:
ret.Append(SgExpressionEvaluator.EvaluateExpressionsInRow(table.GetFields(), row, exprs))
return ret
|
Add SgExpressionEvaluator - Evaluates expressions given fields and values
|
Add SgExpressionEvaluator - Evaluates expressions given fields and values
|
Python
|
mit
|
lnishan/SQLGitHub
|
Add SgExpressionEvaluator - Evaluates expressions given fields and values
|
"""A set of utility functions to evaluate expressions.
Sample Usage:
print(SgExpressionEvaluator.EvaluateExpressionInRow(["a", "bb", "ccc"], [1, 2, 3], "bb + 2.0 + ccc / a"))
print(SgExpressionEvaluator.EvaluateExpressionsInRow(["a", "bb", "ccc"], [1, 2, 3], ["bb + 2.0 + ccc / a", "a + bb + ccc"]))
t = tb.SgTable()
t.SetFields(["a", "bb", "ccc"])
t.Append([1, 2, 3])
t.Append([2, 4, 6])
print(SgExpressionEvaluator.EvaluateExpressionsInTable(t, ["bb + 2.0 + ccc / a", "a + bb + ccc"]))
"""
import table as tb
class SgExpressionEvaluator:
"""A set of utility functions to evaluate expressions."""
@staticmethod
def EvaluateExpressionInRow(fields, row, expr):
"""
Evaluates the results of an expression (presumably a non-terminal field)
given a list of fields and the values of a row.
"""
# TODO(lnishan): This works for now, but in the future we might want to implement
# a proper evaluator (correct tokenization, 2-stack evaluation)
pairs = zip(fields, row)
pairs.sort(key=lambda p: len(p[0]), reverse=True)
for pair in pairs:
expr = expr.replace(pair[0], str(pair[1]))
return eval(expr)
@staticmethod
def EvaluateExpressionsInRow(fields, row, exprs):
return [SgExpressionEvaluator.EvaluateExpressionInRow(fields, row, expr) for expr in exprs]
@staticmethod
def EvaluateExpressionsInTable(table, exprs):
ret = tb.SgTable()
ret.SetFields(exprs)
for row in table:
ret.Append(SgExpressionEvaluator.EvaluateExpressionsInRow(table.GetFields(), row, exprs))
return ret
|
<commit_before><commit_msg>Add SgExpressionEvaluator - Evaluates expressions given fields and values<commit_after>
|
"""A set of utility functions to evaluate expressions.
Sample Usage:
print(SgExpressionEvaluator.EvaluateExpressionInRow(["a", "bb", "ccc"], [1, 2, 3], "bb + 2.0 + ccc / a"))
print(SgExpressionEvaluator.EvaluateExpressionsInRow(["a", "bb", "ccc"], [1, 2, 3], ["bb + 2.0 + ccc / a", "a + bb + ccc"]))
t = tb.SgTable()
t.SetFields(["a", "bb", "ccc"])
t.Append([1, 2, 3])
t.Append([2, 4, 6])
print(SgExpressionEvaluator.EvaluateExpressionsInTable(t, ["bb + 2.0 + ccc / a", "a + bb + ccc"]))
"""
import table as tb
class SgExpressionEvaluator:
"""A set of utility functions to evaluate expressions."""
@staticmethod
def EvaluateExpressionInRow(fields, row, expr):
"""
Evaluates the results of an expression (presumably a non-terminal field)
given a list of fields and the values of a row.
"""
# TODO(lnishan): This works for now, but in the future we might want to implement
# a proper evaluator (correct tokenization, 2-stack evaluation)
pairs = zip(fields, row)
pairs.sort(key=lambda p: len(p[0]), reverse=True)
for pair in pairs:
expr = expr.replace(pair[0], str(pair[1]))
return eval(expr)
@staticmethod
def EvaluateExpressionsInRow(fields, row, exprs):
return [SgExpressionEvaluator.EvaluateExpressionInRow(fields, row, expr) for expr in exprs]
@staticmethod
def EvaluateExpressionsInTable(table, exprs):
ret = tb.SgTable()
ret.SetFields(exprs)
for row in table:
ret.Append(SgExpressionEvaluator.EvaluateExpressionsInRow(table.GetFields(), row, exprs))
return ret
|
Add SgExpressionEvaluator - Evaluates expressions given fields and values"""A set of utility functions to evaluate expressions.
Sample Usage:
print(SgExpressionEvaluator.EvaluateExpressionInRow(["a", "bb", "ccc"], [1, 2, 3], "bb + 2.0 + ccc / a"))
print(SgExpressionEvaluator.EvaluateExpressionsInRow(["a", "bb", "ccc"], [1, 2, 3], ["bb + 2.0 + ccc / a", "a + bb + ccc"]))
t = tb.SgTable()
t.SetFields(["a", "bb", "ccc"])
t.Append([1, 2, 3])
t.Append([2, 4, 6])
print(SgExpressionEvaluator.EvaluateExpressionsInTable(t, ["bb + 2.0 + ccc / a", "a + bb + ccc"]))
"""
import table as tb
class SgExpressionEvaluator:
"""A set of utility functions to evaluate expressions."""
@staticmethod
def EvaluateExpressionInRow(fields, row, expr):
"""
Evaluates the results of an expression (presumably a non-terminal field)
given a list of fields and the values of a row.
"""
# TODO(lnishan): This works for now, but in the future we might want to implement
# a proper evaluator (correct tokenization, 2-stack evaluation)
pairs = zip(fields, row)
pairs.sort(key=lambda p: len(p[0]), reverse=True)
for pair in pairs:
expr = expr.replace(pair[0], str(pair[1]))
return eval(expr)
@staticmethod
def EvaluateExpressionsInRow(fields, row, exprs):
return [SgExpressionEvaluator.EvaluateExpressionInRow(fields, row, expr) for expr in exprs]
@staticmethod
def EvaluateExpressionsInTable(table, exprs):
ret = tb.SgTable()
ret.SetFields(exprs)
for row in table:
ret.Append(SgExpressionEvaluator.EvaluateExpressionsInRow(table.GetFields(), row, exprs))
return ret
|
<commit_before><commit_msg>Add SgExpressionEvaluator - Evaluates expressions given fields and values<commit_after>"""A set of utility functions to evaluate expressions.
Sample Usage:
print(SgExpressionEvaluator.EvaluateExpressionInRow(["a", "bb", "ccc"], [1, 2, 3], "bb + 2.0 + ccc / a"))
print(SgExpressionEvaluator.EvaluateExpressionsInRow(["a", "bb", "ccc"], [1, 2, 3], ["bb + 2.0 + ccc / a", "a + bb + ccc"]))
t = tb.SgTable()
t.SetFields(["a", "bb", "ccc"])
t.Append([1, 2, 3])
t.Append([2, 4, 6])
print(SgExpressionEvaluator.EvaluateExpressionsInTable(t, ["bb + 2.0 + ccc / a", "a + bb + ccc"]))
"""
import table as tb
class SgExpressionEvaluator:
"""A set of utility functions to evaluate expressions."""
@staticmethod
def EvaluateExpressionInRow(fields, row, expr):
"""
Evaluates the results of an expression (presumably a non-terminal field)
given a list of fields and the values of a row.
"""
# TODO(lnishan): This works for now, but in the future we might want to implement
# a proper evaluator (correct tokenization, 2-stack evaluation)
pairs = zip(fields, row)
pairs.sort(key=lambda p: len(p[0]), reverse=True)
for pair in pairs:
expr = expr.replace(pair[0], str(pair[1]))
return eval(expr)
@staticmethod
def EvaluateExpressionsInRow(fields, row, exprs):
return [SgExpressionEvaluator.EvaluateExpressionInRow(fields, row, expr) for expr in exprs]
@staticmethod
def EvaluateExpressionsInTable(table, exprs):
ret = tb.SgTable()
ret.SetFields(exprs)
for row in table:
ret.Append(SgExpressionEvaluator.EvaluateExpressionsInRow(table.GetFields(), row, exprs))
return ret
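The TODO above names the eventual direction — real tokenization plus a two-stack evaluation. Below is a compact sketch of that idea for +, -, *, / with parentheses; the operator table and tokenizer regex are assumptions, and looking fields up in a dict sidesteps the longest-name-first string replacement used above:
import operator
import re

OPS = {'+': (1, operator.add), '-': (1, operator.sub),
       '*': (2, operator.mul), '/': (2, operator.truediv)}

def evaluate(expr, env):
    tokens = re.findall(r'\d+\.\d+|\d+|\w+|[+\-*/()]', expr)
    out, ops = [], []  # value stack and operator stack

    def apply_op():
        op = ops.pop()
        b, a = out.pop(), out.pop()
        out.append(OPS[op][1](a, b))

    for tok in tokens:
        if tok in OPS:
            # pop operators of equal or higher precedence (left-associative)
            while ops and ops[-1] in OPS and OPS[ops[-1]][0] >= OPS[tok][0]:
                apply_op()
            ops.append(tok)
        elif tok == '(':
            ops.append(tok)
        elif tok == ')':
            while ops[-1] != '(':
                apply_op()
            ops.pop()
        else:
            out.append(float(tok) if tok[0].isdigit() else env[tok])
    while ops:
        apply_op()
    return out[0]

assert evaluate('bb + 2.0 + ccc / a', {'a': 1, 'bb': 2, 'ccc': 3}) == 7.0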
|
|
a543e4dd62de6e0dbda0fa43c67d3a3fe2165499
|
download_summaries.py
|
download_summaries.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from utils.summary_downloader import SummaryDownloader
if __name__ == '__main__':
# setting target dir and time interval of interest
tgt_dir = r"D:\nhl\official_and_json\2016-17"
date = "May 20, 2017"
to_date = "May 30, 2017"
downloader = SummaryDownloader(tgt_dir, date, to_date)
# downloader.run()
|
Add initial version of download script
|
Add initial version of download script
|
Python
|
mit
|
leaffan/pynhldb
|
Add initial version of download script
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from utils.summary_downloader import SummaryDownloader
if __name__ == '__main__':
# setting target dir and time interval of interest
tgt_dir = r"D:\nhl\official_and_json\2016-17"
date = "May 20, 2017"
to_date = "May 30, 2017"
downloader = SummaryDownloader(tgt_dir, date, to_date)
# downloader.run()
|
<commit_before><commit_msg>Add initial version of download script<commit_after>
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from utils.summary_downloader import SummaryDownloader
if __name__ == '__main__':
# setting target dir and time interval of interest
tgt_dir = r"D:\nhl\official_and_json\2016-17"
date = "May 20, 2017"
to_date = "May 30, 2017"
downloader = SummaryDownloader(tgt_dir, date, to_date)
# downloader.run()
|
Add initial version of download script#!/usr/bin/env python
# -*- coding: utf-8 -*-
from utils.summary_downloader import SummaryDownloader
if __name__ == '__main__':
# setting target dir and time interval of interest
tgt_dir = r"D:\nhl\official_and_json\2016-17"
date = "May 20, 2017"
to_date = "May 30, 2017"
downloader = SummaryDownloader(tgt_dir, date, to_date)
# downloader.run()
|
<commit_before><commit_msg>Add initial version of download script<commit_after>#!/usr/bin/env python
# -*- coding: utf-8 -*-
from utils.summary_downloader import SummaryDownloader
if __name__ == '__main__':
# setting target dir and time interval of interest
tgt_dir = r"D:\nhl\official_and_json\2016-17"
date = "May 20, 2017"
to_date = "May 30, 2017"
downloader = SummaryDownloader(tgt_dir, date, to_date)
# downloader.run()
|
|
1c270ec827b2cbb2d8ed6077366d2d7d10e17b16
|
utilities/foto-files/manage-duplicates.py
|
utilities/foto-files/manage-duplicates.py
|
'''
Compare File Contents and remove duplicate files
get sha256 hash for each file found
use dictionary to check for duplicates
delete duplicates
Dave Cuthbert
(C) 2021-02-12
MIT License
'''
import os
from collections import defaultdict
import hashlib
import sys
def get_hash(file_name):
BLOCK_SIZE = 1048576  # read in 1 MB chunks to bound memory use
hasher = hashlib.sha256()
f = open(file_name, 'rb')
read_buffer = f.read(BLOCK_SIZE)
while len(read_buffer) > 0:
hasher.update(read_buffer)
read_buffer = f.read(BLOCK_SIZE)
return hasher.hexdigest()
def check_files(start_dir):
compare_list = defaultdict()
num_duplicates = 0
for path, dirs, files in os.walk(start_dir):
for f in files:
file_name = os.path.join(path, f)
k = get_hash(file_name)
if k in compare_list:
# FOR DEV _DEBUG
# print(f"DUP: {len(compare_list[k])} {os.path.join(path,f)}")
try:
compare_list[k].append(os.path.join(path,f))
except Exception:
print(f"Could not add {os.path.join(path,f)}" )
print(f"{compare_list[k]}")
sys.exit()
else:
compare_list[k] = [os.path.join(path,f)]
return compare_list
def print_list(dups_list):
for hash_key, file_names in dups_list.items():
if len(file_names) > 1:
print(f"HASH: {hash_key} DUPS: {len(file_names)}")
for f in file_names:
print(f" {f}")
def find_duplicates():
starting_dir = os.getcwd()
list_of_dups = check_files(starting_dir)
print_list(list_of_dups)
print("\nDONE")
if "__main__" == __name__:
find_duplicates()
#EOF
|
Replace older dups management script
|
Replace older dups management script
|
Python
|
mit
|
daveinnyc/various,daveinnyc/various,daveinnyc/various,daveinnyc/various,daveinnyc/various,daveinnyc/various,daveinnyc/various
|
Replace older dups management script
|
'''
Compare File Contents and remove duplicate files
get sha256 hash for each file found
use dictionary to check for duplicates
delete duplicates
Dave Cuthbert
(C) 2021-02-12
MIT License
'''
import os
from collections import defaultdict
import hashlib
import sys
def get_hash(file_name):
BLOCK_SIZE = 1048576 #1MB - Protect against reading large files
hasher = hashlib.sha256()
f = open(file_name, 'rb')
read_buffer = f.read(BLOCK_SIZE)
while len(read_buffer) > 0:
hasher.update(read_buffer)
read_buffer = f.read(BLOCK_SIZE)
return hasher.hexdigest()
def check_files(start_dir):
compare_list = defaultdict()
num_duplicates = 0
for path, dirs, files in os.walk(start_dir):
for f in files:
file_name = os.path.join(path, f)
k = get_hash(file_name)
if k in compare_list:
# FOR DEV _DEBUG
# print(f"DUP: {len(compare_list[k])} {os.path.join(path,f)}")
try:
compare_list[k].append(os.path.join(path,f))
except:
print(f"Could not add {os.path.join(path,f)}" )
print(f"{compare_list[k]}")
sys.exit()
else:
compare_list[k] = [os.path.join(path,f)]
return compare_list
def print_list(dups_list):
for hash_key, file_names in dups_list.items():
if len(file_names) > 1:
print(f"HASH: {hash_key} DUPS: {len(file_names)}")
for f in file_names:
print(f" {f}")
def find_duplicates():
starting_dir = os.getcwd()
list_of_dups = check_files(starting_dir)
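    # note: duplicates are only reported below; no files are deleted automatically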
print_list(list_of_dups)
print("\nDONE")
if "__main__" == __name__:
find_duplicates()
#EOF
|
<commit_before><commit_msg>Replace older dups management script<commit_after>
|
'''
Compare File Contents and remove duplicate files
get sha256 hash for each file found
use dictionary to check for duplicates
delete duplicates
Dave Cuthbert
(C) 2021-02-12
MIT License
'''
import os
from collections import defaultdict
import hashlib
import sys
def get_hash(file_name):
BLOCK_SIZE = 1048576 #1MB - Protect against reading large files
hasher = hashlib.sha256()
f = open(file_name, 'rb')
read_buffer = f.read(BLOCK_SIZE)
while len(read_buffer) > 0:
hasher.update(read_buffer)
read_buffer = f.read(BLOCK_SIZE)
return hasher.hexdigest()
def check_files(start_dir):
compare_list = defaultdict()
num_duplicates = 0
for path, dirs, files in os.walk(start_dir):
for f in files:
file_name = os.path.join(path, f)
k = get_hash(file_name)
if k in compare_list:
# FOR DEV _DEBUG
# print(f"DUP: {len(compare_list[k])} {os.path.join(path,f)}")
try:
compare_list[k].append(os.path.join(path,f))
except:
print(f"Could not add {os.path.join(path,f)}" )
print(f"{compare_list[k]}")
sys.exit()
else:
compare_list[k] = [os.path.join(path,f)]
return compare_list
def print_list(dups_list):
for hash_key, file_names in dups_list.items():
if len(file_names) > 1:
print(f"HASH: {hash_key} DUPS: {len(file_names)}")
for f in file_names:
print(f" {f}")
def find_duplicates():
starting_dir = os.getcwd()
list_of_dups = check_files(starting_dir)
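    # note: duplicates are only reported below; no files are deleted automatically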
print_list(list_of_dups)
print("\nDONE")
if "__main__" == __name__:
find_duplicates()
#EOF
|
Replace older dups management script'''
Compare File Contents and remove duplicate files
get sha256 hash for each file found
use dictionary to check for duplicates
delete duplicates
Dave Cuthbert
(C) 2021-02-12
MIT License
'''
import os
from collections import defaultdict
import hashlib
import sys
def get_hash(file_name):
BLOCK_SIZE = 1048576 #1MB - Protect against reading large files
hasher = hashlib.sha256()
f = open(file_name, 'rb')
read_buffer = f.read(BLOCK_SIZE)
while len(read_buffer) > 0:
hasher.update(read_buffer)
read_buffer = f.read(BLOCK_SIZE)
return hasher.hexdigest()
def check_files(start_dir):
compare_list = defaultdict()
num_duplicates = 0
for path, dirs, files in os.walk(start_dir):
for f in files:
file_name = os.path.join(path, f)
k = get_hash(file_name)
if k in compare_list:
# FOR DEV _DEBUG
# print(f"DUP: {len(compare_list[k])} {os.path.join(path,f)}")
try:
compare_list[k].append(os.path.join(path,f))
except:
print(f"Could not add {os.path.join(path,f)}" )
print(f"{compare_list[k]}")
sys.exit()
else:
compare_list[k] = [os.path.join(path,f)]
return compare_list
def print_list(dups_list):
for hash_key, file_names in dups_list.items():
if len(file_names) > 1:
print(f"HASH: {hash_key} DUPS: {len(file_names)}")
for f in file_names:
print(f" {f}")
def find_duplicates():
starting_dir = os.getcwd()
list_of_dups = check_files(starting_dir)
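    # note: duplicates are only reported below; no files are deleted automatically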
print_list(list_of_dups)
print("\nDONE")
if "__main__" == __name__:
find_duplicates()
#EOF
|
<commit_before><commit_msg>Replace older dups management script<commit_after>'''
Compare File Contents and remove duplicate files
get sha256 hash for each file found
use dictionary to check for duplicates
delete duplicates
Dave Cuthbert
(C) 2021-02-12
MIT License
'''
import os
from collections import defaultdict
import hashlib
import sys
def get_hash(file_name):
BLOCK_SIZE = 1048576 #1MB - Protect against reading large files
hasher = hashlib.sha256()
f = open(file_name, 'rb')
read_buffer = f.read(BLOCK_SIZE)
while len(read_buffer) > 0:
hasher.update(read_buffer)
read_buffer = f.read(BLOCK_SIZE)
return hasher.hexdigest()
def check_files(start_dir):
compare_list = defaultdict()
num_duplicates = 0
for path, dirs, files in os.walk(start_dir):
for f in files:
file_name = os.path.join(path, f)
k = get_hash(file_name)
if k in compare_list:
# FOR DEV _DEBUG
# print(f"DUP: {len(compare_list[k])} {os.path.join(path,f)}")
try:
compare_list[k].append(os.path.join(path,f))
except:
print(f"Could not add {os.path.join(path,f)}" )
print(f"{compare_list[k]}")
sys.exit()
else:
compare_list[k] = [os.path.join(path,f)]
return compare_list
def print_list(dups_list):
for hash_key, file_names in dups_list.items():
if len(file_names) > 1:
print(f"HASH: {hash_key} DUPS: {len(file_names)}")
for f in file_names:
print(f" {f}")
def find_duplicates():
starting_dir = os.getcwd()
list_of_dups = check_files(starting_dir)
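    # note: duplicates are only reported below; no files are deleted automatically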
print_list(list_of_dups)
print("\nDONE")
if "__main__" == __name__:
find_duplicates()
#EOF
|
|
81248594a64d59236c0ab369fb1d5d7eee073568
|
tests/test_coefficient.py
|
tests/test_coefficient.py
|
# -*- coding: utf-8 -*-
from nose.tools import assert_equal
from openfisca_france.model.prelevements_obligatoires.prelevements_sociaux.cotisations_sociales.allegements import *
from openfisca_core.periods import *
from openfisca_france import FranceTaxBenefitSystem
def test_coefficient_proratisation_only_contract_periods():
tax_benefit_system = FranceTaxBenefitSystem()
scenario = tax_benefit_system.new_scenario()
scenario.init_single_entity(period='2017-11',
parent1=dict(salaire_de_base=2300,
effectif_entreprise=1,
code_postal_entreprise="75001",
categorie_salarie=u'prive_non_cadre',
contrat_de_travail_debut='2017-11-1',
allegement_fillon_mode_recouvrement=u'progressif'))
simulation = scenario.new_simulation()
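    # the test expects 1 for full in-contract months (Nov, Dec 2017), 0 before the contract starts (Oct), and an annual sum of 2 for 2017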
assert_equal(simulation.calculate_add('coefficient_proratisation','2017-11'),1)
assert_equal(simulation.calculate_add('coefficient_proratisation','2017-12'),1)
assert_equal(simulation.calculate_add('coefficient_proratisation','2017-10'),0)
assert_equal(simulation.calculate_add('coefficient_proratisation','2017'),2)
|
Add failing test for coefficient_proratisation
|
Add failing test for coefficient_proratisation
|
Python
|
agpl-3.0
|
antoinearnoud/openfisca-france,antoinearnoud/openfisca-france,sgmap/openfisca-france,sgmap/openfisca-france
|
Add failing test for coefficient_proratisation
|
# -*- coding: utf-8 -*-
from nose.tools import assert_equal
from openfisca_france.model.prelevements_obligatoires.prelevements_sociaux.cotisations_sociales.allegements import *
from openfisca_core.periods import *
from openfisca_france import FranceTaxBenefitSystem
def test_coefficient_proratisation_only_contract_periods():
tax_benefit_system = FranceTaxBenefitSystem()
scenario = tax_benefit_system.new_scenario()
scenario.init_single_entity(period='2017-11',
parent1=dict(salaire_de_base=2300,
effectif_entreprise=1,
code_postal_entreprise="75001",
categorie_salarie=u'prive_non_cadre',
contrat_de_travail_debut='2017-11-1',
allegement_fillon_mode_recouvrement=u'progressif'))
simulation = scenario.new_simulation()
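    # the test expects 1 for full in-contract months (Nov, Dec 2017), 0 before the contract starts (Oct), and an annual sum of 2 for 2017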
assert_equal(simulation.calculate_add('coefficient_proratisation','2017-11'),1)
assert_equal(simulation.calculate_add('coefficient_proratisation','2017-12'),1)
assert_equal(simulation.calculate_add('coefficient_proratisation','2017-10'),0)
assert_equal(simulation.calculate_add('coefficient_proratisation','2017'),2)
|
<commit_before><commit_msg>Add failing test for coefficient_proratisation<commit_after>
|
# -*- coding: utf-8 -*-
from nose.tools import assert_equal
from openfisca_france.model.prelevements_obligatoires.prelevements_sociaux.cotisations_sociales.allegements import *
from openfisca_core.periods import *
from openfisca_france import FranceTaxBenefitSystem
def test_coefficient_proratisation_only_contract_periods():
tax_benefit_system = FranceTaxBenefitSystem()
scenario = tax_benefit_system.new_scenario()
scenario.init_single_entity(period='2017-11',
parent1=dict(salaire_de_base=2300,
effectif_entreprise=1,
code_postal_entreprise="75001",
categorie_salarie=u'prive_non_cadre',
contrat_de_travail_debut='2017-11-1',
allegement_fillon_mode_recouvrement=u'progressif'))
simulation = scenario.new_simulation()
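    # the test expects 1 for full in-contract months (Nov, Dec 2017), 0 before the contract starts (Oct), and an annual sum of 2 for 2017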
assert_equal(simulation.calculate_add('coefficient_proratisation','2017-11'),1)
assert_equal(simulation.calculate_add('coefficient_proratisation','2017-12'),1)
assert_equal(simulation.calculate_add('coefficient_proratisation','2017-10'),0)
assert_equal(simulation.calculate_add('coefficient_proratisation','2017'),2)
|
Add failing test for coefficient_proratisation# -*- coding: utf-8 -*-
from nose.tools import assert_equal
from openfisca_france.model.prelevements_obligatoires.prelevements_sociaux.cotisations_sociales.allegements import *
from openfisca_core.periods import *
from openfisca_france import FranceTaxBenefitSystem
def test_coefficient_proratisation_only_contract_periods():
tax_benefit_system = FranceTaxBenefitSystem()
scenario = tax_benefit_system.new_scenario()
scenario.init_single_entity(period='2017-11',
parent1=dict(salaire_de_base=2300,
effectif_entreprise=1,
code_postal_entreprise="75001",
categorie_salarie=u'prive_non_cadre',
contrat_de_travail_debut='2017-11-1',
allegement_fillon_mode_recouvrement=u'progressif'))
simulation = scenario.new_simulation()
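    # the test expects 1 for full in-contract months (Nov, Dec 2017), 0 before the contract starts (Oct), and an annual sum of 2 for 2017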
assert_equal(simulation.calculate_add('coefficient_proratisation','2017-11'),1)
assert_equal(simulation.calculate_add('coefficient_proratisation','2017-12'),1)
assert_equal(simulation.calculate_add('coefficient_proratisation','2017-10'),0)
assert_equal(simulation.calculate_add('coefficient_proratisation','2017'),2)
|
<commit_before><commit_msg>Add failing test for coefficient_proratisation<commit_after># -*- coding: utf-8 -*-
from nose.tools import assert_equal
from openfisca_france.model.prelevements_obligatoires.prelevements_sociaux.cotisations_sociales.allegements import *
from openfisca_core.periods import *
from openfisca_france import FranceTaxBenefitSystem
def test_coefficient_proratisation_only_contract_periods():
tax_benefit_system = FranceTaxBenefitSystem()
scenario = tax_benefit_system.new_scenario()
scenario.init_single_entity(period='2017-11',
parent1=dict(salaire_de_base=2300,
effectif_entreprise=1,
code_postal_entreprise="75001",
categorie_salarie=u'prive_non_cadre',
contrat_de_travail_debut='2017-11-1',
allegement_fillon_mode_recouvrement=u'progressif'))
simulation = scenario.new_simulation()
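    # the test expects 1 for full in-contract months (Nov, Dec 2017), 0 before the contract starts (Oct), and an annual sum of 2 for 2017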
assert_equal(simulation.calculate_add('coefficient_proratisation','2017-11'),1)
assert_equal(simulation.calculate_add('coefficient_proratisation','2017-12'),1)
assert_equal(simulation.calculate_add('coefficient_proratisation','2017-10'),0)
assert_equal(simulation.calculate_add('coefficient_proratisation','2017'),2)
|
|
608976fb5663adc5cbfa3ebc9af00171e3042a98
|
monkey/cpu_profile.py
|
monkey/cpu_profile.py
|
from com.android.monkeyrunner import MonkeyRunner as mkr
from com.android.monkeyrunner import MonkeyDevice as mkd
import os, sys
sys.path.append(os.getcwd())
import common
# String Constants
cpucfg_pkg = 'br.ufpe.emilianofirmino.cpunuke'
cpustress_pkg = 'br.ufpe.emilianofirmino.cpujudge'
def config_cpu(device, corenum, speed):
common.open_app(device, cpucfg_pkg)
mkr.sleep(5)
for core in range(corenum):
device.drag((384, 400), (384, 250)) # select number of cpu core
device.type(str(speed))
mkr.sleep(5)
common.press_back(device)
def run_stress(device):
common.open_app(device, cpustress_pkg)
mkr.sleep(60)
device.touch(384, 200, mkd.DOWN_AND_UP) # click run button
mkr.sleep(10 * 60)
common.unlock_screen(device)
mkr.sleep(60)
    common.press_back(device)
if __name__ == "__main__":
    # Establish Connection
d = common.open_dev()
cpu_core = [1, 2, 3, 4]
cpu_freq = [384, 486, 594, 702, 810, 918, 1026, 1134, 1242, 1350, 1458, 1512]
# Run all cpu tests
for n in cpu_core:
for f in cpu_freq:
config_cpu(d, n, f)
common.start_prof(d)
run_stress(d)
common.stop_prof(d)
mkr.sleep(60)
# Close Connection
d.close()
|
Add monkeyrunner script of cpu energy usage
|
Add monkeyrunner script of cpu energy usage
Signed-off-by: Emiliano Firmino <05e638a1dec5cbdb4665208eae603124467a154b@gmail.com>
|
Python
|
mit
|
Miliox/droid_emc2,Miliox/droid_emc2,Miliox/droid_emc2,Miliox/droid_emc2,Miliox/droid_emc2
|
Add monkeyrunner script of cpu energy usage
Signed-off-by: Emiliano Firmino <05e638a1dec5cbdb4665208eae603124467a154b@gmail.com>
|
from com.android.monkeyrunner import MonkeyRunner as mkr
from com.android.monkeyrunner import MonkeyDevice as mkd
import os, sys
sys.path.append(os.getcwd())
import common
# String Constants
cpucfg_pkg = 'br.ufpe.emilianofirmino.cpunuke'
cpustress_pkg = 'br.ufpe.emilianofirmino.cpujudge'
def config_cpu(device, corenum, speed):
common.open_app(device, cpucfg_pkg)
mkr.sleep(5)
for core in range(corenum):
device.drag((384, 400), (384, 250)) # select number of cpu core
device.type(str(speed))
mkr.sleep(5)
common.press_back(device)
def run_stress(device):
common.open_app(device, cpustress_pkg)
mkr.sleep(60)
device.touch(384, 200, mkd.DOWN_AND_UP) # click run button
mkr.sleep(10 * 60)
common.unlock_screen(device)
mkr.sleep(60)
    common.press_back(device)
if __name__ == "__main__":
    # Establish Connection
d = common.open_dev()
cpu_core = [1, 2, 3, 4]
cpu_freq = [384, 486, 594, 702, 810, 918, 1026, 1134, 1242, 1350, 1458, 1512]
# Run all cpu tests
for n in cpu_core:
for f in cpu_freq:
config_cpu(d, n, f)
common.start_prof(d)
run_stress(d)
common.stop_prof(d)
mkr.sleep(60)
# Close Connection
d.close()
|
<commit_before><commit_msg>Add monkeyrunner script of cpu energy usage
Signed-off-by: Emiliano Firmino <05e638a1dec5cbdb4665208eae603124467a154b@gmail.com><commit_after>
|
from com.android.monkeyrunner import MonkeyRunner as mkr
from com.android.monkeyrunner import MonkeyDevice as mkd
import os, sys
sys.path.append(os.getcwd())
import common
# String Constants
cpucfg_pkg = 'br.ufpe.emilianofirmino.cpunuke'
cpustress_pkg = 'br.ufpe.emilianofirmino.cpujudge'
def config_cpu(device, corenum, speed):
common.open_app(device, cpucfg_pkg)
mkr.sleep(5)
for core in range(corenum):
device.drag((384, 400), (384, 250)) # select number of cpu core
device.type(str(speed))
mkr.sleep(5)
common.press_back(device)
def run_stress(device):
common.open_app(device, cpustress_pkg)
mkr.sleep(60)
device.touch(384, 200, mkd.DOWN_AND_UP) # click run button
mkr.sleep(10 * 60)
common.unlock_screen(device)
mkr.sleep(60)
    common.press_back(device)
if __name__ == "__main__":
    # Establish Connection
d = common.open_dev()
cpu_core = [1, 2, 3, 4]
cpu_freq = [384, 486, 594, 702, 810, 918, 1026, 1134, 1242, 1350, 1458, 1512]
# Run all cpu tests
for n in cpu_core:
for f in cpu_freq:
config_cpu(d, n, f)
common.start_prof(d)
run_stress(d)
common.stop_prof(d)
mkr.sleep(60)
# Close Connection
d.close()
|
Add monkeyrunner script of cpu energy usage
Signed-off-by: Emiliano Firmino <05e638a1dec5cbdb4665208eae603124467a154b@gmail.com>from com.android.monkeyrunner import MonkeyRunner as mkr
from com.android.monkeyrunner import MonkeyDevice as mkd
import os, sys
sys.path.append(os.getcwd())
import common
# String Constants
cpucfg_pkg = 'br.ufpe.emilianofirmino.cpunuke'
cpustress_pkg = 'br.ufpe.emilianofirmino.cpujudge'
def config_cpu(device, corenum, speed):
common.open_app(device, cpucfg_pkg)
mkr.sleep(5)
for core in range(corenum):
device.drag((384, 400), (384, 250)) # select number of cpu core
device.type(str(speed))
mkr.sleep(5)
common.press_back(device)
def run_stress(device):
common.open_app(device, cpustress_pkg)
mkr.sleep(60)
device.touch(384, 200, mkd.DOWN_AND_UP) # click run button
mkr.sleep(10 * 60)
common.unlock_screen(device)
mkr.sleep(60)
    common.press_back(device)
if __name__ == "__main__":
    # Establish Connection
d = common.open_dev()
cpu_core = [1, 2, 3, 4]
cpu_freq = [384, 486, 594, 702, 810, 918, 1026, 1134, 1242, 1350, 1458, 1512]
# Run all cpu tests
for n in cpu_core:
for f in cpu_freq:
config_cpu(d, n, f)
common.start_prof(d)
run_stress(d)
common.stop_prof(d)
mkr.sleep(60)
# Close Connection
d.close()
|
<commit_before><commit_msg>Add monkeyrunner script of cpu energy usage
Signed-off-by: Emiliano Firmino <05e638a1dec5cbdb4665208eae603124467a154b@gmail.com><commit_after>from com.android.monkeyrunner import MonkeyRunner as mkr
from com.android.monkeyrunner import MonkeyDevice as mkd
import os, sys
sys.path.append(os.getcwd())
import common
# String Constants
cpucfg_pkg = 'br.ufpe.emilianofirmino.cpunuke'
cpustress_pkg = 'br.ufpe.emilianofirmino.cpujudge'
def config_cpu(device, corenum, speed):
common.open_app(device, cpucfg_pkg)
mkr.sleep(5)
for core in range(corenum):
device.drag((384, 400), (384, 250)) # select number of cpu core
device.type(str(speed))
mkr.sleep(5)
common.press_back(device)
def run_stress(device):
common.open_app(device, cpustress_pkg)
mkr.sleep(60)
device.touch(384, 200, mkd.DOWN_AND_UP) # click run button
mkr.sleep(10 * 60)
common.unlock_screen(device)
mkr.sleep(60)
    common.press_back(device)
if __name__ == "__main__":
    # Establish Connection
d = common.open_dev()
cpu_core = [1, 2, 3, 4]
cpu_freq = [384, 486, 594, 702, 810, 918, 1026, 1134, 1242, 1350, 1458, 1512]
# Run all cpu tests
for n in cpu_core:
for f in cpu_freq:
config_cpu(d, n, f)
common.start_prof(d)
run_stress(d)
common.stop_prof(d)
mkr.sleep(60)
# Close Connection
d.close()
|
|
5ef9ea4fe0c90fad89d5ab772f42c48fd702860c
|
portal/migrations/versions/91351a73e6e2_.py
|
portal/migrations/versions/91351a73e6e2_.py
|
from alembic import op
import sqlalchemy as sa
"""empty message
Revision ID: 91351a73e6e2
Revises: 63262fe95b9c
Create Date: 2018-03-08 15:34:22.391417
"""
# revision identifiers, used by Alembic.
revision = '91351a73e6e2'
down_revision = '63262fe95b9c'
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('interventions', sa.Column('subscribed_events', sa.Integer()))
op.execute('UPDATE interventions SET subscribed_events=0')
op.alter_column('interventions', 'subscribed_events', nullable=False)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('interventions', 'subscribed_events')
# ### end Alembic commands ###
|
Add bitmask for subscribed events to intervention table.
|
Add bitmask for subscribed events to intervention table.
|
Python
|
bsd-3-clause
|
uwcirg/true_nth_usa_portal,uwcirg/true_nth_usa_portal,uwcirg/true_nth_usa_portal,uwcirg/true_nth_usa_portal
|
Add bitmask for subscribed events to intervention table.
|
from alembic import op
import sqlalchemy as sa
"""empty message
Revision ID: 91351a73e6e2
Revises: 63262fe95b9c
Create Date: 2018-03-08 15:34:22.391417
"""
# revision identifiers, used by Alembic.
revision = '91351a73e6e2'
down_revision = '63262fe95b9c'
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('interventions', sa.Column('subscribed_events', sa.Integer()))
op.execute('UPDATE interventions SET subscribed_events=0')
op.alter_column('interventions', 'subscribed_events', nullable=False)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('interventions', 'subscribed_events')
# ### end Alembic commands ###
|
<commit_before><commit_msg>Add bitmask for subscribed events to intervention table.<commit_after>
|
from alembic import op
import sqlalchemy as sa
"""empty message
Revision ID: 91351a73e6e2
Revises: 63262fe95b9c
Create Date: 2018-03-08 15:34:22.391417
"""
# revision identifiers, used by Alembic.
revision = '91351a73e6e2'
down_revision = '63262fe95b9c'
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('interventions', sa.Column('subscribed_events', sa.Integer()))
op.execute('UPDATE interventions SET subscribed_events=0')
op.alter_column('interventions', 'subscribed_events', nullable=False)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('interventions', 'subscribed_events')
# ### end Alembic commands ###
|
Add bitmask for subscribed events to intervention table.from alembic import op
import sqlalchemy as sa
"""empty message
Revision ID: 91351a73e6e2
Revises: 63262fe95b9c
Create Date: 2018-03-08 15:34:22.391417
"""
# revision identifiers, used by Alembic.
revision = '91351a73e6e2'
down_revision = '63262fe95b9c'
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('interventions', sa.Column('subscribed_events', sa.Integer()))
op.execute('UPDATE interventions SET subscribed_events=0')
op.alter_column('interventions', 'subscribed_events', nullable=False)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('interventions', 'subscribed_events')
# ### end Alembic commands ###
|
<commit_before><commit_msg>Add bitmask for subscribed events to intervention table.<commit_after>from alembic import op
import sqlalchemy as sa
"""empty message
Revision ID: 91351a73e6e2
Revises: 63262fe95b9c
Create Date: 2018-03-08 15:34:22.391417
"""
# revision identifiers, used by Alembic.
revision = '91351a73e6e2'
down_revision = '63262fe95b9c'
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('interventions', sa.Column('subscribed_events', sa.Integer()))
op.execute('UPDATE interventions SET subscribed_events=0')
op.alter_column('interventions', 'subscribed_events', nullable=False)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('interventions', 'subscribed_events')
# ### end Alembic commands ###
|
|
303152ec2828fe6c709ec17d21910a9b3a369004
|
share/migrations/0034_auto_20160830_2102.py
|
share/migrations/0034_auto_20160830_2102.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-08-30 21:02
from __future__ import unicode_literals
from django.db import migrations
import share.models.fields
class Migration(migrations.Migration):
dependencies = [
('share', '0033_auto_20160829_1707'),
]
operations = [
migrations.AlterField(
model_name='link',
name='url',
field=share.models.fields.ShareURLField(db_index=True),
),
migrations.AlterField(
model_name='linkversion',
name='url',
field=share.models.fields.ShareURLField(db_index=True),
),
]
|
Add migration for Link.url type change
|
Add migration for Link.url type change
|
Python
|
apache-2.0
|
laurenbarker/SHARE,zamattiac/SHARE,CenterForOpenScience/SHARE,laurenbarker/SHARE,laurenbarker/SHARE,aaxelb/SHARE,CenterForOpenScience/SHARE,zamattiac/SHARE,aaxelb/SHARE,zamattiac/SHARE,aaxelb/SHARE,CenterForOpenScience/SHARE
|
Add migration for Link.url type change
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-08-30 21:02
from __future__ import unicode_literals
from django.db import migrations
import share.models.fields
class Migration(migrations.Migration):
dependencies = [
('share', '0033_auto_20160829_1707'),
]
operations = [
migrations.AlterField(
model_name='link',
name='url',
field=share.models.fields.ShareURLField(db_index=True),
),
migrations.AlterField(
model_name='linkversion',
name='url',
field=share.models.fields.ShareURLField(db_index=True),
),
]
|
<commit_before><commit_msg>Add migration for Link.url type change<commit_after>
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-08-30 21:02
from __future__ import unicode_literals
from django.db import migrations
import share.models.fields
class Migration(migrations.Migration):
dependencies = [
('share', '0033_auto_20160829_1707'),
]
operations = [
migrations.AlterField(
model_name='link',
name='url',
field=share.models.fields.ShareURLField(db_index=True),
),
migrations.AlterField(
model_name='linkversion',
name='url',
field=share.models.fields.ShareURLField(db_index=True),
),
]
|
Add migration for Link.url type change# -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-08-30 21:02
from __future__ import unicode_literals
from django.db import migrations
import share.models.fields
class Migration(migrations.Migration):
dependencies = [
('share', '0033_auto_20160829_1707'),
]
operations = [
migrations.AlterField(
model_name='link',
name='url',
field=share.models.fields.ShareURLField(db_index=True),
),
migrations.AlterField(
model_name='linkversion',
name='url',
field=share.models.fields.ShareURLField(db_index=True),
),
]
|
<commit_before><commit_msg>Add migration for Link.url type change<commit_after># -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-08-30 21:02
from __future__ import unicode_literals
from django.db import migrations
import share.models.fields
class Migration(migrations.Migration):
dependencies = [
('share', '0033_auto_20160829_1707'),
]
operations = [
migrations.AlterField(
model_name='link',
name='url',
field=share.models.fields.ShareURLField(db_index=True),
),
migrations.AlterField(
model_name='linkversion',
name='url',
field=share.models.fields.ShareURLField(db_index=True),
),
]
|
|
b758d6b1c445093a261e0f2be175181fa57109e4
|
math/basic/Greatest_digit_in_number/Greatest_digit_in_number.py
|
math/basic/Greatest_digit_in_number/Greatest_digit_in_number.py
|
number = 201328361572
maxDigit = -1
# convert number into unique set of digits and iterate over it
for c in set(str(number)):
# parse back to int and compare vs known max
i = int(c)
if i > maxDigit:
maxDigit = i
print(maxDigit)
# > 8
|
Add greatest digit in number with python in math
|
Add greatest digit in number with python in math
|
Python
|
cc0-1.0
|
Deepak345/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,Deepak345/al-go-rithms,Deepak345/al-go-rithms,Deepak345/al-go-rithms,Deepak345/al-go-rithms,manikTharaka/al-go-rithms,ZoranPandovski/al-go-rithms,Cnidarias/al-go-rithms,Cnidarias/al-go-rithms,ZoranPandovski/al-go-rithms,Deepak345/al-go-rithms,Deepak345/al-go-rithms,Cnidarias/al-go-rithms,Cnidarias/al-go-rithms,Deepak345/al-go-rithms,Deepak345/al-go-rithms,Cnidarias/al-go-rithms,Cnidarias/al-go-rithms,manikTharaka/al-go-rithms,Cnidarias/al-go-rithms,manikTharaka/al-go-rithms,ZoranPandovski/al-go-rithms,manikTharaka/al-go-rithms,ZoranPandovski/al-go-rithms,manikTharaka/al-go-rithms,ZoranPandovski/al-go-rithms,Cnidarias/al-go-rithms,Cnidarias/al-go-rithms,Cnidarias/al-go-rithms,manikTharaka/al-go-rithms,Cnidarias/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,manikTharaka/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,Deepak345/al-go-rithms,ZoranPandovski/al-go-rithms,Deepak345/al-go-rithms,Deepak345/al-go-rithms,Cnidarias/al-go-rithms,ZoranPandovski/al-go-rithms,manikTharaka/al-go-rithms,ZoranPandovski/al-go-rithms,Cnidarias/al-go-rithms,Deepak345/al-go-rithms,ZoranPandovski/al-go-rithms,Deepak345/al-go-rithms,Cnidarias/al-go-rithms,manikTharaka/al-go-rithms,ZoranPandovski/al-go-rithms,manikTharaka/al-go-rithms,manikTharaka/al-go-rithms,ZoranPandovski/al-go-rithms,manikTharaka/al-go-rithms,Cnidarias/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,manikTharaka/al-go-rithms,manikTharaka/al-go-rithms,Deepak345/al-go-rithms,Deepak345/al-go-rithms,manikTharaka/al-go-rithms,manikTharaka/al-go-rithms
|
Add greatest digit in number with python in math
|
number = 201328361572
maxDigit = -1
# convert number into unique set of digits and iterate over it
for c in set(str(number)):
# parse back to int and compare vs known max
i = int(c)
if i > maxDigit:
maxDigit = i
print(maxDigit)
# > 8
|
<commit_before><commit_msg>Add greatest digit in number with python in math<commit_after>
|
number = 201328361572
maxDigit = -1
# convert number into unique set of digits and iterate over it
for c in set(str(number)):
# parse back to int and compare vs known max
i = int(c)
if i > maxDigit:
maxDigit = i
print(maxDigit)
# > 8
|
Add greatest digit in number with python in mathnumber = 201328361572
maxDigit = -1
# convert number into unique set of digits and iterate over it
for c in set(str(number)):
# parse back to int and compare vs known max
i = int(c)
if i > maxDigit:
maxDigit = i
print(maxDigit)
# > 8
|
<commit_before><commit_msg>Add greatest digit in number with python in math<commit_after>number = 201328361572
maxDigit = -1
# convert number into unique set of digits and iterate over it
for c in set(str(number)):
# parse back to int and compare vs known max
i = int(c)
if i > maxDigit:
maxDigit = i
print(maxDigit)
# > 8
|
|
12bc92863076c594422f327efc1ba23a321b05a7
|
plotly/tests/test_optional/test_matplotlylib/test_date_times.py
|
plotly/tests/test_optional/test_matplotlylib/test_date_times.py
|
from __future__ import absolute_import
import matplotlib
# Force matplotlib to not use any Xwindows backend.
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import datetime
from matplotlib.dates import date2num
import plotly.tools as tls
from unittest import TestCase
from plotly.tests.test_optional.optional_utils import compare_dict, run_fig
class TestDateTimes(TestCase):
def test_normal_mpl_dates(self):
datetime_format = '%Y-%m-%d %H:%M:%S'
y = [1, 2, 3, 4]
date_strings = ['2010-01-04 00:00:00',
'2010-01-04 10:00:00',
'2010-01-04 23:00:59',
'2010-01-05 00:00:00']
# 1. create datetimes from the strings
dates = [datetime.datetime.strptime(date_string, datetime_format)
for date_string in date_strings]
# 2. create the mpl_dates from these datetimes
mpl_dates = date2num(dates)
# make a figure in mpl
fig, ax = plt.subplots()
ax.plot_date(mpl_dates, y)
# convert this figure to plotly's graph_objs
pfig = tls.mpl_to_plotly(fig)
print date_strings
print pfig['data'][0]['x']
# we use the same format here, so we expect equality here
self.assertEqual(pfig['data'][0]['x'], date_strings)
|
Add test for dates in mpl.
|
Add test for dates in mpl.
|
Python
|
mit
|
plotly/plotly.py,ee-in/python-api,plotly/python-api,plotly/python-api,plotly/python-api,ee-in/python-api,plotly/plotly.py,plotly/plotly.py,ee-in/python-api
|
Add test for dates in mpl.
|
from __future__ import absolute_import
import matplotlib
# Force matplotlib to not use any Xwindows backend.
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import datetime
from matplotlib.dates import date2num
import plotly.tools as tls
from unittest import TestCase
from plotly.tests.test_optional.optional_utils import compare_dict, run_fig
class TestDateTimes(TestCase):
def test_normal_mpl_dates(self):
datetime_format = '%Y-%m-%d %H:%M:%S'
y = [1, 2, 3, 4]
date_strings = ['2010-01-04 00:00:00',
'2010-01-04 10:00:00',
'2010-01-04 23:00:59',
'2010-01-05 00:00:00']
# 1. create datetimes from the strings
dates = [datetime.datetime.strptime(date_string, datetime_format)
for date_string in date_strings]
# 2. create the mpl_dates from these datetimes
mpl_dates = date2num(dates)
# make a figure in mpl
fig, ax = plt.subplots()
ax.plot_date(mpl_dates, y)
# convert this figure to plotly's graph_objs
pfig = tls.mpl_to_plotly(fig)
print date_strings
print pfig['data'][0]['x']
# we use the same format here, so we expect equality here
self.assertEqual(pfig['data'][0]['x'], date_strings)
|
<commit_before><commit_msg>Add test for dates in mpl.<commit_after>
|
from __future__ import absolute_import
import matplotlib
# Force matplotlib to not use any Xwindows backend.
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import datetime
from matplotlib.dates import date2num
import plotly.tools as tls
from unittest import TestCase
from plotly.tests.test_optional.optional_utils import compare_dict, run_fig
class TestDateTimes(TestCase):
def test_normal_mpl_dates(self):
datetime_format = '%Y-%m-%d %H:%M:%S'
y = [1, 2, 3, 4]
date_strings = ['2010-01-04 00:00:00',
'2010-01-04 10:00:00',
'2010-01-04 23:00:59',
'2010-01-05 00:00:00']
# 1. create datetimes from the strings
dates = [datetime.datetime.strptime(date_string, datetime_format)
for date_string in date_strings]
# 2. create the mpl_dates from these datetimes
mpl_dates = date2num(dates)
# make a figure in mpl
fig, ax = plt.subplots()
ax.plot_date(mpl_dates, y)
# convert this figure to plotly's graph_objs
pfig = tls.mpl_to_plotly(fig)
print date_strings
print pfig['data'][0]['x']
# we use the same format here, so we expect equality here
self.assertEqual(pfig['data'][0]['x'], date_strings)
|
Add test for dates in mpl.from __future__ import absolute_import
import matplotlib
# Force matplotlib to not use any Xwindows backend.
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import datetime
from matplotlib.dates import date2num
import plotly.tools as tls
from unittest import TestCase
from plotly.tests.test_optional.optional_utils import compare_dict, run_fig
class TestDateTimes(TestCase):
def test_normal_mpl_dates(self):
datetime_format = '%Y-%m-%d %H:%M:%S'
y = [1, 2, 3, 4]
date_strings = ['2010-01-04 00:00:00',
'2010-01-04 10:00:00',
'2010-01-04 23:00:59',
'2010-01-05 00:00:00']
# 1. create datetimes from the strings
dates = [datetime.datetime.strptime(date_string, datetime_format)
for date_string in date_strings]
# 2. create the mpl_dates from these datetimes
mpl_dates = date2num(dates)
# make a figure in mpl
fig, ax = plt.subplots()
ax.plot_date(mpl_dates, y)
# convert this figure to plotly's graph_objs
pfig = tls.mpl_to_plotly(fig)
print date_strings
print pfig['data'][0]['x']
# we use the same format here, so we expect equality here
self.assertEqual(pfig['data'][0]['x'], date_strings)
|
<commit_before><commit_msg>Add test for dates in mpl.<commit_after>from __future__ import absolute_import
import matplotlib
# Force matplotlib to not use any Xwindows backend.
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import datetime
from matplotlib.dates import date2num
import plotly.tools as tls
from unittest import TestCase
from plotly.tests.test_optional.optional_utils import compare_dict, run_fig
class TestDateTimes(TestCase):
def test_normal_mpl_dates(self):
datetime_format = '%Y-%m-%d %H:%M:%S'
y = [1, 2, 3, 4]
date_strings = ['2010-01-04 00:00:00',
'2010-01-04 10:00:00',
'2010-01-04 23:00:59',
'2010-01-05 00:00:00']
# 1. create datetimes from the strings
dates = [datetime.datetime.strptime(date_string, datetime_format)
for date_string in date_strings]
# 2. create the mpl_dates from these datetimes
mpl_dates = date2num(dates)
# make a figure in mpl
fig, ax = plt.subplots()
ax.plot_date(mpl_dates, y)
# convert this figure to plotly's graph_objs
pfig = tls.mpl_to_plotly(fig)
print date_strings
print pfig['data'][0]['x']
# we use the same format here, so we expect equality here
self.assertEqual(pfig['data'][0]['x'], date_strings)
|
|
0224b1eeeb3db78f5445e413d3c5a3a8c3c4bc57
|
tools/wf_generators/generate_parallel_wf.py
|
tools/wf_generators/generate_parallel_wf.py
|
#!/usr/bin/env python
import sys
try:
wf_name = str(sys.argv[1])
branch_cnt = int(sys.argv[2])
branch_depth = int(sys.argv[3])
add_join = len(sys.argv) > 4
except:
raise ValueError(
'Usage: <script_name> workflow_name'
' number_of_parallel_branches branch_depth add_join'
)
f = open('%s.mist' % wf_name, 'w')
# Writing a workflow header to the file.
f.write('---\n')
f.write("version: '2.0'\n\n")
f.write("%s:\n" % wf_name)
f.write(" tasks:\n")
# 1. First starting task.
f.write(" task_1:\n")
f.write(" action: std.noop\n")
f.write(" on-success:\n")
for branch_num in range(1, branch_cnt + 1):
f.write(" - task_%s_1\n" % branch_num)
# 2. Branch tasks.
for branch_num in range(1, branch_cnt + 1):
for task_num in range(1, branch_depth + 1):
f.write(" task_%s_%s:\n" % (branch_num, task_num))
f.write(" action: std.noop\n")
if task_num < branch_depth:
f.write(" on-success: task_%s_%s\n" % (branch_num, task_num + 1))
elif add_join:
f.write(" on-success: task_join\n")
# 3. The last "join" task, if needed.
if add_join:
f.write(" task_join:\n")
f.write(" join: all")
f.close()
print("Workflow '%s' is created." % wf_name)
|
Add a script that generates a test workflow with needed parallelism
|
Add a script that generates a test workflow with needed parallelism
Change-Id: I6b1087f95b88a881e074a0af4f6f8be6d9413bc7
|
Python
|
apache-2.0
|
openstack/mistral,openstack/mistral
|
Add a script that generates a test workflow with needed parallelism
Change-Id: I6b1087f95b88a881e074a0af4f6f8be6d9413bc7
|
#!/usr/bin/env python
import sys
try:
wf_name = str(sys.argv[1])
branch_cnt = int(sys.argv[2])
branch_depth = int(sys.argv[3])
add_join = len(sys.argv) > 4
except:
raise ValueError(
'Usage: <script_name> workflow_name'
' number_of_parallel_branches branch_depth add_join'
)
f = open('%s.mist' % wf_name, 'w')
# Writing a workflow header to the file.
f.write('---\n')
f.write("version: '2.0'\n\n")
f.write("%s:\n" % wf_name)
f.write(" tasks:\n")
# 1. First starting task.
f.write(" task_1:\n")
f.write(" action: std.noop\n")
f.write(" on-success:\n")
for branch_num in range(1, branch_cnt + 1):
f.write(" - task_%s_1\n" % branch_num)
# 2. Branch tasks.
for branch_num in range(1, branch_cnt + 1):
for task_num in range(1, branch_depth + 1):
f.write(" task_%s_%s:\n" % (branch_num, task_num))
f.write(" action: std.noop\n")
if task_num < branch_depth:
f.write(" on-success: task_%s_%s\n" % (branch_num, task_num + 1))
elif add_join:
f.write(" on-success: task_join\n")
# 3. The last "join" task, if needed.
if add_join:
f.write(" task_join:\n")
f.write(" join: all")
f.close()
print("Workflow '%s' is created." % wf_name)
|
<commit_before><commit_msg>Add a script that generates a test workflow with needed parallelism
Change-Id: I6b1087f95b88a881e074a0af4f6f8be6d9413bc7<commit_after>
|
#!/usr/bin/env python
import sys
try:
wf_name = str(sys.argv[1])
branch_cnt = int(sys.argv[2])
branch_depth = int(sys.argv[3])
add_join = len(sys.argv) > 4
except:
raise ValueError(
'Usage: <script_name> workflow_name'
' number_of_parallel_branches branch_depth add_join'
)
f = open('%s.mist' % wf_name, 'w')
# Writing a workflow header to the file.
f.write('---\n')
f.write("version: '2.0'\n\n")
f.write("%s:\n" % wf_name)
f.write(" tasks:\n")
# 1. First starting task.
f.write(" task_1:\n")
f.write(" action: std.noop\n")
f.write(" on-success:\n")
for branch_num in range(1, branch_cnt + 1):
f.write(" - task_%s_1\n" % branch_num)
# 2. Branch tasks.
for branch_num in range(1, branch_cnt + 1):
for task_num in range(1, branch_depth + 1):
f.write(" task_%s_%s:\n" % (branch_num, task_num))
f.write(" action: std.noop\n")
if task_num < branch_depth:
f.write(" on-success: task_%s_%s\n" % (branch_num, task_num + 1))
elif add_join:
f.write(" on-success: task_join\n")
# 3. The last "join" task, if needed.
if add_join:
f.write(" task_join:\n")
f.write(" join: all")
f.close()
print("Workflow '%s' is created." % wf_name)
|
Add a script that generates a test workflow with needed parallelism
Change-Id: I6b1087f95b88a881e074a0af4f6f8be6d9413bc7#!/usr/bin/env python
import sys
try:
wf_name = str(sys.argv[1])
branch_cnt = int(sys.argv[2])
branch_depth = int(sys.argv[3])
add_join = len(sys.argv) > 4
except:
raise ValueError(
'Usage: <script_name> workflow_name'
' number_of_parallel_branches branch_depth add_join'
)
f = open('%s.mist' % wf_name, 'w')
# Writing a workflow header to the file.
f.write('---\n')
f.write("version: '2.0'\n\n")
f.write("%s:\n" % wf_name)
f.write(" tasks:\n")
# 1. First starting task.
f.write(" task_1:\n")
f.write(" action: std.noop\n")
f.write(" on-success:\n")
for branch_num in range(1, branch_cnt + 1):
f.write(" - task_%s_1\n" % branch_num)
# 2. Branch tasks.
for branch_num in range(1, branch_cnt + 1):
for task_num in range(1, branch_depth + 1):
f.write(" task_%s_%s:\n" % (branch_num, task_num))
f.write(" action: std.noop\n")
if task_num < branch_depth:
f.write(" on-success: task_%s_%s\n" % (branch_num, task_num + 1))
elif add_join:
f.write(" on-success: task_join\n")
# 3. The last "join" task, if needed.
if add_join:
f.write(" task_join:\n")
f.write(" join: all")
f.close()
print("Workflow '%s' is created." % wf_name)
|
<commit_before><commit_msg>Add a script that generates a test workflow with needed parallelism
Change-Id: I6b1087f95b88a881e074a0af4f6f8be6d9413bc7<commit_after>#!/usr/bin/env python
import sys
try:
wf_name = str(sys.argv[1])
branch_cnt = int(sys.argv[2])
branch_depth = int(sys.argv[3])
add_join = len(sys.argv) > 4
except:
raise ValueError(
'Usage: <script_name> workflow_name'
' number_of_parallel_branches branch_depth add_join'
)
f = open('%s.mist' % wf_name, 'w')
# Writing a workflow header to the file.
f.write('---\n')
f.write("version: '2.0'\n\n")
f.write("%s:\n" % wf_name)
f.write(" tasks:\n")
# 1. First starting task.
f.write(" task_1:\n")
f.write(" action: std.noop\n")
f.write(" on-success:\n")
for branch_num in range(1, branch_cnt + 1):
f.write(" - task_%s_1\n" % branch_num)
# 2. Branch tasks.
for branch_num in range(1, branch_cnt + 1):
for task_num in range(1, branch_depth + 1):
f.write(" task_%s_%s:\n" % (branch_num, task_num))
f.write(" action: std.noop\n")
if task_num < branch_depth:
f.write(" on-success: task_%s_%s\n" % (branch_num, task_num + 1))
elif add_join:
f.write(" on-success: task_join\n")
# 3. The last "join" task, if needed.
if add_join:
f.write(" task_join:\n")
f.write(" join: all")
f.close()
print("Workflow '%s' is created." % wf_name)
|
|
65ca8f29f8e24307fb90c26a319f30f5b5ca4bee
|
scripts/check_run_dir_file_read.py
|
scripts/check_run_dir_file_read.py
|
import argparse
import sys
from os import listdir
from os.path import join
from pymagicc.io import read_cfg_file, MAGICCData
def print_summary(cannot_read, ignored, dir_to_check):
if cannot_read:
print(
"Can't read the following files in {}:\n{}".format(
dir_to_check, "\n".join(cannot_read)
)
)
else:
print("Can read all files in {}".format(dir_to_check))
print("\n\nIgnored:\n{}".format("\n".join(ignored)))
def test_can_read_all_files_in_magicc_dir(dir_to_check):
cannot_read = []
ignored = []
for file_to_read in listdir(dir_to_check):
try:
if file_to_read.endswith((".exe", ".mod", ".mat", ".m")):
ignored.append(file_to_read)
elif file_to_read.endswith(".CFG"):
read_cfg_file(join(dir_to_check, file_to_read))
else:
mdata = MAGICCData()
mdata.read(dir_to_check, file_to_read)
except:
cannot_read.append(file_to_read)
print_summary(cannot_read, ignored, dir_to_check)
def main():
parser = argparse.ArgumentParser(
prog="check-run-dir-file-read",
description="Check which files in a "
"directory can be read by "
"pymagicc's tools",
)
parser.add_argument(
"readdir", help="The folder where the files to read are located"
)
args = parser.parse_args()
test_can_read_all_files_in_magicc_dir(args.readdir)
if __name__ == "__main__":
main()
|
Add check run dir file read in script
|
Add check run dir file read in script
|
Python
|
agpl-3.0
|
openclimatedata/pymagicc,openclimatedata/pymagicc
|
Add check run dir file read in script
|
import argparse
import sys
from os import listdir
from os.path import join
from pymagicc.io import read_cfg_file, MAGICCData
def print_summary(cannot_read, ignored, dir_to_check):
if cannot_read:
print(
"Can't read the following files in {}:\n{}".format(
dir_to_check, "\n".join(cannot_read)
)
)
else:
print("Can read all files in {}".format(dir_to_check))
print("\n\nIgnored:\n{}".format("\n".join(ignored)))
def test_can_read_all_files_in_magicc_dir(dir_to_check):
cannot_read = []
ignored = []
for file_to_read in listdir(dir_to_check):
try:
if file_to_read.endswith((".exe", ".mod", ".mat", ".m")):
ignored.append(file_to_read)
elif file_to_read.endswith(".CFG"):
read_cfg_file(join(dir_to_check, file_to_read))
else:
mdata = MAGICCData()
mdata.read(dir_to_check, file_to_read)
except:
cannot_read.append(file_to_read)
print_summary(cannot_read, ignored, dir_to_check)
def main():
parser = argparse.ArgumentParser(
prog="check-run-dir-file-read",
description="Check which files in a "
"directory can be read by "
"pymagicc's tools",
)
parser.add_argument(
"readdir", help="The folder where the files to read are located"
)
args = parser.parse_args()
test_can_read_all_files_in_magicc_dir(args.readdir)
if __name__ == "__main__":
main()
|
<commit_before><commit_msg>Add check run dir file read in script<commit_after>
|
import argparse
import sys
from os import listdir
from os.path import join
from pymagicc.io import read_cfg_file, MAGICCData
def print_summary(cannot_read, ignored, dir_to_check):
if cannot_read:
print(
"Can't read the following files in {}:\n{}".format(
dir_to_check, "\n".join(cannot_read)
)
)
else:
print("Can read all files in {}".format(dir_to_check))
print("\n\nIgnored:\n{}".format("\n".join(ignored)))
def test_can_read_all_files_in_magicc_dir(dir_to_check):
cannot_read = []
ignored = []
for file_to_read in listdir(dir_to_check):
try:
if file_to_read.endswith((".exe", ".mod", ".mat", ".m")):
ignored.append(file_to_read)
elif file_to_read.endswith(".CFG"):
read_cfg_file(join(dir_to_check, file_to_read))
else:
mdata = MAGICCData()
mdata.read(dir_to_check, file_to_read)
except:
cannot_read.append(file_to_read)
print_summary(cannot_read, ignored, dir_to_check)
def main():
parser = argparse.ArgumentParser(
prog="check-run-dir-file-read",
description="Check which files in a "
"directory can be read by "
"pymagicc's tools",
)
parser.add_argument(
"readdir", help="The folder where the files to read are located"
)
args = parser.parse_args()
test_can_read_all_files_in_magicc_dir(args.readdir)
if __name__ == "__main__":
main()
|
Add check run dir file read in scriptimport argparse
import sys
from os import listdir
from os.path import join
from pymagicc.io import read_cfg_file, MAGICCData
def print_summary(cannot_read, ignored, dir_to_check):
if cannot_read:
print(
"Can't read the following files in {}:\n{}".format(
dir_to_check, "\n".join(cannot_read)
)
)
else:
print("Can read all files in {}".format(dir_to_check))
print("\n\nIgnored:\n{}".format("\n".join(ignored)))
def test_can_read_all_files_in_magicc_dir(dir_to_check):
cannot_read = []
ignored = []
for file_to_read in listdir(dir_to_check):
try:
if file_to_read.endswith((".exe", ".mod", ".mat", ".m")):
ignored.append(file_to_read)
elif file_to_read.endswith(".CFG"):
read_cfg_file(join(dir_to_check, file_to_read))
else:
mdata = MAGICCData()
mdata.read(dir_to_check, file_to_read)
except:
cannot_read.append(file_to_read)
print_summary(cannot_read, ignored, dir_to_check)
def main():
parser = argparse.ArgumentParser(
prog="check-run-dir-file-read",
description="Check which files in a "
"directory can be read by "
"pymagicc's tools",
)
parser.add_argument(
"readdir", help="The folder where the files to read are located"
)
args = parser.parse_args()
test_can_read_all_files_in_magicc_dir(args.readdir)
if __name__ == "__main__":
main()
|
<commit_before><commit_msg>Add check run dir file read in script<commit_after>import argparse
import sys
from os import listdir
from os.path import join
from pymagicc.io import read_cfg_file, MAGICCData
def print_summary(cannot_read, ignored, dir_to_check):
if cannot_read:
print(
"Can't read the following files in {}:\n{}".format(
dir_to_check, "\n".join(cannot_read)
)
)
else:
print("Can read all files in {}".format(dir_to_check))
print("\n\nIgnored:\n{}".format("\n".join(ignored)))
def test_can_read_all_files_in_magicc_dir(dir_to_check):
cannot_read = []
ignored = []
for file_to_read in listdir(dir_to_check):
try:
if file_to_read.endswith((".exe", ".mod", ".mat", ".m")):
ignored.append(file_to_read)
elif file_to_read.endswith(".CFG"):
read_cfg_file(join(dir_to_check, file_to_read))
else:
mdata = MAGICCData()
mdata.read(dir_to_check, file_to_read)
except:
cannot_read.append(file_to_read)
print_summary(cannot_read, ignored, dir_to_check)
def main():
parser = argparse.ArgumentParser(
prog="check-run-dir-file-read",
description="Check which files in a "
"directory can be read by "
"pymagicc's tools",
)
parser.add_argument(
"readdir", help="The folder where the files to read are located"
)
args = parser.parse_args()
test_can_read_all_files_in_magicc_dir(args.readdir)
if __name__ == "__main__":
main()
|
|
5f1fc4687e463632049ea889a8fc566839201a8a
|
crawler/migrations/0002_auto_20160826_1216.py
|
crawler/migrations/0002_auto_20160826_1216.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-08-26 12:16
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('crawler', '0001_initial'),
]
operations = [
migrations.RenameModel(
old_name='Palavras_Ignorar',
new_name='Ignore_Words',
),
]
|
Add missing migration - that created a bug where the table had the wrong name
|
Add missing migration - that created a bug where the table had the wrong name
|
Python
|
mit
|
lucasgr7/silverplate,lucasgr7/silverplate,lucasgr7/silverplate
|
Add missing migration - that created a bug where the table had the wrong name
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-08-26 12:16
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('crawler', '0001_initial'),
]
operations = [
migrations.RenameModel(
old_name='Palavras_Ignorar',
new_name='Ignore_Words',
),
]
|
<commit_before><commit_msg>Add missing migration - that created a bug where the table had the wrong name<commit_after>
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-08-26 12:16
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('crawler', '0001_initial'),
]
operations = [
migrations.RenameModel(
old_name='Palavras_Ignorar',
new_name='Ignore_Words',
),
]
|
Add missing migration - that created a bug where the table had the wrong name# -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-08-26 12:16
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('crawler', '0001_initial'),
]
operations = [
migrations.RenameModel(
old_name='Palavras_Ignorar',
new_name='Ignore_Words',
),
]
|
<commit_before><commit_msg>Add missing migration - that created a bug where the table had the wrong name<commit_after># -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-08-26 12:16
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('crawler', '0001_initial'),
]
operations = [
migrations.RenameModel(
old_name='Palavras_Ignorar',
new_name='Ignore_Words',
),
]
|
|
7bd2df1ebfa02603a660c75cc13adcd35a326b68
|
ALE/print_pa_table.py
|
ALE/print_pa_table.py
|
#for a directory of FASTA gene family files, compute a gene presence-absence (or Count-type) table
from __future__ import print_function
import os, re
to_check = [file for file in os.listdir("fasta/") if file.endswith(".fa")]
#first obtain a complete list of the species. This is an appallingly inefficient "algorithm"
species = {}
for file in to_check:
inh = open("fasta/" + file)
for line in inh:
if line.startswith(">"):
fields = re.split("_", line[1:])
sp = fields[0]
species[sp] = 1
#...and now, read the sequence files one at a time and print out a line corresponding to the count profile
splist = species.keys()
print("Family\t", end = '')
for element in splist:
print(element + "\t", end='')
print("\n", end = '')
for file in to_check:
file_count = {}
inh = open("fasta/" + file)
for line in inh:
fields = re.split("_", line[1:])
if fields[0] in file_count:
file_count[fields[0]] += 1
else:
file_count[fields[0]] = 1
print(file + "\t", end = '')
for sp in splist:
if sp in file_count:
print(str(file_count[sp]) + "\t", end = '')
else:
print("0\t", end = '')
print("\n", end = '')
|
Print a presence/absence table from a directory of FASTA-formatted sequence files
|
Print a presence/absence table from a directory of FASTA-formatted sequence files
|
Python
|
mit
|
Tancata/phylo,Tancata/phylo
|
Print a presence/absence table from a directory of FASTA-formatted sequence files
|
#for a directory of FASTA gene family files, compute a gene presence-absence (or Count-type) table
from __future__ import print_function
import os, re
to_check = [file for file in os.listdir("fasta/") if file.endswith(".fa")]
#first obtain a complete list of the species. This is an appallingly inefficient "algorithm"
species = {}
for file in to_check:
inh = open("fasta/" + file)
for line in inh:
if line.startswith(">"):
fields = re.split("_", line[1:])
sp = fields[0]
species[sp] = 1
#...and now, read the sequence files one at a time and print out a line corresponding to the count profile
splist = species.keys()
print("Family\t", end = '')
for element in splist:
print(element + "\t", end='')
print("\n", end = '')
for file in to_check:
file_count = {}
inh = open("fasta/" + file)
for line in inh:
fields = re.split("_", line[1:])
if fields[0] in file_count:
file_count[fields[0]] += 1
else:
file_count[fields[0]] = 1
print(file + "\t", end = '')
for sp in splist:
if sp in file_count:
print(str(file_count[sp]) + "\t", end = '')
else:
print("0\t", end = '')
print("\n", end = '')
|
<commit_before><commit_msg>Print a presence/absence table from a directory of FASTA-formatted sequence files<commit_after>
|
#for a directory of FASTA gene family files, compute a gene presence-absence (or Count-type) table
from __future__ import print_function
import os, re
to_check = [file for file in os.listdir("fasta/") if file.endswith(".fa")]
#first obtain a complete list of the species. This is an appallingly inefficient "algorithm"
species = {}
for file in to_check:
inh = open("fasta/" + file)
for line in inh:
if line.startswith(">"):
fields = re.split("_", line[1:])
sp = fields[0]
species[sp] = 1
#...and now, read the sequence files one at a time and print out a line corresponding to the count profile
splist = species.keys()
print("Family\t", end = '')
for element in splist:
print(element + "\t", end='')
print("\n", end = '')
for file in to_check:
file_count = {}
inh = open("fasta/" + file)
for line in inh:
fields = re.split("_", line[1:])
if fields[0] in file_count:
file_count[fields[0]] += 1
else:
file_count[fields[0]] = 1
print(file + "\t", end = '')
for sp in splist:
if sp in file_count:
print(str(file_count[sp]) + "\t", end = '')
else:
print("0\t", end = '')
print("\n", end = '')
|
Print a presence/absence table from a directory of FASTA-formatted sequence files#for a directory of FASTA gene family files, compute a gene presence-absence (or Count-type) table
from __future__ import print_function
import os, re
to_check = [file for file in os.listdir("fasta/") if file.endswith(".fa")]
#first obtain a complete list of the species. This is an appallingly inefficient "algorithm"
species = {}
for file in to_check:
inh = open("fasta/" + file)
for line in inh:
if line.startswith(">"):
fields = re.split("_", line[1:])
sp = fields[0]
species[sp] = 1
#...and now, read the sequence files one at a time and print out a line corresponding to the count profile
splist = species.keys()
print("Family\t", end = '')
for element in splist:
print(element + "\t", end='')
print("\n", end = '')
for file in to_check:
file_count = {}
inh = open("fasta/" + file)
for line in inh:
fields = re.split("_", line[1:])
if fields[0] in file_count:
file_count[fields[0]] += 1
else:
file_count[fields[0]] = 1
print(file + "\t", end = '')
for sp in splist:
if sp in file_count:
print(str(file_count[sp]) + "\t", end = '')
else:
print("0\t", end = '')
print("\n", end = '')
|
<commit_before><commit_msg>Print a presence/absence table from a directory of FASTA-formatted sequence files<commit_after>#for a directory of FASTA gene family files, compute a gene presence-absence (or Count-type) table
from __future__ import print_function
import os, re
to_check = [file for file in os.listdir("fasta/") if file.endswith(".fa")]
#first obtain a complete list of the species. This is an appallingly inefficient "algorithm"
species = {}
for file in to_check:
inh = open("fasta/" + file)
for line in inh:
if line.startswith(">"):
fields = re.split("_", line[1:])
sp = fields[0]
species[sp] = 1
#...and now, read the sequence files one at a time and print out a line corresponding to the count profile
splist = species.keys()
print("Family\t", end = '')
for element in splist:
print(element + "\t", end='')
print("\n", end = '')
for file in to_check:
file_count = {}
inh = open("fasta/" + file)
for line in inh:
        if line.startswith(">"):
            fields = re.split("_", line[1:])
            if fields[0] in file_count:
                file_count[fields[0]] += 1
            else:
                file_count[fields[0]] = 1
print(file + "\t", end = '')
for sp in splist:
if sp in file_count:
print(str(file_count[sp]) + "\t", end = '')
else:
print("0\t", end = '')
print("\n", end = '')
|
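The two-pass version above walks every file twice. A single-pass sketch (illustrative only, assuming the same fasta/ layout and >SPECIES_... header convention as the script) collects the species set and the per-family counts together using collections.Counter:

# Single-pass variant: one Counter per family file, species set built as we go.
from __future__ import print_function
import os, re
from collections import Counter

counts = {}      # family file -> Counter mapping species -> sequence count
species = set()
for fname in (f for f in os.listdir("fasta/") if f.endswith(".fa")):
    c = Counter()
    with open(os.path.join("fasta", fname)) as inh:
        for line in inh:
            if line.startswith(">"):
                sp = re.split("_", line[1:])[0]
                species.add(sp)
                c[sp] += 1
    counts[fname] = c
splist = sorted(species)
print("Family\t" + "\t".join(splist))
for fname, c in counts.items():
    # Counter returns 0 for absent species, which yields the presence/absence row.
    print(fname + "\t" + "\t".join(str(c[sp]) for sp in splist))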
|
bf11946afbac3a10b6a7ae502c81f7016c472719
|
social_auth/fields.py
|
social_auth/fields.py
|
from django.core.exceptions import ValidationError
from django.db import models
from django.utils import simplejson
class JSONField(models.TextField):
"""Simple JSON field that stores python structures as JSON strings
on database.
"""
__metaclass__ = models.SubfieldBase
def to_python(self, value):
"""
Convert the input JSON value into python structures, raises
django.core.exceptions.ValidationError if the data can't be converted.
"""
if self.blank and not value:
return None
if isinstance(value, basestring):
try:
return simplejson.loads(value)
except Exception, e:
raise ValidationError(str(e))
else:
return value
def validate(self, value, model_instance):
"""Check value is a valid JSON string, raise ValidationError on
error."""
super(JSONField, self).validate(value, model_instance)
try:
return simplejson.loads(value)
except Exception, e:
raise ValidationError(str(e))
def get_db_prep_save(self, value):
"""Convert value to JSON string before save"""
try:
value = simplejson.dumps(value)
except Exception, e:
raise ValidationError(str(e))
return super(JSONField, self).get_db_prep_save(value)
|
Store extra data in JSON format, also add method to extend extra values to store. Closes gh-30
|
Store extra data in JSON format, also add method to extend extra values to store. Closes gh-30
|
Python
|
bsd-3-clause
|
qas612820704/django-social-auth,sk7/django-social-auth,limdauto/django-social-auth,mayankcu/Django-social,thesealion/django-social-auth,qas612820704/django-social-auth,gustavoam/django-social-auth,getsentry/django-social-auth,lovehhf/django-social-auth,MjAbuz/django-social-auth,MjAbuz/django-social-auth,michael-borisov/django-social-auth,dongguangming/django-social-auth,caktus/django-social-auth,1st/django-social-auth,czpython/django-social-auth,beswarm/django-social-auth,omab/django-social-auth,vuchau/django-social-auth,gustavoam/django-social-auth,WW-Digital/django-social-auth,limdauto/django-social-auth,beswarm/django-social-auth,duoduo369/django-social-auth,dongguangming/django-social-auth,vuchau/django-social-auth,vxvinh1511/django-social-auth,VishvajitP/django-social-auth,vxvinh1511/django-social-auth,brianmckinneyrocks/django-social-auth,thesealion/django-social-auth,brianmckinneyrocks/django-social-auth,lovehhf/django-social-auth,omab/django-social-auth,VishvajitP/django-social-auth,adw0rd/django-social-auth,antoviaque/django-social-auth-norel,krvss/django-social-auth,caktus/django-social-auth,michael-borisov/django-social-auth
|
Store extra data in JSON format, also add method to extend extra values to store. Closes gh-30
|
from django.core.exceptions import ValidationError
from django.db import models
from django.utils import simplejson
class JSONField(models.TextField):
"""Simple JSON field that stores python structures as JSON strings
on database.
"""
__metaclass__ = models.SubfieldBase
def to_python(self, value):
"""
Convert the input JSON value into python structures, raises
django.core.exceptions.ValidationError if the data can't be converted.
"""
if self.blank and not value:
return None
if isinstance(value, basestring):
try:
return simplejson.loads(value)
except Exception, e:
raise ValidationError(str(e))
else:
return value
def validate(self, value, model_instance):
"""Check value is a valid JSON string, raise ValidationError on
error."""
super(JSONField, self).validate(value, model_instance)
try:
return simplejson.loads(value)
except Exception, e:
raise ValidationError(str(e))
def get_db_prep_save(self, value):
"""Convert value to JSON string before save"""
try:
value = simplejson.dumps(value)
except Exception, e:
raise ValidationError(str(e))
return super(JSONField, self).get_db_prep_save(value)
|
<commit_before><commit_msg>Store extra data in JSON format, also add method to extend extra values to store. Closes gh-30<commit_after>
|
from django.core.exceptions import ValidationError
from django.db import models
from django.utils import simplejson
class JSONField(models.TextField):
"""Simple JSON field that stores python structures as JSON strings
on database.
"""
__metaclass__ = models.SubfieldBase
def to_python(self, value):
"""
Convert the input JSON value into python structures, raises
django.core.exceptions.ValidationError if the data can't be converted.
"""
if self.blank and not value:
return None
if isinstance(value, basestring):
try:
return simplejson.loads(value)
except Exception, e:
raise ValidationError(str(e))
else:
return value
def validate(self, value, model_instance):
"""Check value is a valid JSON string, raise ValidationError on
error."""
super(JSONField, self).validate(value, model_instance)
try:
return simplejson.loads(value)
except Exception, e:
raise ValidationError(str(e))
def get_db_prep_save(self, value):
"""Convert value to JSON string before save"""
try:
value = simplejson.dumps(value)
except Exception, e:
raise ValidationError(str(e))
return super(JSONField, self).get_db_prep_save(value)
|
Store extra data in JSON format, also add method to extend extra values to store. Closes gh-30from django.core.exceptions import ValidationError
from django.db import models
from django.utils import simplejson
class JSONField(models.TextField):
"""Simple JSON field that stores python structures as JSON strings
on database.
"""
__metaclass__ = models.SubfieldBase
def to_python(self, value):
"""
Convert the input JSON value into python structures, raises
django.core.exceptions.ValidationError if the data can't be converted.
"""
if self.blank and not value:
return None
if isinstance(value, basestring):
try:
return simplejson.loads(value)
except Exception, e:
raise ValidationError(str(e))
else:
return value
def validate(self, value, model_instance):
"""Check value is a valid JSON string, raise ValidationError on
error."""
super(JSONField, self).validate(value, model_instance)
try:
return simplejson.loads(value)
except Exception, e:
raise ValidationError(str(e))
def get_db_prep_save(self, value):
"""Convert value to JSON string before save"""
try:
value = simplejson.dumps(value)
except Exception, e:
raise ValidationError(str(e))
return super(JSONField, self).get_db_prep_save(value)
|
<commit_before><commit_msg>Store extra data in JSON format, also add method to extend extra values to store. Closes gh-30<commit_after>from django.core.exceptions import ValidationError
from django.db import models
from django.utils import simplejson
class JSONField(models.TextField):
"""Simple JSON field that stores python structures as JSON strings
on database.
"""
__metaclass__ = models.SubfieldBase
def to_python(self, value):
"""
Convert the input JSON value into python structures, raises
django.core.exceptions.ValidationError if the data can't be converted.
"""
if self.blank and not value:
return None
if isinstance(value, basestring):
try:
return simplejson.loads(value)
except Exception, e:
raise ValidationError(str(e))
else:
return value
def validate(self, value, model_instance):
"""Check value is a valid JSON string, raise ValidationError on
error."""
super(JSONField, self).validate(value, model_instance)
try:
return simplejson.loads(value)
except Exception, e:
raise ValidationError(str(e))
def get_db_prep_save(self, value):
"""Convert value to JSON string before save"""
try:
value = simplejson.dumps(value)
except Exception, e:
raise ValidationError(str(e))
return super(JSONField, self).get_db_prep_save(value)
|
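For comparison only, not part of this commit: on Python 3 with a current Django (where SubfieldBase was removed, and which ships a built-in django.db.models.JSONField anyway), the same idea is usually written with the stdlib json module and from_db_value. A hedged sketch:

import json
from django.core.exceptions import ValidationError
from django.db import models

class JSONField(models.TextField):
    """Python 3 sketch of the field above, using stdlib json."""

    def from_db_value(self, value, expression, connection):
        # Replaces the SubfieldBase machinery used in the original.
        return self.to_python(value)

    def to_python(self, value):
        if self.blank and not value:
            return None
        if isinstance(value, str):
            try:
                return json.loads(value)
            except Exception as e:
                raise ValidationError(str(e))
        return value

    def get_prep_value(self, value):
        try:
            return json.dumps(value)
        except Exception as e:
            raise ValidationError(str(e))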
|
4c8216129ee655026e4c0e8c3781645017969614
|
etc/wpt_result_analyzer.py
|
etc/wpt_result_analyzer.py
|
#!/usr/bin/env python
# Copyright 2019 The Servo Project Developers. See the COPYRIGHT
# file at the top-level directory of this distribution.
#
# Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
# http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
# <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
# option. This file may not be copied, modified, or distributed
# except according to those terms.
import configparser
import os
test_root = os.path.join('tests', 'wpt', 'web-platform-tests')
meta_root = os.path.join('tests', 'wpt', 'metadata')
test_counts = {}
meta_counts = {}
for base_dir, dir_names, files in os.walk(test_root):
if base_dir == test_root:
continue
rel_base = os.path.relpath(base_dir, test_root)
if not os.path.exists(os.path.join(meta_root, rel_base)):
continue
test_files = []
exts = ['.html', '.htm', '.xht', '.xhtml', '.window.js', '.worker.js', '.any.js']
for f in files:
for ext in exts:
if f.endswith(ext):
test_files += [f]
test_counts[rel_base] = len(test_files)
for base_dir, dir_names, files in os.walk(meta_root):
if base_dir == meta_root:
continue
rel_base = os.path.relpath(base_dir, meta_root)
num_files = len(files)
if '__dir__.ini' in files:
num_files -= 1
meta_counts[rel_base] = num_files
final_counts = []
for (test_dir, test_count) in test_counts.items():
if not test_count:
continue
meta_count = meta_counts.get(test_dir, 0)
final_counts += [(test_dir, test_count, meta_count)]
print('Test counts')
print('dir: %% failed (num tests / num failures)')
s = sorted(final_counts, key=lambda x: x[2] / x[1])
for (test_dir, test_count, meta_count) in reversed(sorted(s, key=lambda x: x[2])):
if not meta_count:
continue
print('%s: %.2f%% (%d / %d)' % (test_dir, meta_count / test_count * 100, test_count, meta_count))
|
Add script to summarize WPT test directories with failing tests.
|
Add script to summarize WPT test directories with failing tests.
|
Python
|
mpl-2.0
|
splav/servo,splav/servo,KiChjang/servo,DominoTree/servo,splav/servo,splav/servo,KiChjang/servo,DominoTree/servo,DominoTree/servo,DominoTree/servo,DominoTree/servo,splav/servo,DominoTree/servo,KiChjang/servo,KiChjang/servo,splav/servo,KiChjang/servo,KiChjang/servo,KiChjang/servo,DominoTree/servo,DominoTree/servo,DominoTree/servo,splav/servo,KiChjang/servo,DominoTree/servo,splav/servo,splav/servo,KiChjang/servo,KiChjang/servo,splav/servo
|
Add script to summarize WPT test directories with failing tests.
|
#!/usr/bin/env python
# Copyright 2019 The Servo Project Developers. See the COPYRIGHT
# file at the top-level directory of this distribution.
#
# Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
# http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
# <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
# option. This file may not be copied, modified, or distributed
# except according to those terms.
import configparser
import os
test_root = os.path.join('tests', 'wpt', 'web-platform-tests')
meta_root = os.path.join('tests', 'wpt', 'metadata')
test_counts = {}
meta_counts = {}
for base_dir, dir_names, files in os.walk(test_root):
if base_dir == test_root:
continue
rel_base = os.path.relpath(base_dir, test_root)
if not os.path.exists(os.path.join(meta_root, rel_base)):
continue
test_files = []
exts = ['.html', '.htm', '.xht', '.xhtml', '.window.js', '.worker.js', '.any.js']
for f in files:
for ext in exts:
if f.endswith(ext):
test_files += [f]
test_counts[rel_base] = len(test_files)
for base_dir, dir_names, files in os.walk(meta_root):
if base_dir == meta_root:
continue
rel_base = os.path.relpath(base_dir, meta_root)
num_files = len(files)
if '__dir__.ini' in files:
num_files -= 1
meta_counts[rel_base] = num_files
final_counts = []
for (test_dir, test_count) in test_counts.items():
if not test_count:
continue
meta_count = meta_counts.get(test_dir, 0)
final_counts += [(test_dir, test_count, meta_count)]
print('Test counts')
print('dir: %% failed (num tests / num failures)')
s = sorted(final_counts, key=lambda x: x[2] / x[1])
for (test_dir, test_count, meta_count) in reversed(sorted(s, key=lambda x: x[2])):
if not meta_count:
continue
print('%s: %.2f%% (%d / %d)' % (test_dir, meta_count / test_count * 100, test_count, meta_count))
|
<commit_before><commit_msg>Add script to summarize WPT test directories with failing tests.<commit_after>
|
#!/usr/bin/env python
# Copyright 2019 The Servo Project Developers. See the COPYRIGHT
# file at the top-level directory of this distribution.
#
# Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
# http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
# <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
# option. This file may not be copied, modified, or distributed
# except according to those terms.
import configparser
import os
test_root = os.path.join('tests', 'wpt', 'web-platform-tests')
meta_root = os.path.join('tests', 'wpt', 'metadata')
test_counts = {}
meta_counts = {}
for base_dir, dir_names, files in os.walk(test_root):
if base_dir == test_root:
continue
rel_base = os.path.relpath(base_dir, test_root)
if not os.path.exists(os.path.join(meta_root, rel_base)):
continue
test_files = []
exts = ['.html', '.htm', '.xht', '.xhtml', '.window.js', '.worker.js', '.any.js']
for f in files:
for ext in exts:
if f.endswith(ext):
test_files += [f]
test_counts[rel_base] = len(test_files)
for base_dir, dir_names, files in os.walk(meta_root):
if base_dir == meta_root:
continue
rel_base = os.path.relpath(base_dir, meta_root)
num_files = len(files)
if '__dir__.ini' in files:
num_files -= 1
meta_counts[rel_base] = num_files
final_counts = []
for (test_dir, test_count) in test_counts.items():
if not test_count:
continue
meta_count = meta_counts.get(test_dir, 0)
final_counts += [(test_dir, test_count, meta_count)]
print('Test counts')
print('dir: %% failed (num tests / num failures)')
s = sorted(final_counts, key=lambda x: x[2] / x[1])
for (test_dir, test_count, meta_count) in reversed(sorted(s, key=lambda x: x[2])):
if not meta_count:
continue
print('%s: %.2f%% (%d / %d)' % (test_dir, meta_count / test_count * 100, test_count, meta_count))
|
Add script to summarize WPT test directories with failing tests.#!/usr/bin/env python
# Copyright 2019 The Servo Project Developers. See the COPYRIGHT
# file at the top-level directory of this distribution.
#
# Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
# http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
# <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
# option. This file may not be copied, modified, or distributed
# except according to those terms.
import configparser
import os
test_root = os.path.join('tests', 'wpt', 'web-platform-tests')
meta_root = os.path.join('tests', 'wpt', 'metadata')
test_counts = {}
meta_counts = {}
for base_dir, dir_names, files in os.walk(test_root):
if base_dir == test_root:
continue
rel_base = os.path.relpath(base_dir, test_root)
if not os.path.exists(os.path.join(meta_root, rel_base)):
continue
test_files = []
exts = ['.html', '.htm', '.xht', '.xhtml', '.window.js', '.worker.js', '.any.js']
for f in files:
for ext in exts:
if f.endswith(ext):
test_files += [f]
test_counts[rel_base] = len(test_files)
for base_dir, dir_names, files in os.walk(meta_root):
if base_dir == meta_root:
continue
rel_base = os.path.relpath(base_dir, meta_root)
num_files = len(files)
if '__dir__.ini' in files:
num_files -= 1
meta_counts[rel_base] = num_files
final_counts = []
for (test_dir, test_count) in test_counts.items():
if not test_count:
continue
meta_count = meta_counts.get(test_dir, 0)
final_counts += [(test_dir, test_count, meta_count)]
print('Test counts')
print('dir: %% failed (num tests / num failures)')
s = sorted(final_counts, key=lambda x: x[2] / x[1])
for (test_dir, test_count, meta_count) in reversed(sorted(s, key=lambda x: x[2])):
if not meta_count:
continue
print('%s: %.2f%% (%d / %d)' % (test_dir, meta_count / test_count * 100, test_count, meta_count))
|
<commit_before><commit_msg>Add script to summarize WPT test directories with failing tests.<commit_after>#!/usr/bin/env python
# Copyright 2019 The Servo Project Developers. See the COPYRIGHT
# file at the top-level directory of this distribution.
#
# Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
# http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
# <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
# option. This file may not be copied, modified, or distributed
# except according to those terms.
import configparser
import os
test_root = os.path.join('tests', 'wpt', 'web-platform-tests')
meta_root = os.path.join('tests', 'wpt', 'metadata')
test_counts = {}
meta_counts = {}
for base_dir, dir_names, files in os.walk(test_root):
if base_dir == test_root:
continue
rel_base = os.path.relpath(base_dir, test_root)
if not os.path.exists(os.path.join(meta_root, rel_base)):
continue
test_files = []
exts = ['.html', '.htm', '.xht', '.xhtml', '.window.js', '.worker.js', '.any.js']
for f in files:
for ext in exts:
if f.endswith(ext):
test_files += [f]
test_counts[rel_base] = len(test_files)
for base_dir, dir_names, files in os.walk(meta_root):
if base_dir == meta_root:
continue
rel_base = os.path.relpath(base_dir, meta_root)
num_files = len(files)
if '__dir__.ini' in files:
num_files -= 1
meta_counts[rel_base] = num_files
final_counts = []
for (test_dir, test_count) in test_counts.items():
if not test_count:
continue
meta_count = meta_counts.get(test_dir, 0)
final_counts += [(test_dir, test_count, meta_count)]
print('Test counts')
print('dir: %% failed (num tests / num failures)')
s = sorted(final_counts, key=lambda x: x[2] / x[1])
for (test_dir, test_count, meta_count) in reversed(sorted(s, key=lambda x: x[2])):
if not meta_count:
continue
print('%s: %.2f%% (%d / %d)' % (test_dir, meta_count / test_count * 100, test_count, meta_count))
|
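A hedged refactor sketch of the directory-walking logic above as one reusable helper (paths and extension list are copied from the script; the metadata-directory existence check is omitted for brevity, and the script's unused configparser import is dropped):

import os

EXTS = ('.html', '.htm', '.xht', '.xhtml', '.window.js', '.worker.js', '.any.js')

def count_files(root, predicate):
    """Map each subdirectory of root (as a relative path) to its matching file count."""
    counts = {}
    for base_dir, _dirs, files in os.walk(root):
        if base_dir == root:
            continue
        counts[os.path.relpath(base_dir, root)] = sum(1 for f in files if predicate(f))
    return counts

test_counts = count_files(os.path.join('tests', 'wpt', 'web-platform-tests'),
                          lambda f: f.endswith(EXTS))
meta_counts = count_files(os.path.join('tests', 'wpt', 'metadata'),
                          lambda f: f != '__dir__.ini')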
|
a48774d7279df4148df9dcfdedaefdd0689373ba
|
scripts/migrate_piwik_derived_nodes.py
|
scripts/migrate_piwik_derived_nodes.py
|
"""
"""
import logging
import os
import sys
sys.path.insert(0, os.path.abspath(
os.path.join(os.path.dirname(__file__), os.pardir)
))
from modularodm.query.querydialect import DefaultQueryDialect as Q
from website.app import init_app
from website.models import Node
logger = logging.getLogger('root')
app = init_app('website.settings', set_backends=True, routes=True)
def copied_from_ancestor(node, attribute):
parent = getattr(node, attribute)
if (
parent
and parent.piwik_site_id is not None
and parent.piwik_site_id == node.piwik_site_id
):
return True
if parent:
        return copied_from_ancestor(parent, attribute)
return False
print "=== Registrations ==="
for node in Node.find(Q('is_registration', 'eq', True)):
if copied_from_ancestor(node, 'registered_from'):
node.piwik_site_id = None
node.save()
print(node._id)
print "=== Forks ==="
for node in Node.find(Q('is_fork', 'eq', True)):
if copied_from_ancestor(node, 'forked_from'):
node.piwik_site_id = None
node.save()
print(node._id)
|
Add migration script for nodes with incorrect piwik_site_id set.
|
Add migration script for nodes with incorrect piwik_site_id set.
|
Python
|
apache-2.0
|
haoyuchen1992/osf.io,GaryKriebel/osf.io,KAsante95/osf.io,SSJohns/osf.io,kwierman/osf.io,sbt9uc/osf.io,njantrania/osf.io,Johnetordoff/osf.io,chennan47/osf.io,HarryRybacki/osf.io,cslzchen/osf.io,caseyrygt/osf.io,bdyetton/prettychart,amyshi188/osf.io,wearpants/osf.io,Nesiehr/osf.io,caseyrollins/osf.io,jeffreyliu3230/osf.io,dplorimer/osf,zkraime/osf.io,KAsante95/osf.io,reinaH/osf.io,saradbowman/osf.io,acshi/osf.io,jmcarp/osf.io,cldershem/osf.io,leb2dg/osf.io,SSJohns/osf.io,fabianvf/osf.io,binoculars/osf.io,cwisecarver/osf.io,HalcyonChimera/osf.io,samanehsan/osf.io,TomHeatwole/osf.io,doublebits/osf.io,revanthkolli/osf.io,ticklemepierce/osf.io,laurenrevere/osf.io,abought/osf.io,doublebits/osf.io,samchrisinger/osf.io,cosenal/osf.io,billyhunt/osf.io,jolene-esposito/osf.io,mfraezz/osf.io,brianjgeiger/osf.io,icereval/osf.io,zamattiac/osf.io,GaryKriebel/osf.io,lyndsysimon/osf.io,erinspace/osf.io,cwisecarver/osf.io,emetsger/osf.io,chrisseto/osf.io,kushG/osf.io,brandonPurvis/osf.io,mluo613/osf.io,brianjgeiger/osf.io,hmoco/osf.io,mluo613/osf.io,danielneis/osf.io,TomBaxter/osf.io,caseyrygt/osf.io,sloria/osf.io,zamattiac/osf.io,cosenal/osf.io,icereval/osf.io,zamattiac/osf.io,zachjanicki/osf.io,zachjanicki/osf.io,revanthkolli/osf.io,felliott/osf.io,billyhunt/osf.io,kch8qx/osf.io,lamdnhan/osf.io,mfraezz/osf.io,hmoco/osf.io,HarryRybacki/osf.io,barbour-em/osf.io,MerlinZhang/osf.io,crcresearch/osf.io,sbt9uc/osf.io,Johnetordoff/osf.io,acshi/osf.io,doublebits/osf.io,cslzchen/osf.io,wearpants/osf.io,caneruguz/osf.io,jinluyuan/osf.io,reinaH/osf.io,jeffreyliu3230/osf.io,samchrisinger/osf.io,TomBaxter/osf.io,cldershem/osf.io,CenterForOpenScience/osf.io,jeffreyliu3230/osf.io,Nesiehr/osf.io,abought/osf.io,RomanZWang/osf.io,aaxelb/osf.io,cosenal/osf.io,arpitar/osf.io,cslzchen/osf.io,fabianvf/osf.io,emetsger/osf.io,samanehsan/osf.io,KAsante95/osf.io,lamdnhan/osf.io,haoyuchen1992/osf.io,erinspace/osf.io,arpitar/osf.io,HalcyonChimera/osf.io,barbour-em/osf.io,lyndsysimon/osf.io,crcresearch/osf.io,RomanZWang/osf.io,CenterForOpenScience/osf.io,Johnetordoff/osf.io,sbt9uc/osf.io,mluo613/osf.io,arpitar/osf.io,SSJohns/osf.io,monikagrabowska/osf.io,samchrisinger/osf.io,mluke93/osf.io,lyndsysimon/osf.io,doublebits/osf.io,cslzchen/osf.io,billyhunt/osf.io,amyshi188/osf.io,laurenrevere/osf.io,GaryKriebel/osf.io,wearpants/osf.io,caneruguz/osf.io,reinaH/osf.io,HalcyonChimera/osf.io,zkraime/osf.io,Ghalko/osf.io,adlius/osf.io,haoyuchen1992/osf.io,hmoco/osf.io,SSJohns/osf.io,AndrewSallans/osf.io,petermalcolm/osf.io,binoculars/osf.io,caseyrygt/osf.io,Ghalko/osf.io,brandonPurvis/osf.io,njantrania/osf.io,ckc6cz/osf.io,ckc6cz/osf.io,billyhunt/osf.io,kch8qx/osf.io,chennan47/osf.io,MerlinZhang/osf.io,fabianvf/osf.io,emetsger/osf.io,HarryRybacki/osf.io,RomanZWang/osf.io,TomHeatwole/osf.io,aaxelb/osf.io,wearpants/osf.io,jinluyuan/osf.io,ticklemepierce/osf.io,icereval/osf.io,alexschiller/osf.io,haoyuchen1992/osf.io,monikagrabowska/osf.io,asanfilippo7/osf.io
|
Add migration script for nodes with incorrect piwik_site_id set.
|
"""
"""
import logging
import os
import sys
sys.path.insert(0, os.path.abspath(
os.path.join(os.path.dirname(__file__), os.pardir)
))
from modularodm.query.querydialect import DefaultQueryDialect as Q
from website.app import init_app
from website.models import Node
logger = logging.getLogger('root')
app = init_app('website.settings', set_backends=True, routes=True)
def copied_from_ancestor(node, attribute):
parent = getattr(node, attribute)
if (
parent
and parent.piwik_site_id is not None
and parent.piwik_site_id == node.piwik_site_id
):
return True
if parent:
        return copied_from_ancestor(parent, attribute)
return False
print "=== Registrations ==="
for node in Node.find(Q('is_registration', 'eq', True)):
if copied_from_ancestor(node, 'registered_from'):
node.piwik_site_id = None
node.save()
print(node._id)
print "=== Forks ==="
for node in Node.find(Q('is_fork', 'eq', True)):
if copied_from_ancestor(node, 'forked_from'):
node.piwik_site_id = None
node.save()
print(node._id)
|
<commit_before><commit_msg>Add migration script for nodes with incorrect piwik_site_id set.<commit_after>
|
"""
"""
import logging
import os
import sys
sys.path.insert(0, os.path.abspath(
os.path.join(os.path.dirname(__file__), os.pardir)
))
from modularodm.query.querydialect import DefaultQueryDialect as Q
from website.app import init_app
from website.models import Node
logger = logging.getLogger('root')
app = init_app('website.settings', set_backends=True, routes=True)
def copied_from_ancestor(node, attribute):
parent = getattr(node, attribute)
if (
parent
and parent.piwik_site_id is not None
and parent.piwik_site_id == node.piwik_site_id
):
return True
if parent:
        return copied_from_ancestor(parent, attribute)
return False
print "=== Registrations ==="
for node in Node.find(Q('is_registration', 'eq', True)):
if copied_from_ancestor(node, 'registered_from'):
node.piwik_site_id = None
node.save()
print(node._id)
print "=== Forks ==="
for node in Node.find(Q('is_fork', 'eq', True)):
if copied_from_ancestor(node, 'forked_from'):
node.piwik_site_id = None
node.save()
print(node._id)
|
Add migration script for nodes with incorrect piwik_site_id set."""
"""
import logging
import os
import sys
sys.path.insert(0, os.path.abspath(
os.path.join(os.path.dirname(__file__), os.pardir)
))
from modularodm.query.querydialect import DefaultQueryDialect as Q
from website.app import init_app
from website.models import Node
logger = logging.getLogger('root')
app = init_app('website.settings', set_backends=True, routes=True)
def copied_from_ancestor(node, attribute):
parent = getattr(node, attribute)
if (
parent
and parent.piwik_site_id is not None
and parent.piwik_site_id == node.piwik_site_id
):
return True
if parent:
        return copied_from_ancestor(parent, attribute)
return False
print "=== Registrations ==="
for node in Node.find(Q('is_registration', 'eq', True)):
if copied_from_ancestor(node, 'registered_from'):
node.piwik_site_id = None
node.save()
print(node._id)
print "=== Forks ==="
for node in Node.find(Q('is_fork', 'eq', True)):
if copied_from_ancestor(node, 'forked_from'):
node.piwik_site_id = None
node.save()
print(node._id)
|
<commit_before><commit_msg>Add migration script for nodes with incorrect piwik_site_id set.<commit_after>"""
"""
import logging
import os
import sys
sys.path.insert(0, os.path.abspath(
os.path.join(os.path.dirname(__file__), os.pardir)
))
from modularodm.query.querydialect import DefaultQueryDialect as Q
from website.app import init_app
from website.models import Node
logger = logging.getLogger('root')
app = init_app('website.settings', set_backends=True, routes=True)
def copied_from_ancestor(node, attribute):
parent = getattr(node, attribute)
if (
parent
and parent.piwik_site_id is not None
and parent.piwik_site_id == node.piwik_site_id
):
return True
if parent:
        return copied_from_ancestor(parent, attribute)
return False
print "=== Registrations ==="
for node in Node.find(Q('is_registration', 'eq', True)):
if copied_from_ancestor(node, 'registered_from'):
node.piwik_site_id = None
node.save()
print(node._id)
print "=== Forks ==="
for node in Node.find(Q('is_fork', 'eq', True)):
if copied_from_ancestor(node, 'forked_from'):
node.piwik_site_id = None
node.save()
print(node._id)
|
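The ancestor check above only follows a single chain of parents, so an iterative sketch (same Node attributes assumed) expresses it without recursion and makes the propagated result explicit:

def copied_from_ancestor(node, attribute):
    """True if any ancestor along `attribute` shares node's piwik_site_id."""
    parent = getattr(node, attribute)
    while parent is not None:
        if parent.piwik_site_id is not None and \
                parent.piwik_site_id == node.piwik_site_id:
            return True
        parent = getattr(parent, attribute)
    return False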
|
6ee284adcc30213bc07153a16be475ec3e067d7e
|
rbtools/api/decode.py
|
rbtools/api/decode.py
|
import json
from rbtools.api.utils import parse_mimetype
DECODER_MAP = {}
def DefaultDecoder(payload):
"""Default decoder for API payloads.
The default decoder is used when a decoder is not found in the
DECODER_MAP. This is a last resort which should only be used when
something has gone wrong.
"""
return {
'resource': {
'data': payload,
},
}
DEFAULT_DECODER = DefaultDecoder
def JsonDecoder(payload):
return json.loads(payload)
DECODER_MAP['application/json'] = JsonDecoder
def PlainTextDecoder(payload):
return {
'resource': {
'text': payload,
},
}
DECODER_MAP['text/plain'] = PlainTextDecoder
def PatchDecoder(payload):
return {
'resource': {
'diff': payload,
},
}
DECODER_MAP['text/x-patch'] = PatchDecoder
def decode_response(payload, mime_type):
"""Decode a Web API response.
The body of a Web API response will be decoded into a dictionary,
according to the provided mime_type.
"""
if not payload:
return {}
mime = parse_mimetype(mime_type)
format = '%s/%s' % (mime['main_type'], mime['format'])
if format in DECODER_MAP:
decoder = DECODER_MAP[format]
else:
decoder = DEFAULT_DECODER
return decoder(payload)
|
import json
from rbtools.api.utils import parse_mimetype
DECODER_MAP = {}
def DefaultDecoder(payload):
"""Default decoder for API payloads.
The default decoder is used when a decoder is not found in the
DECODER_MAP. This is a last resort which should only be used when
something has gone wrong.
"""
return {
'resource': {
'data': payload,
},
}
DEFAULT_DECODER = DefaultDecoder
def JsonDecoder(payload):
return json.loads(payload)
DECODER_MAP['application/json'] = JsonDecoder
def PlainTextDecoder(payload):
return {
'resource': {
'text': payload,
},
}
DECODER_MAP['text/plain'] = PlainTextDecoder
def PatchDecoder(payload):
return {
'resource': {
'diff': payload,
},
}
DECODER_MAP['text/x-patch'] = PatchDecoder
def decode_response(payload, mime_type):
"""Decode a Web API response.
The body of a Web API response will be decoded into a dictionary,
according to the provided mime_type.
"""
mime = parse_mimetype(mime_type)
format = '%s/%s' % (mime['main_type'], mime['format'])
if format in DECODER_MAP:
decoder = DECODER_MAP[format]
else:
decoder = DEFAULT_DECODER
return decoder(payload)
|
Fix decoding of empty responses.
|
Fix decoding of empty responses.
When an empty response was given by the web server, we were
decoding to '{}', which is problematic when you are expecting
an empty response for things like empty text/plain files. This
fixes the problematic assumption we were making.
Reviewed at http://reviews.reviewboard.org/r/3680/
|
Python
|
mit
|
davidt/rbtools,datjwu/rbtools,halvorlu/rbtools,beol/rbtools,reviewboard/rbtools,datjwu/rbtools,reviewboard/rbtools,halvorlu/rbtools,davidt/rbtools,datjwu/rbtools,davidt/rbtools,1tush/rbtools,beol/rbtools,haosdent/rbtools,beol/rbtools,haosdent/rbtools,reviewboard/rbtools,haosdent/rbtools,halvorlu/rbtools
|
import json
from rbtools.api.utils import parse_mimetype
DECODER_MAP = {}
def DefaultDecoder(payload):
"""Default decoder for API payloads.
The default decoder is used when a decoder is not found in the
DECODER_MAP. This is a last resort which should only be used when
something has gone wrong.
"""
return {
'resource': {
'data': payload,
},
}
DEFAULT_DECODER = DefaultDecoder
def JsonDecoder(payload):
return json.loads(payload)
DECODER_MAP['application/json'] = JsonDecoder
def PlainTextDecoder(payload):
return {
'resource': {
'text': payload,
},
}
DECODER_MAP['text/plain'] = PlainTextDecoder
def PatchDecoder(payload):
return {
'resource': {
'diff': payload,
},
}
DECODER_MAP['text/x-patch'] = PatchDecoder
def decode_response(payload, mime_type):
"""Decode a Web API response.
The body of a Web API response will be decoded into a dictionary,
according to the provided mime_type.
"""
if not payload:
return {}
mime = parse_mimetype(mime_type)
format = '%s/%s' % (mime['main_type'], mime['format'])
if format in DECODER_MAP:
decoder = DECODER_MAP[format]
else:
decoder = DEFAULT_DECODER
return decoder(payload)
Fix decoding of empty responses.
When an empty response was given by the web server, we were
decoding to '{}', which is problematic when you are expecting
an empty response for things like empty text/plain files. This
fixes the problematic assumption we were making.
Reviewed at http://reviews.reviewboard.org/r/3680/
|
import json
from rbtools.api.utils import parse_mimetype
DECODER_MAP = {}
def DefaultDecoder(payload):
"""Default decoder for API payloads.
The default decoder is used when a decoder is not found in the
DECODER_MAP. This is a last resort which should only be used when
something has gone wrong.
"""
return {
'resource': {
'data': payload,
},
}
DEFAULT_DECODER = DefaultDecoder
def JsonDecoder(payload):
return json.loads(payload)
DECODER_MAP['application/json'] = JsonDecoder
def PlainTextDecoder(payload):
return {
'resource': {
'text': payload,
},
}
DECODER_MAP['text/plain'] = PlainTextDecoder
def PatchDecoder(payload):
return {
'resource': {
'diff': payload,
},
}
DECODER_MAP['text/x-patch'] = PatchDecoder
def decode_response(payload, mime_type):
"""Decode a Web API response.
The body of a Web API response will be decoded into a dictionary,
according to the provided mime_type.
"""
mime = parse_mimetype(mime_type)
format = '%s/%s' % (mime['main_type'], mime['format'])
if format in DECODER_MAP:
decoder = DECODER_MAP[format]
else:
decoder = DEFAULT_DECODER
return decoder(payload)
|
<commit_before>import json
from rbtools.api.utils import parse_mimetype
DECODER_MAP = {}
def DefaultDecoder(payload):
"""Default decoder for API payloads.
The default decoder is used when a decoder is not found in the
DECODER_MAP. This is a last resort which should only be used when
something has gone wrong.
"""
return {
'resource': {
'data': payload,
},
}
DEFAULT_DECODER = DefaultDecoder
def JsonDecoder(payload):
return json.loads(payload)
DECODER_MAP['application/json'] = JsonDecoder
def PlainTextDecoder(payload):
return {
'resource': {
'text': payload,
},
}
DECODER_MAP['text/plain'] = PlainTextDecoder
def PatchDecoder(payload):
return {
'resource': {
'diff': payload,
},
}
DECODER_MAP['text/x-patch'] = PatchDecoder
def decode_response(payload, mime_type):
"""Decode a Web API response.
The body of a Web API response will be decoded into a dictionary,
according to the provided mime_type.
"""
if not payload:
return {}
mime = parse_mimetype(mime_type)
format = '%s/%s' % (mime['main_type'], mime['format'])
if format in DECODER_MAP:
decoder = DECODER_MAP[format]
else:
decoder = DEFAULT_DECODER
return decoder(payload)
<commit_msg>Fix decoding of empty responses.
When an empty response was given by the web server, we were
decoding to '{}', which is problematic when you are expecting
an empty response for things like empty text/plain files. This
fixes the problematic assumption we were making.
Reviewed at http://reviews.reviewboard.org/r/3680/<commit_after>
|
import json
from rbtools.api.utils import parse_mimetype
DECODER_MAP = {}
def DefaultDecoder(payload):
"""Default decoder for API payloads.
The default decoder is used when a decoder is not found in the
DECODER_MAP. This is a last resort which should only be used when
something has gone wrong.
"""
return {
'resource': {
'data': payload,
},
}
DEFAULT_DECODER = DefaultDecoder
def JsonDecoder(payload):
return json.loads(payload)
DECODER_MAP['application/json'] = JsonDecoder
def PlainTextDecoder(payload):
return {
'resource': {
'text': payload,
},
}
DECODER_MAP['text/plain'] = PlainTextDecoder
def PatchDecoder(payload):
return {
'resource': {
'diff': payload,
},
}
DECODER_MAP['text/x-patch'] = PatchDecoder
def decode_response(payload, mime_type):
"""Decode a Web API response.
The body of a Web API response will be decoded into a dictionary,
according to the provided mime_type.
"""
mime = parse_mimetype(mime_type)
format = '%s/%s' % (mime['main_type'], mime['format'])
if format in DECODER_MAP:
decoder = DECODER_MAP[format]
else:
decoder = DEFAULT_DECODER
return decoder(payload)
|
import json
from rbtools.api.utils import parse_mimetype
DECODER_MAP = {}
def DefaultDecoder(payload):
"""Default decoder for API payloads.
The default decoder is used when a decoder is not found in the
DECODER_MAP. This is a last resort which should only be used when
something has gone wrong.
"""
return {
'resource': {
'data': payload,
},
}
DEFAULT_DECODER = DefaultDecoder
def JsonDecoder(payload):
return json.loads(payload)
DECODER_MAP['application/json'] = JsonDecoder
def PlainTextDecoder(payload):
return {
'resource': {
'text': payload,
},
}
DECODER_MAP['text/plain'] = PlainTextDecoder
def PatchDecoder(payload):
return {
'resource': {
'diff': payload,
},
}
DECODER_MAP['text/x-patch'] = PatchDecoder
def decode_response(payload, mime_type):
"""Decode a Web API response.
The body of a Web API response will be decoded into a dictionary,
according to the provided mime_type.
"""
if not payload:
return {}
mime = parse_mimetype(mime_type)
format = '%s/%s' % (mime['main_type'], mime['format'])
if format in DECODER_MAP:
decoder = DECODER_MAP[format]
else:
decoder = DEFAULT_DECODER
return decoder(payload)
Fix decoding of empty responses.
When an empty response was given by the web server, we were
decoding to '{}', which is problematic when you are expecting
an empty response for things like empty text/plain files. This
fixes the problematic assumption we were making.
Reviewed at http://reviews.reviewboard.org/r/3680/import json
from rbtools.api.utils import parse_mimetype
DECODER_MAP = {}
def DefaultDecoder(payload):
"""Default decoder for API payloads.
The default decoder is used when a decoder is not found in the
DECODER_MAP. This is a last resort which should only be used when
something has gone wrong.
"""
return {
'resource': {
'data': payload,
},
}
DEFAULT_DECODER = DefaultDecoder
def JsonDecoder(payload):
return json.loads(payload)
DECODER_MAP['application/json'] = JsonDecoder
def PlainTextDecoder(payload):
return {
'resource': {
'text': payload,
},
}
DECODER_MAP['text/plain'] = PlainTextDecoder
def PatchDecoder(payload):
return {
'resource': {
'diff': payload,
},
}
DECODER_MAP['text/x-patch'] = PatchDecoder
def decode_response(payload, mime_type):
"""Decode a Web API response.
The body of a Web API response will be decoded into a dictionary,
according to the provided mime_type.
"""
mime = parse_mimetype(mime_type)
format = '%s/%s' % (mime['main_type'], mime['format'])
if format in DECODER_MAP:
decoder = DECODER_MAP[format]
else:
decoder = DEFAULT_DECODER
return decoder(payload)
|
<commit_before>import json
from rbtools.api.utils import parse_mimetype
DECODER_MAP = {}
def DefaultDecoder(payload):
"""Default decoder for API payloads.
The default decoder is used when a decoder is not found in the
DECODER_MAP. This is a last resort which should only be used when
something has gone wrong.
"""
return {
'resource': {
'data': payload,
},
}
DEFAULT_DECODER = DefaultDecoder
def JsonDecoder(payload):
return json.loads(payload)
DECODER_MAP['application/json'] = JsonDecoder
def PlainTextDecoder(payload):
return {
'resource': {
'text': payload,
},
}
DECODER_MAP['text/plain'] = PlainTextDecoder
def PatchDecoder(payload):
return {
'resource': {
'diff': payload,
},
}
DECODER_MAP['text/x-patch'] = PatchDecoder
def decode_response(payload, mime_type):
"""Decode a Web API response.
The body of a Web API response will be decoded into a dictionary,
according to the provided mime_type.
"""
if not payload:
return {}
mime = parse_mimetype(mime_type)
format = '%s/%s' % (mime['main_type'], mime['format'])
if format in DECODER_MAP:
decoder = DECODER_MAP[format]
else:
decoder = DEFAULT_DECODER
return decoder(payload)
<commit_msg>Fix decoding of empty responses.
When an empty response was given by the web server, we were
decoding to '{}', which is problematic when you are expecting
an empty response for things like empty text/plain files. This
fixes the problematic assumption we were making.
Reviewed at http://reviews.reviewboard.org/r/3680/<commit_after>import json
from rbtools.api.utils import parse_mimetype
DECODER_MAP = {}
def DefaultDecoder(payload):
"""Default decoder for API payloads.
The default decoder is used when a decoder is not found in the
DECODER_MAP. This is a last resort which should only be used when
something has gone wrong.
"""
return {
'resource': {
'data': payload,
},
}
DEFAULT_DECODER = DefaultDecoder
def JsonDecoder(payload):
return json.loads(payload)
DECODER_MAP['application/json'] = JsonDecoder
def PlainTextDecoder(payload):
return {
'resource': {
'text': payload,
},
}
DECODER_MAP['text/plain'] = PlainTextDecoder
def PatchDecoder(payload):
return {
'resource': {
'diff': payload,
},
}
DECODER_MAP['text/x-patch'] = PatchDecoder
def decode_response(payload, mime_type):
"""Decode a Web API response.
The body of a Web API response will be decoded into a dictionary,
according to the provided mime_type.
"""
mime = parse_mimetype(mime_type)
format = '%s/%s' % (mime['main_type'], mime['format'])
if format in DECODER_MAP:
decoder = DECODER_MAP[format]
else:
decoder = DEFAULT_DECODER
return decoder(payload)
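To illustrate the behavioral change (hedged example; assumes parse_mimetype resolves these plain mime types to the decoder keys as the module expects): before the fix an empty body always came back as {}, afterwards it is routed through the mime-type decoder:

from rbtools.api.decode import decode_response

print(decode_response('', 'text/plain'))    # {'resource': {'text': ''}}
print(decode_response('', 'text/x-patch'))  # {'resource': {'diff': ''}}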
|
b58c85e5105b06ecb5d8598105b4c999287a5a62
|
demo/searchJustLtr.py
|
demo/searchJustLtr.py
|
from features import formatFeature
baseQuery = {
"query": {
"ltr": {
"model": {
"stored": "" # Model name
            },
            "features": []  # populated by featureQueries()
}
}
}
def featureQueries(keywords):
try:
ftrId = 1
while True:
parsedJson = formatFeature(ftrId, keywords)
baseQuery['query']['ltr']['features'].append(parsedJson['query'])
ftrId += 1
except IOError:
pass
import json
print("%s" % json.dumps(baseQuery))
return baseQuery
if __name__ == "__main__":
from sys import argv
from elasticsearch import Elasticsearch
esUrl="http://localhost:9200"
es = Elasticsearch(timeout=1000)
search = featureQueries(argv[1])
model = "test_6"
if len(argv) > 2:
model = argv[2]
baseQuery['query']['ltr']['model']['stored'] = model
results = es.search(index='tmdb', doc_type='movie', body=search)
for result in results['hits']['hits']:
print(result['_source']['title'])
|
Add a script for Search w/ just LTR
|
Add a script for Search w/ just LTR
|
Python
|
apache-2.0
|
o19s/elasticsearch-learning-to-rank,o19s/elasticsearch-learning-to-rank
|
Add a script for Search w/ just LTR
|
from features import formatFeature
baseQuery = {
"query": {
"ltr": {
"model": {
"stored": "" # Model name
            },
            "features": []  # populated by featureQueries()
}
}
}
def featureQueries(keywords):
try:
ftrId = 1
while True:
parsedJson = formatFeature(ftrId, keywords)
baseQuery['query']['ltr']['features'].append(parsedJson['query'])
ftrId += 1
except IOError:
pass
import json
print("%s" % json.dumps(baseQuery))
return baseQuery
if __name__ == "__main__":
from sys import argv
from elasticsearch import Elasticsearch
esUrl="http://localhost:9200"
es = Elasticsearch(timeout=1000)
search = featureQueries(argv[1])
model = "test_6"
if len(argv) > 2:
model = argv[2]
baseQuery['query']['ltr']['model']['stored'] = model
results = es.search(index='tmdb', doc_type='movie', body=search)
for result in results['hits']['hits']:
print(result['_source']['title'])
|
<commit_before><commit_msg>Add a script for Search w/ just LTR<commit_after>
|
from features import formatFeature
baseQuery = {
"query": {
"ltr": {
"model": {
"stored": "" # Model name
            },
            "features": []  # populated by featureQueries()
}
}
}
def featureQueries(keywords):
try:
ftrId = 1
while True:
parsedJson = formatFeature(ftrId, keywords)
baseQuery['query']['ltr']['features'].append(parsedJson['query'])
ftrId += 1
except IOError:
pass
import json
print("%s" % json.dumps(baseQuery))
return baseQuery
if __name__ == "__main__":
from sys import argv
from elasticsearch import Elasticsearch
esUrl="http://localhost:9200"
es = Elasticsearch(timeout=1000)
search = featureQueries(argv[1])
model = "test_6"
if len(argv) > 2:
model = argv[2]
baseQuery['query']['ltr']['model']['stored'] = model
results = es.search(index='tmdb', doc_type='movie', body=search)
for result in results['hits']['hits']:
print(result['_source']['title'])
|
Add a script for Search w/ just LTRfrom features import formatFeature
baseQuery = {
"query": {
"ltr": {
"model": {
"stored": "" # Model name
            },
            "features": []  # populated by featureQueries()
}
}
}
def featureQueries(keywords):
try:
ftrId = 1
while True:
parsedJson = formatFeature(ftrId, keywords)
baseQuery['query']['ltr']['features'].append(parsedJson['query'])
ftrId += 1
except IOError:
pass
import json
print("%s" % json.dumps(baseQuery))
return baseQuery
if __name__ == "__main__":
from sys import argv
from elasticsearch import Elasticsearch
esUrl="http://localhost:9200"
es = Elasticsearch(timeout=1000)
search = featureQueries(argv[1])
model = "test_6"
if len(argv) > 2:
model = argv[2]
baseQuery['query']['ltr']['model']['stored'] = model
results = es.search(index='tmdb', doc_type='movie', body=search)
for result in results['hits']['hits']:
print(result['_source']['title'])
|
<commit_before><commit_msg>Add a script for Search w/ just LTR<commit_after>from features import formatFeature
baseQuery = {
"query": {
"ltr": {
"model": {
"stored": "" # Model name
            },
            "features": []  # populated by featureQueries()
}
}
}
def featureQueries(keywords):
try:
ftrId = 1
while True:
parsedJson = formatFeature(ftrId, keywords)
baseQuery['query']['ltr']['features'].append(parsedJson['query'])
ftrId += 1
except IOError:
pass
import json
print("%s" % json.dumps(baseQuery))
return baseQuery
if __name__ == "__main__":
from sys import argv
from elasticsearch import Elasticsearch
esUrl="http://localhost:9200"
es = Elasticsearch(timeout=1000)
search = featureQueries(argv[1])
model = "test_6"
if len(argv) > 2:
model = argv[2]
baseQuery['query']['ltr']['model']['stored'] = model
results = es.search(index='tmdb', doc_type='movie', body=search)
for result in results['hits']['hits']:
print(result['_source']['title'])
|
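Hedged usage sketch (the tmdb index, movie doc type and test_6 default are the script's own; the keyword, host and stored model are illustrative and must exist in your setup):

# Run from the demo directory against a local Elasticsearch with the
# learning-to-rank plugin installed and the features/model already stored:
#
#   python searchJustLtr.py "rambo" test_6
#
# Programmatic equivalent of the __main__ block:
from searchJustLtr import featureQueries
body = featureQueries("rambo")                       # fills the ltr features list
body['query']['ltr']['model']['stored'] = "test_6"   # select the stored model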
|
e5124e51ac716cf47af89fca957acc5623470e45
|
src/oscar/apps/dashboard/shipping/forms.py
|
src/oscar/apps/dashboard/shipping/forms.py
|
from django import forms
from oscar.core.loading import get_model
class WeightBasedForm(forms.ModelForm):
class Meta:
model = get_model('shipping', 'WeightBased')
fields = ['description', 'default_weight', 'countries']
class WeightBandForm(forms.ModelForm):
def __init__(self, method, *args, **kwargs):
super(WeightBandForm, self).__init__(*args, **kwargs)
self.instance.method = method
class Meta:
model = get_model('shipping', 'WeightBand')
fields = ('upper_limit', 'charge')
|
from django import forms
from oscar.core.loading import get_model
class WeightBasedForm(forms.ModelForm):
class Meta:
model = get_model('shipping', 'WeightBased')
fields = ['name', 'description', 'default_weight', 'countries']
class WeightBandForm(forms.ModelForm):
def __init__(self, method, *args, **kwargs):
super(WeightBandForm, self).__init__(*args, **kwargs)
self.instance.method = method
class Meta:
model = get_model('shipping', 'WeightBand')
fields = ('upper_limit', 'charge')
|
Revert "Remove non-existing field from WeightBasedForm"
|
Revert "Remove non-existing field from WeightBasedForm"
This reverts commit af712fedf4963d66f56f5ab9054318c493572ab1.
|
Python
|
bsd-3-clause
|
anentropic/django-oscar,okfish/django-oscar,solarissmoke/django-oscar,sonofatailor/django-oscar,django-oscar/django-oscar,django-oscar/django-oscar,john-parton/django-oscar,sasha0/django-oscar,john-parton/django-oscar,okfish/django-oscar,anentropic/django-oscar,sonofatailor/django-oscar,anentropic/django-oscar,solarissmoke/django-oscar,sasha0/django-oscar,sonofatailor/django-oscar,john-parton/django-oscar,okfish/django-oscar,sonofatailor/django-oscar,anentropic/django-oscar,john-parton/django-oscar,solarissmoke/django-oscar,django-oscar/django-oscar,sasha0/django-oscar,okfish/django-oscar,solarissmoke/django-oscar,sasha0/django-oscar,django-oscar/django-oscar
|
from django import forms
from oscar.core.loading import get_model
class WeightBasedForm(forms.ModelForm):
class Meta:
model = get_model('shipping', 'WeightBased')
fields = ['description', 'default_weight', 'countries']
class WeightBandForm(forms.ModelForm):
def __init__(self, method, *args, **kwargs):
super(WeightBandForm, self).__init__(*args, **kwargs)
self.instance.method = method
class Meta:
model = get_model('shipping', 'WeightBand')
fields = ('upper_limit', 'charge')
Revert "Remove non-existing field from WeightBasedForm"
This reverts commit af712fedf4963d66f56f5ab9054318c493572ab1.
|
from django import forms
from oscar.core.loading import get_model
class WeightBasedForm(forms.ModelForm):
class Meta:
model = get_model('shipping', 'WeightBased')
fields = ['name', 'description', 'default_weight', 'countries']
class WeightBandForm(forms.ModelForm):
def __init__(self, method, *args, **kwargs):
super(WeightBandForm, self).__init__(*args, **kwargs)
self.instance.method = method
class Meta:
model = get_model('shipping', 'WeightBand')
fields = ('upper_limit', 'charge')
|
<commit_before>from django import forms
from oscar.core.loading import get_model
class WeightBasedForm(forms.ModelForm):
class Meta:
model = get_model('shipping', 'WeightBased')
fields = ['description', 'default_weight', 'countries']
class WeightBandForm(forms.ModelForm):
def __init__(self, method, *args, **kwargs):
super(WeightBandForm, self).__init__(*args, **kwargs)
self.instance.method = method
class Meta:
model = get_model('shipping', 'WeightBand')
fields = ('upper_limit', 'charge')
<commit_msg>Revert "Remove non-existing field from WeightBasedForm"
This reverts commit af712fedf4963d66f56f5ab9054318c493572ab1.<commit_after>
|
from django import forms
from oscar.core.loading import get_model
class WeightBasedForm(forms.ModelForm):
class Meta:
model = get_model('shipping', 'WeightBased')
fields = ['name', 'description', 'default_weight', 'countries']
class WeightBandForm(forms.ModelForm):
def __init__(self, method, *args, **kwargs):
super(WeightBandForm, self).__init__(*args, **kwargs)
self.instance.method = method
class Meta:
model = get_model('shipping', 'WeightBand')
fields = ('upper_limit', 'charge')
|
from django import forms
from oscar.core.loading import get_model
class WeightBasedForm(forms.ModelForm):
class Meta:
model = get_model('shipping', 'WeightBased')
fields = ['description', 'default_weight', 'countries']
class WeightBandForm(forms.ModelForm):
def __init__(self, method, *args, **kwargs):
super(WeightBandForm, self).__init__(*args, **kwargs)
self.instance.method = method
class Meta:
model = get_model('shipping', 'WeightBand')
fields = ('upper_limit', 'charge')
Revert "Remove non-existing field from WeightBasedForm"
This reverts commit af712fedf4963d66f56f5ab9054318c493572ab1.from django import forms
from oscar.core.loading import get_model
class WeightBasedForm(forms.ModelForm):
class Meta:
model = get_model('shipping', 'WeightBased')
fields = ['name', 'description', 'default_weight', 'countries']
class WeightBandForm(forms.ModelForm):
def __init__(self, method, *args, **kwargs):
super(WeightBandForm, self).__init__(*args, **kwargs)
self.instance.method = method
class Meta:
model = get_model('shipping', 'WeightBand')
fields = ('upper_limit', 'charge')
|
<commit_before>from django import forms
from oscar.core.loading import get_model
class WeightBasedForm(forms.ModelForm):
class Meta:
model = get_model('shipping', 'WeightBased')
fields = ['description', 'default_weight', 'countries']
class WeightBandForm(forms.ModelForm):
def __init__(self, method, *args, **kwargs):
super(WeightBandForm, self).__init__(*args, **kwargs)
self.instance.method = method
class Meta:
model = get_model('shipping', 'WeightBand')
fields = ('upper_limit', 'charge')
<commit_msg>Revert "Remove non-existing field from WeightBasedForm"
This reverts commit af712fedf4963d66f56f5ab9054318c493572ab1.<commit_after>from django import forms
from oscar.core.loading import get_model
class WeightBasedForm(forms.ModelForm):
class Meta:
model = get_model('shipping', 'WeightBased')
fields = ['name', 'description', 'default_weight', 'countries']
class WeightBandForm(forms.ModelForm):
def __init__(self, method, *args, **kwargs):
super(WeightBandForm, self).__init__(*args, **kwargs)
self.instance.method = method
class Meta:
model = get_model('shipping', 'WeightBand')
fields = ('upper_limit', 'charge')
|
999481b2c49440f428205a161a2c4ccf5580167e
|
IPython/core/tests/test_debugger.py
|
IPython/core/tests/test_debugger.py
|
"""Tests for debugging machinery.
"""
#-----------------------------------------------------------------------------
# Copyright (c) 2012, The IPython Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# third-party
import nose.tools as nt
# Our own
from IPython.core import debugger
#-----------------------------------------------------------------------------
# Tests
#-----------------------------------------------------------------------------
def test_longer_repr():
from repr import repr as trepr
a = '1234567890'* 7
ar = "'1234567890123456789012345678901234567890123456789012345678901234567890'"
a_trunc = "'123456789012...8901234567890'"
nt.assert_equals(trepr(a), a_trunc)
# The creation of our tracer modifies the repr module's repr function
# in-place, since that global is used directly by the stdlib's pdb module.
t = debugger.Tracer()
nt.assert_equals(trepr(a), ar)
|
Add simple test for modified repr as per review.
|
Add simple test for modified repr as per review.
|
Python
|
bsd-3-clause
|
ipython/ipython,ipython/ipython
|
Add simple test for modified repr as per review.
|
"""Tests for debugging machinery.
"""
#-----------------------------------------------------------------------------
# Copyright (c) 2012, The IPython Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# third-party
import nose.tools as nt
# Our own
from IPython.core import debugger
#-----------------------------------------------------------------------------
# Tests
#-----------------------------------------------------------------------------
def test_longer_repr():
from repr import repr as trepr
a = '1234567890'* 7
ar = "'1234567890123456789012345678901234567890123456789012345678901234567890'"
a_trunc = "'123456789012...8901234567890'"
nt.assert_equals(trepr(a), a_trunc)
# The creation of our tracer modifies the repr module's repr function
# in-place, since that global is used directly by the stdlib's pdb module.
t = debugger.Tracer()
nt.assert_equals(trepr(a), ar)
|
<commit_before><commit_msg>Add simple test for modified repr as per review.<commit_after>
|
"""Tests for debugging machinery.
"""
#-----------------------------------------------------------------------------
# Copyright (c) 2012, The IPython Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# third-party
import nose.tools as nt
# Our own
from IPython.core import debugger
#-----------------------------------------------------------------------------
# Tests
#-----------------------------------------------------------------------------
def test_longer_repr():
from repr import repr as trepr
a = '1234567890'* 7
ar = "'1234567890123456789012345678901234567890123456789012345678901234567890'"
a_trunc = "'123456789012...8901234567890'"
nt.assert_equals(trepr(a), a_trunc)
# The creation of our tracer modifies the repr module's repr function
# in-place, since that global is used directly by the stdlib's pdb module.
t = debugger.Tracer()
nt.assert_equals(trepr(a), ar)
|
Add simple test for modified repr as per review."""Tests for debugging machinery.
"""
#-----------------------------------------------------------------------------
# Copyright (c) 2012, The IPython Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# third-party
import nose.tools as nt
# Our own
from IPython.core import debugger
#-----------------------------------------------------------------------------
# Tests
#-----------------------------------------------------------------------------
def test_longer_repr():
from repr import repr as trepr
a = '1234567890'* 7
ar = "'1234567890123456789012345678901234567890123456789012345678901234567890'"
a_trunc = "'123456789012...8901234567890'"
nt.assert_equals(trepr(a), a_trunc)
# The creation of our tracer modifies the repr module's repr function
# in-place, since that global is used directly by the stdlib's pdb module.
t = debugger.Tracer()
nt.assert_equals(trepr(a), ar)
|
<commit_before><commit_msg>Add simple test for modified repr as per review.<commit_after>"""Tests for debugging machinery.
"""
#-----------------------------------------------------------------------------
# Copyright (c) 2012, The IPython Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# third-party
import nose.tools as nt
# Our own
from IPython.core import debugger
#-----------------------------------------------------------------------------
# Tests
#-----------------------------------------------------------------------------
def test_longer_repr():
from repr import repr as trepr
a = '1234567890'* 7
ar = "'1234567890123456789012345678901234567890123456789012345678901234567890'"
a_trunc = "'123456789012...8901234567890'"
nt.assert_equals(trepr(a), a_trunc)
# The creation of our tracer modifies the repr module's repr function
# in-place, since that global is used directly by the stdlib's pdb module.
t = debugger.Tracer()
nt.assert_equals(trepr(a), ar)
|
|
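The behaviour under test is the stdlib's size-limited repr; a short sketch of it, assuming Python 3 where the module is spelled reprlib (on Python 2 it is plain repr, as in the test above):

import reprlib

a = '1234567890' * 7
# The default Repr truncates long strings to 30 characters around an
# ellipsis, which is exactly what the first assertion checks.
print(reprlib.repr(a))   # "'123456789012...8901234567890'"

# Raising maxstring restores the full text, analogous to the effect the
# test expects after debugger.Tracer() patches the shared Repr instance.
r = reprlib.Repr()
r.maxstring = 80
print(r.repr(a))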
a3eb4602aa5ec87e6f78477c4789ed2fbde1cf93
|
stevedore/__init__.py
|
stevedore/__init__.py
|
# flake8: noqa
from .extension import ExtensionManager
from .enabled import EnabledExtensionManager
from .named import NamedExtensionManager
from .hook import HookManager
from .driver import DriverManager
import logging
# Configure a NullHandler for our log messages in case
# the app we're used from does not set up logging.
LOG = logging.getLogger(__name__)
try:
LOG.addHandler(logging.NullHandler())
except AttributeError:
# No NullHandler, probably python 2.6
pass
|
# flake8: noqa
__all__ = [
'ExtensionManager',
'EnabledExtensionManager',
'NamedExtensionManager',
'HookManager',
'DriverManager',
]
from .extension import ExtensionManager
from .enabled import EnabledExtensionManager
from .named import NamedExtensionManager
from .hook import HookManager
from .driver import DriverManager
import logging
# Configure a NullHandler for our log messages in case
# the app we're used from does not set up logging.
LOG = logging.getLogger('stevedore')
if hasattr(logging, 'NullHandler'):
LOG.addHandler(logging.NullHandler())
else:
class NullHandler(logging.Handler):
def handle(self, record):
pass
def emit(self, record):
pass
def createLock(self):
self.lock = None
LOG.addHandler(NullHandler())
|
Update null log handling for py26
|
Update null log handling for py26
Python 2.6 does not have a NullHandler in the logging
module, so introduce a little class that does the same
work.
Also add __all__ to the package init so extra names are
not exported.
Resolves issue #2
Change-Id: Id59d394cd02372e2c31de336894f06653cb1e22d
|
Python
|
apache-2.0
|
mandeepdhami/stevedore,nelsnelson/stevedore,nelsnelson/stevedore,openstack/stevedore,varunarya10/stevedore,JioCloud/stevedore,JioCloud/stevedore,citrix-openstack-build/stevedore,mandeepdhami/stevedore,citrix-openstack-build/stevedore,varunarya10/stevedore
|
# flake8: noqa
from .extension import ExtensionManager
from .enabled import EnabledExtensionManager
from .named import NamedExtensionManager
from .hook import HookManager
from .driver import DriverManager
import logging
# Configure a NullHandler for our log messages in case
# the app we're used from does not set up logging.
LOG = logging.getLogger(__name__)
try:
LOG.addHandler(logging.NullHandler())
except AttributeError:
# No NullHandler, probably python 2.6
pass
Update null log handling for py26
Python 2.6 does not have a NullHandler in the logging
module, so introduce a little class that does the same
work.
Also add __all__ to the package init so extra names are
not exported.
Resolves issue #2
Change-Id: Id59d394cd02372e2c31de336894f06653cb1e22d
|
# flake8: noqa
__all__ = [
'ExtensionManager',
'EnabledExtensionManager',
'NamedExtensionManager',
'HookManager',
'DriverManager',
]
from .extension import ExtensionManager
from .enabled import EnabledExtensionManager
from .named import NamedExtensionManager
from .hook import HookManager
from .driver import DriverManager
import logging
# Configure a NullHandler for our log messages in case
# the app we're used from does not set up logging.
LOG = logging.getLogger('stevedore')
if hasattr(logging, 'NullHandler'):
LOG.addHandler(logging.NullHandler())
else:
class NullHandler(logging.Handler):
def handle(self, record):
pass
def emit(self, record):
pass
def createLock(self):
self.lock = None
LOG.addHandler(NullHandler())
|
<commit_before># flake8: noqa
from .extension import ExtensionManager
from .enabled import EnabledExtensionManager
from .named import NamedExtensionManager
from .hook import HookManager
from .driver import DriverManager
import logging
# Configure a NullHandler for our log messages in case
# the app we're used from does not set up logging.
LOG = logging.getLogger(__name__)
try:
LOG.addHandler(logging.NullHandler())
except AttributeError:
# No NullHandler, probably python 2.6
pass
<commit_msg>Update null log handling for py26
Python 2.6 does not have a NullHandler in the logging
module, so introduce a little class that does the same
work.
Also add __all__ to the package init so extra names are
not exported.
Resolves issue #2
Change-Id: Id59d394cd02372e2c31de336894f06653cb1e22d<commit_after>
|
# flake8: noqa
__all__ = [
'ExtensionManager',
'EnabledExtensionManager',
'NamedExtensionManager',
'HookManager',
'DriverManager',
]
from .extension import ExtensionManager
from .enabled import EnabledExtensionManager
from .named import NamedExtensionManager
from .hook import HookManager
from .driver import DriverManager
import logging
# Configure a NullHandler for our log messages in case
# the app we're used from does not set up logging.
LOG = logging.getLogger('stevedore')
if hasattr(logging, 'NullHandler'):
LOG.addHandler(logging.NullHandler())
else:
class NullHandler(logging.Handler):
def handle(self, record):
pass
def emit(self, record):
pass
def createLock(self):
self.lock = None
LOG.addHandler(NullHandler())
|
# flake8: noqa
from .extension import ExtensionManager
from .enabled import EnabledExtensionManager
from .named import NamedExtensionManager
from .hook import HookManager
from .driver import DriverManager
import logging
# Configure a NullHandler for our log messages in case
# the app we're used from does not set up logging.
LOG = logging.getLogger(__name__)
try:
LOG.addHandler(logging.NullHandler())
except AttributeError:
# No NullHandler, probably python 2.6
pass
Update null log handling for py26
Python 2.6 does not have a NullHandler in the logging
module, so introduce a little class that does the same
work.
Also add __all__ to the package init so extra names are
not exported.
Resolves issue #2
Change-Id: Id59d394cd02372e2c31de336894f06653cb1e22d# flake8: noqa
__all__ = [
'ExtensionManager',
'EnabledExtensionManager',
'NamedExtensionManager',
'HookManager',
'DriverManager',
]
from .extension import ExtensionManager
from .enabled import EnabledExtensionManager
from .named import NamedExtensionManager
from .hook import HookManager
from .driver import DriverManager
import logging
# Configure a NullHandler for our log messages in case
# the app we're used from does not set up logging.
LOG = logging.getLogger('stevedore')
if hasattr(logging, 'NullHandler'):
LOG.addHandler(logging.NullHandler())
else:
class NullHandler(logging.Handler):
def handle(self, record):
pass
def emit(self, record):
pass
def createLock(self):
self.lock = None
LOG.addHandler(NullHandler())
|
<commit_before># flake8: noqa
from .extension import ExtensionManager
from .enabled import EnabledExtensionManager
from .named import NamedExtensionManager
from .hook import HookManager
from .driver import DriverManager
import logging
# Configure a NullHandler for our log messages in case
# the app we're used from does not set up logging.
LOG = logging.getLogger(__name__)
try:
LOG.addHandler(logging.NullHandler())
except AttributeError:
# No NullHandler, probably python 2.6
pass
<commit_msg>Update null log handling for py26
Python 2.6 does not have a NullHandler in the logging
module, so introduce a little class that does the same
work.
Also add __all__ to the package init so extra names are
not exported.
Resolves issue #2
Change-Id: Id59d394cd02372e2c31de336894f06653cb1e22d<commit_after># flake8: noqa
__all__ = [
'ExtensionManager',
'EnabledExtensionManager',
'NamedExtensionManager',
'HookManager',
'DriverManager',
]
from .extension import ExtensionManager
from .enabled import EnabledExtensionManager
from .named import NamedExtensionManager
from .hook import HookManager
from .driver import DriverManager
import logging
# Configure a NullHandler for our log messages in case
# the app we're used from does not set up logging.
LOG = logging.getLogger('stevedore')
if hasattr(logging, 'NullHandler'):
LOG.addHandler(logging.NullHandler())
else:
class NullHandler(logging.Handler):
def handle(self, record):
pass
def emit(self, record):
pass
def createLock(self):
self.lock = None
LOG.addHandler(NullHandler())
|
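The pattern in this commit is the standard way for a library to stay quiet until the host application configures logging; a condensed sketch of the same py2.6-compatible fallback (the library name 'mylib' is illustrative):

import logging

try:
    NullHandler = logging.NullHandler       # Python 2.7+ and 3.x
except AttributeError:
    class NullHandler(logging.Handler):     # Python 2.6 fallback
        def emit(self, record):
            pass

log = logging.getLogger('mylib')
log.addHandler(NullHandler())
# Without any handler, old Pythons would print "No handlers could be
# found for logger 'mylib'"; with the null handler the record is dropped.
log.warning('dropped unless the application sets up logging')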
78b058759eac8239e8b8b377a365cefabcf5c21a
|
algorithm/dynamic_connectivity/unionfind.py
|
algorithm/dynamic_connectivity/unionfind.py
|
#-*- coding:utf-8 -*-
class QuickFind(object):
def __init__(self,objnum):
self.id=[i for i in range(0,objnum)]
def union(self,p,q):
qid=self.id[q]
pid=self.id[p]
j=0
for v in self.id:
if pid==v:
self.id[j]=qid
j=j+1
def connected(self,q,p):
return self.id[q]==self.id[p]
class QuickUnion(object):
def __init__(self,objnum):
self.id=[i for i in range(0,objnum)]
def _root(self,obj):
i=obj
while i!=self.id[i]:
i=self.id[i]
return i
def union(self,p,q):
qroot=self._root(q)
proot=self._root(p)
self.id[proot]=qroot
def connected(self,q,p):
return self._root(q)==self._root(p)
if __name__=='__main__':
#uf=QuickFind(10)
uf=QuickUnion(20)
uf.union(1,4)
uf.union(0,9)
print('1 connected 4',uf.connected(1,4))
print('0 connected 9',uf.connected(0,9))
print('4 connected 3',uf.connected(4,3))
print(uf.id)
print('union 4 to 3')
uf.union(4,3)
print(uf.id)
print('4 connected 3',uf.connected(4,3))
print('1 connected 3',uf.connected(3,1))
|
Add QuickUnion algorithm (a better algorithm)
|
Add QuickUnion algorithm (a better algorithm)
|
Python
|
apache-2.0
|
free-free/algorithm,free-free/algorithm
|
Add QuickUnion algorithm (a better algorithm)
|
#-*- coding:utf-8 -*-
class QuickFind(object):
def __init__(self,objnum):
self.id=[i for i in range(0,objnum)]
def union(self,p,q):
qid=self.id[q]
pid=self.id[p]
j=0
for v in self.id:
if pid==v:
self.id[j]=qid
j=j+1
def connected(self,q,p):
return self.id[q]==self.id[p]
class QuickUnion(object):
def __init__(self,objnum):
self.id=[i for i in range(0,objnum)]
def _root(self,obj):
i=obj
while i!=self.id[i]:
i=self.id[i]
return i
def union(self,p,q):
qroot=self._root(q)
proot=self._root(p)
self.id[proot]=qroot
def connected(self,q,p):
return self._root(q)==self._root(p)
if __name__=='__main__':
#uf=QuickFind(10)
uf=QuickUnion(20)
uf.union(1,4)
uf.union(0,9)
print('1 connected 4',uf.connected(1,4))
print('0 connected 9',uf.connected(0,9))
print('4 connected 3',uf.connected(4,3))
print(uf.id)
print('union 4 to 3')
uf.union(4,3)
print(uf.id)
print('4 connected 3',uf.connected(4,3))
print('1 connected 3',uf.connected(3,1))
|
<commit_before><commit_msg>Add QuickUnion algorithm (a better algorithm)<commit_after>
|
#-*- coding:utf-8 -*-
class QuickFind(object):
def __init__(self,objnum):
self.id=[i for i in range(0,objnum)]
def union(self,p,q):
qid=self.id[q]
pid=self.id[p]
j=0
for v in self.id:
if pid==v:
self.id[j]=qid
j=j+1
def connected(self,q,p):
return self.id[q]==self.id[p]
class QuickUnion(object):
def __init__(self,objnum):
self.id=[i for i in range(0,objnum)]
def _root(self,obj):
i=obj
while i!=self.id[i]:
i=self.id[i]
return i
def union(self,p,q):
qroot=self._root(q)
proot=self._root(p)
self.id[proot]=qroot
def connected(self,q,p):
return self._root(q)==self._root(p)
if __name__=='__main__':
#uf=QuickFind(10)
uf=QuickUnion(20)
uf.union(1,4)
uf.union(0,9)
print('1 connected 4',uf.connected(1,4))
print('0 connected 9',uf.connected(0,9))
print('4 connected 3',uf.connected(4,3))
print(uf.id)
print('union 4 to 3')
uf.union(4,3)
print(uf.id)
print('4 connected 3',uf.connected(4,3))
print('1 connected 3',uf.connected(3,1))
|
Add QuickUnion algorithm (a better algorithm)#-*- coding:utf-8 -*-
class QuickFind(object):
def __init__(self,objnum):
self.id=[i for i in range(0,objnum)]
def union(self,p,q):
qid=self.id[q]
pid=self.id[p]
j=0
for v in self.id:
if pid==v:
self.id[j]=qid
j=j+1
def connected(self,q,p):
return self.id[q]==self.id[p]
class QuickUnion(object):
def __init__(self,objnum):
self.id=[i for i in range(0,objnum)]
def _root(self,obj):
i=obj
while i!=self.id[i]:
i=self.id[i]
return i
def union(self,p,q):
qroot=self._root(q)
proot=self._root(p)
self.id[proot]=qroot
def connected(self,q,p):
return self._root(q)==self._root(p)
if __name__=='__main__':
#uf=QuickFind(10)
uf=QuickUnion(20)
uf.union(1,4)
uf.union(0,9)
print('1 connected 4',uf.connected(1,4))
print('0 connected 9',uf.connected(0,9))
print('4 connected 3',uf.connected(4,3))
print(uf.id)
print('union 4 to 3')
uf.union(4,3)
print(uf.id)
print('4 connected 3',uf.connected(4,3))
print('1 connected 3',uf.connected(3,1))
|
<commit_before><commit_msg>Add QuickUnion algorithm (a better algorithm)<commit_after>#-*- coding:utf-8 -*-
class QuickFind(object):
def __init__(self,objnum):
self.id=[i for i in range(0,objnum)]
def union(self,p,q):
qid=self.id[q]
pid=self.id[p]
j=0
for v in self.id:
if pid==v:
self.id[j]=qid
j=j+1
def connected(self,q,p):
return self.id[q]==self.id[p]
class QuickUnion(object):
def __init__(self,objnum):
self.id=[i for i in range(0,objnum)]
def _root(self,obj):
i=obj
while i!=self.id[i]:
i=self.id[i]
return i
def union(self,p,q):
qroot=self._root(q)
proot=self._root(p)
self.id[proot]=qroot
def connected(self,q,p):
return self._root(q)==self._root(p)
if __name__=='__main__':
#uf=QuickFind(10)
uf=QuickUnion(20)
uf.union(1,4)
uf.union(0,9)
print('1 connected 4',uf.connected(1,4))
print('0 connected 9',uf.connected(0,9))
print('4 connected 3',uf.connected(4,3))
print(uf.id)
print('union 4 to 3')
uf.union(4,3)
print(uf.id)
print('4 connected 3',uf.connected(4,3))
print('1 connected 3',uf.connected(3,1))
|
|
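QuickUnion above still degrades to tall trees in the worst case; the usual next refinement, not part of this commit, is union by size plus path compression, sketched here in the same style:

class WeightedQuickUnion(object):
    def __init__(self, objnum):
        self.id = list(range(objnum))
        self.size = [1] * objnum
    def _root(self, i):
        while i != self.id[i]:
            self.id[i] = self.id[self.id[i]]  # path halving
            i = self.id[i]
        return i
    def union(self, p, q):
        proot, qroot = self._root(p), self._root(q)
        if proot == qroot:
            return
        if self.size[proot] < self.size[qroot]:
            proot, qroot = qroot, proot
        self.id[qroot] = proot                # smaller tree goes under larger
        self.size[proot] += self.size[qroot]
    def connected(self, p, q):
        return self._root(p) == self._root(q)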
4d2a513794ab61c1a6264807daaac9946cfebd25
|
article/migrations/0012_alter_content_field_to_have_verbose_name.py
|
article/migrations/0012_alter_content_field_to_have_verbose_name.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import wagtail.wagtailcore.fields
class Migration(migrations.Migration):
dependencies = [
('article', '0011_alter_content_and_modular_content_fields'),
]
operations = [
migrations.AlterField(
model_name='article',
name='content',
field=wagtail.wagtailcore.fields.RichTextField(verbose_name="Content - Deprecated. Use 'MODULAR CONTENT' instead.", blank=True),
),
]
|
Add migration for article content field verbose name introduction
|
Add migration for article content field verbose name introduction
|
Python
|
bsd-3-clause
|
PARINetwork/pari,PARINetwork/pari,PARINetwork/pari,PARINetwork/pari
|
Add migration for article content field verbose name introduction
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import wagtail.wagtailcore.fields
class Migration(migrations.Migration):
dependencies = [
('article', '0011_alter_content_and_modular_content_fields'),
]
operations = [
migrations.AlterField(
model_name='article',
name='content',
field=wagtail.wagtailcore.fields.RichTextField(verbose_name="Content - Deprecated. Use 'MODULAR CONTENT' instead.", blank=True),
),
]
|
<commit_before><commit_msg>Add migration for article content field verbose name introduction<commit_after>
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import wagtail.wagtailcore.fields
class Migration(migrations.Migration):
dependencies = [
('article', '0011_alter_content_and_modular_content_fields'),
]
operations = [
migrations.AlterField(
model_name='article',
name='content',
field=wagtail.wagtailcore.fields.RichTextField(verbose_name="Content - Deprecated. Use 'MODULAR CONTENT' instead.", blank=True),
),
]
|
Add migration for article content field verbose name introduction# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import wagtail.wagtailcore.fields
class Migration(migrations.Migration):
dependencies = [
('article', '0011_alter_content_and_modular_content_fields'),
]
operations = [
migrations.AlterField(
model_name='article',
name='content',
field=wagtail.wagtailcore.fields.RichTextField(verbose_name="Content - Deprecated. Use 'MODULAR CONTENT' instead.", blank=True),
),
]
|
<commit_before><commit_msg>Add migration for article content field verbose name introduction<commit_after># -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import wagtail.wagtailcore.fields
class Migration(migrations.Migration):
dependencies = [
('article', '0011_alter_content_and_modular_content_fields'),
]
operations = [
migrations.AlterField(
model_name='article',
name='content',
field=wagtail.wagtailcore.fields.RichTextField(verbose_name="Content - Deprecated. Use 'MODULAR CONTENT' instead.", blank=True),
),
]
|
|
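Django emits an AlterField operation like the one above whenever a field option such as verbose_name changes, because field options are part of tracked model state. A hypothetical sketch of the model-side edit that makemigrations would turn into this migration (the app layout is assumed):

from django.db import models
from wagtail.wagtailcore.fields import RichTextField

class Article(models.Model):
    # Flagging the legacy field as deprecated via verbose_name keeps the
    # stored data while steering editors toward the modular content field.
    content = RichTextField(
        verbose_name="Content - Deprecated. Use 'MODULAR CONTENT' instead.",
        blank=True,
    )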
31e812c2abd7f7e8fd26dc36a87ce17c1ec75f86
|
examples/single_frame_datalogger.py
|
examples/single_frame_datalogger.py
|
from pymoku import Moku, MokuException, NoDataException
from pymoku.instruments import *
import time, logging, traceback
logging.basicConfig(format='%(asctime)s:%(name)s:%(levelname)s::%(message)s')
logging.getLogger('pymoku').setLevel(logging.DEBUG)
# Use Moku.get_by_serial() or get_by_name() if you don't know the IP
m = Moku.get_by_name('example')
i = m.discover_instrument()
if i is None or i.type != 'oscilloscope':
print "No or wrong instrument deployed"
i = Oscilloscope()
m.attach_instrument(i)
else:
print "Attached to existing Oscilloscope"
try:
	# In this case, we set the underlying oscilloscope into Roll mode then wait a bit to
# acquire samples. One could also leave the oscilloscope in whatever other X Mode they
# wished, pause the acquisition then stream from there to retrieve the full-rate version
# of a normal oscilloscope frame.
i.set_defaults()
i.set_samplerate(10)
i.set_xmode(OSC_ROLL)
i.commit()
i.datalogger_stop()
time.sleep(5)
# Could also save to a file then use datalogger_upload(), but grabbing the data directly
# from the network is cleaner
i.datalogger_start_single(filetype='net')
while True:
ch, idx, d = i.datalogger_get_samples(timeout=5)
print "Received samples %d to %d from channel %d" % (idx, idx + len(d), ch)
except NoDataException as e:
# This will be raised if we try and get samples but the session has finished.
print e
except Exception as e:
print traceback.format_exc()
finally:
i.datalogger_stop()
m.close()
|
Add a simple single-frame data acquisition example
|
Examples: Add a simple single-frame data acquisition example
|
Python
|
mit
|
liquidinstruments/pymoku,benizl/pymoku
|
Examples: Add a simple single-frame data acquisition example
|
from pymoku import Moku, MokuException, NoDataException
from pymoku.instruments import *
import time, logging, traceback
logging.basicConfig(format='%(asctime)s:%(name)s:%(levelname)s::%(message)s')
logging.getLogger('pymoku').setLevel(logging.DEBUG)
# Use Moku.get_by_serial() or get_by_name() if you don't know the IP
m = Moku.get_by_name('example')
i = m.discover_instrument()
if i is None or i.type != 'oscilloscope':
print "No or wrong instrument deployed"
i = Oscilloscope()
m.attach_instrument(i)
else:
print "Attached to existing Oscilloscope"
try:
	# In this case, we set the underlying oscilloscope into Roll mode then wait a bit to
# acquire samples. One could also leave the oscilloscope in whatever other X Mode they
# wished, pause the acquisition then stream from there to retrieve the full-rate version
# of a normal oscilloscope frame.
i.set_defaults()
i.set_samplerate(10)
i.set_xmode(OSC_ROLL)
i.commit()
i.datalogger_stop()
time.sleep(5)
# Could also save to a file then use datalogger_upload(), but grabbing the data directly
# from the network is cleaner
i.datalogger_start_single(filetype='net')
while True:
ch, idx, d = i.datalogger_get_samples(timeout=5)
print "Received samples %d to %d from channel %d" % (idx, idx + len(d), ch)
except NoDataException as e:
# This will be raised if we try and get samples but the session has finished.
print e
except Exception as e:
print traceback.format_exc()
finally:
i.datalogger_stop()
m.close()
|
<commit_before><commit_msg>Examples: Add a simple single-frame data acquisition example<commit_after>
|
from pymoku import Moku, MokuException, NoDataException
from pymoku.instruments import *
import time, logging, traceback
logging.basicConfig(format='%(asctime)s:%(name)s:%(levelname)s::%(message)s')
logging.getLogger('pymoku').setLevel(logging.DEBUG)
# Use Moku.get_by_serial() or get_by_name() if you don't know the IP
m = Moku.get_by_name('example')
i = m.discover_instrument()
if i is None or i.type != 'oscilloscope':
print "No or wrong instrument deployed"
i = Oscilloscope()
m.attach_instrument(i)
else:
print "Attached to existing Oscilloscope"
try:
	# In this case, we set the underlying oscilloscope into Roll mode then wait a bit to
# acquire samples. One could also leave the oscilloscope in whatever other X Mode they
# wished, pause the acquisition then stream from there to retrieve the full-rate version
# of a normal oscilloscope frame.
i.set_defaults()
i.set_samplerate(10)
i.set_xmode(OSC_ROLL)
i.commit()
i.datalogger_stop()
time.sleep(5)
# Could also save to a file then use datalogger_upload(), but grabbing the data directly
# from the network is cleaner
i.datalogger_start_single(filetype='net')
while True:
ch, idx, d = i.datalogger_get_samples(timeout=5)
print "Received samples %d to %d from channel %d" % (idx, idx + len(d), ch)
except NoDataException as e:
# This will be raised if we try and get samples but the session has finished.
print e
except Exception as e:
print traceback.format_exc()
finally:
i.datalogger_stop()
m.close()
|
Examples: Add a simple single-frame data acquisition examplefrom pymoku import Moku, MokuException, NoDataException
from pymoku.instruments import *
import time, logging, traceback
logging.basicConfig(format='%(asctime)s:%(name)s:%(levelname)s::%(message)s')
logging.getLogger('pymoku').setLevel(logging.DEBUG)
# Use Moku.get_by_serial() or get_by_name() if you don't know the IP
m = Moku.get_by_name('example')
i = m.discover_instrument()
if i is None or i.type != 'oscilloscope':
print "No or wrong instrument deployed"
i = Oscilloscope()
m.attach_instrument(i)
else:
print "Attached to existing Oscilloscope"
try:
	# In this case, we set the underlying oscilloscope into Roll mode then wait a bit to
# acquire samples. One could also leave the oscilloscope in whatever other X Mode they
# wished, pause the acquisition then stream from there to retrieve the full-rate version
# of a normal oscilloscope frame.
i.set_defaults()
i.set_samplerate(10)
i.set_xmode(OSC_ROLL)
i.commit()
i.datalogger_stop()
time.sleep(5)
# Could also save to a file then use datalogger_upload(), but grabbing the data directly
# from the network is cleaner
i.datalogger_start_single(filetype='net')
while True:
ch, idx, d = i.datalogger_get_samples(timeout=5)
print "Received samples %d to %d from channel %d" % (idx, idx + len(d), ch)
except NoDataException as e:
# This will be raised if we try and get samples but the session has finished.
print e
except Exception as e:
print traceback.format_exc()
finally:
i.datalogger_stop()
m.close()
|
<commit_before><commit_msg>Examples: Add a simple single-frame data acquisition example<commit_after>from pymoku import Moku, MokuException, NoDataException
from pymoku.instruments import *
import time, logging, traceback
logging.basicConfig(format='%(asctime)s:%(name)s:%(levelname)s::%(message)s')
logging.getLogger('pymoku').setLevel(logging.DEBUG)
# Use Moku.get_by_serial() or get_by_name() if you don't know the IP
m = Moku.get_by_name('example')
i = m.discover_instrument()
if i is None or i.type != 'oscilloscope':
print "No or wrong instrument deployed"
i = Oscilloscope()
m.attach_instrument(i)
else:
print "Attached to existing Oscilloscope"
try:
	# In this case, we set the underlying oscilloscope into Roll mode then wait a bit to
# acquire samples. One could also leave the oscilloscope in whatever other X Mode they
# wished, pause the acquisition then stream from there to retrieve the full-rate version
# of a normal oscilloscope frame.
i.set_defaults()
i.set_samplerate(10)
i.set_xmode(OSC_ROLL)
i.commit()
i.datalogger_stop()
time.sleep(5)
# Could also save to a file then use datalogger_upload(), but grabbing the data directly
# from the network is cleaner
i.datalogger_start_single(filetype='net')
while True:
ch, idx, d = i.datalogger_get_samples(timeout=5)
print "Received samples %d to %d from channel %d" % (idx, idx + len(d), ch)
except NoDataException as e:
# This will be raised if we try and get samples but the session has finished.
print e
except Exception as e:
print traceback.format_exc()
finally:
i.datalogger_stop()
m.close()
|
|
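The streaming loop above only prints the sample ranges it receives; a hypothetical extension that persists them to disk, reusing just the calls shown in the example (the file name and column layout are assumptions):

import csv

with open('samples.csv', 'w') as f:
    writer = csv.writer(f)
    writer.writerow(['channel', 'index', 'value'])
    try:
        while True:
            ch, idx, d = i.datalogger_get_samples(timeout=5)
            for offset, value in enumerate(d):
                writer.writerow([ch, idx + offset, value])
    except NoDataException:
        pass  # single-shot session exhausted; every sample was written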
0c6930f1930dbba66ba928dab4ed195e6b6bf2cc
|
addons/crm/__terp__.py
|
addons/crm/__terp__.py
|
{
"name" : "Customer & Supplier Relationship Management",
"version" : "1.0",
"author" : "Tiny",
"website" : "http://tinyerp.com/module_crm.html",
"category" : "Generic Modules/CRM & SRM",
"description": """The Tiny ERP case and request tracker enables a group of
people to intelligently and efficiently manage tasks, issues, and requests.
It manages key tasks such as communication, identification, prioritization,
assignment, resolution and notification.
Tiny ERP ensures that all cases are successfully tracked by users, customers and
suppliers. It can automatically send reminders, escalate the request, trigger
specific methods and lots of other actions based on your enterprise's own rules.
The greatest thing about this system is that users don't need to do anything
special. They can just send email to the request tracker. Tiny ERP will take
care of thanking them for their message, automatically routing it to the
appropriate staff, and making sure all future correspondence gets to the right
place.
The CRM module has an email gateway for the synchronisation interface
between mails and Tiny ERP.""",
"depends" : ["base", "account"],
"init_xml" : ["crm_data.xml"],
"demo_xml" : ["crm_demo.xml"],
"update_xml" : ["crm_view.xml", "crm_report.xml", "crm_wizard.xml"],
"active": False,
"installable": True
}
|
{
"name" : "Customer & Supplier Relationship Management",
"version" : "1.0",
"author" : "Tiny",
"website" : "http://tinyerp.com/module_crm.html",
"category" : "Generic Modules/CRM & SRM",
"description": """The Tiny ERP case and request tracker enables a group of
people to intelligently and efficiently manage tasks, issues, and requests.
It manages key tasks such as communication, identification, prioritization,
assignment, resolution and notification.
Tiny ERP ensures that all cases are successfully tracked by users, customers and
suppliers. It can automatically send reminders, escalate the request, trigger
specific methods and lots of other actions based on your enterprise's own rules.
The greatest thing about this system is that users don't need to do anything
special. They can just send email to the request tracker. Tiny ERP will take
care of thanking them for their message, automatically routing it to the
appropriate staff, and making sure all future correspondence gets to the right
place.
The CRM module has an email gateway for the synchronisation interface
between mails and Tiny ERP.""",
"depends" : ["base", "account"],
"init_xml" : ["crm_data.xml"],
"demo_xml" : ["crm_demo.xml"],
"update_xml" : ["crm_view.xml", "crm_report.xml", "crm_wizard.xml","crm_security.xml"],
"active": False,
"installable": True
}
|
Add crm_security.xml file entry in update_xml section
|
Add crm_security.xml file entry in update_xml section
bzr revid: mga@tinyerp.com-80638551c5a66adf0a49181f6ff6ae283ced3709
|
Python
|
agpl-3.0
|
rahuldhote/odoo,CopeX/odoo,CopeX/odoo,JonathanStein/odoo,colinnewell/odoo,Codefans-fan/odoo,mustafat/odoo-1,shivam1111/odoo,CubicERP/odoo,andreparames/odoo,gdgellatly/OCB1,PongPi/isl-odoo,Nick-OpusVL/odoo,damdam-s/OpenUpgrade,ecosoft-odoo/odoo,andreparames/odoo,grap/OCB,NL66278/OCB,Daniel-CA/odoo,OpusVL/odoo,luistorresm/odoo,diagramsoftware/odoo,klunwebale/odoo,apanju/odoo,dsfsdgsbngfggb/odoo,Elico-Corp/odoo_OCB,jolevq/odoopub,Daniel-CA/odoo,draugiskisprendimai/odoo,odoousers2014/odoo,hassoon3/odoo,alexcuellar/odoo,srimai/odoo,cpyou/odoo,VielSoft/odoo,apocalypsebg/odoo,Nowheresly/odoo,rowemoore/odoo,makinacorpus/odoo,hip-odoo/odoo,rahuldhote/odoo,alexcuellar/odoo,ramadhane/odoo,dfang/odoo,Adel-Magebinary/odoo,tvtsoft/odoo8,CatsAndDogsbvba/odoo,hoatle/odoo,Gitlab11/odoo,bkirui/odoo,jiachenning/odoo,waytai/odoo,CubicERP/odoo,minhtuancn/odoo,ShineFan/odoo,stonegithubs/odoo,ChanduERP/odoo,tvibliani/odoo,Grirrane/odoo,abenzbiria/clients_odoo,mmbtba/odoo,stephen144/odoo,fuhongliang/odoo,Noviat/odoo,idncom/odoo,set forth in the source; the remainder of this repos list continues unchanged as in the original record,acshan/odoo,jfpla/odoo,dezynetechnologies/odoo,cedk/odoo,dllsf/odootest,prospwro/odoo,hbrunn/OpenUpgrade,frouty/odoo_oph,hanicker/odoo,BT-fgarbely/odoo,factorlibre/OCB,grap/OCB,elmerdpadilla/iv,janocat/odoo,n0m4dz/odoo,virgree/odoo,shingonoide/odoo,juanalfonsopr/odoo,Nowheresly/odoo,collex100/odoo,jiangzhixiao/odoo,jusdng/odoo,rowemoore/odoo,nuncjo/odoo,savoirfairelinux/odoo,gsmartway/odoo,inspyration/odoo,Antiun/odoo,Danisan/odoo-1,TRESCLOUD/odoopub,0k/OpenUpgrade,janocat/odoo,microcom/odoo,dfang/odoo,rschnapka/odoo,bakhtout/odoo-educ,NL66278/OCB,guerrerocarlos/odoo,abdellatifkarroum/odoo,gdgellatly/OCB1,frouty/odoogoeen,odoousers2014/odoo,ramadhane/odoo,salaria/odoo,Maspear/odoo,Kilhog/odoo,guerrerocarlos/odoo,osvalr/odoo,PongPi/isl-odoo,sadleader/odoo,patmcb/odoo,xujb/odoo,hubsaysnuaa/odoo,luistorresm/odoo,ChanduERP/odoo,rgeleta/odoo,numerigraphe/odoo,chiragjogi/odoo,xzYue/odoo,Drooids/odoo,dfang/odoo,glovebx/odoo,dfang/odoo,shaufi/odoo,BT-ojossen/odoo,provaleks/o8,Grirrane/odoo,optima-ict/odoo,cpyou/odoo,dariemp/odoo,nhomar/odoo-mirror,pedrobaeza/OpenUpgrade,abdellatifkarroum/odoo,tinkhaven-organization/odoo,luistorresm/odoo,xujb/odoo,nuuuboo/odoo,Ernesto99/odoo,demon-ru/iml-crm,cedk/odoo,BT-rmartin/odoo,nexiles/odoo,Eric-Zhong/odoo,havt/odoo,RafaelTorrealba/odoo,jpshort/odoo,wangjun/odoo,cysnake4713/odoo,tinkhaven-organization/odoo,javierTerry/odoo,shingonoide/odoo,havt/odoo,tvibliani/odoo,alexcuellar/odoo,ccomb/OpenUpgrade,hubsaysnuaa/odoo,elmerdpadilla/iv,bobisme/odoo,rgeleta/odoo,diagramsoftware/odoo,jfpla/odoo,Danisan/odoo-1,vrenaville/ngo-addons-backport,nexiles/odoo,wangjun/odoo,hopeall/odoo,waytai/odoo,oliverhr/odoo,cysnake4713/odoo,sebalix/OpenUpgrade,cpyou/odoo,codekaki/odoo,joshuajan/odoo,agrista/odoo-saas,mvaled/OpenUpgrade,srsman/odoo,n0m4dz/odoo,synconics/odoo,QianBIG/odoo,takis/odoo,abenzbiria/clients_odoo,shivam1111/odoo,ChanduERP/odoo,blaggacao/OpenUpgrade,mvaled/OpenUpgrade,charbeljc/OCB,QianBIG/odoo,dalegregory/odoo,numerigraphe/odoo,shaufi10/odoo,ShineFan/odoo,fjbatresv/odoo,ingadhoc/odoo,VielSoft/odoo,ramitalat/odoo,lombritz/odoo,sergio-incaser/odoo,windedge/odoo,Endika/odoo,brijeshkesariya/odoo,ClearCorp-dev/odoo,RafaelTorrealba/odoo,virgree/odoo,charbeljc/OCB,srimai/odoo,sv-dev1/odoo,factorlibre/OCB,savoirfairelinux/OpenUpgrade,agrista/odoo-saas,nhomar/odoo-mirror,acshan/odoo,FlorianLudwig/odoo,deKupini/erp,mlaitinen/odoo,ThinkOpen-Solutions/odoo,shaufi/odoo,bwrsandman/OpenUpgrade,idncom/odoo,cloud9UG/odoo,Ichag/odoo,BT-astauder/odoo,nitinitprof/odoo,nuuuboo/odoo,papouso/odoo,fevxie/odoo,wangjun/odoo,massot/odoo,kirca/OpenUpgrade,gavin-feng/odoo,Endika/OpenUpgrade,KontorConsulting/odoo,damdam-s/OpenUpgrade,andreparames/odoo,windedge/odoo,dariemp/odoo,funkring/fdoo,tangyiyong/odoo,sebalix/OpenUpgrade,sinbazhou/odoo,OpenPymeMx/OCB,steedos/odoo,GauravSahu/odoo,omprakasha/odoo,OpenUpgrade-dev/OpenUpgrade,odootr/odoo,alexcuellar/odoo,tinkhaven-organization/odoo,JonathanStein/odoo,hifly/OpenUpgrade,BT-rmartin/odoo,syci/OCB,rdeheele/odoo,ingadhoc/odoo,savoirfairelinux/OpenUpgrade,agrista/odoo-saas,Noviat/odoo,tinkerthaler/odoo,bealdav/OpenUpgrade,pplatek/odoo,podemos-info/odoo,srimai/odoo,NeovaHealth/odoo,FlorianLudwig/odoo,joariasl/odoo,fevxie/odoo,Endika/OpenUpgrade,sadleader/odoo,aviciimaxwell/odoo,laslabs/odoo,Nick-OpusVL/odoo,dgzurita/odoo,OpenUpgrade-dev/OpenUpgrade,mkieszek/odoo,KontorConsulting/odoo,Endika/OpenUpgrade,hoatle/odoo,thanhacun/odoo,draugiskisprendimai/odoo,nhomar/odoo,gavin-feng/odoo,Codefans-fan/odoo,pedrobaeza/odoo,florian-dacosta/OpenUpgrade,slevenhagen/odoo,GauravSahu/odoo,charbeljc/OCB,dezynetechnologies/odoo,datenbetrieb/odoo,ihsanudin/odoo,cloud9UG/odoo,CubicERP/odoo,Nowheresly/odoo,provaleks/o8,Ernesto99/odoo,lightcn/odoo,zchking/odoo,mszewczy/odoo,tangyiyong/odoo,sve-odoo/odoo,abenzbiria/clients_odoo,hifly/OpenUpgrade,fuselock/odoo,storm-computers/odoo,collex100/odoo,mlaitinen/odoo,hmen89/odoo,JCA-Developpement/Odoo,ChanduERP/odoo,inspyration/odoo,mkieszek/odoo,nagyistoce/odoo-dev-odoo,incaser/odoo-odoo,bkirui/odoo,shaufi10/odoo,goliveirab/odoo,ShineFan/odoo,kybriainfotech/iSocioCRM,pedrobaeza/OpenUpgrade,bealdav/OpenUpgrade,ThinkOpen-Solutions/odoo,AuyaJackie/odoo,FlorianLudwig/odoo,christophlsa/odoo,shingonoide/odoo,dgzurita/odoo,cedk/odoo,QianBIG/odoo,stonegithubs/odoo,tangyiyong/odoo,BT-ojossen/odoo,idncom/odoo,agrista/odoo-saas,rowemoore/odoo,rschnapka/odoo,shaufi10/odoo,lombritz/odoo,factorlibre/OCB,luiseduardohdbackup/odoo,rubencabrera/odoo,cysnake4713/odoo,florentx/OpenUpgrade,dgzurita/odoo,Daniel-CA/odoo,patmcb/odoo,andreparames/odoo,ehirt/odoo,takis/odoo,hanicker/odoo,microcom/odoo,0k/OpenUpgrade,mvaled/OpenUpgrade,OpenPymeMx/OCB,Maspear/odoo,dkubiak789/odoo,kifcaliph/odoo,zchking/odoo,andreparames/odoo,srimai/odoo,colinnewell/odoo,odoo-turkiye/odoo,colinnewell/odoo,0k/OpenUpgrade,funkring/fdoo,christophlsa/odoo,leoliujie/odoo,guewen/OpenUpgrade,janocat/odoo,naousse/odoo,javierTerry/odoo,Eric-Zhong/odoo,incaser/odoo-odoo,matrixise/odoo,tinkhaven-organization/odoo,Ichag/odoo,ojengwa/odoo,microcom/odoo,sinbazhou/odoo,realsaiko/odoo,patmcb/odoo,apocalypsebg/odoo,savoirfairelinux/odoo,erkrishna9/odoo,grap/OpenUpgrade,stephen144/odoo,sv-dev1/odoo,OSSESAC/odoopubarquiluz,luistorresm/odoo,csrocha/OpenUpgrade,MarcosCommunity/odoo,tarzan0820/odoo,ramitalat/odoo,sergio-incaser/odoo,zchking/odoo,alexteodor/odoo,diagramsoftware/odoo,optima-ict/odoo,aviciimaxwell/odoo,hassoon3/odoo,doomsterinc/odoo,bplancher/odoo,thanhacun/odoo,sebalix/OpenUpgrade,AuyaJackie/odoo,OpenPymeMx/OCB,apanju/odoo,gorjuce/odoo,kittiu/odoo,ygol/odoo,Nowheresly/odoo,arthru/OpenUpgrade,apanju/GMIO_Odoo,Kilhog/odoo,odoo-turkiye/odoo,agrista/odoo-saas,dalegregory/odoo,alqfahad/odoo,bealdav/OpenUpgrade,RafaelTorrealba/odoo,Daniel-CA/odoo,Antiun/odoo,syci/OCB,virgree/odoo,dezynetechnologies/odoo,Antiun/odoo,PongPi/isl-odoo,OSSESAC/odoopubarquiluz,rdeheele/odoo,osvalr/odoo,rschnapka/odoo,florentx/OpenUpgrade,Danisan/odoo-1,erkrishna9/odoo,PongPi/isl-odoo,provaleks/o8,OpenPymeMx/OCB,christophlsa/odoo,n0m4dz/odoo,leorochael/odoo,SAM-IT-SA/odoo,xzYue/odoo,odootr/odoo,oasiswork/odoo,pedrobaeza/odoo,apanju/GMIO_Odoo,cpyou/odoo,ujjwalwahi/odoo,storm-computers/odoo,makinacorpus/odoo,acshan/odoo,ThinkOpen-Solutions/odoo,colinnewell/odoo,kybriainfotech/iSocioCRM,hoatle/odoo,thanhacun/odoo,provaleks/o8,shaufi10/odoo,jiangzhixiao/odoo,hopeall/odoo,jiangzhixiao/odoo,ClearCorp-dev/odoo,Adel-Magebinary/odoo,waytai/odoo,janocat/odoo,JGarcia-Panach/odoo,bkirui/odoo,takis/odoo,minhtuancn/odoo,simongoffin/website_version,rahuldhote/odoo,markeTIC/OCB,codekaki/odoo,apanju/odoo,nhomar/odoo,vrenaville/ngo-addons-backport,brijeshkesariya/odoo,Gitlab11/odoo,odootr/odoo,incaser/odoo-odoo,tangyiyong/odoo,grap/OCB,grap/OpenUpgrade,fjbatresv/odoo,tarzan0820/odoo,SerpentCS/odoo,ecosoft-odoo/odoo,kifcaliph/odoo,xzYue/odoo,jaxkodex/odoo,ubic135/odoo-design,mustafat/odoo-1,vnsofthe/odoo,patmcb/odoo,dariemp/odoo,odootr/odoo,joshuajan/odoo,VitalPet/odoo,dllsf/odootest,minhtuancn/odoo,odooindia/odoo,florentx/OpenUpgrade,SerpentCS/odoo,jesramirez/odoo,arthru/OpenUpgrade,sebalix/OpenUpgrade,Grirrane/odoo,vrenaville/ngo-addons-backport,tinkhaven-organization/odoo,bplancher/odoo,MarcosCommunity/odoo,vnsofthe/odoo,hubsaysnuaa/odoo,pedrobaeza/OpenUpgrade,dalegregory/odoo,hubsaysnuaa/odoo,JCA-Developpement/Odoo,savoirfairelinux/odoo,dkubiak789/odoo,slevenhagen/odoo-npg,Drooids/odoo,charbeljc/OCB,joariasl/odoo,JonathanStein/odoo,matrixise/odoo,Adel-Magebinary/odoo,MarcosCommunity/odoo,jiachenning/odoo,incaser/odoo-odoo,hmen89/odoo,sadleader/odoo,apanju/GMIO_Odoo,Maspear/odoo,Bachaco-ve/odoo,BT-astauder/odoo,goliveirab/odoo,realsaiko/odoo,ehirt/odoo,diagramsoftware/odoo,pedrobaeza/odoo,hbrunn/OpenUpgrade,sysadminmatmoz/OCB,x111ong/odoo,Endika/odoo,arthru/OpenUpgrade,VitalPet/odoo,provaleks/o8,Elico-Corp/odoo_OCB,takis/odoo,cloud9UG/odoo,nexiles/odoo,SerpentCS/odoo,frouty/odoogoeen,hoatle/odoo,CatsAndDogsbvba/odoo,Antiun/odoo,joshuajan/odoo,sadleader/odoo,OpenUpgrade/OpenUpgrade,mszewczy/odoo,fgesora/odoo,ecosoft-odoo/odoo,demon-ru/iml-crm,christophlsa/odoo,apocalypsebg/odoo,avoinsystems/odoo,tinkhaven-organization/odoo,stonegithubs/odoo,synconics/odoo,fgesora/odoo,gavin-feng/odoo,vrenaville/ngo-addons-backport,incaser/odoo-odoo,SAM-IT-SA/odoo,srsman/odoo,VitalPet/odoo,dsfsdgsbngfggb/odoo,OpenPymeMx/OCB,feroda/odoo,alexteodor/odoo,hanicker/odoo,PongPi/isl-odoo,ovnicraft/odoo,naousse/odoo,highco-groupe/odoo,bakhtout/odoo-educ,BT-astauder/odoo,Bachaco-ve/odoo,xzYue/odoo,x111ong/odoo,rubencabrera/odoo,thanhacun/odoo,lgscofield/odoo,odoousers2014/odoo,VielSoft/odoo,ccomb/OpenUpgrade,Danisan/odoo-1,spadae22/odoo,stonegithubs/odoo,OpenUpgrade/OpenUpgrade,draugiskisprendimai/odoo,bobisme/odoo,spadae22/odoo,jolevq/odoopub,synconics/odoo,sadleader/odoo,lightcn/odoo,microcom/odoo,storm-computers/odoo,gavin-feng/odoo,sv-dev1/odoo,OpenUpgrade/OpenUpgrade,alhashash/odoo,hmen89/odoo,tangyiyong/odoo,waytai/odoo,oasiswork/odoo,abenzbiria/clients_odoo,mmbtba/odoo,odoo-turkiye/odoo,eino-makitalo/odoo,mlaitinen/odoo,Ichag/odoo,osvalr/odoo,syci/OCB,x111ong/odoo,gsmartway/odoo,jeasoft/odoo,windedge/odoo,eino-makitalo/odoo,apocalypsebg/odoo,Adel-Magebinary/odoo,hoatle/odoo,chiragjogi/odoo,RafaelTorrealba/odoo,srsman/odoo,jfpla/odoo,shingonoide/odoo,x111ong/odoo,mmbtba/odoo,KontorConsulting/odoo,CopeX/odoo,cedk/odoo,juanalfonsopr/odoo,ApuliaSoftware/odoo,nitinitprof/odoo,gdgellatly/OCB1,salaria/odoo,slevenhagen/odoo-npg,ujjwalwahi/odoo,jusdng/odoo,OpenUpgrade/OpenUpgrade,virgree/odoo,Daniel-CA/odoo,syci/OCB,nhomar/odoo,dgzurita/odoo,nagyistoce/odoo-dev-odoo,deKupini/erp,fevxie/odoo,omprakasha/odoo,savoirfairelinux/odoo,Elico-Corp/odoo_OCB,csrocha/OpenUpgrade,Ichag/odoo,Drooids/odoo,bobisme/odoo,prospwro/odoo,gavin-feng/odoo,gvb/odoo,bwrsandman/OpenUpgrade,jfpla/odoo,steedos/odoo,gsmartway/odoo,ojengwa/odoo,eino-makitalo/odoo,havt/odoo,incaser/odoo-odoo,Gitlab11/odoo,mlaitinen/odoo,hopeall/odoo,datenbetrieb/odoo,FlorianLudwig/odoo,ThinkOpen-Solutions/odoo,credativUK/OCB,kittiu/odoo,aviciimaxwell/odoo,salaria/odoo,xujb/odoo,eino-makitalo/odoo,ujjwalwahi/odoo,alqfahad/odoo,kybriainfotech/iSocioCRM,jiachenning/odoo,ecosoft-odoo/odoo,oihane/odoo,jesramirez/odoo,abstract-open-solutions/OCB,factorlibre/OCB,chiragjogi/odoo,oihane/odoo,shaufi10/odoo,Daniel-CA/odoo,deKupini/erp,sergio-incaser/odoo,ovnicraft/odoo,klunwebale/odoo,jpshort/odoo,frouty/odoogoeen,wangjun/odoo,jiangzhixiao/odoo,tinkerthaler/odoo,savoirfairelinux/odoo,odoousers2014/odoo,fjbatresv/odoo,pplatek/odoo,funkring/fdoo,prospwro/odoo,naousse/odoo,NL66278/OCB,CatsAndDogsbvba/odoo,hifly/OpenUpgrade,collex100/odoo,OpenUpgrade-dev/OpenUpgrade,ygol/odoo,idncom/odoo,cdrooom/odoo,joshuajan/odoo,ujjwalwahi/odoo,sinbazhou/odoo,codekaki/odoo,naousse/odoo,0k/OpenUpgrade,deKupini/erp,bwrsandman/OpenUpgrade,steedos/odoo,podemos-info/odoo,klunwebale/odoo,rubencabrera/odoo,xzYue/odoo,odoousers2014/odoo,markeTIC/OCB,fgesora/odoo,grap/OpenUpgrade,erkrishna9/odoo,JGarcia-Panach/odoo,tvibliani/odoo,credativUK/OCB,leorochael/odoo,lombritz/odoo,prospwro/odoo,fuselock/odoo,nitinitprof/odoo,BT-astauder/odoo,Bachaco-ve/odoo,dkubiak789/odoo,javierTerry/odoo,datenbetrieb/odoo,fuselock/odoo,savoirfairelinux/OpenUpgrade,VielSoft/odoo,BT-fgarbely/odoo,gsmartway/odoo,mvaled/OpenUpgrade,fuhongliang/odoo,SAM-IT-SA/odoo,Antiun/odoo,frouty/odoo_oph,alexteodor/odoo,oasiswork/odoo,OpenPymeMx/OCB,dgzurita/odoo,hifly/OpenUpgrade,JGarcia-Panach/odoo,ehirt/odoo,fdvarela/odoo8,Endika/OpenUpgrade,jusdng/odoo,luistorresm/odoo,sinbazhou/odoo,fossoult/odoo,numerigraphe/odoo,Kilhog/odoo,hip-odoo/odoo,elmerdpadilla/iv,JGarcia-Panach/odoo,OSSESAC/odoopubarquiluz,stephen144/odoo,sv-dev1/odoo,VitalPet/odoo,ygol/odoo,inspyration/odoo,datenbetrieb/odoo,markeTIC/OCB,janocat/odoo,NeovaHealth/odoo,shaufi/odoo,oliverhr/odoo,Ernesto99/odoo,ccomb/OpenUpgrade,hassoon3/odoo,ygol/odoo,feroda/odoo,klunwebale/odoo,oihane/odoo,lsinfo/odoo,nuuuboo/odoo,javierTerry/odoo,kirca/OpenUpgrade,camptocamp/ngo-addons-backport,janocat/odoo,QianBIG/odoo,poljeff/odoo,massot/odoo,havt/odoo,juanalfonsopr/odoo,sebalix/OpenUpgrade,sergio-incaser/odoo,javierTerry/odoo,salaria/odoo,gorjuce/odoo,vrenaville/ngo-addons-backport,damdam-s/OpenUpgrade,pplatek/odoo,ygol/odoo,apanju/odoo,kittiu/odoo,hanicker/odoo,hifly/OpenUpgrade,ApuliaSoftware/odoo,srsman/odoo,ccomb/OpenUpgrade,hubsaysnuaa/odoo,mvaled/OpenUpgrade,leorochael/odoo,RafaelTorrealba/odoo,OpenUpgrade/OpenUpgrade,prospwro/odoo,synconics/odoo,jiangzhixiao/odoo,jeasoft/odoo,Codefans-fan/odoo,naousse/odoo,rowemoore/odoo,sysadminmatmoz/OCB,ecosoft-odoo/odoo,SerpentCS/odoo,JCA-Developpement/Odoo,Drooids/odoo,tinkerthaler/odoo,dezynetechnologies/odoo,jusdng/odoo,podemos-info/odoo,jeasoft/odoo,BT-rmartin/odoo,abstract-open-solutions/OCB,jpshort/odoo,rubencabrera/odoo,jaxkodex/odoo,joariasl/odoo,BT-fgarbely/odoo,slevenhagen/odoo,CubicERP/odoo,rschnapka/odoo,CopeX/odoo,ihsanudin/odoo,TRESCLOUD/odoopub,JGarcia-Panach/odoo,glovebx/odoo,glovebx/odoo,apocalypsebg/odoo,Bachaco-ve/odoo,vnsofthe/odoo,OpusVL/odoo,pedrobaeza/odoo,gdgellatly/OCB1,ThinkOpen-Solutions/odoo,x111ong/odoo,sergio-incaser/odoo,nitinitprof/odoo,sve-odoo/odoo,fjbatresv/odoo,blaggacao/OpenUpgrade,sinbazhou/odoo,jpshort/odoo,leoliujie/odoo,gvb/odoo,credativUK/OCB,patmcb/odoo,rubencabrera/odoo,OpenUpgrade-dev/OpenUpgrade,markeTIC/OCB,oasiswork/odoo,mkieszek/odoo,mustafat/odoo-1,brijeshkesariya/odoo,syci/OCB,elmerdpadilla/iv,Eric-Zhong/odoo,bplancher/odoo,florian-dacosta/OpenUpgrade,cloud9UG/odoo,datenbetrieb/odoo,apanju/odoo,dsfsdgsbngfggb/odoo,ClearCorp-dev/odoo,nexiles/odoo,abstract-open-solutions/OCB,dfang/odoo,frouty/odoogoeen,NeovaHealth/odoo,demon-ru/iml-crm,christophlsa/odoo,CatsAndDogsbvba/odoo,draugiskisprendimai/odoo,fdvarela/odoo8,guewen/OpenUpgrade,Kilhog/odoo,mvaled/OpenUpgrade,grap/OCB,oasiswork/odoo,KontorConsulting/odoo,spadae22/odoo,feroda/odoo,Noviat/odoo,KontorConsulting/odoo,shaufi10/odoo,ihsanudin/odoo,lightcn/odoo,thanhacun/odoo,kybriainfotech/iSocioCRM,ovnicraft/odoo,ramadhane/odoo,makinacorpus/odoo,makinacorpus/odoo,tvibliani/odoo,guewen/OpenUpgrade,TRESCLOUD/odoopub,laslabs/odoo,apanju/odoo,waytai/odoo,Nowheresly/odoo,JonathanStein/odoo,MarcosCommunity/odoo,luiseduardohdbackup/odoo,dalegregory/odoo,blaggacao/OpenUpgrade,lightcn/odoo,alhashash/odoo,ojengwa/odoo,Maspear/odoo,havt/odoo,omprakasha/odoo,SAM-IT-SA/odoo,VielSoft/odoo,leorochael/odoo,rgeleta/odoo,ramadhane/odoo,mustafat/odoo-1,omprakasha/odoo,nitinitprof/odoo,florentx/OpenUpgrade,patmcb/odoo,sebalix/OpenUpgrade,ShineFan/odoo,kybriainfotech/iSocioCRM,bplancher/odoo,prospwro/odoo,alexcuellar/odoo,jeasoft/odoo,kirca/OpenUpgrade,shivam1111/odoo,ovnicraft/odoo,AuyaJackie/odoo,gorjuce/odoo,ovnicraft/odoo,funkring/fdoo,grap/OCB,Endika/odoo,ChanduERP/odoo,dsfsdgsbngfggb/odoo,CopeX/odoo,NeovaHealth/odoo,oliverhr/odoo,guerrerocarlos/odoo,goliveirab/odoo,kybriainfotech/iSocioCRM,lightcn/odoo,matrixise/odoo,frouty/odoogoeen,ygol/odoo,Grirrane/odoo,hmen89/odoo,lsinfo/odoo,podemos-info/odoo,storm-computers/odoo,Nick-OpusVL/odoo,slevenhagen/odoo-npg,ingadhoc/odoo,jaxkodex/odoo,jolevq/odoopub,Gitlab11/odoo,fgesora/odoo,eino-makitalo/odoo,tvibliani/odoo,ihsanudin/odoo,addition-it-solutions/project-all,slevenhagen/odoo-npg,idncom/odoo,leoliujie/odoo,lsinfo/odoo,nexiles/odoo,realsaiko/odoo,shivam1111/odoo,oliverhr/odoo,guerrerocarlos/odoo,JCA-Developpement/Odoo,doomsterinc/odoo,apanju/GMIO_Odoo,AuyaJackie/odoo,hbrunn/OpenUpgrade,credativUK/OCB,joariasl/odoo,bguillot/OpenUpgrade,gdgellatly/OCB1,rdeheele/odoo,Ichag/odoo,idncom/odoo,cedk/odoo,tinkerthaler/odoo,nagyistoce/odoo-dev-odoo,fdvarela/odoo8,tangyiyong/odoo,kirca/OpenUpgrade,ingadhoc/odoo,goliveirab/odoo,Eric-Zhong/odoo,avoinsystems/odoo,abstract-open-solutions/OCB,lgscofield/odoo,steedos/odoo,jusdng/odoo,fdvarela/odoo8,hopeall/odoo,ccomb/OpenUpgrade,numerigraphe/odoo,hanicker/odoo,credativUK/OCB,mkieszek/odoo,jeasoft/odoo,leoliujie/odoo,gvb/odoo,nhomar/odoo-mirror,shingonoide/odoo,Drooids/odoo,tvibliani/odoo,acshan/odoo,VitalPet/odoo,highco-groupe/odoo,gdgellatly/OCB1,JGarcia-Panach/odoo,MarcosCommunity/odoo,lgscofield/odoo,tinkerthaler/odoo,CopeX/odoo,gdgellatly/OCB1,mustafat/odoo-1,guewen/OpenUpgrade,poljeff/odoo,feroda/odoo,frouty/odoo_oph,dllsf/odootest,rgeleta/odoo,pedrobaeza/OpenUpgrade,KontorConsulting/odoo,slevenhagen/odoo,juanalfonsopr/odoo,deKupini/erp,ehirt/odoo,chiragjogi/odoo,JGarcia-Panach/odoo,gvb/odoo,lsinfo/odoo,charbeljc/OCB,minhtuancn/odoo,jusdng/odoo,tinkhaven-organization/odoo,markeTIC/OCB,NL66278/OCB,steedos/odoo,0k/OpenUpgrade,fuselock/odoo,vnsofthe/odoo,NeovaHealth/odoo,idncom/odoo,bobisme/odoo,frouty/odoo_oph,sysadminmatmoz/OCB,fuhongliang/odoo,savoirfairelinux/odoo,Kilhog/odoo
|
{
"name" : "Customer & Supplier Relationship Management",
"version" : "1.0",
"author" : "Tiny",
"website" : "http://tinyerp.com/module_crm.html",
"category" : "Generic Modules/CRM & SRM",
"description": """The Tiny ERP case and request tracker enables a group of
people to intelligently and efficiently manage tasks, issues, and requests.
It manages key tasks such as communication, identification, prioritization,
assignment, resolution and notification.
Tiny ERP ensures that all cases are successfully tracked by users, customers and
suppliers. It can automatically send reminders, escalate the request, trigger
specific methods and lots of other actions based on your enterprise's own rules.
The greatest thing about this system is that users don't need to do anything
special. They can just send email to the request tracker. Tiny ERP will take
care of thanking them for their message, automatically routing it to the
appropriate staff, and making sure all future correspondence gets to the right
place.
The CRM module has an email gateway for the synchronisation interface
between mails and Tiny ERP.""",
"depends" : ["base", "account"],
"init_xml" : ["crm_data.xml"],
"demo_xml" : ["crm_demo.xml"],
"update_xml" : ["crm_view.xml", "crm_report.xml", "crm_wizard.xml"],
"active": False,
"installable": True
}
Add crm_security.xml file entry in update_xml section
bzr revid: mga@tinyerp.com-80638551c5a66adf0a49181f6ff6ae283ced3709
|
{
"name" : "Customer & Supplier Relationship Management",
"version" : "1.0",
"author" : "Tiny",
"website" : "http://tinyerp.com/module_crm.html",
"category" : "Generic Modules/CRM & SRM",
"description": """The Tiny ERP case and request tracker enables a group of
people to intelligently and efficiently manage tasks, issues, and requests.
It manages key tasks such as communication, identification, prioritization,
assignment, resolution and notification.
Tiny ERP ensures that all cases are successfully tracked by users, customers and
suppliers. It can automatically send reminders, escalate the request, trigger
specific methods and lots of other actions based on your enterprise's own rules.
The greatest thing about this system is that users don't need to do anything
special. They can just send email to the request tracker. Tiny ERP will take
care of thanking them for their message, automatically routing it to the
appropriate staff, and making sure all future correspondence gets to the right
place.
The CRM module has an email gateway for the synchronisation interface
between mails and Tiny ERP.""",
"depends" : ["base", "account"],
"init_xml" : ["crm_data.xml"],
"demo_xml" : ["crm_demo.xml"],
"update_xml" : ["crm_view.xml", "crm_report.xml", "crm_wizard.xml","crm_security.xml"],
"active": False,
"installable": True
}
|
<commit_before>{
"name" : "Customer & Supplier Relationship Management",
"version" : "1.0",
"author" : "Tiny",
"website" : "http://tinyerp.com/module_crm.html",
"category" : "Generic Modules/CRM & SRM",
"description": """The Tiny ERP case and request tracker enables a group of
people to intelligently and efficiently manage tasks, issues, and requests.
It manages key tasks such as communication, identification, prioritization,
assignment, resolution and notification.
Tiny ERP ensures that all cases are successfully tracked by users, customers and
suppliers. It can automatically send reminders, escalate the request, trigger
specific methods and lots of other actions based on your enterprise's own rules.
The greatest thing about this system is that users don't need to do anything
special. They can just send email to the request tracker. Tiny ERP will take
care of thanking them for their message, automatically routing it to the
appropriate staff, and making sure all future correspondence gets to the right
place.
The CRM module has an email gateway for the synchronisation interface
between mails and Tiny ERP.""",
"depends" : ["base", "account"],
"init_xml" : ["crm_data.xml"],
"demo_xml" : ["crm_demo.xml"],
"update_xml" : ["crm_view.xml", "crm_report.xml", "crm_wizard.xml"],
"active": False,
"installable": True
}
<commit_msg>Add crm_security.xml file entry in update_xml section
bzr revid: mga@tinyerp.com-80638551c5a66adf0a49181f6ff6ae283ced3709<commit_after>
|
{
"name" : "Customer & Supplier Relationship Management",
"version" : "1.0",
"author" : "Tiny",
"website" : "http://tinyerp.com/module_crm.html",
"category" : "Generic Modules/CRM & SRM",
"description": """The Tiny ERP case and request tracker enables a group of
people to intelligently and efficiently manage tasks, issues, and requests.
It manages key tasks such as communication, identification, prioritization,
assignment, resolution and notification.
Tiny ERP ensures that all cases are successfully tracked by users, customers and
suppliers. It can automatically send reminders, escalate the request, trigger
specific methods and lots of other actions based on your enterprise's own rules.
The greatest thing about this system is that users don't need to do anything
special. They can just send email to the request tracker. Tiny ERP will take
care of thanking them for their message, automatically routing it to the
appropriate staff, and making sure all future correspondence gets to the right
place.
The CRM module has an email gateway for the synchronisation interface
between mails and Tiny ERP.""",
"depends" : ["base", "account"],
"init_xml" : ["crm_data.xml"],
"demo_xml" : ["crm_demo.xml"],
"update_xml" : ["crm_view.xml", "crm_report.xml", "crm_wizard.xml","crm_security.xml"],
"active": False,
"installable": True
}
|
{
"name" : "Customer & Supplier Relationship Management",
"version" : "1.0",
"author" : "Tiny",
"website" : "http://tinyerp.com/module_crm.html",
"category" : "Generic Modules/CRM & SRM",
"description": """The Tiny ERP case and request tracker enables a group of
people to intelligently and efficiently manage tasks, issues, and requests.
It manages key tasks such as communication, identification, prioritization,
assignment, resolution and notification.
Tiny ERP ensures that all cases are successfully tracked by users, customers and
suppliers. It can automatically send reminders, escalate the request, trigger
specific methods and lots of other actions based on your enterprise's own rules.
The greatest thing about this system is that users don't need to do anything
special. They can just send email to the request tracker. Tiny ERP will take
care of thanking them for their message, automatically routing it to the
appropriate staff, and making sure all future correspondence gets to the right
place.
The CRM module has an email gateway for the synchronisation interface
between mails and Tiny ERP.""",
"depends" : ["base", "account"],
"init_xml" : ["crm_data.xml"],
"demo_xml" : ["crm_demo.xml"],
"update_xml" : ["crm_view.xml", "crm_report.xml", "crm_wizard.xml"],
"active": False,
"installable": True
}
Add crm_security.xml file entry in update_xml section
bzr revid: mga@tinyerp.com-80638551c5a66adf0a49181f6ff6ae283ced3709{
"name" : "Customer & Supplier Relationship Management",
"version" : "1.0",
"author" : "Tiny",
"website" : "http://tinyerp.com/module_crm.html",
"category" : "Generic Modules/CRM & SRM",
"description": """The Tiny ERP case and request tracker enables a group of
people to intelligently and efficiently manage tasks, issues, and requests.
It manages key tasks such as communication, identification, prioritization,
assignment, resolution and notification.
Tiny ERP ensures that all cases are successfully tracked by users, customers and
suppliers. It can automatically send reminders, escalate the request, trigger
specific methods and lots of other actions based on your enterprise's own rules.
The greatest thing about this system is that users don't need to do anything
special. They can just send email to the request tracker. Tiny ERP will take
care of thanking them for their message, automatically routing it to the
appropriate staff, and making sure all future correspondence gets to the right
place.
The CRM module has an email gateway for the synchronisation interface
between mails and Tiny ERP.""",
"depends" : ["base", "account"],
"init_xml" : ["crm_data.xml"],
"demo_xml" : ["crm_demo.xml"],
"update_xml" : ["crm_view.xml", "crm_report.xml", "crm_wizard.xml","crm_security.xml"],
"active": False,
"installable": True
}
|
<commit_before>{
"name" : "Customer & Supplier Relationship Management",
"version" : "1.0",
"author" : "Tiny",
"website" : "http://tinyerp.com/module_crm.html",
"category" : "Generic Modules/CRM & SRM",
"description": """The Tiny ERP case and request tracker enables a group of
people to intelligently and efficiently manage tasks, issues, and requests.
It manages key tasks such as communication, identification, prioritization,
assignment, resolution and notification.
Tiny ERP ensures that all cases are successfully tracked by users, customers and
suppliers. It can automatically send reminders, escalate the request, trigger
specific methods and lots of other actions based on your enterprise's own rules.
The greatest thing about this system is that users don't need to do anything
special. They can just send email to the request tracker. Tiny ERP will take
care of thanking them for their message, automatically routing it to the
appropriate staff, and making sure all future correspondence gets to the right
place.
The CRM module has an email gateway for the synchronisation interface
between mails and Tiny ERP.""",
"depends" : ["base", "account"],
"init_xml" : ["crm_data.xml"],
"demo_xml" : ["crm_demo.xml"],
"update_xml" : ["crm_view.xml", "crm_report.xml", "crm_wizard.xml"],
"active": False,
"installable": True
}
<commit_msg>Add crm_security.xml file entry in update_xml section
bzr revid: mga@tinyerp.com-80638551c5a66adf0a49181f6ff6ae283ced3709<commit_after>{
"name" : "Customer & Supplier Relationship Management",
"version" : "1.0",
"author" : "Tiny",
"website" : "http://tinyerp.com/module_crm.html",
"category" : "Generic Modules/CRM & SRM",
"description": """The Tiny ERP case and request tracker enables a group of
people to intelligently and efficiently manage tasks, issues, and requests.
It manages key tasks such as communication, identification, prioritization,
assignment, resolution and notification.
Tiny ERP ensures that all cases are successfully tracked by users, customers and
suppliers. It can automatically send reminders, escalate the request, trigger
specific methods and lots of other actions based on your enterprise's own rules.
The greatest thing about this system is that users don't need to do anything
special. They can just send email to the request tracker. Tiny ERP will take
care of thanking them for their message, automatically routing it to the
appropriate staff, and making sure all future correspondence gets to the right
place.
The CRM module has an email gateway for the synchronisation interface
between mails and Tiny ERP.""",
"depends" : ["base", "account"],
"init_xml" : ["crm_data.xml"],
"demo_xml" : ["crm_demo.xml"],
"update_xml" : ["crm_view.xml", "crm_report.xml", "crm_wizard.xml","crm_security.xml"],
"active": False,
"installable": True
}
|
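A note on the manifest keys changed in this record: in this generation of Tiny ERP/OpenERP, init_xml files are loaded once at installation, update_xml files at installation and on every module update (which is why crm_security.xml belongs there), and demo_xml only when demo data is enabled. A rough sketch of that load order over a plain dict manifest (an illustration of the convention, not OpenERP's actual loader):
def files_to_load(manifest, installing, with_demo):
    # Hypothetical helper mirroring the conventional manifest load order.
    files = []
    if installing:
        files.extend(manifest.get("init_xml", []))    # install time only
    files.extend(manifest.get("update_xml", []))      # install and every update
    if with_demo:
        files.extend(manifest.get("demo_xml", []))    # demo data only
    return files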
1636e4755d9e0a58b4626750d82102152641068c
|
projects/DensePose/tests/test_structures.py
|
projects/DensePose/tests/test_structures.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import unittest
from densepose.structures import normalized_coords_transform
class TestStructures(unittest.TestCase):
def test_normalized_coords_transform(self):
bbox = (32, 24, 288, 216)
x0, y0, w, h = bbox
xmin, ymin, xmax, ymax = x0, y0, x0 + w, y0 + h
f = normalized_coords_transform(*bbox)
# Top-left
expected_p, actual_p = (-1, -1), f((xmin, ymin))
self.assertEqual(expected_p, actual_p)
# Top-right
expected_p, actual_p = (1, -1), f((xmax, ymin))
self.assertEqual(expected_p, actual_p)
# Bottom-left
expected_p, actual_p = (-1, 1), f((xmin, ymax))
self.assertEqual(expected_p, actual_p)
# Bottom-right
expected_p, actual_p = (1, 1), f((xmax, ymax))
self.assertEqual(expected_p, actual_p)
|
Add target for unit tests + basic test
|
Add target for unit tests + basic test
Summary: Add target for unit tests + basic test
Reviewed By: vkhalidov
Differential Revision: D20429230
fbshipit-source-id: 36bee00d2dd57a350e2a6f3dd58a87f0c9c8227e
|
Python
|
apache-2.0
|
facebookresearch/detectron2,facebookresearch/detectron2,facebookresearch/detectron2
|
Add target for unit tests + basic test
Summary: Add target for unit tests + basic test
Reviewed By: vkhalidov
Differential Revision: D20429230
fbshipit-source-id: 36bee00d2dd57a350e2a6f3dd58a87f0c9c8227e
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import unittest
from densepose.structures import normalized_coords_transform
class TestStructures(unittest.TestCase):
def test_normalized_coords_transform(self):
bbox = (32, 24, 288, 216)
x0, y0, w, h = bbox
xmin, ymin, xmax, ymax = x0, y0, x0 + w, y0 + h
f = normalized_coords_transform(*bbox)
# Top-left
expected_p, actual_p = (-1, -1), f((xmin, ymin))
self.assertEqual(expected_p, actual_p)
# Top-right
expected_p, actual_p = (1, -1), f((xmax, ymin))
self.assertEqual(expected_p, actual_p)
# Bottom-left
expected_p, actual_p = (-1, 1), f((xmin, ymax))
self.assertEqual(expected_p, actual_p)
# Bottom-right
expected_p, actual_p = (1, 1), f((xmax, ymax))
self.assertEqual(expected_p, actual_p)
|
<commit_before><commit_msg>Add target for unit tests + basic test
Summary: Add target for unit tests + basic test
Reviewed By: vkhalidov
Differential Revision: D20429230
fbshipit-source-id: 36bee00d2dd57a350e2a6f3dd58a87f0c9c8227e<commit_after>
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import unittest
from densepose.structures import normalized_coords_transform
class TestStructures(unittest.TestCase):
def test_normalized_coords_transform(self):
bbox = (32, 24, 288, 216)
x0, y0, w, h = bbox
xmin, ymin, xmax, ymax = x0, y0, x0 + w, y0 + h
f = normalized_coords_transform(*bbox)
# Top-left
expected_p, actual_p = (-1, -1), f((xmin, ymin))
self.assertEqual(expected_p, actual_p)
# Top-right
expected_p, actual_p = (1, -1), f((xmax, ymin))
self.assertEqual(expected_p, actual_p)
# Bottom-left
expected_p, actual_p = (-1, 1), f((xmin, ymax))
self.assertEqual(expected_p, actual_p)
# Bottom-right
expected_p, actual_p = (1, 1), f((xmax, ymax))
self.assertEqual(expected_p, actual_p)
|
Add target for unit tests + basic test
Summary: Add target for unit tests + basic test
Reviewed By: vkhalidov
Differential Revision: D20429230
fbshipit-source-id: 36bee00d2dd57a350e2a6f3dd58a87f0c9c8227e# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import unittest
from densepose.structures import normalized_coords_transform
class TestStructures(unittest.TestCase):
def test_normalized_coords_transform(self):
bbox = (32, 24, 288, 216)
x0, y0, w, h = bbox
xmin, ymin, xmax, ymax = x0, y0, x0 + w, y0 + h
f = normalized_coords_transform(*bbox)
# Top-left
expected_p, actual_p = (-1, -1), f((xmin, ymin))
self.assertEqual(expected_p, actual_p)
# Top-right
expected_p, actual_p = (1, -1), f((xmax, ymin))
self.assertEqual(expected_p, actual_p)
# Bottom-left
expected_p, actual_p = (-1, 1), f((xmin, ymax))
self.assertEqual(expected_p, actual_p)
# Bottom-right
expected_p, actual_p = (1, 1), f((xmax, ymax))
self.assertEqual(expected_p, actual_p)
|
<commit_before><commit_msg>Add target for unit tests + basic test
Summary: Add target for unit tests + basic test
Reviewed By: vkhalidov
Differential Revision: D20429230
fbshipit-source-id: 36bee00d2dd57a350e2a6f3dd58a87f0c9c8227e<commit_after># Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import unittest
from densepose.structures import normalized_coords_transform
class TestStructures(unittest.TestCase):
def test_normalized_coords_transform(self):
bbox = (32, 24, 288, 216)
x0, y0, w, h = bbox
xmin, ymin, xmax, ymax = x0, y0, x0 + w, y0 + h
f = normalized_coords_transform(*bbox)
# Top-left
expected_p, actual_p = (-1, -1), f((xmin, ymin))
self.assertEqual(expected_p, actual_p)
# Top-right
expected_p, actual_p = (1, -1), f((xmax, ymin))
self.assertEqual(expected_p, actual_p)
# Bottom-left
expected_p, actual_p = (-1, 1), f((xmin, ymax))
self.assertEqual(expected_p, actual_p)
# Bottom-right
expected_p, actual_p = (1, 1), f((xmax, ymax))
self.assertEqual(expected_p, actual_p)
|
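The four corner assertions above fully determine an affine transform that maps box coordinates onto [-1, 1] x [-1, 1]. A minimal sketch that satisfies them (an illustration of the expected behaviour, not DensePose's actual implementation):
def normalized_coords_transform_sketch(x0, y0, w, h):
    # Map a point in absolute image coordinates into the box-relative
    # [-1, 1] x [-1, 1] square, so (x0, y0) -> (-1, -1) and
    # (x0 + w, y0 + h) -> (1, 1).
    def f(p):
        x, y = p
        return ((x - x0) / w * 2 - 1, (y - y0) / h * 2 - 1)
    return f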
|
b2eb9b8ec3223a359b9c44d5880022d1b48b11f2
|
corehq/motech/repeaters/migrations/0003_migrate_connectionsettings.py
|
corehq/motech/repeaters/migrations/0003_migrate_connectionsettings.py
|
from django.db import migrations
from corehq.motech.repeaters.models import Repeater
def _migrate_to_connectionsettings(apps, schema_editor):
for repeater in iter_repeaters():
if not repeater.connection_settings_id:
repeater.create_connection_settings()
def iter_repeaters():
for result in Repeater.get_db().view('repeaters/repeaters',
reduce=False,
include_docs=True).all():
yield Repeater.wrap(result['doc'])
class Migration(migrations.Migration):
dependencies = [
('repeaters', '0002_sqlrepeatrecord'),
('motech', '0007_auto_20200909_2138'),
]
operations = [
migrations.RunPython(_migrate_to_connectionsettings,
reverse_code=migrations.RunPython.noop,
elidable=True),
]
|
Create ConnectionSettings for all Repeaters
|
Create ConnectionSettings for all Repeaters
|
Python
|
bsd-3-clause
|
dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq
|
Create ConnectionSettings for all Repeaters
|
from django.db import migrations
from corehq.motech.repeaters.models import Repeater
def _migrate_to_connectionsettings(apps, schema_editor):
for repeater in iter_repeaters():
if not repeater.connection_settings_id:
repeater.create_connection_settings()
def iter_repeaters():
for result in Repeater.get_db().view('repeaters/repeaters',
reduce=False,
include_docs=True).all():
yield Repeater.wrap(result['doc'])
class Migration(migrations.Migration):
dependencies = [
('repeaters', '0002_sqlrepeatrecord'),
('motech', '0007_auto_20200909_2138'),
]
operations = [
migrations.RunPython(_migrate_to_connectionsettings,
reverse_code=migrations.RunPython.noop,
elidable=True),
]
|
<commit_before><commit_msg>Create ConnectionSettings for all Repeaters<commit_after>
|
from django.db import migrations
from corehq.motech.repeaters.models import Repeater
def _migrate_to_connectionsettings(apps, schema_editor):
for repeater in iter_repeaters():
if not repeater.connection_settings_id:
repeater.create_connection_settings()
def iter_repeaters():
for result in Repeater.get_db().view('repeaters/repeaters',
reduce=False,
include_docs=True).all():
yield Repeater.wrap(result['doc'])
class Migration(migrations.Migration):
dependencies = [
('repeaters', '0002_sqlrepeatrecord'),
('motech', '0007_auto_20200909_2138'),
]
operations = [
migrations.RunPython(_migrate_to_connectionsettings,
reverse_code=migrations.RunPython.noop,
elidable=True),
]
|
Create ConnectionSettings for all Repeatersfrom django.db import migrations
from corehq.motech.repeaters.models import Repeater
def _migrate_to_connectionsettings(apps, schema_editor):
for repeater in iter_repeaters():
if not repeater.connection_settings_id:
repeater.create_connection_settings()
def iter_repeaters():
for result in Repeater.get_db().view('repeaters/repeaters',
reduce=False,
include_docs=True).all():
yield Repeater.wrap(result['doc'])
class Migration(migrations.Migration):
dependencies = [
('repeaters', '0002_sqlrepeatrecord'),
('motech', '0007_auto_20200909_2138'),
]
operations = [
migrations.RunPython(_migrate_to_connectionsettings,
reverse_code=migrations.RunPython.noop,
elidable=True),
]
|
<commit_before><commit_msg>Create ConnectionSettings for all Repeaters<commit_after>from django.db import migrations
from corehq.motech.repeaters.models import Repeater
def _migrate_to_connectionsettings(apps, schema_editor):
for repeater in iter_repeaters():
if not repeater.connection_settings_id:
repeater.create_connection_settings()
def iter_repeaters():
for result in Repeater.get_db().view('repeaters/repeaters',
reduce=False,
include_docs=True).all():
yield Repeater.wrap(result['doc'])
class Migration(migrations.Migration):
dependencies = [
('repeaters', '0002_sqlrepeatrecord'),
('motech', '0007_auto_20200909_2138'),
]
operations = [
migrations.RunPython(_migrate_to_connectionsettings,
reverse_code=migrations.RunPython.noop,
elidable=True),
]
|
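Because the migration above is forward-only (reverse_code is a no-op) and guarded by the connection_settings_id check, re-running it is safe. A quick idempotency spot-check could look like the following (hypothetical, reusing iter_repeaters from the migration; the _id attribute is an assumption about the wrapped Couch document):
missing = [r._id for r in iter_repeaters() if not r.connection_settings_id]
assert not missing, 'repeaters still lacking connection settings: %s' % missing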
|
5582556d735da829fe629cc36f9664047997a5bb
|
src/ggrc_basic_permissions/roles/Auditor.py
|
src/ggrc_basic_permissions/roles/Auditor.py
|
# Copyright (C) 2015 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: anze@reciprocitylabs.com
# Maintained By: anze@reciprocitylabs.com
scope = "Audit"
description = """
The permissions required by an auditor to access relevant resources for the
program being audited.
"""
permissions = {
"read": [
"Audit",
"Request",
"ControlAssessment",
"Issue",
"DocumentationResponse",
"InterviewResponse",
"PopulationSampleResponse",
"Meeting",
"ObjectDocument",
"ObjectPerson",
"Relationship",
"Document",
"Meeting",
"UserRole",
"Context",
],
"create": [
"Request",
"ControlAssessment",
"Issue",
"DocumentationResponse",
"InterviewResponse",
"PopulationSampleResponse",
],
"view_object_page": [
"__GGRC_ALL__"
],
"update": [
"Request",
"ControlAssessment",
"Issue",
"DocumentationResponse",
"InterviewResponse",
"PopulationSampleResponse"
],
"delete": [
"Request",
"ControlAssessment",
"Issue",
"DocumentationResponse",
"InterviewResponse",
"PopulationSampleResponse",
],
}
|
# Copyright (C) 2015 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: anze@reciprocitylabs.com
# Maintained By: anze@reciprocitylabs.com
scope = "Audit"
description = """
The permissions required by an auditor to access relevant resources for the
program being audited.
"""
permissions = {
"read": [
"Audit",
"Request",
"ControlAssessment",
"Issue",
"DocumentationResponse",
"InterviewResponse",
"PopulationSampleResponse",
"Meeting",
"ObjectDocument",
"ObjectPerson",
"Relationship",
"Document",
"Meeting",
"UserRole",
"Context",
],
"create": [
"Request",
"ControlAssessment",
"Issue",
"Relationship",
"DocumentationResponse",
"InterviewResponse",
"PopulationSampleResponse",
],
"view_object_page": [
"__GGRC_ALL__"
],
"update": [
"Request",
"ControlAssessment",
"Issue",
"DocumentationResponse",
"InterviewResponse",
"PopulationSampleResponse"
],
"delete": [
"Request",
"ControlAssessment",
"Issue",
"DocumentationResponse",
"InterviewResponse",
"PopulationSampleResponse",
],
}
|
Allow auditor to create relationship in context
|
Allow auditor to create relationship in context
This allows the auditor to map objects to the audit but not to unmap them while
not giving him edit permissions on the audit.
|
Python
|
apache-2.0
|
kr41/ggrc-core,j0gurt/ggrc-core,prasannav7/ggrc-core,andrei-karalionak/ggrc-core,prasannav7/ggrc-core,jmakov/ggrc-core,j0gurt/ggrc-core,plamut/ggrc-core,j0gurt/ggrc-core,edofic/ggrc-core,prasannav7/ggrc-core,jmakov/ggrc-core,j0gurt/ggrc-core,josthkko/ggrc-core,AleksNeStu/ggrc-core,AleksNeStu/ggrc-core,andrei-karalionak/ggrc-core,VinnieJohns/ggrc-core,selahssea/ggrc-core,andrei-karalionak/ggrc-core,VinnieJohns/ggrc-core,selahssea/ggrc-core,edofic/ggrc-core,NejcZupec/ggrc-core,VinnieJohns/ggrc-core,selahssea/ggrc-core,NejcZupec/ggrc-core,NejcZupec/ggrc-core,jmakov/ggrc-core,kr41/ggrc-core,jmakov/ggrc-core,AleksNeStu/ggrc-core,josthkko/ggrc-core,edofic/ggrc-core,kr41/ggrc-core,VinnieJohns/ggrc-core,AleksNeStu/ggrc-core,kr41/ggrc-core,selahssea/ggrc-core,NejcZupec/ggrc-core,plamut/ggrc-core,josthkko/ggrc-core,josthkko/ggrc-core,andrei-karalionak/ggrc-core,prasannav7/ggrc-core,edofic/ggrc-core,plamut/ggrc-core,jmakov/ggrc-core,plamut/ggrc-core
|
# Copyright (C) 2015 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: anze@reciprocitylabs.com
# Maintained By: anze@reciprocitylabs.com
scope = "Audit"
description = """
The permissions required by an auditor to access relevant resources for the
program being audited.
"""
permissions = {
"read": [
"Audit",
"Request",
"ControlAssessment",
"Issue",
"DocumentationResponse",
"InterviewResponse",
"PopulationSampleResponse",
"Meeting",
"ObjectDocument",
"ObjectPerson",
"Relationship",
"Document",
"Meeting",
"UserRole",
"Context",
],
"create": [
"Request",
"ControlAssessment",
"Issue",
"DocumentationResponse",
"InterviewResponse",
"PopulationSampleResponse",
],
"view_object_page": [
"__GGRC_ALL__"
],
"update": [
"Request",
"ControlAssessment",
"Issue",
"DocumentationResponse",
"InterviewResponse",
"PopulationSampleResponse"
],
"delete": [
"Request",
"ControlAssessment",
"Issue",
"DocumentationResponse",
"InterviewResponse",
"PopulationSampleResponse",
],
}
Allow auditor to create relationship in context
This allows the auditor to map objects to the audit but not to unmap them while
not giving him edit permissions on the audit.
|
# Copyright (C) 2015 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: anze@reciprocitylabs.com
# Maintained By: anze@reciprocitylabs.com
scope = "Audit"
description = """
The permissions required by an auditor to access relevant resources for the
program being audited.
"""
permissions = {
"read": [
"Audit",
"Request",
"ControlAssessment",
"Issue",
"DocumentationResponse",
"InterviewResponse",
"PopulationSampleResponse",
"Meeting",
"ObjectDocument",
"ObjectPerson",
"Relationship",
"Document",
"Meeting",
"UserRole",
"Context",
],
"create": [
"Request",
"ControlAssessment",
"Issue",
"Relationship",
"DocumentationResponse",
"InterviewResponse",
"PopulationSampleResponse",
],
"view_object_page": [
"__GGRC_ALL__"
],
"update": [
"Request",
"ControlAssessment",
"Issue",
"DocumentationResponse",
"InterviewResponse",
"PopulationSampleResponse"
],
"delete": [
"Request",
"ControlAssessment",
"Issue",
"DocumentationResponse",
"InterviewResponse",
"PopulationSampleResponse",
],
}
|
<commit_before># Copyright (C) 2015 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: anze@reciprocitylabs.com
# Maintained By: anze@reciprocitylabs.com
scope = "Audit"
description = """
The permissions required by an auditor to access relevant resources for the
program being audited.
"""
permissions = {
"read": [
"Audit",
"Request",
"ControlAssessment",
"Issue",
"DocumentationResponse",
"InterviewResponse",
"PopulationSampleResponse",
"Meeting",
"ObjectDocument",
"ObjectPerson",
"Relationship",
"Document",
"Meeting",
"UserRole",
"Context",
],
"create": [
"Request",
"ControlAssessment",
"Issue",
"DocumentationResponse",
"InterviewResponse",
"PopulationSampleResponse",
],
"view_object_page": [
"__GGRC_ALL__"
],
"update": [
"Request",
"ControlAssessment",
"Issue",
"DocumentationResponse",
"InterviewResponse",
"PopulationSampleResponse"
],
"delete": [
"Request",
"ControlAssessment",
"Issue",
"DocumentationResponse",
"InterviewResponse",
"PopulationSampleResponse",
],
}
<commit_msg>Allow auditor to create relationship in context
This allows the auditor to map objects to the audit but not to unmap them while
not giving him edit permissions on the audit.<commit_after>
|
# Copyright (C) 2015 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: anze@reciprocitylabs.com
# Maintained By: anze@reciprocitylabs.com
scope = "Audit"
description = """
The permissions required by an auditor to access relevant resources for the
program being audited.
"""
permissions = {
"read": [
"Audit",
"Request",
"ControlAssessment",
"Issue",
"DocumentationResponse",
"InterviewResponse",
"PopulationSampleResponse",
"Meeting",
"ObjectDocument",
"ObjectPerson",
"Relationship",
"Document",
"Meeting",
"UserRole",
"Context",
],
"create": [
"Request",
"ControlAssessment",
"Issue",
"Relationship",
"DocumentationResponse",
"InterviewResponse",
"PopulationSampleResponse",
],
"view_object_page": [
"__GGRC_ALL__"
],
"update": [
"Request",
"ControlAssessment",
"Issue",
"DocumentationResponse",
"InterviewResponse",
"PopulationSampleResponse"
],
"delete": [
"Request",
"ControlAssessment",
"Issue",
"DocumentationResponse",
"InterviewResponse",
"PopulationSampleResponse",
],
}
|
# Copyright (C) 2015 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: anze@reciprocitylabs.com
# Maintained By: anze@reciprocitylabs.com
scope = "Audit"
description = """
The permissions required by an auditor to access relevant resources for the
program being audited.
"""
permissions = {
"read": [
"Audit",
"Request",
"ControlAssessment",
"Issue",
"DocumentationResponse",
"InterviewResponse",
"PopulationSampleResponse",
"Meeting",
"ObjectDocument",
"ObjectPerson",
"Relationship",
"Document",
"Meeting",
"UserRole",
"Context",
],
"create": [
"Request",
"ControlAssessment",
"Issue",
"DocumentationResponse",
"InterviewResponse",
"PopulationSampleResponse",
],
"view_object_page": [
"__GGRC_ALL__"
],
"update": [
"Request",
"ControlAssessment",
"Issue",
"DocumentationResponse",
"InterviewResponse",
"PopulationSampleResponse"
],
"delete": [
"Request",
"ControlAssessment",
"Issue",
"DocumentationResponse",
"InterviewResponse",
"PopulationSampleResponse",
],
}
Allow auditor to create relationship in context
This allows the auditor to map objects to the audit but not to unmap them while
not giving him edit permissions on the audit.# Copyright (C) 2015 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: anze@reciprocitylabs.com
# Maintained By: anze@reciprocitylabs.com
scope = "Audit"
description = """
The permissions required by an auditor to access relevant resources for the
program being audited.
"""
permissions = {
"read": [
"Audit",
"Request",
"ControlAssessment",
"Issue",
"DocumentationResponse",
"InterviewResponse",
"PopulationSampleResponse",
"Meeting",
"ObjectDocument",
"ObjectPerson",
"Relationship",
"Document",
"Meeting",
"UserRole",
"Context",
],
"create": [
"Request",
"ControlAssessment",
"Issue",
"Relationship",
"DocumentationResponse",
"InterviewResponse",
"PopulationSampleResponse",
],
"view_object_page": [
"__GGRC_ALL__"
],
"update": [
"Request",
"ControlAssessment",
"Issue",
"DocumentationResponse",
"InterviewResponse",
"PopulationSampleResponse"
],
"delete": [
"Request",
"ControlAssessment",
"Issue",
"DocumentationResponse",
"InterviewResponse",
"PopulationSampleResponse",
],
}
|
<commit_before># Copyright (C) 2015 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: anze@reciprocitylabs.com
# Maintained By: anze@reciprocitylabs.com
scope = "Audit"
description = """
The permissions required by an auditor to access relevant resources for the
program being audited.
"""
permissions = {
"read": [
"Audit",
"Request",
"ControlAssessment",
"Issue",
"DocumentationResponse",
"InterviewResponse",
"PopulationSampleResponse",
"Meeting",
"ObjectDocument",
"ObjectPerson",
"Relationship",
"Document",
"Meeting",
"UserRole",
"Context",
],
"create": [
"Request",
"ControlAssessment",
"Issue",
"DocumentationResponse",
"InterviewResponse",
"PopulationSampleResponse",
],
"view_object_page": [
"__GGRC_ALL__"
],
"update": [
"Request",
"ControlAssessment",
"Issue",
"DocumentationResponse",
"InterviewResponse",
"PopulationSampleResponse"
],
"delete": [
"Request",
"ControlAssessment",
"Issue",
"DocumentationResponse",
"InterviewResponse",
"PopulationSampleResponse",
],
}
<commit_msg>Allow auditor to create relationship in context
This allows the auditor to map objects to the audit but not to unmap them while
not giving him edit permissions on the audit.<commit_after># Copyright (C) 2015 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: anze@reciprocitylabs.com
# Maintained By: anze@reciprocitylabs.com
scope = "Audit"
description = """
The permissions required by an auditor to access relevant resources for the
program being audited.
"""
permissions = {
"read": [
"Audit",
"Request",
"ControlAssessment",
"Issue",
"DocumentationResponse",
"InterviewResponse",
"PopulationSampleResponse",
"Meeting",
"ObjectDocument",
"ObjectPerson",
"Relationship",
"Document",
"Meeting",
"UserRole",
"Context",
],
"create": [
"Request",
"ControlAssessment",
"Issue",
"Relationship",
"DocumentationResponse",
"InterviewResponse",
"PopulationSampleResponse",
],
"view_object_page": [
"__GGRC_ALL__"
],
"update": [
"Request",
"ControlAssessment",
"Issue",
"DocumentationResponse",
"InterviewResponse",
"PopulationSampleResponse"
],
"delete": [
"Request",
"ControlAssessment",
"Issue",
"DocumentationResponse",
"InterviewResponse",
"PopulationSampleResponse",
],
}
|
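Read against the permissions dict above, the whole change is an asymmetry: "Relationship" joins the create list but stays out of delete, which is exactly what yields "map but not unmap". A toy check of that property (illustrative only; not GGRC's real permission engine):
# Abridged from the dict above, keeping only the lists that matter here:
permissions = {
    "create": ["Request", "ControlAssessment", "Issue", "Relationship"],
    "delete": ["Request", "ControlAssessment", "Issue"],
}
def allowed(action, resource_type, perms):
    # True when the role grants this action on this resource type.
    return resource_type in perms.get(action, [])
assert allowed("create", "Relationship", permissions)      # the auditor can map
assert not allowed("delete", "Relationship", permissions)  # ...but cannot unmap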
d3a5a3994a47b0b0f31be3951aead595f15dbbd9
|
blivet/tasks/fstask.py
|
blivet/tasks/fstask.py
|
# fstask.py
# Superclass for filesystem tasks.
#
# Copyright (C) 2015 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
# Red Hat Author(s): Anne Mulhern <amulhern@redhat.com>
import abc
from six import add_metaclass
from . import task
@add_metaclass(abc.ABCMeta)
class FSTask(task.Task):
""" An abstract class that encapsulates the fact that all FSTasks
have a single master object: the filesystem that they belong to.
"""
description = "parent of all filesystem tasks"
def __init__(self, an_fs):
""" Initializer.
:param FS an_fs: a filesystem object
"""
self.fs = an_fs
class UnimplementedFSTask(FSTask, task.UnimplementedTask):
""" A convenience class for unimplemented filesystem tasks.
Useful in the usual case where an Unimplemented task has
no special methods that it is required to implement.
"""
pass
|
Add an abstract parent of all filesystem tasks.
|
Add an abstract parent of all filesystem tasks.
It defines the initializer, which should not vary among filesystem tasks.
Include a convenience class for simple unimplemented filesystem tasks.
Signed-off-by: mulhern <7b51bcf507bcd7afb72bf8663752c0ddbeb517f6@redhat.com>
|
Python
|
lgpl-2.1
|
rvykydal/blivet,vojtechtrefny/blivet,AdamWill/blivet,rvykydal/blivet,rhinstaller/blivet,vpodzime/blivet,vojtechtrefny/blivet,jkonecny12/blivet,rhinstaller/blivet,jkonecny12/blivet,vpodzime/blivet,AdamWill/blivet
|
Add an abstract parent of all filesystem tasks.
It defines the initializer, which should not vary among filesystem tasks.
Include a convenience class for simple unimplemented filesystem tasks.
Signed-off-by: mulhern <7b51bcf507bcd7afb72bf8663752c0ddbeb517f6@redhat.com>
|
# fstask.py
# Superclass for filesystem tasks.
#
# Copyright (C) 2015 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
# Red Hat Author(s): Anne Mulhern <amulhern@redhat.com>
import abc
from six import add_metaclass
from . import task
@add_metaclass(abc.ABCMeta)
class FSTask(task.Task):
""" An abstract class that encapsulates the fact that all FSTasks
have a single master object: the filesystem that they belong to.
"""
description = "parent of all filesystem tasks"
def __init__(self, an_fs):
""" Initializer.
:param FS an_fs: a filesystem object
"""
self.fs = an_fs
class UnimplementedFSTask(FSTask, task.UnimplementedTask):
""" A convenience class for unimplemented filesystem tasks.
Useful in the usual case where an Unimplemented task has
no special methods that it is required to implement.
"""
pass
|
<commit_before><commit_msg>Add an abstract parent of all filesystem tasks.
It defines the initializer, which should not vary among filesystem tasks.
Include a convenience class for simple unimplemented filesystem tasks.
Signed-off-by: mulhern <7b51bcf507bcd7afb72bf8663752c0ddbeb517f6@redhat.com><commit_after>
|
# fstask.py
# Superclass for filesystem tasks.
#
# Copyright (C) 2015 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
# Red Hat Author(s): Anne Mulhern <amulhern@redhat.com>
import abc
from six import add_metaclass
from . import task
@add_metaclass(abc.ABCMeta)
class FSTask(task.Task):
""" An abstract class that encapsulates the fact that all FSTasks
have a single master object: the filesystem that they belong to.
"""
description = "parent of all filesystem tasks"
def __init__(self, an_fs):
""" Initializer.
:param FS an_fs: a filesystem object
"""
self.fs = an_fs
class UnimplementedFSTask(FSTask, task.UnimplementedTask):
""" A convenience class for unimplemented filesystem tasks.
Useful in the usual case where an Unimplemented task has
no special methods that it is required to implement.
"""
pass
|
Add an abstract parent of all filesystem tasks.
It defines the initializer, which should not vary among filesystem tasks.
Include a convenience class for simple unimplemented filesystem tasks.
Signed-off-by: mulhern <7b51bcf507bcd7afb72bf8663752c0ddbeb517f6@redhat.com># fstask.py
# Superclass for filesystem tasks.
#
# Copyright (C) 2015 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
# Red Hat Author(s): Anne Mulhern <amulhern@redhat.com>
import abc
from six import add_metaclass
from . import task
@add_metaclass(abc.ABCMeta)
class FSTask(task.Task):
""" An abstract class that encapsulates the fact that all FSTasks
have a single master object: the filesystem that they belong to.
"""
description = "parent of all filesystem tasks"
def __init__(self, an_fs):
""" Initializer.
:param FS an_fs: a filesystem object
"""
self.fs = an_fs
class UnimplementedFSTask(FSTask, task.UnimplementedTask):
""" A convenience class for unimplemented filesystem tasks.
Useful in the usual case where an Unimplemented task has
no special methods that it is required to implement.
"""
pass
|
<commit_before><commit_msg>Add an abstract parent of all filesystem tasks.
It defines the initializer, which should not vary among filesystem tasks.
Include a convenience class for simple unimplemented filesystem tasks.
Signed-off-by: mulhern <7b51bcf507bcd7afb72bf8663752c0ddbeb517f6@redhat.com><commit_after># fstask.py
# Superclass for filesystem tasks.
#
# Copyright (C) 2015 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
# Red Hat Author(s): Anne Mulhern <amulhern@redhat.com>
import abc
from six import add_metaclass
from . import task
@add_metaclass(abc.ABCMeta)
class FSTask(task.Task):
""" An abstract class that encapsulates the fact that all FSTasks
have a single master object: the filesystem that they belong to.
"""
description = "parent of all filesystem tasks"
def __init__(self, an_fs):
""" Initializer.
:param FS an_fs: a filesystem object
"""
self.fs = an_fs
class UnimplementedFSTask(FSTask, task.UnimplementedTask):
""" A convenience class for unimplemented filesystem tasks.
Useful in the usual case where an Unimplemented task has
no special methods that it is required to implement.
"""
pass
|
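As a usage illustration, a concrete task would subclass FSTask and rely on the shared initializer for its fs attribute. Something like the following hypothetical subclass (not part of blivet, and eliding whatever abstract methods task.Task itself requires; do_task is an invented name):
class RelabelFSTask(FSTask):
    # Hypothetical concrete task: all it needs from the parent is self.fs.
    description = "relabel a filesystem"
    def do_task(self):
        return "relabeling %s" % self.fs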
|
ce96d59cf4b63101c061796ecf3793cc107a74c1
|
migrations/versions/0105_opg_letter_org.py
|
migrations/versions/0105_opg_letter_org.py
|
"""empty message
Revision ID: 0105_opg_letter_org
Revises: 0104_more_letter_orgs
Create Date: 2017-06-29 12:44:16.815039
"""
# revision identifiers, used by Alembic.
revision = '0105_opg_letter_org'
down_revision = '0104_more_letter_orgs'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
from flask import current_app
def upgrade():
op.execute("""
INSERT INTO dvla_organisation VALUES
('002', 'Office of the Public Guardian')
""")
def downgrade():
    # data migration, no downgrade
pass
|
Add OPG to letter organisations
|
Add OPG to letter organisations
Logo was added here:
https://github.com/alphagov/notifications-template-preview/pull/19
|
Python
|
mit
|
alphagov/notifications-api,alphagov/notifications-api
|
Add OPG to letter organisations
Logo was added here:
https://github.com/alphagov/notifications-template-preview/pull/19
|
"""empty message
Revision ID: 0105_opg_letter_org
Revises: 0104_more_letter_orgs
Create Date: 2017-06-29 12:44:16.815039
"""
# revision identifiers, used by Alembic.
revision = '0105_opg_letter_org'
down_revision = '0104_more_letter_orgs'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
from flask import current_app
def upgrade():
op.execute("""
INSERT INTO dvla_organisation VALUES
('002', 'Office of the Public Guardian')
""")
def downgrade():
    # data migration, no downgrade
pass
|
<commit_before><commit_msg>Add OPG to letter organisations
Logo was added here:
https://github.com/alphagov/notifications-template-preview/pull/19<commit_after>
|
"""empty message
Revision ID: 0105_opg_letter_org
Revises: 0104_more_letter_orgs
Create Date: 2017-06-29 12:44:16.815039
"""
# revision identifiers, used by Alembic.
revision = '0105_opg_letter_org'
down_revision = '0104_more_letter_orgs'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
from flask import current_app
def upgrade():
op.execute("""
INSERT INTO dvla_organisation VALUES
('002', 'Office of the Public Guardian')
""")
def downgrade():
    # data migration, no downgrade
pass
|
Add OPG to letter organisations
Logo was added here:
https://github.com/alphagov/notifications-template-preview/pull/19"""empty message
Revision ID: 0105_opg_letter_org
Revises: 0104_more_letter_orgs
Create Date: 2017-06-29 12:44:16.815039
"""
# revision identifiers, used by Alembic.
revision = '0105_opg_letter_org'
down_revision = '0104_more_letter_orgs'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
from flask import current_app
def upgrade():
op.execute("""
INSERT INTO dvla_organisation VALUES
('002', 'Office of the Public Guardian')
""")
def downgrade():
    # data migration, no downgrade
pass
|
<commit_before><commit_msg>Add OPG to letter organisations
Logo was added here:
https://github.com/alphagov/notifications-template-preview/pull/19<commit_after>"""empty message
Revision ID: 0105_opg_letter_org
Revises: 0104_more_letter_orgs
Create Date: 2017-06-29 12:44:16.815039
"""
# revision identifiers, used by Alembic.
revision = '0105_opg_letter_org'
down_revision = '0104_more_letter_orgs'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
from flask import current_app
def upgrade():
op.execute("""
INSERT INTO dvla_organisation VALUES
('002', 'Office of the Public Guardian')
""")
def downgrade():
    # data migration, no downgrade
pass
|
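The downgrade above is deliberately a pass: reverting a data-only migration is rarely worth the risk. If a reversible variant were wanted, it could mirror the insert with a delete, using the same alembic op import as above and assuming the first column of dvla_organisation is its id (a sketch, not what this migration does):
def downgrade():
    # Reversible alternative: remove exactly the row the upgrade inserted.
    op.execute("DELETE FROM dvla_organisation WHERE id = '002'")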
|
d1d130ea5d79fa65f9c2f8a2c50ac8b490580d17
|
tests/test_mailparsers_security_announce.py
|
tests/test_mailparsers_security_announce.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import unittest
import os, sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from DebianChangesBot.mailparsers import SecurityAnnounceParser as p
class TestMailParserSecurityAnnounce(unittest.TestCase):
def setUp(self):
self.headers = {
'List-Id': '<debian-security-announce.lists.debian.org>',
'Date': 'Sat, 19 Apr 2008 19:18:38 +0100',
'Subject': '[SECURITY] [DSA 1234-5] New pinafore packages ' \
'fix inertial dampener problem',
}
def testSimple(self):
msg = p.parse(self.headers, [])
self.assertEqual(msg.dsa_number, 1234)
self.assertEqual(msg.dsa_revision, 5)
self.assertEqual(msg.package, 'pinafore')
self.assertEqual(msg.problem, 'inertial dampener problem')
self.assertEqual(msg.year, 2008)
def testNoDate(self):
del self.headers['Date']
self.failIf(p.parse(self.headers, []))
def testNoSubject(self):
del self.headers['Subject']
self.failIf(p.parse(self.headers, []))
def testNoListId(self):
del self.headers['List-Id']
self.failIf(p.parse(self.headers, []))
def testWrongListId(self):
self.headers['List-Id'] = '<debian-ponies-announce.lists.debian.org>'
self.failIf(p.parse(self.headers, []))
if __name__ == "__main__":
unittest.main()
|
Add some tests of SecurityAnnounceParser
|
Add some tests of SecurityAnnounceParser
Signed-off-by: Chris Lamb <711c73f64afdce07b7e38039a96d2224209e9a6c@chris-lamb.co.uk>
|
Python
|
agpl-3.0
|
xtaran/debian-devel-changes-bot,lamby/debian-devel-changes-bot,sebastinas/debian-devel-changes-bot,xtaran/debian-devel-changes-bot,lamby/debian-devel-changes-bot,lamby/debian-devel-changes-bot
|
Add some tests of SecurityAnnounceParser
Signed-off-by: Chris Lamb <711c73f64afdce07b7e38039a96d2224209e9a6c@chris-lamb.co.uk>
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import unittest
import os, sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from DebianChangesBot.mailparsers import SecurityAnnounceParser as p
class TestMailParserSecurityAnnounce(unittest.TestCase):
def setUp(self):
self.headers = {
'List-Id': '<debian-security-announce.lists.debian.org>',
'Date': 'Sat, 19 Apr 2008 19:18:38 +0100',
'Subject': '[SECURITY] [DSA 1234-5] New pinafore packages ' \
'fix inertial dampener problem',
}
def testSimple(self):
msg = p.parse(self.headers, [])
self.assertEqual(msg.dsa_number, 1234)
self.assertEqual(msg.dsa_revision, 5)
self.assertEqual(msg.package, 'pinafore')
self.assertEqual(msg.problem, 'inertial dampener problem')
self.assertEqual(msg.year, 2008)
def testNoDate(self):
del self.headers['Date']
self.failIf(p.parse(self.headers, []))
def testNoSubject(self):
del self.headers['Subject']
self.failIf(p.parse(self.headers, []))
def testNoListId(self):
del self.headers['List-Id']
self.failIf(p.parse(self.headers, []))
def testWrongListId(self):
self.headers['List-Id'] = '<debian-ponies-announce.lists.debian.org>'
self.failIf(p.parse(self.headers, []))
if __name__ == "__main__":
unittest.main()
|
<commit_before><commit_msg>Add some tests of SecurityAnnounceParser
Signed-off-by: Chris Lamb <711c73f64afdce07b7e38039a96d2224209e9a6c@chris-lamb.co.uk><commit_after>
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import unittest
import os, sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from DebianChangesBot.mailparsers import SecurityAnnounceParser as p
class TestMailParserSecurityAnnounce(unittest.TestCase):
def setUp(self):
self.headers = {
'List-Id': '<debian-security-announce.lists.debian.org>',
'Date': 'Sat, 19 Apr 2008 19:18:38 +0100',
'Subject': '[SECURITY] [DSA 1234-5] New pinafore packages ' \
'fix inertial dampener problem',
}
def testSimple(self):
msg = p.parse(self.headers, [])
self.assertEqual(msg.dsa_number, 1234)
self.assertEqual(msg.dsa_revision, 5)
self.assertEqual(msg.package, 'pinafore')
self.assertEqual(msg.problem, 'inertial dampener problem')
self.assertEqual(msg.year, 2008)
def testNoDate(self):
del self.headers['Date']
self.failIf(p.parse(self.headers, []))
def testNoSubject(self):
del self.headers['Subject']
self.failIf(p.parse(self.headers, []))
def testNoListId(self):
del self.headers['List-Id']
self.failIf(p.parse(self.headers, []))
def testWrongListId(self):
self.headers['List-Id'] = '<debian-ponies-announce.lists.debian.org>'
self.failIf(p.parse(self.headers, []))
if __name__ == "__main__":
unittest.main()
|
Add some tests of SecurityAnnounceParser
Signed-off-by: Chris Lamb <711c73f64afdce07b7e38039a96d2224209e9a6c@chris-lamb.co.uk>#!/usr/bin/env python
# -*- coding: utf-8 -*-
import unittest
import os, sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from DebianChangesBot.mailparsers import SecurityAnnounceParser as p
class TestMailParserSecurityAnnounce(unittest.TestCase):
def setUp(self):
self.headers = {
'List-Id': '<debian-security-announce.lists.debian.org>',
'Date': 'Sat, 19 Apr 2008 19:18:38 +0100',
'Subject': '[SECURITY] [DSA 1234-5] New pinafore packages ' \
'fix inertial dampener problem',
}
def testSimple(self):
msg = p.parse(self.headers, [])
self.assertEqual(msg.dsa_number, 1234)
self.assertEqual(msg.dsa_revision, 5)
self.assertEqual(msg.package, 'pinafore')
self.assertEqual(msg.problem, 'inertial dampener problem')
self.assertEqual(msg.year, 2008)
def testNoDate(self):
del self.headers['Date']
self.failIf(p.parse(self.headers, []))
def testNoSubject(self):
del self.headers['Subject']
self.failIf(p.parse(self.headers, []))
def testNoListId(self):
del self.headers['List-Id']
self.failIf(p.parse(self.headers, []))
def testWrongListId(self):
self.headers['List-Id'] = '<debian-ponies-announce.lists.debian.org>'
self.failIf(p.parse(self.headers, []))
if __name__ == "__main__":
unittest.main()
|
<commit_before><commit_msg>Add some tests of SecurityAnnounceParser
Signed-off-by: Chris Lamb <711c73f64afdce07b7e38039a96d2224209e9a6c@chris-lamb.co.uk><commit_after>#!/usr/bin/env python
# -*- coding: utf-8 -*-
import unittest
import os, sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from DebianChangesBot.mailparsers import SecurityAnnounceParser as p
class TestMailParserSecurityAnnounce(unittest.TestCase):
def setUp(self):
self.headers = {
'List-Id': '<debian-security-announce.lists.debian.org>',
'Date': 'Sat, 19 Apr 2008 19:18:38 +0100',
'Subject': '[SECURITY] [DSA 1234-5] New pinafore packages ' \
'fix inertial dampener problem',
}
def testSimple(self):
msg = p.parse(self.headers, [])
self.assertEqual(msg.dsa_number, 1234)
self.assertEqual(msg.dsa_revision, 5)
self.assertEqual(msg.package, 'pinafore')
self.assertEqual(msg.problem, 'inertial dampener problem')
self.assertEqual(msg.year, 2008)
def testNoDate(self):
del self.headers['Date']
self.failIf(p.parse(self.headers, []))
def testNoSubject(self):
del self.headers['Subject']
self.failIf(p.parse(self.headers, []))
def testNoListId(self):
del self.headers['List-Id']
self.failIf(p.parse(self.headers, []))
def testWrongListId(self):
self.headers['List-Id'] = '<debian-ponies-announce.lists.debian.org>'
self.failIf(p.parse(self.headers, []))
if __name__ == "__main__":
unittest.main()
|
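The assertions in testSimple pin down how the parser must pick the subject line apart. One pattern consistent with them, assuming the real SecurityAnnounceParser uses something similar (hypothetical sketch):
import re
SUBJECT_RE = re.compile(
    r"\[SECURITY\] \[DSA (?P<number>\d+)-(?P<revision>\d+)\] "
    r"New (?P<package>\S+) packages fix (?P<problem>.+)"
)
m = SUBJECT_RE.match("[SECURITY] [DSA 1234-5] New pinafore packages "
                     "fix inertial dampener problem")
assert (int(m.group("number")), int(m.group("revision"))) == (1234, 5)
assert m.group("package") == "pinafore"
assert m.group("problem") == "inertial dampener problem"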
|
612ae3adb2636fb3a926cd29d87b4b388ca48476
|
scripts/delete_old_user_login_events.py
|
scripts/delete_old_user_login_events.py
|
#!/usr/bin/env python
"""Delete login user events older than a given number of days.
:Copyright: 2006-2020 Jochen Kupperschmidt
:License: Modified BSD, see LICENSE for details.
"""
from datetime import datetime, timedelta
import click
from byceps.database import db
from byceps.services.user.models.event import UserEvent as DbUserEvent
from byceps.util.system import get_config_filename_from_env_or_exit
from _util import app_context
@click.command()
@click.option(
'--dry-run', is_flag=True, help='count but do not delete affected records',
)
@click.argument('minimum_age_in_days', type=int)
def execute(dry_run, minimum_age_in_days):
latest_occurred_at = get_latest_occurred_at(minimum_age_in_days)
click.secho(
f'Deleting all user login events older than {minimum_age_in_days} days '
f'(i.e. before {latest_occurred_at:%Y-%m-%d %H:%M:%S}) ...'
)
num_deleted = delete_user_login_events_before(latest_occurred_at, dry_run)
click.secho(f'{num_deleted} user login events deleted.')
if dry_run:
click.secho(
f'This was a dry run; no records have been deleted.', fg='yellow'
)
def get_latest_occurred_at(minimum_age_in_days: int) -> datetime:
now = datetime.utcnow()
return now - timedelta(days=minimum_age_in_days)
def delete_user_login_events_before(
latest_occurred_at: datetime, dry_run: bool
) -> int:
num_deleted = DbUserEvent.query \
.filter_by(event_type='user-logged-in') \
.filter(DbUserEvent.occurred_at <= latest_occurred_at) \
.delete()
if not dry_run:
db.session.commit()
return num_deleted
if __name__ == '__main__':
config_filename = get_config_filename_from_env_or_exit()
with app_context(config_filename):
execute()
|
Add script to delete user login events older than a number of days
|
Add script to delete user login events older than a number of days
|
Python
|
bsd-3-clause
|
homeworkprod/byceps,homeworkprod/byceps,homeworkprod/byceps
|
Add script to delete user login events older than a number of days
|
#!/usr/bin/env python
"""Delete login user events older than a given number of days.
:Copyright: 2006-2020 Jochen Kupperschmidt
:License: Modified BSD, see LICENSE for details.
"""
from datetime import datetime, timedelta
import click
from byceps.database import db
from byceps.services.user.models.event import UserEvent as DbUserEvent
from byceps.util.system import get_config_filename_from_env_or_exit
from _util import app_context
@click.command()
@click.option(
'--dry-run', is_flag=True, help='count but do not delete affected records',
)
@click.argument('minimum_age_in_days', type=int)
def execute(dry_run, minimum_age_in_days):
latest_occurred_at = get_latest_occurred_at(minimum_age_in_days)
click.secho(
f'Deleting all user login events older than {minimum_age_in_days} days '
f'(i.e. before {latest_occurred_at:%Y-%m-%d %H:%M:%S}) ...'
)
num_deleted = delete_user_login_events_before(latest_occurred_at, dry_run)
click.secho(f'{num_deleted} user login events deleted.')
if dry_run:
click.secho(
f'This was a dry run; no records have been deleted.', fg='yellow'
)
def get_latest_occurred_at(minimum_age_in_days: int) -> datetime:
now = datetime.utcnow()
return now - timedelta(days=minimum_age_in_days)
def delete_user_login_events_before(
latest_occurred_at: datetime, dry_run: bool
) -> int:
num_deleted = DbUserEvent.query \
.filter_by(event_type='user-logged-in') \
.filter(DbUserEvent.occurred_at <= latest_occurred_at) \
.delete()
if not dry_run:
db.session.commit()
return num_deleted
if __name__ == '__main__':
config_filename = get_config_filename_from_env_or_exit()
with app_context(config_filename):
execute()
|
<commit_before><commit_msg>Add script to delete user login events older than a number of days<commit_after>
|
#!/usr/bin/env python
"""Delete login user events older than a given number of days.
:Copyright: 2006-2020 Jochen Kupperschmidt
:License: Modified BSD, see LICENSE for details.
"""
from datetime import datetime, timedelta
import click
from byceps.database import db
from byceps.services.user.models.event import UserEvent as DbUserEvent
from byceps.util.system import get_config_filename_from_env_or_exit
from _util import app_context
@click.command()
@click.option(
'--dry-run', is_flag=True, help='count but do not delete affected records',
)
@click.argument('minimum_age_in_days', type=int)
def execute(dry_run, minimum_age_in_days):
latest_occurred_at = get_latest_occurred_at(minimum_age_in_days)
click.secho(
f'Deleting all user login events older than {minimum_age_in_days} days '
f'(i.e. before {latest_occurred_at:%Y-%m-%d %H:%M:%S}) ...'
)
num_deleted = delete_user_login_events_before(latest_occurred_at, dry_run)
click.secho(f'{num_deleted} user login events deleted.')
if dry_run:
click.secho(
f'This was a dry run; no records have been deleted.', fg='yellow'
)
def get_latest_occurred_at(minimum_age_in_days: int) -> datetime:
now = datetime.utcnow()
return now - timedelta(days=minimum_age_in_days)
def delete_user_login_events_before(
latest_occurred_at: datetime, dry_run: bool
) -> int:
num_deleted = DbUserEvent.query \
.filter_by(event_type='user-logged-in') \
.filter(DbUserEvent.occurred_at <= latest_occurred_at) \
.delete()
if not dry_run:
db.session.commit()
return num_deleted
if __name__ == '__main__':
config_filename = get_config_filename_from_env_or_exit()
with app_context(config_filename):
execute()
|
Add script to delete user login events older than a number of days#!/usr/bin/env python
"""Delete login user events older than a given number of days.
:Copyright: 2006-2020 Jochen Kupperschmidt
:License: Modified BSD, see LICENSE for details.
"""
from datetime import datetime, timedelta
import click
from byceps.database import db
from byceps.services.user.models.event import UserEvent as DbUserEvent
from byceps.util.system import get_config_filename_from_env_or_exit
from _util import app_context
@click.command()
@click.option(
'--dry-run', is_flag=True, help='count but do not delete affected records',
)
@click.argument('minimum_age_in_days', type=int)
def execute(dry_run, minimum_age_in_days):
latest_occurred_at = get_latest_occurred_at(minimum_age_in_days)
click.secho(
f'Deleting all user login events older than {minimum_age_in_days} days '
f'(i.e. before {latest_occurred_at:%Y-%m-%d %H:%M:%S}) ...'
)
num_deleted = delete_user_login_events_before(latest_occurred_at, dry_run)
click.secho(f'{num_deleted} user login events deleted.')
if dry_run:
click.secho(
f'This was a dry run; no records have been deleted.', fg='yellow'
)
def get_latest_occurred_at(minimum_age_in_days: int) -> datetime:
now = datetime.utcnow()
return now - timedelta(days=minimum_age_in_days)
def delete_user_login_events_before(
latest_occurred_at: datetime, dry_run: bool
) -> int:
num_deleted = DbUserEvent.query \
.filter_by(event_type='user-logged-in') \
.filter(DbUserEvent.occurred_at <= latest_occurred_at) \
.delete()
if not dry_run:
db.session.commit()
return num_deleted
if __name__ == '__main__':
config_filename = get_config_filename_from_env_or_exit()
with app_context(config_filename):
execute()
|
<commit_before><commit_msg>Add script to delete user login events older than a number of days<commit_after>#!/usr/bin/env python
"""Delete login user events older than a given number of days.
:Copyright: 2006-2020 Jochen Kupperschmidt
:License: Modified BSD, see LICENSE for details.
"""
from datetime import datetime, timedelta
import click
from byceps.database import db
from byceps.services.user.models.event import UserEvent as DbUserEvent
from byceps.util.system import get_config_filename_from_env_or_exit
from _util import app_context
@click.command()
@click.option(
'--dry-run', is_flag=True, help='count but do not delete affected records',
)
@click.argument('minimum_age_in_days', type=int)
def execute(dry_run, minimum_age_in_days):
latest_occurred_at = get_latest_occurred_at(minimum_age_in_days)
click.secho(
f'Deleting all user login events older than {minimum_age_in_days} days '
f'(i.e. before {latest_occurred_at:%Y-%m-%d %H:%M:%S}) ...'
)
num_deleted = delete_user_login_events_before(latest_occurred_at, dry_run)
click.secho(f'{num_deleted} user login events deleted.')
if dry_run:
click.secho(
f'This was a dry run; no records have been deleted.', fg='yellow'
)
def get_latest_occurred_at(minimum_age_in_days: int) -> datetime:
now = datetime.utcnow()
return now - timedelta(days=minimum_age_in_days)
def delete_user_login_events_before(
latest_occurred_at: datetime, dry_run: bool
) -> int:
num_deleted = DbUserEvent.query \
.filter_by(event_type='user-logged-in') \
.filter(DbUserEvent.occurred_at <= latest_occurred_at) \
.delete()
if not dry_run:
db.session.commit()
return num_deleted
if __name__ == '__main__':
config_filename = get_config_filename_from_env_or_exit()
with app_context(config_filename):
execute()
|
|
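The cutoff arithmetic in get_latest_occurred_at is worth checking in isolation, since an off-by-one there silently widens the deletion window. A standard-library-only sketch with an injectable clock (the extra now argument is an addition for testability, not part of the script above):

from datetime import datetime, timedelta

def get_latest_occurred_at(minimum_age_in_days, now=None):
    # Same logic as the script, but the clock can be pinned in tests.
    if now is None:
        now = datetime.utcnow()
    return now - timedelta(days=minimum_age_in_days)

fixed_now = datetime(2020, 1, 31, 12, 0, 0)
assert get_latest_occurred_at(30, now=fixed_now) == datetime(2020, 1, 1, 12, 0, 0)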
a33685a7a1432b86572c49ac895a7822cf6b58d2
|
weathertracking/bin/gather_observations.py
|
weathertracking/bin/gather_observations.py
|
from weathertracking.models import WeatherStation, WeatherReport
if __name__ == '__main__': # called as a script
station_list = WeatherStation.objects.auto_poll() # retrieve all stations marked to poll automatically
for station in station_list:
var = station.update()
|
Add script usable for cron/queue-ing retrieval of automatically polled stations
|
Add script usable for cron/queue-ing retrieval of automatically polled stations
git-svn-id: 138413ff556ff9267caefd2d209ba774763e85fb@8 696a86c2-f982-11dd-8259-6defae454439
|
Python
|
bsd-3-clause
|
adamfast/django-weathertracking
|
Add script usable for cron/queue-ing retrieval of automatically polled stations
git-svn-id: 138413ff556ff9267caefd2d209ba774763e85fb@8 696a86c2-f982-11dd-8259-6defae454439
|
from weathertracking.models import WeatherStation, WeatherReport
if __name__ == '__main__': # called as a script
station_list = WeatherStation.objects.auto_poll() # retrieve all stations marked to poll automatically
for station in station_list:
var = station.update()
|
<commit_before><commit_msg>Add script usable for cron/queue-ing retrieval of automatically polled stations
git-svn-id: 138413ff556ff9267caefd2d209ba774763e85fb@8 696a86c2-f982-11dd-8259-6defae454439<commit_after>
|
from weathertracking.models import WeatherStation, WeatherReport
if __name__ == '__main__': # called as a script
station_list = WeatherStation.objects.auto_poll() # retrieve all stations marked to poll automatically
for station in station_list:
var = station.update()
|
Add script usable for cron/queue-ing retrieval of automatically polled stations
git-svn-id: 138413ff556ff9267caefd2d209ba774763e85fb@8 696a86c2-f982-11dd-8259-6defae454439from weathertracking.models import WeatherStation, WeatherReport
if __name__ == '__main__': # called as a script
station_list = WeatherStation.objects.auto_poll() # retrieve all stations marked to poll automatically
for station in station_list:
var = station.update()
|
<commit_before><commit_msg>Add script usable for cron/queue-ing retrieval of automatically polled stations
git-svn-id: 138413ff556ff9267caefd2d209ba774763e85fb@8 696a86c2-f982-11dd-8259-6defae454439<commit_after>from weathertracking.models import WeatherStation, WeatherReport
if __name__ == '__main__': # called as a script
station_list = WeatherStation.objects.auto_poll() # retrieve all stations marked to poll automatically
for station in station_list:
var = station.update()
|
|
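WeatherStation.objects.auto_poll() implies a custom manager on the model. A plausible sketch of what backs it (hypothetical; the field name and filter in django-weathertracking may differ), assuming a boolean flag per station:

from django.db import models

class WeatherStationManager(models.Manager):
    def auto_poll(self):
        # Only the stations flagged for automatic polling.
        return self.filter(auto_poll=True)

class WeatherStation(models.Model):
    name = models.CharField(max_length=100)
    auto_poll = models.BooleanField(default=False)

    objects = WeatherStationManager()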
6c339a14170ea77926bc8eb47bd7e61c80db4a42
|
tests/test_cookiecutter_generation.py
|
tests/test_cookiecutter_generation.py
|
# -*- coding: utf-8 -*-
import pytest
@pytest.fixture
def context():
return {
"project_name": "My Test Project",
"repo_name": "my_test_project",
"author_name": "Test Author",
"email": "test@example.com",
"description": "A short description of the project.",
"domain_name": "example.com",
"version": "0.1.0",
"timezone": "UTC",
"now": "2015/01/13",
"year": "2015"
}
def test_default_configuration(cookies, context):
result = cookies.bake(extra_context=context)
assert result.exit_code == 0
|
Implement a basic test with cookies.bake()
|
Implement a basic test with cookies.bake()
|
Python
|
bsd-3-clause
|
ovidner/cookiecutter-django,luzfcb/cookiecutter-django,kappataumu/cookiecutter-django,aleprovencio/cookiecutter-django,HandyCodeJob/hcj-django-temp,hairychris/cookiecutter-django,calculuscowboy/cookiecutter-django,webspired/cookiecutter-django,hairychris/cookiecutter-django,schacki/cookiecutter-django,topwebmaster/cookiecutter-django,luzfcb/cookiecutter-django,asyncee/cookiecutter-django,ddiazpinto/cookiecutter-django,thisjustin/cookiecutter-django,ingenioustechie/cookiecutter-django-openshift,mistalaba/cookiecutter-django,thisjustin/cookiecutter-django,aeikenberry/cookiecutter-django-rest-babel,ingenioustechie/cookiecutter-django-openshift,aeikenberry/cookiecutter-django-rest-babel,crdoconnor/cookiecutter-django,kappataumu/cookiecutter-django,hackebrot/cookiecutter-django,hackebrot/cookiecutter-django,webyneter/cookiecutter-django,yunti/cookiecutter-django,ad-m/cookiecutter-django,bopo/cookiecutter-django,drxos/cookiecutter-django-dokku,ddiazpinto/cookiecutter-django,calculuscowboy/cookiecutter-django,jondelmil/cookiecutter-django,ovidner/cookiecutter-django,ddiazpinto/cookiecutter-django,hackebrot/cookiecutter-django,hackebrot/cookiecutter-django,webyneter/cookiecutter-django,yunti/cookiecutter-django,ad-m/cookiecutter-django,bopo/cookiecutter-django,drxos/cookiecutter-django-dokku,nunchaks/cookiecutter-django,schacki/cookiecutter-django,bopo/cookiecutter-django,calculuscowboy/cookiecutter-django,jondelmil/cookiecutter-django,calculuscowboy/cookiecutter-django,andresgz/cookiecutter-django,trungdong/cookiecutter-django,mistalaba/cookiecutter-django,bopo/cookiecutter-django,Parbhat/cookiecutter-django-foundation,ryankanno/cookiecutter-django,Parbhat/cookiecutter-django-foundation,gappsexperts/cookiecutter-django,asyncee/cookiecutter-django,mjhea0/cookiecutter-django,calculuscowboy/cookiecutter-django,luzfcb/cookiecutter-django,ovidner/cookiecutter-django,Parbhat/cookiecutter-django-foundation,aleprovencio/cookiecutter-django,mjhea0/cookiecutter-django,kappataumu/cookiecutter-django,HandyCodeJob/hcj-django-temp,gappsexperts/cookiecutter-django,aeikenberry/cookiecutter-django-rest-babel,nunchaks/cookiecutter-django,andresgz/cookiecutter-django,trungdong/cookiecutter-django,topwebmaster/cookiecutter-django,hairychris/cookiecutter-django,mjhea0/cookiecutter-django,ryankanno/cookiecutter-django,asyncee/cookiecutter-django,crdoconnor/cookiecutter-django,luzfcb/cookiecutter-django,thisjustin/cookiecutter-django,HandyCodeJob/hcj-django-temp,schacki/cookiecutter-django,Parbhat/cookiecutter-django-foundation,drxos/cookiecutter-django-dokku,asyncee/cookiecutter-django,yunti/cookiecutter-django,drxos/cookiecutter-django-dokku,hackebrot/cookiecutter-django,topwebmaster/cookiecutter-django,nunchaks/cookiecutter-django,webspired/cookiecutter-django,ad-m/cookiecutter-django,schacki/cookiecutter-django,aeikenberry/cookiecutter-django-rest-babel,nunchaks/cookiecutter-django,pydanny/cookiecutter-django,ryankanno/cookiecutter-django,jondelmil/cookiecutter-django,webyneter/cookiecutter-django,kappataumu/cookiecutter-django,drxos/cookiecutter-django-dokku,pydanny/cookiecutter-django,thisjustin/cookiecutter-django,ovidner/cookiecutter-django,pydanny/cookiecutter-django,aleprovencio/cookiecutter-django,ad-m/cookiecutter-django,yunti/cookiecutter-django,topwebmaster/cookiecutter-django,HandyCodeJob/hcj-django-temp,pydanny/cookiecutter-django,andresgz/cookiecutter-django,jondelmil/cookiecutter-django,hackebrot/cookiecutter-django,ingenioustechie/cookiecutter-django-openshift,andresgz/cookiecutter-django,trungdong/cookiecutter-django,ryankanno/cookiecutter-django,gappsexperts/cookiecutter-django,yunti/cookiecutter-django,trungdong/cookiecutter-django,bopo/cookiecutter-django,ad-m/cookiecutter-django,mistalaba/cookiecutter-django,webyneter/cookiecutter-django,webyneter/cookiecutter-django,webspired/cookiecutter-django,crdoconnor/cookiecutter-django,aleprovencio/cookiecutter-django,mjhea0/cookiecutter-django,crdoconnor/cookiecutter-django,mistalaba/cookiecutter-django,gappsexperts/cookiecutter-django
|
Implement a basic test with cookies.bake()
|
# -*- coding: utf-8 -*-
import pytest
@pytest.fixture
def context():
return {
"project_name": "My Test Project",
"repo_name": "my_test_project",
"author_name": "Test Author",
"email": "test@example.com",
"description": "A short description of the project.",
"domain_name": "example.com",
"version": "0.1.0",
"timezone": "UTC",
"now": "2015/01/13",
"year": "2015"
}
def test_default_configuration(cookies, context):
result = cookies.bake(extra_context=context)
assert result.exit_code == 0
|
<commit_before><commit_msg>Implement a basic test with cookies.bake()<commit_after>
|
# -*- coding: utf-8 -*-
import pytest
@pytest.fixture
def context():
return {
"project_name": "My Test Project",
"repo_name": "my_test_project",
"author_name": "Test Author",
"email": "test@example.com",
"description": "A short description of the project.",
"domain_name": "example.com",
"version": "0.1.0",
"timezone": "UTC",
"now": "2015/01/13",
"year": "2015"
}
def test_default_configuration(cookies, context):
result = cookies.bake(extra_context=context)
assert result.exit_code == 0
|
Implement a basic test with cookies.bake()# -*- coding: utf-8 -*-
import pytest
@pytest.fixture
def context():
return {
"project_name": "My Test Project",
"repo_name": "my_test_project",
"author_name": "Test Author",
"email": "test@example.com",
"description": "A short description of the project.",
"domain_name": "example.com",
"version": "0.1.0",
"timezone": "UTC",
"now": "2015/01/13",
"year": "2015"
}
def test_default_configuration(cookies, context):
result = cookies.bake(extra_context=context)
assert result.exit_code == 0
|
<commit_before><commit_msg>Implement a basic test with cookies.bake()<commit_after># -*- coding: utf-8 -*-
import pytest
@pytest.fixture
def context():
return {
"project_name": "My Test Project",
"repo_name": "my_test_project",
"author_name": "Test Author",
"email": "test@example.com",
"description": "A short description of the project.",
"domain_name": "example.com",
"version": "0.1.0",
"timezone": "UTC",
"now": "2015/01/13",
"year": "2015"
}
def test_default_configuration(cookies, context):
result = cookies.bake(extra_context=context)
assert result.exit_code == 0
|
|
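The result object from pytest-cookies carries more than exit_code, so the test above extends naturally. A follow-up assertion sketch against the same context fixture (using the result.project path object that pytest-cookies exposed at the time):

def test_bake_creates_project_directory(cookies, context):
    result = cookies.bake(extra_context=context)

    assert result.exit_code == 0
    assert result.exception is None
    # The rendered directory is named after repo_name from the context.
    assert result.project.basename == "my_test_project"
    assert result.project.isdir()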
20b82906da1ab9601a6525cb14b56cafb91e91e0
|
tutorials/graphs/timeSeriesFromCSV.py
|
tutorials/graphs/timeSeriesFromCSV.py
|
## \file
## \ingroup tutorial_graphs
## \notebook -js
## This macro illustrates the use of the time axis on a TGraph
## with data read from a text file containing the SWAN usage
## statistics during July 2017.
##
## \macro_image
## \macro_code
##
## \authors Danilo Piparo, Olivier Couet
import ROOT
# Open the data file. This csv contains the usage statistics of a CERN IT
# service, SWAN, during two weeks. We would like to plot this data with
# ROOT to draw some conclusions from it.
dirName = str(ROOT.gROOT.GetTutorialDir())
dirName += "/graphs/"
dirName= dirName.replace("/./", "/")
inputFileName = "%s/SWAN2017.dat" %dirName
# Create the time graph
g = ROOT.TGraph()
g.SetTitle("SWAN Users during July 2017;Time;Number of Users")
# Read the data and fill the graph with time along the X axis and number
# of users along the Y axis
lines = open(inputFileName, "r").readlines()
for i, line in enumerate(lines):
d, h, value = line.split()
g.SetPoint(i, ROOT.TDatime("%s %s" %(d,h)).Convert(), float(value))
# Draw the graph
c = ROOT.TCanvas("c", "c", 950, 500)
c.SetLeftMargin(0.07)
c.SetRightMargin(0.04)
c.SetGrid()
g.SetLineWidth(3)
g.SetLineColor(ROOT.kBlue)
g.Draw("al")
g.GetYaxis().CenterTitle()
# Make the X axis labelled with time
xaxis = g.GetXaxis()
xaxis.SetTimeDisplay(1)
xaxis.CenterTitle()
xaxis.SetTimeFormat("%a %d")
xaxis.SetTimeOffset(0)
xaxis.SetNdivisions(-219)
xaxis.SetLimits(ROOT.TDatime(2017, 7, 3, 0, 0, 0).Convert(), ROOT.TDatime(2017, 7, 22, 0, 0, 0).Convert())
xaxis.SetLabelSize(0.025)
xaxis.CenterLabels()
|
Add Python tutorial about time series
|
Add Python tutorial about time series
|
Python
|
lgpl-2.1
|
olifre/root,root-mirror/root,root-mirror/root,root-mirror/root,olifre/root,root-mirror/root,karies/root,root-mirror/root,olifre/root,root-mirror/root,root-mirror/root,karies/root,olifre/root,karies/root,karies/root,olifre/root,olifre/root,karies/root,karies/root,root-mirror/root,karies/root,olifre/root,karies/root,olifre/root,olifre/root,olifre/root,karies/root,root-mirror/root,karies/root,olifre/root,karies/root,root-mirror/root,root-mirror/root
|
Add Python tutorial about time series
|
## \file
## \ingroup tutorial_graphs
## \notebook -js
## This macro illustrates the use of the time axis on a TGraph
## with data read from a text file containing the SWAN usage
## statistics during July 2017.
##
## \macro_image
## \macro_code
##
## \authors Danilo Piparo, Olivier Couet
import ROOT
# Open the data file. This csv contains the usage statistics of a CERN IT
# service, SWAN, during two weeks. We would like to plot this data with
# ROOT to draw some conclusions from it.
dirName = str(ROOT.gROOT.GetTutorialDir())
dirName += "/graphs/"
dirName= dirName.replace("/./", "/")
inputFileName = "%s/SWAN2017.dat" %dirName
# Create the time graph
g = ROOT.TGraph()
g.SetTitle("SWAN Users during July 2017;Time;Number of Users")
# Read the data and fill the graph with time along the X axis and number
# of users along the Y axis
lines = open(inputFileName, "r").readlines()
for i, line in enumerate(lines):
d, h, value = line.split()
g.SetPoint(i, ROOT.TDatime("%s %s" %(d,h)).Convert(), float(value))
# Draw the graph
c = ROOT.TCanvas("c", "c", 950, 500)
c.SetLeftMargin(0.07)
c.SetRightMargin(0.04)
c.SetGrid()
g.SetLineWidth(3)
g.SetLineColor(ROOT.kBlue)
g.Draw("al")
g.GetYaxis().CenterTitle()
# Make the X axis labelled with time
xaxis = g.GetXaxis()
xaxis.SetTimeDisplay(1)
xaxis.CenterTitle()
xaxis.SetTimeFormat("%a %d")
xaxis.SetTimeOffset(0)
xaxis.SetNdivisions(-219)
xaxis.SetLimits(ROOT.TDatime(2017, 7, 3, 0, 0, 0).Convert(), ROOT.TDatime(2017, 7, 22, 0, 0, 0).Convert())
xaxis.SetLabelSize(0.025)
xaxis.CenterLabels()
|
<commit_before><commit_msg>Add Python tutorial about time series<commit_after>
|
## \file
## \ingroup tutorial_graphs
## \notebook -js
## This macro illustrates the use of the time axis on a TGraph
## with data read from a text file containing the SWAN usage
## statistics during July 2017.
##
## \macro_image
## \macro_code
##
## \authors Danilo Piparo, Olivier Couet
import ROOT
# Open the data file. This csv contains the usage statistics of a CERN IT
# service, SWAN, during two weeks. We would like to plot this data with
# ROOT to draw some conclusions from it.
dirName = str(ROOT.gROOT.GetTutorialDir())
dirName += "/graphs/"
dirName= dirName.replace("/./", "/")
inputFileName = "%s/SWAN2017.dat" %dirName
# Create the time graph
g = ROOT.TGraph()
g.SetTitle("SWAN Users during July 2017;Time;Number of Users")
# Read the data and fill the graph with time along the X axis and number
# of users along the Y axis
lines = open(inputFileName, "r").readlines()
for i, line in enumerate(lines):
d, h, value = line.split()
g.SetPoint(i, ROOT.TDatime("%s %s" %(d,h)).Convert(), float(value))
# Draw the graph
c = ROOT.TCanvas("c", "c", 950, 500)
c.SetLeftMargin(0.07)
c.SetRightMargin(0.04)
c.SetGrid()
g.SetLineWidth(3)
g.SetLineColor(ROOT.kBlue)
g.Draw("al")
g.GetYaxis().CenterTitle()
# Make the X axis labelled with time
xaxis = g.GetXaxis()
xaxis.SetTimeDisplay(1)
xaxis.CenterTitle()
xaxis.SetTimeFormat("%a %d")
xaxis.SetTimeOffset(0)
xaxis.SetNdivisions(-219)
xaxis.SetLimits(ROOT.TDatime(2017, 7, 3, 0, 0, 0).Convert(), ROOT.TDatime(2017, 7, 22, 0, 0, 0).Convert())
xaxis.SetLabelSize(0.025)
xaxis.CenterLabels()
|
Add Python tutorial about time series## \file
## \ingroup tutorial_graphs
## \notebook -js
## This macro illustrates the use of the time axis on a TGraph
## with data read from a text file containing the SWAN usage
## statistics during July 2017.
##
## \macro_image
## \macro_code
##
## \authors Danilo Piparo, Olivier Couet
import ROOT
# Open the data file. This csv contains the usage statistics of a CERN IT
# service, SWAN, during two weeks. We would like to plot this data with
# ROOT to draw some conclusions from it.
dirName = str(ROOT.gROOT.GetTutorialDir())
dirName += "/graphs/"
dirName= dirName.replace("/./", "/")
inputFileName = "%s/SWAN2017.dat" %dirName
# Create the time graph
g = ROOT.TGraph()
g.SetTitle("SWAN Users during July 2017;Time;Number of Users")
# Read the data and fill the graph with time along the X axis and number
# of users along the Y axis
lines = open(inputFileName, "r").readlines()
for i, line in enumerate(lines):
d, h, value = line.split()
g.SetPoint(i, ROOT.TDatime("%s %s" %(d,h)).Convert(), float(value))
# Draw the graph
c = ROOT.TCanvas("c", "c", 950, 500)
c.SetLeftMargin(0.07)
c.SetRightMargin(0.04)
c.SetGrid()
g.SetLineWidth(3)
g.SetLineColor(ROOT.kBlue)
g.Draw("al")
g.GetYaxis().CenterTitle()
# Make the X axis labelled with time
xaxis = g.GetXaxis()
xaxis.SetTimeDisplay(1)
xaxis.CenterTitle()
xaxis.SetTimeFormat("%a %d")
xaxis.SetTimeOffset(0)
xaxis.SetNdivisions(-219)
xaxis.SetLimits(ROOT.TDatime(2017, 7, 3, 0, 0, 0).Convert(), ROOT.TDatime(2017, 7, 22, 0, 0, 0).Convert())
xaxis.SetLabelSize(0.025)
xaxis.CenterLabels()
|
<commit_before><commit_msg>Add Python tutorial about time series<commit_after>## \file
## \ingroup tutorial_graphs
## \notebook -js
## This macro illustrates the use of the time axis on a TGraph
## with data read from a text file containing the SWAN usage
## statistics during July 2017.
##
## \macro_image
## \macro_code
##
## \authors Danilo Piparo, Olivier Couet
import ROOT
# Open the data file. This csv contains the usage statistics of a CERN IT
# service, SWAN, during two weeks. We would like to plot this data with
# ROOT to draw some conclusions from it.
dirName = str(ROOT.gROOT.GetTutorialDir())
dirName += "/graphs/"
dirName= dirName.replace("/./", "/")
inputFileName = "%s/SWAN2017.dat" %dirName
# Create the time graph
g = ROOT.TGraph()
g.SetTitle("SWAN Users during July 2017;Time;Number of Users")
# Read the data and fill the graph with time along the X axis and number
# of users along the Y axis
lines = open(inputFileName, "r").readlines()
for i, line in enumerate(lines):
d, h, value = line.split()
g.SetPoint(i, ROOT.TDatime("%s %s" %(d,h)).Convert(), float(value))
# Draw the graph
c = ROOT.TCanvas("c", "c", 950, 500)
c.SetLeftMargin(0.07)
c.SetRightMargin(0.04)
c.SetGrid()
g.SetLineWidth(3)
g.SetLineColor(ROOT.kBlue)
g.Draw("al")
g.GetYaxis().CenterTitle()
# Make the X axis labelled with time
xaxis = g.GetXaxis()
xaxis.SetTimeDisplay(1)
xaxis.CenterTitle()
xaxis.SetTimeFormat("%a %d")
xaxis.SetTimeOffset(0)
xaxis.SetNdivisions(-219)
xaxis.SetLimits(ROOT.TDatime(2017, 7, 3, 0, 0, 0).Convert(), ROOT.TDatime(2017, 7, 22, 0, 0, 0).Convert())
xaxis.SetLabelSize(0.025)
xaxis.CenterLabels()
|
|
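The pivotal step in the macro is turning each date/hour pair into the integer the time axis expects: TDatime(...).Convert() yields seconds since the Unix epoch, which is why the macro calls SetTimeOffset(0). In isolation it looks like this (a sketch assuming one whitespace-separated row of SWAN2017.dat; the real file's field formats may differ):

import ROOT

d, h, value = "2017-07-03", "09:00:00", "512"  # hypothetical row: date, hour, count

t = ROOT.TDatime("%s %s" % (d, h)).Convert()   # seconds since 1970-01-01
g = ROOT.TGraph()
g.SetPoint(0, t, float(value))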
b5142de511907167b3c9c7add5d14e6ec799633c
|
example/NaCl/dynmat.py
|
example/NaCl/dynmat.py
|
#!/usr/bin/env python
import yaml
import numpy as np
data = yaml.load(open("qpoints.yaml"))
dynmat = []
dynmat_data = data['phonon'][0]['dynamical_matrix']
for row in dynmat_data:
vals = np.reshape(row, (-1, 2))
dynmat.append(vals[:, 0] + vals[:, 1] * 1j)
dynmat = np.array(dynmat)
eigvals, eigvecs, = np.linalg.eigh(dynmat)
frequencies = np.sqrt(np.abs(eigvals.real)) * np.sign(eigvals.real)
conversion_factor_to_THz = 15.633302
print frequencies * conversion_factor_to_THz
|
Add an example script in NaCl
|
Add an example script in NaCl
|
Python
|
bsd-3-clause
|
atztogo/phonopy,atztogo/phonopy,atztogo/phonopy,atztogo/phonopy
|
Add an example script in NaCl
|
#!/usr/bin/env python
import yaml
import numpy as np
data = yaml.load(open("qpoints.yaml"))
dynmat = []
dynmat_data = data['phonon'][0]['dynamical_matrix']
for row in dynmat_data:
vals = np.reshape(row, (-1, 2))
dynmat.append(vals[:, 0] + vals[:, 1] * 1j)
dynmat = np.array(dynmat)
eigvals, eigvecs, = np.linalg.eigh(dynmat)
frequencies = np.sqrt(np.abs(eigvals.real)) * np.sign(eigvals.real)
conversion_factor_to_THz = 15.633302
print frequencies * conversion_factor_to_THz
|
<commit_before><commit_msg>Add an example script in NaCl<commit_after>
|
#!/usr/bin/env python
import yaml
import numpy as np
data = yaml.load(open("qpoints.yaml"))
dynmat = []
dynmat_data = data['phonon'][0]['dynamical_matrix']
for row in dynmat_data:
vals = np.reshape(row, (-1, 2))
dynmat.append(vals[:, 0] + vals[:, 1] * 1j)
dynmat = np.array(dynmat)
eigvals, eigvecs, = np.linalg.eigh(dynmat)
frequencies = np.sqrt(np.abs(eigvals.real)) * np.sign(eigvals.real)
conversion_factor_to_THz = 15.633302
print frequencies * conversion_factor_to_THz
|
Add an example script in NaCl#!/usr/bin/env python
import yaml
import numpy as np
data = yaml.load(open("qpoints.yaml"))
dynmat = []
dynmat_data = data['phonon'][0]['dynamical_matrix']
for row in dynmat_data:
vals = np.reshape(row, (-1, 2))
dynmat.append(vals[:, 0] + vals[:, 1] * 1j)
dynmat = np.array(dynmat)
eigvals, eigvecs, = np.linalg.eigh(dynmat)
frequencies = np.sqrt(np.abs(eigvals.real)) * np.sign(eigvals.real)
conversion_factor_to_THz = 15.633302
print frequencies * conversion_factor_to_THz
|
<commit_before><commit_msg>Add an example script in NaCl<commit_after>#!/usr/bin/env python
import yaml
import numpy as np
data = yaml.load(open("qpoints.yaml"))
dynmat = []
dynmat_data = data['phonon'][0]['dynamical_matrix']
for row in dynmat_data:
vals = np.reshape(row, (-1, 2))
dynmat.append(vals[:, 0] + vals[:, 1] * 1j)
dynmat = np.array(dynmat)
eigvals, eigvecs, = np.linalg.eigh(dynmat)
frequencies = np.sqrt(np.abs(eigvals.real)) * np.sign(eigvals.real)
conversion_factor_to_THz = 15.633302
print frequencies * conversion_factor_to_THz
|
|
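Only the last four lines of the script do real work: eigh returns the eigenvalues of the Hermitian matrix, and the sign trick reports imaginary (unstable) modes as negative frequencies. The same arithmetic on a self-contained toy matrix, written for Python 3 (the factor 15.633302 converts phonopy's default frequency units to THz):

import numpy as np

dynmat = np.array([[2.0, 1.0j],
                   [-1.0j, 2.0]])  # toy 2x2 Hermitian "dynamical matrix"

eigvals, eigvecs = np.linalg.eigh(dynmat)  # eigenvalues are 1 and 3
frequencies = np.sqrt(np.abs(eigvals.real)) * np.sign(eigvals.real)

conversion_factor_to_THz = 15.633302
print(frequencies * conversion_factor_to_THz)  # [15.6333..., 27.0777...]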
2b0e7e221bcf6b01fa3eccfe6b33bf0763613630
|
scripts/add_rtp_process_event.py
|
scripts/add_rtp_process_event.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2019 the HERA Collaboration
# Licensed under the 2-clause BSD License
"""
Script to add an RTP process event record to M&C.
"""
from __future__ import print_function, division, absolute_import
import numpy as np
import h5py
from astropy.time import Time
import hera_mc.mc as mc
parser = mc.get_mc_argument_parser()
parser.add_argument(
"filename", metavar="FILE", type=str, help="Name of the file to add an event for."
)
parser.add_argument(
"event",
metavar="EVENT",
type=str,
help=(
"Event to add for the file. Must be one of: "
'"queued", "started", "finished", "error"'
),
)
# parse args
args = parser.parse_args()
# get the obsid from the file
with h5py.File(args.filename, "r") as h5f:
time_array = h5f["Header/time_array"][()]
t0 = Time(np.unique(time_array)[0], scale="utc", format="jd")
obsid = int(np.floor(t0.gps))
# add the process event
db = mc.connect_to_mc_db(args)
with db.sessionmaker() as session:
session.add_rtp_process_event(time=Time.now(), obsid=obsid, event=args.event)
|
Add script for adding RTP process events
|
Add script for adding RTP process events
|
Python
|
bsd-2-clause
|
HERA-Team/Monitor_and_Control,HERA-Team/hera_mc,HERA-Team/hera_mc
|
Add script for adding RTP process events
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2019 the HERA Collaboration
# Licensed under the 2-clause BSD License
"""
Script to add an RTP process event record to M&C.
"""
from __future__ import print_function, division, absolute_import
import numpy as np
import h5py
from astropy.time import Time
import hera_mc.mc as mc
parser = mc.get_mc_argument_parser()
parser.add_argument(
"filename", metavar="FILE", type=str, help="Name of the file to add an event for."
)
parser.add_argument(
"event",
metavar="EVENT",
type=str,
help=(
"Event to add for the file. Must be one of: "
'"queued", "started", "finished", "error"'
),
)
# parse args
args = parser.parse_args()
# get the obsid from the file
with h5py.File(args.filename, "r") as h5f:
time_array = h5f["Header/time_array"][()]
t0 = Time(np.unique(time_array)[0], scale="utc", format="jd")
obsid = int(np.floor(t0.gps))
# add the process event
db = mc.connect_to_mc_db(args)
with db.sessionmaker() as session:
session.add_rtp_process_event(time=Time.now(), obsid=obsid, event=args.event)
|
<commit_before><commit_msg>Add script for adding RTP process events<commit_after>
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2019 the HERA Collaboration
# Licensed under the 2-clause BSD License
"""
Script to add an RTP process event record to M&C.
"""
from __future__ import print_function, division, absolute_import
import numpy as np
import h5py
from astropy.time import Time
import hera_mc.mc as mc
parser = mc.get_mc_argument_parser()
parser.add_argument(
"filename", metavar="FILE", type=str, help="Name of the file to add an event for."
)
parser.add_argument(
"event",
metavar="EVENT",
type=str,
help=(
"Event to add for the file. Must be one of: "
'"queued", "started", "finished", "error"'
),
)
# parse args
args = parser.parse_args()
# get the obsid from the file
with h5py.File(args.filename, "r") as h5f:
time_array = h5f["Header/time_array"][()]
t0 = Time(np.unique(time_array)[0], scale="utc", format="jd")
obsid = int(np.floor(t0.gps))
# add the process event
db = mc.connect_to_mc_db(args)
with db.sessionmaker() as session:
session.add_rtp_process_event(time=Time.now(), obsid=obsid, event=args.event)
|
Add script for adding RTP process events#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2019 the HERA Collaboration
# Licensed under the 2-clause BSD License
"""
Script to add an RTP process event record to M&C.
"""
from __future__ import print_function, division, absolute_import
import numpy as np
import h5py
from astropy.time import Time
import hera_mc.mc as mc
parser = mc.get_mc_argument_parser()
parser.add_argument(
"filename", metavar="FILE", type=str, help="Name of the file to add an event for."
)
parser.add_argument(
"event",
metavar="EVENT",
type=str,
help=(
"Event to add for the file. Must be one of: "
'"queued", "started", "finished", "error"'
),
)
# parse args
args = parser.parse_args()
# get the obsid from the file
with h5py.File(args.filename, "r") as h5f:
time_array = h5f["Header/time_array"][()]
t0 = Time(np.unique(time_array)[0], scale="utc", format="jd")
obsid = int(np.floor(t0.gps))
# add the process event
db = mc.connect_to_mc_db(args)
with db.sessionmaker() as session:
session.add_rtp_process_event(time=Time.now(), obsid=obsid, event=args.event)
|
<commit_before><commit_msg>Add script for adding RTP process events<commit_after>#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2019 the HERA Collaboration
# Licensed under the 2-clause BSD License
"""
Script to add an RTP process event record to M&C.
"""
from __future__ import print_function, division, absolute_import
import numpy as np
import h5py
from astropy.time import Time
import hera_mc.mc as mc
parser = mc.get_mc_argument_parser()
parser.add_argument(
"filename", metavar="FILE", type=str, help="Name of the file to add an event for."
)
parser.add_argument(
"event",
metavar="EVENT",
type=str,
help=(
"Event to add for the file. Must be one of: "
'"queued", "started", "finished", "error"'
),
)
# parse args
args = parser.parse_args()
# get the obsid from the file
with h5py.File(args.filename, "r") as h5f:
time_array = h5f["Header/time_array"][()]
t0 = Time(np.unique(time_array)[0], scale="utc", format="jd")
obsid = int(np.floor(t0.gps))
# add the process event
db = mc.connect_to_mc_db(args)
with db.sessionmaker() as session:
session.add_rtp_process_event(time=Time.now(), obsid=obsid, event=args.event)
|
|
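The obsid convention baked into this script, the floor of the GPS timestamp of the earliest time in the file, can be exercised without an HDF5 file at all. A minimal sketch with a hypothetical time_array:

import numpy as np
from astropy.time import Time

time_array = np.array([2458000.30, 2458000.25, 2458000.25])  # JDs, unsorted

t0 = Time(np.unique(time_array)[0], scale="utc", format="jd")  # np.unique sorts
obsid = int(np.floor(t0.gps))
print(obsid)  # integer GPS seconds of the first integration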
c1fe209bde5d7edf95b809ebcb00c2167d74ede5
|
quant/demo/demo_DAX.py
|
quant/demo/demo_DAX.py
|
#-*-coding:utf-8-*-
#!/usr/bin/python
# coding: UTF-8
import numpy as np
import pandas as pd
import pandas_datareader.data as web
import math
#Fetch the DAX index data from Yahoo Finance
DAX = web.DataReader(name='^GDAXI', data_source='yahoo',start = '2000-1-1')
#Inspect the data; the call above returns a pandas DataFrame
print (DAX.info())
#Plot the closing-price curve
DAX['Close'].plot(figsize=(8,5))
#Compute the daily log returns
DAX['Return'] = np.log(DAX['Close']/DAX['Close'].shift(1))
print (DAX[['Close','Return']].tail())
#Put the closing price and the daily returns in one figure
DAX[['Close','Return']].plot(subplots = True,style = 'b',figsize=(8,5))
#Take moving averages over 42- and 252-trading-day windows
DAX['42d']=pd.rolling_mean(DAX['Close'],window=42)
DAX['252d']=pd.rolling_mean(DAX['Close'],window=252)
#Plot the moving averages together with the closing price
DAX[['Close','42d','252d']].plot(figsize=(8,5))
#Compute the volatility, then annualize it via the square-root-of-time rule
DAX['Mov_Vol']=pd.rolling_std(DAX['Return'],window = 252)*math.sqrt(252)
DAX[['Close','Mov_Vol','Return']].plot(subplots = True, style = 'b',figsize = (8,7))
|
Add demo of DAX analysis and MA
|
Add demo of DAX analysis and MA
|
Python
|
apache-2.0
|
yunfeiz/py_learnt
|
Add demo of DAX analysis and MA
|
#-*-coding:utf-8-*-
#!/usr/bin/python
# coding: UTF-8
import numpy as np
import pandas as pd
import pandas_datareader.data as web
import math
#Fetch the DAX index data from Yahoo Finance
DAX = web.DataReader(name='^GDAXI', data_source='yahoo',start = '2000-1-1')
#Inspect the data; the call above returns a pandas DataFrame
print (DAX.info())
#Plot the closing-price curve
DAX['Close'].plot(figsize=(8,5))
#Compute the daily log returns
DAX['Return'] = np.log(DAX['Close']/DAX['Close'].shift(1))
print (DAX[['Close','Return']].tail())
#Put the closing price and the daily returns in one figure
DAX[['Close','Return']].plot(subplots = True,style = 'b',figsize=(8,5))
#Take moving averages over 42- and 252-trading-day windows
DAX['42d']=pd.rolling_mean(DAX['Close'],window=42)
DAX['252d']=pd.rolling_mean(DAX['Close'],window=252)
#Plot the moving averages together with the closing price
DAX[['Close','42d','252d']].plot(figsize=(8,5))
#Compute the volatility, then annualize it via the square-root-of-time rule
DAX['Mov_Vol']=pd.rolling_std(DAX['Return'],window = 252)*math.sqrt(252)
DAX[['Close','Mov_Vol','Return']].plot(subplots = True, style = 'b',figsize = (8,7))
|
<commit_before><commit_msg>Add demo of DAX analysis and MA<commit_after>
|
#-*-coding:utf-8-*-
#!/usr/bin/python
# coding: UTF-8
import numpy as np
import pandas as pd
import pandas_datareader.data as web
import math
#Fetch the DAX index data from Yahoo Finance
DAX = web.DataReader(name='^GDAXI', data_source='yahoo',start = '2000-1-1')
#Inspect the data; the call above returns a pandas DataFrame
print (DAX.info())
#Plot the closing-price curve
DAX['Close'].plot(figsize=(8,5))
#Compute the daily log returns
DAX['Return'] = np.log(DAX['Close']/DAX['Close'].shift(1))
print (DAX[['Close','Return']].tail())
#Put the closing price and the daily returns in one figure
DAX[['Close','Return']].plot(subplots = True,style = 'b',figsize=(8,5))
#Take moving averages over 42- and 252-trading-day windows
DAX['42d']=pd.rolling_mean(DAX['Close'],window=42)
DAX['252d']=pd.rolling_mean(DAX['Close'],window=252)
#Plot the moving averages together with the closing price
DAX[['Close','42d','252d']].plot(figsize=(8,5))
#Compute the volatility, then annualize it via the square-root-of-time rule
DAX['Mov_Vol']=pd.rolling_std(DAX['Return'],window = 252)*math.sqrt(252)
DAX[['Close','Mov_Vol','Return']].plot(subplots = True, style = 'b',figsize = (8,7))
|
Add demo of DAX analysis and MA#-*-coding:utf-8-*-
#!/usr/bin/python
# coding: UTF-8
import numpy as np
import pandas as pd
import pandas_datareader.data as web
import math
#Fetch the DAX index data from Yahoo Finance
DAX = web.DataReader(name='^GDAXI', data_source='yahoo',start = '2000-1-1')
#Inspect the data; the call above returns a pandas DataFrame
print (DAX.info())
#Plot the closing-price curve
DAX['Close'].plot(figsize=(8,5))
#Compute the daily log returns
DAX['Return'] = np.log(DAX['Close']/DAX['Close'].shift(1))
print (DAX[['Close','Return']].tail())
#Put the closing price and the daily returns in one figure
DAX[['Close','Return']].plot(subplots = True,style = 'b',figsize=(8,5))
#Take moving averages over 42- and 252-trading-day windows
DAX['42d']=pd.rolling_mean(DAX['Close'],window=42)
DAX['252d']=pd.rolling_mean(DAX['Close'],window=252)
#Plot the moving averages together with the closing price
DAX[['Close','42d','252d']].plot(figsize=(8,5))
#Compute the volatility, then annualize it via the square-root-of-time rule
DAX['Mov_Vol']=pd.rolling_std(DAX['Return'],window = 252)*math.sqrt(252)
DAX[['Close','Mov_Vol','Return']].plot(subplots = True, style = 'b',figsize = (8,7))
|
<commit_before><commit_msg>Add demo of DAX analysis and MA<commit_after>#-*-coding:utf-8-*-
#!/usr/bin/python
# coding: UTF-8
import numpy as np
import pandas as pd
import pandas_datareader.data as web
import math
#Fetch the DAX index data from Yahoo Finance
DAX = web.DataReader(name='^GDAXI', data_source='yahoo',start = '2000-1-1')
#Inspect the data; the call above returns a pandas DataFrame
print (DAX.info())
#Plot the closing-price curve
DAX['Close'].plot(figsize=(8,5))
#Compute the daily log returns
DAX['Return'] = np.log(DAX['Close']/DAX['Close'].shift(1))
print (DAX[['Close','Return']].tail())
#Put the closing price and the daily returns in one figure
DAX[['Close','Return']].plot(subplots = True,style = 'b',figsize=(8,5))
#Take moving averages over 42- and 252-trading-day windows
DAX['42d']=pd.rolling_mean(DAX['Close'],window=42)
DAX['252d']=pd.rolling_mean(DAX['Close'],window=252)
#Plot the moving averages together with the closing price
DAX[['Close','42d','252d']].plot(figsize=(8,5))
#Compute the volatility, then annualize it via the square-root-of-time rule
DAX['Mov_Vol']=pd.rolling_std(DAX['Return'],window = 252)*math.sqrt(252)
DAX[['Close','Mov_Vol','Return']].plot(subplots = True, style = 'b',figsize = (8,7))
|
|
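Two caveats if the demo is run on a current stack: pd.rolling_mean and pd.rolling_std were deprecated in pandas 0.18 and removed in later releases, and pandas-datareader's Yahoo backend has been intermittently broken over the years. The rolling computations port directly to the Series.rolling API; an equivalent sketch, assuming a DAX DataFrame like the one above:

import math

# Modern replacements for the deprecated module-level rolling_* helpers.
DAX['42d'] = DAX['Close'].rolling(window=42).mean()
DAX['252d'] = DAX['Close'].rolling(window=252).mean()
DAX['Mov_Vol'] = DAX['Return'].rolling(window=252).std() * math.sqrt(252)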
d197fe8140d8d01e7b517fa0a6a5698548a65af8
|
scripts/dump_s_file.py
|
scripts/dump_s_file.py
|
#!/usr/bin/env python3
import sys, os
sys.path.append(os.path.join(os.path.dirname(__file__), '..', 'pccora'))
from pccora import *
import math
def dump_values(obj):
for key in obj:
print("%s -> %s" % (key, obj[key]))
def dump_array_values(obj, elapsed_time):
i = 0
for container in obj:
if i >= 2999 and i <= 3019:
ti = container['time']
p = math.exp(float(container['logarithmic_pressure'])/float(4096))
t = float(container['temperature'])
h = float(container['humidity'])
nu = container['n_data']
c = container['cycles']
print("%s\t%s\t%.2f %.1f %.1f\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s" % (i+1, (ti-elapsed_time), p, t, h, nu, container['c1'], container['c2'], container['c3'], container['c4'], container['c5'], container['c6'], container['c7'], container['c8'], c))
i = i + 1
def main():
file = '/home/kinow/Downloads/97031210.59s'
pccora_parser = PCCORAParser()
pccora_parser.parse_s_file(file)
head = pccora_parser.get_header()
ident = pccora_parser.get_identification()
elapsed_time = ident['time_elapsed']
syspar = pccora_parser.get_syspar()
hires_data = pccora_parser.get_hires_data()
dump_array_values(hires_data, elapsed_time)
print("\n")
if __name__ == '__main__':
main()
|
Add script to dump S-file PC-CORA data
|
Add script to dump S-file PC-CORA data
|
Python
|
mit
|
kinow/pccora
|
Add script to dump S-file PC-CORA data
|
#!/usr/bin/env python3
import sys, os
sys.path.append(os.path.join(os.path.dirname(__file__), '..', 'pccora'))
from pccora import *
import math
def dump_values(obj):
for key in obj:
print("%s -> %s" % (key, obj[key]))
def dump_array_values(obj, elapsed_time):
i = 0
for container in obj:
if i >= 2999 and i <= 3019:
ti = container['time']
p = math.exp(float(container['logarithmic_pressure'])/float(4096))
t = float(container['temperature'])
h = float(container['humidity'])
nu = container['n_data']
c = container['cycles']
print("%s\t%s\t%.2f %.1f %.1f\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s" % (i+1, (ti-elapsed_time), p, t, h, nu, container['c1'], container['c2'], container['c3'], container['c4'], container['c5'], container['c6'], container['c7'], container['c8'], c))
i = i + 1
def main():
file = '/home/kinow/Downloads/97031210.59s'
pccora_parser = PCCORAParser()
pccora_parser.parse_s_file(file)
head = pccora_parser.get_header()
ident = pccora_parser.get_identification()
elapsed_time = ident['time_elapsed']
syspar = pccora_parser.get_syspar()
hires_data = pccora_parser.get_hires_data()
dump_array_values(hires_data, elapsed_time)
print("\n")
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add script to dump S-file PC-CORA data<commit_after>
|
#!/usr/bin/env python3
import sys, os
sys.path.append(os.path.join(os.path.dirname(__file__), '..', 'pccora'))
from pccora import *
import math
def dump_values(obj):
for key in obj:
print("%s -> %s" % (key, obj[key]))
def dump_array_values(obj, elapsed_time):
i = 0
for container in obj:
if i >= 2999 and i <= 3019:
ti = container['time']
p = math.exp(float(container['logarithmic_pressure'])/float(4096))
t = float(container['temperature'])
h = float(container['humidity'])
nu = container['n_data']
c = container['cycles']
print("%s\t%s\t%.2f %.1f %.1f\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s" % (i+1, (ti-elapsed_time), p, t, h, nu, container['c1'], container['c2'], container['c3'], container['c4'], container['c5'], container['c6'], container['c7'], container['c8'], c))
i = i + 1
def main():
file = '/home/kinow/Downloads/97031210.59s'
pccora_parser = PCCORAParser()
pccora_parser.parse_s_file(file)
head = pccora_parser.get_header()
ident = pccora_parser.get_identification()
elapsed_time = ident['time_elapsed']
syspar = pccora_parser.get_syspar()
hires_data = pccora_parser.get_hires_data()
dump_array_values(hires_data, elapsed_time)
print("\n")
if __name__ == '__main__':
main()
|
Add script to dump S-file PC-CORA data#!/usr/bin/env python3
import sys, os
sys.path.append(os.path.join(os.path.dirname(__file__), '..', 'pccora'))
from pccora import *
import math
def dump_values(obj):
for key in obj:
print("%s -> %s" % (key, obj[key]))
def dump_array_values(obj, elapsed_time):
i = 0
for container in obj:
if i >= 2999 and i <= 3019:
ti = container['time']
p = math.exp(float(container['logarithmic_pressure'])/float(4096))
t = float(container['temperature'])
h = float(container['humidity'])
nu = container['n_data']
c = container['cycles']
print("%s\t%s\t%.2f %.1f %.1f\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s" % (i+1, (ti-elapsed_time), p, t, h, nu, container['c1'], container['c2'], container['c3'], container['c4'], container['c5'], container['c6'], container['c7'], container['c8'], c))
i = i + 1
def main():
file = '/home/kinow/Downloads/97031210.59s'
pccora_parser = PCCORAParser()
pccora_parser.parse_s_file(file)
head = pccora_parser.get_header()
ident = pccora_parser.get_identification()
elapsed_time = ident['time_elapsed']
syspar = pccora_parser.get_syspar()
hires_data = pccora_parser.get_hires_data()
dump_array_values(hires_data, elapsed_time)
print("\n")
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add script to dump S-file PC-CORA data<commit_after>#!/usr/bin/env python3
import sys, os
sys.path.append(os.path.join(os.path.dirname(__file__), '..', 'pccora'))
from pccora import *
import math
def dump_values(obj):
for key in obj:
print("%s -> %s" % (key, obj[key]))
def dump_array_values(obj, elapsed_time):
i = 0
for container in obj:
if i >= 2999 and i <= 3019:
ti = container['time']
p = math.exp(float(container['logarithmic_pressure'])/float(4096))
t = float(container['temperature'])
h = float(container['humidity'])
nu = container['n_data']
c = container['cycles']
print("%s\t%s\t%.2f %.1f %.1f\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s" % (i+1, (ti-elapsed_time), p, t, h, nu, container['c1'], container['c2'], container['c3'], container['c4'], container['c5'], container['c6'], container['c7'], container['c8'], c))
i = i + 1
def main():
file = '/home/kinow/Downloads/97031210.59s'
pccora_parser = PCCORAParser()
pccora_parser.parse_s_file(file)
head = pccora_parser.get_header()
ident = pccora_parser.get_identification()
elapsed_time = ident['time_elapsed']
syspar = pccora_parser.get_syspar()
hires_data = pccora_parser.get_hires_data()
dump_array_values(hires_data, elapsed_time)
print("\n")
if __name__ == '__main__':
main()
|
|
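The only decoding in the dump is the pressure: the file stores ln(p) scaled by 4096, so p = exp(raw / 4096). A tiny worked sketch (the raw value here is made up, and hectopascals is an assumption for a radiosonde record):

import math

raw_logarithmic_pressure = 28340                        # hypothetical stored value
pressure = math.exp(raw_logarithmic_pressure / 4096.0)
print("%.2f" % pressure)                                # ~1011.25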
0ea2108f1cb4ab889e780fdff2ad0730290950e0
|
tests/basics/gen_yield_from_stopped.py
|
tests/basics/gen_yield_from_stopped.py
|
# Yielding from stopped generator is ok and results in None
def gen():
return 1
# This yield is just to make this a generator
yield
f = gen()
def run():
print((yield from f))
print((yield from f))
print((yield from f))
try:
next(run())
except StopIteration:
print("StopIteration")
|
Add testcase for yielding from a stopped generator.
|
tests: Add testcase for yielding from a stopped generator.
|
Python
|
mit
|
alex-robbins/micropython,cwyark/micropython,MrSurly/micropython-esp32,bvernoux/micropython,selste/micropython,Peetz0r/micropython-esp32,ryannathans/micropython,apel/apel,PappaPeppar/micropython,hiway/micropython,pozetroninc/micropython,ryannathans/micropython,matthewelse/micropython,trezor/micropython,chrisdearman/micropython,deshipu/micropython,cwyark/micropython,jmarcelino/pycom-micropython,turbinenreiter/micropython,AriZuu/micropython,toolmacher/micropython,AriZuu/micropython,AriZuu/micropython,Timmenem/micropython,pozetroninc/micropython,Timmenem/micropython,hosaka/micropython,puuu/micropython,matthewelse/micropython,oopy/micropython,drrk/micropython,Peetz0r/micropython-esp32,deshipu/micropython,dxxb/micropython,drrk/micropython,toolmacher/micropython,infinnovation/micropython,swegener/micropython,swegener/micropython,praemdonck/micropython,pozetroninc/micropython,chrisdearman/micropython,adafruit/circuitpython,cwyark/micropython,MrSurly/micropython,dxxb/micropython,oopy/micropython,pfalcon/micropython,emfcamp/micropython,MrSurly/micropython-esp32,dxxb/micropython,micropython/micropython-esp32,MrSurly/micropython-esp32,SHA2017-badge/micropython-esp32,mpalomer/micropython,hiway/micropython,deshipu/micropython,misterdanb/micropython,tobbad/micropython,redbear/micropython,bvernoux/micropython,TDAbboud/micropython,pfalcon/micropython,infinnovation/micropython,alex-march/micropython,emfcamp/micropython,alex-march/micropython,selste/micropython,swegener/micropython,adafruit/circuitpython,adafruit/circuitpython,SHA2017-badge/micropython-esp32,ganshun666/micropython,TDAbboud/micropython,torwag/micropython,puuu/micropython,ganshun666/micropython,dxxb/micropython,PappaPeppar/micropython,TDAbboud/micropython,micropython/micropython-esp32,dinau/micropython,misterdanb/micropython,turbinenreiter/micropython,misterdanb/micropython,redbear/micropython,SHA2017-badge/micropython-esp32,ganshun666/micropython,Peetz0r/micropython-esp32,jmarcelino/pycom-micropython,praemdonck/micropython,toolmacher/micropython,tralamazza/micropython,mpalomer/micropython,MrSurly/micropython,chrisdearman/micropython,praemdonck/micropython,MrSurly/micropython,tobbad/micropython,deshipu/micropython,oopy/micropython,ganshun666/micropython,ryannathans/micropython,infinnovation/micropython,redbear/micropython,kerneltask/micropython,lowRISC/micropython,tuc-osg/micropython,lowRISC/micropython,cwyark/micropython,Peetz0r/micropython-esp32,adafruit/micropython,HenrikSolver/micropython,puuu/micropython,jmarcelino/pycom-micropython,dinau/micropython,alex-march/micropython,tuc-osg/micropython,trezor/micropython,pramasoul/micropython,HenrikSolver/micropython,drrk/micropython,lowRISC/micropython,mpalomer/micropython,turbinenreiter/micropython,bvernoux/micropython,blazewicz/micropython,hosaka/micropython,ryannathans/micropython,alex-robbins/micropython,jmarcelino/pycom-micropython,dmazzella/micropython,turbinenreiter/micropython,bvernoux/micropython,swegener/micropython,selste/micropython,misterdanb/micropython,alex-robbins/micropython,drrk/micropython,matthewelse/micropython,emfcamp/micropython,dinau/micropython,HenrikSolver/micropython,alex-march/micropython,oopy/micropython,tobbad/micropython,tralamazza/micropython,hiway/micropython,dmazzella/micropython,matthewelse/micropython,tralamazza/micropython,infinnovation/micropython,deshipu/micropython,selste/micropython,ryannathans/micropython,Timmenem/micropython,dinau/micropython,MrSurly/micropython,PappaPeppar/micropython,alex-robbins/micropython,hosaka/micropython,pfalcon/micropython,swegener/micropython,infinnovation/micropython,matthewelse/micropython,PappaPeppar/micropython,Timmenem/micropython,hiway/micropython,matthewelse/micropython,cwyark/micropython,pramasoul/micropython,alex-march/micropython,HenrikSolver/micropython,henriknelson/micropython,dxxb/micropython,Timmenem/micropython,Peetz0r/micropython-esp32,adafruit/micropython,mhoffma/micropython,kerneltask/micropython,micropython/micropython-esp32,adafruit/micropython,chrisdearman/micropython,praemdonck/micropython,lowRISC/micropython,henriknelson/micropython,SHA2017-badge/micropython-esp32,TDAbboud/micropython,toolmacher/micropython,redbear/micropython,emfcamp/micropython,tuc-osg/micropython,torwag/micropython,hiway/micropython,blazewicz/micropython,micropython/micropython-esp32,mhoffma/micropython,toolmacher/micropython,turbinenreiter/micropython,tobbad/micropython,tralamazza/micropython,chrisdearman/micropython,puuu/micropython,pfalcon/micropython,adafruit/circuitpython,pramasoul/micropython,trezor/micropython,TDAbboud/micropython,oopy/micropython,ganshun666/micropython,blazewicz/micropython,redbear/micropython,praemdonck/micropython,drrk/micropython,tobbad/micropython,kerneltask/micropython,selste/micropython,henriknelson/micropython,mpalomer/micropython,MrSurly/micropython-esp32,torwag/micropython,puuu/micropython,jmarcelino/pycom-micropython,PappaPeppar/micropython,tuc-osg/micropython,misterdanb/micropython,mhoffma/micropython,hosaka/micropython,blazewicz/micropython,MrSurly/micropython-esp32,alex-robbins/micropython,trezor/micropython,AriZuu/micropython,kerneltask/micropython,bvernoux/micropython,pramasoul/micropython,pozetroninc/micropython,trezor/micropython,emfcamp/micropython,mpalomer/micropython,hosaka/micropython,dmazzella/micropython,torwag/micropython,adafruit/circuitpython,adafruit/micropython,dinau/micropython,mhoffma/micropython,MrSurly/micropython,micropython/micropython-esp32,adafruit/circuitpython
|
tests: Add testcase for yielding from a stopped generator.
|
# Yielding from stopped generator is ok and results in None
def gen():
return 1
# This yield is just to make this a generator
yield
f = gen()
def run():
print((yield from f))
print((yield from f))
print((yield from f))
try:
next(run())
except StopIteration:
print("StopIteration")
|
<commit_before><commit_msg>tests: Add testcase for yielding from a stopped generator.<commit_after>
|
# Yielding from stopped generator is ok and results in None
def gen():
return 1
# This yield is just to make this a generator
yield
f = gen()
def run():
print((yield from f))
print((yield from f))
print((yield from f))
try:
next(run())
except StopIteration:
print("StopIteration")
|
tests: Add testcase for yielding from a stopped generator.# Yielding from stopped generator is ok and results in None
def gen():
return 1
# This yield is just to make this a generator
yield
f = gen()
def run():
print((yield from f))
print((yield from f))
print((yield from f))
try:
next(run())
except StopIteration:
print("StopIteration")
|
<commit_before><commit_msg>tests: Add testcase for yielding from a stopped generator.<commit_after># Yielding from stopped generator is ok and results in None
def gen():
return 1
# This yield is just to make this a generator
yield
f = gen()
def run():
print((yield from f))
print((yield from f))
print((yield from f))
try:
next(run())
except StopIteration:
print("StopIteration")
|
|
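The control flow is worth spelling out: because gen() returns before reaching its yield, the delegating generator never suspends on f at all. A compact annotated replay of the same semantics (CPython 3, which this test asserts MicroPython matches):

def g():
    return 1
    yield  # unreachable; only makes g a generator

src = g()

def delegate():
    print((yield from src))  # prints 1: the StopIteration value of g()
    print((yield from src))  # prints None: src is already exhausted
    print((yield from src))  # prints None again

try:
    next(delegate())         # delegate() finishes without ever yielding...
except StopIteration:
    print("StopIteration")   # ...so next() itself raises StopIteration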
264a67f649cab578aaafea4b23bc38b05705a1ac
|
tests/styles/document_matcher_tests.py
|
tests/styles/document_matcher_tests.py
|
from nose.tools import istest, assert_equal
from mammoth import document_matchers
@istest
def equal_to_matcher_is_case_insensitive():
matcher = document_matchers.equal_to("Heading 1")
assert_equal(True, matcher.matches("heaDING 1"))
assert_equal(False, matcher.matches("heaDING 2"))
|
Add test for making sure equal_to is case-insensitive
|
Add test for making sure equal_to is case-insensitive
|
Python
|
bsd-2-clause
|
mwilliamson/python-mammoth
|
Add test for making sure equal_to is case-insensitive
|
from nose.tools import istest, assert_equal
from mammoth import document_matchers
@istest
def equal_to_matcher_is_case_insensitive():
matcher = document_matchers.equal_to("Heading 1")
assert_equal(True, matcher.matches("heaDING 1"))
assert_equal(False, matcher.matches("heaDING 2"))
|
<commit_before><commit_msg>Add test for making sure equal_to is case-insensitive<commit_after>
|
from nose.tools import istest, assert_equal
from mammoth import document_matchers
@istest
def equal_to_matcher_is_case_insensitive():
matcher = document_matchers.equal_to("Heading 1")
assert_equal(True, matcher.matches("heaDING 1"))
assert_equal(False, matcher.matches("heaDING 2"))
|
Add test for making sure equal_to is case-insensitivefrom nose.tools import istest, assert_equal
from mammoth import document_matchers
@istest
def equal_to_matcher_is_case_insensitive():
matcher = document_matchers.equal_to("Heading 1")
assert_equal(True, matcher.matches("heaDING 1"))
assert_equal(False, matcher.matches("heaDING 2"))
|
<commit_before><commit_msg>Add test for making sure equal_to is case-insensitive<commit_after>from nose.tools import istest, assert_equal
from mammoth import document_matchers
@istest
def equal_to_matcher_is_case_insensitive():
matcher = document_matchers.equal_to("Heading 1")
assert_equal(True, matcher.matches("heaDING 1"))
assert_equal(False, matcher.matches("heaDING 2"))
|
|
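A matcher that satisfies this test only has to normalise case on both sides of the comparison. A minimal sketch (hypothetical; mammoth's real document_matchers.equal_to may carry more state):

class CaseInsensitiveEqualTo(object):
    def __init__(self, value):
        self._value = value.upper()

    def matches(self, other):
        return other.upper() == self._value

def equal_to(value):
    return CaseInsensitiveEqualTo(value)

assert equal_to("Heading 1").matches("heaDING 1")
assert not equal_to("Heading 1").matches("heaDING 2")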
78821f2df84bbb822e076fb1591dfccc09bcb43c
|
cpm_data/migrations/0004_add_seasons_data.py
|
cpm_data/migrations/0004_add_seasons_data.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.8 on 2016-08-27 22:21
from __future__ import unicode_literals
from django.db import migrations
def _get_seasons():
return '2012 2013 2014 2015 2016 2017'.split()
def add_seasons(apps, schema_editor):
Season = apps.get_model('cpm_data.Season')
Season.objects.bulk_create(
[Season(name_en=s, name_be=s, name_ru=s) for s in _get_seasons()]
)
def remove_seasons(apps, schema_editor):
Season = apps.get_model('cpm_data.Season')
Season.objects.filter(name_en__in=_get_seasons()).delete()
class Migration(migrations.Migration):
dependencies = [
('cpm_data', '0003_seasonrelatedjurymember_seasonrelatedpartner'),
]
operations = [
migrations.RunPython(add_seasons, remove_seasons),
]
|
Add migrations for adding seasons
|
Add migrations for adding seasons
|
Python
|
unlicense
|
kinaklub/next.filmfest.by,nott/next.filmfest.by,nott/next.filmfest.by,nott/next.filmfest.by,kinaklub/next.filmfest.by,kinaklub/next.filmfest.by,kinaklub/next.filmfest.by,nott/next.filmfest.by
|
Add migrations for adding seasons
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.8 on 2016-08-27 22:21
from __future__ import unicode_literals
from django.db import migrations
def _get_seasons():
return '2012 2013 2014 2015 2016 2017'.split()
def add_seasons(apps, schema_editor):
Season = apps.get_model('cpm_data.Season')
Season.objects.bulk_create(
[Season(name_en=s, name_be=s, name_ru=s) for s in _get_seasons()]
)
def remove_seasons(apps, schema_editor):
Season = apps.get_model('cpm_data.Season')
Season.objects.filter(name_en__in=_get_seasons()).delete()
class Migration(migrations.Migration):
dependencies = [
('cpm_data', '0003_seasonrelatedjurymember_seasonrelatedpartner'),
]
operations = [
migrations.RunPython(add_seasons, remove_seasons),
]
|
<commit_before><commit_msg>Add migrations for adding seasons<commit_after>
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.8 on 2016-08-27 22:21
from __future__ import unicode_literals
from django.db import migrations
def _get_seasons():
return '2012 2013 2014 2015 2016 2017'.split()
def add_seasons(apps, schema_editor):
Season = apps.get_model('cpm_data.Season')
Season.objects.bulk_create(
[Season(name_en=s, name_be=s, name_ru=s) for s in _get_seasons()]
)
def remove_seasons(apps, schema_editor):
Season = apps.get_model('cpm_data.Season')
Season.objects.filter(name_en__in=_get_seasons()).delete()
class Migration(migrations.Migration):
dependencies = [
('cpm_data', '0003_seasonrelatedjurymember_seasonrelatedpartner'),
]
operations = [
migrations.RunPython(add_seasons, remove_seasons),
]
|
Add migrations for adding seasons# -*- coding: utf-8 -*-
# Generated by Django 1.9.8 on 2016-08-27 22:21
from __future__ import unicode_literals
from django.db import migrations
def _get_seasons():
return '2012 2013 2014 2015 2016 2017'.split()
def add_seasons(apps, schema_editor):
Season = apps.get_model('cpm_data.Season')
Season.objects.bulk_create(
[Season(name_en=s, name_be=s, name_ru=s) for s in _get_seasons()]
)
def remove_seasons(apps, schema_editor):
Season = apps.get_model('cpm_data.Season')
Season.objects.filter(name_en__in=_get_seasons()).delete()
class Migration(migrations.Migration):
dependencies = [
('cpm_data', '0003_seasonrelatedjurymember_seasonrelatedpartner'),
]
operations = [
migrations.RunPython(add_seasons, remove_seasons),
]
|
<commit_before><commit_msg>Add migrations for adding seasons<commit_after># -*- coding: utf-8 -*-
# Generated by Django 1.9.8 on 2016-08-27 22:21
from __future__ import unicode_literals
from django.db import migrations
def _get_seasons():
return '2012 2013 2014 2015 2016 2017'.split()
def add_seasons(apps, schema_editor):
Season = apps.get_model('cpm_data.Season')
Season.objects.bulk_create(
[Season(name_en=s, name_be=s, name_ru=s) for s in _get_seasons()]
)
def remove_seasons(apps, schema_editor):
Season = apps.get_model('cpm_data.Season')
Season.objects.filter(name_en__in=_get_seasons()).delete()
class Migration(migrations.Migration):
dependencies = [
('cpm_data', '0003_seasonrelatedjurymember_seasonrelatedpartner'),
]
operations = [
migrations.RunPython(add_seasons, remove_seasons),
]
|
|
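A hedged variant of add_seasons that stays safe on re-runs: bulk_create inserts duplicates if the migration body is ever executed twice, whereas get_or_create is idempotent. Sketch only; field names are taken from the migration above.

def add_seasons_idempotent(apps, schema_editor):
    Season = apps.get_model('cpm_data.Season')
    for s in '2012 2013 2014 2015 2016 2017'.split():
        # get_or_create makes a second run a no-op instead of duplicating rows
        Season.objects.get_or_create(
            name_en=s, defaults={'name_be': s, 'name_ru': s})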
e477e7eefb3d562c300b366c0d3c801f2e640f13
|
tools/development/slow_query_finder.py
|
tools/development/slow_query_finder.py
|
#! /usr/bin/env python
# -.- coding: utf-8 -.-
# Zeitgeist
#
# Copyright © 2011 Collabora Ltd.
# By Trever Fischer <trever.fischer@collabora.co.uk>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 2.1 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from pyevolve import G1DList
from pyevolve import GSimpleGA
from zeitgeist.datamodel import TimeRange, StorageState, ResultType
from zeitgeist.datamodel import Event, Subject, Interpretation, Manifestation
import benchmark as engine
import time
# Chromosome to data mapping:
# 0, 1 - Timerange begin and end. If both are zero, we use timerange.always()
# 2 - The search type. Anything over 30 is a dead individual.
# 3-5 - Specify template properties. Anything besides 0 and 1 is dead.
# 3 - Specify a subject interpretation
# 4 - Specify a subject manifestation
# 5 - Specify an event actor
def buildQuery(chromosome):
storage = StorageState.Any
numResults = 10
if chromosome[0] == 0 and chromosome[1] == 0:
timerange = TimeRange.always()
else:
timerange = (chromosome[0], chromosome[1])
searchType = chromosome[2]
if searchType > 30:
return None
for c in chromosome[3:5]:
if c > 1:
#return 0
pass
eventTemplate = {}
subjectTemplate = {}
if chromosome[3] == 1:
subjectTemplate['interpretation'] = Interpretation.VIDEO
if chromosome[4] == 1:
subjectTemplate['manifestation'] = Manifestation.FILE_DATA_OBJECT
if chromosome[5] == 1:
eventTemplate['actor'] = "application://google-chrome.desktop"
templates = [Event.new_for_values(subjects=[Subject.new_for_values(**subjectTemplate)], **eventTemplate)]
return (timerange, templates, storage, numResults, searchType)
def eval_func(chromosome):
query = buildQuery(chromosome)
if query is None:
return 0
print "Testing with %r"%(query,)
start = time.time()
results = engine.find_events(*query)
return time.time() - start
genome = G1DList.G1DList(6)
genome.evaluator.set(eval_func)
ga = GSimpleGA.GSimpleGA(genome)
ga.evolve(freq_stats = 1)
print buildQuery(ga.bestIndividual())
|
Add a script that uses a genetic algorithm to find slow queries
|
Add a script that uses a genetic algorithm to find slow queries
|
Python
|
lgpl-2.1
|
freedesktop-unofficial-mirror/zeitgeist__zeitgeist,freedesktop-unofficial-mirror/zeitgeist__zeitgeist,freedesktop-unofficial-mirror/zeitgeist__zeitgeist,freedesktop-unofficial-mirror/zeitgeist__zeitgeist,freedesktop-unofficial-mirror/zeitgeist__zeitgeist
|
Add a script that uses a genetic algorithm to find slow queries
|
#! /usr/bin/env python
# -.- coding: utf-8 -.-
# Zeitgeist
#
# Copyright © 2011 Collabora Ltd.
# By Trever Fischer <trever.fischer@collabora.co.uk>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 2.1 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from pyevolve import G1DList
from pyevolve import GSimpleGA
from zeitgeist.datamodel import TimeRange, StorageState, ResultType
from zeitgeist.datamodel import Event, Subject, Interpretation, Manifestation
import benchmark as engine
import time
# Chromosome to data mapping:
# 0, 1 - Timerange begin and end. If both are zero, we use timerange.always()
# 2 - The search type. Anything over 30 is a dead individual.
# 3-5 - Specify template properties. Anything besides 0 and 1 is dead.
# 3 - Specify a subject interpretation
# 4 - Specify a subject manifestation
# 5 - Specify an event actor
def buildQuery(chromosome):
storage = StorageState.Any
numResults = 10
if chromosome[0] == 0 and chromosome[1] == 0:
timerange = TimeRange.always()
else:
timerange = (chromosome[0], chromosome[1])
searchType = chromosome[2]
if searchType > 30:
return None
for c in chromosome[3:5]:
if c > 1:
#return 0
pass
eventTemplate = {}
subjectTemplate = {}
if chromosome[3] == 1:
subjectTemplate['interpretation'] = Interpretation.VIDEO
if chromosome[4] == 1:
subjectTemplate['manifestation'] = Manifestation.FILE_DATA_OBJECT
if chromosome[5] == 1:
eventTemplate['actor'] = "application://google-chrome.desktop"
templates = [Event.new_for_values(subjects=[Subject.new_for_values(**subjectTemplate)], **eventTemplate)]
return (timerange, templates, storage, numResults, searchType)
def eval_func(chromosome):
query = buildQuery(chromosome)
if query is None:
return 0
print "Testing with %r"%(query,)
start = time.time()
results = engine.find_events(*query)
return time.time() - start
genome = G1DList.G1DList(6)
genome.evaluator.set(eval_func)
ga = GSimpleGA.GSimpleGA(genome)
ga.evolve(freq_stats = 1)
print buildQuery(ga.bestIndividual())
|
<commit_before><commit_msg>Add a script that uses a genetic algorithm to find slow queries<commit_after>
|
#! /usr/bin/env python
# -.- coding: utf-8 -.-
# Zeitgeist
#
# Copyright © 2011 Collabora Ltd.
# By Trever Fischer <trever.fischer@collabora.co.uk>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 2.1 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from pyevolve import G1DList
from pyevolve import GSimpleGA
from zeitgeist.datamodel import TimeRange, StorageState, ResultType
from zeitgeist.datamodel import Event, Subject, Interpretation, Manifestation
import benchmark as engine
import time
# Chromosome to data mapping:
# 0, 1 - Timerange begin and end. If both are zero, we use timerange.always()
# 2 - The search type. Anything over 30 is a dead individual.
# 3-5 - Specify template properties. Anything besides 0 and 1 is dead.
# 3 - Specify a subject interpretation
# 4 - Specify a subject manifestation
# 5 - Specify an event actor
def buildQuery(chromosome):
storage = StorageState.Any
numResults = 10
if chromosome[0] == 0 and chromosome[1] == 0:
timerange = TimeRange.always()
else:
timerange = (chromosome[0], chromosome[1])
searchType = chromosome[2]
if searchType > 30:
return None
for c in chromosome[3:5]:
if c > 1:
#return 0
pass
eventTemplate = {}
subjectTemplate = {}
if chromosome[3] == 1:
subjectTemplate['interpretation'] = Interpretation.VIDEO
if chromosome[4] == 1:
subjectTemplate['manifestation'] = Manifestation.FILE_DATA_OBJECT
if chromosome[5] == 1:
eventTemplate['actor'] = "application://google-chrome.desktop"
templates = [Event.new_for_values(subjects=[Subject.new_for_values(**subjectTemplate)], **eventTemplate)]
return (timerange, templates, storage, numResults, searchType)
def eval_func(chromosome):
query = buildQuery(chromosome)
if query is None:
return 0
print "Testing with %r"%(query,)
start = time.time()
results = engine.find_events(*query)
return time.time() - start
genome = G1DList.G1DList(6)
genome.evaluator.set(eval_func)
ga = GSimpleGA.GSimpleGA(genome)
ga.evolve(freq_stats = 1)
print buildQuery(ga.bestIndividual())
|
Add a script that uses a genetic algorithm to find slow queries#! /usr/bin/env python
# -.- coding: utf-8 -.-
# Zeitgeist
#
# Copyright © 2011 Collabora Ltd.
# By Trever Fischer <trever.fischer@collabora.co.uk>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 2.1 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from pyevolve import G1DList
from pyevolve import GSimpleGA
from zeitgeist.datamodel import TimeRange, StorageState, ResultType
from zeitgeist.datamodel import Event, Subject, Interpretation, Manifestation
import benchmark as engine
import time
# Chromosome to data mapping:
# 0, 1 - Timerange begin and end. If both are zero, we use timerange.always()
# 2 - The search type. Anything over 30 is a dead individual.
# 3-5 - Specify template properties. Anything besides 0 and 1 is dead.
# 3 - Specify a subject interpretation
# 4 - Specify a subject manifestation
# 5 - Specify an event actor
def buildQuery(chromosome):
storage = StorageState.Any
numResults = 10
if chromosome[0] == 0 and chromosome[1] == 0:
timerange = TimeRange.always()
else:
timerange = (chromosome[0], chromosome[1])
searchType = chromosome[2]
if searchType > 30:
return None
for c in chromosome[3:5]:
if c > 1:
#return 0
pass
eventTemplate = {}
subjectTemplate = {}
if chromosome[3] == 1:
subjectTemplate['interpretation'] = Interpretation.VIDEO
if chromosome[4] == 1:
subjectTemplate['manifestation'] = Manifestation.FILE_DATA_OBJECT
if chromosome[5] == 1:
eventTemplate['actor'] = "application://google-chrome.desktop"
templates = [Event.new_for_values(subjects=[Subject.new_for_values(**subjectTemplate)], **eventTemplate)]
return (timerange, templates, storage, numResults, searchType)
def eval_func(chromosome):
query = buildQuery(chromosome)
if query is None:
return 0
print "Testing with %r"%(query,)
start = time.time()
results = engine.find_events(*query)
return time.time() - start
genome = G1DList.G1DList(6)
genome.evaluator.set(eval_func)
ga = GSimpleGA.GSimpleGA(genome)
ga.evolve(freq_stats = 1)
print buildQuery(ga.bestIndividual())
|
<commit_before><commit_msg>Add a script that uses a genetic algorithm to find slow queries<commit_after>#! /usr/bin/env python
# -.- coding: utf-8 -.-
# Zeitgeist
#
# Copyright © 2011 Collabora Ltd.
# By Trever Fischer <trever.fischer@collabora.co.uk>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 2.1 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from pyevolve import G1DList
from pyevolve import GSimpleGA
from zeitgeist.datamodel import TimeRange, StorageState, ResultType
from zeitgeist.datamodel import Event, Subject, Interpretation, Manifestation
import benchmark as engine
import time
# Chromosome to data mapping:
# 0, 1 - Timerange begin and end. If both are zero, we use timerange.always()
# 2 - The search type. Anything over 30 is a dead individual.
# 3-5 - Specify template properties. Anything besides 0 and 1 is dead.
# 3 - Specify a subject interpretation
# 4 - Specify a subject manifestation
# 5 - Specify an event actor
def buildQuery(chromosome):
storage = StorageState.Any
numResults = 10
if chromosome[0] == 0 and chromosome[1] == 0:
timerange = TimeRange.always()
else:
timerange = (chromosome[0], chromosome[1])
searchType = chromosome[2]
if searchType > 30:
return None
for c in chromosome[3:5]:
if c > 1:
#return 0
pass
eventTemplate = {}
subjectTemplate = {}
if chromosome[3] == 1:
subjectTemplate['interpretation'] = Interpretation.VIDEO
if chromosome[4] == 1:
subjectTemplate['manifestation'] = Manifestation.FILE_DATA_OBJECT
if chromosome[5] == 1:
eventTemplate['actor'] = "application://google-chrome.desktop"
templates = [Event.new_for_values(subjects=[Subject.new_for_values(**subjectTemplate)], **eventTemplate)]
return (timerange, templates, storage, numResults, searchType)
def eval_func(chromosome):
query = buildQuery(chromosome)
if query is None:
return 0
print "Testing with %r"%(query,)
start = time.time()
results = engine.find_events(*query)
return time.time() - start
genome = G1DList.G1DList(6)
genome.evaluator.set(eval_func)
ga = GSimpleGA.GSimpleGA(genome)
ga.evolve(freq_stats = 1)
print buildQuery(ga.bestIndividual())
|
|
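For comparison, a dependency-free random-search sketch of the same idea, with no pyevolve involved. buildQuery and engine are assumed to be the objects defined in the script above, and the gene range is a guess loosely matching the chromosome mapping it documents.

import random
import time

def random_chromosome():
    # six genes, loosely matching the mapping documented above
    return [random.randint(0, 40) for _ in range(6)]

def slowest_query(trials=100):
    worst_time, worst_query = 0.0, None
    for _ in range(trials):
        query = buildQuery(random_chromosome())
        if query is None:          # dead individual, skip it
            continue
        start = time.time()
        engine.find_events(*query)
        elapsed = time.time() - start
        if elapsed > worst_time:
            worst_time, worst_query = elapsed, query
    return worst_time, worst_query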
6dca5c4564bbd2c8b6546090a646c7c841f1d7be
|
etc/theory/idealness_experiments/analysis.py
|
etc/theory/idealness_experiments/analysis.py
|
# -*- coding: utf-8 -*-
from string import printable
from generate_huffman import get_huffman_tree
def get_ideal_tree(freq_list):
freq_list = sorted(freq_list, reverse=True)
output = []
output.append('Char\t#\tHuff.codelen')
tree = []
for (current_occ, current_char) in freq_list:
if tree:
previous = tree[-1]
if current_occ == previous[1]:
tree.append((current_char, current_occ, previous[2]))
else:
tree.append((current_char, current_occ, previous[2]+1))
else:
tree.append((current_char, current_occ, 1))
for n in tree:
output.append('{}\t{}\t{}'.format(repr(n[0]), n[1], n[2]))
return '\n'.join(output)
with open('social_network_script') as f:
text = f.read()
frequencies = []
for c in printable:
if c in text:
frequencies.append((text.count(c), c))
huffman_tree = get_huffman_tree(frequencies)
with open('huffman_social_network', 'w') as f:
f.write(huffman_tree)
ideal_tree = get_ideal_tree(frequencies)
with open('ideal_social_network', 'w') as f:
f.write(ideal_tree)
|
Add idealness experiment py script
|
Add idealness experiment py script
|
Python
|
mit
|
esarafianou/rupture,dionyziz/rupture,dimkarakostas/rupture,dimkarakostas/rupture,dionyziz/rupture,dimriou/rupture,dionyziz/rupture,esarafianou/rupture,dionyziz/rupture,dimriou/rupture,dimkarakostas/rupture,dimkarakostas/rupture,dimkarakostas/rupture,dionyziz/rupture,esarafianou/rupture,dimriou/rupture,dimriou/rupture,esarafianou/rupture,dimriou/rupture
|
Add idealness experiment py script
|
# -*- coding: utf-8 -*-
from string import printable
from generate_huffman import get_huffman_tree
def get_ideal_tree(freq_list):
freq_list = sorted(freq_list, reverse=True)
output = []
output.append('Char\t#\tHuff.codelen')
tree = []
for (current_occ, current_char) in freq_list:
if tree:
previous = tree[-1]
if current_occ == previous[1]:
tree.append((current_char, current_occ, previous[2]))
else:
tree.append((current_char, current_occ, previous[2]+1))
else:
tree.append((current_char, current_occ, 1))
for n in tree:
output.append('{}\t{}\t{}'.format(repr(n[0]), n[1], n[2]))
return '\n'.join(output)
with open('social_network_script') as f:
text = f.read()
frequencies = []
for c in printable:
if c in text:
frequencies.append((text.count(c), c))
huffman_tree = get_huffman_tree(frequencies)
with open('huffman_social_network', 'w') as f:
f.write(huffman_tree)
ideal_tree = get_ideal_tree(frequencies)
with open('ideal_social_network', 'w') as f:
f.write(ideal_tree)
|
<commit_before><commit_msg>Add idealness experiment py script<commit_after>
|
# -*- coding: utf-8 -*-
from string import printable
from generate_huffman import get_huffman_tree
def get_ideal_tree(freq_list):
freq_list = sorted(freq_list, reverse=True)
output = []
output.append('Char\t#\tHuff.codelen')
tree = []
for (current_occ, current_char) in freq_list:
if tree:
previous = tree[-1]
if current_occ == previous[1]:
tree.append((current_char, current_occ, previous[2]))
else:
tree.append((current_char, current_occ, previous[2]+1))
else:
tree.append((current_char, current_occ, 1))
for n in tree:
output.append('{}\t{}\t{}'.format(repr(n[0]), n[1], n[2]))
return '\n'.join(output)
with open('social_network_script') as f:
text = f.read()
frequencies = []
for c in printable:
if c in text:
frequencies.append((text.count(c), c))
huffman_tree = get_huffman_tree(frequencies)
with open('huffman_social_network', 'w') as f:
f.write(huffman_tree)
ideal_tree = get_ideal_tree(frequencies)
with open('ideal_social_network', 'w') as f:
f.write(ideal_tree)
|
Add idealness experiment py script# -*- coding: utf-8 -*-
from string import printable
from generate_huffman import get_huffman_tree
def get_ideal_tree(freq_list):
freq_list = sorted(freq_list, reverse=True)
output = []
output.append('Char\t#\tHuff.codelen')
tree = []
for (current_occ, current_char) in freq_list:
if tree:
previous = tree[-1]
if current_occ == previous[1]:
tree.append((current_char, current_occ, previous[2]))
else:
tree.append((current_char, current_occ, previous[2]+1))
else:
tree.append((current_char, current_occ, 1))
for n in tree:
output.append('{}\t{}\t{}'.format(repr(n[0]), n[1], n[2]))
return '\n'.join(output)
with open('social_network_script') as f:
text = f.read()
frequencies = []
for c in printable:
if c in text:
frequencies.append((text.count(c), c))
huffman_tree = get_huffman_tree(frequencies)
with open('huffman_social_network', 'w') as f:
f.write(huffman_tree)
ideal_tree = get_ideal_tree(frequencies)
with open('ideal_social_network', 'w') as f:
f.write(ideal_tree)
|
<commit_before><commit_msg>Add idealness experiment py script<commit_after># -*- coding: utf-8 -*-
from string import printable
from generate_huffman import get_huffman_tree
def get_ideal_tree(freq_list):
freq_list = sorted(freq_list, reverse=True)
output = []
output.append('Char\t#\tHuff.codelen')
tree = []
for (current_occ, current_char) in freq_list:
if tree:
previous = tree[-1]
if current_occ == previous[1]:
tree.append((current_char, current_occ, previous[2]))
else:
tree.append((current_char, current_occ, previous[2]+1))
else:
tree.append((current_char, current_occ, 1))
for n in tree:
output.append('{}\t{}\t{}'.format(repr(n[0]), n[1], n[2]))
return '\n'.join(output)
with open('social_network_script') as f:
text = f.read()
frequencies = []
for c in printable:
if c in text:
frequencies.append((text.count(c), c))
huffman_tree = get_huffman_tree(frequencies)
with open('huffman_social_network', 'w') as f:
f.write(huffman_tree)
ideal_tree = get_ideal_tree(frequencies)
with open('ideal_social_network', 'w') as f:
f.write(ideal_tree)
|
|
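A small follow-up helper (sketch) for comparing the two trees the script writes out: the weighted average code length over (occurrences, code length) pairs. Parsing the tab-separated files back in is assumed, not shown.

def average_code_length(pairs):
    # pairs: iterable of (occurrences, code_length) tuples
    total = sum(occ for occ, _ in pairs)
    return sum(occ * length for occ, length in pairs) / float(total)

print(average_code_length([(50, 1), (30, 2), (20, 3)]))  # -> 1.7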
8e59a09b8270af79d2ad3a2564904137a470c628
|
events/migrations/0006_auto_20150827_2020.py
|
events/migrations/0006_auto_20150827_2020.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('events', '0005_auto_20150607_2005'),
]
operations = [
migrations.AlterField(
model_name='event',
name='image',
field=models.URLField(blank=True, null=True, max_length=400, verbose_name='Image URL'),
),
migrations.AlterField(
model_name='event',
name='keywords',
field=models.ManyToManyField(to='events.Keyword'),
),
migrations.AlterField(
model_name='keyword',
name='image',
field=models.URLField(blank=True, null=True, max_length=400, verbose_name='Image URL'),
),
migrations.AlterField(
model_name='organization',
name='image',
field=models.URLField(blank=True, null=True, max_length=400, verbose_name='Image URL'),
),
migrations.AlterField(
model_name='place',
name='image',
field=models.URLField(blank=True, null=True, max_length=400, verbose_name='Image URL'),
),
]
|
Add migration for previous commit
|
Add migration for previous commit
|
Python
|
mit
|
tuomas777/linkedevents,aapris/linkedevents,City-of-Helsinki/linkedevents,kooditiimi/linkedevents,aapris/linkedevents,kooditiimi/linkedevents,kooditiimi/linkedevents,City-of-Helsinki/linkedevents,aapris/linkedevents,kooditiimi/linkedevents,City-of-Helsinki/linkedevents,tuomas777/linkedevents,tuomas777/linkedevents
|
Add migration for previous commit
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('events', '0005_auto_20150607_2005'),
]
operations = [
migrations.AlterField(
model_name='event',
name='image',
field=models.URLField(blank=True, null=True, max_length=400, verbose_name='Image URL'),
),
migrations.AlterField(
model_name='event',
name='keywords',
field=models.ManyToManyField(to='events.Keyword'),
),
migrations.AlterField(
model_name='keyword',
name='image',
field=models.URLField(blank=True, null=True, max_length=400, verbose_name='Image URL'),
),
migrations.AlterField(
model_name='organization',
name='image',
field=models.URLField(blank=True, null=True, max_length=400, verbose_name='Image URL'),
),
migrations.AlterField(
model_name='place',
name='image',
field=models.URLField(blank=True, null=True, max_length=400, verbose_name='Image URL'),
),
]
|
<commit_before><commit_msg>Add migration for previous commit<commit_after>
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('events', '0005_auto_20150607_2005'),
]
operations = [
migrations.AlterField(
model_name='event',
name='image',
field=models.URLField(blank=True, null=True, max_length=400, verbose_name='Image URL'),
),
migrations.AlterField(
model_name='event',
name='keywords',
field=models.ManyToManyField(to='events.Keyword'),
),
migrations.AlterField(
model_name='keyword',
name='image',
field=models.URLField(blank=True, null=True, max_length=400, verbose_name='Image URL'),
),
migrations.AlterField(
model_name='organization',
name='image',
field=models.URLField(blank=True, null=True, max_length=400, verbose_name='Image URL'),
),
migrations.AlterField(
model_name='place',
name='image',
field=models.URLField(blank=True, null=True, max_length=400, verbose_name='Image URL'),
),
]
|
Add migration for previous commit# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('events', '0005_auto_20150607_2005'),
]
operations = [
migrations.AlterField(
model_name='event',
name='image',
field=models.URLField(blank=True, null=True, max_length=400, verbose_name='Image URL'),
),
migrations.AlterField(
model_name='event',
name='keywords',
field=models.ManyToManyField(to='events.Keyword'),
),
migrations.AlterField(
model_name='keyword',
name='image',
field=models.URLField(blank=True, null=True, max_length=400, verbose_name='Image URL'),
),
migrations.AlterField(
model_name='organization',
name='image',
field=models.URLField(blank=True, null=True, max_length=400, verbose_name='Image URL'),
),
migrations.AlterField(
model_name='place',
name='image',
field=models.URLField(blank=True, null=True, max_length=400, verbose_name='Image URL'),
),
]
|
<commit_before><commit_msg>Add migration for previous commit<commit_after># -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('events', '0005_auto_20150607_2005'),
]
operations = [
migrations.AlterField(
model_name='event',
name='image',
field=models.URLField(blank=True, null=True, max_length=400, verbose_name='Image URL'),
),
migrations.AlterField(
model_name='event',
name='keywords',
field=models.ManyToManyField(to='events.Keyword'),
),
migrations.AlterField(
model_name='keyword',
name='image',
field=models.URLField(blank=True, null=True, max_length=400, verbose_name='Image URL'),
),
migrations.AlterField(
model_name='organization',
name='image',
field=models.URLField(blank=True, null=True, max_length=400, verbose_name='Image URL'),
),
migrations.AlterField(
model_name='place',
name='image',
field=models.URLField(blank=True, null=True, max_length=400, verbose_name='Image URL'),
),
]
|
|
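For context, a model field declaration that would make Django generate AlterField operations of the shape above (a sketch only; the real linkedevents models may differ and need an installed-app context to import):

from django.db import models

class Event(models.Model):
    image = models.URLField(
        verbose_name='Image URL', max_length=400, blank=True, null=True)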
03f365579df63f35ccee02fa1e6186f31acdac15
|
tests/api/auth_test.py
|
tests/api/auth_test.py
|
import base64
from flask import Response
from flask.testing import FlaskClient
import pytest
import config
from skylines.api import auth
from skylines.app import SkyLines
from skylines.database import db
pytestmark = pytest.mark.usefixtures('db')
@pytest.fixture(scope='session')
def app():
app = SkyLines(config_file=config.TESTING_CONF_PATH)
db.init_app(app)
app.before_request(auth.check)
@app.route('/')
@auth.required
def index():
return 'success'
return app
def test_access_denied_without_authorization(client, default_headers):
assert isinstance(client, FlaskClient)
response = client.get('/', headers=default_headers)
assert isinstance(response, Response)
assert response.status_code == 401
def test_access_denied_with_wrong_authorization(client, default_headers):
assert isinstance(client, FlaskClient)
default_headers['Authorization'] = 'Basic ' + base64.b64encode('test:password')
response = client.get('/', headers=default_headers)
assert isinstance(response, Response)
assert response.status_code == 401
def test_access_granted_with_correct_authorization(client, default_headers, test_user):
assert isinstance(client, FlaskClient)
default_headers['Authorization'] = 'Basic ' + base64.b64encode(
test_user.email_address + ':' + test_user.original_password
)
response = client.get('/', headers=default_headers)
assert isinstance(response, Response)
assert response.status_code == 200
|
Add test for the "auth" module
|
api: Add test for the "auth" module
|
Python
|
agpl-3.0
|
RBE-Avionik/skylines,snip/skylines,kerel-fs/skylines,shadowoneau/skylines,snip/skylines,kerel-fs/skylines,shadowoneau/skylines,Turbo87/skylines,Harry-R/skylines,Harry-R/skylines,skylines-project/skylines,skylines-project/skylines,Turbo87/skylines,TobiasLohner/SkyLines,TobiasLohner/SkyLines,TobiasLohner/SkyLines,RBE-Avionik/skylines,Turbo87/skylines,snip/skylines,RBE-Avionik/skylines,kerel-fs/skylines,Harry-R/skylines,RBE-Avionik/skylines,shadowoneau/skylines,Harry-R/skylines,shadowoneau/skylines,Turbo87/skylines,skylines-project/skylines,skylines-project/skylines
|
api: Add test for the "auth" module
|
import base64
from flask import Response
from flask.testing import FlaskClient
import pytest
import config
from skylines.api import auth
from skylines.app import SkyLines
from skylines.database import db
pytestmark = pytest.mark.usefixtures('db')
@pytest.fixture(scope='session')
def app():
app = SkyLines(config_file=config.TESTING_CONF_PATH)
db.init_app(app)
app.before_request(auth.check)
@app.route('/')
@auth.required
def index():
return 'success'
return app
def test_access_denied_without_authorization(client, default_headers):
assert isinstance(client, FlaskClient)
response = client.get('/', headers=default_headers)
assert isinstance(response, Response)
assert response.status_code == 401
def test_access_denied_with_wrong_authorization(client, default_headers):
assert isinstance(client, FlaskClient)
default_headers['Authorization'] = 'Basic ' + base64.b64encode('test:password')
response = client.get('/', headers=default_headers)
assert isinstance(response, Response)
assert response.status_code == 401
def test_access_granted_with_correct_authorization(client, default_headers, test_user):
assert isinstance(client, FlaskClient)
default_headers['Authorization'] = 'Basic ' + base64.b64encode(
test_user.email_address + ':' + test_user.original_password
)
response = client.get('/', headers=default_headers)
assert isinstance(response, Response)
assert response.status_code == 200
|
<commit_before><commit_msg>api: Add test for the "auth" module<commit_after>
|
import base64
from flask import Response
from flask.testing import FlaskClient
import pytest
import config
from skylines.api import auth
from skylines.app import SkyLines
from skylines.database import db
pytestmark = pytest.mark.usefixtures('db')
@pytest.fixture(scope='session')
def app():
app = SkyLines(config_file=config.TESTING_CONF_PATH)
db.init_app(app)
app.before_request(auth.check)
@app.route('/')
@auth.required
def index():
return 'success'
return app
def test_access_denied_without_authorization(client, default_headers):
assert isinstance(client, FlaskClient)
response = client.get('/', headers=default_headers)
assert isinstance(response, Response)
assert response.status_code == 401
def test_access_denied_with_wrong_authorization(client, default_headers):
assert isinstance(client, FlaskClient)
default_headers['Authorization'] = 'Basic ' + base64.b64encode('test:password')
response = client.get('/', headers=default_headers)
assert isinstance(response, Response)
assert response.status_code == 401
def test_access_granted_with_correct_authorization(client, default_headers, test_user):
assert isinstance(client, FlaskClient)
default_headers['Authorization'] = 'Basic ' + base64.b64encode(
test_user.email_address + ':' + test_user.original_password
)
response = client.get('/', headers=default_headers)
assert isinstance(response, Response)
assert response.status_code == 200
|
api: Add test for the "auth" moduleimport base64
from flask import Response
from flask.testing import FlaskClient
import pytest
import config
from skylines.api import auth
from skylines.app import SkyLines
from skylines.database import db
pytestmark = pytest.mark.usefixtures('db')
@pytest.fixture(scope='session')
def app():
app = SkyLines(config_file=config.TESTING_CONF_PATH)
db.init_app(app)
app.before_request(auth.check)
@app.route('/')
@auth.required
def index():
return 'success'
return app
def test_access_denied_without_authorization(client, default_headers):
assert isinstance(client, FlaskClient)
response = client.get('/', headers=default_headers)
assert isinstance(response, Response)
assert response.status_code == 401
def test_access_denied_with_wrong_authorization(client, default_headers):
assert isinstance(client, FlaskClient)
default_headers['Authorization'] = 'Basic ' + base64.b64encode('test:password')
response = client.get('/', headers=default_headers)
assert isinstance(response, Response)
assert response.status_code == 401
def test_access_granted_with_correct_authorization(client, default_headers, test_user):
assert isinstance(client, FlaskClient)
default_headers['Authorization'] = 'Basic ' + base64.b64encode(
test_user.email_address + ':' + test_user.original_password
)
response = client.get('/', headers=default_headers)
assert isinstance(response, Response)
assert response.status_code == 200
|
<commit_before><commit_msg>api: Add test for the "auth" module<commit_after>import base64
from flask import Response
from flask.testing import FlaskClient
import pytest
import config
from skylines.api import auth
from skylines.app import SkyLines
from skylines.database import db
pytestmark = pytest.mark.usefixtures('db')
@pytest.fixture(scope='session')
def app():
app = SkyLines(config_file=config.TESTING_CONF_PATH)
db.init_app(app)
app.before_request(auth.check)
@app.route('/')
@auth.required
def index():
return 'success'
return app
def test_access_denied_without_authorization(client, default_headers):
assert isinstance(client, FlaskClient)
response = client.get('/', headers=default_headers)
assert isinstance(response, Response)
assert response.status_code == 401
def test_access_denied_with_wrong_authorization(client, default_headers):
assert isinstance(client, FlaskClient)
default_headers['Authorization'] = 'Basic ' + base64.b64encode('test:password')
response = client.get('/', headers=default_headers)
assert isinstance(response, Response)
assert response.status_code == 401
def test_access_granted_with_correct_authorization(client, default_headers, test_user):
assert isinstance(client, FlaskClient)
default_headers['Authorization'] = 'Basic ' + base64.b64encode(
test_user.email_address + ':' + test_user.original_password
)
response = client.get('/', headers=default_headers)
assert isinstance(response, Response)
assert response.status_code == 200
|
|
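The header construction above relies on Python 2's b64encode accepting str; a hedged helper that also works on Python 3, where b64encode requires bytes:

import base64

def basic_auth_header(username, password):
    token = base64.b64encode(
        ('%s:%s' % (username, password)).encode('utf-8')).decode('ascii')
    return 'Basic ' + token

# usage: default_headers['Authorization'] = basic_auth_header('test', 'password')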
b7c90f17465232f4c49cc1f05c614a9a17f6bece
|
tests/test_vservice.py
|
tests/test_vservice.py
|
from .base import ServiceTestCase
class VServiceTests(ServiceTestCase):
def test_custom_name(self):
self.assertEqual(self.service.name, 'VService')
self.assertEqual(self.service._name, 'VService')
self.service.name = 'MyService'
self.assertEqual(self.service.name, 'MyService')
self.assertEqual(self.service._name, 'MyService')
|
Add test to verify custom name functionality
|
Add test to verify custom name functionality
|
Python
|
bsd-3-clause
|
fmoo/sparts,djipko/sparts,bboozzoo/sparts,pshuff/sparts,djipko/sparts,facebook/sparts,pshuff/sparts,facebook/sparts,bboozzoo/sparts,fmoo/sparts
|
Add test to verify custom name functionality
|
from .base import ServiceTestCase
class VServiceTests(ServiceTestCase):
def test_custom_name(self):
self.assertEqual(self.service.name, 'VService')
self.assertEqual(self.service._name, 'VService')
self.service.name = 'MyService'
self.assertEqual(self.service.name, 'MyService')
self.assertEqual(self.service._name, 'MyService')
|
<commit_before><commit_msg>Add test to verify custom name functionality<commit_after>
|
from .base import ServiceTestCase
class VServiceTests(ServiceTestCase):
def test_custom_name(self):
self.assertEqual(self.service.name, 'VService')
self.assertEqual(self.service._name, 'VService')
self.service.name = 'MyService'
self.assertEqual(self.service.name, 'MyService')
self.assertEqual(self.service._name, 'MyService')
|
Add test to verify custom name functionalityfrom .base import ServiceTestCase
class VServiceTests(ServiceTestCase):
def test_custom_name(self):
self.assertEqual(self.service.name, 'VService')
self.assertEqual(self.service._name, 'VService')
self.service.name = 'MyService'
self.assertEqual(self.service.name, 'MyService')
self.assertEqual(self.service._name, 'MyService')
|
<commit_before><commit_msg>Add test to verify custom name functionality<commit_after>from .base import ServiceTestCase
class VServiceTests(ServiceTestCase):
def test_custom_name(self):
self.assertEqual(self.service.name, 'VService')
self.assertEqual(self.service._name, 'VService')
self.service.name = 'MyService'
self.assertEqual(self.service.name, 'MyService')
self.assertEqual(self.service._name, 'MyService')
|
|
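The behavior under test, a name property backed by _name, can be sketched independently of sparts. A hypothetical minimal version, not sparts' actual implementation:

class Named(object):  # hypothetical stand-in for the VService naming logic
    def __init__(self):
        self._name = type(self).__name__   # default name is the class name

    @property
    def name(self):
        return self._name

    @name.setter
    def name(self, value):
        self._name = value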
c5e5e34482730fa50329d513bbe4ccb2ef7d51ff
|
data_preparation/crawl_bug_page/GetBug.py
|
data_preparation/crawl_bug_page/GetBug.py
|
#!/usr/bin/python
import os
import re
import subprocess
bugIds = []
def prepareCookieData():
out = subprocess.call("curl --cookie-jar cookie.data \"https://bugzilla-hostname/index.cgi?Bugzilla_login=name&Bugzilla_password=password&GoAheadAndLogIn=LogIn\" > login.txt", shell=True)
def gerateBugFile(bugid):
out = subprocess.call("touch bugsSample/bug_" + str(bugid) + ".txt", shell=True)
out = subprocess.call("curl --cookie cookie.data \"https://bugzilla-hostname/show_bug.cgi?id=" + str(bugid) + "\" > bugsSample/bug_" + str(bugid) + ".txt ", shell=True)
def mkdir(path):
path=path.strip()
path=path.rstrip("\\")
isExists=os.path.exists(path)
if not isExists:
print path+' created.'
os.makedirs(path)
return True
else:
print path+' exist!'
return False
def regxBugId(line):
m = re.search(r'<\s*a\s.*?href\s*="show_bug\.cgi\?id=[0-9]+', line)
if m:
bid= re.search(r'[0-9]+',m.group(0))
if bid:
if bid.group(0) not in bugIds:
print bid.group(0)
gerateBugFile(bid.group(0))
bugIds.append(bid.group(0))
return True
else:
return False
else:
#print "Bug id search error!"
return False
else:
#print "Bug URL search error!"
return False
def readBugId(path):
f = open(path)
line = f.readline()
count = 0
while line:
line = f.readline()
isOK = regxBugId(line)
if isOK:
count += 1
f.close()
print "Total " + str(count) + " bugs have been store!"
mkpath="bugsSample"
buglistPath="buglist.txt"
# create directory where bugs situate
mkdir(mkpath)
prepareCookieData()
readBugId(buglistPath)
|
Implement a tool to crawl bug pages.
|
Implement a tool to crawl bug pages.
|
Python
|
apache-2.0
|
bug-analysis/DDVP
|
Implement a tool to crawl bug pages.
|
#!/usr/bin/python
import os
import re
import subprocess
bugIds = []
def prepareCookieData():
out = subprocess.call("curl --cookie-jar cookie.data \"https://bugzilla-hostname/index.cgi?Bugzilla_login=name&Bugzilla_password=password&GoAheadAndLogIn=LogIn\" > login.txt", shell=True)
def gerateBugFile(bugid):
out = subprocess.call("touch bugsSample/bug_" + str(bugid) + ".txt", shell=True)
out = subprocess.call("curl --cookie cookie.data \"https://bugzilla-hostname/show_bug.cgi?id=" + str(bugid) + "\" > bugsSample/bug_" + str(bugid) + ".txt ", shell=True)
def mkdir(path):
path=path.strip()
path=path.rstrip("\\")
isExists=os.path.exists(path)
if not isExists:
print path+' created.'
os.makedirs(path)
return True
else:
print path+' exist!'
return False
def regxBugId(line):
m = re.search(r'<\s*a\s.*?href\s*="show_bug\.cgi\?id=[0-9]+', line)
if m:
bid= re.search(r'[0-9]+',m.group(0))
if bid:
if bid.group(0) not in bugIds:
print bid.group(0)
gerateBugFile(bid.group(0))
bugIds.append(bid.group(0))
return True
else:
return False
else:
#print "Bug id search error!"
return False
else:
#print "Bug URL search error!"
return False
def readBugId(path):
f = open(path)
line = f.readline()
count = 0
while line:
line = f.readline()
isOK = regxBugId(line)
if isOK:
count += 1
f.close()
print "Total " + str(count) + " bugs have been store!"
mkpath="bugsSample"
buglistPath="buglist.txt"
# create directory where bugs situate
mkdir(mkpath)
prepareCookieData()
readBugId(buglistPath)
|
<commit_before><commit_msg>Implement a tool to crawl bug pages.<commit_after>
|
#!/usr/bin/python
import os
import re
import subprocess
bugIds = []
def prepareCookieData():
out = subprocess.call("curl --cookie-jar cookie.data \"https://bugzilla-hostname/index.cgi?Bugzilla_login=name&Bugzilla_password=password&GoAheadAndLogIn=LogIn\" > login.txt", shell=True)
def gerateBugFile(bugid):
out = subprocess.call("touch bugsSample/bug_" + str(bugid) + ".txt", shell=True)
out = subprocess.call("curl --cookie cookie.data \"https://bugzilla-hostname/show_bug.cgi?id=" + str(bugid) + "\" > bugsSample/bug_" + str(bugid) + ".txt ", shell=True)
def mkdir(path):
path=path.strip()
path=path.rstrip("\\")
isExists=os.path.exists(path)
if not isExists:
print path+' created.'
os.makedirs(path)
return True
else:
print path+' exist!'
return False
def regxBugId(line):
m = re.search(r'<\s*a\s.*?href\s*="show_bug\.cgi\?id=[0-9]+', line)
if m:
bid= re.search(r'[0-9]+',m.group(0))
if bid:
if bid.group(0) not in bugIds:
print bid.group(0)
gerateBugFile(bid.group(0))
bugIds.append(bid.group(0))
return True
else:
return False
else:
#print "Bug id search error!"
return False
else:
#print "Bug URL search error!"
return False
def readBugId(path):
f = open(path)
line = f.readline()
count = 0
while line:
line = f.readline()
isOK = regxBugId(line)
if isOK:
count += 1
f.close()
print "Total " + str(count) + " bugs have been store!"
mkpath="bugsSample"
buglistPath="buglist.txt"
# create directory where bugs situate
mkdir(mkpath)
prepareCookieData()
readBugId(buglistPath)
|
Implement a tool to crawl bug pages.#!/usr/bin/python
import os
import re
import subprocess
bugIds = []
def prepareCookieData():
out = subprocess.call("curl --cookie-jar cookie.data \"https://bugzilla-hostname/index.cgi?Bugzilla_login=name&Bugzilla_password=password&GoAheadAndLogIn=LogIn\" > login.txt", shell=True)
def gerateBugFile(bugid):
out = subprocess.call("touch bugsSample/bug_" + str(bugid) + ".txt", shell=True)
out = subprocess.call("curl --cookie cookie.data \"https://bugzilla-hostname/show_bug.cgi?id=" + str(bugid) + "\" > bugsSample/bug_" + str(bugid) + ".txt ", shell=True)
def mkdir(path):
path=path.strip()
path=path.rstrip("\\")
isExists=os.path.exists(path)
if not isExists:
print path+' created.'
os.makedirs(path)
return True
else:
print path+' exist!'
return False
def regxBugId(line):
m = re.search(r'<\s*a\s.*?href\s*="show_bug\.cgi\?id=[0-9]+', line)
if m:
bid= re.search(r'[0-9]+',m.group(0))
if bid:
if bid.group(0) not in bugIds:
print bid.group(0)
gerateBugFile(bid.group(0))
bugIds.append(bid.group(0))
return True
else:
return False
else:
#print "Bug id search error!"
return False
else:
#print "Bug URL search error!"
return False
def readBugId(path):
f = open(path)
line = f.readline()
count = 0
while line:
line = f.readline()
isOK = regxBugId(line)
if isOK:
count += 1
f.close()
print "Total " + str(count) + " bugs have been store!"
mkpath="bugsSample"
buglistPath="buglist.txt"
# create directory where bugs situate
mkdir(mkpath)
prepareCookieData()
readBugId(buglistPath)
|
<commit_before><commit_msg>Implement a tool to crawl bug pages.<commit_after>#!/usr/bin/python
import os
import re
import subprocess
bugIds = []
def prepareCookieData():
out = subprocess.call("curl --cookie-jar cookie.data \"https://bugzilla-hostname/index.cgi?Bugzilla_login=name&Bugzilla_password=password&GoAheadAndLogIn=LogIn\" > login.txt", shell=True)
def gerateBugFile(bugid):
out = subprocess.call("touch bugsSample/bug_" + str(bugid) + ".txt", shell=True)
out = subprocess.call("curl --cookie cookie.data \"https://bugzilla-hostname/show_bug.cgi?id=" + str(bugid) + "\" > bugsSample/bug_" + str(bugid) + ".txt ", shell=True)
def mkdir(path):
path=path.strip()
path=path.rstrip("\\")
isExists=os.path.exists(path)
if not isExists:
print path+' created.'
os.makedirs(path)
return True
else:
print path+' exist!'
return False
def regxBugId(line):
m = re.search(r'<\s*a\s.*?href\s*="show_bug\.cgi\?id=[0-9]+', line)
if m:
bid= re.search(r'[0-9]+',m.group(0))
if bid:
if bid.group(0) not in bugIds:
print bid.group(0)
gerateBugFile(bid.group(0))
bugIds.append(bid.group(0))
return True
else:
return False
else:
#print "Bug id search error!"
return False
else:
#print "Bug URL search error!"
return False
def readBugId(path):
f = open(path)
line = f.readline()
count = 0
while line:
line = f.readline()
isOK = regxBugId(line)
if isOK:
count += 1
f.close()
print "Total " + str(count) + " bugs have been store!"
mkpath="bugsSample"
buglistPath="buglist.txt"
# create directory where bugs situate
mkdir(mkpath)
prepareCookieData()
readBugId(buglistPath)
|
|
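A hedged alternative to shelling out to curl, using the requests library; the URL shape and the placeholder credentials are carried over from the script above.

import requests

session = requests.Session()
# log in once; the session object keeps the Bugzilla cookie afterwards
session.post('https://bugzilla-hostname/index.cgi', data={
    'Bugzilla_login': 'name',
    'Bugzilla_password': 'password',
    'GoAheadAndLogIn': 'LogIn',
})

def fetch_bug(bugid):
    resp = session.get('https://bugzilla-hostname/show_bug.cgi',
                       params={'id': bugid})
    with open('bugsSample/bug_%s.txt' % bugid, 'w') as f:
        f.write(resp.text)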
ad57bd124aea637520696857f0b9897ed28f4a96
|
2018/clone/plot-time.py
|
2018/clone/plot-time.py
|
# Helper script for making run-time plots.
#
# Requires a Python installation with the full numeric stack (Numpy, Matplotlib)
# including Seaborn (for prettier plots).
#
# Eli Bendersky [http://eli.thegreenplace.net]
# This code is in the public domain.
import numpy as np
import matplotlib.pyplot as plt
import seaborn
launch_0mb = (5.5, 22.3)
launch_2mb = (5.5, 34.5)
launch_4mb = (5.5, 44.7)
launch_8mb = (5.5, 66.8)
N = len(launch_0mb)
ind = np.arange(N) # the x locations for the groups
width = 0.13 # the width of the bars
fig, ax = plt.subplots()
rects4 = ax.bar(ind, launch_0mb, width, color='#7c9acc')
rects5 = ax.bar(ind + 1 * width, launch_2mb, width, color='#5c8add')
rects6 = ax.bar(ind + 2 * width, launch_4mb, width, color='#3c7aee')
rects7 = ax.bar(ind + 3 * width, launch_8mb, width, color='#1c6aff')
# add some text for labels, title and axes ticks
ax.set_ylabel('Launch-time (usec)', fontsize=14)
ax.set_xticks(ind + 2 * width)
ax.set_xticklabels(('thread', 'fork'), fontsize=14)
ax.legend((#rects1[0],
#rects2[0],
#rects3[0],
rects4[0],
rects5[0],
rects6[0],
rects7[0],
),
(
#'simpleinterp',
#'optinterp',
#'optinterp2',
'0 MB',
'2 MB',
'4 MB',
'8 MB',
), fontsize=14, loc='best')
fig = plt.gcf()
fig.set_tight_layout(True)
fig.set_size_inches((8, 6))
plt.savefig('plot-launch.png', dpi=80)
plt.show()
|
Add plotting of launch times
|
Add plotting of launch times
|
Python
|
unlicense
|
eliben/code-for-blog,eliben/code-for-blog,eliben/code-for-blog,eliben/code-for-blog,eliben/code-for-blog,eliben/code-for-blog,eliben/code-for-blog,eliben/code-for-blog,eliben/code-for-blog,eliben/code-for-blog,eliben/code-for-blog
|
Add plotting of launch times
|
# Helper script for making run-time plots.
#
# Requires a Python installation with the full numeric stack (Numpy, Matplotlib)
# including Seaborn (for prettier plots).
#
# Eli Bendersky [http://eli.thegreenplace.net]
# This code is in the public domain.
import numpy as np
import matplotlib.pyplot as plt
import seaborn
launch_0mb = (5.5, 22.3)
launch_2mb = (5.5, 34.5)
launch_4mb = (5.5, 44.7)
launch_8mb = (5.5, 66.8)
N = len(launch_0mb)
ind = np.arange(N) # the x locations for the groups
width = 0.13 # the width of the bars
fig, ax = plt.subplots()
rects4 = ax.bar(ind, launch_0mb, width, color='#7c9acc')
rects5 = ax.bar(ind + 1 * width, launch_2mb, width, color='#5c8add')
rects6 = ax.bar(ind + 2 * width, launch_4mb, width, color='#3c7aee')
rects7 = ax.bar(ind + 3 * width, launch_8mb, width, color='#1c6aff')
# add some text for labels, title and axes ticks
ax.set_ylabel('Launch-time (usec)', fontsize=14)
ax.set_xticks(ind + 2 * width)
ax.set_xticklabels(('thread', 'fork'), fontsize=14)
ax.legend((#rects1[0],
#rects2[0],
#rects3[0],
rects4[0],
rects5[0],
rects6[0],
rects7[0],
),
(
#'simpleinterp',
#'optinterp',
#'optinterp2',
'0 MB',
'2 MB',
'4 MB',
'8 MB',
), fontsize=14, loc='best')
fig = plt.gcf()
fig.set_tight_layout(True)
fig.set_size_inches((8, 6))
plt.savefig('plot-launch.png', dpi=80)
plt.show()
|
<commit_before><commit_msg>Add plotting of launch times<commit_after>
|
# Helper script for making run-time plots.
#
# Requires a Python installation with the full numeric stack (Numpy, Matplotlib)
# including Seaborn (for prettier plots).
#
# Eli Bendersky [http://eli.thegreenplace.net]
# This code is in the public domain.
import numpy as np
import matplotlib.pyplot as plt
import seaborn
launch_0mb = (5.5, 22.3)
launch_2mb = (5.5, 34.5)
launch_4mb = (5.5, 44.7)
launch_8mb = (5.5, 66.8)
N = len(launch_0mb)
ind = np.arange(N) # the x locations for the groups
width = 0.13 # the width of the bars
fig, ax = plt.subplots()
rects4 = ax.bar(ind, launch_0mb, width, color='#7c9acc')
rects5 = ax.bar(ind + 1 * width, launch_2mb, width, color='#5c8add')
rects6 = ax.bar(ind + 2 * width, launch_4mb, width, color='#3c7aee')
rects7 = ax.bar(ind + 3 * width, launch_8mb, width, color='#1c6aff')
# add some text for labels, title and axes ticks
ax.set_ylabel('Launch-time (usec)', fontsize=14)
ax.set_xticks(ind + 2 * width)
ax.set_xticklabels(('thread', 'fork'), fontsize=14)
ax.legend((#rects1[0],
#rects2[0],
#rects3[0],
rects4[0],
rects5[0],
rects6[0],
rects7[0],
),
(
#'simpleinterp',
#'optinterp',
#'optinterp2',
'0 MB',
'2 MB',
'4 MB',
'8 MB',
), fontsize=14, loc='best')
fig = plt.gcf()
fig.set_tight_layout(True)
fig.set_size_inches((8, 6))
plt.savefig('plot-launch.png', dpi=80)
plt.show()
|
Add plotting of launch times# Helper script for making run-time plots.
#
# Requires a Python installation with the full numeric stack (Numpy, Matplotlib)
# including Seaborn (for prettier plots).
#
# Eli Bendersky [http://eli.thegreenplace.net]
# This code is in the public domain.
import numpy as np
import matplotlib.pyplot as plt
import seaborn
launch_0mb = (5.5, 22.3)
launch_2mb = (5.5, 34.5)
launch_4mb = (5.5, 44.7)
launch_8mb = (5.5, 66.8)
N = len(launch_0mb)
ind = np.arange(N) # the x locations for the groups
width = 0.13 # the width of the bars
fig, ax = plt.subplots()
rects4 = ax.bar(ind, launch_0mb, width, color='#7c9acc')
rects5 = ax.bar(ind + 1 * width, launch_2mb, width, color='#5c8add')
rects6 = ax.bar(ind + 2 * width, launch_4mb, width, color='#3c7aee')
rects7 = ax.bar(ind + 3 * width, launch_8mb, width, color='#1c6aff')
# add some text for labels, title and axes ticks
ax.set_ylabel('Launch-time (usec)', fontsize=14)
ax.set_xticks(ind + 2 * width)
ax.set_xticklabels(('thread', 'fork'), fontsize=14)
ax.legend((#rects1[0],
#rects2[0],
#rects3[0],
rects4[0],
rects5[0],
rects6[0],
rects7[0],
),
(
#'simpleinterp',
#'optinterp',
#'optinterp2',
'0 MB',
'2 MB',
'4 MB',
'8 MB',
), fontsize=14, loc='best')
fig = plt.gcf()
fig.set_tight_layout(True)
fig.set_size_inches((8, 6))
plt.savefig('plot-launch.png', dpi=80)
plt.show()
|
<commit_before><commit_msg>Add plotting of launch times<commit_after># Helper script for making run-time plots.
#
# Requires a Python installation with the full numeric stack (Numpy, Matplotlib)
# including Seaborn (for prettier plots).
#
# Eli Bendersky [http://eli.thegreenplace.net]
# This code is in the public domain.
import numpy as np
import matplotlib.pyplot as plt
import seaborn
launch_0mb = (5.5, 22.3)
launch_2mb = (5.5, 34.5)
launch_4mb = (5.5, 44.7)
launch_8mb = (5.5, 66.8)
N = len(launch_0mb)
ind = np.arange(N) # the x locations for the groups
width = 0.13 # the width of the bars
fig, ax = plt.subplots()
rects4 = ax.bar(ind, launch_0mb, width, color='#7c9acc')
rects5 = ax.bar(ind + 1 * width, launch_2mb, width, color='#5c8add')
rects6 = ax.bar(ind + 2 * width, launch_4mb, width, color='#3c7aee')
rects7 = ax.bar(ind + 3 * width, launch_8mb, width, color='#1c6aff')
# add some text for labels, title and axes ticks
ax.set_ylabel('Launch-time (usec)', fontsize=14)
ax.set_xticks(ind + 2 * width)
ax.set_xticklabels(('thread', 'fork'), fontsize=14)
ax.legend((#rects1[0],
#rects2[0],
#rects3[0],
rects4[0],
rects5[0],
rects6[0],
rects7[0],
),
(
#'simpleinterp',
#'optinterp',
#'optinterp2',
'0 MB',
'2 MB',
'4 MB',
'8 MB',
), fontsize=14, loc='best')
fig = plt.gcf()
fig.set_tight_layout(True)
fig.set_size_inches((8, 6))
plt.savefig('plot-launch.png', dpi=80)
plt.show()
|
|
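An optional labelling helper (sketch) in the usual matplotlib pattern, in case the bar heights should be printed on the chart; call it once per rects group before plt.show():

def autolabel(ax, rects):
    for rect in rects:
        height = rect.get_height()
        ax.text(rect.get_x() + rect.get_width() / 2.0, height + 0.5,
                '%.1f' % height, ha='center', va='bottom', fontsize=10)

# e.g.:
# for group in (rects4, rects5, rects6, rects7):
#     autolabel(ax, group)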
b8b02b41ee5c5712f90c0621d5fd7130004eafb6
|
fil_finder/tests/test_profiles.py
|
fil_finder/tests/test_profiles.py
|
'''
Tests for functions in fil_finder.width_profiles.
'''
import pytest
import numpy as np
import numpy.testing as npt
from ..width_profiles.profile_line_width import walk_through_skeleton, return_ends
def make_test_skeleton(shape=(10, 10)):
skel = np.zeros(shape)
crds = np.array([[3, 3, 4], [4, 5, 6]])
crds = crds.T
walk_idx = (crds[:, 0], crds[:, 1])
skel[walk_idx] = 1
return skel, crds
def test_walk_through_skeleton():
skel, crds = make_test_skeleton()
out_crds = walk_through_skeleton(skel)
assert out_crds == list(zip(crds[:, 0], crds[:, 1]))[::-1]
def test_return_ends():
skel, crds = make_test_skeleton()
ends = return_ends(skel)
# Check both the first and last point, since the direction isn't
# really important.
if (crds[0] == ends[0]).all() or (crds[0] == ends[-1]).all():
first_end = True
else:
first_end = False
if (crds[-1] == ends[0]).all() or (crds[-1] == ends[-1]).all():
second_end = True
else:
second_end = False
if not (first_end and second_end):
raise Exception("At least one end point was not found.")
|
Add tests for end finding and walk through a skeleton
|
Add tests for end finding and walk through a skeleton
|
Python
|
mit
|
e-koch/FilFinder
|
Add tests for end finding and walk through a skeleton
|
'''
Tests for functions in fil_finder.width_profiles.
'''
import pytest
import numpy as np
import numpy.testing as npt
from ..width_profiles.profile_line_width import walk_through_skeleton, return_ends
def make_test_skeleton(shape=(10, 10)):
skel = np.zeros(shape)
crds = np.array([[3, 3, 4], [4, 5, 6]])
crds = crds.T
walk_idx = (crds[:, 0], crds[:, 1])
skel[walk_idx] = 1
return skel, crds
def test_walk_through_skeleton():
skel, crds = make_test_skeleton()
out_crds = walk_through_skeleton(skel)
assert out_crds == list(zip(crds[:, 0], crds[:, 1]))[::-1]
def test_return_ends():
skel, crds = make_test_skeleton()
ends = return_ends(skel)
# Check both the first and last point, since the direction isn't
# really important.
if (crds[0] == ends[0]).all() or (crds[0] == ends[-1]).all():
first_end = True
else:
first_end = False
if (crds[-1] == ends[0]).all() or (crds[-1] == ends[-1]).all():
second_end = True
else:
second_end = False
if not (first_end and second_end):
raise Exception("At least one end point was not found.")
|
<commit_before><commit_msg>Add tests for end finding and walk through a skeleton<commit_after>
|
'''
Tests for functions in fil_finder.width_profiles.
'''
import pytest
import numpy as np
import numpy.testing as npt
from ..width_profiles.profile_line_width import walk_through_skeleton, return_ends
def make_test_skeleton(shape=(10, 10)):
skel = np.zeros(shape)
crds = np.array([[3, 3, 4], [4, 5, 6]])
crds = crds.T
walk_idx = (crds[:, 0], crds[:, 1])
skel[walk_idx] = 1
return skel, crds
def test_walk_through_skeleton():
skel, crds = make_test_skeleton()
out_crds = walk_through_skeleton(skel)
assert out_crds == list(zip(crds[:, 0], crds[:, 1]))[::-1]
def test_return_ends():
skel, crds = make_test_skeleton()
ends = return_ends(skel)
# Check both the first and last point, since the direction isn't
# really important.
if (crds[0] == ends[0]).all() or (crds[0] == ends[-1]).all():
first_end = True
else:
first_end = False
if (crds[-1] == ends[0]).all() or (crds[-1] == ends[-1]).all():
second_end = True
else:
second_end = False
if not first_end and not second_end:
raise Exception("At least one end point was not found.")
|
Add tests for end finding and walk through a skeleton
'''
Tests for functions in fil_finder.width_profiles.
'''
import pytest
import numpy as np
import numpy.testing as npt
from ..width_profiles.profile_line_width import walk_through_skeleton, return_ends
def make_test_skeleton(shape=(10, 10)):
skel = np.zeros(shape)
crds = np.array([[3, 3, 4], [4, 5, 6]])
crds = crds.T
walk_idx = (crds[:, 0], crds[:, 1])
skel[walk_idx] = 1
return skel, crds
def test_walk_through_skeleton():
skel, crds = make_test_skeleton()
out_crds = walk_through_skeleton(skel)
assert out_crds == zip(crds[:, 0], crds[:, 1])[::-1]
def test_return_ends():
skel, crds = make_test_skeleton()
ends = return_ends(skel)
# Check both the first and last point, since the direction isn't
# really important.
if (crds[0] == ends[0]).all() or (crds[0] == ends[-1]).all():
first_end = True
else:
first_end = False
if (crds[-1] == ends[0]).all() or (crds[-1] == ends[-1]).all():
second_end = True
else:
second_end = False
if not first_end and not second_end:
raise Exception("At least one end point was not found.")
|
<commit_before><commit_msg>Add tests for end finding and walk through a skeleton<commit_after>
'''
Tests for functions in fil_finder.width_profiles.
'''
import pytest
import numpy as np
import numpy.testing as npt
from ..width_profiles.profile_line_width import walk_through_skeleton, return_ends
def make_test_skeleton(shape=(10, 10)):
skel = np.zeros(shape)
crds = np.array([[3, 3, 4], [4, 5, 6]])
crds = crds.T
walk_idx = (crds[:, 0], crds[:, 1])
skel[walk_idx] = 1
return skel, crds
def test_walk_through_skeleton():
skel, crds = make_test_skeleton()
out_crds = walk_through_skeleton(skel)
assert out_crds == zip(crds[:, 0], crds[:, 1])[::-1]
def test_return_ends():
skel, crds = make_test_skeleton()
ends = return_ends(skel)
# Check both the first and last point, since the direction isn't
# really important.
if (crds[0] == ends[0]).all() or (crds[0] == ends[-1]).all():
first_end = True
else:
first_end = False
if (crds[-1] == ends[0]).all() or (crds[-1] == ends[-1]).all():
second_end = True
else:
second_end = False
if not first_end and not second_end:
raise Exception("At least one end point was not found.")
|
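A note on the tests above: the assertion compares against zip(...)[::-1], which subscripts the result of zip, so this file assumes Python 2 (under Python 3 the same check would need list(zip(...))). For context, the end-finding behaviour that test_return_ends pins down is conventionally a neighbour-count test on the skeleton; the following is a minimal sketch of that idea, assuming scipy is available, and not claiming to be FilFinder's actual implementation:

import numpy as np
from scipy.ndimage import convolve

def find_skeleton_ends(skel):
    # In a one-pixel-wide skeleton, an end point is an "on" pixel with
    # exactly one "on" pixel among its 8-connected neighbours.
    kernel = np.array([[1, 1, 1],
                       [1, 0, 1],
                       [1, 1, 1]])
    neighbour_counts = convolve(skel.astype(int), kernel, mode='constant')
    return np.argwhere((skel > 0) & (neighbour_counts == 1))

On the three-pixel test skeleton built by make_test_skeleton this returns (3, 4) and (4, 6), i.e. the first and last coordinates that the test accepts in either order.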
|
9874b911fa04cd5b6883845ee627c7d91c6376a2
|
telemetry/telemetry/page/page_test_runner.py
|
telemetry/telemetry/page/page_test_runner.py
|
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
import os
import sys
from telemetry.core import browser_finder
from telemetry.core import browser_options
from telemetry.page import page_test
from telemetry.page import page_runner
from telemetry.page import page_set
from telemetry.test import discover
def Main(test_dir, page_set_filenames):
"""Turns a PageTest into a command-line program.
Args:
test_dir: Path to directory containing PageTests.
"""
tests = discover.Discover(test_dir,
os.path.join(test_dir, '..'),
'',
page_test.PageTest)
# Naively find the test. If we use the browser options parser, we run
# the risk of failing to parse if we use a test-specific parameter.
test_name = None
for arg in sys.argv:
if arg in tests:
test_name = arg
options = browser_options.BrowserOptions()
parser = options.CreateParser('%prog [options] <test> <page_set>')
page_runner.PageRunner.AddCommandLineOptions(parser)
test = None
if test_name is not None:
if test_name not in tests:
sys.stderr.write('No test name %s found' % test_name)
sys.exit(1)
test = tests[test_name]()
test.AddCommandLineOptions(parser)
_, args = parser.parse_args()
if test is None or len(args) != 2:
parser.print_usage()
print >> sys.stderr, 'Available tests:\n%s\n' % ',\n'.join(
sorted(tests.keys()))
print >> sys.stderr, 'Available page_sets:\n%s\n' % ',\n'.join(
sorted([os.path.relpath(f)
for f in page_set_filenames]))
sys.exit(1)
ps = page_set.PageSet.FromFile(args[1])
results = page_test.PageTestResults()
return RunTestOnPageSet(options, ps, test, results)
def RunTestOnPageSet(options, ps, test, results):
test.CustomizeBrowserOptions(options)
possible_browser = browser_finder.FindBrowser(options)
if not possible_browser:
print >> sys.stderr, """No browser found.\n
Use --browser=list to figure out which are available.\n"""
sys.exit(1)
with page_runner.PageRunner(ps) as runner:
runner.Run(options, possible_browser, test, results)
print '%i pages succeed\n' % len(results.page_successes)
if len(results.page_failures):
logging.warning('Failed pages: %s', '\n'.join(
[failure['page'].url for failure in results.page_failures]))
if len(results.skipped_pages):
logging.warning('Skipped pages: %s', '\n'.join(
[skipped['page'].url for skipped in results.skipped_pages]))
return min(255, len(results.page_failures))
|
Add a telemetry-based gpu_tests folder
|
Add a telemetry-based gpu_tests folder
BUG=143317
Review URL: https://chromiumcodereview.appspot.com/12252020
git-svn-id: de016e52bd170d2d4f2344f9bf92d50478b649e0@183915 0039d316-1c4b-4281-b951-d872f2087c98
|
Python
|
bsd-3-clause
|
sahiljain/catapult,benschmaus/catapult,benschmaus/catapult,SummerLW/Perf-Insight-Report,sahiljain/catapult,catapult-project/catapult,benschmaus/catapult,catapult-project/catapult-csm,benschmaus/catapult,sahiljain/catapult,catapult-project/catapult,catapult-project/catapult-csm,catapult-project/catapult-csm,SummerLW/Perf-Insight-Report,sahiljain/catapult,SummerLW/Perf-Insight-Report,catapult-project/catapult,benschmaus/catapult,catapult-project/catapult-csm,catapult-project/catapult,SummerLW/Perf-Insight-Report,sahiljain/catapult,SummerLW/Perf-Insight-Report,catapult-project/catapult,catapult-project/catapult-csm,SummerLW/Perf-Insight-Report,catapult-project/catapult-csm,benschmaus/catapult,sahiljain/catapult,benschmaus/catapult,catapult-project/catapult,catapult-project/catapult,catapult-project/catapult-csm
|
Add a telemetry-based gpu_tests folder
BUG=143317
Review URL: https://chromiumcodereview.appspot.com/12252020
git-svn-id: de016e52bd170d2d4f2344f9bf92d50478b649e0@183915 0039d316-1c4b-4281-b951-d872f2087c98
|
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
import os
import sys
from telemetry.core import browser_finder
from telemetry.core import browser_options
from telemetry.page import page_test
from telemetry.page import page_runner
from telemetry.page import page_set
from telemetry.test import discover
def Main(test_dir, page_set_filenames):
"""Turns a PageTest into a command-line program.
Args:
test_dir: Path to directory containing PageTests.
"""
tests = discover.Discover(test_dir,
os.path.join(test_dir, '..'),
'',
page_test.PageTest)
# Naively find the test. If we use the browser options parser, we run
# the risk of failing to parse if we use a test-specific parameter.
test_name = None
for arg in sys.argv:
if arg in tests:
test_name = arg
options = browser_options.BrowserOptions()
parser = options.CreateParser('%prog [options] <test> <page_set>')
page_runner.PageRunner.AddCommandLineOptions(parser)
test = None
if test_name is not None:
if test_name not in tests:
sys.stderr.write('No test name %s found' % test_name)
sys.exit(1)
test = tests[test_name]()
test.AddCommandLineOptions(parser)
_, args = parser.parse_args()
if test is None or len(args) != 2:
parser.print_usage()
print >> sys.stderr, 'Available tests:\n%s\n' % ',\n'.join(
sorted(tests.keys()))
print >> sys.stderr, 'Available page_sets:\n%s\n' % ',\n'.join(
sorted([os.path.relpath(f)
for f in page_set_filenames]))
sys.exit(1)
ps = page_set.PageSet.FromFile(args[1])
results = page_test.PageTestResults()
return RunTestOnPageSet(options, ps, test, results)
def RunTestOnPageSet(options, ps, test, results):
test.CustomizeBrowserOptions(options)
possible_browser = browser_finder.FindBrowser(options)
if not possible_browser:
print >> sys.stderr, """No browser found.\n
Use --browser=list to figure out which are available.\n"""
sys.exit(1)
with page_runner.PageRunner(ps) as runner:
runner.Run(options, possible_browser, test, results)
print '%i pages succeed\n' % len(results.page_successes)
if len(results.page_failures):
logging.warning('Failed pages: %s', '\n'.join(
[failure['page'].url for failure in results.page_failures]))
if len(results.skipped_pages):
logging.warning('Skipped pages: %s', '\n'.join(
[skipped['page'].url for skipped in results.skipped_pages]))
return min(255, len(results.page_failures))
|
<commit_before><commit_msg>Add a telemetry-based gpu_tests folder
BUG=143317
Review URL: https://chromiumcodereview.appspot.com/12252020
git-svn-id: de016e52bd170d2d4f2344f9bf92d50478b649e0@183915 0039d316-1c4b-4281-b951-d872f2087c98<commit_after>
|
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
import os
import sys
from telemetry.core import browser_finder
from telemetry.core import browser_options
from telemetry.page import page_test
from telemetry.page import page_runner
from telemetry.page import page_set
from telemetry.test import discover
def Main(test_dir, page_set_filenames):
"""Turns a PageTest into a command-line program.
Args:
test_dir: Path to directory containing PageTests.
"""
tests = discover.Discover(test_dir,
os.path.join(test_dir, '..'),
'',
page_test.PageTest)
# Naively find the test. If we use the browser options parser, we run
# the risk of failing to parse if we use a test-specific parameter.
test_name = None
for arg in sys.argv:
if arg in tests:
test_name = arg
options = browser_options.BrowserOptions()
parser = options.CreateParser('%prog [options] <test> <page_set>')
page_runner.PageRunner.AddCommandLineOptions(parser)
test = None
if test_name is not None:
if test_name not in tests:
sys.stderr.write('No test name %s found' % test_name)
sys.exit(1)
test = tests[test_name]()
test.AddCommandLineOptions(parser)
_, args = parser.parse_args()
if test is None or len(args) != 2:
parser.print_usage()
print >> sys.stderr, 'Available tests:\n%s\n' % ',\n'.join(
sorted(tests.keys()))
print >> sys.stderr, 'Available page_sets:\n%s\n' % ',\n'.join(
sorted([os.path.relpath(f)
for f in page_set_filenames]))
sys.exit(1)
ps = page_set.PageSet.FromFile(args[1])
results = page_test.PageTestResults()
return RunTestOnPageSet(options, ps, test, results)
def RunTestOnPageSet(options, ps, test, results):
test.CustomizeBrowserOptions(options)
possible_browser = browser_finder.FindBrowser(options)
if not possible_browser:
print >> sys.stderr, """No browser found.\n
Use --browser=list to figure out which are available.\n"""
sys.exit(1)
with page_runner.PageRunner(ps) as runner:
runner.Run(options, possible_browser, test, results)
print '%i pages succeed\n' % len(results.page_successes)
if len(results.page_failures):
logging.warning('Failed pages: %s', '\n'.join(
[failure['page'].url for failure in results.page_failures]))
if len(results.skipped_pages):
logging.warning('Skipped pages: %s', '\n'.join(
[skipped['page'].url for skipped in results.skipped_pages]))
return min(255, len(results.page_failures))
|
Add a telemetry-based gpu_tests folder
BUG=143317
Review URL: https://chromiumcodereview.appspot.com/12252020
git-svn-id: de016e52bd170d2d4f2344f9bf92d50478b649e0@183915 0039d316-1c4b-4281-b951-d872f2087c98
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
import os
import sys
from telemetry.core import browser_finder
from telemetry.core import browser_options
from telemetry.page import page_test
from telemetry.page import page_runner
from telemetry.page import page_set
from telemetry.test import discover
def Main(test_dir, page_set_filenames):
"""Turns a PageTest into a command-line program.
Args:
test_dir: Path to directory containing PageTests.
"""
tests = discover.Discover(test_dir,
os.path.join(test_dir, '..'),
'',
page_test.PageTest)
# Naively find the test. If we use the browser options parser, we run
# the risk of failing to parse if we use a test-specific parameter.
test_name = None
for arg in sys.argv:
if arg in tests:
test_name = arg
options = browser_options.BrowserOptions()
parser = options.CreateParser('%prog [options] <test> <page_set>')
page_runner.PageRunner.AddCommandLineOptions(parser)
test = None
if test_name is not None:
if test_name not in tests:
sys.stderr.write('No test name %s found' % test_name)
sys.exit(1)
test = tests[test_name]()
test.AddCommandLineOptions(parser)
_, args = parser.parse_args()
if test is None or len(args) != 2:
parser.print_usage()
print >> sys.stderr, 'Available tests:\n%s\n' % ',\n'.join(
sorted(tests.keys()))
print >> sys.stderr, 'Available page_sets:\n%s\n' % ',\n'.join(
sorted([os.path.relpath(f)
for f in page_set_filenames]))
sys.exit(1)
ps = page_set.PageSet.FromFile(args[1])
results = page_test.PageTestResults()
return RunTestOnPageSet(options, ps, test, results)
def RunTestOnPageSet(options, ps, test, results):
test.CustomizeBrowserOptions(options)
possible_browser = browser_finder.FindBrowser(options)
if not possible_browser:
print >> sys.stderr, """No browser found.\n
Use --browser=list to figure out which are available.\n"""
sys.exit(1)
with page_runner.PageRunner(ps) as runner:
runner.Run(options, possible_browser, test, results)
print '%i pages succeed\n' % len(results.page_successes)
if len(results.page_failures):
logging.warning('Failed pages: %s', '\n'.join(
[failure['page'].url for failure in results.page_failures]))
if len(results.skipped_pages):
logging.warning('Skipped pages: %s', '\n'.join(
[skipped['page'].url for skipped in results.skipped_pages]))
return min(255, len(results.page_failures))
|
<commit_before><commit_msg>Add a telemetry-based gpu_tests folder
BUG=143317
Review URL: https://chromiumcodereview.appspot.com/12252020
git-svn-id: de016e52bd170d2d4f2344f9bf92d50478b649e0@183915 0039d316-1c4b-4281-b951-d872f2087c98<commit_after># Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
import os
import sys
from telemetry.core import browser_finder
from telemetry.core import browser_options
from telemetry.page import page_test
from telemetry.page import page_runner
from telemetry.page import page_set
from telemetry.test import discover
def Main(test_dir, page_set_filenames):
"""Turns a PageTest into a command-line program.
Args:
test_dir: Path to directory containing PageTests.
"""
tests = discover.Discover(test_dir,
os.path.join(test_dir, '..'),
'',
page_test.PageTest)
# Naively find the test. If we use the browser options parser, we run
# the risk of failing to parse if we use a test-specific parameter.
test_name = None
for arg in sys.argv:
if arg in tests:
test_name = arg
options = browser_options.BrowserOptions()
parser = options.CreateParser('%prog [options] <test> <page_set>')
page_runner.PageRunner.AddCommandLineOptions(parser)
test = None
if test_name is not None:
if test_name not in tests:
sys.stderr.write('No test name %s found' % test_name)
sys.exit(1)
test = tests[test_name]()
test.AddCommandLineOptions(parser)
_, args = parser.parse_args()
if test is None or len(args) != 2:
parser.print_usage()
print >> sys.stderr, 'Available tests:\n%s\n' % ',\n'.join(
sorted(tests.keys()))
print >> sys.stderr, 'Available page_sets:\n%s\n' % ',\n'.join(
sorted([os.path.relpath(f)
for f in page_set_filenames]))
sys.exit(1)
ps = page_set.PageSet.FromFile(args[1])
results = page_test.PageTestResults()
return RunTestOnPageSet(options, ps, test, results)
def RunTestOnPageSet(options, ps, test, results):
test.CustomizeBrowserOptions(options)
possible_browser = browser_finder.FindBrowser(options)
if not possible_browser:
print >> sys.stderr, """No browser found.\n
Use --browser=list to figure out which are available.\n"""
sys.exit(1)
with page_runner.PageRunner(ps) as runner:
runner.Run(options, possible_browser, test, results)
print '%i pages succeed\n' % len(results.page_successes)
if len(results.page_failures):
logging.warning('Failed pages: %s', '\n'.join(
[failure['page'].url for failure in results.page_failures]))
if len(results.skipped_pages):
logging.warning('Skipped pages: %s', '\n'.join(
[skipped['page'].url for skipped in results.skipped_pages]))
return min(255, len(results.page_failures))
|
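One detail worth noting in the runner above: Main returns min(255, failure count) because process exit statuses are truncated to a single byte on POSIX. A hypothetical wrapper for the new gpu_tests folder could then look like the sketch below; the actual entry point is not part of this file, so the directory layout and file names here are assumptions:

import os
import sys

from telemetry.page import page_test_runner

if __name__ == '__main__':
    # Directory holding the PageTest subclasses for discover.Discover().
    test_dir = os.path.dirname(os.path.abspath(__file__))
    # Page-set files to advertise in the usage message (assumed layout).
    page_set_filenames = [os.path.join(test_dir, 'page_sets', 'example_page_set.json')]
    sys.exit(page_test_runner.Main(test_dir, page_set_filenames))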
|
cd9a26a7ffbe44b89912d7d862b394f4dfc47269
|
yithlibraryserver/oauth2/tests/test_utils.py
|
yithlibraryserver/oauth2/tests/test_utils.py
|
# Yith Library Server is a password storage server.
# Copyright (C) 2014 Lorenzo Gil Sanchez <lorenzo.gil.sanchez@gmail.com>
#
# This file is part of Yith Library Server.
#
# Yith Library Server is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Yith Library Server is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Yith Library Server. If not, see <http://www.gnu.org/licenses/>.
import unittest
from pyramid import testing
from yithlibraryserver.oauth2.utils import (
create_response,
decode_base64,
extract_params,
response_from_error,
)
class Error(object):
def __init__(self, error):
self.error = error
class ExtractParamsTests(unittest.TestCase):
def test_extract_params(self):
request = testing.DummyRequest(headers={
'wsgi.input': 'foo',
'wsgi.errors': 'none',
})
request.body = 'loren ipsum'
request.url = 'http://example.com/foo/bar'
uri, method, body, headers = extract_params(request)
self.assertEqual(uri, 'http://example.com/foo/bar')
self.assertEqual(method, 'GET')
self.assertEqual(body, 'loren ipsum')
self.assertEqual(headers, {})
def test_create_response(self):
response = create_response(200, {'Content-Type': 'text/html'}, 'body')
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.headers.items(), [
('Content-Type', 'text/html'),
('Content-Length', '4'),
])
self.assertEqual(response.body, 'body')
def test_response_from_error(self):
response = response_from_error(Error('testing error'))
self.assertEqual(response.status, '400 Bad Request')
self.assertEqual(response.body, 'Evil client is unable to send a proper request. Error is: testing error')
def test_decode_base64(self):
self.assertEqual('foobar', decode_base64('Zm9vYmFy'))
|
Add tests for oauth2 utils
|
Add tests for oauth2 utils
|
Python
|
agpl-3.0
|
lorenzogil/yith-library-server,lorenzogil/yith-library-server,lorenzogil/yith-library-server
|
Add tests for oauth2 utils
|
# Yith Library Server is a password storage server.
# Copyright (C) 2014 Lorenzo Gil Sanchez <lorenzo.gil.sanchez@gmail.com>
#
# This file is part of Yith Library Server.
#
# Yith Library Server is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Yith Library Server is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Yith Library Server. If not, see <http://www.gnu.org/licenses/>.
import unittest
from pyramid import testing
from yithlibraryserver.oauth2.utils import (
create_response,
decode_base64,
extract_params,
response_from_error,
)
class Error(object):
def __init__(self, error):
self.error = error
class ExtractParamsTests(unittest.TestCase):
def test_extract_params(self):
request = testing.DummyRequest(headers={
'wsgi.input': 'foo',
'wsgi.errors': 'none',
})
request.body = 'loren ipsum'
request.url = 'http://example.com/foo/bar'
uri, method, body, headers = extract_params(request)
self.assertEqual(uri, 'http://example.com/foo/bar')
self.assertEqual(method, 'GET')
self.assertEqual(body, 'loren ipsum')
self.assertEqual(headers, {})
def test_create_response(self):
response = create_response(200, {'Content-Type': 'text/html'}, 'body')
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.headers.items(), [
('Content-Type', 'text/html'),
('Content-Length', '4'),
])
self.assertEqual(response.body, 'body')
def test_response_from_error(self):
response = response_from_error(Error('testing error'))
self.assertEqual(response.status, '400 Bad Request')
self.assertEqual(response.body, 'Evil client is unable to send a proper request. Error is: testing error')
def test_decode_base64(self):
self.assertEqual('foobar', decode_base64('Zm9vYmFy'))
|
<commit_before><commit_msg>Add tests for oauth2 utils<commit_after>
|
# Yith Library Server is a password storage server.
# Copyright (C) 2014 Lorenzo Gil Sanchez <lorenzo.gil.sanchez@gmail.com>
#
# This file is part of Yith Library Server.
#
# Yith Library Server is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Yith Library Server is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Yith Library Server. If not, see <http://www.gnu.org/licenses/>.
import unittest
from pyramid import testing
from yithlibraryserver.oauth2.utils import (
create_response,
decode_base64,
extract_params,
response_from_error,
)
class Error(object):
def __init__(self, error):
self.error = error
class ExtractParamsTests(unittest.TestCase):
def test_extract_params(self):
request = testing.DummyRequest(headers={
'wsgi.input': 'foo',
'wsgi.errors': 'none',
})
request.body = 'loren ipsum'
request.url = 'http://example.com/foo/bar'
uri, method, body, headers = extract_params(request)
self.assertEqual(uri, 'http://example.com/foo/bar')
self.assertEqual(method, 'GET')
self.assertEqual(body, 'loren ipsum')
self.assertEqual(headers, {})
def test_create_response(self):
response = create_response(200, {'Content-Type': 'text/html'}, 'body')
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.headers.items(), [
('Content-Type', 'text/html'),
('Content-Length', '4'),
])
self.assertEqual(response.body, 'body')
def test_response_from_error(self):
response = response_from_error(Error('testing error'))
self.assertEqual(response.status, '400 Bad Request')
self.assertEqual(response.body, 'Evil client is unable to send a proper request. Error is: testing error')
def test_decode_base64(self):
self.assertEqual('foobar', decode_base64('Zm9vYmFy'))
|
Add tests for oauth2 utils
# Yith Library Server is a password storage server.
# Copyright (C) 2014 Lorenzo Gil Sanchez <lorenzo.gil.sanchez@gmail.com>
#
# This file is part of Yith Library Server.
#
# Yith Library Server is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Yith Library Server is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Yith Library Server. If not, see <http://www.gnu.org/licenses/>.
import unittest
from pyramid import testing
from yithlibraryserver.oauth2.utils import (
create_response,
decode_base64,
extract_params,
response_from_error,
)
class Error(object):
def __init__(self, error):
self.error = error
class ExtractParamsTests(unittest.TestCase):
def test_extract_params(self):
request = testing.DummyRequest(headers={
'wsgi.input': 'foo',
'wsgi.errors': 'none',
})
request.body = 'loren ipsum'
request.url = 'http://example.com/foo/bar'
uri, method, body, headers = extract_params(request)
self.assertEqual(uri, 'http://example.com/foo/bar')
self.assertEqual(method, 'GET')
self.assertEqual(body, 'loren ipsum')
self.assertEqual(headers, {})
def test_create_response(self):
response = create_response(200, {'Content-Type': 'text/html'}, 'body')
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.headers.items(), [
('Content-Type', 'text/html'),
('Content-Length', '4'),
])
self.assertEqual(response.body, 'body')
def test_response_from_error(self):
response = response_from_error(Error('testing error'))
self.assertEqual(response.status, '400 Bad Request')
self.assertEqual(response.body, 'Evil client is unable to send a proper request. Error is: testing error')
def test_decode_base64(self):
self.assertEqual('foobar', decode_base64('Zm9vYmFy'))
|
<commit_before><commit_msg>Add tests for oauth2 utils<commit_after># Yith Library Server is a password storage server.
# Copyright (C) 2014 Lorenzo Gil Sanchez <lorenzo.gil.sanchez@gmail.com>
#
# This file is part of Yith Library Server.
#
# Yith Library Server is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Yith Library Server is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Yith Library Server. If not, see <http://www.gnu.org/licenses/>.
import unittest
from pyramid import testing
from yithlibraryserver.oauth2.utils import (
create_response,
decode_base64,
extract_params,
response_from_error,
)
class Error(object):
def __init__(self, error):
self.error = error
class ExtractParamsTests(unittest.TestCase):
def test_extract_params(self):
request = testing.DummyRequest(headers={
'wsgi.input': 'foo',
'wsgi.errors': 'none',
})
request.body = 'loren ipsum'
request.url = 'http://example.com/foo/bar'
uri, method, body, headers = extract_params(request)
self.assertEqual(uri, 'http://example.com/foo/bar')
self.assertEqual(method, 'GET')
self.assertEqual(body, 'loren ipsum')
self.assertEqual(headers, {})
def test_create_response(self):
response = create_response(200, {'Content-Type': 'text/html'}, 'body')
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.headers.items(), [
('Content-Type', 'text/html'),
('Content-Length', '4'),
])
self.assertEqual(response.body, 'body')
def test_response_from_error(self):
response = response_from_error(Error('testing error'))
self.assertEqual(response.status, '400 Bad Request')
self.assertEqual(response.body, 'Evil client is unable to send a proper request. Error is: testing error')
def test_decode_base64(self):
self.assertEqual('foobar', decode_base64('Zm9vYmFy'))
|
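The final test constrains decode_base64 with a single example only. A minimal stdlib sketch consistent with it follows; this is an assumption, since the library's own helper may additionally handle padding or URL-safe alphabets:

import base64

def decode_base64(data):
    # 'Zm9vYmFy' is the base64 encoding of 'foobar'.
    return base64.b64decode(data).decode('utf-8')

assert decode_base64('Zm9vYmFy') == 'foobar'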
|
c0e9a1a741a16548a466b427338fab5ac8603537
|
tests/test_main.py
|
tests/test_main.py
|
import copy
import os
from ecs_deplojo import main
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
def test_register_task_definitions(cluster):
task_definitions = {
"service-1": {
"definition": {
"family": "my-task-def",
"volumes": [],
"containerDefinitions": [
{
"name": "default",
"image": "my-docker-image:1.0",
"essential": True,
"command": ["hello", "world"],
"hostname": "my-task-def",
"memory": 256,
"cpu": 0,
"portMappings": [{"containerPort": 8080, "hostPort": 0}],
"environment": {},
}
],
"tags": [{"key": "createdBy", "value": "ecs-deplojo"}],
}
}
}
connection = main.Connection()
result = connection.ecs.list_task_definitions()
assert len(result['taskDefinitionArns']) == 0
main.register_task_definitions(connection, task_definitions)
result = connection.ecs.list_task_definitions()
assert len(result['taskDefinitionArns']) == 1
def test_deregister_task_definitions(cluster):
task_definitions = {
"service-1": {
"definition": {
"family": "my-task-def",
"volumes": [],
"containerDefinitions": [
{
"name": "default",
"image": "my-docker-image:1.0",
"essential": True,
"command": ["hello", "world"],
"hostname": "my-task-def",
"memory": 256,
"cpu": 0,
"portMappings": [{"containerPort": 8080, "hostPort": 0}],
"environment": {},
}
],
"tags": [{"key": "createdBy", "value": "ecs-deplojo"}],
}
}
}
connection = main.Connection()
result = connection.ecs.list_task_definitions()
assert len(result['taskDefinitionArns']) == 0
for i in range(10):
task_def = copy.deepcopy(task_definitions)
main.register_task_definitions(connection, task_def)
result = connection.ecs.list_task_definitions()
assert len(result['taskDefinitionArns']) == 10
main.deregister_task_definitions(connection, task_def)
result = connection.ecs.list_task_definitions()
assert len(result['taskDefinitionArns']) == 1
|
Add a number of unittests for registering and deregistering task definitions
|
Add a number of unittests for registering and deregistering task definitions
|
Python
|
mit
|
LabD/ecs-deplojo
|
Add a number of unittests for registering and deregistering task definitions
|
import copy
import os
from ecs_deplojo import main
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
def test_register_task_definitions(cluster):
task_definitions = {
"service-1": {
"definition": {
"family": "my-task-def",
"volumes": [],
"containerDefinitions": [
{
"name": "default",
"image": "my-docker-image:1.0",
"essential": True,
"command": ["hello", "world"],
"hostname": "my-task-def",
"memory": 256,
"cpu": 0,
"portMappings": [{"containerPort": 8080, "hostPort": 0}],
"environment": {},
}
],
"tags": [{"key": "createdBy", "value": "ecs-deplojo"}],
}
}
}
connection = main.Connection()
result = connection.ecs.list_task_definitions()
assert len(result['taskDefinitionArns']) == 0
main.register_task_definitions(connection, task_definitions)
result = connection.ecs.list_task_definitions()
assert len(result['taskDefinitionArns']) == 1
def test_deregister_task_definitions(cluster):
task_definitions = {
"service-1": {
"definition": {
"family": "my-task-def",
"volumes": [],
"containerDefinitions": [
{
"name": "default",
"image": "my-docker-image:1.0",
"essential": True,
"command": ["hello", "world"],
"hostname": "my-task-def",
"memory": 256,
"cpu": 0,
"portMappings": [{"containerPort": 8080, "hostPort": 0}],
"environment": {},
}
],
"tags": [{"key": "createdBy", "value": "ecs-deplojo"}],
}
}
}
connection = main.Connection()
result = connection.ecs.list_task_definitions()
assert len(result['taskDefinitionArns']) == 0
for i in range(10):
task_def = copy.deepcopy(task_definitions)
main.register_task_definitions(connection, task_def)
result = connection.ecs.list_task_definitions()
assert len(result['taskDefinitionArns']) == 10
main.deregister_task_definitions(connection, task_def)
result = connection.ecs.list_task_definitions()
assert len(result['taskDefinitionArns']) == 1
|
<commit_before><commit_msg>Add a number of unittests for registering and deregistering task definitions<commit_after>
|
import copy
import os
from ecs_deplojo import main
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
def test_register_task_definitions(cluster):
task_definitions = {
"service-1": {
"definition": {
"family": "my-task-def",
"volumes": [],
"containerDefinitions": [
{
"name": "default",
"image": "my-docker-image:1.0",
"essential": True,
"command": ["hello", "world"],
"hostname": "my-task-def",
"memory": 256,
"cpu": 0,
"portMappings": [{"containerPort": 8080, "hostPort": 0}],
"environment": {},
}
],
"tags": [{"key": "createdBy", "value": "ecs-deplojo"}],
}
}
}
connection = main.Connection()
result = connection.ecs.list_task_definitions()
assert len(result['taskDefinitionArns']) == 0
main.register_task_definitions(connection, task_definitions)
result = connection.ecs.list_task_definitions()
assert len(result['taskDefinitionArns']) == 1
def test_deregister_task_definitions(cluster):
task_definitions = {
"service-1": {
"definition": {
"family": "my-task-def",
"volumes": [],
"containerDefinitions": [
{
"name": "default",
"image": "my-docker-image:1.0",
"essential": True,
"command": ["hello", "world"],
"hostname": "my-task-def",
"memory": 256,
"cpu": 0,
"portMappings": [{"containerPort": 8080, "hostPort": 0}],
"environment": {},
}
],
"tags": [{"key": "createdBy", "value": "ecs-deplojo"}],
}
}
}
connection = main.Connection()
result = connection.ecs.list_task_definitions()
assert len(result['taskDefinitionArns']) == 0
for i in range(10):
task_def = copy.deepcopy(task_definitions)
main.register_task_definitions(connection, task_def)
result = connection.ecs.list_task_definitions()
assert len(result['taskDefinitionArns']) == 10
main.deregister_task_definitions(connection, task_def)
result = connection.ecs.list_task_definitions()
assert len(result['taskDefinitionArns']) == 1
|
Add a number of unittests for registering and deregistering task definitions
import copy
import os
from ecs_deplojo import main
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
def test_register_task_definitions(cluster):
task_definitions = {
"service-1": {
"definition": {
"family": "my-task-def",
"volumes": [],
"containerDefinitions": [
{
"name": "default",
"image": "my-docker-image:1.0",
"essential": True,
"command": ["hello", "world"],
"hostname": "my-task-def",
"memory": 256,
"cpu": 0,
"portMappings": [{"containerPort": 8080, "hostPort": 0}],
"environment": {},
}
],
"tags": [{"key": "createdBy", "value": "ecs-deplojo"}],
}
}
}
connection = main.Connection()
result = connection.ecs.list_task_definitions()
assert len(result['taskDefinitionArns']) == 0
main.register_task_definitions(connection, task_definitions)
result = connection.ecs.list_task_definitions()
assert len(result['taskDefinitionArns']) == 1
def test_deregister_task_definitions(cluster):
task_definitions = {
"service-1": {
"definition": {
"family": "my-task-def",
"volumes": [],
"containerDefinitions": [
{
"name": "default",
"image": "my-docker-image:1.0",
"essential": True,
"command": ["hello", "world"],
"hostname": "my-task-def",
"memory": 256,
"cpu": 0,
"portMappings": [{"containerPort": 8080, "hostPort": 0}],
"environment": {},
}
],
"tags": [{"key": "createdBy", "value": "ecs-deplojo"}],
}
}
}
connection = main.Connection()
result = connection.ecs.list_task_definitions()
assert len(result['taskDefinitionArns']) == 0
for i in range(10):
task_def = copy.deepcopy(task_definitions)
main.register_task_definitions(connection, task_def)
result = connection.ecs.list_task_definitions()
assert len(result['taskDefinitionArns']) == 10
main.deregister_task_definitions(connection, task_def)
result = connection.ecs.list_task_definitions()
assert len(result['taskDefinitionArns']) == 1
|
<commit_before><commit_msg>Add a number of unittests for registering and deregistering task definitions<commit_after>import copy
import os
from ecs_deplojo import main
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
def test_register_task_definitions(cluster):
task_definitions = {
"service-1": {
"definition": {
"family": "my-task-def",
"volumes": [],
"containerDefinitions": [
{
"name": "default",
"image": "my-docker-image:1.0",
"essential": True,
"command": ["hello", "world"],
"hostname": "my-task-def",
"memory": 256,
"cpu": 0,
"portMappings": [{"containerPort": 8080, "hostPort": 0}],
"environment": {},
}
],
"tags": [{"key": "createdBy", "value": "ecs-deplojo"}],
}
}
}
connection = main.Connection()
result = connection.ecs.list_task_definitions()
assert len(result['taskDefinitionArns']) == 0
main.register_task_definitions(connection, task_definitions)
result = connection.ecs.list_task_definitions()
assert len(result['taskDefinitionArns']) == 1
def test_deregister_task_definitions(cluster):
task_definitions = {
"service-1": {
"definition": {
"family": "my-task-def",
"volumes": [],
"containerDefinitions": [
{
"name": "default",
"image": "my-docker-image:1.0",
"essential": True,
"command": ["hello", "world"],
"hostname": "my-task-def",
"memory": 256,
"cpu": 0,
"portMappings": [{"containerPort": 8080, "hostPort": 0}],
"environment": {},
}
],
"tags": [{"key": "createdBy", "value": "ecs-deplojo"}],
}
}
}
connection = main.Connection()
result = connection.ecs.list_task_definitions()
assert len(result['taskDefinitionArns']) == 0
for i in range(10):
task_def = copy.deepcopy(task_definitions)
main.register_task_definitions(connection, task_def)
result = connection.ecs.list_task_definitions()
assert len(result['taskDefinitionArns']) == 10
main.deregister_task_definitions(connection, task_def)
result = connection.ecs.list_task_definitions()
assert len(result['taskDefinitionArns']) == 1
|
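Both tests depend on a cluster fixture that this commit does not include. A plausible conftest.py sketch uses moto to fake the ECS API; every name below is an assumption rather than part of the repository:

import boto3
import pytest
from moto import mock_ecs

@pytest.fixture
def cluster():
    # Serve register/deregister/list task-definition calls from an
    # in-memory fake instead of real AWS.
    with mock_ecs():
        client = boto3.client('ecs', region_name='eu-west-1')
        client.create_cluster(clusterName='default')
        yield client

Read together, the assertions document the intended semantics: registering the same family ten times yields ten revisions, and deregistering then prunes all but the most recent one.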
|
5158ecbec55f142e79cd5efa0360e01b50142464
|
changes/migrations/0006_auto_20170718_1414.py
|
changes/migrations/0006_auto_20170718_1414.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.1 on 2017-07-18 14:14
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('changes', '0005_auto_20170601_1354'),
]
operations = [
migrations.AlterField(
model_name='change',
name='action',
field=models.CharField(choices=[(b'baby_switch', b'Change from pregnancy to baby messaging'), (b'pmtct_loss_switch', b'Change to loss messaging via pmtct app'), (b'pmtct_loss_optout', b'Optout due to loss via pmtct app'), (b'pmtct_nonloss_optout', b'Optout not due to loss via pmtct app'), (b'nurse_update_detail', b'Update nurseconnect detail'), (b'nurse_change_msisdn', b'Change nurseconnect msisdn'), (b'nurse_optout', b'Optout from nurseconnect'), (b'momconnect_loss_switch', b'Change to loss messaging via momconnect app'), (b'momconnect_loss_optout', b'Optout due to loss via momconnect app'), (b'momconnect_nonloss_optout', b'Optout not due to loss via momconnect app'), (b'momconnect_change_language', b'Change the language of the messages via momconnect app'), (b'momconnect_change_msisdn', b'Change the MSISDN to send messages to via momconnect app'), (b'momconnect_change_identification', b'Change the identification type and number via momconnect app'), (b'admin_change_subscription', b'Change the message set and/or language of the specified subscription from admin')], max_length=255),
),
]
|
Add migration for admin change
|
Add migration for admin change
|
Python
|
bsd-3-clause
|
praekeltfoundation/ndoh-hub,praekeltfoundation/ndoh-hub,praekeltfoundation/ndoh-hub
|
Add migration for admin change
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.1 on 2017-07-18 14:14
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('changes', '0005_auto_20170601_1354'),
]
operations = [
migrations.AlterField(
model_name='change',
name='action',
field=models.CharField(choices=[(b'baby_switch', b'Change from pregnancy to baby messaging'), (b'pmtct_loss_switch', b'Change to loss messaging via pmtct app'), (b'pmtct_loss_optout', b'Optout due to loss via pmtct app'), (b'pmtct_nonloss_optout', b'Optout not due to loss via pmtct app'), (b'nurse_update_detail', b'Update nurseconnect detail'), (b'nurse_change_msisdn', b'Change nurseconnect msisdn'), (b'nurse_optout', b'Optout from nurseconnect'), (b'momconnect_loss_switch', b'Change to loss messaging via momconnect app'), (b'momconnect_loss_optout', b'Optout due to loss via momconnect app'), (b'momconnect_nonloss_optout', b'Optout not due to loss via momconnect app'), (b'momconnect_change_language', b'Change the language of the messages via momconnect app'), (b'momconnect_change_msisdn', b'Change the MSISDN to send messages to via momconnect app'), (b'momconnect_change_identification', b'Change the identification type and number via momconnect app'), (b'admin_change_subscription', b'Change the message set and/or language of the specified subscription from admin')], max_length=255),
),
]
|
<commit_before><commit_msg>Add migration for admin change<commit_after>
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.1 on 2017-07-18 14:14
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('changes', '0005_auto_20170601_1354'),
]
operations = [
migrations.AlterField(
model_name='change',
name='action',
field=models.CharField(choices=[(b'baby_switch', b'Change from pregnancy to baby messaging'), (b'pmtct_loss_switch', b'Change to loss messaging via pmtct app'), (b'pmtct_loss_optout', b'Optout due to loss via pmtct app'), (b'pmtct_nonloss_optout', b'Optout not due to loss via pmtct app'), (b'nurse_update_detail', b'Update nurseconnect detail'), (b'nurse_change_msisdn', b'Change nurseconnect msisdn'), (b'nurse_optout', b'Optout from nurseconnect'), (b'momconnect_loss_switch', b'Change to loss messaging via momconnect app'), (b'momconnect_loss_optout', b'Optout due to loss via momconnect app'), (b'momconnect_nonloss_optout', b'Optout not due to loss via momconnect app'), (b'momconnect_change_language', b'Change the language of the messages via momconnect app'), (b'momconnect_change_msisdn', b'Change the MSISDN to send messages to via momconnect app'), (b'momconnect_change_identification', b'Change the identification type and number via momconnect app'), (b'admin_change_subscription', b'Change the message set and/or language of the specified subscription from admin')], max_length=255),
),
]
|
Add migration for admin change
# -*- coding: utf-8 -*-
# Generated by Django 1.9.1 on 2017-07-18 14:14
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('changes', '0005_auto_20170601_1354'),
]
operations = [
migrations.AlterField(
model_name='change',
name='action',
field=models.CharField(choices=[(b'baby_switch', b'Change from pregnancy to baby messaging'), (b'pmtct_loss_switch', b'Change to loss messaging via pmtct app'), (b'pmtct_loss_optout', b'Optout due to loss via pmtct app'), (b'pmtct_nonloss_optout', b'Optout not due to loss via pmtct app'), (b'nurse_update_detail', b'Update nurseconnect detail'), (b'nurse_change_msisdn', b'Change nurseconnect msisdn'), (b'nurse_optout', b'Optout from nurseconnect'), (b'momconnect_loss_switch', b'Change to loss messaging via momconnect app'), (b'momconnect_loss_optout', b'Optout due to loss via momconnect app'), (b'momconnect_nonloss_optout', b'Optout not due to loss via momconnect app'), (b'momconnect_change_language', b'Change the language of the messages via momconnect app'), (b'momconnect_change_msisdn', b'Change the MSISDN to send messages to via momconnect app'), (b'momconnect_change_identification', b'Change the identification type and number via momconnect app'), (b'admin_change_subscription', b'Change the message set and/or language of the specified subscription from admin')], max_length=255),
),
]
|
<commit_before><commit_msg>Add migration for admin change<commit_after># -*- coding: utf-8 -*-
# Generated by Django 1.9.1 on 2017-07-18 14:14
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('changes', '0005_auto_20170601_1354'),
]
operations = [
migrations.AlterField(
model_name='change',
name='action',
field=models.CharField(choices=[(b'baby_switch', b'Change from pregnancy to baby messaging'), (b'pmtct_loss_switch', b'Change to loss messaging via pmtct app'), (b'pmtct_loss_optout', b'Optout due to loss via pmtct app'), (b'pmtct_nonloss_optout', b'Optout not due to loss via pmtct app'), (b'nurse_update_detail', b'Update nurseconnect detail'), (b'nurse_change_msisdn', b'Change nurseconnect msisdn'), (b'nurse_optout', b'Optout from nurseconnect'), (b'momconnect_loss_switch', b'Change to loss messaging via momconnect app'), (b'momconnect_loss_optout', b'Optout due to loss via momconnect app'), (b'momconnect_nonloss_optout', b'Optout not due to loss via momconnect app'), (b'momconnect_change_language', b'Change the language of the messages via momconnect app'), (b'momconnect_change_msisdn', b'Change the MSISDN to send messages to via momconnect app'), (b'momconnect_change_identification', b'Change the identification type and number via momconnect app'), (b'admin_change_subscription', b'Change the message set and/or language of the specified subscription from admin')], max_length=255),
),
]
|
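Migrations like this one, which only widen the choices on Change.action, are normally generated from the model rather than written by hand; assuming the standard Django workflow, the equivalent programmatic invocation is:

from django.core.management import call_command

# Regenerate and apply migrations for the `changes` app after editing
# the action choices on the Change model.
call_command('makemigrations', 'changes')
call_command('migrate', 'changes')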
|
e07e436b461015365b2cbbdb96daa8bfc3ae31a4
|
{{cookiecutter.repo_name}}/config/urls.py
|
{{cookiecutter.repo_name}}/config/urls.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.conf import settings
from django.conf.urls import patterns, include, url
from django.conf.urls.static import static
from django.views.generic import TemplateView
# Comment the next two lines to disable the admin:
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns('', # noqa
url(r'^$', TemplateView.as_view(template_name='pages/home.html'), name="home"),
url(r'^about/$', TemplateView.as_view(template_name='pages/about.html'), name="about"),
# Django Admin (Comment the next line to disable the admin)
url(r'^admin/', include(admin.site.urls)),
# User management
url(r'^users/', include("{{ cookiecutter.repo_name }}.users.urls", namespace="users")),
url(r'^accounts/', include('allauth.urls')),
# Your stuff: custom urls includes go here
) + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.conf import settings
from django.conf.urls import include, patterns, url
from django.conf.urls.static import static
from django.contrib import admin
from django.views.generic import TemplateView
urlpatterns = patterns('', # noqa
url(r'^$', TemplateView.as_view(template_name='pages/home.html'), name="home"),
url(r'^about/$', TemplateView.as_view(template_name='pages/about.html'), name="about"),
# Django Admin
url(r'^admin/', include(admin.site.urls)),
# User management
url(r'^users/', include("{{ cookiecutter.repo_name }}.users.urls", namespace="users")),
url(r'^accounts/', include('allauth.urls')),
# Your stuff: custom urls includes go here
) + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
|
Remove admin.autodiscover() call, it's called automatically in 1.7+
|
Remove admin.autodiscover() call, it's called automatically in 1.7+
|
Python
|
bsd-3-clause
|
interaktiviti/cookiecutter-django,asyncee/cookiecutter-django,kappataumu/cookiecutter-django,Sushantgakhar/cookiecutter-django,hairychris/cookiecutter-django,Sushantgakhar/cookiecutter-django,thornomad/cookiecutter-django,stepmr/cookiecutter-django,javipalanca/cookiecutter-django,wldcordeiro/cookiecutter-django-essentials,pydanny/cookiecutter-django,janusnic/cookiecutter-django,b-kolodziej/cookiecutter-django,rtorr/cookiecutter-django,crdoconnor/cookiecutter-django,kaidokert/cookiecutter-django,ryankanno/cookiecutter-django,trungdong/cookiecutter-django,ingenioustechie/cookiecutter-django-openshift,luzfcb/cookiecutter-django,drxos/cookiecutter-django-dokku,thornomad/cookiecutter-django,thisjustin/cookiecutter-django,Nene-Padi/cookiecutter-django,thisjustin/cookiecutter-django,ad-m/cookiecutter-django,ingenioustechie/cookiecutter-django-openshift,aleprovencio/cookiecutter-django,chrisfranzen/cookiecutter-django,webyneter/cookiecutter-django,stepanovsh/project_template,hackultura/django-project-template,calculuscowboy/cookiecutter-django,IanLee1521/cookiecutter-django,Parbhat/cookiecutter-django-foundation,primoz-k/cookiecutter-django,gappsexperts/cookiecutter-django,yunti/cookiecutter-django,nunchaks/cookiecutter-django,andresgz/cookiecutter-django,gengue/django-new-marana,asyncee/cookiecutter-django,webspired/cookiecutter-django,ryankanno/cookiecutter-django,gappsexperts/cookiecutter-django,audreyr/cookiecutter-django,martinblech/cookiecutter-django,hackebrot/cookiecutter-django,bopo/cookiecutter-django,javipalanca/cookiecutter-django,javipalanca/cookiecutter-django,stepmr/cookiecutter-django,primoz-k/cookiecutter-django,janusnic/cookiecutter-django,ryankanno/cookiecutter-django,siauPatrick/cookiecutter-django,schacki/cookiecutter-django,kappataumu/cookiecutter-django,ad-m/cookiecutter-django,aeikenberry/cookiecutter-django-rest-babel,stepanovsh/project_template,trungdong/cookiecutter-django,jondelmil/cookiecutter-django,the3ballsoft/django-new-marana,asyncee/cookiecutter-django,bopo/cookiecutter-django,Nene-Padi/cookiecutter-django,gappsexperts/cookiecutter-django,yehoshuk/cookiecutter-django,yehoshuk/cookiecutter-django,ujjwalwahi/cookiecutter-django,andela-ijubril/cookiecutter-django,IanLee1521/cookiecutter-django,andela-ijubril/cookiecutter-django,chrisfranzen/cookiecutter-django,mjhea0/cookiecutter-django,topwebmaster/cookiecutter-django,mistalaba/cookiecutter-django,janusnic/cookiecutter-django,HellerCommaA/cookiecutter-django,thornomad/cookiecutter-django,ddiazpinto/cookiecutter-django,pydanny/cookiecutter-django,kappataumu/cookiecutter-django,calculuscowboy/cookiecutter-django,andresgz/cookiecutter-django,martinblech/cookiecutter-django,chrisfranzen/cookiecutter-django,kaidokert/cookiecutter-django,ovidner/cookiecutter-django,webspired/cookiecutter-django,mjhea0/cookiecutter-django,b-kolodziej/cookiecutter-django,ddiazpinto/cookiecutter-django,ingenioustechie/cookiecutter-django-openshift,HellerCommaA/cookiecutter-django,rtorr/cookiecutter-django,ovidner/cookiecutter-django,topwebmaster/cookiecutter-django,wy123123/cookiecutter-django,IanLee1521/cookiecutter-django,kappataumu/cookiecutter-django,wldcordeiro/cookiecutter-django-essentials,Nene-Padi/cookiecutter-django,interaktiviti/cookiecutter-django,hackebrot/cookiecutter-django,wy123123/cookiecutter-django,audreyr/cookiecutter-django,stepmr/cookiecutter-django,thisjustin/cookiecutter-django,bopo/cookiecutter-django,HellerCommaA/cookiecutter-django,Sushantgakhar/cookiecutter-django,HandyCodeJob/hcj-django-temp,bogdal/cookiecutter-django,HellerCommaA/cookiecutter-django,webyneter/cookiecutter-django,hairychris/cookiecutter-django,rtorr/cookiecutter-django,audreyr/cookiecutter-django,interaktiviti/cookiecutter-django,the3ballsoft/django-new-marana,ad-m/cookiecutter-django,stepanovsh/project_template,luzfcb/cookiecutter-django,hairychris/cookiecutter-django,yehoshuk/cookiecutter-django,stepanovsh/project_template,rtorr/cookiecutter-django,mistalaba/cookiecutter-django,HandyCodeJob/hcj-django-temp,wy123123/cookiecutter-django,asyncee/cookiecutter-django,ujjwalwahi/cookiecutter-django,drxos/cookiecutter-django-dokku,HandyCodeJob/hcj-django-temp,wldcordeiro/cookiecutter-django-essentials,drxos/cookiecutter-django-dokku,javipalanca/cookiecutter-django,schacki/cookiecutter-django,andresgz/cookiecutter-django,bogdal/cookiecutter-django,janusnic/cookiecutter-django,hackultura/django-project-template,IanLee1521/cookiecutter-django,bogdal/cookiecutter-django,kaidokert/cookiecutter-django,thisjustin/cookiecutter-django,wy123123/cookiecutter-django,martinblech/cookiecutter-django,gengue/django-new-marana,ingenioustechie/cookiecutter-django-openshift,chrisfranzen/cookiecutter-django,aeikenberry/cookiecutter-django-rest-babel,gappsexperts/cookiecutter-django,ujjwalwahi/cookiecutter-django,topwebmaster/cookiecutter-django,hairychris/cookiecutter-django,ddiazpinto/cookiecutter-django,schacki/cookiecutter-django,martinblech/cookiecutter-django,webyneter/cookiecutter-django,jondelmil/cookiecutter-django,webspired/cookiecutter-django,hackultura/django-project-template,primoz-k/cookiecutter-django,aleprovencio/cookiecutter-django,Parbhat/cookiecutter-django-foundation,topwebmaster/cookiecutter-django,pydanny/cookiecutter-django,aeikenberry/cookiecutter-django-rest-babel,the3ballsoft/django-new-marana,nunchaks/cookiecutter-django,andresgz/cookiecutter-django,javipalanca/cookiecutter-django,ad-m/cookiecutter-django,crdoconnor/cookiecutter-django,ovidner/cookiecutter-django,crdoconnor/cookiecutter-django,ddiazpinto/cookiecutter-django,siauPatrick/cookiecutter-django,kaidokert/cookiecutter-django,Nene-Padi/cookiecutter-django,luzfcb/cookiecutter-django,hackebrot/cookiecutter-django,yunti/cookiecutter-django,HandyCodeJob/hcj-django-temp,ryankanno/cookiecutter-django,mjhea0/cookiecutter-django,aleprovencio/cookiecutter-django,thornomad/cookiecutter-django,pydanny/cookiecutter-django,stepanovsh/project_template,b-kolodziej/cookiecutter-django,mistalaba/cookiecutter-django,nunchaks/cookiecutter-django,ovidner/cookiecutter-django,jondelmil/cookiecutter-django,calculuscowboy/cookiecutter-django,gengue/django-new-marana,jondelmil/cookiecutter-django,webyneter/cookiecutter-django,aleprovencio/cookiecutter-django,mjhea0/cookiecutter-django,yunti/cookiecutter-django,bopo/cookiecutter-django,drxos/cookiecutter-django-dokku,crdoconnor/cookiecutter-django,mistalaba/cookiecutter-django,b-kolodziej/cookiecutter-django,interaktiviti/cookiecutter-django,webspired/cookiecutter-django,stepmr/cookiecutter-django,trungdong/cookiecutter-django,ujjwalwahi/cookiecutter-django,andela-ijubril/cookiecutter-django,hackebrot/cookiecutter-django,hackultura/django-project-template,aeikenberry/cookiecutter-django-rest-babel,Parbhat/cookiecutter-django-foundation,calculuscowboy/cookiecutter-django,Parbhat/cookiecutter-django-foundation,schacki/cookiecutter-django,luzfcb/cookiecutter-django,bogdal/cookiecutter-django,siauPatrick/cookiecutter-django,primoz-k/cookiecutter-django,yehoshuk/cookiecutter-django
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.conf import settings
from django.conf.urls import patterns, include, url
from django.conf.urls.static import static
from django.views.generic import TemplateView
# Comment the next two lines to disable the admin:
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns('', # noqa
url(r'^$', TemplateView.as_view(template_name='pages/home.html'), name="home"),
url(r'^about/$', TemplateView.as_view(template_name='pages/about.html'), name="about"),
# Django Admin (Comment the next line to disable the admin)
url(r'^admin/', include(admin.site.urls)),
# User management
url(r'^users/', include("{{ cookiecutter.repo_name }}.users.urls", namespace="users")),
url(r'^accounts/', include('allauth.urls')),
# Your stuff: custom urls includes go here
) + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
Remove admin.autodiscover() call, it's called automatically in 1.7+
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.conf import settings
from django.conf.urls import include, patterns, url
from django.conf.urls.static import static
from django.contrib import admin
from django.views.generic import TemplateView
urlpatterns = patterns('', # noqa
url(r'^$', TemplateView.as_view(template_name='pages/home.html'), name="home"),
url(r'^about/$', TemplateView.as_view(template_name='pages/about.html'), name="about"),
# Django Admin
url(r'^admin/', include(admin.site.urls)),
# User management
url(r'^users/', include("{{ cookiecutter.repo_name }}.users.urls", namespace="users")),
url(r'^accounts/', include('allauth.urls')),
# Your stuff: custom urls includes go here
) + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
|
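The rationale behind this change: from Django 1.7 the default admin app config calls autodiscover() itself once the app registry is ready, so the explicit call is redundant. A project that still wants manual control can opt out via the simple config, sketched here for settings.py:

INSTALLED_APPS = [
    # SimpleAdminConfig registers the admin without running
    # admin.autodiscover(); call it yourself where needed.
    'django.contrib.admin.apps.SimpleAdminConfig',
    # ... the remaining apps are unchanged.
]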
<commit_before># -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.conf import settings
from django.conf.urls import patterns, include, url
from django.conf.urls.static import static
from django.views.generic import TemplateView
# Comment the next two lines to disable the admin:
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns('', # noqa
url(r'^$', TemplateView.as_view(template_name='pages/home.html'), name="home"),
url(r'^about/$', TemplateView.as_view(template_name='pages/about.html'), name="about"),
# Django Admin (Comment the next line to disable the admin)
url(r'^admin/', include(admin.site.urls)),
# User management
url(r'^users/', include("{{ cookiecutter.repo_name }}.users.urls", namespace="users")),
url(r'^accounts/', include('allauth.urls')),
# Your stuff: custom urls includes go here
) + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
<commit_msg>Remove admin.autodiscover() call, it's called automatically in 1.7+<commit_after>
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.conf import settings
from django.conf.urls import include, patterns, url
from django.conf.urls.static import static
from django.contrib import admin
from django.views.generic import TemplateView
urlpatterns = patterns('', # noqa
url(r'^$', TemplateView.as_view(template_name='pages/home.html'), name="home"),
url(r'^about/$', TemplateView.as_view(template_name='pages/about.html'), name="about"),
# Django Admin
url(r'^admin/', include(admin.site.urls)),
# User management
url(r'^users/', include("{{ cookiecutter.repo_name }}.users.urls", namespace="users")),
url(r'^accounts/', include('allauth.urls')),
# Your stuff: custom urls includes go here
) + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.conf import settings
from django.conf.urls import patterns, include, url
from django.conf.urls.static import static
from django.views.generic import TemplateView
# Comment the next two lines to disable the admin:
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns('', # noqa
url(r'^$', TemplateView.as_view(template_name='pages/home.html'), name="home"),
url(r'^about/$', TemplateView.as_view(template_name='pages/about.html'), name="about"),
# Django Admin (Comment the next line to disable the admin)
url(r'^admin/', include(admin.site.urls)),
# User management
url(r'^users/', include("{{ cookiecutter.repo_name }}.users.urls", namespace="users")),
url(r'^accounts/', include('allauth.urls')),
# Your stuff: custom urls includes go here
) + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
Remove admin.autodiscover() call, it's called automatically in 1.7+
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.conf import settings
from django.conf.urls import include, patterns, url
from django.conf.urls.static import static
from django.contrib import admin
from django.views.generic import TemplateView
urlpatterns = patterns('', # noqa
url(r'^$', TemplateView.as_view(template_name='pages/home.html'), name="home"),
url(r'^about/$', TemplateView.as_view(template_name='pages/about.html'), name="about"),
# Django Admin
url(r'^admin/', include(admin.site.urls)),
# User management
url(r'^users/', include("{{ cookiecutter.repo_name }}.users.urls", namespace="users")),
url(r'^accounts/', include('allauth.urls')),
# Your stuff: custom urls includes go here
) + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
|
<commit_before># -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.conf import settings
from django.conf.urls import patterns, include, url
from django.conf.urls.static import static
from django.views.generic import TemplateView
# Comment the next two lines to disable the admin:
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns('', # noqa
url(r'^$', TemplateView.as_view(template_name='pages/home.html'), name="home"),
url(r'^about/$', TemplateView.as_view(template_name='pages/about.html'), name="about"),
# Django Admin (Comment the next line to disable the admin)
url(r'^admin/', include(admin.site.urls)),
# User management
url(r'^users/', include("{{ cookiecutter.repo_name }}.users.urls", namespace="users")),
url(r'^accounts/', include('allauth.urls')),
# Your stuff: custom urls includes go here
) + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
<commit_msg>Remove admin.autodiscover() call, it's called automatically in 1.7+<commit_after># -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.conf import settings
from django.conf.urls import include, patterns, url
from django.conf.urls.static import static
from django.contrib import admin
from django.views.generic import TemplateView
urlpatterns = patterns('', # noqa
url(r'^$', TemplateView.as_view(template_name='pages/home.html'), name="home"),
url(r'^about/$', TemplateView.as_view(template_name='pages/about.html'), name="about"),
# Django Admin
url(r'^admin/', include(admin.site.urls)),
# User management
url(r'^users/', include("{{ cookiecutter.repo_name }}.users.urls", namespace="users")),
url(r'^accounts/', include('allauth.urls')),
# Your stuff: custom urls includes go here
) + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
|
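A short aside on the commit above, since its one-line message carries the whole rationale: from Django 1.7 onward the admin app ships an AppConfig whose ready() hook performs autodiscovery once the app registry is populated, which is why the explicit admin.autodiscover() call became dead weight. The sketch below illustrates that mechanism; it is a simplified stand-in for django.contrib.admin.apps.AdminConfig, not Django's actual source.

# Simplified illustration of why admin.autodiscover() is redundant on
# Django >= 1.7: the admin app's own AppConfig runs discovery in ready(),
# which Django invokes automatically at startup.
from django.apps import AppConfig
from django.utils.module_loading import autodiscover_modules


class IllustrativeAdminConfig(AppConfig):
    """Hypothetical stand-in for django.contrib.admin.apps.AdminConfig."""

    name = 'django.contrib.admin'

    def ready(self):
        # Imports every installed app's admin.py so its ModelAdmin
        # registrations run -- exactly what admin.autodiscover() used to do.
        autodiscover_modules('admin')

The net effect for project templates like the one above is that urls.py shrinks to plain imports plus url patterns, with no startup side effects of its own.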
7321bb9986ce1836ab65ac82f65576d6412494f2
|
auth0/v2/test/test_device_credentials.py
|
auth0/v2/test/test_device_credentials.py
|
import unittest
import mock
from ..device_credentials import DeviceCredentials
class TestDeviceCredentials(unittest.TestCase):
@mock.patch('auth0.v2.device_credentials.RestClient')
def test_get(self, mock_rc):
mock_instance = mock_rc.return_value
c = DeviceCredentials(domain='domain', jwt_token='jwttoken')
c.get(user_id='uid', client_id='cid', type='type')
args, kwargs = mock_instance.get.call_args
self.assertEqual('https://domain/api/v2/device-credentials', args[0])
self.assertEqual(kwargs['params'], {'fields': None,
'include_fields': 'true',
'user_id': 'uid',
'client_id': 'cid',
'type': 'type'})
@mock.patch('auth0.v2.device_credentials.RestClient')
def test_create(self, mock_rc):
mock_instance = mock_rc.return_value
c = DeviceCredentials(domain='domain', jwt_token='jwttoken')
c.create({'a': 'b', 'c': 'd'})
args, kwargs = mock_instance.post.call_args
self.assertEqual('https://domain/api/v2/device-credentials', args[0])
self.assertEqual(kwargs['data'], {'a': 'b', 'c': 'd'})
@mock.patch('auth0.v2.device_credentials.RestClient')
def test_delete(self, mock_rc):
mock_instance = mock_rc.return_value
c = DeviceCredentials(domain='domain', jwt_token='jwttoken')
c.delete('an-id')
mock_instance.delete.assert_called_with(
'https://domain/api/v2/device-credentials/an-id',
)
|
Add unit tests for DeviceCredentials
|
Add unit tests for DeviceCredentials
|
Python
|
mit
|
auth0/auth0-python,auth0/auth0-python
|
Add unit tests for DeviceCredentials
|
import unittest
import mock
from ..device_credentials import DeviceCredentials
class TestDeviceCredentials(unittest.TestCase):
@mock.patch('auth0.v2.device_credentials.RestClient')
def test_get(self, mock_rc):
mock_instance = mock_rc.return_value
c = DeviceCredentials(domain='domain', jwt_token='jwttoken')
c.get(user_id='uid', client_id='cid', type='type')
args, kwargs = mock_instance.get.call_args
self.assertEqual('https://domain/api/v2/device-credentials', args[0])
self.assertEqual(kwargs['params'], {'fields': None,
'include_fields': 'true',
'user_id': 'uid',
'client_id': 'cid',
'type': 'type'})
@mock.patch('auth0.v2.device_credentials.RestClient')
def test_create(self, mock_rc):
mock_instance = mock_rc.return_value
c = DeviceCredentials(domain='domain', jwt_token='jwttoken')
c.create({'a': 'b', 'c': 'd'})
args, kwargs = mock_instance.post.call_args
self.assertEqual('https://domain/api/v2/device-credentials', args[0])
self.assertEqual(kwargs['data'], {'a': 'b', 'c': 'd'})
@mock.patch('auth0.v2.device_credentials.RestClient')
def test_delete(self, mock_rc):
mock_instance = mock_rc.return_value
c = DeviceCredentials(domain='domain', jwt_token='jwttoken')
c.delete('an-id')
mock_instance.delete.assert_called_with(
'https://domain/api/v2/device-credentials/an-id',
)
|
<commit_before><commit_msg>Add unit tests for DeviceCredentials<commit_after>
|
import unittest
import mock
from ..device_credentials import DeviceCredentials
class TestDeviceCredentials(unittest.TestCase):
@mock.patch('auth0.v2.device_credentials.RestClient')
def test_get(self, mock_rc):
mock_instance = mock_rc.return_value
c = DeviceCredentials(domain='domain', jwt_token='jwttoken')
c.get(user_id='uid', client_id='cid', type='type')
args, kwargs = mock_instance.get.call_args
self.assertEqual('https://domain/api/v2/device-credentials', args[0])
self.assertEqual(kwargs['params'], {'fields': None,
'include_fields': 'true',
'user_id': 'uid',
'client_id': 'cid',
'type': 'type'})
@mock.patch('auth0.v2.device_credentials.RestClient')
def test_create(self, mock_rc):
mock_instance = mock_rc.return_value
c = DeviceCredentials(domain='domain', jwt_token='jwttoken')
c.create({'a': 'b', 'c': 'd'})
args, kwargs = mock_instance.post.call_args
self.assertEqual('https://domain/api/v2/device-credentials', args[0])
self.assertEqual(kwargs['data'], {'a': 'b', 'c': 'd'})
@mock.patch('auth0.v2.device_credentials.RestClient')
def test_delete(self, mock_rc):
mock_instance = mock_rc.return_value
c = DeviceCredentials(domain='domain', jwt_token='jwttoken')
c.delete('an-id')
mock_instance.delete.assert_called_with(
'https://domain/api/v2/device-credentials/an-id',
)
|
Add unit tests for DeviceCredentials
import unittest
import mock
from ..device_credentials import DeviceCredentials
class TestDeviceCredentials(unittest.TestCase):
@mock.patch('auth0.v2.device_credentials.RestClient')
def test_get(self, mock_rc):
mock_instance = mock_rc.return_value
c = DeviceCredentials(domain='domain', jwt_token='jwttoken')
c.get(user_id='uid', client_id='cid', type='type')
args, kwargs = mock_instance.get.call_args
self.assertEqual('https://domain/api/v2/device-credentials', args[0])
self.assertEqual(kwargs['params'], {'fields': None,
'include_fields': 'true',
'user_id': 'uid',
'client_id': 'cid',
'type': 'type'})
@mock.patch('auth0.v2.device_credentials.RestClient')
def test_create(self, mock_rc):
mock_instance = mock_rc.return_value
c = DeviceCredentials(domain='domain', jwt_token='jwttoken')
c.create({'a': 'b', 'c': 'd'})
args, kwargs = mock_instance.post.call_args
self.assertEqual('https://domain/api/v2/device-credentials', args[0])
self.assertEqual(kwargs['data'], {'a': 'b', 'c': 'd'})
@mock.patch('auth0.v2.device_credentials.RestClient')
def test_delete(self, mock_rc):
mock_instance = mock_rc.return_value
c = DeviceCredentials(domain='domain', jwt_token='jwttoken')
c.delete('an-id')
mock_instance.delete.assert_called_with(
'https://domain/api/v2/device-credentials/an-id',
)
|
<commit_before><commit_msg>Add unit tests for DeviceCredentials<commit_after>import unittest
import mock
from ..device_credentials import DeviceCredentials
class TestDeviceCredentials(unittest.TestCase):
@mock.patch('auth0.v2.device_credentials.RestClient')
def test_get(self, mock_rc):
mock_instance = mock_rc.return_value
c = DeviceCredentials(domain='domain', jwt_token='jwttoken')
c.get(user_id='uid', client_id='cid', type='type')
args, kwargs = mock_instance.get.call_args
self.assertEqual('https://domain/api/v2/device-credentials', args[0])
self.assertEqual(kwargs['params'], {'fields': None,
'include_fields': 'true',
'user_id': 'uid',
'client_id': 'cid',
'type': 'type'})
@mock.patch('auth0.v2.device_credentials.RestClient')
def test_create(self, mock_rc):
mock_instance = mock_rc.return_value
c = DeviceCredentials(domain='domain', jwt_token='jwttoken')
c.create({'a': 'b', 'c': 'd'})
args, kwargs = mock_instance.post.call_args
self.assertEqual('https://domain/api/v2/device-credentials', args[0])
self.assertEqual(kwargs['data'], {'a': 'b', 'c': 'd'})
@mock.patch('auth0.v2.device_credentials.RestClient')
def test_delete(self, mock_rc):
mock_instance = mock_rc.return_value
c = DeviceCredentials(domain='domain', jwt_token='jwttoken')
c.delete('an-id')
mock_instance.delete.assert_called_with(
'https://domain/api/v2/device-credentials/an-id',
)
|
|
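The DeviceCredentials tests above lean on a mocking idiom worth spelling out: patch the collaborator where it is looked up (auth0.v2.device_credentials.RestClient, i.e. in the importing module) rather than where it is defined, then unpack call_args to assert on the URL and payload. A minimal, self-contained sketch of the same pattern follows; the Client class and URL are hypothetical, invented only to show the shape of the assertion.

import unittest
from unittest import mock


class Client(object):
    """Hypothetical wrapper that delegates HTTP work, akin to RestClient."""

    def __init__(self, http):
        self.http = http

    def get_thing(self, thing_id):
        return self.http.get('https://api.example.com/things/' + thing_id)


class TestClient(unittest.TestCase):
    def test_get_hits_expected_url(self):
        http = mock.Mock()
        Client(http).get_thing('42')
        # call_args is the (args, kwargs) pair recorded on the last call.
        args, kwargs = http.get.call_args
        self.assertEqual('https://api.example.com/things/42', args[0])


if __name__ == '__main__':
    unittest.main()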
274511db02b95c14c19564d1e0b3d30ccf1bf532
|
core/migrations/0011_auto_20150602_0128.py
|
core/migrations/0011_auto_20150602_0128.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('core', '0010_currentrate'),
]
operations = [
migrations.AlterModelOptions(
name='price',
options={'ordering': ['-datetime']},
),
]
|
Add migration for meta options
|
Add migration for meta options
|
Python
|
unlicense
|
kvikshaug/btc.kvikshaug.no,kvikshaug/btc.kvikshaug.no,kvikshaug/btc.kvikshaug.no,kvikshaug/btc.kvikshaug.no
|
Add migration for meta options
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('core', '0010_currentrate'),
]
operations = [
migrations.AlterModelOptions(
name='price',
options={'ordering': ['-datetime']},
),
]
|
<commit_before><commit_msg>Add migration for meta options<commit_after>
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('core', '0010_currentrate'),
]
operations = [
migrations.AlterModelOptions(
name='price',
options={'ordering': ['-datetime']},
),
]
|
Add migration for meta options
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('core', '0010_currentrate'),
]
operations = [
migrations.AlterModelOptions(
name='price',
options={'ordering': ['-datetime']},
),
]
|
<commit_before><commit_msg>Add migration for meta options<commit_after># -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('core', '0010_currentrate'),
]
operations = [
migrations.AlterModelOptions(
name='price',
options={'ordering': ['-datetime']},
),
]
|
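One closing note on the migration above: AlterModelOptions records Meta-level changes such as ordering in the migration graph without emitting any SQL, because ordering only affects the ORM's query generation, not the schema. Below is a hedged sketch of the model edit that would make makemigrations produce that operation; the Price fields shown are assumptions, since the real model is not part of this record.

from django.db import models


class Price(models.Model):
    # Field names here are illustrative guesses; only 'datetime' is implied
    # by the ordering option in the migration above.
    datetime = models.DateTimeField()
    value = models.DecimalField(max_digits=16, decimal_places=8)

    class Meta:
        # Adding this and running `manage.py makemigrations` yields an
        # AlterModelOptions(name='price', options={'ordering': ...}) op.
        ordering = ['-datetime']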