| repo_name | ref | path | copies | content |
|---|---|---|---|---|
ahmadio/edx-platform | refs/heads/master | common/djangoapps/external_auth/migrations/__init__.py | 12133432 | |
creativepsyco/python-phabricator | refs/heads/master | phabricator/base/__init__.py | 12133432 | |
WALR/taiga-back | refs/heads/master | taiga/stats/apps.py | 14 | # Copyright (C) 2015 Taiga Agile LLC <support@taiga.io>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from django.apps import AppConfig
from django.conf import settings
from django.conf.urls import include, url
from .routers import router
class StatsAppConfig(AppConfig):
name = "taiga.stats"
verbose_name = "Stats"
def ready(self):
if settings.STATS_ENABLED:
from taiga.urls import urlpatterns
urlpatterns.append(url(r'^api/v1/', include(router.urls)))
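# Note: ready() runs once, while Django populates the app registry, so the
# conditional append happens at startup and STATS_ENABLED cannot be toggled
# at runtime. When the flag is set, the effect is the same as declaring the
# route statically in taiga/urls.py:
#
#     urlpatterns += [url(r'^api/v1/', include(router.urls))]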
|
attilahorvath/phantomjs | refs/heads/master | src/qt/qtwebkit/Tools/Scripts/webkitpy/performance_tests/perftestsrunner_integrationtest.py | 115 | # Copyright (C) 2012 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Integration tests for run_perf_tests."""
import StringIO
import datetime
import json
import re
import unittest2 as unittest
from webkitpy.common.host_mock import MockHost
from webkitpy.common.system.outputcapture import OutputCapture
from webkitpy.port.driver import DriverOutput
from webkitpy.port.test import TestPort
from webkitpy.performance_tests.perftest import PerfTest
from webkitpy.performance_tests.perftestsrunner import PerfTestsRunner
class EventTargetWrapperTestData:
text = """Running 20 times
Ignoring warm-up run (1502)
1504
1505
1510
1504
1507
1509
1510
1487
1488
1472
1472
1488
1473
1472
1475
1487
1486
1486
1475
1471
Time:
values 1486, 1471, 1510, 1505, 1478, 1490 ms
avg 1490 ms
median 1488 ms
stdev 15.13935 ms
min 1471 ms
max 1510 ms
"""
output = """Running Bindings/event-target-wrapper.html (1 of 2)
RESULT Bindings: event-target-wrapper: Time= 1490.0 ms
median= 1488.0 ms, stdev= 14.11751 ms, min= 1471.0 ms, max= 1510.0 ms
Finished: 0.1 s
"""
results = {'url': 'http://trac.webkit.org/browser/trunk/PerformanceTests/Bindings/event-target-wrapper.html',
'metrics': {'Time': {'current': [[1486.0, 1471.0, 1510.0, 1505.0, 1478.0, 1490.0]] * 4}}}
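# The outer value list above is repeated four times because the runner invokes
# each test once per test runner; a default runner count of 4 is assumed here
# (test_run_with_test_runner_count below overrides it to 3 and expects three
# value lists).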
class SomeParserTestData:
text = """Running 20 times
Ignoring warm-up run (1115)
Time:
values 1080, 1120, 1095, 1101, 1104 ms
avg 1100 ms
median 1101 ms
stdev 14.50861 ms
min 1080 ms
max 1120 ms
"""
output = """Running Parser/some-parser.html (2 of 2)
RESULT Parser: some-parser: Time= 1100.0 ms
median= 1101.0 ms, stdev= 13.31402 ms, min= 1080.0 ms, max= 1120.0 ms
Finished: 0.1 s
"""
results = {'url': 'http://trac.webkit.org/browser/trunk/PerformanceTests/Parser/some-parser.html',
'metrics': {'Time': {'current': [[1080.0, 1120.0, 1095.0, 1101.0, 1104.0]] * 4}}}
class MemoryTestData:
text = """Running 20 times
Ignoring warm-up run (1115)
Time:
values 1080, 1120, 1095, 1101, 1104 ms
avg 1100 ms
median 1101 ms
stdev 14.50861 ms
min 1080 ms
max 1120 ms
JS Heap:
values 825000, 811000, 848000, 837000, 829000 bytes
avg 830000 bytes
median 829000 bytes
stdev 13784.04875 bytes
min 811000 bytes
max 848000 bytes
Malloc:
values 529000, 511000, 548000, 536000, 521000 bytes
avg 529000 bytes
median 529000 bytes
stdev 14124.44689 bytes
min 511000 bytes
max 548000 bytes
"""
output = """Running 1 tests
Running Parser/memory-test.html (1 of 1)
RESULT Parser: memory-test: Time= 1100.0 ms
median= 1101.0 ms, stdev= 13.31402 ms, min= 1080.0 ms, max= 1120.0 ms
RESULT Parser: memory-test: JSHeap= 830000.0 bytes
median= 829000.0 bytes, stdev= 12649.11064 bytes, min= 811000.0 bytes, max= 848000.0 bytes
RESULT Parser: memory-test: Malloc= 529000.0 bytes
median= 529000.0 bytes, stdev= 12961.48139 bytes, min= 511000.0 bytes, max= 548000.0 bytes
Finished: 0.1 s
"""
results = {'current': [[1080, 1120, 1095, 1101, 1104]] * 4}
js_heap_results = {'current': [[825000, 811000, 848000, 837000, 829000]] * 4}
malloc_results = {'current': [[529000, 511000, 548000, 536000, 521000]] * 4}
class TestDriver:
def run_test(self, driver_input, stop_when_done):
text = ''
timeout = False
crash = False
if driver_input.test_name.endswith('pass.html'):
text = SomeParserTestData.text
elif driver_input.test_name.endswith('timeout.html'):
timeout = True
elif driver_input.test_name.endswith('failed.html'):
text = None
elif driver_input.test_name.endswith('tonguey.html'):
text = 'we are not expecting an output from perf tests but RESULT blablabla'
elif driver_input.test_name.endswith('crash.html'):
crash = True
elif driver_input.test_name.endswith('event-target-wrapper.html'):
text = EventTargetWrapperTestData.text
elif driver_input.test_name.endswith('some-parser.html'):
text = SomeParserTestData.text
elif driver_input.test_name.endswith('memory-test.html'):
text = MemoryTestData.text
return DriverOutput(text, '', '', '', crash=crash, timeout=timeout)
def start(self):
"""do nothing"""
def stop(self):
"""do nothing"""
class MainTest(unittest.TestCase):
def _normalize_output(self, log):
return re.sub(r'(stdev=\s+\d+\.\d{5})\d+', r'\1', re.sub(r'Finished: [0-9\.]+ s', 'Finished: 0.1 s', log))
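# Example of the normalization (hypothetical values): 'stdev= 14.117513368 ms
# ... Finished: 12.3 s' becomes 'stdev= 14.11751 ms ... Finished: 0.1 s', so
# timing-dependent digits never break string comparisons.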
def _load_output_json(self, runner):
json_content = runner._host.filesystem.read_text_file(runner._output_json_path())
return json.loads(re.sub(r'("stdev":\s*\d+\.\d{5})\d+', r'\1', json_content))
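# Same idea for the serialized form: a hypothetical '"stdev": 14.117513368'
# is truncated to '"stdev": 14.11751' before comparison.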
def create_runner(self, args=[], driver_class=TestDriver):
options, parsed_args = PerfTestsRunner._parse_args(args)
test_port = TestPort(host=MockHost(), options=options)
test_port.create_driver = lambda worker_number=None, no_timeout=False: driver_class()
runner = PerfTestsRunner(args=args, port=test_port)
runner._host.filesystem.maybe_make_directory(runner._base_path, 'inspector')
runner._host.filesystem.maybe_make_directory(runner._base_path, 'Bindings')
runner._host.filesystem.maybe_make_directory(runner._base_path, 'Parser')
return runner, test_port
def run_test(self, test_name):
runner, port = self.create_runner()
tests = [PerfTest(port, test_name, runner._host.filesystem.join('some-dir', test_name))]
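# _run_tests_set returns the number of unexpected results; zero means pass.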
return runner._run_tests_set(tests) == 0
def test_run_passing_test(self):
self.assertTrue(self.run_test('pass.html'))
def test_run_silent_test(self):
self.assertFalse(self.run_test('silent.html'))
def test_run_failed_test(self):
self.assertFalse(self.run_test('failed.html'))
def test_run_tonguey_test(self):
self.assertFalse(self.run_test('tonguey.html'))
def test_run_timeout_test(self):
self.assertFalse(self.run_test('timeout.html'))
def test_run_crash_test(self):
self.assertFalse(self.run_test('crash.html'))
def _tests_for_runner(self, runner, test_names):
filesystem = runner._host.filesystem
tests = []
for test in test_names:
path = filesystem.join(runner._base_path, test)
dirname = filesystem.dirname(path)
tests.append(PerfTest(runner._port, test, path))
return tests
def test_run_test_set_kills_drt_per_run(self):
class TestDriverWithStopCount(TestDriver):
stop_count = 0
def stop(self):
TestDriverWithStopCount.stop_count += 1
runner, port = self.create_runner(driver_class=TestDriverWithStopCount)
tests = self._tests_for_runner(runner, ['inspector/pass.html', 'inspector/silent.html', 'inspector/failed.html',
'inspector/tonguey.html', 'inspector/timeout.html', 'inspector/crash.html'])
unexpected_result_count = runner._run_tests_set(tests)
self.assertEqual(TestDriverWithStopCount.stop_count, 9)
def test_run_test_set_for_parser_tests(self):
runner, port = self.create_runner()
tests = self._tests_for_runner(runner, ['Bindings/event-target-wrapper.html', 'Parser/some-parser.html'])
output = OutputCapture()
output.capture_output()
try:
unexpected_result_count = runner._run_tests_set(tests)
finally:
stdout, stderr, log = output.restore_output()
self.assertEqual(unexpected_result_count, 0)
self.assertEqual(self._normalize_output(log), EventTargetWrapperTestData.output + SomeParserTestData.output)
def test_run_memory_test(self):
runner, port = self.create_runner_and_setup_results_template()
runner._timestamp = 123456789
port.host.filesystem.write_text_file(runner._base_path + '/Parser/memory-test.html', 'some content')
output = OutputCapture()
output.capture_output()
try:
unexpected_result_count = runner.run()
finally:
stdout, stderr, log = output.restore_output()
self.assertEqual(unexpected_result_count, 0)
self.assertEqual(self._normalize_output(log), MemoryTestData.output + '\nMOCK: user.open_url: file://...\n')
parser_tests = self._load_output_json(runner)[0]['tests']['Parser']['tests']
self.assertEqual(parser_tests['memory-test']['metrics']['Time'], MemoryTestData.results)
self.assertEqual(parser_tests['memory-test']['metrics']['JSHeap'], MemoryTestData.js_heap_results)
self.assertEqual(parser_tests['memory-test']['metrics']['Malloc'], MemoryTestData.malloc_results)
def _test_run_with_json_output(self, runner, filesystem, upload_succeeds=False, results_shown=True, expected_exit_code=0, repeat=1, compare_logs=True):
filesystem.write_text_file(runner._base_path + '/Parser/some-parser.html', 'some content')
filesystem.write_text_file(runner._base_path + '/Bindings/event-target-wrapper.html', 'some content')
uploaded = [False]
def mock_upload_json(hostname, json_path, host_path=None):
# FIXME: Get rid of the hard-coded perf.webkit.org once we've completed the transition.
self.assertIn(hostname, ['some.host'])
self.assertIn(json_path, ['/mock-checkout/output.json'])
self.assertIn(host_path, [None, '/api/report'])
uploaded[0] = upload_succeeds
return upload_succeeds
runner._upload_json = mock_upload_json
runner._timestamp = 123456789
runner._utc_timestamp = datetime.datetime(2013, 2, 8, 15, 19, 37, 460000)
output_capture = OutputCapture()
output_capture.capture_output()
try:
self.assertEqual(runner.run(), expected_exit_code)
finally:
stdout, stderr, logs = output_capture.restore_output()
if not expected_exit_code and compare_logs:
expected_logs = ''
for i in xrange(repeat):
runs = ' (Run %d of %d)' % (i + 1, repeat) if repeat > 1 else ''
expected_logs += 'Running 2 tests%s\n' % runs + EventTargetWrapperTestData.output + SomeParserTestData.output
if results_shown:
expected_logs += 'MOCK: user.open_url: file://...\n'
self.assertEqual(self._normalize_output(logs), expected_logs)
self.assertEqual(uploaded[0], upload_succeeds)
return logs
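# The captured logs are returned so callers such as
# test_run_with_bad_slave_config_json can assert on specific messages.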
_event_target_wrapper_and_inspector_results = {
"Bindings":
{"url": "http://trac.webkit.org/browser/trunk/PerformanceTests/Bindings",
"tests": {"event-target-wrapper": EventTargetWrapperTestData.results}},
"Parser":
{"url": "http://trac.webkit.org/browser/trunk/PerformanceTests/Parser",
"tests": {"some-parser": SomeParserTestData.results}}}
def test_run_with_json_output(self):
runner, port = self.create_runner_and_setup_results_template(args=['--output-json-path=/mock-checkout/output.json',
'--test-results-server=some.host'])
self._test_run_with_json_output(runner, port.host.filesystem, upload_succeeds=True)
self.assertEqual(self._load_output_json(runner), [{
"buildTime": "2013-02-08T15:19:37.460000", "tests": self._event_target_wrapper_and_inspector_results,
"revisions": {"WebKit": {"timestamp": "2013-02-01 08:48:05 +0000", "revision": "5678"}}}])
filesystem = port.host.filesystem
self.assertTrue(filesystem.isfile(runner._output_json_path()))
self.assertTrue(filesystem.isfile(filesystem.splitext(runner._output_json_path())[0] + '.html'))
def test_run_with_description(self):
runner, port = self.create_runner_and_setup_results_template(args=['--output-json-path=/mock-checkout/output.json',
'--test-results-server=some.host', '--description', 'some description'])
self._test_run_with_json_output(runner, port.host.filesystem, upload_succeeds=True)
self.assertEqual(self._load_output_json(runner), [{
"buildTime": "2013-02-08T15:19:37.460000", "description": "some description",
"tests": self._event_target_wrapper_and_inspector_results,
"revisions": {"WebKit": {"timestamp": "2013-02-01 08:48:05 +0000", "revision": "5678"}}}])
def create_runner_and_setup_results_template(self, args=[]):
runner, port = self.create_runner(args)
filesystem = port.host.filesystem
filesystem.write_text_file(runner._base_path + '/resources/results-template.html',
'BEGIN<script src="%AbsolutePathToWebKitTrunk%/some.js"></script>'
'<script src="%AbsolutePathToWebKitTrunk%/other.js"></script><script>%PeformanceTestsResultsJSON%</script>END')
filesystem.write_text_file(runner._base_path + '/Dromaeo/resources/dromaeo/web/lib/jquery-1.6.4.js', 'jquery content')
return runner, port
def test_run_respects_no_results(self):
runner, port = self.create_runner(args=['--output-json-path=/mock-checkout/output.json',
'--test-results-server=some.host', '--no-results'])
self._test_run_with_json_output(runner, port.host.filesystem, upload_succeeds=False, results_shown=False)
self.assertFalse(port.host.filesystem.isfile('/mock-checkout/output.json'))
def test_run_generates_json_by_default(self):
runner, port = self.create_runner_and_setup_results_template()
filesystem = port.host.filesystem
output_json_path = runner._output_json_path()
results_page_path = filesystem.splitext(output_json_path)[0] + '.html'
self.assertFalse(filesystem.isfile(output_json_path))
self.assertFalse(filesystem.isfile(results_page_path))
self._test_run_with_json_output(runner, port.host.filesystem)
self.assertEqual(self._load_output_json(runner), [{
"buildTime": "2013-02-08T15:19:37.460000", "tests": self._event_target_wrapper_and_inspector_results,
"revisions": {"WebKit": {"timestamp": "2013-02-01 08:48:05 +0000", "revision": "5678"}}}])
self.assertTrue(filesystem.isfile(output_json_path))
self.assertTrue(filesystem.isfile(results_page_path))
def test_run_merges_output_by_default(self):
runner, port = self.create_runner_and_setup_results_template()
filesystem = port.host.filesystem
output_json_path = runner._output_json_path()
filesystem.write_text_file(output_json_path, '[{"previous": "results"}]')
self._test_run_with_json_output(runner, port.host.filesystem)
self.assertEqual(self._load_output_json(runner), [{"previous": "results"}, {
"buildTime": "2013-02-08T15:19:37.460000", "tests": self._event_target_wrapper_and_inspector_results,
"revisions": {"WebKit": {"timestamp": "2013-02-01 08:48:05 +0000", "revision": "5678"}}}])
self.assertTrue(filesystem.isfile(filesystem.splitext(output_json_path)[0] + '.html'))
def test_run_respects_reset_results(self):
runner, port = self.create_runner_and_setup_results_template(args=["--reset-results"])
filesystem = port.host.filesystem
output_json_path = runner._output_json_path()
filesystem.write_text_file(output_json_path, '[{"previous": "results"}]')
self._test_run_with_json_output(runner, port.host.filesystem)
self.assertEqual(self._load_output_json(runner), [{
"buildTime": "2013-02-08T15:19:37.460000", "tests": self._event_target_wrapper_and_inspector_results,
"revisions": {"WebKit": {"timestamp": "2013-02-01 08:48:05 +0000", "revision": "5678"}}}])
self.assertTrue(filesystem.isfile(filesystem.splitext(output_json_path)[0] + '.html'))
def test_run_generates_and_show_results_page(self):
runner, port = self.create_runner_and_setup_results_template(args=['--output-json-path=/mock-checkout/output.json'])
page_shown = []
port.show_results_html_file = lambda path: page_shown.append(path)
filesystem = port.host.filesystem
self._test_run_with_json_output(runner, filesystem, results_shown=False)
expected_entry = {"buildTime": "2013-02-08T15:19:37.460000", "tests": self._event_target_wrapper_and_inspector_results,
"revisions": {"WebKit": {"timestamp": "2013-02-01 08:48:05 +0000", "revision": "5678"}}}
self.maxDiff = None
self.assertEqual(runner._output_json_path(), '/mock-checkout/output.json')
self.assertEqual(self._load_output_json(runner), [expected_entry])
self.assertEqual(filesystem.read_text_file('/mock-checkout/output.html'),
'BEGIN<script src="/test.checkout/some.js"></script><script src="/test.checkout/other.js"></script>'
'<script>%s</script>END' % port.host.filesystem.read_text_file(runner._output_json_path()))
self.assertEqual(page_shown[0], '/mock-checkout/output.html')
self._test_run_with_json_output(runner, filesystem, results_shown=False)
self.assertEqual(runner._output_json_path(), '/mock-checkout/output.json')
self.assertEqual(self._load_output_json(runner), [expected_entry, expected_entry])
self.assertEqual(filesystem.read_text_file('/mock-checkout/output.html'),
'BEGIN<script src="/test.checkout/some.js"></script><script src="/test.checkout/other.js"></script>'
'<script>%s</script>END' % port.host.filesystem.read_text_file(runner._output_json_path()))
def test_run_respects_no_show_results(self):
show_results_html_file = lambda path: page_shown.append(path)
runner, port = self.create_runner_and_setup_results_template(args=['--output-json-path=/mock-checkout/output.json'])
page_shown = []
port.show_results_html_file = show_results_html_file
self._test_run_with_json_output(runner, port.host.filesystem, results_shown=False)
self.assertEqual(page_shown[0], '/mock-checkout/output.html')
runner, port = self.create_runner_and_setup_results_template(args=['--output-json-path=/mock-checkout/output.json',
'--no-show-results'])
page_shown = []
port.show_results_html_file = show_results_html_file
self._test_run_with_json_output(runner, port.host.filesystem, results_shown=False)
self.assertEqual(page_shown, [])
def test_run_with_bad_output_json(self):
runner, port = self.create_runner_and_setup_results_template(args=['--output-json-path=/mock-checkout/output.json'])
port.host.filesystem.write_text_file('/mock-checkout/output.json', 'bad json')
self._test_run_with_json_output(runner, port.host.filesystem, expected_exit_code=PerfTestsRunner.EXIT_CODE_BAD_MERGE)
port.host.filesystem.write_text_file('/mock-checkout/output.json', '{"another bad json": "1"}')
self._test_run_with_json_output(runner, port.host.filesystem, expected_exit_code=PerfTestsRunner.EXIT_CODE_BAD_MERGE)
def test_run_with_slave_config_json(self):
runner, port = self.create_runner_and_setup_results_template(args=['--output-json-path=/mock-checkout/output.json',
'--slave-config-json-path=/mock-checkout/slave-config.json', '--test-results-server=some.host'])
port.host.filesystem.write_text_file('/mock-checkout/slave-config.json', '{"key": "value"}')
self._test_run_with_json_output(runner, port.host.filesystem, upload_succeeds=True)
self.assertEqual(self._load_output_json(runner), [{
"buildTime": "2013-02-08T15:19:37.460000", "tests": self._event_target_wrapper_and_inspector_results,
"revisions": {"WebKit": {"timestamp": "2013-02-01 08:48:05 +0000", "revision": "5678"}}, "builderKey": "value"}])
def test_run_with_bad_slave_config_json(self):
runner, port = self.create_runner_and_setup_results_template(args=['--output-json-path=/mock-checkout/output.json',
'--slave-config-json-path=/mock-checkout/slave-config.json', '--test-results-server=some.host'])
logs = self._test_run_with_json_output(runner, port.host.filesystem, expected_exit_code=PerfTestsRunner.EXIT_CODE_BAD_SOURCE_JSON)
self.assertTrue('Missing slave configuration JSON file: /mock-checkout/slave-config.json' in logs)
port.host.filesystem.write_text_file('/mock-checkout/slave-config.json', 'bad json')
self._test_run_with_json_output(runner, port.host.filesystem, expected_exit_code=PerfTestsRunner.EXIT_CODE_BAD_SOURCE_JSON)
port.host.filesystem.write_text_file('/mock-checkout/slave-config.json', '["another bad json"]')
self._test_run_with_json_output(runner, port.host.filesystem, expected_exit_code=PerfTestsRunner.EXIT_CODE_BAD_SOURCE_JSON)
def test_run_with_multiple_repositories(self):
runner, port = self.create_runner_and_setup_results_template(args=['--output-json-path=/mock-checkout/output.json',
'--test-results-server=some.host'])
port.repository_paths = lambda: [('webkit', '/mock-checkout'), ('some', '/mock-checkout/some')]
self._test_run_with_json_output(runner, port.host.filesystem, upload_succeeds=True)
self.assertEqual(self._load_output_json(runner), [{
"buildTime": "2013-02-08T15:19:37.460000", "tests": self._event_target_wrapper_and_inspector_results,
"revisions": {"webkit": {"timestamp": "2013-02-01 08:48:05 +0000", "revision": "5678"},
"some": {"timestamp": "2013-02-01 08:48:05 +0000", "revision": "5678"}}}])
def test_run_with_upload_json(self):
runner, port = self.create_runner_and_setup_results_template(args=['--output-json-path=/mock-checkout/output.json',
'--test-results-server', 'some.host', '--platform', 'platform1', '--builder-name', 'builder1', '--build-number', '123'])
self._test_run_with_json_output(runner, port.host.filesystem, upload_succeeds=True)
generated_json = json.loads(port.host.filesystem.files['/mock-checkout/output.json'])
self.assertEqual(generated_json[0]['platform'], 'platform1')
self.assertEqual(generated_json[0]['builderName'], 'builder1')
self.assertEqual(generated_json[0]['buildNumber'], 123)
self._test_run_with_json_output(runner, port.host.filesystem, upload_succeeds=False, expected_exit_code=PerfTestsRunner.EXIT_CODE_FAILED_UPLOADING)
def test_run_with_upload_json_should_generate_perf_webkit_json(self):
runner, port = self.create_runner_and_setup_results_template(args=['--output-json-path=/mock-checkout/output.json',
'--test-results-server', 'some.host', '--platform', 'platform1', '--builder-name', 'builder1', '--build-number', '123',
'--slave-config-json-path=/mock-checkout/slave-config.json'])
port.host.filesystem.write_text_file('/mock-checkout/slave-config.json', '{"key": "value1"}')
self._test_run_with_json_output(runner, port.host.filesystem, upload_succeeds=True)
generated_json = json.loads(port.host.filesystem.files['/mock-checkout/output.json'])
self.assertTrue(isinstance(generated_json, list))
self.assertEqual(len(generated_json), 1)
output = generated_json[0]
self.maxDiff = None
self.assertEqual(output['platform'], 'platform1')
self.assertEqual(output['buildNumber'], 123)
self.assertEqual(output['buildTime'], '2013-02-08T15:19:37.460000')
self.assertEqual(output['builderName'], 'builder1')
self.assertEqual(output['builderKey'], 'value1')
self.assertEqual(output['revisions'], {'WebKit': {'revision': '5678', 'timestamp': '2013-02-01 08:48:05 +0000'}})
self.assertEqual(output['tests'].keys(), ['Bindings', 'Parser'])
self.assertEqual(sorted(output['tests']['Bindings'].keys()), ['tests', 'url'])
self.assertEqual(output['tests']['Bindings']['url'], 'http://trac.webkit.org/browser/trunk/PerformanceTests/Bindings')
self.assertEqual(output['tests']['Bindings']['tests'].keys(), ['event-target-wrapper'])
self.assertEqual(output['tests']['Bindings']['tests']['event-target-wrapper'], {
'url': 'http://trac.webkit.org/browser/trunk/PerformanceTests/Bindings/event-target-wrapper.html',
'metrics': {'Time': {'current': [[1486.0, 1471.0, 1510.0, 1505.0, 1478.0, 1490.0]] * 4}}})
def test_run_with_repeat(self):
runner, port = self.create_runner_and_setup_results_template(args=['--output-json-path=/mock-checkout/output.json',
'--test-results-server=some.host', '--repeat', '5'])
self._test_run_with_json_output(runner, port.host.filesystem, upload_succeeds=True, repeat=5)
self.assertEqual(self._load_output_json(runner), [
{"buildTime": "2013-02-08T15:19:37.460000",
"tests": self._event_target_wrapper_and_inspector_results,
"revisions": {"WebKit": {"timestamp": "2013-02-01 08:48:05 +0000", "revision": "5678"}}},
{"buildTime": "2013-02-08T15:19:37.460000",
"tests": self._event_target_wrapper_and_inspector_results,
"revisions": {"WebKit": {"timestamp": "2013-02-01 08:48:05 +0000", "revision": "5678"}}},
{"buildTime": "2013-02-08T15:19:37.460000",
"tests": self._event_target_wrapper_and_inspector_results,
"revisions": {"WebKit": {"timestamp": "2013-02-01 08:48:05 +0000", "revision": "5678"}}},
{"buildTime": "2013-02-08T15:19:37.460000",
"tests": self._event_target_wrapper_and_inspector_results,
"revisions": {"WebKit": {"timestamp": "2013-02-01 08:48:05 +0000", "revision": "5678"}}},
{"buildTime": "2013-02-08T15:19:37.460000",
"tests": self._event_target_wrapper_and_inspector_results,
"revisions": {"WebKit": {"timestamp": "2013-02-01 08:48:05 +0000", "revision": "5678"}}}])
def test_run_with_test_runner_count(self):
runner, port = self.create_runner_and_setup_results_template(args=['--output-json-path=/mock-checkout/output.json',
'--test-runner-count=3'])
self._test_run_with_json_output(runner, port.host.filesystem, compare_logs=False)
generated_json = json.loads(port.host.filesystem.files['/mock-checkout/output.json'])
self.assertTrue(isinstance(generated_json, list))
self.assertEqual(len(generated_json), 1)
output = generated_json[0]['tests']['Bindings']['tests']['event-target-wrapper']['metrics']['Time']['current']
self.assertEqual(len(output), 3)
expectedMetrics = EventTargetWrapperTestData.results['metrics']['Time']['current'][0]
for metrics in output:
self.assertEqual(metrics, expectedMetrics)
|
FlipperPA/wagtailpress | refs/heads/master | wagtailpress/migrations/0003_config_site.py | 1 | # -*- coding: utf-8 -*-
# Generated by Django 1.9.2 on 2016-03-03 22:17
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('wagtailcore', '0023_alter_page_revision_on_delete_behaviour'),
('wagtailpress', '0002_auto_20160228_0014'),
]
operations = [
migrations.AddField(
model_name='config',
name='site',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='site', to='wagtailcore.Site'),
),
]
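# Note: on_delete=SET_NULL requires a nullable column, which is why the field
# above is declared with null=True.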
|
CloverHealth/airflow | refs/heads/clover | tests/models.py | 2 | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import datetime
import logging
import os
import pendulum
import unittest
import time
import six
import re
import urllib
import textwrap
import inspect
from airflow import configuration, models, settings, AirflowException
from airflow.exceptions import AirflowDagCycleException, AirflowSkipException
from airflow.jobs import BackfillJob
from airflow.models import DAG, TaskInstance as TI
from airflow.models import DagRun
from airflow.models import State as ST
from airflow.models import DagModel, DagRun, DagStat
from airflow.models import clear_task_instances
from airflow.models import XCom
from airflow.models import Connection
from airflow.jobs import LocalTaskJob
from airflow.operators.dummy_operator import DummyOperator
from airflow.operators.bash_operator import BashOperator
from airflow.operators.python_operator import PythonOperator
from airflow.operators.python_operator import ShortCircuitOperator
from airflow.ti_deps.deps.trigger_rule_dep import TriggerRuleDep
from airflow.utils import timezone
from airflow.utils.weight_rule import WeightRule
from airflow.utils.state import State
from airflow.utils.trigger_rule import TriggerRule
from mock import patch, ANY
from parameterized import parameterized
from tempfile import NamedTemporaryFile
DEFAULT_DATE = timezone.datetime(2016, 1, 1)
TEST_DAGS_FOLDER = os.path.join(
os.path.dirname(os.path.realpath(__file__)), 'dags')
class DagTest(unittest.TestCase):
def test_params_not_passed_is_empty_dict(self):
"""
Test that when 'params' is _not_ passed to a new Dag, the params
attribute is set to an empty dictionary.
"""
dag = models.DAG('test-dag')
self.assertEqual(dict, type(dag.params))
self.assertEqual(0, len(dag.params))
def test_params_passed_and_params_in_default_args_no_override(self):
"""
Test that when 'params' exists as a key passed to the default_args dict
in addition to params being passed explicitly as an argument to the
dag, the 'params' key of the default_args dict is merged with the
dict of the params argument.
"""
params1 = {'parameter1': 1}
params2 = {'parameter2': 2}
dag = models.DAG('test-dag',
default_args={'params': params1},
params=params2)
params_combined = params1.copy()
params_combined.update(params2)
self.assertEqual(params_combined, dag.params)
def test_dag_as_context_manager(self):
"""
Test DAG as a context manager.
When used as a context manager, Operators are automatically added to
the DAG (unless they specify a different DAG)
"""
dag = DAG(
'dag',
start_date=DEFAULT_DATE,
default_args={'owner': 'owner1'})
dag2 = DAG(
'dag2',
start_date=DEFAULT_DATE,
default_args={'owner': 'owner2'})
with dag:
op1 = DummyOperator(task_id='op1')
op2 = DummyOperator(task_id='op2', dag=dag2)
self.assertIs(op1.dag, dag)
self.assertEqual(op1.owner, 'owner1')
self.assertIs(op2.dag, dag2)
self.assertEqual(op2.owner, 'owner2')
with dag2:
op3 = DummyOperator(task_id='op3')
self.assertIs(op3.dag, dag2)
self.assertEqual(op3.owner, 'owner2')
with dag:
with dag2:
op4 = DummyOperator(task_id='op4')
op5 = DummyOperator(task_id='op5')
self.assertIs(op4.dag, dag2)
self.assertIs(op5.dag, dag)
self.assertEqual(op4.owner, 'owner2')
self.assertEqual(op5.owner, 'owner1')
with DAG('creating_dag_in_cm', start_date=DEFAULT_DATE) as dag:
DummyOperator(task_id='op6')
self.assertEqual(dag.dag_id, 'creating_dag_in_cm')
self.assertEqual(dag.tasks[0].task_id, 'op6')
def test_dag_topological_sort(self):
dag = DAG(
'dag',
start_date=DEFAULT_DATE,
default_args={'owner': 'owner1'})
# A -> B
# A -> C -> D
# ordered: B, D, C, A or D, B, C, A or D, C, B, A
with dag:
op1 = DummyOperator(task_id='A')
op2 = DummyOperator(task_id='B')
op3 = DummyOperator(task_id='C')
op4 = DummyOperator(task_id='D')
op1.set_upstream([op2, op3])
op3.set_upstream(op4)
topological_list = dag.topological_sort()
logging.info(topological_list)
tasks = [op2, op3, op4]
self.assertTrue(topological_list[0] in tasks)
tasks.remove(topological_list[0])
self.assertTrue(topological_list[1] in tasks)
tasks.remove(topological_list[1])
self.assertTrue(topological_list[2] in tasks)
tasks.remove(topological_list[2])
self.assertTrue(topological_list[3] == op1)
dag = DAG(
'dag',
start_date=DEFAULT_DATE,
default_args={'owner': 'owner1'})
# C -> (A u B) -> D
# C -> E
# ordered: E | D, A | B, C
with dag:
op1 = DummyOperator(task_id='A')
op2 = DummyOperator(task_id='B')
op3 = DummyOperator(task_id='C')
op4 = DummyOperator(task_id='D')
op5 = DummyOperator(task_id='E')
op1.set_downstream(op3)
op2.set_downstream(op3)
op1.set_upstream(op4)
op2.set_upstream(op4)
op5.set_downstream(op3)
topological_list = dag.topological_sort()
logging.info(topological_list)
set1 = [op4, op5]
self.assertTrue(topological_list[0] in set1)
set1.remove(topological_list[0])
set2 = [op1, op2]
set2.extend(set1)
self.assertTrue(topological_list[1] in set2)
set2.remove(topological_list[1])
self.assertTrue(topological_list[2] in set2)
set2.remove(topological_list[2])
self.assertTrue(topological_list[3] in set2)
self.assertTrue(topological_list[4] == op3)
dag = DAG(
'dag',
start_date=DEFAULT_DATE,
default_args={'owner': 'owner1'})
self.assertEquals(tuple(), dag.topological_sort())
def test_dag_none_default_args_start_date(self):
"""
Tests if a start_date of None in default_args
works.
"""
dag = DAG('DAG', default_args={'start_date': None})
self.assertEqual(dag.timezone, settings.TIMEZONE)
def test_dag_task_priority_weight_total(self):
width = 5
depth = 5
weight = 5
pattern = re.compile('stage(\\d*).(\\d*)')
# Fully connected parallel tasks. i.e. every task at each parallel
# stage is dependent on every task in the previous stage.
# Default weight should be calculated using downstream descendants
with DAG('dag', start_date=DEFAULT_DATE,
default_args={'owner': 'owner1'}) as dag:
pipeline = [
[DummyOperator(
task_id='stage{}.{}'.format(i, j), priority_weight=weight)
for j in range(0, width)] for i in range(0, depth)
]
for d, stage in enumerate(pipeline):
if d == 0:
continue
for current_task in stage:
for prev_task in pipeline[d - 1]:
current_task.set_upstream(prev_task)
for task in six.itervalues(dag.task_dict):
match = pattern.match(task.task_id)
task_depth = int(match.group(1))
# the sum of each stages after this task + itself
correct_weight = ((depth - (task_depth + 1)) * width + 1) * weight
calculated_weight = task.priority_weight_total
self.assertEquals(calculated_weight, correct_weight)
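# Worked example for the downstream rule: with depth=5, width=5, weight=5,
# a stage-0 task has (5 - 1) * 5 = 20 descendants, so its total is
# (20 + 1) * 5 = 105, matching the formula above.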
# Same test as above except use 'upstream' for weight calculation
weight = 3
with DAG('dag', start_date=DEFAULT_DATE,
default_args={'owner': 'owner1'}) as dag:
pipeline = [
[DummyOperator(
task_id='stage{}.{}'.format(i, j), priority_weight=weight,
weight_rule=WeightRule.UPSTREAM)
for j in range(0, width)] for i in range(0, depth)
]
for d, stage in enumerate(pipeline):
if d == 0:
continue
for current_task in stage:
for prev_task in pipeline[d - 1]:
current_task.set_upstream(prev_task)
for task in six.itervalues(dag.task_dict):
match = pattern.match(task.task_id)
task_depth = int(match.group(1))
# the sum of each stages after this task + itself
correct_weight = ((task_depth) * width + 1) * weight
calculated_weight = task.priority_weight_total
self.assertEquals(calculated_weight, correct_weight)
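# Worked example for the upstream rule: with weight=3, a stage-2 task has
# 2 * 5 = 10 ancestors, so its total is (10 + 1) * 3 = 33.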
# Same test as above except use 'absolute' for weight calculation
weight = 10
with DAG('dag', start_date=DEFAULT_DATE,
default_args={'owner': 'owner1'}) as dag:
pipeline = [
[DummyOperator(
task_id='stage{}.{}'.format(i, j), priority_weight=weight,
weight_rule=WeightRule.ABSOLUTE)
for j in range(0, width)] for i in range(0, depth)
]
for d, stage in enumerate(pipeline):
if d == 0:
continue
for current_task in stage:
for prev_task in pipeline[d - 1]:
current_task.set_upstream(prev_task)
for task in six.itervalues(dag.task_dict):
match = pattern.match(task.task_id)
task_depth = int(match.group(1))
# the sum of each stages after this task + itself
correct_weight = weight
calculated_weight = task.priority_weight_total
self.assertEquals(calculated_weight, correct_weight)
# Test if we enter an invalid weight rule
with DAG('dag', start_date=DEFAULT_DATE,
default_args={'owner': 'owner1'}) as dag:
with self.assertRaises(AirflowException):
DummyOperator(task_id='should_fail', weight_rule='no rule')
def test_get_num_task_instances(self):
test_dag_id = 'test_get_num_task_instances_dag'
test_task_id = 'task_1'
test_dag = DAG(dag_id=test_dag_id, start_date=DEFAULT_DATE)
test_task = DummyOperator(task_id=test_task_id, dag=test_dag)
ti1 = TI(task=test_task, execution_date=DEFAULT_DATE)
ti1.state = None
ti2 = TI(task=test_task, execution_date=DEFAULT_DATE + datetime.timedelta(days=1))
ti2.state = State.RUNNING
ti3 = TI(task=test_task, execution_date=DEFAULT_DATE + datetime.timedelta(days=2))
ti3.state = State.QUEUED
ti4 = TI(task=test_task, execution_date=DEFAULT_DATE + datetime.timedelta(days=3))
ti4.state = State.RUNNING
session = settings.Session()
session.merge(ti1)
session.merge(ti2)
session.merge(ti3)
session.merge(ti4)
session.commit()
self.assertEqual(0, DAG.get_num_task_instances(test_dag_id, ['fakename'],
session=session))
self.assertEqual(4, DAG.get_num_task_instances(test_dag_id, [test_task_id],
session=session))
self.assertEqual(4, DAG.get_num_task_instances(test_dag_id,
['fakename', test_task_id], session=session))
self.assertEqual(1, DAG.get_num_task_instances(test_dag_id, [test_task_id],
states=[None], session=session))
self.assertEqual(2, DAG.get_num_task_instances(test_dag_id, [test_task_id],
states=[State.RUNNING], session=session))
self.assertEqual(3, DAG.get_num_task_instances(test_dag_id, [test_task_id],
states=[None, State.RUNNING], session=session))
self.assertEqual(4, DAG.get_num_task_instances(test_dag_id, [test_task_id],
states=[None, State.QUEUED, State.RUNNING], session=session))
session.close()
def test_render_template_field(self):
"""Tests if render_template from a field works"""
dag = DAG('test-dag',
start_date=DEFAULT_DATE)
with dag:
task = DummyOperator(task_id='op1')
result = task.render_template('', '{{ foo }}', dict(foo='bar'))
self.assertEqual(result, 'bar')
def test_render_template_field_macro(self):
""" Tests if render_template from a field works,
if a custom filter was defined"""
dag = DAG('test-dag',
start_date=DEFAULT_DATE,
user_defined_macros=dict(foo='bar'))
with dag:
task = DummyOperator(task_id='op1')
result = task.render_template('', '{{ foo }}', dict())
self.assertEqual(result, 'bar')
def test_render_template_numeric_field(self):
""" Tests if render_template from a field works,
if a custom filter was defined"""
dag = DAG('test-dag',
start_date=DEFAULT_DATE,
user_defined_macros=dict(foo='bar'))
with dag:
task = DummyOperator(task_id='op1')
result = task.render_template('', 1, dict())
self.assertEqual(result, 1)
def test_user_defined_filters(self):
def jinja_udf(name):
return 'Hello %s' % name
dag = models.DAG('test-dag',
start_date=DEFAULT_DATE,
user_defined_filters=dict(hello=jinja_udf))
jinja_env = dag.get_template_env()
self.assertIn('hello', jinja_env.filters)
self.assertEqual(jinja_env.filters['hello'], jinja_udf)
def test_render_template_field_filter(self):
""" Tests if render_template from a field works,
if a custom filter was defined"""
def jinja_udf(name):
return 'Hello %s' % name
dag = DAG('test-dag',
start_date=DEFAULT_DATE,
user_defined_filters=dict(hello=jinja_udf))
with dag:
task = DummyOperator(task_id='op1')
result = task.render_template('', "{{ 'world' | hello}}", dict())
self.assertEqual(result, 'Hello world')
def test_cycle(self):
# test empty
dag = DAG(
'dag',
start_date=DEFAULT_DATE,
default_args={'owner': 'owner1'})
self.assertFalse(dag.test_cycle())
# test single task
dag = DAG(
'dag',
start_date=DEFAULT_DATE,
default_args={'owner': 'owner1'})
with dag:
opA = DummyOperator(task_id='A')
self.assertFalse(dag.test_cycle())
# test no cycle
dag = DAG(
'dag',
start_date=DEFAULT_DATE,
default_args={'owner': 'owner1'})
# A -> B -> C
# B -> D
# E -> F
with dag:
opA = DummyOperator(task_id='A')
opB = DummyOperator(task_id='B')
opC = DummyOperator(task_id='C')
opD = DummyOperator(task_id='D')
opE = DummyOperator(task_id='E')
opF = DummyOperator(task_id='F')
opA.set_downstream(opB)
opB.set_downstream(opC)
opB.set_downstream(opD)
opE.set_downstream(opF)
self.assertFalse(dag.test_cycle())
# test self loop
dag = DAG(
'dag',
start_date=DEFAULT_DATE,
default_args={'owner': 'owner1'})
# A -> A
with dag:
opA = DummyOperator(task_id='A')
opA.set_downstream(opA)
with self.assertRaises(AirflowDagCycleException):
dag.test_cycle()
# test downstream self loop
dag = DAG(
'dag',
start_date=DEFAULT_DATE,
default_args={'owner': 'owner1'})
# A -> B -> C -> D -> E -> E
with dag:
opA = DummyOperator(task_id='A')
opB = DummyOperator(task_id='B')
opC = DummyOperator(task_id='C')
opD = DummyOperator(task_id='D')
opE = DummyOperator(task_id='E')
opA.set_downstream(opB)
opB.set_downstream(opC)
opC.set_downstream(opD)
opD.set_downstream(opE)
opE.set_downstream(opE)
with self.assertRaises(AirflowDagCycleException):
dag.test_cycle()
# large loop
dag = DAG(
'dag',
start_date=DEFAULT_DATE,
default_args={'owner': 'owner1'})
# A -> B -> C -> D -> E -> A
with dag:
opA = DummyOperator(task_id='A')
opB = DummyOperator(task_id='B')
opC = DummyOperator(task_id='C')
opD = DummyOperator(task_id='D')
opE = DummyOperator(task_id='E')
opA.set_downstream(opB)
opB.set_downstream(opC)
opC.set_downstream(opD)
opD.set_downstream(opE)
opE.set_downstream(opA)
with self.assertRaises(AirflowDagCycleException):
dag.test_cycle()
# test arbitrary loop
dag = DAG(
'dag',
start_date=DEFAULT_DATE,
default_args={'owner': 'owner1'})
# E-> A -> B -> F -> A
# -> C -> F
with dag:
opA = DummyOperator(task_id='A')
opB = DummyOperator(task_id='B')
opC = DummyOperator(task_id='C')
opD = DummyOperator(task_id='D')
opE = DummyOperator(task_id='E')
opF = DummyOperator(task_id='F')
opA.set_downstream(opB)
opA.set_downstream(opC)
opE.set_downstream(opA)
opC.set_downstream(opF)
opB.set_downstream(opF)
opF.set_downstream(opA)
with self.assertRaises(AirflowDagCycleException):
dag.test_cycle()
class DagStatTest(unittest.TestCase):
def test_dagstats_crud(self):
DagStat.create(dag_id='test_dagstats_crud')
session = settings.Session()
qry = session.query(DagStat).filter(DagStat.dag_id == 'test_dagstats_crud')
self.assertEqual(len(qry.all()), len(State.dag_states))
DagStat.set_dirty(dag_id='test_dagstats_crud')
res = qry.all()
for stat in res:
self.assertTrue(stat.dirty)
# create missing
DagStat.set_dirty(dag_id='test_dagstats_crud_2')
qry2 = session.query(DagStat).filter(DagStat.dag_id == 'test_dagstats_crud_2')
self.assertEqual(len(qry2.all()), len(State.dag_states))
dag = DAG(
'test_dagstats_crud',
start_date=DEFAULT_DATE,
default_args={'owner': 'owner1'})
with dag:
op1 = DummyOperator(task_id='A')
now = timezone.utcnow()
dr = dag.create_dagrun(
run_id='manual__' + now.isoformat(),
execution_date=now,
start_date=now,
state=State.FAILED,
external_trigger=False,
)
DagStat.update(dag_ids=['test_dagstats_crud'])
res = qry.all()
for stat in res:
if stat.state == State.FAILED:
self.assertEqual(stat.count, 1)
else:
self.assertEqual(stat.count, 0)
DagStat.update()
res = qry2.all()
for stat in res:
self.assertFalse(stat.dirty)
class DagRunTest(unittest.TestCase):
def create_dag_run(self, dag,
state=State.RUNNING,
task_states=None,
execution_date=None,
is_backfill=False,
):
now = timezone.utcnow()
if execution_date is None:
execution_date = now
if is_backfill:
run_id = BackfillJob.ID_PREFIX + now.isoformat()
else:
run_id = 'manual__' + now.isoformat()
dag_run = dag.create_dagrun(
run_id=run_id,
execution_date=execution_date,
start_date=now,
state=state,
external_trigger=False,
)
if task_states is not None:
session = settings.Session()
for task_id, state in task_states.items():
ti = dag_run.get_task_instance(task_id)
ti.set_state(state, session)
session.close()
return dag_run
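# A run_id prefixed with BackfillJob.ID_PREFIX is what makes
# DagRun.is_backfill evaluate true; test_is_backfill below depends on this.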
def test_clear_task_instances_for_backfill_dagrun(self):
now = timezone.utcnow()
session = settings.Session()
dag_id = 'test_clear_task_instances_for_backfill_dagrun'
dag = DAG(dag_id=dag_id, start_date=now)
self.create_dag_run(dag, execution_date=now, is_backfill=True)
task0 = DummyOperator(task_id='backfill_task_0', owner='test', dag=dag)
ti0 = TI(task=task0, execution_date=now)
ti0.run()
qry = session.query(TI).filter(
TI.dag_id == dag.dag_id).all()
clear_task_instances(qry, session)
session.commit()
ti0.refresh_from_db()
dr0 = session.query(DagRun).filter(
DagRun.dag_id == dag_id,
DagRun.execution_date == now
).first()
self.assertEquals(dr0.state, State.RUNNING)
def test_id_for_date(self):
run_id = models.DagRun.id_for_date(
timezone.datetime(2015, 1, 2, 3, 4, 5, 6))
self.assertEqual(
'scheduled__2015-01-02T03:04:05', run_id,
'Generated run_id did not match expectations: {0}'.format(run_id))
def test_dagrun_find(self):
session = settings.Session()
now = timezone.utcnow()
dag_id1 = "test_dagrun_find_externally_triggered"
dag_run = models.DagRun(
dag_id=dag_id1,
run_id='manual__' + now.isoformat(),
execution_date=now,
start_date=now,
state=State.RUNNING,
external_trigger=True,
)
session.add(dag_run)
dag_id2 = "test_dagrun_find_not_externally_triggered"
dag_run = models.DagRun(
dag_id=dag_id2,
run_id='manual__' + now.isoformat(),
execution_date=now,
start_date=now,
state=State.RUNNING,
external_trigger=False,
)
session.add(dag_run)
session.commit()
self.assertEqual(1, len(models.DagRun.find(dag_id=dag_id1, external_trigger=True)))
self.assertEqual(0, len(models.DagRun.find(dag_id=dag_id1, external_trigger=False)))
self.assertEqual(0, len(models.DagRun.find(dag_id=dag_id2, external_trigger=True)))
self.assertEqual(1, len(models.DagRun.find(dag_id=dag_id2, external_trigger=False)))
def test_dagrun_success_when_all_skipped(self):
"""
Tests that a DAG run succeeds when all tasks are skipped
"""
dag = DAG(
dag_id='test_dagrun_success_when_all_skipped',
start_date=timezone.datetime(2017, 1, 1)
)
dag_task1 = ShortCircuitOperator(
task_id='test_short_circuit_false',
dag=dag,
python_callable=lambda: False)
dag_task2 = DummyOperator(
task_id='test_state_skipped1',
dag=dag)
dag_task3 = DummyOperator(
task_id='test_state_skipped2',
dag=dag)
dag_task1.set_downstream(dag_task2)
dag_task2.set_downstream(dag_task3)
initial_task_states = {
'test_short_circuit_false': State.SUCCESS,
'test_state_skipped1': State.SKIPPED,
'test_state_skipped2': State.SKIPPED,
}
dag_run = self.create_dag_run(dag=dag,
state=State.RUNNING,
task_states=initial_task_states)
updated_dag_state = dag_run.update_state()
self.assertEqual(State.SUCCESS, updated_dag_state)
def test_dagrun_success_conditions(self):
session = settings.Session()
dag = DAG(
'test_dagrun_success_conditions',
start_date=DEFAULT_DATE,
default_args={'owner': 'owner1'})
# A -> B
# A -> C -> D
# ordered: B, D, C, A or D, B, C, A or D, C, B, A
with dag:
op1 = DummyOperator(task_id='A')
op2 = DummyOperator(task_id='B')
op3 = DummyOperator(task_id='C')
op4 = DummyOperator(task_id='D')
op1.set_upstream([op2, op3])
op3.set_upstream(op4)
dag.clear()
now = timezone.utcnow()
dr = dag.create_dagrun(run_id='test_dagrun_success_conditions',
state=State.RUNNING,
execution_date=now,
start_date=now)
# op1 = root
ti_op1 = dr.get_task_instance(task_id=op1.task_id)
ti_op1.set_state(state=State.SUCCESS, session=session)
ti_op2 = dr.get_task_instance(task_id=op2.task_id)
ti_op3 = dr.get_task_instance(task_id=op3.task_id)
ti_op4 = dr.get_task_instance(task_id=op4.task_id)
# root is successful, but unfinished tasks
state = dr.update_state()
self.assertEqual(State.RUNNING, state)
# one has failed, but root is successful
ti_op2.set_state(state=State.FAILED, session=session)
ti_op3.set_state(state=State.SUCCESS, session=session)
ti_op4.set_state(state=State.SUCCESS, session=session)
state = dr.update_state()
self.assertEqual(State.SUCCESS, state)
def test_dagrun_deadlock(self):
session = settings.Session()
dag = DAG(
'test_dagrun_deadlock',
start_date=DEFAULT_DATE,
default_args={'owner': 'owner1'})
with dag:
op1 = DummyOperator(task_id='A')
op2 = DummyOperator(task_id='B')
op2.trigger_rule = TriggerRule.ONE_FAILED
op2.set_upstream(op1)
dag.clear()
now = timezone.utcnow()
dr = dag.create_dagrun(run_id='test_dagrun_deadlock',
state=State.RUNNING,
execution_date=now,
start_date=now)
ti_op1 = dr.get_task_instance(task_id=op1.task_id)
ti_op1.set_state(state=State.SUCCESS, session=session)
ti_op2 = dr.get_task_instance(task_id=op2.task_id)
ti_op2.set_state(state=State.NONE, session=session)
dr.update_state()
self.assertEqual(dr.state, State.RUNNING)
ti_op2.set_state(state=State.NONE, session=session)
op2.trigger_rule = 'invalid'
dr.update_state()
self.assertEqual(dr.state, State.FAILED)
def test_dagrun_no_deadlock(self):
session = settings.Session()
dag = DAG('test_dagrun_no_deadlock',
start_date=DEFAULT_DATE)
with dag:
op1 = DummyOperator(task_id='dop', depends_on_past=True)
op2 = DummyOperator(task_id='tc', task_concurrency=1)
dag.clear()
dr = dag.create_dagrun(run_id='test_dagrun_no_deadlock_1',
state=State.RUNNING,
execution_date=DEFAULT_DATE,
start_date=DEFAULT_DATE)
dr2 = dag.create_dagrun(run_id='test_dagrun_no_deadlock_2',
state=State.RUNNING,
execution_date=DEFAULT_DATE + datetime.timedelta(days=1),
start_date=DEFAULT_DATE + datetime.timedelta(days=1))
ti1_op1 = dr.get_task_instance(task_id='dop')
ti2_op1 = dr2.get_task_instance(task_id='dop')
ti1_op2 = dr.get_task_instance(task_id='tc')
ti2_op2 = dr2.get_task_instance(task_id='tc')
ti1_op1.set_state(state=State.RUNNING, session=session)
dr.update_state()
dr2.update_state()
self.assertEqual(dr.state, State.RUNNING)
self.assertEqual(dr2.state, State.RUNNING)
ti1_op2.set_state(state=State.RUNNING, session=session)
dr.update_state()
dr2.update_state()
self.assertEqual(dr.state, State.RUNNING)
self.assertEqual(dr2.state, State.RUNNING)
def test_dagrun_success_callback(self):
def on_success_callable(context):
self.assertEqual(
context['dag_run'].dag_id,
'test_dagrun_success_callback'
)
dag = DAG(
dag_id='test_dagrun_success_callback',
start_date=datetime.datetime(2017, 1, 1),
on_success_callback=on_success_callable,
)
dag_task1 = DummyOperator(
task_id='test_state_succeeded1',
dag=dag)
dag_task2 = DummyOperator(
task_id='test_state_succeeded2',
dag=dag)
dag_task1.set_downstream(dag_task2)
initial_task_states = {
'test_state_succeeded1': State.SUCCESS,
'test_state_succeeded2': State.SUCCESS,
}
dag_run = self.create_dag_run(dag=dag,
state=State.RUNNING,
task_states=initial_task_states)
updated_dag_state = dag_run.update_state()
self.assertEqual(State.SUCCESS, updated_dag_state)
def test_dagrun_failure_callback(self):
def on_failure_callable(context):
self.assertEqual(
context['dag_run'].dag_id,
'test_dagrun_failure_callback'
)
dag = DAG(
dag_id='test_dagrun_failure_callback',
start_date=datetime.datetime(2017, 1, 1),
on_failure_callback=on_failure_callable,
)
dag_task1 = DummyOperator(
task_id='test_state_succeeded1',
dag=dag)
dag_task2 = DummyOperator(
task_id='test_state_failed2',
dag=dag)
initial_task_states = {
'test_state_succeeded1': State.SUCCESS,
'test_state_failed2': State.FAILED,
}
dag_task1.set_downstream(dag_task2)
dag_run = self.create_dag_run(dag=dag,
state=State.RUNNING,
task_states=initial_task_states)
updated_dag_state = dag_run.update_state()
self.assertEqual(State.FAILED, updated_dag_state)
def test_get_task_instance_on_empty_dagrun(self):
"""
Make sure that a proper value is returned when a dagrun has no task instances
"""
dag = DAG(
dag_id='test_get_task_instance_on_empty_dagrun',
start_date=timezone.datetime(2017, 1, 1)
)
dag_task1 = ShortCircuitOperator(
task_id='test_short_circuit_false',
dag=dag,
python_callable=lambda: False)
session = settings.Session()
now = timezone.utcnow()
# Don't use create_dagrun since it will create the task instances too which we
# don't want
dag_run = models.DagRun(
dag_id=dag.dag_id,
run_id='manual__' + now.isoformat(),
execution_date=now,
start_date=now,
state=State.RUNNING,
external_trigger=False,
)
session.add(dag_run)
session.commit()
ti = dag_run.get_task_instance('test_short_circuit_false')
self.assertEqual(None, ti)
def test_get_latest_runs(self):
session = settings.Session()
dag = DAG(
dag_id='test_latest_runs_1',
start_date=DEFAULT_DATE)
dag_1_run_1 = self.create_dag_run(dag,
execution_date=timezone.datetime(2015, 1, 1))
dag_1_run_2 = self.create_dag_run(dag,
execution_date=timezone.datetime(2015, 1, 2))
dagruns = models.DagRun.get_latest_runs(session)
session.close()
for dagrun in dagruns:
if dagrun.dag_id == 'test_latest_runs_1':
self.assertEqual(dagrun.execution_date, timezone.datetime(2015, 1, 2))
def test_is_backfill(self):
dag = DAG(dag_id='test_is_backfill', start_date=DEFAULT_DATE)
dagrun = self.create_dag_run(dag, execution_date=DEFAULT_DATE)
dagrun.run_id = BackfillJob.ID_PREFIX + '_sfddsffds'
dagrun2 = self.create_dag_run(dag, execution_date=DEFAULT_DATE + datetime.timedelta(days=1))
self.assertTrue(dagrun.is_backfill)
self.assertFalse(dagrun2.is_backfill)
def test_removed_task_instances_can_be_restored(self):
def with_all_tasks_removed(dag):
return DAG(dag_id=dag.dag_id, start_date=dag.start_date)
dag = DAG('test_task_restoration', start_date=DEFAULT_DATE)
dag.add_task(DummyOperator(task_id='flaky_task', owner='test'))
dagrun = self.create_dag_run(dag)
flaky_ti = dagrun.get_task_instances()[0]
self.assertEquals('flaky_task', flaky_ti.task_id)
self.assertEquals(State.NONE, flaky_ti.state)
dagrun.dag = with_all_tasks_removed(dag)
dagrun.verify_integrity()
flaky_ti.refresh_from_db()
self.assertEquals(State.REMOVED, flaky_ti.state)
dagrun.dag.add_task(DummyOperator(task_id='flaky_task', owner='test'))
dagrun.verify_integrity()
flaky_ti.refresh_from_db()
self.assertEquals(State.NONE, flaky_ti.state)
class DagBagTest(unittest.TestCase):
def test_get_existing_dag(self):
"""
test that we're able to parse some example DAGs and retrieve them
"""
dagbag = models.DagBag(include_examples=True)
some_expected_dag_ids = ["example_bash_operator",
"example_branch_operator"]
for dag_id in some_expected_dag_ids:
dag = dagbag.get_dag(dag_id)
self.assertIsNotNone(dag)
self.assertEqual(dag_id, dag.dag_id)
self.assertGreaterEqual(dagbag.size(), 7)
def test_get_non_existing_dag(self):
"""
test that retrieving a non-existing dag id returns None without crashing
"""
dagbag = models.DagBag(include_examples=True)
non_existing_dag_id = "non_existing_dag_id"
self.assertIsNone(dagbag.get_dag(non_existing_dag_id))
def test_process_file_that_contains_multi_bytes_char(self):
"""
test that we're able to parse file that contains multi-byte char
"""
f = NamedTemporaryFile()
f.write('\u3042'.encode('utf8')) # write multi-byte char (hiragana)
f.flush()
dagbag = models.DagBag(include_examples=True)
self.assertEqual([], dagbag.process_file(f.name))
def test_zip_skip_log(self):
"""
test the loading of a DAG from within a zip file that skips another file because
it doesn't have "airflow" and "DAG"
"""
from mock import Mock
with patch('airflow.models.DagBag.log') as log_mock:
log_mock.info = Mock()
test_zip_path = os.path.join(TEST_DAGS_FOLDER, "test_zip.zip")
dagbag = models.DagBag(dag_folder=test_zip_path, include_examples=False)
self.assertTrue(dagbag.has_logged)
log_mock.info.assert_any_call("File %s assumed to contain no DAGs. Skipping.",
test_zip_path)
def test_zip(self):
"""
test the loading of a DAG within a zip file that includes dependencies
"""
dagbag = models.DagBag()
dagbag.process_file(os.path.join(TEST_DAGS_FOLDER, "test_zip.zip"))
self.assertTrue(dagbag.get_dag("test_zip_dag"))
@patch.object(DagModel,'get_current')
def test_get_dag_without_refresh(self, mock_dagmodel):
"""
Test that, once a DAG is loaded, it doesn't get refreshed again if it
hasn't been expired.
"""
dag_id = 'example_bash_operator'
mock_dagmodel.return_value = DagModel()
mock_dagmodel.return_value.last_expired = None
mock_dagmodel.return_value.fileloc = 'foo'
class TestDagBag(models.DagBag):
process_file_calls = 0
def process_file(self, filepath, only_if_updated=True, safe_mode=True):
if 'example_bash_operator.py' == os.path.basename(filepath):
TestDagBag.process_file_calls += 1
super(TestDagBag, self).process_file(filepath, only_if_updated, safe_mode)
dagbag = TestDagBag(include_examples=True)
processed_files = dagbag.process_file_calls
# Should not call process_file again, since it's already loaded during init.
self.assertEqual(1, dagbag.process_file_calls)
self.assertIsNotNone(dagbag.get_dag(dag_id))
self.assertEqual(1, dagbag.process_file_calls)
def test_get_dag_fileloc(self):
"""
Test that fileloc is correctly set when we load example DAGs,
specifically SubDAGs.
"""
dagbag = models.DagBag(include_examples=True)
expected = {
'example_bash_operator': 'example_bash_operator.py',
'example_subdag_operator': 'example_subdag_operator.py',
'example_subdag_operator.section-1': 'subdags/subdag.py'
}
for dag_id, path in expected.items():
dag = dagbag.get_dag(dag_id)
self.assertTrue(
dag.fileloc.endswith('airflow/example_dags/' + path))
def process_dag(self, create_dag):
"""
Helper method to process a file generated from the input create_dag function.
"""
# write source to file
source = textwrap.dedent(''.join(
inspect.getsource(create_dag).splitlines(True)[1:-1]))
f = NamedTemporaryFile()
f.write(source.encode('utf8'))
f.flush()
dagbag = models.DagBag(include_examples=False)
found_dags = dagbag.process_file(f.name)
return (dagbag, found_dags, f.name)
def validate_dags(self, expected_parent_dag, actual_found_dags, actual_dagbag,
should_be_found=True):
expected_dag_ids = list(map(lambda dag: dag.dag_id, expected_parent_dag.subdags))
expected_dag_ids.append(expected_parent_dag.dag_id)
actual_found_dag_ids = list(map(lambda dag: dag.dag_id, actual_found_dags))
for dag_id in expected_dag_ids:
actual_dagbag.log.info('validating %s' % dag_id)
self.assertEquals(
dag_id in actual_found_dag_ids, should_be_found,
'dag "%s" should %shave been found after processing dag "%s"' %
(dag_id, '' if should_be_found else 'not ', expected_parent_dag.dag_id)
)
self.assertEquals(
dag_id in actual_dagbag.dags, should_be_found,
'dag "%s" should %sbe in dagbag.dags after processing dag "%s"' %
(dag_id, '' if should_be_found else 'not ', expected_parent_dag.dag_id)
)
def test_load_subdags(self):
# Define Dag to load
def standard_subdag():
from airflow.models import DAG
from airflow.operators.dummy_operator import DummyOperator
from airflow.operators.subdag_operator import SubDagOperator
import datetime
DAG_NAME = 'master'
DEFAULT_ARGS = {
'owner': 'owner1',
'start_date': datetime.datetime(2016, 1, 1)
}
dag = DAG(
DAG_NAME,
default_args=DEFAULT_ARGS)
# master:
# A -> opSubdag_0
# master.opSubdag_0:
# -> subdag_0.task
# A -> opSubdag_1
# master.opSubdag_1:
# -> subdag_1.task
with dag:
def subdag_0():
subdag_0 = DAG('master.opSubdag_0', default_args=DEFAULT_ARGS)
DummyOperator(task_id='subdag_0.task', dag=subdag_0)
return subdag_0
def subdag_1():
subdag_1 = DAG('master.opSubdag_1', default_args=DEFAULT_ARGS)
DummyOperator(task_id='subdag_1.task', dag=subdag_1)
return subdag_1
opSubdag_0 = SubDagOperator(
task_id='opSubdag_0', dag=dag, subdag=subdag_0())
opSubdag_1 = SubDagOperator(
task_id='opSubdag_1', dag=dag, subdag=subdag_1())
opA = DummyOperator(task_id='A')
opA.set_downstream(opSubdag_0)
opA.set_downstream(opSubdag_1)
return dag
testDag = standard_subdag()
# sanity check to make sure DAG.subdags is still functioning properly
self.assertEqual(len(testDag.subdags), 2)
# Perform processing dag
dagbag, found_dags, _ = self.process_dag(standard_subdag)
# Validate correctness
# all dags from testDag should be listed
self.validate_dags(testDag, found_dags, dagbag)
# Define Dag to load
def nested_subdags():
from airflow.models import DAG
from airflow.operators.dummy_operator import DummyOperator
from airflow.operators.subdag_operator import SubDagOperator
import datetime
DAG_NAME = 'master'
DEFAULT_ARGS = {
'owner': 'owner1',
'start_date': datetime.datetime(2016, 1, 1)
}
dag = DAG(
DAG_NAME,
default_args=DEFAULT_ARGS)
# master:
# A -> opSubdag_0
# master.opSubdag_0:
# -> opSubdag_A
# master.opSubdag_0.opSubdag_A:
# -> subdag_A.task
# -> opSubdag_B
# master.opSubdag_0.opSubdag_B:
# -> subdag_B.task
# A -> opSubdag_1
# master.opSubdag_1:
# -> opSubdag_C
# master.opSubdag_1.opSubdag_C:
# -> subdag_C.task
# -> opSubdag_D
# master.opSubdag_1.opSubdag_D:
# -> subdag_D.task
with dag:
def subdag_A():
subdag_A = DAG(
'master.opSubdag_0.opSubdag_A', default_args=DEFAULT_ARGS)
DummyOperator(task_id='subdag_A.task', dag=subdag_A)
return subdag_A
def subdag_B():
subdag_B = DAG(
'master.opSubdag_0.opSubdag_B', default_args=DEFAULT_ARGS)
DummyOperator(task_id='subdag_B.task', dag=subdag_B)
return subdag_B
def subdag_C():
subdag_C = DAG(
'master.opSubdag_1.opSubdag_C', default_args=DEFAULT_ARGS)
DummyOperator(task_id='subdag_C.task', dag=subdag_C)
return subdag_C
def subdag_D():
subdag_D = DAG(
'master.opSubdag_1.opSubdag_D', default_args=DEFAULT_ARGS)
DummyOperator(task_id='subdag_D.task', dag=subdag_D)
return subdag_D
def subdag_0():
subdag_0 = DAG('master.opSubdag_0', default_args=DEFAULT_ARGS)
SubDagOperator(task_id='opSubdag_A', dag=subdag_0, subdag=subdag_A())
SubDagOperator(task_id='opSubdag_B', dag=subdag_0, subdag=subdag_B())
return subdag_0
def subdag_1():
subdag_1 = DAG('master.opSubdag_1', default_args=DEFAULT_ARGS)
SubDagOperator(task_id='opSubdag_C', dag=subdag_1, subdag=subdag_C())
SubDagOperator(task_id='opSubdag_D', dag=subdag_1, subdag=subdag_D())
return subdag_1
opSubdag_0 = SubDagOperator(
task_id='opSubdag_0', dag=dag, subdag=subdag_0())
opSubdag_1 = SubDagOperator(
task_id='opSubdag_1', dag=dag, subdag=subdag_1())
opA = DummyOperator(task_id='A')
opA.set_downstream(opSubdag_0)
opA.set_downstream(opSubdag_1)
return dag
testDag = nested_subdags()
# sanity check to make sure DAG.subdags is still functioning properly
self.assertEqual(len(testDag.subdags), 6)
# Perform processing dag
dagbag, found_dags, _ = self.process_dag(nested_subdags)
# Validate correctness
# all dags from testDag should be listed
self.validate_dags(testDag, found_dags, dagbag)
def test_skip_cycle_dags(self):
"""
Don't crash when loading an invalid (contains a cycle) DAG file.
Don't load the dag into the DagBag either
"""
# Define Dag to load
def basic_cycle():
from airflow.models import DAG
from airflow.operators.dummy_operator import DummyOperator
import datetime
DAG_NAME = 'cycle_dag'
DEFAULT_ARGS = {
'owner': 'owner1',
'start_date': datetime.datetime(2016, 1, 1)
}
dag = DAG(
DAG_NAME,
default_args=DEFAULT_ARGS)
# A -> A
with dag:
opA = DummyOperator(task_id='A')
opA.set_downstream(opA)
return dag
testDag = basic_cycle()
# sanity check to make sure DAG.subdags is still functioning properly
self.assertEqual(len(testDag.subdags), 0)
# Perform processing dag
dagbag, found_dags, file_path = self.process_dag(basic_cycle)
# Validate correctness
# None of the dags should be found
self.validate_dags(testDag, found_dags, dagbag, should_be_found=False)
self.assertIn(file_path, dagbag.import_errors)
# Define Dag to load
def nested_subdag_cycle():
from airflow.models import DAG
from airflow.operators.dummy_operator import DummyOperator
from airflow.operators.subdag_operator import SubDagOperator
import datetime
DAG_NAME = 'nested_cycle'
DEFAULT_ARGS = {
'owner': 'owner1',
'start_date': datetime.datetime(2016, 1, 1)
}
dag = DAG(
DAG_NAME,
default_args=DEFAULT_ARGS)
# cycle:
# A -> opSubdag_0
# cycle.opSubdag_0:
# -> opSubdag_A
# cycle.opSubdag_0.opSubdag_A:
# -> subdag_A.task
# -> opSubdag_B
# cycle.opSubdag_0.opSubdag_B:
# -> subdag_B.task
# A -> opSubdag_1
# cycle.opSubdag_1:
# -> opSubdag_C
# cycle.opSubdag_1.opSubdag_C:
# -> subdag_C.task -> subdag_C.task >Invalid Loop<
# -> opSubdag_D
# cycle.opSubdag_1.opSubdag_D:
# -> subdag_D.task
with dag:
def subdag_A():
subdag_A = DAG(
'nested_cycle.opSubdag_0.opSubdag_A', default_args=DEFAULT_ARGS)
DummyOperator(task_id='subdag_A.task', dag=subdag_A)
return subdag_A
def subdag_B():
subdag_B = DAG(
'nested_cycle.opSubdag_0.opSubdag_B', default_args=DEFAULT_ARGS)
DummyOperator(task_id='subdag_B.task', dag=subdag_B)
return subdag_B
def subdag_C():
subdag_C = DAG(
'nested_cycle.opSubdag_1.opSubdag_C', default_args=DEFAULT_ARGS)
opSubdag_C_task = DummyOperator(
task_id='subdag_C.task', dag=subdag_C)
# introduce a loop in opSubdag_C
opSubdag_C_task.set_downstream(opSubdag_C_task)
return subdag_C
def subdag_D():
subdag_D = DAG(
'nested_cycle.opSubdag_1.opSubdag_D', default_args=DEFAULT_ARGS)
DummyOperator(task_id='subdag_D.task', dag=subdag_D)
return subdag_D
def subdag_0():
subdag_0 = DAG('nested_cycle.opSubdag_0', default_args=DEFAULT_ARGS)
SubDagOperator(task_id='opSubdag_A', dag=subdag_0, subdag=subdag_A())
SubDagOperator(task_id='opSubdag_B', dag=subdag_0, subdag=subdag_B())
return subdag_0
def subdag_1():
subdag_1 = DAG('nested_cycle.opSubdag_1', default_args=DEFAULT_ARGS)
SubDagOperator(task_id='opSubdag_C', dag=subdag_1, subdag=subdag_C())
SubDagOperator(task_id='opSubdag_D', dag=subdag_1, subdag=subdag_D())
return subdag_1
opSubdag_0 = SubDagOperator(
task_id='opSubdag_0', dag=dag, subdag=subdag_0())
opSubdag_1 = SubDagOperator(
task_id='opSubdag_1', dag=dag, subdag=subdag_1())
opA = DummyOperator(task_id='A')
opA.set_downstream(opSubdag_0)
opA.set_downstream(opSubdag_1)
return dag
testDag = nested_subdag_cycle()
# sanity check to make sure DAG.subdags is still functioning properly
self.assertEqual(len(testDag.subdags), 6)
# Perform processing dag
dagbag, found_dags, file_path = self.process_dag(nested_subdag_cycle)
# Validate correctness
# None of the dags should be found
self.validate_dags(testDag, found_dags, dagbag, should_be_found=False)
self.assertIn(file_path, dagbag.import_errors)
def test_process_file_with_none(self):
"""
test that process_file can handle Nones
"""
dagbag = models.DagBag(include_examples=True)
self.assertEqual([], dagbag.process_file(None))
@patch.object(TI, 'handle_failure')
def test_kill_zombies(self, mock_ti):
"""
Test that kill zombies call TIs failure handler with proper context
"""
dagbag = models.DagBag()
session = settings.Session
dag = dagbag.get_dag('example_branch_operator')
task = dag.get_task(task_id='run_this_first')
ti = TI(task, datetime.datetime.now() - datetime.timedelta(1), 'running')
lj = LocalTaskJob(ti)
lj.state = State.SHUTDOWN
session.add(lj)
session.commit()
ti.job_id = lj.id
session.add(ti)
session.commit()
dagbag.kill_zombies()
mock_ti.assert_called_with(ANY,
configuration.getboolean('core', 'unit_test_mode'),
ANY)
class TaskInstanceTest(unittest.TestCase):
def test_set_task_dates(self):
"""
Test that tasks properly take start/end dates from DAGs
"""
dag = DAG('dag', start_date=DEFAULT_DATE, end_date=DEFAULT_DATE + datetime.timedelta(days=10))
op1 = DummyOperator(task_id='op_1', owner='test')
self.assertTrue(op1.start_date is None and op1.end_date is None)
# dag should assign its dates to op1 because op1 has no dates
dag.add_task(op1)
self.assertTrue(
op1.start_date == dag.start_date and op1.end_date == dag.end_date)
op2 = DummyOperator(
task_id='op_2',
owner='test',
start_date=DEFAULT_DATE - datetime.timedelta(days=1),
end_date=DEFAULT_DATE + datetime.timedelta(days=11))
# dag should assign its dates to op2 because they are more restrictive
dag.add_task(op2)
self.assertTrue(
op2.start_date == dag.start_date and op2.end_date == dag.end_date)
op3 = DummyOperator(
task_id='op_3',
owner='test',
start_date=DEFAULT_DATE + datetime.timedelta(days=1),
end_date=DEFAULT_DATE + datetime.timedelta(days=9))
# op3 should keep its dates because they are more restrictive
dag.add_task(op3)
self.assertTrue(
op3.start_date == DEFAULT_DATE + datetime.timedelta(days=1))
self.assertTrue(
op3.end_date == DEFAULT_DATE + datetime.timedelta(days=9))
def test_timezone_awareness(self):
NAIVE_DATETIME = DEFAULT_DATE.replace(tzinfo=None)
# check ti without dag (just for bw compat)
op_no_dag = DummyOperator(task_id='op_no_dag')
ti = TI(task=op_no_dag, execution_date=NAIVE_DATETIME)
self.assertEquals(ti.execution_date, DEFAULT_DATE)
# check with dag without localized execution_date
dag = DAG('dag', start_date=DEFAULT_DATE)
op1 = DummyOperator(task_id='op_1')
dag.add_task(op1)
ti = TI(task=op1, execution_date=NAIVE_DATETIME)
self.assertEquals(ti.execution_date, DEFAULT_DATE)
# with dag and localized execution_date
tz = pendulum.timezone("Europe/Amsterdam")
execution_date = timezone.datetime(2016, 1, 1, 1, 0, 0, tzinfo=tz)
utc_date = timezone.convert_to_utc(execution_date)
ti = TI(task=op1, execution_date=execution_date)
self.assertEquals(ti.execution_date, utc_date)
def test_set_dag(self):
"""
Test assigning Operators to Dags, including deferred assignment
"""
dag = DAG('dag', start_date=DEFAULT_DATE)
dag2 = DAG('dag2', start_date=DEFAULT_DATE)
op = DummyOperator(task_id='op_1', owner='test')
# no dag assigned
self.assertFalse(op.has_dag())
self.assertRaises(AirflowException, getattr, op, 'dag')
# no improper assignment
with self.assertRaises(TypeError):
op.dag = 1
op.dag = dag
# no reassignment
with self.assertRaises(AirflowException):
op.dag = dag2
# but assigning the same dag is ok
op.dag = dag
self.assertIs(op.dag, dag)
self.assertIn(op, dag.tasks)
def test_infer_dag(self):
dag = DAG('dag', start_date=DEFAULT_DATE)
dag2 = DAG('dag2', start_date=DEFAULT_DATE)
op1 = DummyOperator(task_id='test_op_1', owner='test')
op2 = DummyOperator(task_id='test_op_2', owner='test')
op3 = DummyOperator(task_id='test_op_3', owner='test', dag=dag)
op4 = DummyOperator(task_id='test_op_4', owner='test', dag=dag2)
# double check dags
self.assertEqual(
[i.has_dag() for i in [op1, op2, op3, op4]],
[False, False, True, True])
# can't combine operators with no dags
self.assertRaises(AirflowException, op1.set_downstream, op2)
# op2 should infer dag from op1
op1.dag = dag
op1.set_downstream(op2)
self.assertIs(op2.dag, dag)
# can't assign across multiple DAGs
self.assertRaises(AirflowException, op1.set_downstream, op4)
self.assertRaises(AirflowException, op1.set_downstream, [op3, op4])
def test_bitshift_compose_operators(self):
dag = DAG('dag', start_date=DEFAULT_DATE)
op1 = DummyOperator(task_id='test_op_1', owner='test')
op2 = DummyOperator(task_id='test_op_2', owner='test')
op3 = DummyOperator(task_id='test_op_3', owner='test')
op4 = DummyOperator(task_id='test_op_4', owner='test')
op5 = DummyOperator(task_id='test_op_5', owner='test')
# can't compose operators without dags
with self.assertRaises(AirflowException):
op1 >> op2
dag >> op1 >> op2 << op3
# make sure dag assignment carries through
# using __rrshift__
self.assertIs(op1.dag, dag)
self.assertIs(op2.dag, dag)
self.assertIs(op3.dag, dag)
# op2 should be downstream of both
self.assertIn(op2, op1.downstream_list)
self.assertIn(op2, op3.downstream_list)
# test dag assignment with __rlshift__
dag << op4
self.assertIs(op4.dag, dag)
# dag assignment with __rrshift__
dag >> op5
self.assertIs(op5.dag, dag)
@patch.object(DAG, 'concurrency_reached')
def test_requeue_over_concurrency(self, mock_concurrency_reached):
mock_concurrency_reached.return_value = True
dag = DAG(dag_id='test_requeue_over_concurrency', start_date=DEFAULT_DATE,
max_active_runs=1, concurrency=2)
task = DummyOperator(task_id='test_requeue_over_concurrency_op', dag=dag)
ti = TI(task=task, execution_date=timezone.utcnow())
ti.run()
self.assertEqual(ti.state, models.State.NONE)
@patch.object(TI, 'pool_full')
def test_run_pooling_task(self, mock_pool_full):
"""
test that running a task whose pool is full still updates the task state
(there is no pool dependency check in ti_deps anymore, so it also ends in SUCCESS)
"""
# Mock the pool out with a full pool because the pool doesn't actually exist
mock_pool_full.return_value = True
dag = models.DAG(dag_id='test_run_pooling_task')
task = DummyOperator(task_id='test_run_pooling_task_op', dag=dag,
pool='test_run_pooling_task_pool', owner='airflow',
start_date=timezone.datetime(2016, 2, 1, 0, 0, 0))
ti = TI(
task=task, execution_date=timezone.utcnow())
ti.run()
self.assertEqual(ti.state, models.State.SUCCESS)
@patch.object(TI, 'pool_full')
def test_run_pooling_task_with_mark_success(self, mock_pool_full):
"""
test that running a task with the mark_success param updates the task state
to SUCCESS without actually running the task.
"""
# Mock the pool out with a full pool because the pool doesn't actually exist
mock_pool_full.return_value = True
dag = models.DAG(dag_id='test_run_pooling_task_with_mark_success')
task = DummyOperator(
task_id='test_run_pooling_task_with_mark_success_op',
dag=dag,
pool='test_run_pooling_task_with_mark_success_pool',
owner='airflow',
start_date=timezone.datetime(2016, 2, 1, 0, 0, 0))
ti = TI(
task=task, execution_date=timezone.utcnow())
ti.run(mark_success=True)
self.assertEqual(ti.state, models.State.SUCCESS)
def test_run_pooling_task_with_skip(self):
"""
test that running a task which raises AirflowSkipException will end
up in a SKIPPED state.
"""
def raise_skip_exception():
raise AirflowSkipException
dag = models.DAG(dag_id='test_run_pooling_task_with_skip')
task = PythonOperator(
task_id='test_run_pooling_task_with_skip',
dag=dag,
python_callable=raise_skip_exception,
owner='airflow',
start_date=timezone.datetime(2016, 2, 1, 0, 0, 0))
ti = TI(
task=task, execution_date=timezone.utcnow())
ti.run()
self.assertEqual(models.State.SKIPPED, ti.state)
def test_retry_delay(self):
"""
Test that retry delays are respected
"""
dag = models.DAG(dag_id='test_retry_handling')
task = BashOperator(
task_id='test_retry_handling_op',
bash_command='exit 1',
retries=1,
retry_delay=datetime.timedelta(seconds=3),
dag=dag,
owner='airflow',
start_date=timezone.datetime(2016, 2, 1, 0, 0, 0))
def run_with_error(ti):
try:
ti.run()
except AirflowException:
pass
ti = TI(
task=task, execution_date=timezone.utcnow())
self.assertEqual(ti.try_number, 1)
# first run -- up for retry
run_with_error(ti)
self.assertEqual(ti.state, State.UP_FOR_RETRY)
self.assertEqual(ti.try_number, 2)
# second run -- still up for retry because retry_delay hasn't expired
run_with_error(ti)
self.assertEqual(ti.state, State.UP_FOR_RETRY)
# third run -- failed
time.sleep(3)
run_with_error(ti)
self.assertEqual(ti.state, State.FAILED)
@patch.object(TI, 'pool_full')
def test_retry_handling(self, mock_pool_full):
"""
Test that task retries are handled properly
"""
# Mock the pool with a pool with slots open since the pool doesn't actually exist
mock_pool_full.return_value = False
dag = models.DAG(dag_id='test_retry_handling')
task = BashOperator(
task_id='test_retry_handling_op',
bash_command='exit 1',
retries=1,
retry_delay=datetime.timedelta(seconds=0),
dag=dag,
owner='airflow',
start_date=timezone.datetime(2016, 2, 1, 0, 0, 0))
def run_with_error(ti):
try:
ti.run()
except AirflowException:
pass
ti = TI(
task=task, execution_date=timezone.utcnow())
self.assertEqual(ti.try_number, 1)
# first run -- up for retry
run_with_error(ti)
self.assertEqual(ti.state, State.UP_FOR_RETRY)
self.assertEqual(ti._try_number, 1)
self.assertEqual(ti.try_number, 2)
# second run -- fail
run_with_error(ti)
self.assertEqual(ti.state, State.FAILED)
self.assertEqual(ti._try_number, 2)
self.assertEqual(ti.try_number, 3)
# Clear the TI state since you can't run a task with a FAILED state without
# clearing it first
dag.clear()
# third run -- up for retry
run_with_error(ti)
self.assertEqual(ti.state, State.UP_FOR_RETRY)
self.assertEqual(ti._try_number, 3)
self.assertEqual(ti.try_number, 4)
# fourth run -- fail
run_with_error(ti)
ti.refresh_from_db()
self.assertEqual(ti.state, State.FAILED)
self.assertEqual(ti._try_number, 4)
self.assertEqual(ti.try_number, 5)
def test_next_retry_datetime(self):
delay = datetime.timedelta(seconds=30)
max_delay = datetime.timedelta(minutes=60)
dag = models.DAG(dag_id='fail_dag')
task = BashOperator(
task_id='task_with_exp_backoff_and_max_delay',
bash_command='exit 1',
retries=3,
retry_delay=delay,
retry_exponential_backoff=True,
max_retry_delay=max_delay,
dag=dag,
owner='airflow',
start_date=timezone.datetime(2016, 2, 1, 0, 0, 0))
ti = TI(
task=task, execution_date=DEFAULT_DATE)
ti.end_date = pendulum.instance(timezone.utcnow())
dt = ti.next_retry_datetime()
# between 30 * 2^-1 and 30 * 2^0 (15 and 30)
period = ti.end_date.add(seconds=30) - ti.end_date.add(seconds=15)
self.assertTrue(dt in period)
ti.try_number = 3
dt = ti.next_retry_datetime()
# between 30 * 2^2 and 30 * 2^3 (120 and 240)
period = ti.end_date.add(seconds=240) - ti.end_date.add(seconds=120)
self.assertTrue(dt in period)
ti.try_number = 5
dt = ti.next_retry_datetime()
# between 30 * 2^4 and 30 * 2^5 (480 and 960)
period = ti.end_date.add(seconds=960) - ti.end_date.add(seconds=480)
self.assertTrue(dt in period)
ti.try_number = 9
dt = ti.next_retry_datetime()
self.assertEqual(dt, ti.end_date+max_delay)
ti.try_number = 50
dt = ti.next_retry_datetime()
self.assertEqual(dt, ti.end_date+max_delay)
def test_depends_on_past(self):
dagbag = models.DagBag()
dag = dagbag.get_dag('test_depends_on_past')
dag.clear()
task = dag.tasks[0]
run_date = task.start_date + datetime.timedelta(days=5)
ti = TI(task, run_date)
# depends_on_past prevents the run
task.run(start_date=run_date, end_date=run_date)
ti.refresh_from_db()
self.assertIs(ti.state, None)
# ignore first depends_on_past to allow the run
task.run(
start_date=run_date,
end_date=run_date,
ignore_first_depends_on_past=True)
ti.refresh_from_db()
self.assertEqual(ti.state, State.SUCCESS)
# Parameterized tests to check for the correct firing
# of the trigger_rule under various circumstances
# Numeric fields are in order:
# successes, skipped, failed, upstream_failed, done
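# As a worked example, the first row below,
# ['all_success', 5, 0, 0, 0, 0, True, None, True], reads: with
# trigger_rule='all_success', successes=5, skipped=0, failed=0,
# upstream_failed=0, done=0 and flag_upstream_failed=True, the trigger
# rule dependency is expected to pass (expect_completed=True) without
# forcing a new task state (expect_state=None).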
@parameterized.expand([
#
# Tests for all_success
#
['all_success', 5, 0, 0, 0, 0, True, None, True],
['all_success', 2, 0, 0, 0, 0, True, None, False],
['all_success', 2, 0, 1, 0, 0, True, ST.UPSTREAM_FAILED, False],
['all_success', 2, 1, 0, 0, 0, True, ST.SKIPPED, False],
#
# Tests for one_success
#
['one_success', 5, 0, 0, 0, 5, True, None, True],
['one_success', 2, 0, 0, 0, 2, True, None, True],
['one_success', 2, 0, 1, 0, 3, True, None, True],
['one_success', 2, 1, 0, 0, 3, True, None, True],
#
# Tests for all_failed
#
['all_failed', 5, 0, 0, 0, 5, True, ST.SKIPPED, False],
['all_failed', 0, 0, 5, 0, 5, True, None, True],
['all_failed', 2, 0, 0, 0, 2, True, ST.SKIPPED, False],
['all_failed', 2, 0, 1, 0, 3, True, ST.SKIPPED, False],
['all_failed', 2, 1, 0, 0, 3, True, ST.SKIPPED, False],
#
# Tests for one_failed
#
['one_failed', 5, 0, 0, 0, 0, True, None, False],
['one_failed', 2, 0, 0, 0, 0, True, None, False],
['one_failed', 2, 0, 1, 0, 0, True, None, True],
['one_failed', 2, 1, 0, 0, 3, True, None, False],
['one_failed', 2, 3, 0, 0, 5, True, ST.SKIPPED, False],
#
# Tests for done
#
['all_done', 5, 0, 0, 0, 5, True, None, True],
['all_done', 2, 0, 0, 0, 2, True, None, False],
['all_done', 2, 0, 1, 0, 3, True, None, False],
['all_done', 2, 1, 0, 0, 3, True, None, False]
])
def test_check_task_dependencies(self, trigger_rule, successes, skipped,
failed, upstream_failed, done,
flag_upstream_failed,
expect_state, expect_completed):
start_date = timezone.datetime(2016, 2, 1, 0, 0, 0)
dag = models.DAG('test-dag', start_date=start_date)
downstream = DummyOperator(task_id='downstream',
dag=dag, owner='airflow',
trigger_rule=trigger_rule)
for i in range(5):
task = DummyOperator(task_id='runme_{}'.format(i),
dag=dag, owner='airflow')
task.set_downstream(downstream)
run_date = task.start_date + datetime.timedelta(days=5)
ti = TI(downstream, run_date)
dep_results = TriggerRuleDep()._evaluate_trigger_rule(
ti=ti,
successes=successes,
skipped=skipped,
failed=failed,
upstream_failed=upstream_failed,
done=done,
flag_upstream_failed=flag_upstream_failed)
completed = all([dep.passed for dep in dep_results])
self.assertEqual(completed, expect_completed)
self.assertEqual(ti.state, expect_state)
def test_xcom_pull(self):
"""
Test xcom_pull, using different filtering methods.
"""
dag = models.DAG(
dag_id='test_xcom', schedule_interval='@monthly',
start_date=timezone.datetime(2016, 6, 1, 0, 0, 0))
exec_date = timezone.utcnow()
# Push a value
task1 = DummyOperator(task_id='test_xcom_1', dag=dag, owner='airflow')
ti1 = TI(task=task1, execution_date=exec_date)
ti1.xcom_push(key='foo', value='bar')
# Push another value with the same key (but by a different task)
task2 = DummyOperator(task_id='test_xcom_2', dag=dag, owner='airflow')
ti2 = TI(task=task2, execution_date=exec_date)
ti2.xcom_push(key='foo', value='baz')
# Pull with no arguments
result = ti1.xcom_pull()
self.assertEqual(result, None)
# Pull the value pushed most recently by any task.
result = ti1.xcom_pull(key='foo')
self.assertEqual(result, 'baz')
# Pull the value pushed by the first task
result = ti1.xcom_pull(task_ids='test_xcom_1', key='foo')
self.assertEqual(result, 'bar')
# Pull the value pushed by the second task
result = ti1.xcom_pull(task_ids='test_xcom_2', key='foo')
self.assertEqual(result, 'baz')
# Pull the values pushed by both tasks
result = ti1.xcom_pull(
task_ids=['test_xcom_1', 'test_xcom_2'], key='foo')
self.assertEqual(result, ('bar', 'baz'))
def test_xcom_pull_after_success(self):
"""
tests xcom set/clear relative to a task in a 'success' rerun scenario
"""
key = 'xcom_key'
value = 'xcom_value'
dag = models.DAG(dag_id='test_xcom', schedule_interval='@monthly')
task = DummyOperator(
task_id='test_xcom',
dag=dag,
pool='test_xcom',
owner='airflow',
start_date=timezone.datetime(2016, 6, 2, 0, 0, 0))
exec_date = timezone.utcnow()
ti = TI(
task=task, execution_date=exec_date)
ti.run(mark_success=True)
ti.xcom_push(key=key, value=value)
self.assertEqual(ti.xcom_pull(task_ids='test_xcom', key=key), value)
ti.run()
# The second run and assert is to handle AIRFLOW-131 (don't clear on
# prior success)
self.assertEqual(ti.xcom_pull(task_ids='test_xcom', key=key), value)
# Test AIRFLOW-703: Xcom shouldn't be cleared if the task doesn't
# execute, even if dependencies are ignored
ti.run(ignore_all_deps=True, mark_success=True)
self.assertEqual(ti.xcom_pull(task_ids='test_xcom', key=key), value)
# Xcom IS finally cleared once task has executed
ti.run(ignore_all_deps=True)
self.assertEqual(ti.xcom_pull(task_ids='test_xcom', key=key), None)
def test_xcom_pull_different_execution_date(self):
"""
tests xcom fetch behavior with different execution dates, using
both xcom_pull with "include_prior_dates" and without
"""
key = 'xcom_key'
value = 'xcom_value'
dag = models.DAG(dag_id='test_xcom', schedule_interval='@monthly')
task = DummyOperator(
task_id='test_xcom',
dag=dag,
pool='test_xcom',
owner='airflow',
start_date=timezone.datetime(2016, 6, 2, 0, 0, 0))
exec_date = timezone.utcnow()
ti = TI(
task=task, execution_date=exec_date)
ti.run(mark_success=True)
ti.xcom_push(key=key, value=value)
self.assertEqual(ti.xcom_pull(task_ids='test_xcom', key=key), value)
ti.run()
exec_date += datetime.timedelta(days=1)
ti = TI(
task=task, execution_date=exec_date)
ti.run()
# We have set a new execution date (and did not pass in
# 'include_prior_dates'), which means this task should now have a cleared
# xcom value
self.assertEqual(ti.xcom_pull(task_ids='test_xcom', key=key), None)
# We *should* get a value using 'include_prior_dates'
self.assertEqual(ti.xcom_pull(task_ids='test_xcom',
key=key,
include_prior_dates=True),
value)
def test_post_execute_hook(self):
"""
Test that post_execute hook is called with the Operator's result.
The result ('error') will cause an error to be raised and trapped.
"""
class TestError(Exception):
pass
class TestOperator(PythonOperator):
def post_execute(self, context, result):
if result == 'error':
raise TestError('expected error.')
dag = models.DAG(dag_id='test_post_execute_dag')
task = TestOperator(
task_id='test_operator',
dag=dag,
python_callable=lambda: 'error',
owner='airflow',
start_date=timezone.datetime(2017, 2, 1))
ti = TI(task=task, execution_date=timezone.utcnow())
with self.assertRaises(TestError):
ti.run()
def test_check_and_change_state_before_execution(self):
dag = models.DAG(dag_id='test_check_and_change_state_before_execution')
task = DummyOperator(task_id='task', dag=dag, start_date=DEFAULT_DATE)
ti = TI(
task=task, execution_date=timezone.utcnow())
self.assertEqual(ti._try_number, 0)
self.assertTrue(ti._check_and_change_state_before_execution())
# State should be running, and try_number column should be incremented
self.assertEqual(ti.state, State.RUNNING)
self.assertEqual(ti._try_number, 1)
def test_check_and_change_state_before_execution_dep_not_met(self):
dag = models.DAG(dag_id='test_check_and_change_state_before_execution')
task = DummyOperator(task_id='task', dag=dag, start_date=DEFAULT_DATE)
task2 = DummyOperator(task_id='task2', dag=dag, start_date=DEFAULT_DATE)
task >> task2
ti = TI(
task=task2, execution_date=timezone.utcnow())
self.assertFalse(ti._check_and_change_state_before_execution())
def test_try_number(self):
"""
Test the try_number accessor behaves in various running states
"""
dag = models.DAG(dag_id='test_check_and_change_state_before_execution')
task = DummyOperator(task_id='task', dag=dag, start_date=DEFAULT_DATE)
ti = TI(task=task, execution_date=timezone.utcnow())
self.assertEqual(1, ti.try_number)
ti.try_number = 2
ti.state = State.RUNNING
self.assertEqual(2, ti.try_number)
ti.state = State.SUCCESS
self.assertEqual(3, ti.try_number)
def test_get_num_running_task_instances(self):
session = settings.Session()
dag = models.DAG(dag_id='test_get_num_running_task_instances')
dag2 = models.DAG(dag_id='test_get_num_running_task_instances_dummy')
task = DummyOperator(task_id='task', dag=dag, start_date=DEFAULT_DATE)
task2 = DummyOperator(task_id='task', dag=dag2, start_date=DEFAULT_DATE)
ti1 = TI(task=task, execution_date=DEFAULT_DATE)
ti2 = TI(task=task, execution_date=DEFAULT_DATE + datetime.timedelta(days=1))
ti3 = TI(task=task2, execution_date=DEFAULT_DATE)
ti1.state = State.RUNNING
ti2.state = State.QUEUED
ti3.state = State.RUNNING
session.add(ti1)
session.add(ti2)
session.add(ti3)
session.commit()
self.assertEquals(1, ti1.get_num_running_task_instances(session=session))
self.assertEquals(1, ti2.get_num_running_task_instances(session=session))
self.assertEquals(1, ti3.get_num_running_task_instances(session=session))
def test_log_url(self):
now = pendulum.now('Europe/Brussels')
dag = DAG('dag', start_date=DEFAULT_DATE)
task = DummyOperator(task_id='op', dag=dag)
ti = TI(task=task, execution_date=now)
d = urllib.parse.parse_qs(
urllib.parse.urlparse(ti.log_url).query,
keep_blank_values=True, strict_parsing=True)
self.assertEqual(d['dag_id'][0], 'dag')
self.assertEqual(d['task_id'][0], 'op')
self.assertEqual(pendulum.parse(d['execution_date'][0]), now)
def test_mark_success_url(self):
now = pendulum.now('Europe/Brussels')
dag = DAG('dag', start_date=DEFAULT_DATE)
task = DummyOperator(task_id='op', dag=dag)
ti = TI(task=task, execution_date=now)
d = urllib.parse.parse_qs(
urllib.parse.urlparse(ti.mark_success_url).query,
keep_blank_values=True, strict_parsing=True)
self.assertEqual(d['dag_id'][0], 'dag')
self.assertEqual(d['task_id'][0], 'op')
self.assertEqual(pendulum.parse(d['execution_date'][0]), now)
def test_overwrite_params_with_dag_run_conf(self):
task = DummyOperator(task_id='op')
ti = TI(task=task, execution_date=datetime.datetime.now())
dag_run = DagRun()
dag_run.conf = {"override": True}
params = {"override": False}
ti.overwrite_params_with_dag_run_conf(params, dag_run)
self.assertEqual(True, params["override"])
def test_overwrite_params_with_dag_run_none(self):
task = DummyOperator(task_id='op')
ti = TI(task=task, execution_date=datetime.datetime.now())
params = {"override": False}
ti.overwrite_params_with_dag_run_conf(params, None)
self.assertEqual(False, params["override"])
def test_overwrite_params_with_dag_run_conf_none(self):
task = DummyOperator(task_id='op')
ti = TI(task=task, execution_date=datetime.datetime.now())
params = {"override": False}
dag_run = DagRun()
ti.overwrite_params_with_dag_run_conf(params, dag_run)
self.assertEqual(False, params["override"])
class ClearTasksTest(unittest.TestCase):
def test_clear_task_instances(self):
dag = DAG('test_clear_task_instances', start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE + datetime.timedelta(days=10))
task0 = DummyOperator(task_id='0', owner='test', dag=dag)
task1 = DummyOperator(task_id='1', owner='test', dag=dag, retries=2)
ti0 = TI(task=task0, execution_date=DEFAULT_DATE)
ti1 = TI(task=task1, execution_date=DEFAULT_DATE)
ti0.run()
ti1.run()
session = settings.Session()
qry = session.query(TI).filter(
TI.dag_id == dag.dag_id).all()
clear_task_instances(qry, session, dag=dag)
session.commit()
ti0.refresh_from_db()
ti1.refresh_from_db()
# Next try to run will be try 2
self.assertEqual(ti0.try_number, 2)
self.assertEqual(ti0.max_tries, 1)
self.assertEqual(ti1.try_number, 2)
self.assertEqual(ti1.max_tries, 3)
def test_clear_task_instances_without_task(self):
dag = DAG('test_clear_task_instances_without_task', start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE + datetime.timedelta(days=10))
task0 = DummyOperator(task_id='task0', owner='test', dag=dag)
task1 = DummyOperator(task_id='task1', owner='test', dag=dag, retries=2)
ti0 = TI(task=task0, execution_date=DEFAULT_DATE)
ti1 = TI(task=task1, execution_date=DEFAULT_DATE)
ti0.run()
ti1.run()
# Remove the task from dag.
dag.task_dict = {}
self.assertFalse(dag.has_task(task0.task_id))
self.assertFalse(dag.has_task(task1.task_id))
session = settings.Session()
qry = session.query(TI).filter(
TI.dag_id == dag.dag_id).all()
clear_task_instances(qry, session)
session.commit()
# When dag is None, max_tries will be the maximum of the original max_tries and try_number.
ti0.refresh_from_db()
ti1.refresh_from_db()
# Next try to run will be try 2
self.assertEqual(ti0.try_number, 2)
self.assertEqual(ti0.max_tries, 1)
self.assertEqual(ti1.try_number, 2)
self.assertEqual(ti1.max_tries, 2)
def test_clear_task_instances_without_dag(self):
dag = DAG('test_clear_task_instances_without_dag', start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE + datetime.timedelta(days=10))
task0 = DummyOperator(task_id='task_0', owner='test', dag=dag)
task1 = DummyOperator(task_id='task_1', owner='test', dag=dag, retries=2)
ti0 = TI(task=task0, execution_date=DEFAULT_DATE)
ti1 = TI(task=task1, execution_date=DEFAULT_DATE)
ti0.run()
ti1.run()
session = settings.Session()
qry = session.query(TI).filter(
TI.dag_id == dag.dag_id).all()
clear_task_instances(qry, session)
session.commit()
# When dag is None, max_tries will be the maximum of the original max_tries and try_number.
ti0.refresh_from_db()
ti1.refresh_from_db()
# Next try to run will be try 2
self.assertEqual(ti0.try_number, 2)
self.assertEqual(ti0.max_tries, 1)
self.assertEqual(ti1.try_number, 2)
self.assertEqual(ti1.max_tries, 2)
def test_dag_clear(self):
dag = DAG('test_dag_clear', start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE + datetime.timedelta(days=10))
task0 = DummyOperator(task_id='test_dag_clear_task_0', owner='test', dag=dag)
ti0 = TI(task=task0, execution_date=DEFAULT_DATE)
# Next try to run will be try 1
self.assertEqual(ti0.try_number, 1)
ti0.run()
self.assertEqual(ti0.try_number, 2)
dag.clear()
ti0.refresh_from_db()
self.assertEqual(ti0.try_number, 2)
self.assertEqual(ti0.state, State.NONE)
self.assertEqual(ti0.max_tries, 1)
task1 = DummyOperator(task_id='test_dag_clear_task_1', owner='test',
dag=dag, retries=2)
ti1 = TI(task=task1, execution_date=DEFAULT_DATE)
self.assertEqual(ti1.max_tries, 2)
ti1.try_number = 1
# Next try will be 2
ti1.run()
self.assertEqual(ti1.try_number, 3)
self.assertEqual(ti1.max_tries, 2)
dag.clear()
ti0.refresh_from_db()
ti1.refresh_from_db()
# after clear dag, ti1 should show attempt 3 of 5
self.assertEqual(ti1.max_tries, 4)
self.assertEqual(ti1.try_number, 3)
# after clear dag, ti0 should show attempt 2 of 2
self.assertEqual(ti0.try_number, 2)
self.assertEqual(ti0.max_tries, 1)
def test_dags_clear(self):
# setup
session = settings.Session()
dags, tis = [], []
num_of_dags = 5
for i in range(num_of_dags):
dag = DAG('test_dag_clear_' + str(i), start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE + datetime.timedelta(days=10))
ti = TI(task=DummyOperator(task_id='test_task_clear_' + str(i), owner='test', dag=dag),
execution_date=DEFAULT_DATE)
dags.append(dag)
tis.append(ti)
# test clear all dags
for i in range(num_of_dags):
tis[i].run()
self.assertEqual(tis[i].state, State.SUCCESS)
self.assertEqual(tis[i].try_number, 2)
self.assertEqual(tis[i].max_tries, 0)
DAG.clear_dags(dags)
for i in range(num_of_dags):
tis[i].refresh_from_db()
self.assertEqual(tis[i].state, State.NONE)
self.assertEqual(tis[i].try_number, 2)
self.assertEqual(tis[i].max_tries, 1)
# test dry_run
for i in range(num_of_dags):
tis[i].run()
self.assertEqual(tis[i].state, State.SUCCESS)
self.assertEqual(tis[i].try_number, 3)
self.assertEqual(tis[i].max_tries, 1)
DAG.clear_dags(dags, dry_run=True)
for i in range(num_of_dags):
tis[i].refresh_from_db()
self.assertEqual(tis[i].state, State.SUCCESS)
self.assertEqual(tis[i].try_number, 3)
self.assertEqual(tis[i].max_tries, 1)
# test only_failed
from random import randint
failed_dag_idx = randint(0, len(tis) - 1)
tis[failed_dag_idx].state = State.FAILED
session.merge(tis[failed_dag_idx])
session.commit()
DAG.clear_dags(dags, only_failed=True)
for i in range(num_of_dags):
tis[i].refresh_from_db()
if i != failed_dag_idx:
self.assertEqual(tis[i].state, State.SUCCESS)
self.assertEqual(tis[i].try_number, 3)
self.assertEqual(tis[i].max_tries, 1)
else:
self.assertEqual(tis[i].state, State.NONE)
self.assertEqual(tis[i].try_number, 3)
self.assertEqual(tis[i].max_tries, 2)
def test_operator_clear(self):
dag = DAG('test_operator_clear', start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE + datetime.timedelta(days=10))
t1 = DummyOperator(task_id='bash_op', owner='test', dag=dag)
t2 = DummyOperator(task_id='dummy_op', owner='test', dag=dag, retries=1)
t2.set_upstream(t1)
ti1 = TI(task=t1, execution_date=DEFAULT_DATE)
ti2 = TI(task=t2, execution_date=DEFAULT_DATE)
ti2.run()
# Dependency not met
self.assertEqual(ti2.try_number, 1)
self.assertEqual(ti2.max_tries, 1)
t2.clear(upstream=True)
ti1.run()
ti2.run()
self.assertEqual(ti1.try_number, 2)
# max_tries is 0 because there is no task instance in db for ti1
# so clear won't change the max_tries.
self.assertEqual(ti1.max_tries, 0)
self.assertEqual(ti2.try_number, 2)
# try_number (0) + retries(1)
self.assertEqual(ti2.max_tries, 1)
def test_xcom_disable_pickle_type(self):
configuration.load_test_config()
json_obj = {"key": "value"}
execution_date = timezone.utcnow()
key = "xcom_test1"
dag_id = "test_dag1"
task_id = "test_task1"
configuration.set("core", "enable_xcom_pickling", "False")
XCom.set(key=key,
value=json_obj,
dag_id=dag_id,
task_id=task_id,
execution_date=execution_date)
ret_value = XCom.get_one(key=key,
dag_id=dag_id,
task_id=task_id,
execution_date=execution_date)
self.assertEqual(ret_value, json_obj)
session = settings.Session()
ret_value = session.query(XCom).filter(XCom.key == key, XCom.dag_id == dag_id,
XCom.task_id == task_id,
XCom.execution_date == execution_date
).first().value
self.assertEqual(ret_value, json_obj)
def test_xcom_enable_pickle_type(self):
json_obj = {"key": "value"}
execution_date = timezone.utcnow()
key = "xcom_test2"
dag_id = "test_dag2"
task_id = "test_task2"
configuration.set("core", "enable_xcom_pickling", "True")
XCom.set(key=key,
value=json_obj,
dag_id=dag_id,
task_id=task_id,
execution_date=execution_date)
ret_value = XCom.get_one(key=key,
dag_id=dag_id,
task_id=task_id,
execution_date=execution_date)
self.assertEqual(ret_value, json_obj)
session = settings.Session()
ret_value = session.query(XCom).filter(XCom.key == key, XCom.dag_id == dag_id,
XCom.task_id == task_id,
XCom.execution_date == execution_date
).first().value
self.assertEqual(ret_value, json_obj)
def test_xcom_disable_pickle_type_fail_on_non_json(self):
class PickleRce(object):
def __reduce__(self):
return (os.system, ("ls -alt",))
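# Note: PickleRce.__reduce__ makes unpickling execute an arbitrary shell
# command, which is why XCom.set must raise for non-JSON values instead of
# silently falling back to pickle when pickling is disabled.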
configuration.set("core", "xcom_enable_pickling", "False")
self.assertRaises(TypeError, XCom.set,
key="xcom_test3",
value=PickleRce(),
dag_id="test_dag3",
task_id="test_task3",
execution_date=timezone.utcnow())
def test_xcom_get_many(self):
json_obj = {"key": "value"}
execution_date = timezone.utcnow()
key = "xcom_test4"
dag_id1 = "test_dag4"
task_id1 = "test_task4"
dag_id2 = "test_dag5"
task_id2 = "test_task5"
configuration.set("core", "xcom_enable_pickling", "True")
XCom.set(key=key,
value=json_obj,
dag_id=dag_id1,
task_id=task_id1,
execution_date=execution_date)
XCom.set(key=key,
value=json_obj,
dag_id=dag_id2,
task_id=task_id2,
execution_date=execution_date)
results = XCom.get_many(key=key,
execution_date=execution_date)
for result in results:
self.assertEqual(result.value, json_obj)
class ConnectionTest(unittest.TestCase):
@patch.object(configuration, 'get')
def test_connection_extra_no_encryption(self, mock_get):
"""
Tests extras on a new connection without encryption. The fernet key
is set to a non-base64-encoded string and the extra is stored without
encryption.
"""
mock_get.return_value = 'cryptography_not_found_storing_passwords_in_plain_text'
test_connection = Connection(extra='testextra')
self.assertEqual(test_connection.extra, 'testextra')
@patch.object(configuration, 'get')
def test_connection_extra_with_encryption(self, mock_get):
"""
Tests extras on a new connection with encryption. The fernet key
is set to a base64 encoded string and the extra is encrypted.
"""
# 'dGVzdA==' is base64 encoded 'test'
mock_get.return_value = 'dGVzdA=='
test_connection = Connection(extra='testextra')
self.assertEqual(test_connection.extra, 'testextra')
def test_connection_from_uri_without_extras(self):
uri = 'scheme://user:password@host%2flocation:1234/schema'
connection = Connection(uri=uri)
self.assertEqual(connection.conn_type, 'scheme')
self.assertEqual(connection.host, 'host/location')
self.assertEqual(connection.schema, 'schema')
self.assertEqual(connection.login, 'user')
self.assertEqual(connection.password, 'password')
self.assertEqual(connection.port, 1234)
self.assertIsNone(connection.extra)
def test_connection_from_uri_with_extras(self):
uri = 'scheme://user:password@host%2flocation:1234/schema?'\
'extra1=a%20value&extra2=%2fpath%2f'
connection = Connection(uri=uri)
self.assertEqual(connection.conn_type, 'scheme')
self.assertEqual(connection.host, 'host/location')
self.assertEqual(connection.schema, 'schema')
self.assertEqual(connection.login, 'user')
self.assertEqual(connection.password, 'password')
self.assertEqual(connection.port, 1234)
self.assertDictEqual(connection.extra_dejson, {'extra1': 'a value',
'extra2': '/path/'})
|
ita1024/samba | refs/heads/master | source3/stf/comfychair.py | 82 | #! /usr/bin/env python
# Copyright (C) 2002, 2003 by Martin Pool <mbp@samba.org>
# Copyright (C) 2003 by Tim Potter <tpot@samba.org>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>.
"""comfychair: a Python-based instrument of software torture.
Copyright (C) 2002, 2003 by Martin Pool <mbp@samba.org>
Copyright (C) 2003 by Tim Potter <tpot@samba.org>
This is a test framework designed for testing programs written in
Python, or (through a fork/exec interface) any other language.
For more information, see the file README.comfychair.
To run a test suite based on ComfyChair, just run it as a program.
"""
import sys, re
class TestCase:
"""A base class for tests. This class defines required functions which
can optionally be overridden by subclasses. It also provides some
utility functions for writing tests."""
def __init__(self):
self.test_log = ""
self.background_pids = []
self._cleanups = []
self._enter_rundir()
self._save_environment()
self.add_cleanup(self.teardown)
# --------------------------------------------------
# Save and restore directory
def _enter_rundir(self):
import os
self.basedir = os.getcwd()
self.add_cleanup(self._restore_directory)
self.rundir = os.path.join(self.basedir,
'testtmp',
self.__class__.__name__)
self.tmpdir = os.path.join(self.rundir, 'tmp')
os.system("rm -fr %s" % self.rundir)
os.makedirs(self.tmpdir)
os.system("mkdir -p %s" % self.rundir)
os.chdir(self.rundir)
def _restore_directory(self):
import os
os.chdir(self.basedir)
# --------------------------------------------------
# Save and restore environment
def _save_environment(self):
import os
self._saved_environ = os.environ.copy()
self.add_cleanup(self._restore_environment)
def _restore_environment(self):
import os
os.environ.clear()
os.environ.update(self._saved_environ)
def setup(self):
"""Set up test fixture."""
pass
def teardown(self):
"""Tear down test fixture."""
pass
def runtest(self):
"""Run the test."""
pass
def add_cleanup(self, c):
"""Queue a cleanup to be run when the test is complete."""
self._cleanups.append(c)
def fail(self, reason = ""):
"""Say the test failed."""
raise AssertionError(reason)
#############################################################
# Requisition methods
def require(self, predicate, message):
"""Check a predicate for running this test.
If the predicate value is not true, the test is skipped with a message explaining
why."""
if not predicate:
raise NotRunError, message
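# Example (the predicate and path are illustrative):
#
#     import os
#     self.require(os.path.exists("/usr/local/samba/sbin/smbd"),
#                  "smbd binary not installed")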
def require_root(self):
"""Skip this test unless run by root."""
import os
self.require(os.getuid() == 0,
"must be root to run this test")
#############################################################
# Assertion methods
def assert_(self, expr, reason = ""):
if not expr:
raise AssertionError(reason)
def assert_equal(self, a, b):
if not a == b:
raise AssertionError("assertEquals failed: %s" % `(a, b)`)
def assert_notequal(self, a, b):
if a == b:
raise AssertionError("assertNotEqual failed: %s" % `(a, b)`)
def assert_re_match(self, pattern, s):
"""Assert that a string matches a particular pattern
Inputs:
pattern string: regular expression
s string: to be matched
Raises:
AssertionError if not matched
"""
if not re.match(pattern, s):
raise AssertionError("string does not match regexp\n"
" string: %s\n"
" re: %s" % (`s`, `pattern`))
def assert_re_search(self, pattern, s):
"""Assert that a string *contains* a particular pattern
Inputs:
pattern string: regular expression
s string: to be searched
Raises:
AssertionError if not matched
"""
if not re.search(pattern, s):
raise AssertionError("string does not contain regexp\n"
" string: %s\n"
" re: %s" % (`s`, `pattern`))
def assert_no_file(self, filename):
import os.path
assert not os.path.exists(filename), ("file exists but should not: %s" % filename)
#############################################################
# Methods for running programs
def runcmd_background(self, cmd):
import os
self.test_log = self.test_log + "Run in background:\n" + `cmd` + "\n"
pid = os.fork()
if pid == 0:
# child
try:
os.execvp("/bin/sh", ["/bin/sh", "-c", cmd])
finally:
os._exit(127)
self.test_log = self.test_log + "pid: %d\n" % pid
return pid
def runcmd(self, cmd, expectedResult = 0):
"""Run a command, fail if the command returns an unexpected exit
code. Return the output produced."""
rc, output, stderr = self.runcmd_unchecked(cmd)
if rc != expectedResult:
raise AssertionError("""command returned %d; expected %s: \"%s\"
stdout:
%s
stderr:
%s""" % (rc, expectedResult, cmd, output, stderr))
return output, stderr
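# Example, from inside a subclass's runtest (the command is illustrative):
#
#     out, err = self.runcmd("echo hello")
#     self.assert_re_match("hello", out)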
def run_captured(self, cmd):
"""Run a command, capturing stdout and stderr.
Based in part on popen2.py
Returns (waitstatus, stdout, stderr)."""
import os, types
pid = os.fork()
if pid == 0:
# child
try:
pid = os.getpid()
openmode = os.O_WRONLY|os.O_CREAT|os.O_TRUNC
outfd = os.open('%d.out' % pid, openmode, 0666)
os.dup2(outfd, 1)
os.close(outfd)
errfd = os.open('%d.err' % pid, openmode, 0666)
os.dup2(errfd, 2)
os.close(errfd)
if isinstance(cmd, types.StringType):
cmd = ['/bin/sh', '-c', cmd]
os.execvp(cmd[0], cmd)
finally:
os._exit(127)
else:
# parent
exited_pid, waitstatus = os.waitpid(pid, 0)
stdout = open('%d.out' % pid).read()
stderr = open('%d.err' % pid).read()
return waitstatus, stdout, stderr
def runcmd_unchecked(self, cmd, skip_on_noexec = 0):
"""Invoke a command; return (exitcode, stdout, stderr)"""
import os
waitstatus, stdout, stderr = self.run_captured(cmd)
assert not os.WIFSIGNALED(waitstatus), \
("%s terminated with signal %d" % (`cmd`, os.WTERMSIG(waitstatus)))
rc = os.WEXITSTATUS(waitstatus)
self.test_log = self.test_log + ("""Run command: %s
Wait status: %#x (exit code %d, signal %d)
stdout:
%s
stderr:
%s""" % (cmd, waitstatus, os.WEXITSTATUS(waitstatus), os.WTERMSIG(waitstatus),
stdout, stderr))
if skip_on_noexec and rc == 127:
# Either we could not execute the command or the command
# returned exit code 127. According to system(3) we can't
# tell the difference.
raise NotRunError, "could not execute %s" % `cmd`
return rc, stdout, stderr
def explain_failure(self, exc_info = None):
print "test_log:"
print self.test_log
def log(self, msg):
"""Log a message to the test log. This message is displayed if
the test fails, or when the runtests function is invoked with
the verbose option."""
self.test_log = self.test_log + msg + "\n"
class NotRunError(Exception):
"""Raised if a test must be skipped because of missing resources"""
def __init__(self, value = None):
self.value = value
def _report_error(case, debugger):
"""Ask the test case to explain failure, and optionally run a debugger
Input:
case TestCase instance
debugger if true, a debugger function to be applied to the traceback
"""
import sys
ex = sys.exc_info()
print "-----------------------------------------------------------------"
if ex:
import traceback
traceback.print_exc(file=sys.stdout)
case.explain_failure()
print "-----------------------------------------------------------------"
if debugger:
tb = ex[2]
debugger(tb)
def runtests(test_list, verbose = 0, debugger = None):
"""Run a series of tests.
Inputs:
test_list sequence of TestCase classes
verbose print more information as testing proceeds
debugger debugger object to be applied to errors
Returns:
unix return code: 0 for success, 1 for test failure, 2 for interruption
"""
import traceback
ret = 0
for test_class in test_list:
print "%-30s" % _test_name(test_class),
# flush now so that long running tests are easier to follow
sys.stdout.flush()
obj = None
try:
try: # run test and show result
obj = test_class()
obj.setup()
obj.runtest()
print "OK"
except KeyboardInterrupt:
print "INTERRUPT"
_report_error(obj, debugger)
ret = 2
break
except NotRunError, msg:
print "NOTRUN, %s" % msg.value
except:
print "FAIL"
_report_error(obj, debugger)
ret = 1
finally:
while obj and obj._cleanups:
try:
apply(obj._cleanups.pop())
except KeyboardInterrupt:
print "interrupted during teardown"
_report_error(obj, debugger)
ret = 2
break
except:
print "error during teardown"
_report_error(obj, debugger)
ret = 1
# Display log file if we're verbose
if ret == 0 and verbose:
obj.explain_failure()
return ret
def _test_name(test_class):
"""Return a human-readable name for a test class.
"""
try:
return test_class.__name__
except:
return `test_class`
def print_help():
"""Help for people running tests"""
import sys
print """%s: software test suite based on ComfyChair
usage:
To run all tests, just run this program. To run particular tests,
list them on the command line.
options:
--help show usage message
--list list available tests
--verbose, -v show more information while running tests
--post-mortem, -p enter Python debugger on error
""" % sys.argv[0]
def print_list(test_list):
"""Show list of available tests"""
for test_class in test_list:
print " %s" % _test_name(test_class)
def main(tests, extra_tests=[]):
"""Main entry point for test suites based on ComfyChair.
inputs:
tests Sequence of TestCase subclasses to be run by default.
extra_tests Sequence of TestCase subclasses that are available but
not run by default.
Test suites should contain this boilerplate:
if __name__ == '__main__':
comfychair.main(tests)
This function handles standard options such as --help and --list, and
by default runs all tests in the suggested order.
Calls sys.exit() on completion.
"""
from sys import argv
import getopt, sys
opt_verbose = 0
debugger = None
opts, args = getopt.getopt(argv[1:], 'pv',
['help', 'list', 'verbose', 'post-mortem'])
for opt, opt_arg in opts:
if opt == '--help':
print_help()
return
elif opt == '--list':
print_list(tests + extra_tests)
return
elif opt == '--verbose' or opt == '-v':
opt_verbose = 1
elif opt == '--post-mortem' or opt == '-p':
import pdb
debugger = pdb.post_mortem
if args:
all_tests = tests + extra_tests
by_name = {}
for t in all_tests:
by_name[_test_name(t)] = t
which_tests = []
for name in args:
which_tests.append(by_name[name])
else:
which_tests = tests
sys.exit(runtests(which_tests, verbose=opt_verbose,
debugger=debugger))
if __name__ == '__main__':
print __doc__
|
rosswhitfield/mantid | refs/heads/master | qt/python/mantidqt/widgets/sliceviewer/peaksviewer/actions.py | 2 | # Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2018 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source,
# Institut Laue - Langevin & CSNS, Institute of High Energy Physics, CAS
# SPDX - License - Identifier: GPL - 3.0 +
# coding=utf-8
# This file is part of the mantidqt package.
# 3rd party
from qtpy import QtWidgets
from mantidqt.utils.qt import load_ui
# standard
from typing import Optional
class PeakActionsView(QtWidgets.QWidget):
def __init__(self, parent: Optional['PeaksViewerCollectionView'] = None):
super(PeakActionsView, self).__init__(parent=parent)
self._presenter: 'PeaksViewerCollectionPresenter' = None
self.ui = None
self._setup_ui()
@property
def presenter(self) -> 'PeaksViewerCollectionPresenter':
return self._presenter
def subscribe(self, presenter: 'PeaksViewerCollectionPresenter') -> None:
r"""
@brief Subscribe a presenter to the viewer
@details The presenter must have a method 'response_function' able to handle the event
"""
self._presenter = presenter
self._route_signals_to_presenter()
@property
def erasing_mode_on(self):
r"""Find if the button to remove peaks is checked"""
return self.ui.remove_peaks_button.isChecked()
@property
def adding_mode_on(self):
r"""Find if the button to add peaks is checked"""
return self.ui.add_peaks_button.isChecked()
@property
def active_peaksworkspace(self):
r"""Return the currently selected PeaksWorkspace."""
return self.ui.active_peaks_combobox.currentText()
def deactivate_peak_adding(self):
self.ui.add_peaks_button.setChecked(False)
self.ui.remove_peaks_button.setChecked(False)
def set_peaksworkspace(self, names):
"""Set the items in the combobox.
The names are sorted to prevent reordering when the workspace
is replaced in the ADS after adding or removing a peak.
The current name is set back after we replace all the items.
"""
current_name = self.ui.active_peaks_combobox.currentText()
self.ui.active_peaks_combobox.clear()
self.ui.active_peaks_combobox.addItems(sorted(names))
self.ui.active_peaks_combobox.setCurrentText(current_name)
def _setup_ui(self):
self.ui = load_ui(__file__, 'actions.ui', self)
# Styling
self.ui.add_peaks_button.setStyleSheet("background-color:lightgrey")
self.ui.remove_peaks_button.setStyleSheet("background-color:lightgrey")
self.ui.add_peaks_button.clicked.connect(self._add_button_clicked)
self.ui.remove_peaks_button.clicked.connect(self._delete_button_clicked)
def _route_signals_to_presenter(self):
r"""Link viewer particular viewer signals to particular methods of the presenter"""
self.ui.add_peaks_button.clicked.connect(self.presenter.deactivate_zoom_pan)
self.ui.remove_peaks_button.clicked.connect(self.presenter.deactivate_zoom_pan)
def _add_button_clicked(self, state):
if state:
self.ui.remove_peaks_button.setChecked(False)
def _delete_button_clicked(self, state):
if state:
self.ui.add_peaks_button.setChecked(False)
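# A usage sketch (collection_presenter is an assumed object exposing the
# deactivate_zoom_pan slot that _route_signals_to_presenter wires up):
#
#     view = PeakActionsView()
#     view.subscribe(collection_presenter)
#     view.set_peaksworkspace(['peaks_ws_1', 'peaks_ws_2'])
#     current = view.active_peaksworkspace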
|
terotic/digihel | refs/heads/master | search/fields.py | 1 | from wagtail.wagtailsearch import index
tag_search_field = index.RelatedFields('tags', [
index.SearchField('name'),
])
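# A hypothetical usage sketch (the BlogPage model is an assumption): adding
# the related-field definition to a model's search_fields makes tag names
# searchable.
#
#     from wagtail.wagtailcore.models import Page
#
#     class BlogPage(Page):
#         search_fields = Page.search_fields + [tag_search_field]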
|
edlabh/SickRage | refs/heads/master | lib/simplejson/__init__.py | 448 | r"""JSON (JavaScript Object Notation) <http://json.org> is a subset of
JavaScript syntax (ECMA-262 3rd edition) used as a lightweight data
interchange format.
:mod:`simplejson` exposes an API familiar to users of the standard library
:mod:`marshal` and :mod:`pickle` modules. It is the externally maintained
version of the :mod:`json` library contained in Python 2.6, but maintains
compatibility with Python 2.4 and Python 2.5 and (currently) has
significant performance advantages, even without using the optional C
extension for speedups.
Encoding basic Python object hierarchies::
>>> import simplejson as json
>>> json.dumps(['foo', {'bar': ('baz', None, 1.0, 2)}])
'["foo", {"bar": ["baz", null, 1.0, 2]}]'
>>> print json.dumps("\"foo\bar")
"\"foo\bar"
>>> print json.dumps(u'\u1234')
"\u1234"
>>> print json.dumps('\\')
"\\"
>>> print json.dumps({"c": 0, "b": 0, "a": 0}, sort_keys=True)
{"a": 0, "b": 0, "c": 0}
>>> from StringIO import StringIO
>>> io = StringIO()
>>> json.dump(['streaming API'], io)
>>> io.getvalue()
'["streaming API"]'
Compact encoding::
>>> import simplejson as json
>>> json.dumps([1,2,3,{'4': 5, '6': 7}], separators=(',',':'))
'[1,2,3,{"4":5,"6":7}]'
Pretty printing::
>>> import simplejson as json
>>> s = json.dumps({'4': 5, '6': 7}, sort_keys=True, indent=4)
>>> print '\n'.join([l.rstrip() for l in s.splitlines()])
{
"4": 5,
"6": 7
}
Decoding JSON::
>>> import simplejson as json
>>> obj = [u'foo', {u'bar': [u'baz', None, 1.0, 2]}]
>>> json.loads('["foo", {"bar":["baz", null, 1.0, 2]}]') == obj
True
>>> json.loads('"\\"foo\\bar"') == u'"foo\x08ar'
True
>>> from StringIO import StringIO
>>> io = StringIO('["streaming API"]')
>>> json.load(io)[0] == 'streaming API'
True
Specializing JSON object decoding::
>>> import simplejson as json
>>> def as_complex(dct):
... if '__complex__' in dct:
... return complex(dct['real'], dct['imag'])
... return dct
...
>>> json.loads('{"__complex__": true, "real": 1, "imag": 2}',
... object_hook=as_complex)
(1+2j)
>>> import decimal
>>> json.loads('1.1', parse_float=decimal.Decimal) == decimal.Decimal('1.1')
True
Specializing JSON object encoding::
>>> import simplejson as json
>>> def encode_complex(obj):
... if isinstance(obj, complex):
... return [obj.real, obj.imag]
... raise TypeError(repr(obj) + " is not JSON serializable")
...
>>> json.dumps(2 + 1j, default=encode_complex)
'[2.0, 1.0]'
>>> json.JSONEncoder(default=encode_complex).encode(2 + 1j)
'[2.0, 1.0]'
>>> ''.join(json.JSONEncoder(default=encode_complex).iterencode(2 + 1j))
'[2.0, 1.0]'
Using simplejson.tool from the shell to validate and pretty-print::
$ echo '{"json":"obj"}' | python -m simplejson.tool
{
"json": "obj"
}
$ echo '{ 1.2:3.4}' | python -m simplejson.tool
Expecting property name: line 1 column 2 (char 2)
"""
__version__ = '2.0.9'
__all__ = [
'dump', 'dumps', 'load', 'loads',
'JSONDecoder', 'JSONEncoder',
]
__author__ = 'Bob Ippolito <bob@redivi.com>'
from decoder import JSONDecoder
from encoder import JSONEncoder
_default_encoder = JSONEncoder(
skipkeys=False,
ensure_ascii=True,
check_circular=True,
allow_nan=True,
indent=None,
separators=None,
encoding='utf-8',
default=None,
)
def dump(obj, fp, skipkeys=False, ensure_ascii=True, check_circular=True,
allow_nan=True, cls=None, indent=None, separators=None,
encoding='utf-8', default=None, **kw):
"""Serialize ``obj`` as a JSON formatted stream to ``fp`` (a
``.write()``-supporting file-like object).
If ``skipkeys`` is true then ``dict`` keys that are not basic types
(``str``, ``unicode``, ``int``, ``long``, ``float``, ``bool``, ``None``)
will be skipped instead of raising a ``TypeError``.
If ``ensure_ascii`` is false, then some chunks written to ``fp``
may be ``unicode`` instances, subject to normal Python ``str`` to
``unicode`` coercion rules. Unless ``fp.write()`` explicitly
understands ``unicode`` (as in ``codecs.getwriter()``) this is likely
to cause an error.
If ``check_circular`` is false, then the circular reference check
for container types will be skipped and a circular reference will
result in an ``OverflowError`` (or worse).
If ``allow_nan`` is false, then it will be a ``ValueError`` to
serialize out of range ``float`` values (``nan``, ``inf``, ``-inf``)
in strict compliance of the JSON specification, instead of using the
JavaScript equivalents (``NaN``, ``Infinity``, ``-Infinity``).
If ``indent`` is a non-negative integer, then JSON array elements and object
members will be pretty-printed with that indent level. An indent level
of 0 will only insert newlines. ``None`` is the most compact representation.
If ``separators`` is an ``(item_separator, dict_separator)`` tuple
then it will be used instead of the default ``(', ', ': ')`` separators.
``(',', ':')`` is the most compact JSON representation.
``encoding`` is the character encoding for str instances, default is UTF-8.
``default(obj)`` is a function that should return a serializable version
of obj or raise TypeError. The default simply raises TypeError.
To use a custom ``JSONEncoder`` subclass (e.g. one that overrides the
``.default()`` method to serialize additional types), specify it with
the ``cls`` kwarg.
"""
# cached encoder
if (not skipkeys and ensure_ascii and
check_circular and allow_nan and
cls is None and indent is None and separators is None and
encoding == 'utf-8' and default is None and not kw):
iterable = _default_encoder.iterencode(obj)
else:
if cls is None:
cls = JSONEncoder
iterable = cls(skipkeys=skipkeys, ensure_ascii=ensure_ascii,
check_circular=check_circular, allow_nan=allow_nan, indent=indent,
separators=separators, encoding=encoding,
default=default, **kw).iterencode(obj)
# could accelerate with writelines in some versions of Python, at
# a debuggability cost
for chunk in iterable:
fp.write(chunk)
def dumps(obj, skipkeys=False, ensure_ascii=True, check_circular=True,
allow_nan=True, cls=None, indent=None, separators=None,
encoding='utf-8', default=None, **kw):
"""Serialize ``obj`` to a JSON formatted ``str``.
If ``skipkeys`` is true then ``dict`` keys that are not basic types
(``str``, ``unicode``, ``int``, ``long``, ``float``, ``bool``, ``None``)
will be skipped instead of raising a ``TypeError``.
If ``ensure_ascii`` is false, then the return value will be a
``unicode`` instance subject to normal Python ``str`` to ``unicode``
coercion rules instead of being escaped to an ASCII ``str``.
If ``check_circular`` is false, then the circular reference check
for container types will be skipped and a circular reference will
result in an ``OverflowError`` (or worse).
If ``allow_nan`` is false, then it will be a ``ValueError`` to
serialize out of range ``float`` values (``nan``, ``inf``, ``-inf``) in
strict compliance of the JSON specification, instead of using the
JavaScript equivalents (``NaN``, ``Infinity``, ``-Infinity``).
If ``indent`` is a non-negative integer, then JSON array elements and
object members will be pretty-printed with that indent level. An indent
level of 0 will only insert newlines. ``None`` is the most compact
representation.
If ``separators`` is an ``(item_separator, dict_separator)`` tuple
then it will be used instead of the default ``(', ', ': ')`` separators.
``(',', ':')`` is the most compact JSON representation.
``encoding`` is the character encoding for str instances, default is UTF-8.
``default(obj)`` is a function that should return a serializable version
of obj or raise TypeError. The default simply raises TypeError.
To use a custom ``JSONEncoder`` subclass (e.g. one that overrides the
``.default()`` method to serialize additional types), specify it with
the ``cls`` kwarg.
"""
# cached encoder
if (not skipkeys and ensure_ascii and
check_circular and allow_nan and
cls is None and indent is None and separators is None and
encoding == 'utf-8' and default is None and not kw):
return _default_encoder.encode(obj)
if cls is None:
cls = JSONEncoder
return cls(
skipkeys=skipkeys, ensure_ascii=ensure_ascii,
check_circular=check_circular, allow_nan=allow_nan, indent=indent,
separators=separators, encoding=encoding, default=default,
**kw).encode(obj)
_default_decoder = JSONDecoder(encoding=None, object_hook=None)
def load(fp, encoding=None, cls=None, object_hook=None, parse_float=None,
parse_int=None, parse_constant=None, **kw):
"""Deserialize ``fp`` (a ``.read()``-supporting file-like object containing
a JSON document) to a Python object.
If the contents of ``fp`` are encoded with an ASCII based encoding other
than utf-8 (e.g. latin-1), then an appropriate ``encoding`` name must
be specified. Encodings that are not ASCII based (such as UCS-2) are
not allowed, and should be wrapped with
``codecs.getreader(encoding)(fp)``, or simply decoded to a ``unicode``
object and passed to ``loads()``
``object_hook`` is an optional function that will be called with the
result of any object literal decode (a ``dict``). The return value of
``object_hook`` will be used instead of the ``dict``. This feature
can be used to implement custom decoders (e.g. JSON-RPC class hinting).
To use a custom ``JSONDecoder`` subclass, specify it with the ``cls``
kwarg.
"""
return loads(fp.read(),
encoding=encoding, cls=cls, object_hook=object_hook,
parse_float=parse_float, parse_int=parse_int,
parse_constant=parse_constant, **kw)
def loads(s, encoding=None, cls=None, object_hook=None, parse_float=None,
parse_int=None, parse_constant=None, **kw):
"""Deserialize ``s`` (a ``str`` or ``unicode`` instance containing a JSON
document) to a Python object.
If ``s`` is a ``str`` instance and is encoded with an ASCII based encoding
other than utf-8 (e.g. latin-1) then an appropriate ``encoding`` name
must be specified. Encodings that are not ASCII based (such as UCS-2)
are not allowed and should be decoded to ``unicode`` first.
``object_hook`` is an optional function that will be called with the
result of any object literal decode (a ``dict``). The return value of
``object_hook`` will be used instead of the ``dict``. This feature
can be used to implement custom decoders (e.g. JSON-RPC class hinting).
``parse_float``, if specified, will be called with the string
of every JSON float to be decoded. By default this is equivalent to
float(num_str). This can be used to use another datatype or parser
for JSON floats (e.g. decimal.Decimal).
``parse_int``, if specified, will be called with the string
of every JSON int to be decoded. By default this is equivalent to
int(num_str). This can be used to use another datatype or parser
for JSON integers (e.g. float).
``parse_constant``, if specified, will be called with one of the
following strings: -Infinity, Infinity, NaN, null, true, false.
This can be used to raise an exception if invalid JSON numbers
are encountered.
To use a custom ``JSONDecoder`` subclass, specify it with the ``cls``
kwarg.
"""
if (cls is None and encoding is None and object_hook is None and
parse_int is None and parse_float is None and
parse_constant is None and not kw):
return _default_decoder.decode(s)
if cls is None:
cls = JSONDecoder
if object_hook is not None:
kw['object_hook'] = object_hook
if parse_float is not None:
kw['parse_float'] = parse_float
if parse_int is not None:
kw['parse_int'] = parse_int
if parse_constant is not None:
kw['parse_constant'] = parse_constant
return cls(encoding=encoding, **kw).decode(s)
|
zzeleznick/zDjango | refs/heads/master | venv/lib/python2.7/site-packages/pip/_vendor/requests/packages/charade/chardistribution.py | 2754 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .euctwfreq import (EUCTWCharToFreqOrder, EUCTW_TABLE_SIZE,
EUCTW_TYPICAL_DISTRIBUTION_RATIO)
from .euckrfreq import (EUCKRCharToFreqOrder, EUCKR_TABLE_SIZE,
EUCKR_TYPICAL_DISTRIBUTION_RATIO)
from .gb2312freq import (GB2312CharToFreqOrder, GB2312_TABLE_SIZE,
GB2312_TYPICAL_DISTRIBUTION_RATIO)
from .big5freq import (Big5CharToFreqOrder, BIG5_TABLE_SIZE,
BIG5_TYPICAL_DISTRIBUTION_RATIO)
from .jisfreq import (JISCharToFreqOrder, JIS_TABLE_SIZE,
JIS_TYPICAL_DISTRIBUTION_RATIO)
from .compat import wrap_ord
ENOUGH_DATA_THRESHOLD = 1024
SURE_YES = 0.99
SURE_NO = 0.01
MINIMUM_DATA_THRESHOLD = 3
class CharDistributionAnalysis:
def __init__(self):
# Mapping table to get frequency order from char order (get from
# GetOrder())
self._mCharToFreqOrder = None
self._mTableSize = None # Size of above table
# This is a constant value which varies from language to language,
# used in calculating confidence. See
# http://www.mozilla.org/projects/intl/UniversalCharsetDetection.html
# for further detail.
self._mTypicalDistributionRatio = None
self.reset()
def reset(self):
"""reset analyser, clear any state"""
# If this flag is set to True, detection is done and conclusion has
# been made
self._mDone = False
self._mTotalChars = 0 # Total characters encountered
# The number of characters whose frequency order is less than 512
self._mFreqChars = 0
def feed(self, aBuf, aCharLen):
"""feed a character with known length"""
if aCharLen == 2:
# we only care about 2-byte characters in our distribution analysis
order = self.get_order(aBuf)
else:
order = -1
if order >= 0:
self._mTotalChars += 1
# order is valid
if order < self._mTableSize:
if 512 > self._mCharToFreqOrder[order]:
self._mFreqChars += 1
def get_confidence(self):
"""return confidence based on existing data"""
# if we didn't receive any character in our consideration range,
# return negative answer
if self._mTotalChars <= 0 or self._mFreqChars <= MINIMUM_DATA_THRESHOLD:
return SURE_NO
if self._mTotalChars != self._mFreqChars:
r = (self._mFreqChars / ((self._mTotalChars - self._mFreqChars)
* self._mTypicalDistributionRatio))
if r < SURE_YES:
return r
# normalize confidence (we don't want to be 100% sure)
return SURE_YES
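    # Hedged worked example (numbers are illustrative, not from a real corpus):
    # with 1000 two-byte characters, 700 of them in the top-512 frequency band
    # and a typical ratio of 0.75, r = 700 / ((1000 - 700) * 0.75) ~= 3.11,
    # which is not below SURE_YES, so the confidence is reported as 0.99.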
def got_enough_data(self):
# It is not necessary to receive all data to draw conclusion.
# For charset detection, certain amount of data is enough
return self._mTotalChars > ENOUGH_DATA_THRESHOLD
def get_order(self, aBuf):
# We do not handle characters based on the original encoding string,
# but convert this encoding string to a number, here called order.
# This allows multiple encodings of a language to share one frequency
# table.
return -1
class EUCTWDistributionAnalysis(CharDistributionAnalysis):
def __init__(self):
CharDistributionAnalysis.__init__(self)
self._mCharToFreqOrder = EUCTWCharToFreqOrder
self._mTableSize = EUCTW_TABLE_SIZE
self._mTypicalDistributionRatio = EUCTW_TYPICAL_DISTRIBUTION_RATIO
def get_order(self, aBuf):
# for euc-TW encoding, we are interested
# first byte range: 0xc4 -- 0xfe
# second byte range: 0xa1 -- 0xfe
# no validation needed here. State machine has done that
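        # Hedged worked example: for the (illustrative) byte pair 0xC5 0xA2,
        # order = 94 * (0xC5 - 0xC4) + (0xA2 - 0xA1) = 94 + 1 = 95.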
first_char = wrap_ord(aBuf[0])
if first_char >= 0xC4:
return 94 * (first_char - 0xC4) + wrap_ord(aBuf[1]) - 0xA1
else:
return -1
class EUCKRDistributionAnalysis(CharDistributionAnalysis):
def __init__(self):
CharDistributionAnalysis.__init__(self)
self._mCharToFreqOrder = EUCKRCharToFreqOrder
self._mTableSize = EUCKR_TABLE_SIZE
self._mTypicalDistributionRatio = EUCKR_TYPICAL_DISTRIBUTION_RATIO
def get_order(self, aBuf):
# for euc-KR encoding, we are interested
# first byte range: 0xb0 -- 0xfe
# second byte range: 0xa1 -- 0xfe
# no validation needed here. State machine has done that
first_char = wrap_ord(aBuf[0])
if first_char >= 0xB0:
return 94 * (first_char - 0xB0) + wrap_ord(aBuf[1]) - 0xA1
else:
return -1
class GB2312DistributionAnalysis(CharDistributionAnalysis):
def __init__(self):
CharDistributionAnalysis.__init__(self)
self._mCharToFreqOrder = GB2312CharToFreqOrder
self._mTableSize = GB2312_TABLE_SIZE
self._mTypicalDistributionRatio = GB2312_TYPICAL_DISTRIBUTION_RATIO
def get_order(self, aBuf):
# for GB2312 encoding, we are interested
# first byte range: 0xb0 -- 0xfe
# second byte range: 0xa1 -- 0xfe
# no validation needed here. State machine has done that
first_char, second_char = wrap_ord(aBuf[0]), wrap_ord(aBuf[1])
if (first_char >= 0xB0) and (second_char >= 0xA1):
return 94 * (first_char - 0xB0) + second_char - 0xA1
else:
return -1
class Big5DistributionAnalysis(CharDistributionAnalysis):
def __init__(self):
CharDistributionAnalysis.__init__(self)
self._mCharToFreqOrder = Big5CharToFreqOrder
self._mTableSize = BIG5_TABLE_SIZE
self._mTypicalDistributionRatio = BIG5_TYPICAL_DISTRIBUTION_RATIO
def get_order(self, aBuf):
# for big5 encoding, we are interested
# first byte range: 0xa4 -- 0xfe
# second byte range: 0x40 -- 0x7e , 0xa1 -- 0xfe
# no validation needed here. State machine has done that
first_char, second_char = wrap_ord(aBuf[0]), wrap_ord(aBuf[1])
if first_char >= 0xA4:
if second_char >= 0xA1:
return 157 * (first_char - 0xA4) + second_char - 0xA1 + 63
else:
return 157 * (first_char - 0xA4) + second_char - 0x40
else:
return -1
class SJISDistributionAnalysis(CharDistributionAnalysis):
def __init__(self):
CharDistributionAnalysis.__init__(self)
self._mCharToFreqOrder = JISCharToFreqOrder
self._mTableSize = JIS_TABLE_SIZE
self._mTypicalDistributionRatio = JIS_TYPICAL_DISTRIBUTION_RATIO
def get_order(self, aBuf):
# for sjis encoding, we are interested
# first byte range: 0x81 -- 0x9f , 0xe0 -- 0xfe
# second byte range: 0x40 -- 0x7e, 0x81 -- 0xfe
# no validation needed here. State machine has done that
first_char, second_char = wrap_ord(aBuf[0]), wrap_ord(aBuf[1])
if (first_char >= 0x81) and (first_char <= 0x9F):
order = 188 * (first_char - 0x81)
elif (first_char >= 0xE0) and (first_char <= 0xEF):
order = 188 * (first_char - 0xE0 + 31)
else:
return -1
order = order + second_char - 0x40
if second_char > 0x7F:
order = -1
return order
class EUCJPDistributionAnalysis(CharDistributionAnalysis):
def __init__(self):
CharDistributionAnalysis.__init__(self)
self._mCharToFreqOrder = JISCharToFreqOrder
self._mTableSize = JIS_TABLE_SIZE
self._mTypicalDistributionRatio = JIS_TYPICAL_DISTRIBUTION_RATIO
def get_order(self, aBuf):
# for euc-JP encoding, we are interested
# first byte range: 0xa0 -- 0xfe
# second byte range: 0xa1 -- 0xfe
# no validation needed here. State machine has done that
char = wrap_ord(aBuf[0])
if char >= 0xA0:
return 94 * (char - 0xA1) + wrap_ord(aBuf[1]) - 0xa1
else:
return -1
|
iemejia/beam | refs/heads/master | sdks/python/apache_beam/utils/__init__.py | 23 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""A package containing internal utilities.
For internal use only; no backwards-compatibility guarantees.
"""
|
lmjohns3/theanets | refs/heads/master | test/util_test.py | 2 | import numpy as np
import theanets
import theanets.util
class TestRandomMatrix:
def test_sparsity(self):
x = theanets.util.random_matrix(1000, 200, sparsity=0.1, rng=5)
assert x.shape == (1000, 200)
assert np.allclose(x.mean(), 0, atol=1e-2), x.mean()
assert np.allclose(x.std(), 0.95, atol=1e-2), x.std()
assert np.allclose((x == 0).mean(), 0.1, atol=1e-1), (x == 0).mean()
def test_diagonal(self):
x = theanets.util.random_matrix(1000, 200, diagonal=0.9, rng=4)
assert x.shape == (1000, 200)
assert np.allclose(np.diag(x), 0.9), np.diag(x)
assert x.sum() == 180, x.sum()
def test_radius(self):
x = theanets.util.random_matrix(1000, 200, radius=2, rng=4)
assert x.shape == (1000, 200)
u, s, vT = np.linalg.svd(x)
assert s[0] == 2, s
assert s[1] < 2
class TestRandomVector:
def test_rng(self):
x = theanets.util.random_vector(10000, rng=4)
assert x.shape == (10000, )
assert np.allclose(x.mean(), 0, atol=1e-2)
assert np.allclose(x.std(), 1, atol=1e-2)
class TestMatching:
def test_params_matching(self):
net = theanets.Autoencoder([10, 20, 30, 10])
match = sorted(theanets.util.params_matching(net.layers, '*'))
assert len(match) == 6
assert [n for n, _ in match] == [
'hid1.b', 'hid1.w', 'hid2.b', 'hid2.w', 'out.b', 'out.w']
match = sorted(theanets.util.params_matching(net.layers, '*.w'))
assert len(match) == 3
assert [n for n, _ in match] == ['hid1.w', 'hid2.w', 'out.w']
match = sorted(theanets.util.params_matching(net.layers, 'o*.?'))
assert len(match) == 2
assert [n for n, _ in match] == ['out.b', 'out.w']
def test_outputs_matching(self):
outputs, _ = theanets.Autoencoder([10, 20, 30, 10]).build_graph()
match = sorted(theanets.util.outputs_matching(outputs, '*'))
assert len(match) == 7
assert [n for n, _ in match] == [
'hid1:out', 'hid1:pre', 'hid2:out', 'hid2:pre',
'in:out', 'out:out', 'out:pre']
match = sorted(theanets.util.outputs_matching(outputs, 'hid?:*'))
assert len(match) == 4
assert [n for n, _ in match] == [
'hid1:out', 'hid1:pre', 'hid2:out', 'hid2:pre']
match = sorted(theanets.util.outputs_matching(outputs, '*:pre'))
assert len(match) == 3
assert [n for n, _ in match] == ['hid1:pre', 'hid2:pre', 'out:pre']
|
fuhongliang/odoo | refs/heads/8.0 | openerp/service/wsgi_server.py | 335 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2011-2012 OpenERP s.a. (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
"""
WSGI stack, common code.
"""
import httplib
import urllib
import xmlrpclib
import StringIO
import errno
import logging
import platform
import socket
import sys
import threading
import traceback
import werkzeug.serving
import werkzeug.contrib.fixers
import openerp
import openerp.tools.config as config
import websrv_lib
_logger = logging.getLogger(__name__)
# XML-RPC fault codes. Some care must be taken when changing these: the
# constants are also defined client-side and must remain in sync.
# User code must use the exceptions defined in ``openerp.exceptions`` (not
# create directly ``xmlrpclib.Fault`` objects).
RPC_FAULT_CODE_CLIENT_ERROR = 1 # indistinguishable from app. error.
RPC_FAULT_CODE_APPLICATION_ERROR = 1
RPC_FAULT_CODE_WARNING = 2
RPC_FAULT_CODE_ACCESS_DENIED = 3
RPC_FAULT_CODE_ACCESS_ERROR = 4
def xmlrpc_return(start_response, service, method, params, string_faultcode=False):
"""
Helper to call a service's method with some params, using a wsgi-supplied
``start_response`` callback.
This is the place to look at to see the mapping between core exceptions
and XML-RPC fault codes.
"""
# Map OpenERP core exceptions to XML-RPC fault codes. Specific exceptions
# defined in ``openerp.exceptions`` are mapped to specific fault codes;
# all the other exceptions are mapped to the generic
# RPC_FAULT_CODE_APPLICATION_ERROR value.
# This also mimics SimpleXMLRPCDispatcher._marshaled_dispatch() for
# exception handling.
try:
result = openerp.http.dispatch_rpc(service, method, params)
response = xmlrpclib.dumps((result,), methodresponse=1, allow_none=False, encoding=None)
except Exception, e:
if string_faultcode:
response = xmlrpc_handle_exception_string(e)
else:
response = xmlrpc_handle_exception_int(e)
start_response("200 OK", [('Content-Type','text/xml'), ('Content-Length', str(len(response)))])
return [response]
def xmlrpc_handle_exception_int(e):
if isinstance(e, openerp.osv.orm.except_orm): # legacy
fault = xmlrpclib.Fault(RPC_FAULT_CODE_WARNING, openerp.tools.ustr(e.value))
response = xmlrpclib.dumps(fault, allow_none=False, encoding=None)
elif isinstance(e, openerp.exceptions.Warning) or isinstance(e, openerp.exceptions.RedirectWarning):
fault = xmlrpclib.Fault(RPC_FAULT_CODE_WARNING, str(e))
response = xmlrpclib.dumps(fault, allow_none=False, encoding=None)
elif isinstance (e, openerp.exceptions.AccessError):
fault = xmlrpclib.Fault(RPC_FAULT_CODE_ACCESS_ERROR, str(e))
response = xmlrpclib.dumps(fault, allow_none=False, encoding=None)
elif isinstance(e, openerp.exceptions.AccessDenied):
fault = xmlrpclib.Fault(RPC_FAULT_CODE_ACCESS_DENIED, str(e))
response = xmlrpclib.dumps(fault, allow_none=False, encoding=None)
elif isinstance(e, openerp.exceptions.DeferredException):
info = e.traceback
# Which one is the best ?
formatted_info = "".join(traceback.format_exception(*info))
#formatted_info = openerp.tools.exception_to_unicode(e) + '\n' + info
fault = xmlrpclib.Fault(RPC_FAULT_CODE_APPLICATION_ERROR, formatted_info)
response = xmlrpclib.dumps(fault, allow_none=False, encoding=None)
else:
if hasattr(e, 'message') and e.message == 'AccessDenied': # legacy
fault = xmlrpclib.Fault(RPC_FAULT_CODE_ACCESS_DENIED, str(e))
response = xmlrpclib.dumps(fault, allow_none=False, encoding=None)
else:
info = sys.exc_info()
# Which one is the best ?
formatted_info = "".join(traceback.format_exception(*info))
#formatted_info = openerp.tools.exception_to_unicode(e) + '\n' + info
fault = xmlrpclib.Fault(RPC_FAULT_CODE_APPLICATION_ERROR, formatted_info)
response = xmlrpclib.dumps(fault, allow_none=None, encoding=None)
return response
def xmlrpc_handle_exception_string(e):
if isinstance(e, openerp.osv.orm.except_orm):
fault = xmlrpclib.Fault('warning -- ' + e.name + '\n\n' + e.value, '')
response = xmlrpclib.dumps(fault, allow_none=False, encoding=None)
elif isinstance(e, openerp.exceptions.Warning):
fault = xmlrpclib.Fault('warning -- Warning\n\n' + str(e), '')
response = xmlrpclib.dumps(fault, allow_none=False, encoding=None)
elif isinstance(e, openerp.exceptions.AccessError):
fault = xmlrpclib.Fault('warning -- AccessError\n\n' + str(e), '')
response = xmlrpclib.dumps(fault, allow_none=False, encoding=None)
elif isinstance(e, openerp.exceptions.AccessDenied):
fault = xmlrpclib.Fault('AccessDenied', str(e))
response = xmlrpclib.dumps(fault, allow_none=False, encoding=None)
elif isinstance(e, openerp.exceptions.DeferredException):
info = e.traceback
formatted_info = "".join(traceback.format_exception(*info))
fault = xmlrpclib.Fault(openerp.tools.ustr(e.message), formatted_info)
response = xmlrpclib.dumps(fault, allow_none=False, encoding=None)
else:
info = sys.exc_info()
formatted_info = "".join(traceback.format_exception(*info))
fault = xmlrpclib.Fault(openerp.tools.exception_to_unicode(e), formatted_info)
response = xmlrpclib.dumps(fault, allow_none=None, encoding=None)
return response
def wsgi_xmlrpc(environ, start_response):
""" Two routes are available for XML-RPC
/xmlrpc/<service> route returns faultCode as strings. This is a historic
violation of the protocol kept for compatibility.
/xmlrpc/2/<service> is a new route that returns faultCode as int and is
therefore fully compliant.
"""
if environ['REQUEST_METHOD'] == 'POST' and environ['PATH_INFO'].startswith('/xmlrpc/'):
length = int(environ['CONTENT_LENGTH'])
data = environ['wsgi.input'].read(length)
# Distinguish between the two faultCode modes
string_faultcode = True
if environ['PATH_INFO'].startswith('/xmlrpc/2/'):
service = environ['PATH_INFO'][len('/xmlrpc/2/'):]
string_faultcode = False
else:
service = environ['PATH_INFO'][len('/xmlrpc/'):]
params, method = xmlrpclib.loads(data)
return xmlrpc_return(start_response, service, method, params, string_faultcode)
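# Hedged illustration (not part of this module): how a client reaches the two
# routes above with the standard library; host and port are hypothetical.
#
#   import xmlrpclib
#   # integer faultCodes (protocol-compliant route):
#   common = xmlrpclib.ServerProxy('http://localhost:8069/xmlrpc/2/common')
#   # string faultCodes (historic route):
#   legacy = xmlrpclib.ServerProxy('http://localhost:8069/xmlrpc/common')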
# WSGI handlers registered through the register_wsgi_handler() function below.
module_handlers = []
# RPC endpoints registered through the register_rpc_endpoint() function below.
rpc_handlers = {}
def register_wsgi_handler(handler):
""" Register a WSGI handler.
Handlers are tried in the order they are added. We might provide a way to
register a handler for specific routes later.
"""
module_handlers.append(handler)
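# Hedged sketch of a conforming handler (name and route are illustrative): a
# handler must return None to pass control to the next one, as
# application_unproxied() below relies on.
#
#   def health_check(environ, start_response):
#       if environ['PATH_INFO'] != '/health':
#           return None
#       start_response('200 OK', [('Content-Type', 'text/plain')])
#       return ['OK']
#
#   register_wsgi_handler(health_check)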
def register_rpc_endpoint(endpoint, handler):
""" Register a handler for a given RPC enpoint.
"""
rpc_handlers[endpoint] = handler
def application_unproxied(environ, start_response):
""" WSGI entry point."""
# cleanup db/uid trackers - they're set at HTTP dispatch in
# web.session.OpenERPSession.send() and at RPC dispatch in
# openerp.service.web_services.objects_proxy.dispatch().
# /!\ The cleanup cannot be done at the end of this `application`
# method because werkzeug still produces relevant logging afterwards
if hasattr(threading.current_thread(), 'uid'):
del threading.current_thread().uid
if hasattr(threading.current_thread(), 'dbname'):
del threading.current_thread().dbname
with openerp.api.Environment.manage():
# Try all handlers until one returns some result (i.e. not None).
wsgi_handlers = [wsgi_xmlrpc]
wsgi_handlers += module_handlers
for handler in wsgi_handlers:
result = handler(environ, start_response)
if result is None:
continue
return result
# We never returned from the loop.
response = 'No handler found.\n'
start_response('404 Not Found', [('Content-Type', 'text/plain'), ('Content-Length', str(len(response)))])
return [response]
def application(environ, start_response):
if config['proxy_mode'] and 'HTTP_X_FORWARDED_HOST' in environ:
return werkzeug.contrib.fixers.ProxyFix(application_unproxied)(environ, start_response)
else:
return application_unproxied(environ, start_response)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
levelrf/level_basestation | refs/heads/master | gr-digital/examples/narrowband/digital_bert_rx.py | 1 | #!/usr/bin/env python
#
# Copyright 2008,2011 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr, eng_notation
from optparse import OptionParser
from gnuradio.eng_option import eng_option
import gnuradio.gr.gr_threading as _threading
import sys, time, math
from gnuradio import digital
# from current dir
from uhd_interface import uhd_receiver
n2s = eng_notation.num_to_str
class status_thread(_threading.Thread):
def __init__(self, tb):
_threading.Thread.__init__(self)
self.setDaemon(1)
self.tb = tb
self.done = False
self.start()
def run(self):
while not self.done:
print "Freq. Offset: {0:5.0f} Hz Timing Offset: {1:10.1f} ppm Estimated SNR: {2:4.1f} dB BER: {3:g}".format(
tb.frequency_offset(), tb.timing_offset()*1e6, tb.snr(), tb.ber())
try:
time.sleep(1.0)
except KeyboardInterrupt:
self.done = True
class bert_receiver(gr.hier_block2):
def __init__(self, bitrate,
constellation, samples_per_symbol,
differential, excess_bw, gray_coded,
freq_bw, timing_bw, phase_bw,
verbose, log):
gr.hier_block2.__init__(self, "bert_receive",
gr.io_signature(1, 1, gr.sizeof_gr_complex), # Input signature
gr.io_signature(0, 0, 0)) # Output signature
self._bitrate = bitrate
self._demod = digital.generic_demod(constellation, differential,
samples_per_symbol,
gray_coded, excess_bw,
freq_bw, timing_bw, phase_bw,
verbose, log)
self._symbol_rate = self._bitrate / self._demod.bits_per_symbol()
self._sample_rate = self._symbol_rate * samples_per_symbol
# Add an SNR probe on the demodulated constellation
self._snr_probe = digital.probe_mpsk_snr_est_c(digital.SNR_EST_M2M4, 1000,
alpha=10.0/self._symbol_rate)
self.connect(self._demod.time_recov, self._snr_probe)
# Descramble BERT sequence. A channel error will create 3 incorrect bits
self._descrambler = digital.descrambler_bb(0x8A, 0x7F, 7) # CCSDS 7-bit descrambler
# Measure BER by the density of 0s in the stream
self._ber = digital.probe_density_b(1.0/self._symbol_rate)
self.connect(self, self._demod, self._descrambler, self._ber)
def frequency_offset(self):
return self._demod.freq_recov.get_frequency()*self._sample_rate/(2*math.pi)
def timing_offset(self):
return self._demod.time_recov.clock_rate()
def snr(self):
return self._snr_probe.snr()
def ber(self):
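        # A single channel bit error yields 3 descrambled bit errors (see the
        # descrambler comment above), so the error fraction derived from the
        # density probe is divided by 3.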
return (1.0-self._ber.density())/3.0
class rx_psk_block(gr.top_block):
def __init__(self, demod, options):
gr.top_block.__init__(self, "rx_mpsk")
self._demodulator_class = demod
# Get demod_kwargs
demod_kwargs = self._demodulator_class.extract_kwargs_from_options(options)
# demodulator
self._demodulator = self._demodulator_class(**demod_kwargs)
if(options.rx_freq is not None):
symbol_rate = options.bitrate / self._demodulator.bits_per_symbol()
self._source = uhd_receiver(options.args, options.bitrate,
options.samples_per_symbol,
options.rx_freq, options.rx_gain,
options.spec,
options.antenna, options.verbose)
options.samples_per_symbol = self._source._sps
elif(options.from_file is not None):
self._source = gr.file_source(gr.sizeof_gr_complex, options.from_file)
else:
self._source = gr.null_source(gr.sizeof_gr_complex)
# Create the BERT receiver
self._receiver = bert_receiver(options.bitrate,
self._demodulator._constellation,
options.samples_per_symbol,
options.differential,
options.excess_bw,
gray_coded=True,
freq_bw=options.freq_bw,
timing_bw=options.timing_bw,
phase_bw=options.phase_bw,
verbose=options.verbose,
log=options.log)
self.connect(self._source, self._receiver)
def snr(self):
return self._receiver.snr()
def mag(self):
return self._receiver.signal_mean()
def var(self):
return self._receiver.noise_variance()
def ber(self):
return self._receiver.ber()
def frequency_offset(self):
return self._receiver.frequency_offset()
def timing_offset(self):
return self._receiver.timing_offset()
def get_options(demods):
parser = OptionParser(option_class=eng_option, conflict_handler="resolve")
parser.add_option("","--from-file", default=None,
help="input file of samples to demod")
parser.add_option("-m", "--modulation", type="choice", choices=demods.keys(),
default='psk',
help="Select modulation from: %s [default=%%default]"
% (', '.join(demods.keys()),))
parser.add_option("-r", "--bitrate", type="eng_float", default=250e3,
help="Select modulation bit rate (default=%default)")
parser.add_option("-S", "--samples-per-symbol", type="float", default=2,
help="set samples/symbol [default=%default]")
if not parser.has_option("--verbose"):
parser.add_option("-v", "--verbose", action="store_true", default=False)
if not parser.has_option("--log"):
parser.add_option("", "--log", action="store_true", default=False,
help="Log all parts of flow graph to files (CAUTION: lots of data)")
uhd_receiver.add_options(parser)
demods = digital.modulation_utils.type_1_demods()
for mod in demods.values():
mod.add_options(parser)
(options, args) = parser.parse_args()
if len(args) != 0:
parser.print_help()
sys.exit(1)
return (options, args)
if __name__ == "__main__":
demods = digital.modulation_utils.type_1_demods()
(options, args) = get_options(demods)
demod = demods[options.modulation]
tb = rx_psk_block(demod, options)
print "\n*** SNR estimator is inaccurate below about 7dB"
print "*** BER estimator is inaccurate above about 10%\n"
updater = status_thread(tb)
try:
tb.run()
except KeyboardInterrupt:
updater.done = True
updater = None
|
nparrilla/is210-week-03-warmup | refs/heads/master | tests/test_task_02.py | 28 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests lesson 03 task 02."""
# Import Python libs
import unittest
# Import student file
import task_02
class L03T02TestCase(unittest.TestCase):
"""
Tests for lesson 03 task 02.
"""
def test_weeks(self):
"""
Tests that the WEEKS constant value is 52
"""
self.assertIs(task_02.WEEKS, 52)
if __name__ == '__main__':
unittest.main()
|
Elico-Corp/odoo_OCB | refs/heads/9.0 | addons/product_expiry/product_expiry.py | 20 | # Part of Odoo. See LICENSE file for full copyright and licensing details.
import datetime
import openerp
from openerp import api, models
from openerp.osv import fields, osv
class stock_production_lot(osv.osv):
_inherit = 'stock.production.lot'
def _get_date(dtype):
"""Return a function to compute the limit date for this type"""
def calc_date(self, cr, uid, context=None):
"""Compute the limit date for a given date"""
if context is None:
context = {}
if not context.get('product_id', False):
date = False
else:
product = openerp.registry(cr.dbname)['product.product'].browse(
cr, uid, context['product_id'])
duration = getattr(product, dtype)
# set date to False when no expiry time specified on the product
date = duration and (datetime.datetime.today()
+ datetime.timedelta(days=duration))
return date and date.strftime('%Y-%m-%d %H:%M:%S') or False
return calc_date
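    # Hedged illustration (values are hypothetical): for a product whose
    # life_time is 30, _get_date('life_time') returns a callable yielding
    # today + 30 days formatted as '%Y-%m-%d %H:%M:%S', or False when the
    # product defines no such duration.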
_columns = {
'life_date': fields.datetime('End of Life Date',
help='This is the date on which the goods with this Serial Number may become dangerous and must not be consumed.'),
'use_date': fields.datetime('Best before Date',
help='This is the date on which the goods with this Serial Number start deteriorating, without being dangerous yet.'),
'removal_date': fields.datetime('Removal Date',
help='This is the date on which the goods with this Serial Number should be removed from the stock.'),
'alert_date': fields.datetime('Alert Date',
help="This is the date on which an alert should be notified about the goods with this Serial Number."),
}
# Assign dates according to products data
def create(self, cr, uid, vals, context=None):
context = dict(context or {})
context['product_id'] = vals.get('product_id', context.get('default_product_id') or context.get('product_id'))
return super(stock_production_lot, self).create(cr, uid, vals, context=context)
_defaults = {
'life_date': _get_date('life_time'),
'use_date': _get_date('use_time'),
'removal_date': _get_date('removal_time'),
'alert_date': _get_date('alert_time'),
}
# Onchange added in new api to avoid having to change views
class StockProductionLot(models.Model):
_inherit = 'stock.production.lot'
@api.onchange('product_id')
def _onchange_product(self):
defaults = self.with_context(
product_id=self.product_id.id).default_get(
['life_date', 'use_date', 'removal_date', 'alert_date'])
for field, value in defaults.items():
setattr(self, field, value)
class stock_quant(osv.osv):
_inherit = 'stock.quant'
def _get_quants(self, cr, uid, ids, context=None):
return self.pool.get('stock.quant').search(cr, uid, [('lot_id', 'in', ids)], context=context)
_columns = {
'removal_date': fields.related('lot_id', 'removal_date', type='datetime', string='Removal Date',
store={
'stock.quant': (lambda self, cr, uid, ids, ctx: ids, ['lot_id'], 20),
'stock.production.lot': (_get_quants, ['removal_date'], 20),
}),
}
def apply_removal_strategy(self, cr, uid, qty, move, ops=False, domain=None, removal_strategy='fifo', context=None):
if removal_strategy == 'fefo':
order = 'removal_date, in_date, id'
return self._quants_get_order(cr, uid, qty, move, ops=ops, domain=domain, orderby=order, context=context)
return super(stock_quant, self).apply_removal_strategy(cr, uid, qty, move, ops=ops, domain=domain,
removal_strategy=removal_strategy, context=context)
class product_product(osv.osv):
_inherit = 'product.template'
_columns = {
'life_time': fields.integer('Product Life Time',
help='When a new Serial Number is issued, this is the number of days before the goods may become dangerous and must not be consumed.'),
'use_time': fields.integer('Product Use Time',
help='When a new Serial Number is issued, this is the number of days before the goods start deteriorating, without being dangerous yet.'),
'removal_time': fields.integer('Product Removal Time',
help='When a new Serial Number is issued, this is the number of days before the goods should be removed from the stock.'),
'alert_time': fields.integer('Product Alert Time',
help='When a new Serial Number is issued, this is the number of days before an alert should be notified.'),
}
|
markeTIC/OCB | refs/heads/8.0 | addons/mrp_byproduct/mrp_byproduct.py | 150 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields
from openerp.osv import osv
import openerp.addons.decimal_precision as dp
from openerp.tools.translate import _
class mrp_subproduct(osv.osv):
_name = 'mrp.subproduct'
_description = 'Byproduct'
_columns={
'product_id': fields.many2one('product.product', 'Product', required=True),
'product_qty': fields.float('Product Qty', digits_compute=dp.get_precision('Product Unit of Measure'), required=True),
'product_uom': fields.many2one('product.uom', 'Product Unit of Measure', required=True),
'subproduct_type': fields.selection([('fixed','Fixed'),('variable','Variable')], 'Quantity Type', required=True, help="Define how the quantity of byproducts will be set on the production orders using this BoM.\
'Fixed' depicts a situation where the quantity of created byproduct is always equal to the quantity set on the BoM, regardless of how many are created in the production order.\
By contrast, 'Variable' means that the quantity will be computed as\
'(quantity of byproduct set on the BoM / quantity of manufactured product set on the BoM * quantity of manufactured product in the production order.)'"),
'bom_id': fields.many2one('mrp.bom', 'BoM', ondelete='cascade'),
}
_defaults={
'subproduct_type': 'variable',
'product_qty': lambda *a: 1.0,
}
def onchange_product_id(self, cr, uid, ids, product_id, context=None):
""" Changes UoM if product_id changes.
@param product_id: Changed product_id
@return: Dictionary of changed values
"""
if product_id:
prod = self.pool.get('product.product').browse(cr, uid, product_id, context=context)
v = {'product_uom': prod.uom_id.id}
return {'value': v}
return {}
def onchange_uom(self, cr, uid, ids, product_id, product_uom, context=None):
res = {'value':{}}
if not product_uom or not product_id:
return res
product = self.pool.get('product.product').browse(cr, uid, product_id, context=context)
uom = self.pool.get('product.uom').browse(cr, uid, product_uom, context=context)
if uom.category_id.id != product.uom_id.category_id.id:
res['warning'] = {'title': _('Warning'), 'message': _('The Product Unit of Measure you chose has a different category than in the product form.')}
res['value'].update({'product_uom': product.uom_id.id})
return res
class mrp_bom(osv.osv):
_name = 'mrp.bom'
_description = 'Bill of Material'
_inherit='mrp.bom'
_columns={
'sub_products':fields.one2many('mrp.subproduct', 'bom_id', 'Byproducts', copy=True),
}
class mrp_production(osv.osv):
_description = 'Production'
_inherit= 'mrp.production'
def action_confirm(self, cr, uid, ids, context=None):
""" Confirms production order and calculates quantity based on subproduct_type.
@return: Newly generated picking Id.
"""
move_obj = self.pool.get('stock.move')
picking_id = super(mrp_production,self).action_confirm(cr, uid, ids, context=context)
product_uom_obj = self.pool.get('product.uom')
for production in self.browse(cr, uid, ids):
source = production.product_id.property_stock_production.id
if not production.bom_id:
continue
for sub_product in production.bom_id.sub_products:
product_uom_factor = product_uom_obj._compute_qty(cr, uid, production.product_uom.id, production.product_qty, production.bom_id.product_uom.id)
qty1 = sub_product.product_qty
if sub_product.subproduct_type == 'variable':
if production.product_qty:
qty1 *= product_uom_factor / (production.bom_id.product_qty or 1.0)
data = {
'name': 'PROD:'+production.name,
'date': production.date_planned,
'product_id': sub_product.product_id.id,
'product_uom_qty': qty1,
'product_uom': sub_product.product_uom.id,
'location_id': source,
'location_dest_id': production.location_dest_id.id,
'move_dest_id': production.move_prod_id.id,
'production_id': production.id
}
move_id = move_obj.create(cr, uid, data, context=context)
move_obj.action_confirm(cr, uid, [move_id], context=context)
return picking_id
def _get_subproduct_factor(self, cr, uid, production_id, move_id=None, context=None):
"""Compute the factor to compute the qty of procucts to produce for the given production_id. By default,
it's always equal to the quantity encoded in the production order or the production wizard, but with
the module mrp_byproduct installed it can differ for byproducts having type 'variable'.
:param production_id: ID of the mrp.production
:param move_id: ID of the stock move that needs to be produced. Identify the product to produce.
:return: The factor to apply to the quantity that we should produce for the given production order and stock move.
"""
sub_obj = self.pool.get('mrp.subproduct')
move_obj = self.pool.get('stock.move')
production_obj = self.pool.get('mrp.production')
production_browse = production_obj.browse(cr, uid, production_id, context=context)
move_browse = move_obj.browse(cr, uid, move_id, context=context)
subproduct_factor = 1
sub_id = sub_obj.search(cr, uid,[('product_id', '=', move_browse.product_id.id),('bom_id', '=', production_browse.bom_id.id), ('subproduct_type', '=', 'variable')], context=context)
if sub_id:
subproduct_record = sub_obj.browse(cr ,uid, sub_id[0], context=context)
if subproduct_record.bom_id.product_qty:
subproduct_factor = subproduct_record.product_qty / subproduct_record.bom_id.product_qty
return subproduct_factor
return super(mrp_production, self)._get_subproduct_factor(cr, uid, production_id, move_id, context=context)
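    # Hedged worked example (numbers are hypothetical): a BoM producing 10
    # units that declares a 'variable' byproduct with product_qty = 2 gives a
    # factor of 2 / 10 = 0.2, so a production order for 25 units creates
    # 0.2 * 25 = 5 byproduct units.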
class change_production_qty(osv.osv_memory):
_inherit = 'change.production.qty'
def _update_product_to_produce(self, cr, uid, prod, qty, context=None):
bom_obj = self.pool.get('mrp.bom')
move_lines_obj = self.pool.get('stock.move')
prod_obj = self.pool.get('mrp.production')
for m in prod.move_created_ids:
if m.product_id.id == prod.product_id.id:
move_lines_obj.write(cr, uid, [m.id], {'product_uom_qty': qty})
else:
for sub_product_line in prod.bom_id.sub_products:
if sub_product_line.product_id.id == m.product_id.id:
factor = prod_obj._get_subproduct_factor(cr, uid, prod.id, m.id, context=context)
subproduct_qty = sub_product_line.subproduct_type == 'variable' and qty * factor or sub_product_line.product_qty
move_lines_obj.write(cr, uid, [m.id], {'product_uom_qty': subproduct_qty})
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
wujuguang/sentry | refs/heads/master | tests/sentry/tsdb/__init__.py | 12133432 | |
mottosso/be | refs/heads/master | be/vendor/requests/packages/chardet/chardistribution.py | 2754 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .euctwfreq import (EUCTWCharToFreqOrder, EUCTW_TABLE_SIZE,
EUCTW_TYPICAL_DISTRIBUTION_RATIO)
from .euckrfreq import (EUCKRCharToFreqOrder, EUCKR_TABLE_SIZE,
EUCKR_TYPICAL_DISTRIBUTION_RATIO)
from .gb2312freq import (GB2312CharToFreqOrder, GB2312_TABLE_SIZE,
GB2312_TYPICAL_DISTRIBUTION_RATIO)
from .big5freq import (Big5CharToFreqOrder, BIG5_TABLE_SIZE,
BIG5_TYPICAL_DISTRIBUTION_RATIO)
from .jisfreq import (JISCharToFreqOrder, JIS_TABLE_SIZE,
JIS_TYPICAL_DISTRIBUTION_RATIO)
from .compat import wrap_ord
ENOUGH_DATA_THRESHOLD = 1024
SURE_YES = 0.99
SURE_NO = 0.01
MINIMUM_DATA_THRESHOLD = 3
class CharDistributionAnalysis:
def __init__(self):
# Mapping table to get frequency order from char order (get from
# GetOrder())
self._mCharToFreqOrder = None
self._mTableSize = None # Size of above table
# This is a constant value which varies from language to language,
# used in calculating confidence. See
# http://www.mozilla.org/projects/intl/UniversalCharsetDetection.html
# for further detail.
self._mTypicalDistributionRatio = None
self.reset()
def reset(self):
"""reset analyser, clear any state"""
# If this flag is set to True, detection is done and conclusion has
# been made
self._mDone = False
self._mTotalChars = 0 # Total characters encountered
# The number of characters whose frequency order is less than 512
self._mFreqChars = 0
def feed(self, aBuf, aCharLen):
"""feed a character with known length"""
if aCharLen == 2:
# we only care about 2-byte characters in our distribution analysis
order = self.get_order(aBuf)
else:
order = -1
if order >= 0:
self._mTotalChars += 1
# order is valid
if order < self._mTableSize:
if 512 > self._mCharToFreqOrder[order]:
self._mFreqChars += 1
def get_confidence(self):
"""return confidence based on existing data"""
# if we didn't receive any character in our consideration range,
# return negative answer
if self._mTotalChars <= 0 or self._mFreqChars <= MINIMUM_DATA_THRESHOLD:
return SURE_NO
if self._mTotalChars != self._mFreqChars:
r = (self._mFreqChars / ((self._mTotalChars - self._mFreqChars)
* self._mTypicalDistributionRatio))
if r < SURE_YES:
return r
# normalize confidence (we don't want to be 100% sure)
return SURE_YES
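    # Hedged worked example (numbers are illustrative, not from a real corpus):
    # with 1000 two-byte characters, 700 of them in the top-512 frequency band
    # and a typical ratio of 0.75, r = 700 / ((1000 - 700) * 0.75) ~= 3.11,
    # which is not below SURE_YES, so the confidence is reported as 0.99.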
def got_enough_data(self):
# It is not necessary to receive all data to draw conclusion.
# For charset detection, certain amount of data is enough
return self._mTotalChars > ENOUGH_DATA_THRESHOLD
def get_order(self, aBuf):
# We do not handle characters based on the original encoding string,
# but convert this encoding string to a number, here called order.
# This allows multiple encodings of a language to share one frequency
# table.
return -1
class EUCTWDistributionAnalysis(CharDistributionAnalysis):
def __init__(self):
CharDistributionAnalysis.__init__(self)
self._mCharToFreqOrder = EUCTWCharToFreqOrder
self._mTableSize = EUCTW_TABLE_SIZE
self._mTypicalDistributionRatio = EUCTW_TYPICAL_DISTRIBUTION_RATIO
def get_order(self, aBuf):
# for euc-TW encoding, we are interested
# first byte range: 0xc4 -- 0xfe
# second byte range: 0xa1 -- 0xfe
# no validation needed here. State machine has done that
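        # Hedged worked example: for the (illustrative) byte pair 0xC5 0xA2,
        # order = 94 * (0xC5 - 0xC4) + (0xA2 - 0xA1) = 94 + 1 = 95.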
first_char = wrap_ord(aBuf[0])
if first_char >= 0xC4:
return 94 * (first_char - 0xC4) + wrap_ord(aBuf[1]) - 0xA1
else:
return -1
class EUCKRDistributionAnalysis(CharDistributionAnalysis):
def __init__(self):
CharDistributionAnalysis.__init__(self)
self._mCharToFreqOrder = EUCKRCharToFreqOrder
self._mTableSize = EUCKR_TABLE_SIZE
self._mTypicalDistributionRatio = EUCKR_TYPICAL_DISTRIBUTION_RATIO
def get_order(self, aBuf):
# for euc-KR encoding, we are interested
# first byte range: 0xb0 -- 0xfe
# second byte range: 0xa1 -- 0xfe
# no validation needed here. State machine has done that
first_char = wrap_ord(aBuf[0])
if first_char >= 0xB0:
return 94 * (first_char - 0xB0) + wrap_ord(aBuf[1]) - 0xA1
else:
return -1
class GB2312DistributionAnalysis(CharDistributionAnalysis):
def __init__(self):
CharDistributionAnalysis.__init__(self)
self._mCharToFreqOrder = GB2312CharToFreqOrder
self._mTableSize = GB2312_TABLE_SIZE
self._mTypicalDistributionRatio = GB2312_TYPICAL_DISTRIBUTION_RATIO
def get_order(self, aBuf):
# for GB2312 encoding, we are interested
# first byte range: 0xb0 -- 0xfe
# second byte range: 0xa1 -- 0xfe
# no validation needed here. State machine has done that
first_char, second_char = wrap_ord(aBuf[0]), wrap_ord(aBuf[1])
if (first_char >= 0xB0) and (second_char >= 0xA1):
return 94 * (first_char - 0xB0) + second_char - 0xA1
else:
return -1
class Big5DistributionAnalysis(CharDistributionAnalysis):
def __init__(self):
CharDistributionAnalysis.__init__(self)
self._mCharToFreqOrder = Big5CharToFreqOrder
self._mTableSize = BIG5_TABLE_SIZE
self._mTypicalDistributionRatio = BIG5_TYPICAL_DISTRIBUTION_RATIO
def get_order(self, aBuf):
# for big5 encoding, we are interested
# first byte range: 0xa4 -- 0xfe
# second byte range: 0x40 -- 0x7e , 0xa1 -- 0xfe
# no validation needed here. State machine has done that
first_char, second_char = wrap_ord(aBuf[0]), wrap_ord(aBuf[1])
if first_char >= 0xA4:
if second_char >= 0xA1:
return 157 * (first_char - 0xA4) + second_char - 0xA1 + 63
else:
return 157 * (first_char - 0xA4) + second_char - 0x40
else:
return -1
class SJISDistributionAnalysis(CharDistributionAnalysis):
def __init__(self):
CharDistributionAnalysis.__init__(self)
self._mCharToFreqOrder = JISCharToFreqOrder
self._mTableSize = JIS_TABLE_SIZE
self._mTypicalDistributionRatio = JIS_TYPICAL_DISTRIBUTION_RATIO
def get_order(self, aBuf):
# for sjis encoding, we are interested
# first byte range: 0x81 -- 0x9f , 0xe0 -- 0xfe
# second byte range: 0x40 -- 0x7e, 0x81 -- oxfe
# no validation needed here. State machine has done that
first_char, second_char = wrap_ord(aBuf[0]), wrap_ord(aBuf[1])
if (first_char >= 0x81) and (first_char <= 0x9F):
order = 188 * (first_char - 0x81)
elif (first_char >= 0xE0) and (first_char <= 0xEF):
order = 188 * (first_char - 0xE0 + 31)
else:
return -1
order = order + second_char - 0x40
if second_char > 0x7F:
order = -1
return order
class EUCJPDistributionAnalysis(CharDistributionAnalysis):
def __init__(self):
CharDistributionAnalysis.__init__(self)
self._mCharToFreqOrder = JISCharToFreqOrder
self._mTableSize = JIS_TABLE_SIZE
self._mTypicalDistributionRatio = JIS_TYPICAL_DISTRIBUTION_RATIO
def get_order(self, aBuf):
# for euc-JP encoding, we are interested
# first byte range: 0xa0 -- 0xfe
# second byte range: 0xa1 -- 0xfe
# no validation needed here. State machine has done that
char = wrap_ord(aBuf[0])
if char >= 0xA0:
return 94 * (char - 0xA1) + wrap_ord(aBuf[1]) - 0xa1
else:
return -1
|
roadmapper/ansible | refs/heads/devel | lib/ansible/modules/cloud/google/gcp_logging_metric.py | 9 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2017 Google
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# ----------------------------------------------------------------------------
#
# *** AUTO GENERATED CODE *** AUTO GENERATED CODE ***
#
# ----------------------------------------------------------------------------
#
# This file is automatically generated by Magic Modules and manual
# changes will be clobbered when the file is regenerated.
#
# Please read more about how to change this file at
# https://www.github.com/GoogleCloudPlatform/magic-modules
#
# ----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
__metaclass__ = type
################################################################################
# Documentation
################################################################################
ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'}
DOCUMENTATION = '''
---
module: gcp_logging_metric
description:
- Logs-based metrics can also be used to extract values from logs and create a distribution
of the values. The distribution records the statistics of the extracted values along
with an optional histogram of the values as specified by the bucket options.
short_description: Creates a GCP Metric
version_added: '2.10'
author: Google Inc. (@googlecloudplatform)
requirements:
- python >= 2.6
- requests >= 2.18.4
- google-auth >= 1.3.0
options:
state:
description:
- Whether the given object should exist in GCP
choices:
- present
- absent
default: present
type: str
name:
description:
- The client-assigned metric identifier. Examples - "error_count", "nginx/requests".
- Metric identifiers are limited to 100 characters and can include only the following
      characters: A-Z, a-z, 0-9, and the special characters _-.,+!*',()%/. The forward-slash
character (/) denotes a hierarchy of name pieces, and it cannot be the first
character of the name.
required: true
type: str
description:
description:
- A description of this metric, which is used in documentation. The maximum length
of the description is 8000 characters.
required: false
type: str
filter:
description:
- An advanced logs filter (U(https://cloud.google.com/logging/docs/view/advanced-filters))
which is used to match log entries.
required: true
type: str
metric_descriptor:
description:
- The metric descriptor associated with the logs-based metric.
required: true
type: dict
suboptions:
unit:
description:
- The unit in which the metric value is reported. It is only applicable if
the valueType is `INT64`, `DOUBLE`, or `DISTRIBUTION`. The supported units
are a subset of [The Unified Code for Units of Measure](U(http://unitsofmeasure.org/ucum.html))
          standard.
required: false
default: '1'
type: str
value_type:
description:
- Whether the measurement is an integer, a floating-point number, etc.
- Some combinations of metricKind and valueType might not be supported.
- For counter metrics, set this to INT64.
- 'Some valid choices include: "BOOL", "INT64", "DOUBLE", "STRING", "DISTRIBUTION",
"MONEY"'
required: true
type: str
metric_kind:
description:
- Whether the metric records instantaneous values, changes to a value, etc.
- Some combinations of metricKind and valueType might not be supported.
- For counter metrics, set this to DELTA.
- 'Some valid choices include: "DELTA", "GAUGE", "CUMULATIVE"'
required: true
type: str
labels:
description:
- The set of labels that can be used to describe a specific instance of this
metric type. For example, the appengine.googleapis.com/http/server/response_latencies
metric type has a label for the HTTP response code, response_code, so you
can look at latencies for successful responses or just for responses that
failed.
required: false
type: list
suboptions:
key:
description:
- The label key.
required: true
type: str
description:
description:
- A human-readable description for the label.
required: false
type: str
value_type:
description:
- The type of data that can be assigned to the label.
- 'Some valid choices include: "BOOL", "INT64", "STRING"'
required: false
default: STRING
type: str
display_name:
description:
- A concise name for the metric, which can be displayed in user interfaces.
Use sentence case without an ending period, for example "Request count".
          This field is optional, but setting it is recommended for any metrics associated
with user-visible concepts, such as Quota.
required: false
type: str
label_extractors:
description:
- A map from a label key string to an extractor expression which is used to extract
data from a log entry field and assign as the label value. Each label key specified
in the LabelDescriptor must have an associated extractor expression in this
map. The syntax of the extractor expression is the same as for the valueExtractor
field.
required: false
type: dict
value_extractor:
description:
- A valueExtractor is required when using a distribution logs-based metric to
extract the values to record from a log entry. Two functions are supported for
      value extraction - EXTRACT(field) or REGEXP_EXTRACT(field, regex). The arguments
      are 1. field - The name of the log entry field from which the value is to be
extracted. 2. regex - A regular expression using the Google RE2 syntax (U(https://github.com/google/re2/wiki/Syntax))
with a single capture group to extract data from the specified log entry field.
The value of the field is converted to a string before applying the regex. It
is an error to specify a regex that does not include exactly one capture group.
required: false
type: str
bucket_options:
description:
- The bucketOptions are required when the logs-based metric is using a DISTRIBUTION
value type and it describes the bucket boundaries used to create a histogram
of the extracted values.
required: false
type: dict
suboptions:
linear_buckets:
description:
- Specifies a linear sequence of buckets that all have the same width (except
overflow and underflow).
- Each bucket represents a constant absolute uncertainty on the specific value
in the bucket.
required: false
type: dict
suboptions:
num_finite_buckets:
description:
- Must be greater than 0.
required: false
type: int
width:
description:
- Must be greater than 0.
required: false
type: int
offset:
description:
- Lower bound of the first bucket.
required: false
type: str
exponential_buckets:
description:
- Specifies an exponential sequence of buckets that have a width that is proportional
to the value of the lower bound. Each bucket represents a constant relative
uncertainty on a specific value in the bucket.
required: false
type: dict
suboptions:
num_finite_buckets:
description:
- Must be greater than 0.
required: false
type: int
growth_factor:
description:
- Must be greater than 1.
required: false
type: str
scale:
description:
- Must be greater than 0.
required: false
type: str
explicit_buckets:
description:
- Specifies a set of buckets with arbitrary widths.
required: false
type: dict
suboptions:
bounds:
description:
- The values must be monotonically increasing.
required: true
type: list
project:
description:
- The Google Cloud Platform project to use.
type: str
auth_kind:
description:
- The type of credential used.
type: str
required: true
choices:
- application
- machineaccount
- serviceaccount
service_account_contents:
description:
- The contents of a Service Account JSON file, either in a dictionary or as a
JSON string that represents it.
type: jsonarg
service_account_file:
description:
- The path of a Service Account JSON file if serviceaccount is selected as type.
type: path
service_account_email:
description:
- An optional service account email address if machineaccount is selected and
the user does not wish to use the default email.
type: str
scopes:
description:
- Array of scopes to be used
type: list
env_type:
description:
- Specifies which Ansible environment you're running this module within.
- This should not be set unless you know what you're doing.
- This only alters the User Agent string for any API requests.
type: str
notes:
- 'API Reference: U(https://cloud.google.com/logging/docs/reference/v2/rest/v2/projects.metrics/create)'
- 'Official Documentation: U(https://cloud.google.com/logging/docs/apis)'
- For authentication, you can set service_account_file using the C(GCP_SERVICE_ACCOUNT_FILE)
  env variable.
- For authentication, you can set service_account_contents using the C(GCP_SERVICE_ACCOUNT_CONTENTS)
  env variable.
- For authentication, you can set service_account_email using the C(GCP_SERVICE_ACCOUNT_EMAIL)
env variable.
- For authentication, you can set auth_kind using the C(GCP_AUTH_KIND) env variable.
- For authentication, you can set scopes using the C(GCP_SCOPES) env variable.
- Environment variables values will only be used if the playbook values are not set.
- The I(service_account_email) and I(service_account_file) options are mutually exclusive.
'''
EXAMPLES = '''
- name: create a metric
gcp_logging_metric:
name: test_object
filter: resource.type=gae_app AND severity>=ERROR
metric_descriptor:
metric_kind: DELTA
value_type: DISTRIBUTION
unit: '1'
labels:
- key: mass
value_type: STRING
description: amount of matter
value_extractor: EXTRACT(jsonPayload.request)
label_extractors:
mass: EXTRACT(jsonPayload.request)
bucket_options:
linear_buckets:
num_finite_buckets: 3
width: 1
offset: 1
project: test_project
auth_kind: serviceaccount
service_account_file: "/tmp/auth.pem"
state: present
'''
RETURN = '''
name:
description:
- The client-assigned metric identifier. Examples - "error_count", "nginx/requests".
- Metric identifiers are limited to 100 characters and can include only the following
    characters: A-Z, a-z, 0-9, and the special characters _-.,+!*',()%/. The forward-slash
character (/) denotes a hierarchy of name pieces, and it cannot be the first character
of the name.
returned: success
type: str
description:
description:
- A description of this metric, which is used in documentation. The maximum length
of the description is 8000 characters.
returned: success
type: str
filter:
description:
- An advanced logs filter (U(https://cloud.google.com/logging/docs/view/advanced-filters))
which is used to match log entries.
returned: success
type: str
metricDescriptor:
description:
- The metric descriptor associated with the logs-based metric.
returned: success
type: complex
contains:
unit:
description:
- The unit in which the metric value is reported. It is only applicable if the
valueType is `INT64`, `DOUBLE`, or `DISTRIBUTION`. The supported units are
a subset of [The Unified Code for Units of Measure](U(http://unitsofmeasure.org/ucum.html))
        standard.
returned: success
type: str
valueType:
description:
- Whether the measurement is an integer, a floating-point number, etc.
- Some combinations of metricKind and valueType might not be supported.
- For counter metrics, set this to INT64.
returned: success
type: str
metricKind:
description:
- Whether the metric records instantaneous values, changes to a value, etc.
- Some combinations of metricKind and valueType might not be supported.
- For counter metrics, set this to DELTA.
returned: success
type: str
labels:
description:
- The set of labels that can be used to describe a specific instance of this
metric type. For example, the appengine.googleapis.com/http/server/response_latencies
metric type has a label for the HTTP response code, response_code, so you
can look at latencies for successful responses or just for responses that
failed.
returned: success
type: complex
contains:
key:
description:
- The label key.
returned: success
type: str
description:
description:
- A human-readable description for the label.
returned: success
type: str
valueType:
description:
- The type of data that can be assigned to the label.
returned: success
type: str
displayName:
description:
- A concise name for the metric, which can be displayed in user interfaces.
Use sentence case without an ending period, for example "Request count". This
        field is optional, but setting it is recommended for any metrics associated
with user-visible concepts, such as Quota.
returned: success
type: str
labelExtractors:
description:
- A map from a label key string to an extractor expression which is used to extract
data from a log entry field and assign as the label value. Each label key specified
in the LabelDescriptor must have an associated extractor expression in this map.
The syntax of the extractor expression is the same as for the valueExtractor field.
returned: success
type: dict
valueExtractor:
description:
- A valueExtractor is required when using a distribution logs-based metric to extract
the values to record from a log entry. Two functions are supported for value extraction
    - EXTRACT(field) or REGEXP_EXTRACT(field, regex). The arguments are 1. field -
The name of the log entry field from which the value is to be extracted. 2. regex
- A regular expression using the Google RE2 syntax (U(https://github.com/google/re2/wiki/Syntax))
with a single capture group to extract data from the specified log entry field.
The value of the field is converted to a string before applying the regex. It
is an error to specify a regex that does not include exactly one capture group.
returned: success
type: str
bucketOptions:
description:
- The bucketOptions are required when the logs-based metric is using a DISTRIBUTION
value type and it describes the bucket boundaries used to create a histogram of
the extracted values.
returned: success
type: complex
contains:
linearBuckets:
description:
- Specifies a linear sequence of buckets that all have the same width (except
overflow and underflow).
- Each bucket represents a constant absolute uncertainty on the specific value
in the bucket.
returned: success
type: complex
contains:
numFiniteBuckets:
description:
- Must be greater than 0.
returned: success
type: int
width:
description:
- Must be greater than 0.
returned: success
type: int
offset:
description:
- Lower bound of the first bucket.
returned: success
type: str
exponentialBuckets:
description:
- Specifies an exponential sequence of buckets that have a width that is proportional
to the value of the lower bound. Each bucket represents a constant relative
uncertainty on a specific value in the bucket.
returned: success
type: complex
contains:
numFiniteBuckets:
description:
- Must be greater than 0.
returned: success
type: int
growthFactor:
description:
- Must be greater than 1.
returned: success
type: str
scale:
description:
- Must be greater than 0.
returned: success
type: str
explicitBuckets:
description:
- Specifies a set of buckets with arbitrary widths.
returned: success
type: complex
contains:
bounds:
description:
- The values must be monotonically increasing.
returned: success
type: list
'''
################################################################################
# Imports
################################################################################
from ansible.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest, remove_nones_from_dict, replace_resource_dict
import json
################################################################################
# Main
################################################################################
def main():
"""Main function"""
module = GcpModule(
argument_spec=dict(
state=dict(default='present', choices=['present', 'absent'], type='str'),
name=dict(required=True, type='str'),
description=dict(type='str'),
filter=dict(required=True, type='str'),
metric_descriptor=dict(
required=True,
type='dict',
options=dict(
unit=dict(default='1', type='str'),
value_type=dict(required=True, type='str'),
metric_kind=dict(required=True, type='str'),
labels=dict(
type='list',
elements='dict',
options=dict(key=dict(required=True, type='str'), description=dict(type='str'), value_type=dict(default='STRING', type='str')),
),
display_name=dict(type='str'),
),
),
label_extractors=dict(type='dict'),
value_extractor=dict(type='str'),
bucket_options=dict(
type='dict',
options=dict(
linear_buckets=dict(type='dict', options=dict(num_finite_buckets=dict(type='int'), width=dict(type='int'), offset=dict(type='str'))),
exponential_buckets=dict(
type='dict', options=dict(num_finite_buckets=dict(type='int'), growth_factor=dict(type='str'), scale=dict(type='str'))
),
explicit_buckets=dict(type='dict', options=dict(bounds=dict(required=True, type='list', elements='str'))),
),
),
)
)
if not module.params['scopes']:
module.params['scopes'] = ['https://www.googleapis.com/auth/cloud-platform']
state = module.params['state']
fetch = fetch_resource(module, self_link(module))
changed = False
if fetch:
if state == 'present':
if is_different(module, fetch):
update(module, self_link(module))
fetch = fetch_resource(module, self_link(module))
changed = True
else:
delete(module, self_link(module))
fetch = {}
changed = True
else:
if state == 'present':
fetch = create(module, collection(module))
changed = True
else:
fetch = {}
fetch.update({'changed': changed})
module.exit_json(**fetch)
def create(module, link):
auth = GcpSession(module, 'logging')
return return_if_object(module, auth.post(link, resource_to_request(module)))
def update(module, link):
auth = GcpSession(module, 'logging')
return return_if_object(module, auth.put(link, resource_to_request(module)))
def delete(module, link):
auth = GcpSession(module, 'logging')
return return_if_object(module, auth.delete(link))
def resource_to_request(module):
request = {
u'name': module.params.get('name'),
u'description': module.params.get('description'),
u'filter': module.params.get('filter'),
u'metricDescriptor': MetricMetricdescriptor(module.params.get('metric_descriptor', {}), module).to_request(),
u'labelExtractors': module.params.get('label_extractors'),
u'valueExtractor': module.params.get('value_extractor'),
u'bucketOptions': MetricBucketoptions(module.params.get('bucket_options', {}), module).to_request(),
}
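    # Drop unset values (None or empty containers) from the request body, but
    # keep explicit booleans so a deliberate False still round-trips to the API.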
return_vals = {}
for k, v in request.items():
if v or v is False:
return_vals[k] = v
return return_vals
def fetch_resource(module, link, allow_not_found=True):
auth = GcpSession(module, 'logging')
return return_if_object(module, auth.get(link), allow_not_found)
def self_link(module):
return "https://logging.googleapis.com/v2/projects/{project}/metrics/{name}".format(**module.params)
def collection(module):
return "https://logging.googleapis.com/v2/projects/{project}/metrics".format(**module.params)
def return_if_object(module, response, allow_not_found=False):
# If not found, return nothing.
if allow_not_found and response.status_code == 404:
return None
# If no content, return nothing.
if response.status_code == 204:
return None
try:
module.raise_for_status(response)
result = response.json()
except getattr(json.decoder, 'JSONDecodeError', ValueError):
module.fail_json(msg="Invalid JSON response with error: %s" % response.text)
if navigate_hash(result, ['error', 'errors']):
module.fail_json(msg=navigate_hash(result, ['error', 'errors']))
return result
def is_different(module, response):
request = resource_to_request(module)
response = response_to_hash(module, response)
# Remove all output-only from response.
response_vals = {}
for k, v in response.items():
if k in request:
response_vals[k] = v
request_vals = {}
for k, v in request.items():
if k in response:
request_vals[k] = v
return GcpRequest(request_vals) != GcpRequest(response_vals)
# Remove unnecessary properties from the response.
# This is for doing comparisons with Ansible's current parameters.
def response_to_hash(module, response):
return {
u'name': response.get(u'name'),
u'description': response.get(u'description'),
u'filter': response.get(u'filter'),
u'metricDescriptor': MetricMetricdescriptor(response.get(u'metricDescriptor', {}), module).from_response(),
u'labelExtractors': response.get(u'labelExtractors'),
u'valueExtractor': response.get(u'valueExtractor'),
u'bucketOptions': MetricBucketoptions(response.get(u'bucketOptions', {}), module).from_response(),
}
class MetricMetricdescriptor(object):
def __init__(self, request, module):
self.module = module
if request:
self.request = request
else:
self.request = {}
def to_request(self):
return remove_nones_from_dict(
{
u'unit': self.request.get('unit'),
u'valueType': self.request.get('value_type'),
u'metricKind': self.request.get('metric_kind'),
u'labels': MetricLabelsArray(self.request.get('labels', []), self.module).to_request(),
u'displayName': self.request.get('display_name'),
}
)
def from_response(self):
return remove_nones_from_dict(
{
u'unit': self.request.get(u'unit'),
u'valueType': self.request.get(u'valueType'),
u'metricKind': self.request.get(u'metricKind'),
u'labels': MetricLabelsArray(self.request.get(u'labels', []), self.module).from_response(),
u'displayName': self.request.get(u'displayName'),
}
)
class MetricLabelsArray(object):
def __init__(self, request, module):
self.module = module
if request:
self.request = request
else:
self.request = []
def to_request(self):
items = []
for item in self.request:
items.append(self._request_for_item(item))
return items
def from_response(self):
items = []
for item in self.request:
items.append(self._response_from_item(item))
return items
def _request_for_item(self, item):
return remove_nones_from_dict({u'key': item.get('key'), u'description': item.get('description'), u'valueType': item.get('value_type')})
def _response_from_item(self, item):
return remove_nones_from_dict({u'key': item.get(u'key'), u'description': item.get(u'description'), u'valueType': item.get(u'valueType')})
class MetricBucketoptions(object):
def __init__(self, request, module):
self.module = module
if request:
self.request = request
else:
self.request = {}
def to_request(self):
return remove_nones_from_dict(
{
u'linearBuckets': MetricLinearbuckets(self.request.get('linear_buckets', {}), self.module).to_request(),
u'exponentialBuckets': MetricExponentialbuckets(self.request.get('exponential_buckets', {}), self.module).to_request(),
u'explicitBuckets': MetricExplicitbuckets(self.request.get('explicit_buckets', {}), self.module).to_request(),
}
)
def from_response(self):
return remove_nones_from_dict(
{
u'linearBuckets': MetricLinearbuckets(self.request.get(u'linearBuckets', {}), self.module).from_response(),
u'exponentialBuckets': MetricExponentialbuckets(self.request.get(u'exponentialBuckets', {}), self.module).from_response(),
u'explicitBuckets': MetricExplicitbuckets(self.request.get(u'explicitBuckets', {}), self.module).from_response(),
}
)
class MetricLinearbuckets(object):
def __init__(self, request, module):
self.module = module
if request:
self.request = request
else:
self.request = {}
def to_request(self):
return remove_nones_from_dict(
{u'numFiniteBuckets': self.request.get('num_finite_buckets'), u'width': self.request.get('width'), u'offset': self.request.get('offset')}
)
def from_response(self):
return remove_nones_from_dict(
{u'numFiniteBuckets': self.request.get(u'numFiniteBuckets'), u'width': self.request.get(u'width'), u'offset': self.request.get(u'offset')}
)
class MetricExponentialbuckets(object):
def __init__(self, request, module):
self.module = module
if request:
self.request = request
else:
self.request = {}
def to_request(self):
return remove_nones_from_dict(
{
u'numFiniteBuckets': self.request.get('num_finite_buckets'),
u'growthFactor': self.request.get('growth_factor'),
u'scale': self.request.get('scale'),
}
)
def from_response(self):
return remove_nones_from_dict(
{
u'numFiniteBuckets': self.request.get(u'numFiniteBuckets'),
u'growthFactor': self.request.get(u'growthFactor'),
u'scale': self.request.get(u'scale'),
}
)
class MetricExplicitbuckets(object):
def __init__(self, request, module):
self.module = module
if request:
self.request = request
else:
self.request = {}
def to_request(self):
return remove_nones_from_dict({u'bounds': self.request.get('bounds')})
def from_response(self):
return remove_nones_from_dict({u'bounds': self.request.get(u'bounds')})
if __name__ == '__main__':
main()
|
jquacinella/IS602_Project | refs/heads/master | web/gluon/portalocker.py | 10 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# portalocker.py
# Cross-platform (posix/nt) API for flock-style file locking.
# Requires python 1.5.2 or better.
"""
Cross-platform (posix/nt) API for flock-style file locking.
Synopsis:
import portalocker
file = open(\"somefile\", \"r+\")
portalocker.lock(file, portalocker.LOCK_EX)
file.seek(12)
file.write(\"foo\")
file.close()
If you know what you're doing, you may choose to
portalocker.unlock(file)
before closing the file, but why?
Methods:
lock( file, flags )
unlock( file )
Constants:
LOCK_EX
LOCK_SH
LOCK_NB
I learned the win32 technique for locking files from sample code
provided by John Nielsen <nielsenjf@my-deja.com> in the documentation
that accompanies the win32 modules.
Author: Jonathan Feinberg <jdf@pobox.com>
Version: $Id: portalocker.py,v 1.3 2001/05/29 18:47:55 Administrator Exp $
"""
import logging
import platform
import sys
logger = logging.getLogger("web2py")
os_locking = None
try:
import google.appengine
os_locking = 'gae'
except:
try:
import fcntl
os_locking = 'posix'
except:
try:
import win32con
import win32file
import pywintypes
os_locking = 'windows'
except:
pass
if os_locking == 'windows':
LOCK_EX = win32con.LOCKFILE_EXCLUSIVE_LOCK
LOCK_SH = 0 # the default
LOCK_NB = win32con.LOCKFILE_FAIL_IMMEDIATELY
# is there any reason not to reuse the following structure?
__overlapped = pywintypes.OVERLAPPED()
def lock(file, flags):
hfile = win32file._get_osfhandle(file.fileno())
win32file.LockFileEx(hfile, flags, 0, 0x7fff0000, __overlapped)
def unlock(file):
hfile = win32file._get_osfhandle(file.fileno())
win32file.UnlockFileEx(hfile, 0, 0x7fff0000, __overlapped)
elif os_locking == 'posix':
LOCK_EX = fcntl.LOCK_EX
LOCK_SH = fcntl.LOCK_SH
LOCK_NB = fcntl.LOCK_NB
def lock(file, flags):
fcntl.flock(file.fileno(), flags)
def unlock(file):
fcntl.flock(file.fileno(), fcntl.LOCK_UN)
else:
if platform.system() == 'Windows':
logger.error('no file locking, you must install the win32 extensions from: http://sourceforge.net/projects/pywin32/files/')
elif os_locking != 'gae':
logger.debug('no file locking, this will cause problems')
LOCK_EX = None
LOCK_SH = None
LOCK_NB = None
def lock(file, flags):
pass
def unlock(file):
pass
class LockedFile(object):
def __init__(self, filename, mode='rb'):
self.filename = filename
self.mode = mode
self.file = None
if 'r' in mode:
self.file = open(filename, mode)
lock(self.file, LOCK_SH)
elif 'w' in mode or 'a' in mode:
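            # Opening with 'a' instead of 'w' avoids truncating the file before
            # the exclusive lock is held; the truncate below runs under the lock.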
self.file = open(filename, mode.replace('w', 'a'))
lock(self.file, LOCK_EX)
            if 'a' not in mode:
self.file.seek(0)
self.file.truncate()
else:
raise RuntimeError("invalid LockedFile(...,mode)")
def read(self, size=None):
return self.file.read() if size is None else self.file.read(size)
def readline(self):
return self.file.readline()
def readlines(self):
return self.file.readlines()
def write(self, data):
self.file.write(data)
self.file.flush()
def close(self):
        if self.file is not None:
unlock(self.file)
self.file.close()
self.file = None
def __del__(self):
        if self.file is not None:
self.close()
def read_locked(filename):
fp = LockedFile(filename, 'r')
data = fp.read()
fp.close()
return data
def write_locked(filename, data):
fp = LockedFile(filename, 'w')
    fp.write(data)
fp.close()
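# Illustrative round-trip using the helpers above (the file name is an example):
#     write_locked('state.txt', 'payload')
#     assert read_locked('state.txt') == 'payload'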
if __name__ == '__main__':
f = LockedFile('test.txt', mode='wb')
f.write('test ok')
f.close()
f = LockedFile('test.txt', mode='rb')
sys.stdout.write(f.read()+'\n')
f.close()
|
andrewnc/scikit-learn | refs/heads/master | sklearn/isotonic.py | 206 | # Authors: Fabian Pedregosa <fabian@fseoane.net>
# Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Nelle Varoquaux <nelle.varoquaux@gmail.com>
# License: BSD 3 clause
import numpy as np
from scipy import interpolate
from scipy.stats import spearmanr
from .base import BaseEstimator, TransformerMixin, RegressorMixin
from .utils import as_float_array, check_array, check_consistent_length
from .utils.fixes import astype
from ._isotonic import _isotonic_regression, _make_unique
import warnings
import math
__all__ = ['check_increasing', 'isotonic_regression',
'IsotonicRegression']
def check_increasing(x, y):
"""Determine whether y is monotonically correlated with x.
y is found increasing or decreasing with respect to x based on a Spearman
correlation test.
Parameters
----------
x : array-like, shape=(n_samples,)
Training data.
y : array-like, shape=(n_samples,)
Training target.
Returns
-------
`increasing_bool` : boolean
Whether the relationship is increasing or decreasing.
Notes
-----
The Spearman correlation coefficient is estimated from the data, and the
sign of the resulting estimate is used as the result.
In the event that the 95% confidence interval based on Fisher transform
spans zero, a warning is raised.
References
----------
Fisher transformation. Wikipedia.
http://en.wikipedia.org/w/index.php?title=Fisher_transformation
"""
# Calculate Spearman rho estimate and set return accordingly.
rho, _ = spearmanr(x, y)
increasing_bool = rho >= 0
# Run Fisher transform to get the rho CI, but handle rho=+/-1
if rho not in [-1.0, 1.0]:
F = 0.5 * math.log((1. + rho) / (1. - rho))
F_se = 1 / math.sqrt(len(x) - 3)
# Use a 95% CI, i.e., +/-1.96 S.E.
# http://en.wikipedia.org/wiki/Fisher_transformation
rho_0 = math.tanh(F - 1.96 * F_se)
rho_1 = math.tanh(F + 1.96 * F_se)
# Warn if the CI spans zero.
if np.sign(rho_0) != np.sign(rho_1):
warnings.warn("Confidence interval of the Spearman "
"correlation coefficient spans zero. "
"Determination of ``increasing`` may be "
"suspect.")
return increasing_bool
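# Illustrative behaviour (not part of the library source): for perfectly
# monotone data such as x = [1, 2, 3], y = [2, 4, 6], the Spearman rho is 1.0,
# so check_increasing returns True; reversing y gives rho = -1.0 and False.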
def isotonic_regression(y, sample_weight=None, y_min=None, y_max=None,
increasing=True):
"""Solve the isotonic regression model::
min sum w[i] (y[i] - y_[i]) ** 2
subject to y_min = y_[1] <= y_[2] ... <= y_[n] = y_max
where:
- y[i] are inputs (real numbers)
- y_[i] are fitted
- w[i] are optional strictly positive weights (default to 1.0)
Read more in the :ref:`User Guide <isotonic>`.
Parameters
----------
y : iterable of floating-point values
The data.
sample_weight : iterable of floating-point values, optional, default: None
Weights on each point of the regression.
If None, weight is set to 1 (equal weights).
y_min : optional, default: None
If not None, set the lowest value of the fit to y_min.
y_max : optional, default: None
If not None, set the highest value of the fit to y_max.
increasing : boolean, optional, default: True
        Whether ``y_`` should be computed as increasing (if set to True) or
        decreasing (if set to False).
Returns
-------
y_ : list of floating-point values
Isotonic fit of y.
References
----------
"Active set algorithms for isotonic regression; A unifying framework"
by Michael J. Best and Nilotpal Chakravarti, section 3.
"""
y = np.asarray(y, dtype=np.float)
if sample_weight is None:
sample_weight = np.ones(len(y), dtype=y.dtype)
else:
sample_weight = np.asarray(sample_weight, dtype=np.float)
if not increasing:
y = y[::-1]
sample_weight = sample_weight[::-1]
if y_min is not None or y_max is not None:
y = np.copy(y)
sample_weight = np.copy(sample_weight)
# upper bound on the cost function
C = np.dot(sample_weight, y * y) * 10
if y_min is not None:
y[0] = y_min
sample_weight[0] = C
if y_max is not None:
y[-1] = y_max
sample_weight[-1] = C
solution = np.empty(len(y))
y_ = _isotonic_regression(y, sample_weight, solution)
if increasing:
return y_
else:
return y_[::-1]
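# Worked example (illustrative): with equal weights, isotonic_regression([3, 1, 2])
# pools the violating pair (3, 1) to their mean, yielding [2.0, 2.0, 2.0].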
class IsotonicRegression(BaseEstimator, TransformerMixin, RegressorMixin):
"""Isotonic regression model.
The isotonic regression optimization problem is defined by::
min sum w_i (y[i] - y_[i]) ** 2
subject to y_[i] <= y_[j] whenever X[i] <= X[j]
and min(y_) = y_min, max(y_) = y_max
where:
- ``y[i]`` are inputs (real numbers)
- ``y_[i]`` are fitted
- ``X`` specifies the order.
If ``X`` is non-decreasing then ``y_`` is non-decreasing.
- ``w[i]`` are optional strictly positive weights (default to 1.0)
Read more in the :ref:`User Guide <isotonic>`.
Parameters
----------
y_min : optional, default: None
If not None, set the lowest value of the fit to y_min.
y_max : optional, default: None
If not None, set the highest value of the fit to y_max.
increasing : boolean or string, optional, default: True
If boolean, whether or not to fit the isotonic regression with y
increasing or decreasing.
The string value "auto" determines whether y should
increase or decrease based on the Spearman correlation estimate's
sign.
out_of_bounds : string, optional, default: "nan"
The ``out_of_bounds`` parameter handles how x-values outside of the
training domain are handled. When set to "nan", predicted y-values
will be NaN. When set to "clip", predicted y-values will be
set to the value corresponding to the nearest train interval endpoint.
When set to "raise", allow ``interp1d`` to throw ValueError.
Attributes
----------
X_ : ndarray (n_samples, )
A copy of the input X.
y_ : ndarray (n_samples, )
Isotonic fit of y.
X_min_ : float
Minimum value of input array `X_` for left bound.
X_max_ : float
Maximum value of input array `X_` for right bound.
f_ : function
The stepwise interpolating function that covers the domain `X_`.
Notes
-----
Ties are broken using the secondary method from Leeuw, 1977.
References
----------
Isotonic Median Regression: A Linear Programming Approach
Nilotpal Chakravarti
Mathematics of Operations Research
Vol. 14, No. 2 (May, 1989), pp. 303-308
Isotone Optimization in R : Pool-Adjacent-Violators
Algorithm (PAVA) and Active Set Methods
Leeuw, Hornik, Mair
Journal of Statistical Software 2009
Correctness of Kruskal's algorithms for monotone regression with ties
Leeuw, Psychometrica, 1977
"""
def __init__(self, y_min=None, y_max=None, increasing=True,
out_of_bounds='nan'):
self.y_min = y_min
self.y_max = y_max
self.increasing = increasing
self.out_of_bounds = out_of_bounds
def _check_fit_data(self, X, y, sample_weight=None):
if len(X.shape) != 1:
raise ValueError("X should be a 1d array")
def _build_f(self, X, y):
"""Build the f_ interp1d function."""
# Handle the out_of_bounds argument by setting bounds_error
if self.out_of_bounds not in ["raise", "nan", "clip"]:
raise ValueError("The argument ``out_of_bounds`` must be in "
"'nan', 'clip', 'raise'; got {0}"
.format(self.out_of_bounds))
bounds_error = self.out_of_bounds == "raise"
if len(y) == 1:
# single y, constant prediction
self.f_ = lambda x: y.repeat(x.shape)
else:
self.f_ = interpolate.interp1d(X, y, kind='slinear',
bounds_error=bounds_error)
def _build_y(self, X, y, sample_weight):
"""Build the y_ IsotonicRegression."""
check_consistent_length(X, y, sample_weight)
X, y = [check_array(x, ensure_2d=False) for x in [X, y]]
y = as_float_array(y)
self._check_fit_data(X, y, sample_weight)
# Determine increasing if auto-determination requested
if self.increasing == 'auto':
self.increasing_ = check_increasing(X, y)
else:
self.increasing_ = self.increasing
        # If sample_weight is passed, remove zero-weight values and clean the order
if sample_weight is not None:
sample_weight = check_array(sample_weight, ensure_2d=False)
mask = sample_weight > 0
X, y, sample_weight = X[mask], y[mask], sample_weight[mask]
else:
sample_weight = np.ones(len(y))
order = np.lexsort((y, X))
order_inv = np.argsort(order)
X, y, sample_weight = [astype(array[order], np.float64, copy=False)
for array in [X, y, sample_weight]]
unique_X, unique_y, unique_sample_weight = _make_unique(X, y, sample_weight)
self.X_ = unique_X
self.y_ = isotonic_regression(unique_y, unique_sample_weight, self.y_min,
self.y_max, increasing=self.increasing_)
return order_inv
def fit(self, X, y, sample_weight=None):
"""Fit the model using X, y as training data.
Parameters
----------
X : array-like, shape=(n_samples,)
Training data.
y : array-like, shape=(n_samples,)
Training target.
sample_weight : array-like, shape=(n_samples,), optional, default: None
Weights. If set to None, all weights will be set to 1 (equal
weights).
Returns
-------
self : object
Returns an instance of self.
Notes
-----
X is stored for future use, as `transform` needs X to interpolate
new input data.
"""
# Build y_
self._build_y(X, y, sample_weight)
# Handle the left and right bounds on X
self.X_min_ = np.min(self.X_)
self.X_max_ = np.max(self.X_)
# Build f_
self._build_f(self.X_, self.y_)
return self
def transform(self, T):
"""Transform new data by linear interpolation
Parameters
----------
T : array-like, shape=(n_samples,)
Data to transform.
Returns
-------
T_ : array, shape=(n_samples,)
The transformed data
"""
T = as_float_array(T)
if len(T.shape) != 1:
raise ValueError("Isotonic regression input should be a 1d array")
# Handle the out_of_bounds argument by clipping if needed
if self.out_of_bounds not in ["raise", "nan", "clip"]:
raise ValueError("The argument ``out_of_bounds`` must be in "
"'nan', 'clip', 'raise'; got {0}"
.format(self.out_of_bounds))
if self.out_of_bounds == "clip":
T = np.clip(T, self.X_min_, self.X_max_)
return self.f_(T)
def predict(self, T):
"""Predict new data by linear interpolation.
Parameters
----------
T : array-like, shape=(n_samples,)
Data to transform.
Returns
-------
T_ : array, shape=(n_samples,)
Transformed data.
"""
return self.transform(T)
def __getstate__(self):
"""Pickle-protocol - return state of the estimator. """
# copy __dict__
state = dict(self.__dict__)
# remove interpolation method
state.pop('f_', None)
return state
def __setstate__(self, state):
"""Pickle-protocol - set state of the estimator.
We need to rebuild the interpolation function.
"""
self.__dict__.update(state)
self._build_f(self.X_, self.y_)
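# Illustrative usage (not part of the library source):
#     ir = IsotonicRegression()
#     ir.fit_transform([1, 2, 3, 4], [1, 3, 2, 4])  # -> [1.0, 2.5, 2.5, 4.0]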
|
Teagan42/home-assistant | refs/heads/dev | homeassistant/components/proliphix/__init__.py | 36 | """The proliphix component."""
|
exploreodoo/datStruct | refs/heads/master | odoo/addons/website_sale_options/controllers/main.py | 236 | # -*- coding: utf-8 -*-
from openerp import SUPERUSER_ID
from openerp.addons.web import http
from openerp.addons.web.http import request
from openerp.addons.website_sale.controllers.main import website_sale
class website_sale_options(website_sale):
@http.route(['/shop/product/<model("product.template"):product>'], type='http', auth="public", website=True)
def product(self, product, category='', search='', **kwargs):
r = super(website_sale_options, self).product(product, category, search, **kwargs)
cr, uid, context, pool = request.cr, request.uid, request.context, request.registry
template_obj = pool['product.template']
optional_product_ids = []
for p in product.optional_product_ids:
ctx = dict(context, active_id=p.id)
optional_product_ids.append(template_obj.browse(cr, uid, p.id, context=ctx))
r.qcontext['optional_product_ids'] = optional_product_ids
return r
@http.route(['/shop/cart/update_option'], type='http', auth="public", methods=['POST'], website=True, multilang=False)
def cart_options_update_json(self, product_id, add_qty=1, set_qty=0, goto_shop=None, lang=None, **kw):
cr, uid, context, pool = request.cr, request.uid, request.context, request.registry
if lang:
context = dict(context, lang=lang)
request.website = request.website.with_context(context)
order = request.website.sale_get_order(force_create=1)
product = pool['product.product'].browse(cr, uid, int(product_id), context=context)
option_ids = [p.id for tmpl in product.optional_product_ids for p in tmpl.product_variant_ids]
optional_product_ids = []
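        # The browser form is assumed to post paired fields per option:
        # "optional-product-<n>" carries a variant id and "optional-add-<n>"
        # its checkbox state; only checked, known variants are kept below.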
for k, v in kw.items():
if "optional-product-" in k and int(kw.get(k.replace("product", "add"))) and int(v) in option_ids:
optional_product_ids.append(int(v))
value = {}
if add_qty or set_qty:
value = order._cart_update(product_id=int(product_id),
add_qty=int(add_qty), set_qty=int(set_qty),
optional_product_ids=optional_product_ids)
# options have all time the same quantity
for option_id in optional_product_ids:
order._cart_update(product_id=option_id,
set_qty=value.get('quantity'),
linked_line_id=value.get('line_id'))
return str(order.cart_quantity)
@http.route(['/shop/modal'], type='json', auth="public", methods=['POST'], website=True)
def modal(self, product_id, **kw):
cr, uid, context, pool = request.cr, request.uid, request.context, request.registry
pricelist = self.get_pricelist()
if not context.get('pricelist'):
context['pricelist'] = int(pricelist)
website_context = kw.get('kwargs', {}).get('context', {})
context = dict(context or {}, **website_context)
from_currency = pool.get('product.price.type')._get_field_currency(cr, uid, 'list_price', context)
to_currency = pricelist.currency_id
compute_currency = lambda price: pool['res.currency']._compute(cr, uid, from_currency, to_currency, price, context=context)
product = pool['product.product'].browse(cr, uid, int(product_id), context=context)
request.website = request.website.with_context(context)
return request.website._render("website_sale_options.modal", {
'product': product,
'compute_currency': compute_currency,
'get_attribute_value_ids': self.get_attribute_value_ids,
})
|
ShinySide/SM-530T | refs/heads/master | scripts/rt-tester/rt-tester.py | 11005 | #!/usr/bin/python
#
# rt-mutex tester
#
# (C) 2006 Thomas Gleixner <tglx@linutronix.de>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
import os
import sys
import getopt
import shutil
import string
# Globals
quiet = 0
test = 0
comments = 0
sysfsprefix = "/sys/devices/system/rttest/rttest"
statusfile = "/status"
commandfile = "/command"
# Command opcodes
cmd_opcodes = {
"schedother" : "1",
"schedfifo" : "2",
"lock" : "3",
"locknowait" : "4",
"lockint" : "5",
"lockintnowait" : "6",
"lockcont" : "7",
"unlock" : "8",
"signal" : "11",
"resetevent" : "98",
"reset" : "99",
}
test_opcodes = {
"prioeq" : ["P" , "eq" , None],
"priolt" : ["P" , "lt" , None],
"priogt" : ["P" , "gt" , None],
"nprioeq" : ["N" , "eq" , None],
"npriolt" : ["N" , "lt" , None],
"npriogt" : ["N" , "gt" , None],
"unlocked" : ["M" , "eq" , 0],
"trylock" : ["M" , "eq" , 1],
"blocked" : ["M" , "eq" , 2],
"blockedwake" : ["M" , "eq" , 3],
"locked" : ["M" , "eq" , 4],
"opcodeeq" : ["O" , "eq" , None],
"opcodelt" : ["O" , "lt" , None],
"opcodegt" : ["O" , "gt" , None],
"eventeq" : ["E" , "eq" , None],
"eventlt" : ["E" , "lt" , None],
"eventgt" : ["E" , "gt" , None],
}
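# Each test opcode above maps to [status field letter, comparator, fixed value];
# a None third element means the expected value is read from the test line,
# while "M" entries compare a single digit of the mutex status value.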
# Print usage information
def usage():
print "rt-tester.py <-c -h -q -t> <testfile>"
print " -c display comments after first command"
print " -h help"
print " -q quiet mode"
print " -t test mode (syntax check)"
print " testfile: read test specification from testfile"
print " otherwise from stdin"
return
# Print progress when not in quiet mode
def progress(str):
if not quiet:
print str
# Analyse a status value
def analyse(val, top, arg):
intval = int(val)
if top[0] == "M":
intval = intval / (10 ** int(arg))
intval = intval % 10
argval = top[2]
elif top[0] == "O":
argval = int(cmd_opcodes.get(arg, arg))
else:
argval = int(arg)
# progress("%d %s %d" %(intval, top[1], argval))
if top[1] == "eq" and intval == argval:
return 1
if top[1] == "lt" and intval < argval:
return 1
if top[1] == "gt" and intval > argval:
return 1
return 0
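# Worked example (illustrative): analyse("14", ["M", "eq", 1], "1") extracts
# digit 1 (the tens place) of status value 14, giving 1 == 1, and returns 1.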
# Parse the commandline
try:
(options, arguments) = getopt.getopt(sys.argv[1:],'chqt')
except getopt.GetoptError, ex:
usage()
sys.exit(1)
# Parse commandline options
for option, value in options:
if option == "-c":
comments = 1
elif option == "-q":
quiet = 1
elif option == "-t":
test = 1
elif option == '-h':
usage()
sys.exit(0)
# Select the input source
if arguments:
try:
fd = open(arguments[0])
except Exception,ex:
sys.stderr.write("File not found %s\n" %(arguments[0]))
sys.exit(1)
else:
fd = sys.stdin
linenr = 0
# Read the test patterns
while 1:
linenr = linenr + 1
line = fd.readline()
if not len(line):
break
line = line.strip()
parts = line.split(":")
if not parts or len(parts) < 1:
continue
if len(parts[0]) == 0:
continue
if parts[0].startswith("#"):
if comments > 1:
progress(line)
continue
if comments == 1:
comments = 2
progress(line)
cmd = parts[0].strip().lower()
opc = parts[1].strip().lower()
tid = parts[2].strip()
dat = parts[3].strip()
try:
# Test or wait for a status value
if cmd == "t" or cmd == "w":
testop = test_opcodes[opc]
fname = "%s%s%s" %(sysfsprefix, tid, statusfile)
if test:
print fname
continue
while 1:
query = 1
fsta = open(fname, 'r')
status = fsta.readline().strip()
fsta.close()
stat = status.split(",")
for s in stat:
s = s.strip()
if s.startswith(testop[0]):
# Separate status value
val = s[2:].strip()
query = analyse(val, testop, dat)
break
if query or cmd == "t":
break
progress(" " + status)
if not query:
sys.stderr.write("Test failed in line %d\n" %(linenr))
sys.exit(1)
# Issue a command to the tester
elif cmd == "c":
cmdnr = cmd_opcodes[opc]
# Build command string and sys filename
cmdstr = "%s:%s" %(cmdnr, dat)
fname = "%s%s%s" %(sysfsprefix, tid, commandfile)
if test:
print fname
continue
fcmd = open(fname, 'w')
fcmd.write(cmdstr)
fcmd.close()
except Exception,ex:
sys.stderr.write(str(ex))
sys.stderr.write("\nSyntax error in line %d\n" %(linenr))
if not test:
fd.close()
sys.exit(1)
# Normal exit pass
print "Pass"
sys.exit(0)
|
nurmd2/nurmd | refs/heads/master | addons/website_event_questions/report/report_event_registrations_questions.py | 52 | # -*- coding: utf-8 -*-
from openerp import fields, models, tools
class ReportEventRegistrationQuestions(models.Model):
_name = "event.question.report"
_auto = False
attendee_id = fields.Many2one(comodel_name='event.registration', string='Registration')
question_id = fields.Many2one(comodel_name='event.question', string='Question')
answer_id = fields.Many2one(comodel_name='event.answer', string='Answer')
event_id = fields.Many2one(comodel_name='event.event', string='Event')
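    # _auto = False: no table is created for this model; init() below backs
    # it with the plain SQL view defined in the CREATE VIEW statement.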
def init(self, cr):
""" Event Question main report """
tools.drop_view_if_exists(cr, 'event_question_report')
cr.execute(""" CREATE VIEW event_question_report AS (
SELECT
att_answer.id as id,
att_answer.event_registration_id as attendee_id,
answer.question_id as question_id,
answer.id as answer_id,
question.event_id as event_id
FROM
event_registration_answer as att_answer
LEFT JOIN
event_answer as answer ON answer.id = att_answer.event_answer_id
LEFT JOIN
event_question as question ON question.id = answer.question_id
GROUP BY
attendee_id,
event_id,
question_id,
answer_id,
att_answer.id
)""")
|
sametmax/Django--an-app-at-a-time | refs/heads/master | ignore_this_directory/django/test/utils.py | 3 | import collections
import logging
import re
import sys
import time
import warnings
from contextlib import contextmanager
from functools import wraps
from io import StringIO
from itertools import chain
from types import SimpleNamespace
from unittest import TestCase, skipIf, skipUnless
from xml.dom.minidom import Node, parseString
from django.apps import apps
from django.apps.registry import Apps
from django.conf import UserSettingsHolder, settings
from django.core import mail
from django.core.exceptions import ImproperlyConfigured
from django.core.signals import request_started
from django.db import DEFAULT_DB_ALIAS, connections, reset_queries
from django.db.models.options import Options
from django.template import Template
from django.test.signals import setting_changed, template_rendered
from django.urls import get_script_prefix, set_script_prefix
from django.utils.translation import deactivate
try:
import jinja2
except ImportError:
jinja2 = None
__all__ = (
'Approximate', 'ContextList', 'isolate_lru_cache', 'get_runner',
'modify_settings', 'override_settings',
'requires_tz_support',
'setup_test_environment', 'teardown_test_environment',
)
TZ_SUPPORT = hasattr(time, 'tzset')
class Approximate:
def __init__(self, val, places=7):
self.val = val
self.places = places
def __repr__(self):
return repr(self.val)
def __eq__(self, other):
return self.val == other or round(abs(self.val - other), self.places) == 0
class ContextList(list):
"""
A wrapper that provides direct key access to context items contained
in a list of context objects.
"""
def __getitem__(self, key):
if isinstance(key, str):
for subcontext in self:
if key in subcontext:
return subcontext[key]
raise KeyError(key)
else:
return super().__getitem__(key)
def get(self, key, default=None):
try:
return self.__getitem__(key)
except KeyError:
return default
def __contains__(self, key):
try:
self[key]
except KeyError:
return False
return True
def keys(self):
"""
Flattened keys of subcontexts.
"""
return set(chain.from_iterable(d for subcontext in self for d in subcontext))
def instrumented_test_render(self, context):
"""
An instrumented Template render method, providing a signal that can be
intercepted by the test Client.
"""
template_rendered.send(sender=self, template=self, context=context)
return self.nodelist.render(context)
class _TestState:
pass
def setup_test_environment(debug=None):
"""
Perform global pre-test setup, such as installing the instrumented template
renderer and setting the email backend to the locmem email backend.
"""
if hasattr(_TestState, 'saved_data'):
# Executing this function twice would overwrite the saved values.
raise RuntimeError(
"setup_test_environment() was already called and can't be called "
"again without first calling teardown_test_environment()."
)
if debug is None:
debug = settings.DEBUG
saved_data = SimpleNamespace()
_TestState.saved_data = saved_data
saved_data.allowed_hosts = settings.ALLOWED_HOSTS
# Add the default host of the test client.
settings.ALLOWED_HOSTS = [*settings.ALLOWED_HOSTS, 'testserver']
saved_data.debug = settings.DEBUG
settings.DEBUG = debug
saved_data.email_backend = settings.EMAIL_BACKEND
settings.EMAIL_BACKEND = 'django.core.mail.backends.locmem.EmailBackend'
saved_data.template_render = Template._render
Template._render = instrumented_test_render
mail.outbox = []
deactivate()
def teardown_test_environment():
"""
Perform any global post-test teardown, such as restoring the original
template renderer and restoring the email sending functions.
"""
saved_data = _TestState.saved_data
settings.ALLOWED_HOSTS = saved_data.allowed_hosts
settings.DEBUG = saved_data.debug
settings.EMAIL_BACKEND = saved_data.email_backend
Template._render = saved_data.template_render
del _TestState.saved_data
del mail.outbox
def setup_databases(verbosity, interactive, keepdb=False, debug_sql=False, parallel=0, aliases=None, **kwargs):
"""Create the test databases."""
test_databases, mirrored_aliases = get_unique_databases_and_mirrors(aliases)
old_names = []
for db_name, aliases in test_databases.values():
first_alias = None
for alias in aliases:
connection = connections[alias]
old_names.append((connection, db_name, first_alias is None))
# Actually create the database for the first connection
if first_alias is None:
first_alias = alias
connection.creation.create_test_db(
verbosity=verbosity,
autoclobber=not interactive,
keepdb=keepdb,
serialize=connection.settings_dict.get('TEST', {}).get('SERIALIZE', True),
)
if parallel > 1:
for index in range(parallel):
connection.creation.clone_test_db(
suffix=str(index + 1),
verbosity=verbosity,
keepdb=keepdb,
)
# Configure all other connections as mirrors of the first one
else:
connections[alias].creation.set_as_test_mirror(connections[first_alias].settings_dict)
# Configure the test mirrors.
for alias, mirror_alias in mirrored_aliases.items():
connections[alias].creation.set_as_test_mirror(
connections[mirror_alias].settings_dict)
if debug_sql:
for alias in connections:
connections[alias].force_debug_cursor = True
return old_names
def dependency_ordered(test_databases, dependencies):
"""
Reorder test_databases into an order that honors the dependencies
described in TEST[DEPENDENCIES].
"""
ordered_test_databases = []
resolved_databases = set()
# Maps db signature to dependencies of all its aliases
dependencies_map = {}
# Check that no database depends on its own alias
for sig, (_, aliases) in test_databases:
all_deps = set()
for alias in aliases:
all_deps.update(dependencies.get(alias, []))
if not all_deps.isdisjoint(aliases):
raise ImproperlyConfigured(
"Circular dependency: databases %r depend on each other, "
"but are aliases." % aliases
)
dependencies_map[sig] = all_deps
while test_databases:
changed = False
deferred = []
# Try to find a DB that has all its dependencies met
for signature, (db_name, aliases) in test_databases:
if dependencies_map[signature].issubset(resolved_databases):
resolved_databases.update(aliases)
ordered_test_databases.append((signature, (db_name, aliases)))
changed = True
else:
deferred.append((signature, (db_name, aliases)))
if not changed:
raise ImproperlyConfigured("Circular dependency in TEST[DEPENDENCIES]")
test_databases = deferred
return ordered_test_databases
def get_unique_databases_and_mirrors(aliases=None):
"""
Figure out which databases actually need to be created.
Deduplicate entries in DATABASES that correspond the same database or are
configured as test mirrors.
Return two values:
- test_databases: ordered mapping of signatures to (name, list of aliases)
where all aliases share the same underlying database.
- mirrored_aliases: mapping of mirror aliases to original aliases.
"""
if aliases is None:
aliases = connections
mirrored_aliases = {}
test_databases = {}
dependencies = {}
default_sig = connections[DEFAULT_DB_ALIAS].creation.test_db_signature()
for alias in connections:
connection = connections[alias]
test_settings = connection.settings_dict['TEST']
if test_settings['MIRROR']:
# If the database is marked as a test mirror, save the alias.
mirrored_aliases[alias] = test_settings['MIRROR']
elif alias in aliases:
# Store a tuple with DB parameters that uniquely identify it.
# If we have two aliases with the same values for that tuple,
# we only need to create the test database once.
item = test_databases.setdefault(
connection.creation.test_db_signature(),
(connection.settings_dict['NAME'], set())
)
item[1].add(alias)
if 'DEPENDENCIES' in test_settings:
dependencies[alias] = test_settings['DEPENDENCIES']
else:
if alias != DEFAULT_DB_ALIAS and connection.creation.test_db_signature() != default_sig:
dependencies[alias] = test_settings.get('DEPENDENCIES', [DEFAULT_DB_ALIAS])
test_databases = dependency_ordered(test_databases.items(), dependencies)
test_databases = collections.OrderedDict(test_databases)
return test_databases, mirrored_aliases
def teardown_databases(old_config, verbosity, parallel=0, keepdb=False):
"""Destroy all the non-mirror databases."""
for connection, old_name, destroy in old_config:
if destroy:
if parallel > 1:
for index in range(parallel):
connection.creation.destroy_test_db(
suffix=str(index + 1),
verbosity=verbosity,
keepdb=keepdb,
)
connection.creation.destroy_test_db(old_name, verbosity, keepdb)
def get_runner(settings, test_runner_class=None):
test_runner_class = test_runner_class or settings.TEST_RUNNER
test_path = test_runner_class.split('.')
# Allow for relative paths
if len(test_path) > 1:
test_module_name = '.'.join(test_path[:-1])
else:
test_module_name = '.'
test_module = __import__(test_module_name, {}, {}, test_path[-1])
test_runner = getattr(test_module, test_path[-1])
return test_runner
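# Illustrative call (not part of the module): with Django's default
# TEST_RUNNER setting, get_runner(settings) imports and returns
# django.test.runner.DiscoverRunner.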
class TestContextDecorator:
"""
A base class that can either be used as a context manager during tests
or as a test function or unittest.TestCase subclass decorator to perform
temporary alterations.
`attr_name`: attribute assigned the return value of enable() if used as
a class decorator.
`kwarg_name`: keyword argument passing the return value of enable() if
used as a function decorator.
"""
def __init__(self, attr_name=None, kwarg_name=None):
self.attr_name = attr_name
self.kwarg_name = kwarg_name
def enable(self):
raise NotImplementedError
def disable(self):
raise NotImplementedError
def __enter__(self):
return self.enable()
def __exit__(self, exc_type, exc_value, traceback):
self.disable()
def decorate_class(self, cls):
if issubclass(cls, TestCase):
decorated_setUp = cls.setUp
decorated_tearDown = cls.tearDown
def setUp(inner_self):
context = self.enable()
if self.attr_name:
setattr(inner_self, self.attr_name, context)
try:
decorated_setUp(inner_self)
except Exception:
self.disable()
raise
def tearDown(inner_self):
decorated_tearDown(inner_self)
self.disable()
cls.setUp = setUp
cls.tearDown = tearDown
return cls
raise TypeError('Can only decorate subclasses of unittest.TestCase')
def decorate_callable(self, func):
@wraps(func)
def inner(*args, **kwargs):
with self as context:
if self.kwarg_name:
kwargs[self.kwarg_name] = context
return func(*args, **kwargs)
return inner
def __call__(self, decorated):
if isinstance(decorated, type):
return self.decorate_class(decorated)
elif callable(decorated):
return self.decorate_callable(decorated)
raise TypeError('Cannot decorate object of type %s' % type(decorated))
class override_settings(TestContextDecorator):
"""
Act as either a decorator or a context manager. If it's a decorator, take a
function and return a wrapped function. If it's a contextmanager, use it
with the ``with`` statement. In either event, entering/exiting are called
before and after, respectively, the function/block is executed.
"""
enable_exception = None
def __init__(self, **kwargs):
self.options = kwargs
super().__init__()
def enable(self):
# Keep this code at the beginning to leave the settings unchanged
# in case it raises an exception because INSTALLED_APPS is invalid.
if 'INSTALLED_APPS' in self.options:
try:
apps.set_installed_apps(self.options['INSTALLED_APPS'])
except Exception:
apps.unset_installed_apps()
raise
override = UserSettingsHolder(settings._wrapped)
for key, new_value in self.options.items():
setattr(override, key, new_value)
self.wrapped = settings._wrapped
settings._wrapped = override
for key, new_value in self.options.items():
try:
setting_changed.send(
sender=settings._wrapped.__class__,
setting=key, value=new_value, enter=True,
)
except Exception as exc:
self.enable_exception = exc
self.disable()
def disable(self):
if 'INSTALLED_APPS' in self.options:
apps.unset_installed_apps()
settings._wrapped = self.wrapped
del self.wrapped
responses = []
for key in self.options:
new_value = getattr(settings, key, None)
responses_for_setting = setting_changed.send_robust(
sender=settings._wrapped.__class__,
setting=key, value=new_value, enter=False,
)
responses.extend(responses_for_setting)
if self.enable_exception is not None:
exc = self.enable_exception
self.enable_exception = None
raise exc
for _, response in responses:
if isinstance(response, Exception):
raise response
def save_options(self, test_func):
if test_func._overridden_settings is None:
test_func._overridden_settings = self.options
else:
# Duplicate dict to prevent subclasses from altering their parent.
test_func._overridden_settings = {
**test_func._overridden_settings,
**self.options,
}
def decorate_class(self, cls):
from django.test import SimpleTestCase
if not issubclass(cls, SimpleTestCase):
raise ValueError(
"Only subclasses of Django SimpleTestCase can be decorated "
"with override_settings")
self.save_options(cls)
return cls
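# A minimal usage sketch (added for illustration; not part of the upstream
# module). Assumes Django settings are configured; override_settings also
# works as a decorator on SimpleTestCase subclasses and test methods.
def _example_override_settings():
    with override_settings(ALLOWED_HOSTS=['testserver']):
        assert settings.ALLOWED_HOSTS == ['testserver']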
class modify_settings(override_settings):
"""
Like override_settings, but makes it possible to append, prepend, or remove
items instead of redefining the entire list.
"""
def __init__(self, *args, **kwargs):
if args:
# Hack used when instantiating from SimpleTestCase.setUpClass.
assert not kwargs
self.operations = args[0]
else:
assert not args
self.operations = list(kwargs.items())
super(override_settings, self).__init__()
def save_options(self, test_func):
if test_func._modified_settings is None:
test_func._modified_settings = self.operations
else:
# Duplicate list to prevent subclasses from altering their parent.
test_func._modified_settings = list(
test_func._modified_settings) + self.operations
def enable(self):
self.options = {}
for name, operations in self.operations:
try:
# When called from SimpleTestCase.setUpClass, values may be
                # overridden several times; accumulate changes.
value = self.options[name]
except KeyError:
value = list(getattr(settings, name, []))
for action, items in operations.items():
                # items may be a single value or an iterable.
if isinstance(items, str):
items = [items]
if action == 'append':
value = value + [item for item in items if item not in value]
elif action == 'prepend':
value = [item for item in items if item not in value] + value
elif action == 'remove':
value = [item for item in value if item not in items]
else:
raise ValueError("Unsupported action: %s" % action)
self.options[name] = value
super().enable()
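# A minimal usage sketch (added for illustration; not part of the upstream
# module). Assumes Django settings are configured; the host names below
# are purely illustrative.
def _example_modify_settings():
    with modify_settings(ALLOWED_HOSTS={'append': 'extra.example.com', 'remove': 'stale.example.com'}):
        assert 'extra.example.com' in settings.ALLOWED_HOSTS
        assert 'stale.example.com' not in settings.ALLOWED_HOSTS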
class override_system_checks(TestContextDecorator):
"""
Act as a decorator. Override list of registered system checks.
Useful when you override `INSTALLED_APPS`, e.g. if you exclude `auth` app,
you also need to exclude its system checks.
"""
def __init__(self, new_checks, deployment_checks=None):
from django.core.checks.registry import registry
self.registry = registry
self.new_checks = new_checks
self.deployment_checks = deployment_checks
super().__init__()
def enable(self):
self.old_checks = self.registry.registered_checks
self.registry.registered_checks = set()
for check in self.new_checks:
self.registry.register(check, *getattr(check, 'tags', ()))
self.old_deployment_checks = self.registry.deployment_checks
if self.deployment_checks is not None:
self.registry.deployment_checks = set()
for check in self.deployment_checks:
self.registry.register(check, *getattr(check, 'tags', ()), deploy=True)
def disable(self):
self.registry.registered_checks = self.old_checks
self.registry.deployment_checks = self.old_deployment_checks
def compare_xml(want, got):
"""
    Try to do an 'xml-comparison' of want and got. Plain string comparison
doesn't always work because, for example, attribute ordering should not be
important. Ignore comment nodes and leading and trailing whitespace.
Based on https://github.com/lxml/lxml/blob/master/src/lxml/doctestcompare.py
"""
_norm_whitespace_re = re.compile(r'[ \t\n][ \t\n]+')
def norm_whitespace(v):
return _norm_whitespace_re.sub(' ', v)
def child_text(element):
return ''.join(c.data for c in element.childNodes
if c.nodeType == Node.TEXT_NODE)
def children(element):
return [c for c in element.childNodes
if c.nodeType == Node.ELEMENT_NODE]
def norm_child_text(element):
return norm_whitespace(child_text(element))
def attrs_dict(element):
return dict(element.attributes.items())
def check_element(want_element, got_element):
if want_element.tagName != got_element.tagName:
return False
if norm_child_text(want_element) != norm_child_text(got_element):
return False
if attrs_dict(want_element) != attrs_dict(got_element):
return False
want_children = children(want_element)
got_children = children(got_element)
if len(want_children) != len(got_children):
return False
return all(check_element(want, got) for want, got in zip(want_children, got_children))
def first_node(document):
for node in document.childNodes:
if node.nodeType != Node.COMMENT_NODE:
return node
want = want.strip().replace('\\n', '\n')
got = got.strip().replace('\\n', '\n')
# If the string is not a complete xml document, we may need to add a
    # root element. This allows us to compare fragments, like "<foo/><bar/>"
if not want.startswith('<?xml'):
wrapper = '<root>%s</root>'
want = wrapper % want
got = wrapper % got
# Parse the want and got strings, and compare the parsings.
want_root = first_node(parseString(want))
got_root = first_node(parseString(got))
return check_element(want_root, got_root)
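# A minimal usage sketch (added for illustration; not part of the upstream
# module): attribute order is irrelevant to the comparison, differing text
# content is not.
def _example_compare_xml():
    assert compare_xml('<a y="2" x="1"/>', '<a x="1" y="2"/>')
    assert not compare_xml('<a>1</a>', '<a>2</a>')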
def str_prefix(s):
return s % {'_': ''}
class CaptureQueriesContext:
"""
Context manager that captures queries executed by the specified connection.
"""
def __init__(self, connection):
self.connection = connection
def __iter__(self):
return iter(self.captured_queries)
def __getitem__(self, index):
return self.captured_queries[index]
def __len__(self):
return len(self.captured_queries)
@property
def captured_queries(self):
return self.connection.queries[self.initial_queries:self.final_queries]
def __enter__(self):
self.force_debug_cursor = self.connection.force_debug_cursor
self.connection.force_debug_cursor = True
# Run any initialization queries if needed so that they won't be
# included as part of the count.
self.connection.ensure_connection()
self.initial_queries = len(self.connection.queries_log)
self.final_queries = None
request_started.disconnect(reset_queries)
return self
def __exit__(self, exc_type, exc_value, traceback):
self.connection.force_debug_cursor = self.force_debug_cursor
request_started.connect(reset_queries)
if exc_type is not None:
return
self.final_queries = len(self.connection.queries_log)
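# A minimal usage sketch (added for illustration; not part of the upstream
# module). Assumes a configured database connection.
def _example_capture_queries_context():
    from django.db import connection
    with CaptureQueriesContext(connection) as ctx:
        with connection.cursor() as cursor:
            cursor.execute('SELECT 1')
    assert len(ctx.captured_queries) == 1
    assert 'SELECT 1' in ctx.captured_queries[0]['sql']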
class ignore_warnings(TestContextDecorator):
def __init__(self, **kwargs):
self.ignore_kwargs = kwargs
if 'message' in self.ignore_kwargs or 'module' in self.ignore_kwargs:
self.filter_func = warnings.filterwarnings
else:
self.filter_func = warnings.simplefilter
super().__init__()
def enable(self):
self.catch_warnings = warnings.catch_warnings()
self.catch_warnings.__enter__()
self.filter_func('ignore', **self.ignore_kwargs)
def disable(self):
self.catch_warnings.__exit__(*sys.exc_info())
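# A minimal usage sketch (added for illustration; not part of the upstream
# module): silence one warning category for the duration of a test.
@ignore_warnings(category=DeprecationWarning)
def _example_ignore_warnings():
    warnings.warn('old API', DeprecationWarning)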
@contextmanager
def patch_logger(logger_name, log_level, log_kwargs=False):
"""
Context manager that takes a named logger and the logging level
and provides a simple mock-like list of messages received.
Use unittest.assertLogs() if you only need Python 3 support. This
private API will be removed after Python 2 EOL in 2020 (#27753).
"""
calls = []
def replacement(msg, *args, **kwargs):
call = msg % args
calls.append((call, kwargs) if log_kwargs else call)
logger = logging.getLogger(logger_name)
orig = getattr(logger, log_level)
setattr(logger, log_level, replacement)
try:
yield calls
finally:
setattr(logger, log_level, orig)
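# A minimal usage sketch (added for illustration; not part of the upstream
# module); the logger name below is illustrative.
def _example_patch_logger():
    with patch_logger('django.security', 'warning') as calls:
        logging.getLogger('django.security').warning('suspicious %s', 'operation')
    assert calls == ['suspicious operation']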
# On OSes that don't provide tzset (Windows), we can't set the timezone
# in which the program runs. As a consequence, we must skip tests that
# don't enforce a specific timezone (with timezone.override or equivalent),
# or attempt to interpret naive datetimes in the default timezone.
requires_tz_support = skipUnless(
TZ_SUPPORT,
"This test relies on the ability to run a program in an arbitrary "
"time zone, but your operating system isn't able to do that."
)
@contextmanager
def extend_sys_path(*paths):
"""Context manager to temporarily add paths to sys.path."""
_orig_sys_path = sys.path[:]
sys.path.extend(paths)
try:
yield
finally:
sys.path = _orig_sys_path
@contextmanager
def isolate_lru_cache(lru_cache_object):
"""Clear the cache of an LRU cache object on entering and exiting."""
lru_cache_object.cache_clear()
try:
yield
finally:
lru_cache_object.cache_clear()
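# A minimal usage sketch (added for illustration; not part of the upstream
# module): the wrapped cache starts cold inside the block and is cleared
# again on exit.
def _example_isolate_lru_cache():
    from functools import lru_cache
    @lru_cache(maxsize=None)
    def square(x):
        return x * x
    square(2)
    with isolate_lru_cache(square):
        assert square.cache_info().currsize == 0
        square(3)
    assert square.cache_info().currsize == 0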
@contextmanager
def captured_output(stream_name):
"""Return a context manager used by captured_stdout/stdin/stderr
that temporarily replaces the sys stream *stream_name* with a StringIO.
Note: This function and the following ``captured_std*`` are copied
from CPython's ``test.support`` module."""
orig_stdout = getattr(sys, stream_name)
setattr(sys, stream_name, StringIO())
try:
yield getattr(sys, stream_name)
finally:
setattr(sys, stream_name, orig_stdout)
def captured_stdout():
"""Capture the output of sys.stdout:
with captured_stdout() as stdout:
print("hello")
self.assertEqual(stdout.getvalue(), "hello\n")
"""
return captured_output("stdout")
def captured_stderr():
"""Capture the output of sys.stderr:
with captured_stderr() as stderr:
print("hello", file=sys.stderr)
self.assertEqual(stderr.getvalue(), "hello\n")
"""
return captured_output("stderr")
def captured_stdin():
"""Capture the input to sys.stdin:
with captured_stdin() as stdin:
stdin.write('hello\n')
stdin.seek(0)
# call test code that consumes from sys.stdin
captured = input()
self.assertEqual(captured, "hello")
"""
return captured_output("stdin")
@contextmanager
def freeze_time(t):
"""
Context manager to temporarily freeze time.time(). This temporarily
modifies the time function of the time module. Modules which import the
    time function directly (e.g. `from time import time`) won't be affected.
This isn't meant as a public API, but helps reduce some repetitive code in
Django's test suite.
"""
_real_time = time.time
time.time = lambda: t
try:
yield
finally:
time.time = _real_time
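# A minimal usage sketch (added for illustration; not part of the upstream
# module).
def _example_freeze_time():
    with freeze_time(1000000.0):
        assert time.time() == 1000000.0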
def require_jinja2(test_func):
"""
Decorator to enable a Jinja2 template engine in addition to the regular
Django template engine for a test or skip it if Jinja2 isn't available.
"""
test_func = skipIf(jinja2 is None, "this test requires jinja2")(test_func)
test_func = override_settings(TEMPLATES=[{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'APP_DIRS': True,
}, {
'BACKEND': 'django.template.backends.jinja2.Jinja2',
'APP_DIRS': True,
'OPTIONS': {'keep_trailing_newline': True},
}])(test_func)
return test_func
class override_script_prefix(TestContextDecorator):
"""Decorator or context manager to temporary override the script prefix."""
def __init__(self, prefix):
self.prefix = prefix
super().__init__()
def enable(self):
self.old_prefix = get_script_prefix()
set_script_prefix(self.prefix)
def disable(self):
set_script_prefix(self.old_prefix)
class LoggingCaptureMixin:
"""
Capture the output from the 'django' logger and store it on the class's
logger_output attribute.
"""
def setUp(self):
self.logger = logging.getLogger('django')
self.old_stream = self.logger.handlers[0].stream
self.logger_output = StringIO()
self.logger.handlers[0].stream = self.logger_output
def tearDown(self):
self.logger.handlers[0].stream = self.old_stream
class isolate_apps(TestContextDecorator):
"""
Act as either a decorator or a context manager to register models defined
in its wrapped context to an isolated registry.
The list of installed apps the isolated registry should contain must be
passed as arguments.
Two optional keyword arguments can be specified:
`attr_name`: attribute assigned the isolated registry if used as a class
decorator.
`kwarg_name`: keyword argument passing the isolated registry if used as a
function decorator.
"""
def __init__(self, *installed_apps, **kwargs):
self.installed_apps = installed_apps
super().__init__(**kwargs)
def enable(self):
self.old_apps = Options.default_apps
apps = Apps(self.installed_apps)
setattr(Options, 'default_apps', apps)
return apps
def disable(self):
setattr(Options, 'default_apps', self.old_apps)
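# A minimal usage sketch (added for illustration; not part of the upstream
# module). Assumes a configured Django project; the model below lands on a
# throwaway registry rather than the global one, so it never leaks into
# other tests.
@isolate_apps('django.contrib.contenttypes')
def _example_isolate_apps():
    from django.db import models
    class ScratchModel(models.Model):
        class Meta:
            app_label = 'contenttypes'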
def tag(*tags):
"""Decorator to add tags to a test class or method."""
def decorator(obj):
if hasattr(obj, 'tags'):
obj.tags = obj.tags.union(tags)
else:
setattr(obj, 'tags', set(tags))
return obj
return decorator
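# A minimal usage sketch (added for illustration; not part of the upstream
# module): tags accumulate on the decorated object.
def _example_tag():
    @tag('slow', 'integration')
    def test_something():
        pass
    assert test_something.tags == {'slow', 'integration'}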
@contextmanager
def register_lookup(field, *lookups, lookup_name=None):
"""
Context manager to temporarily register lookups on a model field using
lookup_name (or the lookup's lookup_name if not provided).
"""
try:
for lookup in lookups:
field.register_lookup(lookup, lookup_name)
yield
finally:
for lookup in lookups:
field._unregister_lookup(lookup, lookup_name)
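# A minimal usage sketch (added for illustration; not part of the upstream
# module). Assumes a configured Django project; registers the Length
# transform on CharField under a custom name for the duration of the block.
def _example_register_lookup():
    from django.db.models import CharField
    from django.db.models.functions import Length
    with register_lookup(CharField, Length, lookup_name='len'):
        # Inside the block, CharField supports filters such as
        # SomeModel.objects.filter(name__len__gt=3).
        pass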
|
vongochung/buiquocviet | refs/heads/master | django/contrib/localflavor/se/__init__.py | 12133432 | |
onurhunce/AutoAlbumCreator | refs/heads/master | Assignment/AlbumCreator/management/commands/_private.py | 12133432 | |
KellyChan/python-examples | refs/heads/master | javascript/backbone/backbone-templates/backbone-fileupload/venvs/lib/python2.7/site-packages/django/contrib/localflavor/cz/__init__.py | 12133432 | |
SeleniumHQ/buck | refs/heads/master | py/buck/zip/__init__.py | 12133432 | |
ebukoz/thrive | refs/heads/develop | erpnext/education/report/course_wise_assessment_report/__init__.py | 12133432 | |
swdrsk/myhtml | refs/heads/master | django/mysite/mysite/__init__.py | 12133432 | |
sliz1/servo | refs/heads/master | tests/wpt/web-platform-tests/tools/pytest/_pytest/vendored_packages/__init__.py | 12133432 | |
Metaswitch/calico-neutron | refs/heads/calico-readme | neutron/agent/linux/interface.py | 7 | # Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import netaddr
from oslo.config import cfg
from oslo.utils import importutils
import six
from neutron.agent.common import config
from neutron.agent.linux import ip_lib
from neutron.agent.linux import ovs_lib
from neutron.agent.linux import utils
from neutron.common import constants as n_const
from neutron.common import exceptions
from neutron.extensions import flavor
from neutron.i18n import _LE, _LI
from neutron.openstack.common import log as logging
LOG = logging.getLogger(__name__)
OPTS = [
cfg.StrOpt('ovs_integration_bridge',
default='br-int',
help=_('Name of Open vSwitch bridge to use')),
cfg.BoolOpt('ovs_use_veth',
default=False,
help=_('Uses veth for an interface or not')),
cfg.IntOpt('network_device_mtu',
help=_('MTU setting for device.')),
cfg.StrOpt('meta_flavor_driver_mappings',
help=_('Mapping between flavor and LinuxInterfaceDriver. '
'It is specific to MetaInterfaceDriver used with '
'admin_user, admin_password, admin_tenant_name, '
'admin_url, auth_strategy, auth_region and '
'endpoint_type.')),
cfg.StrOpt('admin_user',
help=_("Admin username")),
cfg.StrOpt('admin_password',
help=_("Admin password"),
secret=True),
cfg.StrOpt('admin_tenant_name',
help=_("Admin tenant name")),
cfg.StrOpt('auth_url',
help=_("Authentication URL")),
cfg.StrOpt('auth_strategy', default='keystone',
help=_("The type of authentication to use")),
cfg.StrOpt('auth_region',
help=_("Authentication region")),
cfg.StrOpt('endpoint_type',
default='publicURL',
help=_("Network service endpoint type to pull from "
"the keystone catalog")),
]
@six.add_metaclass(abc.ABCMeta)
class LinuxInterfaceDriver(object):
# from linux IF_NAMESIZE
DEV_NAME_LEN = 14
DEV_NAME_PREFIX = n_const.TAP_DEVICE_PREFIX
def __init__(self, conf):
self.conf = conf
self.root_helper = config.get_root_helper(conf)
def init_l3(self, device_name, ip_cidrs, namespace=None,
preserve_ips=[], gateway=None, extra_subnets=[]):
"""Set the L3 settings for the interface using data from the port.
ip_cidrs: list of 'X.X.X.X/YY' strings
preserve_ips: list of ip cidrs that should not be removed from device
"""
device = ip_lib.IPDevice(device_name,
self.root_helper,
namespace=namespace)
previous = {}
for address in device.addr.list(scope='global', filters=['permanent']):
previous[address['cidr']] = address['ip_version']
# add new addresses
for ip_cidr in ip_cidrs:
net = netaddr.IPNetwork(ip_cidr)
# Convert to compact IPv6 address because the return values of
# "ip addr list" are compact.
if net.version == 6:
ip_cidr = str(net)
if ip_cidr in previous:
del previous[ip_cidr]
continue
device.addr.add(net.version, ip_cidr, str(net.broadcast))
# clean up any old addresses
for ip_cidr, ip_version in previous.items():
if ip_cidr not in preserve_ips:
device.addr.delete(ip_version, ip_cidr)
self.delete_conntrack_state(root_helper=self.root_helper,
namespace=namespace,
ip=ip_cidr)
if gateway:
device.route.add_gateway(gateway)
new_onlink_routes = set(s['cidr'] for s in extra_subnets)
existing_onlink_routes = set(device.route.list_onlink_routes())
for route in new_onlink_routes - existing_onlink_routes:
device.route.add_onlink_route(route)
for route in existing_onlink_routes - new_onlink_routes:
device.route.delete_onlink_route(route)
def delete_conntrack_state(self, root_helper, namespace, ip):
"""Delete conntrack state associated with an IP address.
This terminates any active connections through an IP. Call this soon
after removing the IP address from an interface so that new connections
cannot be created before the IP address is gone.
root_helper: root_helper to gain root access to call conntrack
namespace: the name of the namespace where the IP has been configured
ip: the IP address for which state should be removed. This can be
passed as a string with or without /NN. A netaddr.IPAddress or
            netaddr.IPNetwork representing the IP address can also be passed.
"""
ip_str = str(netaddr.IPNetwork(ip).ip)
ip_wrapper = ip_lib.IPWrapper(root_helper, namespace=namespace)
# Delete conntrack state for ingress traffic
# If 0 flow entries have been deleted
# conntrack -D will return 1
try:
ip_wrapper.netns.execute(["conntrack", "-D", "-d", ip_str],
check_exit_code=True,
extra_ok_codes=[1])
except RuntimeError:
LOG.exception(_LE("Failed deleting ingress connection state of"
" floatingip %s"), ip_str)
# Delete conntrack state for egress traffic
try:
ip_wrapper.netns.execute(["conntrack", "-D", "-q", ip_str],
check_exit_code=True,
extra_ok_codes=[1])
except RuntimeError:
LOG.exception(_LE("Failed deleting egress connection state of"
" floatingip %s"), ip_str)
def check_bridge_exists(self, bridge):
if not ip_lib.device_exists(bridge):
raise exceptions.BridgeDoesNotExist(bridge=bridge)
def get_device_name(self, port):
return (self.DEV_NAME_PREFIX + port.id)[:self.DEV_NAME_LEN]
@abc.abstractmethod
def plug(self, network_id, port_id, device_name, mac_address,
bridge=None, namespace=None, prefix=None):
"""Plug in the interface."""
@abc.abstractmethod
def unplug(self, device_name, bridge=None, namespace=None, prefix=None):
"""Unplug the interface."""
class NullDriver(LinuxInterfaceDriver):
def plug(self, network_id, port_id, device_name, mac_address,
bridge=None, namespace=None, prefix=None):
pass
def unplug(self, device_name, bridge=None, namespace=None, prefix=None):
pass
class OVSInterfaceDriver(LinuxInterfaceDriver):
"""Driver for creating an internal interface on an OVS bridge."""
DEV_NAME_PREFIX = n_const.TAP_DEVICE_PREFIX
def __init__(self, conf):
super(OVSInterfaceDriver, self).__init__(conf)
if self.conf.ovs_use_veth:
self.DEV_NAME_PREFIX = 'ns-'
def _get_tap_name(self, dev_name, prefix=None):
if self.conf.ovs_use_veth:
dev_name = dev_name.replace(prefix or self.DEV_NAME_PREFIX,
n_const.TAP_DEVICE_PREFIX)
return dev_name
def _ovs_add_port(self, bridge, device_name, port_id, mac_address,
internal=True):
attrs = [('external-ids:iface-id', port_id),
('external-ids:iface-status', 'active'),
('external-ids:attached-mac', mac_address)]
if internal:
attrs.insert(0, ('type', 'internal'))
ovs = ovs_lib.OVSBridge(bridge, self.root_helper)
ovs.replace_port(device_name, *attrs)
def plug(self, network_id, port_id, device_name, mac_address,
bridge=None, namespace=None, prefix=None):
"""Plug in the interface."""
if not bridge:
bridge = self.conf.ovs_integration_bridge
if not ip_lib.device_exists(device_name,
self.root_helper,
namespace=namespace):
self.check_bridge_exists(bridge)
ip = ip_lib.IPWrapper(self.root_helper)
tap_name = self._get_tap_name(device_name, prefix)
if self.conf.ovs_use_veth:
# Create ns_dev in a namespace if one is configured.
root_dev, ns_dev = ip.add_veth(tap_name,
device_name,
namespace2=namespace)
else:
ns_dev = ip.device(device_name)
internal = not self.conf.ovs_use_veth
self._ovs_add_port(bridge, tap_name, port_id, mac_address,
internal=internal)
ns_dev.link.set_address(mac_address)
if self.conf.network_device_mtu:
ns_dev.link.set_mtu(self.conf.network_device_mtu)
if self.conf.ovs_use_veth:
root_dev.link.set_mtu(self.conf.network_device_mtu)
# Add an interface created by ovs to the namespace.
if not self.conf.ovs_use_veth and namespace:
namespace_obj = ip.ensure_namespace(namespace)
namespace_obj.add_device_to_namespace(ns_dev)
ns_dev.link.set_up()
if self.conf.ovs_use_veth:
root_dev.link.set_up()
else:
LOG.info(_LI("Device %s already exists"), device_name)
def unplug(self, device_name, bridge=None, namespace=None, prefix=None):
"""Unplug the interface."""
if not bridge:
bridge = self.conf.ovs_integration_bridge
tap_name = self._get_tap_name(device_name, prefix)
self.check_bridge_exists(bridge)
ovs = ovs_lib.OVSBridge(bridge, self.root_helper)
try:
ovs.delete_port(tap_name)
if self.conf.ovs_use_veth:
device = ip_lib.IPDevice(device_name,
self.root_helper,
namespace)
device.link.delete()
LOG.debug("Unplugged interface '%s'", device_name)
except RuntimeError:
LOG.error(_LE("Failed unplugging interface '%s'"),
device_name)
class MidonetInterfaceDriver(LinuxInterfaceDriver):
def plug(self, network_id, port_id, device_name, mac_address,
bridge=None, namespace=None, prefix=None):
"""This method is called by the Dhcp agent or by the L3 agent
        when a new network is created.
"""
if not ip_lib.device_exists(device_name,
self.root_helper,
namespace=namespace):
ip = ip_lib.IPWrapper(self.root_helper)
tap_name = device_name.replace(prefix or n_const.TAP_DEVICE_PREFIX,
n_const.TAP_DEVICE_PREFIX)
# Create ns_dev in a namespace if one is configured.
root_dev, ns_dev = ip.add_veth(tap_name, device_name,
namespace2=namespace)
ns_dev.link.set_address(mac_address)
# Add an interface created by ovs to the namespace.
namespace_obj = ip.ensure_namespace(namespace)
namespace_obj.add_device_to_namespace(ns_dev)
ns_dev.link.set_up()
root_dev.link.set_up()
cmd = ['mm-ctl', '--bind-port', port_id, device_name]
utils.execute(cmd, self.root_helper)
else:
LOG.info(_LI("Device %s already exists"), device_name)
def unplug(self, device_name, bridge=None, namespace=None, prefix=None):
# the port will be deleted by the dhcp agent that will call the plugin
device = ip_lib.IPDevice(device_name,
self.root_helper,
namespace)
try:
device.link.delete()
except RuntimeError:
LOG.error(_LE("Failed unplugging interface '%s'"), device_name)
LOG.debug("Unplugged interface '%s'", device_name)
ip_lib.IPWrapper(
self.root_helper, namespace).garbage_collect_namespace()
class IVSInterfaceDriver(LinuxInterfaceDriver):
"""Driver for creating an internal interface on an IVS bridge."""
DEV_NAME_PREFIX = n_const.TAP_DEVICE_PREFIX
def __init__(self, conf):
super(IVSInterfaceDriver, self).__init__(conf)
self.DEV_NAME_PREFIX = 'ns-'
def _get_tap_name(self, dev_name, prefix=None):
dev_name = dev_name.replace(prefix or self.DEV_NAME_PREFIX,
n_const.TAP_DEVICE_PREFIX)
return dev_name
def _ivs_add_port(self, device_name, port_id, mac_address):
cmd = ['ivs-ctl', 'add-port', device_name]
utils.execute(cmd, self.root_helper)
def plug(self, network_id, port_id, device_name, mac_address,
bridge=None, namespace=None, prefix=None):
"""Plug in the interface."""
if not ip_lib.device_exists(device_name,
self.root_helper,
namespace=namespace):
ip = ip_lib.IPWrapper(self.root_helper)
tap_name = self._get_tap_name(device_name, prefix)
root_dev, ns_dev = ip.add_veth(tap_name, device_name)
self._ivs_add_port(tap_name, port_id, mac_address)
ns_dev = ip.device(device_name)
ns_dev.link.set_address(mac_address)
if self.conf.network_device_mtu:
ns_dev.link.set_mtu(self.conf.network_device_mtu)
root_dev.link.set_mtu(self.conf.network_device_mtu)
if namespace:
namespace_obj = ip.ensure_namespace(namespace)
namespace_obj.add_device_to_namespace(ns_dev)
ns_dev.link.set_up()
root_dev.link.set_up()
else:
LOG.info(_LI("Device %s already exists"), device_name)
def unplug(self, device_name, bridge=None, namespace=None, prefix=None):
"""Unplug the interface."""
tap_name = self._get_tap_name(device_name, prefix)
try:
cmd = ['ivs-ctl', 'del-port', tap_name]
utils.execute(cmd, self.root_helper)
device = ip_lib.IPDevice(device_name,
self.root_helper,
namespace)
device.link.delete()
LOG.debug("Unplugged interface '%s'", device_name)
except RuntimeError:
LOG.error(_LE("Failed unplugging interface '%s'"),
device_name)
class BridgeInterfaceDriver(LinuxInterfaceDriver):
"""Driver for creating bridge interfaces."""
DEV_NAME_PREFIX = 'ns-'
def plug(self, network_id, port_id, device_name, mac_address,
bridge=None, namespace=None, prefix=None):
"""Plugin the interface."""
if not ip_lib.device_exists(device_name,
self.root_helper,
namespace=namespace):
ip = ip_lib.IPWrapper(self.root_helper)
# Enable agent to define the prefix
tap_name = device_name.replace(prefix or self.DEV_NAME_PREFIX,
n_const.TAP_DEVICE_PREFIX)
# Create ns_veth in a namespace if one is configured.
root_veth, ns_veth = ip.add_veth(tap_name, device_name,
namespace2=namespace)
ns_veth.link.set_address(mac_address)
if self.conf.network_device_mtu:
root_veth.link.set_mtu(self.conf.network_device_mtu)
ns_veth.link.set_mtu(self.conf.network_device_mtu)
root_veth.link.set_up()
ns_veth.link.set_up()
else:
LOG.info(_LI("Device %s already exists"), device_name)
def unplug(self, device_name, bridge=None, namespace=None, prefix=None):
"""Unplug the interface."""
device = ip_lib.IPDevice(device_name, self.root_helper, namespace)
try:
device.link.delete()
LOG.debug("Unplugged interface '%s'", device_name)
except RuntimeError:
LOG.error(_LE("Failed unplugging interface '%s'"),
device_name)
class MetaInterfaceDriver(LinuxInterfaceDriver):
def __init__(self, conf):
super(MetaInterfaceDriver, self).__init__(conf)
from neutronclient.v2_0 import client
self.neutron = client.Client(
username=self.conf.admin_user,
password=self.conf.admin_password,
tenant_name=self.conf.admin_tenant_name,
auth_url=self.conf.auth_url,
auth_strategy=self.conf.auth_strategy,
region_name=self.conf.auth_region,
endpoint_type=self.conf.endpoint_type
)
self.flavor_driver_map = {}
for net_flavor, driver_name in [
driver_set.split(':')
for driver_set in
self.conf.meta_flavor_driver_mappings.split(',')]:
self.flavor_driver_map[net_flavor] = self._load_driver(driver_name)
def _get_flavor_by_network_id(self, network_id):
network = self.neutron.show_network(network_id)
return network['network'][flavor.FLAVOR_NETWORK]
def _get_driver_by_network_id(self, network_id):
net_flavor = self._get_flavor_by_network_id(network_id)
return self.flavor_driver_map[net_flavor]
def _set_device_plugin_tag(self, network_id, device_name, namespace=None):
plugin_tag = self._get_flavor_by_network_id(network_id)
device = ip_lib.IPDevice(device_name, self.conf.root_helper, namespace)
device.link.set_alias(plugin_tag)
def _get_device_plugin_tag(self, device_name, namespace=None):
device = ip_lib.IPDevice(device_name, self.conf.root_helper, namespace)
return device.link.alias
def get_device_name(self, port):
driver = self._get_driver_by_network_id(port.network_id)
return driver.get_device_name(port)
def plug(self, network_id, port_id, device_name, mac_address,
bridge=None, namespace=None, prefix=None):
driver = self._get_driver_by_network_id(network_id)
ret = driver.plug(network_id, port_id, device_name, mac_address,
bridge=bridge, namespace=namespace, prefix=prefix)
self._set_device_plugin_tag(network_id, device_name, namespace)
return ret
def unplug(self, device_name, bridge=None, namespace=None, prefix=None):
plugin_tag = self._get_device_plugin_tag(device_name, namespace)
driver = self.flavor_driver_map[plugin_tag]
return driver.unplug(device_name, bridge, namespace, prefix)
def _load_driver(self, driver_provider):
LOG.debug("Driver location: %s", driver_provider)
plugin_klass = importutils.import_class(driver_provider)
return plugin_klass(self.conf)
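# A minimal parsing sketch (added for illustration; not part of upstream
# Neutron). meta_flavor_driver_mappings is a comma-separated list of
# '<flavor>:<driver class path>' pairs, split exactly the way
# MetaInterfaceDriver.__init__ splits it; the mapping below is illustrative.
def _example_flavor_driver_mappings():
    mappings = 'ovs:neutron.agent.linux.interface.OVSInterfaceDriver'
    flavor_driver_map = dict(driver_set.split(':') for driver_set in mappings.split(','))
    assert flavor_driver_map['ovs'].endswith('OVSInterfaceDriver')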
|
chhao91/QGIS | refs/heads/master | python/ext-libs/pygments/styles/fruity.py | 364 | # -*- coding: utf-8 -*-
"""
pygments.styles.fruity
~~~~~~~~~~~~~~~~~~~~~~
pygments version of my "fruity" vim theme.
:copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.style import Style
from pygments.token import Token, Comment, Name, Keyword, \
Generic, Number, String, Whitespace
class FruityStyle(Style):
"""
    Pygments version of the "fruity" vim theme.
"""
background_color = '#111111'
highlight_color = '#333333'
styles = {
Whitespace: '#888888',
Token: '#ffffff',
Generic.Output: '#444444 bg:#222222',
Keyword: '#fb660a bold',
Keyword.Pseudo: 'nobold',
Number: '#0086f7 bold',
Name.Tag: '#fb660a bold',
Name.Variable: '#fb660a',
Comment: '#008800 bg:#0f140f italic',
Name.Attribute: '#ff0086 bold',
String: '#0086d2',
Name.Function: '#ff0086 bold',
Generic.Heading: '#ffffff bold',
Keyword.Type: '#cdcaa9 bold',
Generic.Subheading: '#ffffff bold',
Name.Constant: '#0086d2',
Comment.Preproc: '#ff0007 bold'
}
|
heuermh/cloudbiolinux | refs/heads/master | contrib/flavor/millstone/installer.py | 7 | from fabric.api import *
from fabric.contrib.files import *
import os.path
import os
INSTALLATION_PATH = "$HOME/millstone"
REPO_URL = "git@github.com:churchlab/millstone.git"
REPO_STABLE_COMMIT = "634db63de2fb275719868839bf44bd9b4b9f016e"
BOOTSTRAP_SCRIPT = """#!/bin/bash
set -x
export RUN_MASTER=%d
export RUN_WORKER=%d
export PROJECT_DIR="%s"
export MILLSTONE_PATH="%s"
export CELERY_MANAGER_PATH="%s"
export JBROWSE_PATH="%s"
export TEMP_FILE_PATH="%s"
export PASSWORD_FILE_PATH="%s"
export BOOTSTRAP_FINISH_PATH="%s"
export RUN_AS_USER="%s"
export EC2_HOSTNAME=$(curl http://169.254.169.254/latest/meta-data/public-hostname)
export MILLSTONE_WEB_PORT=8000
export CELERY_MANAGER_WEB_PORT=8001
genpw() {
python -c "import string,random;print ''.join(random.choice(string.letters + string.digits) for x in range(10))"
}
cpucount() {
grep -c ^processor /proc/cpuinfo
}
export NUM_CPU=$(cpucount)
export TIMEOUT=3600
if [ "$RUN_MASTER" == "1" ];
then
echo "Configuring master..."
export PASSWORD=$(genpw)
export RABBITMQ_USER="millstone"
export POSTGRES_DB="millstone"
export POSTGRES_USER="millstone"
echo ${PASSWORD} > ${PASSWORD_FILE_PATH}
echo "Using '${PASSWORD}' as password for PostgresSQL and RabbitMQ"
/etc/init.d/rabbitmq-server stop
cat > /etc/rabbitmq/rabbitmq-env.conf << EOF
NODE_IP_ADDRESS=0.0.0.0
NODE_PORT=5672
NODENAME="rabbit@localhost"
EOF
/etc/init.d/rabbitmq-server start
# Setup user in RabbitMQ and make it public.
rabbitmqctl change_password guest $(genpw)
RABBITMQ_USERS=$(sudo rabbitmqctl list_users -q)
if [[ "${RABBITMQ_USERS}" =~ "${RABBITMQ_USER}" ]]
then
echo "Deleting existing ${RABBITMQ_USER} user in RabbitMQ."
rabbitmqctl delete_user ${RABBITMQ_USER}
fi
rabbitmqctl add_user ${RABBITMQ_USER} ${PASSWORD}
rabbitmqctl set_permissions -p / ${RABBITMQ_USER} ".*" ".*" ".*"
/etc/init.d/rabbitmq-server restart
# Setup user and database in PostgreSQL
sudo -u postgres psql -U postgres -d postgres -c "DROP DATABASE IF EXISTS ${POSTGRES_DB};"
sudo -u postgres psql -U postgres -d postgres -c "DROP USER IF EXISTS ${POSTGRES_USER};"
sudo -u postgres psql -U postgres -d postgres -c "CREATE USER ${POSTGRES_USER} WITH PASSWORD '${PASSWORD}';"
sudo -u postgres psql -U postgres -d postgres -c "CREATE DATABASE ${POSTGRES_DB};"
sudo -u postgres psql -U postgres -d postgres -c "GRANT ALL PRIVILEGES ON DATABASE ${POSTGRES_DB} to ${POSTGRES_USER};"
sudo -u postgres psql -U postgres -d postgres -c "ALTER USER ${POSTGRES_USER} CREATEDB;"
POSTGRES_CONF=$(find /etc/postgresql -name "postgresql.conf" | head -1)
echo "listen_addresses='*'" >> ${POSTGRES_CONF}
PG_HDA_CONF=$(find /etc/postgresql -name "pg_hba.conf" | head -1)
echo "host all all 0.0.0.0/0 md5" >> ${PG_HDA_CONF}
/etc/init.d/postgresql restart
LOCAL_SETTINGS_PATH="${MILLSTONE_PATH}/conf/local_settings.py"
if [[ ! -e ${LOCAL_SETTINGS_PATH} ]]
then
echo "${LOCAL_SETTINGS_PATH} does not exist!"
fi
cat > ${LOCAL_SETTINGS_PATH} << EOF
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': '${POSTGRES_DB}',
'USER': '${POSTGRES_USER}',
'PASSWORD': '${PASSWORD}',
'HOST': '127.0.0.1',
'PORT': '5432',
'OS_USER': 'postgres',
}
}
BROKER_URL = 'amqp://${RABBITMQ_USER}:${PASSWORD}@127.0.0.1:5672//'
EOF
chown ${RUN_AS_USER} ${LOCAL_SETTINGS_PATH}
echo "New local settings in ${LOCAL_SETTINGS_PATH}:"
cat ${LOCAL_SETTINGS_PATH}
fi
chown -R ${RUN_AS_USER} ${PROJECT_DIR}
echo "Reconfiguring nginx and supervisor..."
/etc/init.d/supervisor stop
/etc/init.d/nginx stop
rm -f /etc/nginx/sites-enabled/default
rm -f /etc/nginx/sites-available/millstone
rm -f /etc/nginx/sites-enabled/millstone
rm -f /etc/supervisor/supervisord.conf
cat > /etc/nginx/sites-available/millstone << EOF
server {
server_name localhost;
location /jbrowse {
alias ${JBROWSE_PATH};
}
location /tmp {
alias ${TEMP_FILE_PATH};
}
location /static {
alias ${MILLSTONE_PATH}/main/static;
}
location / {
proxy_pass http://127.0.0.1:${MILLSTONE_WEB_PORT}/;
}
location /worker/ {
proxy_pass http://127.0.0.1:${CELERY_MANAGER_WEB_PORT}/;
}
location /worker/static {
alias ${CELERY_MANAGER_PATH}/celery_manager/static;
}
# Override timeouts. Especially relevant to upload requests.
proxy_connect_timeout ${TIMEOUT};
proxy_read_timeout ${TIMEOUT};
# No limit on upload size.
client_max_body_size 0;
}
EOF
if [ "$RUN_WORKER" == "1" ];
then
echo "Configuring worker..."
fi
ln -s /etc/nginx/sites-available/millstone /etc/nginx/sites-enabled/millstone
/etc/init.d/nginx start
cat > "/etc/supervisor/supervisord.conf" << EOF
[unix_http_server]
file=/var/run/supervisor.sock ; (the path to the socket file)
chmod=0700 ; socket file mode (default 0700)
[inet_http_server]
port=127.0.0.1:9001
[supervisord]
logfile=/var/log/supervisor/supervisord.log ; (main log file;default $CWD/supervisord.log)
pidfile=/var/run/supervisord.pid ; (supervisord pidfile;default supervisord.pid)
childlogdir=/var/log/supervisor ; ('AUTO' child log dir, default $TEMP)
[rpcinterface:supervisor]
supervisor.rpcinterface_factory = supervisor.rpcinterface:make_main_rpcinterface
[supervisorctl]
serverurl=http://127.0.0.1:9001
[program:millstone]
command=gunicorn_django -b 127.0.0.1:${MILLSTONE_WEB_PORT} --workers=$(expr ${NUM_CPU} + 1) --timeout=${TIMEOUT}
directory=${MILLSTONE_PATH}
autostart=${RUN_MASTER}
autorestart=True
redirect_stderr=True
user=${RUN_AS_USER}
[program:celery_manager]
command=gunicorn_django -b 127.0.0.1:${CELERY_MANAGER_WEB_PORT} --workers=$(expr ${NUM_CPU} + 1) --timeout=${TIMEOUT}
directory=${CELERY_MANAGER_PATH}
autostart=${RUN_WORKER}
autorestart=True
redirect_stderr=True
user=${RUN_AS_USER}
[program:celery]
command=python manage.py celery worker --loglevel=info
directory=${MILLSTONE_PATH}
autostart=${RUN_WORKER}
autorestart=True
redirect_stderr=True
user=${RUN_AS_USER}
EOF
/etc/init.d/supervisor start
update-rc.d nginx defaults
update-rc.d supervisor defaults
if [ "$RUN_MASTER" == "1" ];
then
pushd ${MILLSTONE_PATH}
sudo -u ${RUN_AS_USER} python manage.py syncdb --noinput
sudo -u ${RUN_AS_USER} python manage.py migrate
echo "y" | sudo -u ${RUN_AS_USER} python scripts/bootstrap_data.py
popd
fi
touch ${BOOTSTRAP_FINISH_PATH}
echo "Bootstrap finished!"
"""
BOOTSTRAP_INVOKER_SCRIPT = """#!/bin/bash
### BEGIN INIT INFO
# Provides: millstone_setup
# Required-Start: $all
# Required-Stop: $remote_fs $syslog
# Default-Start: 2 3 4 5
# Default-Stop: 0 1 6
### END INIT INFO
export PROJECT_DIR="%s"
export BOOTSTRAP_FINISH_PATH="%s"
export BOOTSTRAP_SCRIPT_PATH="%s"
# Carry out specific functions when asked to by the system
case "$1" in
start)
if [[ ! -e "${BOOTSTRAP_FINISH_PATH}" ]];
then
        /bin/bash ${BOOTSTRAP_SCRIPT_PATH} > "${PROJECT_DIR}/bootstrap.log" 2>&1
fi
;;
stop)
echo "/etc/init.d/millstone_setup stop"
;;
*)
echo "Usage: /etc/init.d/millstone_setup {start|stop}"
exit 1
;;
esac
exit 0
"""
def install_millstone(env):
current_user = env.safe_run("echo $USER").strip()
home_dir = env.safe_run("echo $HOME").strip()
installation_dir = env.safe_run("echo %s" % INSTALLATION_PATH).strip()
VM_MODE = {
'MASTER': os.getenv("MASTER") is not None,
'WORKER': os.getenv("WORKER") is not None,
}
env.logger.info("VM_MODE: %r" % VM_MODE)
if env.safe_exists(installation_dir):
env.logger.warning("%s already exists. Removing the directory. " % installation_dir)
with cd(installation_dir):
env.safe_sudo("rm -rf %s" % installation_dir)
env.safe_run("mkdir -p %s" % installation_dir)
env.logger.info("Installing Millstone to %s" % installation_dir)
env.logger.debug("Configure SSH to ignore host checking for Github...")
env.safe_run("mkdir -p %s" % os.path.join(home_dir, ".ssh"))
env.safe_run("chmod 700 ~/.ssh")
env.safe_run("chmod 600 ~/.ssh/*")
append("~/.ssh/config", "Host github.com\n\tStrictHostKeyChecking no\n")
with cd(installation_dir):
env.safe_run("git clone %s %s" % (REPO_URL, installation_dir))
env.safe_run("git reset --hard %s" % REPO_STABLE_COMMIT)
project_dir = installation_dir
jbrowse_dir = os.path.join(project_dir, "jbrowse")
genome_designer_dir = os.path.join(project_dir, "genome_designer")
temp_file_dir = os.path.join(genome_designer_dir, "temp_data/tmp")
celery_manager_dir = os.path.join(project_dir, "celery_manager")
config_dir = os.path.join(project_dir, "config")
assert env.safe_exists(project_dir)
assert env.safe_exists(jbrowse_dir)
assert env.safe_exists(genome_designer_dir)
assert env.safe_exists(celery_manager_dir)
assert env.safe_exists(config_dir)
with cd(project_dir):
# clone JBrowse
env.safe_run("git submodule update --init --recursive")
with cd(jbrowse_dir):
# Setup JBrowse.
env.safe_run("./setup.sh")
# Install millstone python requirements.
python_requirements_file = os.path.join(installation_dir,
'requirements', 'deploy.txt')
env.safe_sudo("pip install -r %s" % python_requirements_file)
with cd(genome_designer_dir):
env.safe_run("ln -s ../jbrowse jbrowse")
env.safe_run("./millstone_setup.py")
run_worker = 1 if VM_MODE['WORKER'] else 0
run_master = 1 if VM_MODE['MASTER'] else 0
bootstrap_script_path = os.path.join(project_dir, "bootstrap.sh")
bootstrap_finish_path = os.path.join(project_dir, "BOOTSTRAPPED")
bootstrap_script = BOOTSTRAP_SCRIPT % (run_master, run_worker,
project_dir, genome_designer_dir, celery_manager_dir, jbrowse_dir,
temp_file_dir, os.path.join(project_dir, "password.txt"),
bootstrap_finish_path, current_user)
append(bootstrap_script_path, bootstrap_script)
env.safe_run("chmod +x %s" % bootstrap_script_path)
"""
/etc/init.d/millstone_setup will check if bootstrap script has
run before, and execute bootstrap if not.
"""
append("/etc/init.d/millstone_setup", BOOTSTRAP_INVOKER_SCRIPT % (project_dir,
bootstrap_finish_path, bootstrap_script_path), use_sudo=True)
env.safe_sudo("sudo chmod +x /etc/init.d/millstone_setup")
env.safe_sudo("sudo update-rc.d millstone_setup defaults")
|
bixbydev/Bixby | refs/heads/master | google/gdata-2.0.18/tests/gdata_tests/contacts/profiles/live_client_test.py | 39 | #!/usr/bin/env python
#
# Copyright (C) 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This module is used for version 2 of the Google Data APIs.
# These tests attempt to connect to Google servers.
__author__ = 'jcgregorio@google.com (Joe Gregorio)'
import atom.core
import atom.data
import atom.http_core
import gdata.contacts.client
import gdata.data
import gdata.test_config as conf
import unittest
conf.options.register_option(conf.APPS_DOMAIN_OPTION)
conf.options.register_option(conf.TARGET_USERNAME_OPTION)
class ProfileTest(unittest.TestCase):
def setUp(self):
self.client = gdata.contacts.client.ContactsClient(domain='example.com')
if conf.options.get_value('runlive') == 'true':
self.client = gdata.contacts.client.ContactsClient(
domain=conf.options.get_value('appsdomain'))
if conf.options.get_value('ssl') == 'true':
self.client.ssl = True
conf.configure_client(self.client, 'ProfileTest',
self.client.auth_service, True)
self.client.username = conf.options.get_value('appsusername').split('@')[0]
def tearDown(self):
conf.close_client(self.client)
def test_profiles_feed(self):
if not conf.options.get_value('runlive') == 'true':
return
# Either load the recording or prepare to make a live request.
conf.configure_cache(self.client, 'test_profiles_feed')
feed = self.client.get_profiles_feed()
self.assert_(isinstance(feed, gdata.contacts.data.ProfilesFeed))
def test_profiles_query(self):
if not conf.options.get_value('runlive') == 'true':
return
# Either load the recording or prepare to make a live request.
conf.configure_cache(self.client, 'test_profiles_feed')
query = gdata.contacts.client.ProfilesQuery(max_results=1)
feed = self.client.get_profiles_feed(q=query)
self.assert_(isinstance(feed, gdata.contacts.data.ProfilesFeed))
self.assert_(len(feed.entry) == 1)
# Needs at least 2 profiles in the feed to test the start-key
# query param.
next = feed.GetNextLink()
feed = None
if next:
# Retrieve the start-key query param from the next link.
uri = atom.http_core.Uri.parse_uri(next.href)
if 'start-key' in uri.query:
query.start_key = uri.query['start-key']
feed = self.client.get_profiles_feed(q=query)
self.assert_(isinstance(feed, gdata.contacts.data.ProfilesFeed))
self.assert_(len(feed.entry) == 1)
self.assert_(feed.GetSelfLink().href == next.href)
# Compare with a feed retrieved with the next link.
next_feed = self.client.get_profiles_feed(uri=next.href)
self.assert_(len(next_feed.entry) == 1)
self.assert_(next_feed.entry[0].id.text == feed.entry[0].id.text)
def suite():
return conf.build_suite([ProfileTest])
if __name__ == '__main__':
unittest.TextTestRunner().run(suite())
|
open-synergy/stock-logistics-warehouse | refs/heads/8.0 | stock_inventory_chatter/models/__init__.py | 2 | # -*- coding: utf-8 -*-
# Copyright 2017 Eficent Business and IT Consulting Services S.L.
# (http://www.eficent.com)
# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl.html).
from . import stock
|
levilucio/SyVOLT | refs/heads/master | UMLRT2Kiltera_MM/Properties/from_thesis/HMM4_then2_ConnectedLHS.py | 1 | from core.himesis import Himesis, HimesisPreConditionPatternLHS
import uuid
class HMM4_then2_ConnectedLHS(HimesisPreConditionPatternLHS):
def __init__(self):
"""
Creates the himesis graph representing the AToM3 model HMM4_then2_ConnectedLHS.
"""
# Flag this instance as compiled now
self.is_compiled = True
super(HMM4_then2_ConnectedLHS, self).__init__(name='HMM4_then2_ConnectedLHS', num_nodes=0, edges=[])
# Set the graph attributes
self["mm__"] = ['MT_pre__FamiliesToPersonsMM', 'MoTifRule']
self["MT_constraint__"] = """#===============================================================================
# This code is executed after the nodes in the LHS have been matched.
# You can access a matched node labelled n by: PreNode('n').
# To access attribute x of node n, use: PreNode('n')['x'].
# The given constraint must evaluate to a boolean expression:
# returning True enables the rule to be applied,
# returning False forbids the rule from being applied.
#===============================================================================
return True
"""
self["name"] = """"""
self["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'MM4_then2')
# Set the node attributes
# Nodes that represent the edges of the property.
# Add the edges
self.add_edges([
])
# Add the attribute equations
self["equations"] = []
def constraint(self, PreNode, graph):
"""
Executable constraint code.
@param PreNode: Function taking an integer as parameter
and returns the node corresponding to that label.
"""
#===============================================================================
# This code is executed after the nodes in the LHS have been matched.
# You can access a matched node labelled n by: PreNode('n').
# To access attribute x of node n, use: PreNode('n')['x'].
# The given constraint must evaluate to a boolean expression:
# returning True enables the rule to be applied,
# returning False forbids the rule from being applied.
#===============================================================================
return True
|
bitslabsyr/stack | refs/heads/master | config.py | 1 | import os
# MONGODB CONFIG
AUTH = True
USERNAME = 'LOCAL_DB_USERNAME'
PASSWORD = 'LOCAL_DB_PASSWORD'
# CENTRAL MONGODB SERVER
CT_SERVER = 'CENTRAL_DB_ADDRESS'
CT_DB_NAME = 'CENTRAL_DB_NAME'
CT_AUTH = True
CT_USERNAME = 'CENTRAL_DB_USERNAME'
CT_PASSWORD = 'CENTRAL_DB_PASSWORD'
# Directory structure config vars
BASEDIR = os.path.abspath(os.path.dirname(__file__))
LOGDIR = BASEDIR + '/out'
DATADIR = BASEDIR + '/data'
# Flask config vars
DEBUG = False
SECRET_KEY = 'This key will be replaced with a secure key in production.'
CSRF_ENABLED = True
CSRF_SECRET_KEY = 'willbereplacedinproduction'
# STACKS config info
VERSION = '2.0'
DESC = 'STACKS - Social Media Tracker, Aggregator, and Collector Kit'
DEFAULT_ROLE = 0 # by default, users aren't admins
NETWORKS = ['twitter', 'facebook'] # networks that STACKS is set to work for
# Celery config info - queues & routes are added dynamically
CELERY_BROKER_URL = 'amqp://'
CELERY_RESULT_BACKEND = 'amqp'
CELERY_QUEUES = ()
CELERY_ROUTES = {}
# CELERY_REDIRECT_STDOUTS = False # We handle stdout/err/in logging ourselves, so don't want Celery taking over
|
mjem/carrie | refs/heads/master | setup.py | 1 | #!/usr/bin/env python
import os
if 'PYTHONDONTWRITEBYTECODE' in os.environ:
del os.environ['PYTHONDONTWRITEBYTECODE']
import distribute_setup
distribute_setup.use_setuptools()
from setuptools import setup, find_packages
# Utility function to read the README file.
# Used for the long_description. It's nice, because now 1) we have a top level
# README file and 2) it's easier to type in the README file than to put a raw
# string in below ...
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
execfile('carrie/__init__.py')
setup(
name='carrie',
version=__version__,
author='Mike Elson',
author_email='mike.elson@gmail.com',
url='http://github.com/mjem/carrie',
description='Remote control of media players via web or Android phone',
license='GPL',
keywords="mplayer youtube iplayer android",
packages=['carrie', 'carrie.cmd'],
#scripts=['bin/carrie'],
entry_points={
'console_scripts': [
'carrie=carrie.cmd.carrie_:main',
'stop-screensaver=carrie.cmd.stop_screensaver_:main',
]},
# packages=find_packages(exclude="example_project"),
# zip_safe=False,
install_requires=[
'Flask>=0.6.1',
],
# dependency_links=[
# 'https://github.com/disqus/django-haystack/tarball/master#egg=django-haystack',
# ],
# tests_require=tests_require,
# extras_require={'test': tests_require},
# test_suite='sentry.runtests.runtests',
#include_package_data=True,
package_dir={'carrie': 'carrie'},
# Files go into MANIFEST.in to get them in the distribution archives,
# package_data to get them installed
package_data={'carrie': ['static/*.js',
'static/*.css',
'templates/*.html',
'static/jquery-ui/js/*.js',
'static/jquery-ui/css/smoothness/*.css',
'static/jquery-ui/css/smoothness/images/*.png']},
#data_files=[('carrie/static', ['index.js'])],
long_description=read('README.md'),
classifiers=[
'Framework :: Django',
'Topic :: Software Development',
'License :: OSI Approved :: GNU General Public License (GPL)',
"Intended Audience :: End Users/Desktop",
"Operating System :: POSIX :: Linux",
"Programming Language :: Python :: 2.7",
"Topic :: Internet :: WWW/HTTP",
"Topic :: Multimedia :: Sound/Audio",
],
)
|
Lokesh-K-Haralakatta/iot-python | refs/heads/master | samples/gatewayExamples/gatewayPublishEvent.py | 1 | # *****************************************************************************
# Copyright (c) 2016 IBM Corporation and other Contributors.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Eclipse Public License v1.0
# which accompanies this distribution, and is available at
# http://www.eclipse.org/legal/epl-v10.html
#
# Contributors:
# Amit M Mangalvedkar - Initial Contribution
# *****************************************************************************
import time
import sys
import pprint
import uuid
try:
import ibmiotf.application
import ibmiotf.gateway
from ibmiotf.codecs import jsonCodec, jsonIotfCodec
except ImportError:
# This part is only required to run the sample from within the samples
# directory when the module itself is not installed.
#
# If you have the module installed, just use "import ibmiotf.application" & "import ibmiotf.device"
import os
import inspect
cmd_subfolder = os.path.realpath(os.path.abspath(os.path.join(os.path.split(inspect.getfile( inspect.currentframe() ))[0],"../../src")))
if cmd_subfolder not in sys.path:
sys.path.insert(0, cmd_subfolder)
import ibmiotf.application
import ibmiotf.gateway
def myAppEventCallback(event):
print("Received live data from %s (%s) sent at %s: hello=%s x=%s" % (event.deviceId, event.deviceType, event.timestamp.strftime("%H:%M:%S"), data['hello'], data['x']))
def myOnPublishCallback():
print("Confirmed event %s received by IBM Watson IoT Platform\n" % x)
organization = "MASKED"
gatewayType = "MASKED"
gatewayId = "MASKED"
authMethod = "token"
authToken = "MASKED"
# Initialize the device client.
try:
gatewayOptions = {"org": organization, "type": gatewayType, "id": gatewayId, "auth-method": authMethod, "auth-token": authToken}
gatewayCli = ibmiotf.gateway.Client(gatewayOptions)
except Exception as e:
print("Caught exception connecting device: %s" % str(e))
sys.exit()
gatewayCli.connect()
for x in range (0,5):
sensorValues = {"timestamp": "2016-01-20", "moisture" : 0.90, "pressure" : 1, "altitude": 23, "temperature": 273}
timestamp = sensorValues["timestamp"]
moisture = sensorValues["moisture"]
pressure = sensorValues["pressure"]
altitude = sensorValues["altitude"]
temperature = sensorValues["temperature"]
myData = "{'g' : { 'timestamp': timestamp, 'moisture': moisture, 'pressure': pressure, 'altitude': altitude, 'temperature': temperature}}"
gatewayCli.setMessageEncoderModule('json', jsonCodec)
gatewaySuccess = gatewayCli.publishGatewayEvent("greeting", "json", myData, qos=1, on_publish=myOnPublishCallback )
deviceSuccess = gatewayCli.publishDeviceEvent("DEVICE TYPE OF AUTO REGISTERED DEVICE", "DEVICE ID OF AUTO REGSITERED DEVICE", "greeting", "json", myData, qos=1, on_publish=myOnPublishCallback )
if not gatewaySuccess:
print("Gateway not connected to IBM Watson IoT Platform while publishing from Gateway")
if not deviceSuccess:
print("Gateway not connected to IBM Watson IoT Platform while publishing from Gateway on behalf of a device")
time.sleep(1)
# Disconnect the device and application from the cloud
gatewayCli.disconnect()
|
gauravbose/digital-menu | refs/heads/master | digimenu2/django/contrib/gis/geos/base.py | 197 | from ctypes import c_void_p
from django.contrib.gis.geos.error import GEOSException
# Trying to import GDAL libraries, if available. Have to place in
# try/except since this package may be used outside GeoDjango.
try:
from django.contrib.gis import gdal
except ImportError:
# A 'dummy' gdal module.
class GDALInfo(object):
HAS_GDAL = False
gdal = GDALInfo()
# NumPy supported?
try:
import numpy
except ImportError:
numpy = False
class GEOSBase(object):
"""
Base object for GEOS objects that has a pointer access property
that controls access to the underlying C pointer.
"""
# Initially the pointer is NULL.
_ptr = None
# Default allowed pointer type.
ptr_type = c_void_p
# Pointer access property.
def _get_ptr(self):
        # Raise an exception if the pointer isn't valid; we don't
        # want to be passing NULL pointers to routines --
        # that's very bad.
if self._ptr:
return self._ptr
else:
raise GEOSException('NULL GEOS %s pointer encountered.' % self.__class__.__name__)
def _set_ptr(self, ptr):
# Only allow the pointer to be set with pointers of the
# compatible type or None (NULL).
if ptr is None or isinstance(ptr, self.ptr_type):
self._ptr = ptr
else:
raise TypeError('Incompatible pointer type')
# Property for controlling access to the GEOS object pointers. Using
# this raises an exception when the pointer is NULL, thus preventing
# the C library from attempting to access an invalid memory location.
ptr = property(_get_ptr, _set_ptr)
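# A minimal usage sketch (added for illustration; not part of upstream
# Django): the ptr property hands back the C pointer while it is set and
# raises once it has been cleared.
def _example_ptr_guard():
    obj = GEOSBase()
    obj.ptr = c_void_p(1)
    assert obj.ptr.value == 1
    obj.ptr = None
    try:
        obj.ptr
        raise AssertionError('expected GEOSException')
    except GEOSException:
        pass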
|
kevinastone/sentry | refs/heads/master | src/sentry/templatetags/sentry_helpers.py | 11 | """
sentry.templatetags.sentry_helpers
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2014 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
# XXX: Import django-paging's template tags so we don't have to worry about
# INSTALLED_APPS
from __future__ import absolute_import
import os.path
import pytz
from collections import namedtuple
from datetime import timedelta
from paging.helpers import paginate as paginate_func
from pkg_resources import parse_version as Version
from urllib import quote
from django import template
from django.conf import settings
from django.template import RequestContext
from django.template.defaultfilters import stringfilter
from django.template.loader import render_to_string
from django.utils import timezone
from django.utils.html import escape
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext as _
import six
from six.moves import range
from sentry import options
from sentry.constants import EVENTS_PER_PAGE
from sentry.models import Organization
from sentry.web.helpers import group_is_public
from sentry.utils import to_unicode
from sentry.utils.avatar import get_gravatar_url
from sentry.utils.http import absolute_uri
from sentry.utils.javascript import to_json
from sentry.utils.safe import safe_execute
from sentry.utils.strings import truncatechars
from templatetag_sugar.register import tag
from templatetag_sugar.parser import Name, Variable, Constant, Optional
SentryVersion = namedtuple('SentryVersion', ['current', 'latest',
'update_available'])
register = template.Library()
truncatechars = register.filter(stringfilter(truncatechars))
truncatechars.is_safe = True
register.filter(to_json)
register.simple_tag(absolute_uri)
@register.filter
def pprint(value, break_after=10):
"""
break_after is used to define how often a <span> is
inserted (for soft wrapping).
"""
value = to_unicode(value)
return mark_safe(u'<span></span>'.join(
[escape(value[i:(i + break_after)]) for i in range(0, len(value), break_after)]
))
@register.filter
def is_url(value):
if not isinstance(value, six.string_types):
return False
if not value.startswith(('http://', 'https://')):
return False
if ' ' in value:
return False
return True
# seriously Django?
@register.filter
def subtract(value, amount):
return int(value) - int(amount)
@register.filter
def has_charts(group):
from sentry.utils.db import has_charts
if hasattr(group, '_state'):
db = group._state.db or 'default'
else:
db = 'default'
return has_charts(db)
@register.filter
def as_sorted(value):
return sorted(value)
@register.filter
def small_count(v):
if not v:
return 0
z = [
(1000000000, _('b')),
(1000000, _('m')),
(1000, _('k')),
]
v = int(v)
for x, y in z:
o, p = divmod(v, x)
if o:
if len(str(o)) > 2 or not p:
return '%d%s' % (o, y)
return '%.1f%s' % (v / float(x), y)
return v
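# A minimal usage sketch (added for illustration; not part of upstream
# Sentry). Assumes a configured Django environment so that ugettext can
# resolve the 'k'/'m'/'b' suffixes.
def _example_small_count():
    assert small_count(950) == 950
    assert small_count(1500) == '1.5k'
    assert small_count(2000000) == '2m'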
@register.filter
def num_digits(value):
return len(str(value))
@register.filter
def to_str(data):
return str(data)
@register.filter
def is_none(value):
return value is None
@register.simple_tag(takes_context=True)
def get_sentry_version(context):
import sentry
current = sentry.VERSION
latest = options.get('sentry:latest_version') or current
update_available = Version(latest) > Version(current)
context['sentry_version'] = SentryVersion(
current, latest, update_available
)
return ''
@register.filter
def timesince(value, now=None):
from django.template.defaultfilters import timesince
if now is None:
now = timezone.now()
if not value:
return _('never')
if value < (now - timedelta(days=5)):
return value.date()
value = (' '.join(timesince(value, now).split(' ')[0:2])).strip(',')
if value == _('0 minutes'):
return _('just now')
if value == _('1 day'):
return _('yesterday')
return value + _(' ago')
@register.filter
def duration(value):
if not value:
return '0s'
# value is assumed to be in ms
value = value / 1000.0
hours, minutes, seconds = 0, 0, 0
if value > 3600:
hours = value / 3600
value = value % 3600
if value > 60:
minutes = value / 60
value = value % 60
seconds = value
output = []
if hours:
output.append('%dh' % hours)
if minutes:
output.append('%dm' % minutes)
if seconds > 1:
output.append('%0.2fs' % seconds)
elif seconds:
output.append('%dms' % (seconds * 1000))
return ''.join(output)
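# Worked example (assumed inputs): the value is interpreted as milliseconds,
# so duration(3726000) renders 3726 seconds as '1h2m6.00s', and
# duration(500) renders as '500ms'.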
# XXX: this is taken from django-paging so that we may render
# a custom template, and not worry about INSTALLED_APPS
@tag(register, [Variable('queryset_or_list'),
Constant('from'), Variable('request'),
Optional([Constant('as'), Name('asvar')]),
Optional([Constant('per_page'), Variable('per_page')])])
def paginate(context, queryset_or_list, request, asvar=None, per_page=EVENTS_PER_PAGE):
"""{% paginate queryset_or_list from request as foo[ per_page 25] %}"""
result = paginate_func(request, queryset_or_list, per_page, endless=True)
context_instance = RequestContext(request)
paging = mark_safe(render_to_string('sentry/partial/_pager.html', result, context_instance))
result = dict(objects=result['paginator'].get('objects', []), paging=paging)
if asvar:
context[asvar] = result
return ''
return result
@tag(register, [Variable('queryset_or_list'),
Constant('from'), Variable('request'),
Optional([Constant('as'), Name('asvar')]),
Optional([Constant('per_page'), Variable('per_page')])])
def paginator(context, queryset_or_list, request, asvar=None, per_page=EVENTS_PER_PAGE):
"""{% paginator queryset_or_list from request as foo[ per_page 25] %}"""
result = paginate_func(request, queryset_or_list, per_page, endless=True)
if asvar:
context[asvar] = result
return ''
return result
@tag(register, [Constant('from'), Variable('request'),
Optional([Constant('without'), Name('withoutvar')]),
Optional([Constant('as'), Name('asvar')])])
def querystring(context, request, withoutvar, asvar=None):
params = request.GET.copy()
if withoutvar in params:
del params[withoutvar]
result = params.urlencode()
if asvar:
context[asvar] = result
return ''
return result
@register.inclusion_tag('sentry/partial/_form.html')
def render_form(form):
return {'form': form}
@register.filter
def as_bookmarks(group_list, user):
group_list = list(group_list)
if user.is_authenticated() and group_list:
project = group_list[0].project
bookmarks = set(project.bookmark_set.filter(
user=user,
group__in=group_list,
).values_list('group_id', flat=True))
else:
bookmarks = set()
for g in group_list:
yield g, g.pk in bookmarks
@register.filter
def is_bookmarked(group, user):
if user.is_authenticated():
return group.bookmark_set.filter(
user=user,
group=group,
).exists()
return False
@register.filter
def date(dt, arg=None):
from django.template.defaultfilters import date
if not timezone.is_aware(dt):
dt = dt.replace(tzinfo=timezone.utc)
return date(dt, arg)
@tag(register, [Constant('for'), Variable('user'),
Constant('from'), Variable('project'),
Constant('as'), Name('asvar')])
def get_project_dsn(context, user, project, asvar):
from sentry.models import ProjectKey
if not user.is_authenticated():
context[asvar] = None
return ''
try:
key = ProjectKey.objects.filter(project=project)[0]
except IndexError:
context[asvar] = None
else:
context[asvar] = key.get_dsn()
return ''
# Adapted from http://en.gravatar.com/site/implement/images/django/
# The "mm" default is for the grey, "mystery man" icon. See:
# http://en.gravatar.com/site/implement/images/
@tag(register, [Variable('email'),
Optional([Constant('size'), Variable('size')]),
Optional([Constant('default'), Variable('default')])])
def gravatar_url(context, email, size=None, default='mm'):
return get_gravatar_url(email, size, default)
@register.filter
def trim_schema(value):
return value.split('//', 1)[-1]
@register.filter
def with_metadata(group_list, request):
group_list = list(group_list)
if request.user.is_authenticated() and group_list:
project = group_list[0].project
bookmarks = set(project.bookmark_set.filter(
user=request.user,
group__in=group_list,
).values_list('group_id', flat=True))
else:
bookmarks = set()
# TODO(dcramer): this is obsolete and needs to pull from the tsdb backend
historical_data = {}
for g in group_list:
yield g, {
'is_bookmarked': g.pk in bookmarks,
'historical_data': ','.join(str(x[1]) for x in historical_data.get(g.id, [])),
}
@register.inclusion_tag('sentry/plugins/bases/tag/widget.html')
def render_tag_widget(group, tag):
cutoff = timezone.now() - timedelta(days=7)
return {
'title': tag['label'],
'tag_name': tag['key'],
'group': group,
}
@register.simple_tag
def percent(value, total):
if not (value and total):
return 0
return int(int(value) / float(total) * 100)
@register.filter
def titlize(value):
return value.replace('_', ' ').title()
@register.filter
def split(value, delim=''):
return value.split(delim)
@register.filter
def get_rendered_interfaces(event, request):
interface_list = []
is_public = group_is_public(event.group, request.user)
for interface in event.interfaces.itervalues():
html = safe_execute(interface.to_html, event, is_public=is_public)
if not html:
continue
interface_list.append((interface, mark_safe(html)))
return sorted(interface_list, key=lambda x: x[0].get_display_score(), reverse=True)
@register.inclusion_tag('sentry/partial/github_button.html')
def github_button(user, repo):
return {
'user': user,
'repo': repo,
}
@register.inclusion_tag('sentry/partial/data_values.html')
def render_values(value, threshold=5, collapse_to=3):
if isinstance(value, (list, tuple)):
value = list(enumerate(value))
is_list, is_dict = bool(value), True
else:
is_list, is_dict = False, isinstance(value, dict)
if is_dict:
value = sorted(value.iteritems())
context = {
'is_dict': is_dict,
'is_list': is_list,
'threshold': threshold,
'collapse_to': collapse_to,
}
if is_dict:
value_len = len(value)
over_threshold = value_len > threshold
if over_threshold:
context.update({
'over_threshold': over_threshold,
'hidden_values': value_len - collapse_to,
'value_before_expand': value[:collapse_to],
'value_after_expand': value[collapse_to:],
})
else:
context.update({
'over_threshold': over_threshold,
'hidden_values': 0,
'value_before_expand': value,
'value_after_expand': [],
})
else:
context['value'] = value
return context
@register.filter
def urlquote(value, safe=''):
return quote(value.encode('utf8'), safe)
@register.filter
def basename(value):
return os.path.basename(value)
@register.filter
def user_display_name(user):
return user.first_name or user.username
@register.simple_tag(takes_context=True)
def localized_datetime(context, dt, format='DATETIME_FORMAT'):
request = context['request']
timezone = getattr(request, 'timezone', None)
if not timezone:
timezone = pytz.timezone(settings.SENTRY_DEFAULT_TIME_ZONE)
dt = dt.astimezone(timezone)
return date(dt, format)
@register.filter
def list_organizations(user):
return Organization.objects.get_for_user(user)
@register.filter
def count_pending_access_requests(organization):
from sentry.models import OrganizationAccessRequest
return OrganizationAccessRequest.objects.filter(
team__organization=organization,
).count()
|
shubhdev/openedx | refs/heads/master | lms/djangoapps/verify_student/models.py | 20 | # -*- coding: utf-8 -*-
"""
Models for Student Identity Verification
This is where we put any models relating to establishing the real-life identity
of a student over a period of time. Right now, the only models are the abstract
`PhotoVerification`, and its one concrete implementation
`SoftwareSecurePhotoVerification`. The hope is to keep as much of the
photo verification process as generic as possible.
"""
import functools
import json
import logging
from datetime import datetime, timedelta
from email.utils import formatdate
import pytz
import requests
import uuid
from lazy import lazy
from opaque_keys.edx.keys import UsageKey
from django.conf import settings
from django.contrib.auth.models import User
from django.core.exceptions import ObjectDoesNotExist
from django.core.urlresolvers import reverse
from django.db import models
from django.utils.translation import ugettext as _, ugettext_lazy
from boto.s3.connection import S3Connection
from boto.s3.key import Key
from config_models.models import ConfigurationModel
from course_modes.models import CourseMode
from model_utils.models import StatusModel
from model_utils import Choices
from verify_student.ssencrypt import (
random_aes_key, encrypt_and_encode,
generate_signed_message, rsa_encrypt
)
from xmodule.modulestore.django import modulestore
from xmodule.modulestore.exceptions import ItemNotFoundError
from xmodule_django.models import CourseKeyField
log = logging.getLogger(__name__)
def generateUUID(): # pylint: disable=invalid-name
""" Utility function; generates UUIDs """
return str(uuid.uuid4())
class VerificationException(Exception):
pass
def status_before_must_be(*valid_start_statuses):
"""
Helper decorator with arguments to make sure that an object with a `status`
attribute is in one of a list of acceptable status states before a method
is called. You could use it in a class definition like:
@status_before_must_be("submitted", "approved", "denied")
def refund_user(self, user_id):
# Do logic here...
If the object has a status that is not listed when the `refund_user` method
is invoked, it will throw a `VerificationException`. This is just to avoid
distracting boilerplate when looking at a Model that needs to go through a
workflow process.
"""
def decorator_func(func):
"""
Decorator function that gets returned
"""
@functools.wraps(func)
def with_status_check(obj, *args, **kwargs):
if obj.status not in valid_start_statuses:
exception_msg = (
u"Error calling {} {}: status is '{}', must be one of: {}"
).format(func, obj, obj.status, valid_start_statuses)
raise VerificationException(exception_msg)
return func(obj, *args, **kwargs)
return with_status_check
return decorator_func
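# A minimal sketch of the guard in action (the class below is hypothetical,
# not part of this module):
#
#   class Demo(object):
#       status = "created"
#
#       @status_before_must_be("ready")
#       def go(self):
#           return "ok"
#
#   Demo().go()  # raises VerificationException, since status is 'created'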
class PhotoVerification(StatusModel):
"""
Each PhotoVerification represents a Student's attempt to establish
their identity by uploading a photo of themselves and a picture ID. An
attempt actually has a number of fields that need to be filled out at
different steps of the approval process. While it's useful as a Django Model
for the querying facilities, **you should only edit a `PhotoVerification`
object through the methods provided**. Initialize them with a user:
attempt = PhotoVerification(user=user)
We track this attempt through various states:
`created`
Initial creation and state we're in after uploading the images.
`ready`
The user has uploaded their images and checked that they can read the
images. There's a separate state here because it may be the case that we
don't actually submit this attempt for review until payment is made.
`submitted`
Submitted for review. The review may be done by a staff member or an
external service. The user cannot make changes once in this state.
`must_retry`
We submitted this, but there was an error on submission (i.e. we did not
get a 200 when we POSTed to Software Secure)
`approved`
An admin or an external service has confirmed that the user's photo and
photo ID match up, and that the photo ID's name matches the user's.
`denied`
The request has been denied. See `error_msg` for details on why. An
admin might later override this and change to `approved`, but the
student cannot re-open this attempt -- they have to create another
attempt and submit it instead.
Because this Model inherits from StatusModel, we can also do things like::
attempt.status == PhotoVerification.STATUS.created
attempt.status == "created"
pending_requests = PhotoVerification.submitted.all()
"""
######################## Fields Set During Creation ########################
# See class docstring for description of status states
STATUS = Choices('created', 'ready', 'submitted', 'must_retry', 'approved', 'denied')
user = models.ForeignKey(User, db_index=True)
# They can change their name later on, so we want to copy the value here so
# we always preserve what it was at the time they requested. We only copy
# this value during the mark_ready() step. Prior to that, you should be
# displaying the user's name from their user.profile.name.
name = models.CharField(blank=True, max_length=255)
# Where we place the uploaded image files (e.g. S3 URLs)
face_image_url = models.URLField(blank=True, max_length=255)
photo_id_image_url = models.URLField(blank=True, max_length=255)
# Randomly generated UUID so that external services can post back the
# results of checking a user's photo submission without us exposing actual
# user IDs or something too easily guessable.
receipt_id = models.CharField(
db_index=True,
default=lambda: generateUUID(),
max_length=255,
)
created_at = models.DateTimeField(auto_now_add=True, db_index=True)
updated_at = models.DateTimeField(auto_now=True, db_index=True)
# Indicates whether or not a user wants to see the verification status
# displayed on their dash. Right now, only relevant for allowing students
# to "dismiss" a failed midcourse reverification message
# TODO: This field is deprecated.
display = models.BooleanField(db_index=True, default=True)
######################## Fields Set When Submitting ########################
submitted_at = models.DateTimeField(null=True, db_index=True)
#################### Fields Set During Approval/Denial #####################
# If the review was done by an internal staff member, mark who it was.
reviewing_user = models.ForeignKey(
User,
db_index=True,
default=None,
null=True,
related_name="photo_verifications_reviewed"
)
# Mark the name of the service used to evaluate this attempt (e.g
# Software Secure).
reviewing_service = models.CharField(blank=True, max_length=255)
# If status is "denied", this should contain text explaining why.
error_msg = models.TextField(blank=True)
# Non-required field. External services can add any arbitrary codes as time
# goes on. We don't try to define an exhaustive list -- this is just
# capturing it so that we can later query for the common problems.
error_code = models.CharField(blank=True, max_length=50)
class Meta(object): # pylint: disable=missing-docstring
abstract = True
ordering = ['-created_at']
##### Methods listed in the order you'd typically call them
@classmethod
def _earliest_allowed_date(cls):
"""
Returns the earliest allowed date given the settings
"""
days_good_for = settings.VERIFY_STUDENT["DAYS_GOOD_FOR"]
return datetime.now(pytz.UTC) - timedelta(days=days_good_for)
@classmethod
def user_is_verified(cls, user, earliest_allowed_date=None):
"""
Return whether or not a user has satisfactorily proved their identity.
Depending on the policy, this can expire after some period of time, so
a user might have to renew periodically.
This will check for the user's *initial* verification.
"""
return cls.objects.filter(
user=user,
status="approved",
created_at__gte=(earliest_allowed_date
or cls._earliest_allowed_date())
).exists()
@classmethod
def verification_valid_or_pending(cls, user, earliest_allowed_date=None, queryset=None):
"""
Check whether the user has a complete verification attempt that is
or *might* be good. This means that it's approved, been submitted,
or would have been submitted but had a non-user error when it was
being submitted.
It's basically any situation in which the user has signed off on
the contents of the attempt, and we have not yet received a denial.
This will check for the user's *initial* verification.
Arguments:
user:
earliest_allowed_date: earliest allowed date given in the
settings
queryset: If a queryset is provided, that will be used instead
of hitting the database.
Returns:
queryset: queryset of 'PhotoVerification' sorted by 'created_at' in
descending order.
"""
valid_statuses = ['submitted', 'approved', 'must_retry']
if queryset is None:
queryset = cls.objects.filter(user=user)
return queryset.filter(
status__in=valid_statuses,
created_at__gte=(
earliest_allowed_date
or cls._earliest_allowed_date()
)
).order_by('-created_at')
@classmethod
def user_has_valid_or_pending(cls, user, earliest_allowed_date=None, queryset=None):
"""
Check whether the user has an active or pending verification attempt
Returns:
bool: True or False according to existence of valid verifications
"""
return cls.verification_valid_or_pending(user, earliest_allowed_date, queryset).exists()
@classmethod
def active_for_user(cls, user):
"""
Return the most recent PhotoVerification that is marked ready (i.e. the
user has said they're set, but we haven't submitted anything yet).
This checks for the original verification.
"""
# This should only be one at the most, but just in case we create more
# by mistake, we'll grab the most recently created one.
active_attempts = cls.objects.filter(user=user, status='ready').order_by('-created_at')
if active_attempts:
return active_attempts[0]
else:
return None
@classmethod
def user_status(cls, user):
"""
Returns the status of the user based on their past verification attempts
If no such verification exists, returns 'none'
If verification has expired, returns 'expired'
If the verification has been approved, returns 'approved'
If the verification process is still ongoing, returns 'pending'
If the verification has been denied and the user must resubmit photos, returns 'must_reverify'
This checks initial verifications
"""
status = 'none'
error_msg = ''
if cls.user_is_verified(user):
status = 'approved'
elif cls.user_has_valid_or_pending(user):
# user_has_valid_or_pending does include 'approved', but if we are
# here, we know that the attempt is still pending
status = 'pending'
else:
# we need to check the most recent attempt to see if we need to ask them to do
# a retry
try:
attempts = cls.objects.filter(user=user).order_by('-updated_at')
attempt = attempts[0]
except IndexError:
# we return 'none'
return ('none', error_msg)
if attempt.created_at < cls._earliest_allowed_date():
return (
'expired',
_("Your {platform_name} verification has expired.").format(platform_name=settings.PLATFORM_NAME)
)
# If someone is denied their original verification attempt, they can try to reverify.
if attempt.status == 'denied':
status = 'must_reverify'
if attempt.error_msg:
error_msg = attempt.parsed_error_msg()
return (status, error_msg)
@classmethod
def verification_for_datetime(cls, deadline, candidates):
"""Find a verification in a set that applied during a particular datetime.
A verification is considered "active" during a datetime if:
1) The verification was created before the datetime, and
2) The verification is set to expire after the datetime.
Note that verification status is *not* considered here,
just the start/expire dates.
If multiple verifications were active at the deadline,
returns the most recently created one.
Arguments:
deadline (datetime): The datetime at which the verification applied.
If `None`, then return the most recently created candidate.
candidates (list of `PhotoVerification`s): Potential verifications to search through.
Returns:
PhotoVerification: A photo verification that was active at the deadline.
If no verification was active, return None.
"""
if len(candidates) == 0:
return None
# If there's no deadline, then return the most recently created verification
if deadline is None:
return candidates[0]
# Otherwise, look for a verification that was in effect at the deadline,
# preferring recent verifications.
# If no such verification is found, implicitly return `None`
for verification in candidates:
if verification.active_at_datetime(deadline):
return verification
@property
def expiration_datetime(self):
"""Datetime that the verification will expire. """
days_good_for = settings.VERIFY_STUDENT["DAYS_GOOD_FOR"]
return self.created_at + timedelta(days=days_good_for)
def active_at_datetime(self, deadline):
"""Check whether the verification was active at a particular datetime.
Arguments:
deadline (datetime): The date at which the verification was active
(created before and expired after).
Returns:
bool
"""
return (
self.created_at < deadline and
self.expiration_datetime > deadline
)
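# Illustration (assuming DAYS_GOOD_FOR = 365): an attempt created on
# 2015-01-01 satisfies active_at_datetime(...) for any deadline strictly
# between 2015-01-01 and 2016-01-01, and returns False outside that window.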
def parsed_error_msg(self):
"""
Sometimes, the error message we've received needs to be parsed into
something more human readable
The default behavior is to return the current error message as is.
"""
return self.error_msg
@status_before_must_be("created")
def upload_face_image(self, img):
raise NotImplementedError
@status_before_must_be("created")
def upload_photo_id_image(self, img):
raise NotImplementedError
@status_before_must_be("created")
def mark_ready(self):
"""
Mark that the user data in this attempt is correct. In order to
succeed, the user must have uploaded the necessary images
(`face_image_url`, `photo_id_image_url`). This method will also copy
their name from their user profile. Prior to marking it ready, we read
this value directly from their profile, since they're free to change it.
This often happens because people put in less formal versions of their
name on signup, but realize they want something different to go on a
formal document.
Valid attempt statuses when calling this method:
`created`
Status after method completes: `ready`
Other fields that will be set by this method:
`name`
State Transitions:
`created` → `ready`
This is what happens when the user confirms to us that the pictures
they uploaded are good. Note that we don't actually do a submission
anywhere yet.
"""
# At any point prior to this, they can change their names via their
# student dashboard. But at this point, we lock the value into the
# attempt.
self.name = self.user.profile.name
self.status = "ready"
self.save()
@status_before_must_be("must_retry", "submitted", "approved", "denied")
def approve(self, user_id=None, service=""):
"""
Approve this attempt. `user_id`
Valid attempt statuses when calling this method:
`submitted`, `approved`, `denied`
Status after method completes: `approved`
Other fields that will be set by this method:
`reviewed_by_user_id`, `reviewed_by_service`, `error_msg`
State Transitions:
`submitted` → `approved`
This is the usual flow, whether initiated by a staff user or an
external validation service.
`approved` → `approved`
No-op. First one to approve it wins.
`denied` → `approved`
This might happen if a staff member wants to override a decision
made by an external service or another staff member (say, in
response to a support request). In this case, the previous values
of `reviewed_by_user_id` and `reviewed_by_service` will be changed
to whoever is doing the approving, and `error_msg` will be reset.
The only record that this record was ever denied would be in our
logs. This should be a relatively rare occurrence.
"""
# If someone approves an outdated version of this, the first one wins
if self.status == "approved":
return
log.info(u"Verification for user '{user_id}' approved by '{reviewer}'.".format(
user_id=self.user, reviewer=user_id
))
self.error_msg = "" # reset, in case this attempt was denied before
self.error_code = "" # reset, in case this attempt was denied before
self.reviewing_user = user_id
self.reviewing_service = service
self.status = "approved"
self.save()
@status_before_must_be("must_retry", "submitted", "approved", "denied")
def deny(self,
error_msg,
error_code="",
reviewing_user=None,
reviewing_service=""):
"""
Deny this attempt.
Valid attempt statuses when calling this method:
`submitted`, `approved`, `denied`
Status after method completes: `denied`
Other fields that will be set by this method:
`reviewed_by_user_id`, `reviewed_by_service`, `error_msg`,
`error_code`
State Transitions:
`submitted` → `denied`
This is the usual flow, whether initiated by a staff user or an
external validation service.
`approved` → `denied`
This might happen if a staff member wants to override a decision
made by an external service or another staff member, or just correct
a mistake made during the approval process. In this case, the
previous values of `reviewed_by_user_id` and `reviewed_by_service`
will be changed to whoever is doing the denying. The only record
that this record was ever approved would be in our logs. This should
be a relatively rare occurrence.
`denied` → `denied`
Update the error message and reviewing_user/reviewing_service. Just
lets you amend the error message in case there were additional
details to be made.
"""
log.info(u"Verification for user '{user_id}' denied by '{reviewer}'.".format(
user_id=self.user, reviewer=reviewing_user
))
self.error_msg = error_msg
self.error_code = error_code
self.reviewing_user = reviewing_user
self.reviewing_service = reviewing_service
self.status = "denied"
self.save()
@status_before_must_be("must_retry", "submitted", "approved", "denied")
def system_error(self,
error_msg,
error_code="",
reviewing_user=None,
reviewing_service=""):
"""
Mark that this attempt could not be completed because of a system error.
Status should be moved to `must_retry`. For example, if Software Secure
reported to us that they couldn't process our submission because they
couldn't decrypt the image we sent.
"""
if self.status in ["approved", "denied"]:
return # If we were already approved or denied, just leave it.
self.error_msg = error_msg
self.error_code = error_code
self.reviewing_user = reviewing_user
self.reviewing_service = reviewing_service
self.status = "must_retry"
self.save()
class SoftwareSecurePhotoVerification(PhotoVerification):
"""
Model to verify identity using a service provided by Software Secure. Much
of the logic is inherited from `PhotoVerification`, but this class
encrypts the photos.
Software Secure (http://www.softwaresecure.com/) is a remote proctoring
service that also does identity verification. A student uses their webcam
to upload two images: one of their face, one of a photo ID. Due to the
sensitive nature of the data, the following security precautions are taken:
1. The snapshot of their face is encrypted using AES-256 in CBC mode. All
face photos are encrypted with the same key, and this key is known to
both Software Secure and edx-platform.
2. The snapshot of a user's photo ID is also encrypted using AES-256, but
the key is randomly generated using pycrypto's Random. Every verification
attempt has a new key. The AES key is then encrypted using a public key
provided by Software Secure. We store only the RSA-encrypted AES key.
Since edx-platform does not have Software Secure's private RSA key, it
means that we can no longer even read the photo ID.
3. The encrypted photos are base64 encoded and stored in an S3 bucket that
edx-platform does not have read access to.
Note: this model handles *initial* verifications (which you must perform
at the time you register for a verified cert).
"""
# This is a base64.urlsafe_encode(rsa_encrypt(photo_id_aes_key), ss_pub_key)
# So first we generate a random AES-256 key to encrypt our photo ID with.
# Then we RSA encrypt it with Software Secure's public key. Then we base64
# encode that. The result is saved here. Actual expected length is 344.
photo_id_key = models.TextField(max_length=1024)
IMAGE_LINK_DURATION = 5 * 60 * 60 * 24 # 5 days in seconds
@classmethod
def original_verification(cls, user):
"""
Returns the most current SoftwareSecurePhotoVerification object associated with the user.
"""
query = cls.objects.filter(user=user).order_by('-updated_at')
return query[0]
@classmethod
def get_initial_verification(cls, user):
"""Get initial verification for a user
Arguments:
user(User): user object
Return:
SoftwareSecurePhotoVerification (object)
"""
init_verification = cls.objects.filter(user=user, status__in=["submitted", "approved"])
return init_verification.latest('created_at') if init_verification.exists() else None
@status_before_must_be("created")
def upload_face_image(self, img_data):
"""
Upload an image of the user's face to S3. `img_data` should be a raw
bytestream of a PNG image. This method will take the data, encrypt it
using our FACE_IMAGE_AES_KEY, encode it with base64 and save it to S3.
Yes, encoding it to base64 adds compute and disk usage without much real
benefit, but that's what the other end of this API is expecting to get.
"""
# Skip this whole thing if we're running acceptance tests or if we're
# developing and aren't interested in working on student identity
# verification functionality. If you do want to work on it, you have to
# explicitly enable these in your private settings.
if settings.FEATURES.get('AUTOMATIC_VERIFY_STUDENT_IDENTITY_FOR_TESTING'):
return
aes_key_str = settings.VERIFY_STUDENT["SOFTWARE_SECURE"]["FACE_IMAGE_AES_KEY"]
aes_key = aes_key_str.decode("hex")
s3_key = self._generate_s3_key("face")
s3_key.set_contents_from_string(encrypt_and_encode(img_data, aes_key))
@status_before_must_be("created")
def fetch_photo_id_image(self):
"""
Find the user's photo ID image, which was submitted with their original verification.
The image has already been encrypted and stored in s3, so we just need to find that
location
"""
if settings.FEATURES.get('AUTOMATIC_VERIFY_STUDENT_IDENTITY_FOR_TESTING'):
return
self.photo_id_key = self.original_verification(self.user).photo_id_key
self.save()
@status_before_must_be("created")
def upload_photo_id_image(self, img_data):
"""
Upload the user's photo ID image to S3. `img_data` should be a raw
bytestream of a PNG image. This method will take the data, encrypt it
using a randomly generated AES key, encode it with base64 and save it to
S3. The random key is also encrypted using Software Secure's public RSA
key and stored in our `photo_id_key` field.
Yes, encoding it to base64 adds compute and disk usage without much real
benefit, but that's what the other end of this API is expecting to get.
"""
# Skip this whole thing if we're running acceptance tests or if we're
# developing and aren't interested in working on student identity
# verification functionality. If you do want to work on it, you have to
# explicitly enable these in your private settings.
if settings.FEATURES.get('AUTOMATIC_VERIFY_STUDENT_IDENTITY_FOR_TESTING'):
return
aes_key = random_aes_key()
rsa_key_str = settings.VERIFY_STUDENT["SOFTWARE_SECURE"]["RSA_PUBLIC_KEY"]
rsa_encrypted_aes_key = rsa_encrypt(aes_key, rsa_key_str)
# Upload this to S3
s3_key = self._generate_s3_key("photo_id")
s3_key.set_contents_from_string(encrypt_and_encode(img_data, aes_key))
# Update our record fields
self.photo_id_key = rsa_encrypted_aes_key.encode('base64')
self.save()
@status_before_must_be("must_retry", "ready", "submitted")
def submit(self):
"""
Submit our verification attempt to Software Secure for validation. This
will set our status to "submitted" if the post is successful, and
"must_retry" if the post fails.
"""
try:
response = self.send_request()
if response.ok:
self.submitted_at = datetime.now(pytz.UTC)
self.status = "submitted"
self.save()
else:
self.status = "must_retry"
self.error_msg = response.text
self.save()
except Exception as error:
log.exception(error)
self.status = "must_retry"
self.save()
def parsed_error_msg(self):
"""
Parse the error messages we receive from SoftwareSecure
Error messages are written in the form:
`[{"photoIdReasons": ["Not provided"]}]`
Returns a list of error messages
"""
# Translates the category names and messages into something more human readable
message_dict = {
("photoIdReasons", "Not provided"): _("No photo ID was provided."),
("photoIdReasons", "Text not clear"): _("We couldn't read your name from your photo ID image."),
("generalReasons", "Name mismatch"): _("The name associated with your account and the name on your ID do not match."),
("userPhotoReasons", "Image not clear"): _("The image of your face was not clear."),
("userPhotoReasons", "Face out of view"): _("Your face was not visible in your self-photo."),
}
try:
msg_json = json.loads(self.error_msg)
msg_dict = msg_json[0]
msg = []
for category in msg_dict:
# find the messages associated with this category
category_msgs = msg_dict[category]
for category_msg in category_msgs:
msg.append(message_dict[(category, category_msg)])
return u", ".join(msg)
except (ValueError, KeyError):
# if we can't parse the message as JSON or the category doesn't
# match one of our known categories, show a generic error
log.error('PhotoVerification: Error parsing this error message: %s', self.error_msg)
return _("There was an error verifying your ID photos.")
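# Example (assumed stored message): with
#   error_msg = '[{"photoIdReasons": ["Not provided"]}]'
# parsed_error_msg() returns u"No photo ID was provided."; an unknown
# category or malformed JSON falls through to the generic error above.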
def image_url(self, name):
"""
We dynamically generate this, since we want the expiration clock to
start when the message is created, not when the record is created.
"""
s3_key = self._generate_s3_key(name)
return s3_key.generate_url(self.IMAGE_LINK_DURATION)
def _generate_s3_key(self, prefix):
"""
Generates a key for an s3 bucket location
Example: face/4dd1add9-6719-42f7-bea0-115c008c4fca
"""
conn = S3Connection(
settings.VERIFY_STUDENT["SOFTWARE_SECURE"]["AWS_ACCESS_KEY"],
settings.VERIFY_STUDENT["SOFTWARE_SECURE"]["AWS_SECRET_KEY"]
)
bucket = conn.get_bucket(settings.VERIFY_STUDENT["SOFTWARE_SECURE"]["S3_BUCKET"])
key = Key(bucket)
key.key = "{}/{}".format(prefix, self.receipt_id)
return key
def _encrypted_user_photo_key_str(self):
"""
Software Secure needs to have both UserPhoto and PhotoID decrypted in
the same manner. So even though this is going to be the same for every
request, we're also using RSA encryption to encrypt the AES key for
faces.
"""
face_aes_key_str = settings.VERIFY_STUDENT["SOFTWARE_SECURE"]["FACE_IMAGE_AES_KEY"]
face_aes_key = face_aes_key_str.decode("hex")
rsa_key_str = settings.VERIFY_STUDENT["SOFTWARE_SECURE"]["RSA_PUBLIC_KEY"]
rsa_encrypted_face_aes_key = rsa_encrypt(face_aes_key, rsa_key_str)
return rsa_encrypted_face_aes_key.encode("base64")
def create_request(self):
"""return headers, body_dict"""
access_key = settings.VERIFY_STUDENT["SOFTWARE_SECURE"]["API_ACCESS_KEY"]
secret_key = settings.VERIFY_STUDENT["SOFTWARE_SECURE"]["API_SECRET_KEY"]
scheme = "https" if settings.HTTPS == "on" else "http"
callback_url = "{}://{}{}".format(
scheme, settings.SITE_NAME, reverse('verify_student_results_callback')
)
body = {
"EdX-ID": str(self.receipt_id),
"ExpectedName": self.name,
"PhotoID": self.image_url("photo_id"),
"PhotoIDKey": self.photo_id_key,
"SendResponseTo": callback_url,
"UserPhoto": self.image_url("face"),
"UserPhotoKey": self._encrypted_user_photo_key_str(),
}
headers = {
"Content-Type": "application/json",
"Date": formatdate(timeval=None, localtime=False, usegmt=True)
}
_message, _sig, authorization = generate_signed_message(
"POST", headers, body, access_key, secret_key
)
headers['Authorization'] = authorization
return headers, body
def request_message_txt(self):
"""
This is the body of the request we send across. This is never actually
used in the code, but exists for debugging purposes -- you can call
`print attempt.request_message_txt()` on the console and get a readable
rendering of the request that would be sent across, without actually
sending anything.
"""
headers, body = self.create_request()
header_txt = "\n".join(
"{}: {}".format(h, v) for h, v in sorted(headers.items())
)
body_txt = json.dumps(body, indent=2, sort_keys=True, ensure_ascii=False).encode('utf-8')
return header_txt + "\n\n" + body_txt
def send_request(self):
"""
Assembles a submission to Software Secure and sends it via HTTPS.
Returns a request.Response() object with the reply we get from SS.
"""
# If AUTOMATIC_VERIFY_STUDENT_IDENTITY_FOR_TESTING is True, we want to
# skip posting anything to Software Secure. We actually don't even
# create the message because that would require encryption and message
# signing that rely on settings.VERIFY_STUDENT values that aren't set
# in dev. So we just pretend like we successfully posted
if settings.FEATURES.get('AUTOMATIC_VERIFY_STUDENT_IDENTITY_FOR_TESTING'):
fake_response = requests.Response()
fake_response.status_code = 200
return fake_response
headers, body = self.create_request()
response = requests.post(
settings.VERIFY_STUDENT["SOFTWARE_SECURE"]["API_URL"],
headers=headers,
data=json.dumps(body, indent=2, sort_keys=True, ensure_ascii=False).encode('utf-8'),
verify=False
)
log.debug("Sent request to Software Secure for {}".format(self.receipt_id))
log.debug("Headers:\n{}\n\n".format(headers))
log.debug("Body:\n{}\n\n".format(body))
log.debug("Return code: {}".format(response.status_code))
log.debug("Return message:\n\n{}\n\n".format(response.text))
return response
@classmethod
def submit_faceimage(cls, user, face_image, photo_id_key):
"""Submit the face image to SoftwareSecurePhotoVerification.
Arguments:
user(User): user object
face_image (bytestream): raw bytestream image data
photo_id_key (str) : SoftwareSecurePhotoVerification attribute
Returns:
SoftwareSecurePhotoVerification Object
"""
b64_face_image = face_image.split(",")[1]
attempt = SoftwareSecurePhotoVerification(user=user)
attempt.upload_face_image(b64_face_image.decode('base64'))
attempt.photo_id_key = photo_id_key
attempt.mark_ready()
attempt.save()
attempt.submit()
return attempt
@classmethod
def verification_status_for_user(cls, user, course_id, user_enrollment_mode):
"""
Returns the verification status for use in grade report.
"""
if user_enrollment_mode not in CourseMode.VERIFIED_MODES:
return 'N/A'
user_is_verified = cls.user_is_verified(user)
if not user_is_verified:
return 'Not ID Verified'
else:
return 'ID Verified'
class VerificationCheckpoint(models.Model):
"""Represents a point at which a user is asked to re-verify his/her
identity.
Each checkpoint is uniquely identified by a
(course_id, checkpoint_location) tuple.
"""
course_id = CourseKeyField(max_length=255, db_index=True)
checkpoint_location = models.CharField(max_length=255)
photo_verification = models.ManyToManyField(SoftwareSecurePhotoVerification)
class Meta: # pylint: disable=missing-docstring, old-style-class
unique_together = ('course_id', 'checkpoint_location')
def __unicode__(self):
"""
Unicode representation of the checkpoint.
"""
return u"{checkpoint} in {course}".format(
checkpoint=self.checkpoint_name,
course=self.course_id
)
@lazy
def checkpoint_name(self):
"""Lazy method for getting checkpoint name of reverification block.
Return location of the checkpoint if no related assessment found in
database.
"""
checkpoint_key = UsageKey.from_string(self.checkpoint_location)
try:
checkpoint_name = modulestore().get_item(checkpoint_key).related_assessment
except ItemNotFoundError:
log.warning(
u"Verification checkpoint block with location '%s' and course id '%s' "
u"not found in database.", self.checkpoint_location, unicode(self.course_id)
)
checkpoint_name = self.checkpoint_location
return checkpoint_name
def add_verification_attempt(self, verification_attempt):
"""Add the verification attempt in M2M relation of photo_verification.
Arguments:
verification_attempt(object): SoftwareSecurePhotoVerification object
Returns:
None
"""
self.photo_verification.add(verification_attempt) # pylint: disable=no-member
def get_user_latest_status(self, user_id):
"""Get the status of the latest checkpoint attempt of the given user.
Args:
user_id(str): Id of user
Returns:
VerificationStatus object if found any else None
"""
try:
return self.checkpoint_status.filter(user_id=user_id).latest() # pylint: disable=no-member
except ObjectDoesNotExist:
return None
@classmethod
def get_verification_checkpoint(cls, course_id, checkpoint_location):
"""Get the verification checkpoint for given 'course_id' and
checkpoint name.
Arguments:
course_id(CourseKey): CourseKey
checkpoint_location(str): Verification checkpoint location
Returns:
VerificationCheckpoint object if exists otherwise None
"""
try:
return cls.objects.get(course_id=course_id, checkpoint_location=checkpoint_location)
except cls.DoesNotExist:
return None
class VerificationStatus(models.Model):
"""This model is an append-only table that represents user status changes
during the verification process.
A verification status represents a user’s progress through the verification
process for a particular checkpoint.
"""
VERIFICATION_STATUS_CHOICES = (
("submitted", "submitted"),
("approved", "approved"),
("denied", "denied"),
("error", "error")
)
checkpoint = models.ForeignKey(VerificationCheckpoint, related_name="checkpoint_status")
user = models.ForeignKey(User)
status = models.CharField(choices=VERIFICATION_STATUS_CHOICES, db_index=True, max_length=32)
timestamp = models.DateTimeField(auto_now_add=True)
response = models.TextField(null=True, blank=True)
error = models.TextField(null=True, blank=True)
class Meta(object): # pylint: disable=missing-docstring
get_latest_by = "timestamp"
verbose_name = "Verification Status"
verbose_name_plural = "Verification Statuses"
@classmethod
def add_verification_status(cls, checkpoint, user, status):
"""Create new verification status object.
Arguments:
checkpoint(VerificationCheckpoint): VerificationCheckpoint object
user(User): user object
status(str): Status from VERIFICATION_STATUS_CHOICES
Returns:
None
"""
cls.objects.create(checkpoint=checkpoint, user=user, status=status)
@classmethod
def add_status_from_checkpoints(cls, checkpoints, user, status):
"""Create new verification status objects for a user against the given
checkpoints.
Arguments:
checkpoints(list): list of VerificationCheckpoint objects
user(User): user object
status(str): Status from VERIFICATION_STATUS_CHOICES
Returns:
None
"""
for checkpoint in checkpoints:
cls.objects.create(checkpoint=checkpoint, user=user, status=status)
@classmethod
def get_user_attempts(cls, user_id, course_key, related_assessment_location):
"""
Get re-verification attempts against a user for a given 'checkpoint'
and 'course_id'.
Arguments:
user_id(str): User Id string
course_key(str): A CourseKey of a course
related_assessment_location(str): Verification checkpoint location
Returns:
Count of re-verification attempts
"""
return cls.objects.filter(
user_id=user_id,
checkpoint__course_id=course_key,
checkpoint__checkpoint_location=related_assessment_location,
status="submitted"
).count()
@classmethod
def get_location_id(cls, photo_verification):
"""Get the location ID of reverification XBlock.
Args:
photo_verification(object): SoftwareSecurePhotoVerification object
Return:
Location Id of XBlock if any else empty string
"""
try:
verification_status = cls.objects.filter(checkpoint__photo_verification=photo_verification).latest()
return verification_status.checkpoint.checkpoint_location
except cls.DoesNotExist:
return ""
class InCourseReverificationConfiguration(ConfigurationModel):
"""Configure in-course re-verification.
Enable or disable in-course re-verification feature.
When this flag is disabled, the "in-course re-verification" feature
will be disabled.
When the flag is enabled, the "in-course re-verification" feature
will be enabled.
"""
pass
class SkippedReverification(models.Model):
"""Model for tracking skipped Reverification of a user against a specific
course.
If a user has skipped a reverification checkpoint for a specific course,
then that user will no longer see the reverification link for it.
"""
user = models.ForeignKey(User)
course_id = CourseKeyField(max_length=255, db_index=True)
checkpoint = models.ForeignKey(VerificationCheckpoint, related_name="skipped_checkpoint")
created_at = models.DateTimeField(auto_now_add=True)
class Meta: # pylint: disable=missing-docstring, old-style-class
unique_together = (('user', 'course_id'),)
@classmethod
def add_skipped_reverification_attempt(cls, checkpoint, user_id, course_id):
"""Create skipped reverification object.
Arguments:
checkpoint(VerificationCheckpoint): VerificationCheckpoint object
user_id(str): User Id of currently logged in user
course_id(CourseKey): CourseKey
Returns:
None
"""
cls.objects.create(checkpoint=checkpoint, user_id=user_id, course_id=course_id)
@classmethod
def check_user_skipped_reverification_exists(cls, user, course_id):
"""Check existence of a user's skipped re-verification attempt for a
specific course.
Arguments:
user(User): user object
course_id(CourseKey): CourseKey
Returns:
Boolean
"""
return cls.objects.filter(user=user, course_id=course_id).exists()
|
3dfxsoftware/cbss-addons | refs/heads/master | l10n_in_hr_payroll/wizard/hr_yearly_salary_detail.py | 374 | #-*- coding:utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2011 OpenERP SA (<http://openerp.com>). All Rights Reserved
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from openerp.osv import fields, osv
class yearly_salary_detail(osv.osv_memory):
_name = 'yearly.salary.detail'
_description = 'Hr Salary Employee By Category Report'
_columns = {
'employee_ids': fields.many2many('hr.employee', 'payroll_emp_rel', 'payroll_id', 'employee_id', 'Employees', required=True),
'date_from': fields.date('Start Date', required=True),
'date_to': fields.date('End Date', required=True),
}
_defaults = {
'date_from': lambda *a: time.strftime('%Y-01-01'),
'date_to': lambda *a: time.strftime('%Y-%m-%d'),
}
def print_report(self, cr, uid, ids, context=None):
"""
To get the date and print the report
@param self: The object pointer.
@param cr: A database cursor
@param uid: ID of the user currently logged in
@param context: A standard dictionary
@return: return report
"""
if context is None:
context = {}
datas = {'ids': context.get('active_ids', [])}
res = self.read(cr, uid, ids, context=context)
res = res and res[0] or {}
datas.update({'form':res})
return self.pool['report'].get_action(cr, uid, ids, 'l10n_in_hr_payroll.report_hryearlysalary', data=datas, context=context)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
Reagankm/KnockKnock | refs/heads/master | venv/lib/python3.4/site-packages/matplotlib/sphinxext/plot_directive.py | 11 | """
A directive for including a matplotlib plot in a Sphinx document.
By default, in HTML output, `plot` will include a .png file with a
link to a high-res .png and .pdf. In LaTeX output, it will include a
.pdf.
The source code for the plot may be included in one of three ways:
1. **A path to a source file** as the argument to the directive::
.. plot:: path/to/plot.py
When a path to a source file is given, the content of the
directive may optionally contain a caption for the plot::
.. plot:: path/to/plot.py
This is the caption for the plot
Additionally, one may specify the name of a function to call (with
no arguments) immediately after importing the module::
.. plot:: path/to/plot.py plot_function1
2. Included as **inline content** to the directive::
.. plot::
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import numpy as np
img = mpimg.imread('_static/stinkbug.png')
imgplot = plt.imshow(img)
3. Using **doctest** syntax::
.. plot::
A plotting example:
>>> import matplotlib.pyplot as plt
>>> plt.plot([1,2,3], [4,5,6])
Options
-------
The ``plot`` directive supports the following options:
format : {'python', 'doctest'}
Specify the format of the input
include-source : bool
Whether to display the source code. The default can be changed
using the `plot_include_source` variable in conf.py
encoding : str
If this source file is in a non-UTF8 or non-ASCII encoding,
the encoding must be specified using the `:encoding:` option.
The encoding will not be inferred using the ``-*- coding -*-``
metacomment.
context : bool or str
If provided, the code will be run in the context of all
previous plot directives for which the `:context:` option was
specified. This only applies to inline code plot directives,
not those run from files. If ``:context: reset`` is specified,
the context is reset for this and future plots.
nofigs : bool
If specified, the code block will be run, but no figures will
be inserted. This is usually useful with the ``:context:``
option.
Additionally, this directive supports all of the options of the
`image` directive, except for `target` (since plot will add its own
target). These include `alt`, `height`, `width`, `scale`, `align` and
`class`.
Configuration options
---------------------
The plot directive has the following configuration options:
plot_include_source
Default value for the include-source option
plot_html_show_source_link
Whether to show a link to the source in HTML.
plot_pre_code
Code that should be executed before each plot.
plot_basedir
Base directory to which ``plot::`` file names are relative.
(If None or empty, file names are relative to the
directory where the file containing the directive is.)
plot_formats
File formats to generate. List of tuples or strings::
[(suffix, dpi), suffix, ...]
that determine the file format and the DPI. For entries whose
DPI was omitted, sensible defaults are chosen.
plot_html_show_formats
Whether to show links to the files in HTML.
plot_rcparams
A dictionary containing any non-standard rcParams that should
be applied before each plot.
plot_apply_rcparams
By default, rcParams are applied when `context` option is not used in
a plot directive. This configuration option overrides this behavior
and applies rcParams before each plot.
plot_working_directory
By default, the working directory will be changed to the directory of
the example, so the code can get at its data files, if any. Also its
path will be added to `sys.path` so it can import any helper modules
sitting beside it. This configuration option can be used to specify
a central directory (also added to `sys.path`) where data files and
helper modules for all code are located.
plot_template
Provide a customized template for preparing restructured text.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
from six.moves import xrange
import sys, os, shutil, io, re, textwrap
from os.path import relpath
import traceback
if not six.PY3:
import cStringIO
from docutils.parsers.rst import directives
from docutils.parsers.rst.directives.images import Image
align = Image.align
import sphinx
sphinx_version = sphinx.__version__.split(".")
# The split is necessary for sphinx beta versions where the string is
# '6b1'
sphinx_version = tuple([int(re.split('[^0-9]', x)[0])
for x in sphinx_version[:2]])
try:
# Sphinx depends on either Jinja or Jinja2
import jinja2
def format_template(template, **kw):
return jinja2.Template(template).render(**kw)
except ImportError:
import jinja
def format_template(template, **kw):
return jinja.from_string(template, **kw)
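# e.g. (jinja2 path, assumed values):
#   format_template(u"Hello {{ name }}!", name="world") -> u"Hello world!"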
import matplotlib
import matplotlib.cbook as cbook
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from matplotlib import _pylab_helpers
__version__ = 2
#------------------------------------------------------------------------------
# Registration hook
#------------------------------------------------------------------------------
def plot_directive(name, arguments, options, content, lineno,
content_offset, block_text, state, state_machine):
return run(arguments, content, options, state_machine, state, lineno)
plot_directive.__doc__ = __doc__
def _option_boolean(arg):
if not arg or not arg.strip():
# no argument given, assume used as a flag
return True
elif arg.strip().lower() in ('no', '0', 'false'):
return False
elif arg.strip().lower() in ('yes', '1', 'true'):
return True
else:
raise ValueError('"%s" unknown boolean' % arg)
def _option_context(arg):
if arg in [None, 'reset']:
return arg
raise ValueError("argument should be None or 'reset'")
def _option_format(arg):
return directives.choice(arg, ('python', 'doctest'))
def _option_align(arg):
return directives.choice(arg, ("top", "middle", "bottom", "left", "center",
"right"))
def mark_plot_labels(app, document):
"""
To make plots referenceable, we need to move the reference from
the "htmlonly" (or "latexonly") node to the actual figure node
itself.
"""
for name, explicit in six.iteritems(document.nametypes):
if not explicit:
continue
labelid = document.nameids[name]
if labelid is None:
continue
node = document.ids[labelid]
if node.tagname in ('html_only', 'latex_only'):
for n in node:
if n.tagname == 'figure':
sectname = name
for c in n:
if c.tagname == 'caption':
sectname = c.astext()
break
node['ids'].remove(labelid)
node['names'].remove(name)
n['ids'].append(labelid)
n['names'].append(name)
document.settings.env.labels[name] = \
document.settings.env.docname, labelid, sectname
break
def setup(app):
setup.app = app
setup.config = app.config
setup.confdir = app.confdir
options = {'alt': directives.unchanged,
'height': directives.length_or_unitless,
'width': directives.length_or_percentage_or_unitless,
'scale': directives.nonnegative_int,
'align': _option_align,
'class': directives.class_option,
'include-source': _option_boolean,
'format': _option_format,
'context': _option_context,
'nofigs': directives.flag,
'encoding': directives.encoding
}
app.add_directive('plot', plot_directive, True, (0, 2, False), **options)
app.add_config_value('plot_pre_code', None, True)
app.add_config_value('plot_include_source', False, True)
app.add_config_value('plot_html_show_source_link', True, True)
app.add_config_value('plot_formats', ['png', 'hires.png', 'pdf'], True)
app.add_config_value('plot_basedir', None, True)
app.add_config_value('plot_html_show_formats', True, True)
app.add_config_value('plot_rcparams', {}, True)
app.add_config_value('plot_apply_rcparams', False, True)
app.add_config_value('plot_working_directory', None, True)
app.add_config_value('plot_template', None, True)
app.connect(str('doctree-read'), mark_plot_labels)
#------------------------------------------------------------------------------
# Doctest handling
#------------------------------------------------------------------------------
def contains_doctest(text):
try:
# check if it's valid Python as-is
compile(text, '<string>', 'exec')
return False
except SyntaxError:
pass
r = re.compile(r'^\s*>>>', re.M)
m = r.search(text)
return bool(m)
def unescape_doctest(text):
"""
Extract code from a piece of text, which contains either Python code
or doctests.
"""
if not contains_doctest(text):
return text
code = ""
for line in text.split("\n"):
m = re.match(r'^\s*(>>>|\.\.\.) (.*)$', line)
if m:
code += m.group(2) + "\n"
elif line.strip():
code += "# " + line.strip() + "\n"
else:
code += "\n"
return code
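# Sketch of the transformation (assumed input): doctest prompts are
# stripped and prose lines become comments, e.g.
#   ">>> plt.plot([1, 2])\nA caption line"
# becomes
#   "plt.plot([1, 2])\n# A caption line\n"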
def split_code_at_show(text):
"""
Split code at plt.show()
"""
parts = []
is_doctest = contains_doctest(text)
part = []
for line in text.split("\n"):
if (not is_doctest and line.strip() == 'plt.show()') or \
(is_doctest and line.strip() == '>>> plt.show()'):
part.append(line)
parts.append("\n".join(part))
part = []
else:
part.append(line)
if "\n".join(part).strip():
parts.append("\n".join(part))
return parts
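# Example (assumed input): a script with one intermediate plt.show() call
# is split into two parts, the call staying at the end of the first part:
#   split_code_at_show("a = 1\nplt.show()\nb = 2")
#   -> ["a = 1\nplt.show()", "b = 2"]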
def remove_coding(text):
"""
Remove the coding comment, which six.exec_ doesn't like.
"""
return re.sub(
"^#\s*-\*-\s*coding:\s*.*-\*-$", "", text, flags=re.MULTILINE)
#------------------------------------------------------------------------------
# Template
#------------------------------------------------------------------------------
TEMPLATE = """
{{ source_code }}
{{ only_html }}
{% if source_link or (html_show_formats and not multi_image) %}
(
{%- if source_link -%}
`Source code <{{ source_link }}>`__
{%- endif -%}
{%- if html_show_formats and not multi_image -%}
{%- for img in images -%}
{%- for fmt in img.formats -%}
{%- if source_link or not loop.first -%}, {% endif -%}
`{{ fmt }} <{{ dest_dir }}/{{ img.basename }}.{{ fmt }}>`__
{%- endfor -%}
{%- endfor -%}
{%- endif -%}
)
{% endif %}
{% for img in images %}
.. figure:: {{ build_dir }}/{{ img.basename }}.png
{% for option in options -%}
{{ option }}
{% endfor %}
{% if html_show_formats and multi_image -%}
(
{%- for fmt in img.formats -%}
{%- if not loop.first -%}, {% endif -%}
`{{ fmt }} <{{ dest_dir }}/{{ img.basename }}.{{ fmt }}>`__
{%- endfor -%}
)
{%- endif -%}
{{ caption }}
{% endfor %}
{{ only_latex }}
{% for img in images %}
{% if 'pdf' in img.formats -%}
.. image:: {{ build_dir }}/{{ img.basename }}.pdf
{% endif -%}
{% endfor %}
{{ only_texinfo }}
{% for img in images %}
.. image:: {{ build_dir }}/{{ img.basename }}.png
{% for option in options -%}
{{ option }}
{% endfor %}
{% endfor %}
"""
exception_template = """
.. htmlonly::
[`source code <%(linkdir)s/%(basename)s.py>`__]
Exception occurred rendering plot.
"""
# the context of the plot for all directives specified with the
# :context: option
plot_context = dict()
class ImageFile(object):
def __init__(self, basename, dirname):
self.basename = basename
self.dirname = dirname
self.formats = []
def filename(self, format):
return os.path.join(self.dirname, "%s.%s" % (self.basename, format))
def filenames(self):
return [self.filename(fmt) for fmt in self.formats]
def out_of_date(original, derived):
"""
    Returns True if *derived* is out-of-date with respect to *original*,
    both of which are full file paths.
"""
return (not os.path.exists(derived) or
(os.path.exists(original) and
os.stat(derived).st_mtime < os.stat(original).st_mtime))
class PlotError(RuntimeError):
pass
def run_code(code, code_path, ns=None, function_name=None):
"""
    Execute *code* in a controlled namespace (seeded with the configured
    pre-code when the namespace starts out empty), then call
    *function_name* from that namespace if it is not None.
"""
# Change the working directory to the directory of the example, so
# it can get at its data files, if any. Add its path to sys.path
# so it can import any helper modules sitting beside it.
if six.PY2:
pwd = os.getcwdu()
else:
pwd = os.getcwd()
old_sys_path = list(sys.path)
if setup.config.plot_working_directory is not None:
try:
os.chdir(setup.config.plot_working_directory)
except OSError as err:
            raise OSError(str(err) + '\n`plot_working_directory` option in '
                          'Sphinx configuration file must be a valid '
                          'directory path')
except TypeError as err:
raise TypeError(str(err) + '\n`plot_working_directory` option in '
'Sphinx configuration file must be a string or '
'None')
sys.path.insert(0, setup.config.plot_working_directory)
elif code_path is not None:
dirname = os.path.abspath(os.path.dirname(code_path))
os.chdir(dirname)
sys.path.insert(0, dirname)
# Reset sys.argv
old_sys_argv = sys.argv
sys.argv = [code_path]
# Redirect stdout
stdout = sys.stdout
if six.PY3:
sys.stdout = io.StringIO()
else:
sys.stdout = cStringIO.StringIO()
# Assign a do-nothing print function to the namespace. There
# doesn't seem to be any other way to provide a way to (not) print
# that works correctly across Python 2 and 3.
def _dummy_print(*arg, **kwarg):
pass
try:
try:
code = unescape_doctest(code)
if ns is None:
ns = {}
if not ns:
if setup.config.plot_pre_code is None:
six.exec_(six.text_type("import numpy as np\n" +
"from matplotlib import pyplot as plt\n"), ns)
else:
six.exec_(six.text_type(setup.config.plot_pre_code), ns)
ns['print'] = _dummy_print
if "__main__" in code:
six.exec_("__name__ = '__main__'", ns)
code = remove_coding(code)
six.exec_(code, ns)
if function_name is not None:
six.exec_(function_name + "()", ns)
except (Exception, SystemExit) as err:
raise PlotError(traceback.format_exc())
finally:
os.chdir(pwd)
sys.argv = old_sys_argv
sys.path[:] = old_sys_path
sys.stdout = stdout
return ns
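# Usage sketch (hypothetical call; assumes the Sphinx ``setup`` object is
# configured): with the default empty namespace the snippet is first seeded
# with numpy/pyplot, so this leaves a figure open for render_figures() below
# to save:
#
#     ns = run_code("plt.plot([1, 2, 3])", code_path=None)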
def clear_state(plot_rcparams, close=True):
if close:
plt.close('all')
matplotlib.rc_file_defaults()
matplotlib.rcParams.update(plot_rcparams)
def render_figures(code, code_path, output_dir, output_base, context,
function_name, config, context_reset=False):
"""
Run a pyplot script and save the low and high res PNGs and a PDF
in *output_dir*.
    Save the images under *output_dir* with file names derived from
    *output_base*.
"""
# -- Parse format list
default_dpi = {'png': 80, 'hires.png': 200, 'pdf': 200}
formats = []
plot_formats = config.plot_formats
if isinstance(plot_formats, six.string_types):
plot_formats = eval(plot_formats)
for fmt in plot_formats:
if isinstance(fmt, six.string_types):
formats.append((fmt, default_dpi.get(fmt, 80)))
        elif isinstance(fmt, (tuple, list)) and len(fmt) == 2:
formats.append((str(fmt[0]), int(fmt[1])))
else:
raise PlotError('invalid image format "%r" in plot_formats' % fmt)
# -- Try to determine if all images already exist
code_pieces = split_code_at_show(code)
# Look for single-figure output files first
all_exists = True
img = ImageFile(output_base, output_dir)
for format, dpi in formats:
if out_of_date(code_path, img.filename(format)):
all_exists = False
break
img.formats.append(format)
if all_exists:
return [(code, [img])]
# Then look for multi-figure output files
results = []
all_exists = True
for i, code_piece in enumerate(code_pieces):
images = []
for j in xrange(1000):
if len(code_pieces) > 1:
img = ImageFile('%s_%02d_%02d' % (output_base, i, j), output_dir)
else:
img = ImageFile('%s_%02d' % (output_base, j), output_dir)
for format, dpi in formats:
if out_of_date(code_path, img.filename(format)):
all_exists = False
break
img.formats.append(format)
# assume that if we have one, we have them all
if not all_exists:
all_exists = (j > 0)
break
images.append(img)
if not all_exists:
break
results.append((code_piece, images))
if all_exists:
return results
# We didn't find the files, so build them
results = []
if context:
ns = plot_context
else:
ns = {}
if context_reset:
clear_state(config.plot_rcparams)
for i, code_piece in enumerate(code_pieces):
if not context or config.plot_apply_rcparams:
clear_state(config.plot_rcparams, close=not context)
run_code(code_piece, code_path, ns, function_name)
images = []
fig_managers = _pylab_helpers.Gcf.get_all_fig_managers()
for j, figman in enumerate(fig_managers):
if len(fig_managers) == 1 and len(code_pieces) == 1:
img = ImageFile(output_base, output_dir)
elif len(code_pieces) == 1:
img = ImageFile("%s_%02d" % (output_base, j), output_dir)
else:
img = ImageFile("%s_%02d_%02d" % (output_base, i, j),
output_dir)
images.append(img)
for format, dpi in formats:
try:
figman.canvas.figure.savefig(img.filename(format), dpi=dpi)
except Exception as err:
raise PlotError(traceback.format_exc())
img.formats.append(format)
results.append((code_piece, images))
if not context or config.plot_apply_rcparams:
clear_state(config.plot_rcparams, close=not context)
return results
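# Naming sketch: with output_base == "plot" and the default formats, two code
# pieces each yielding one figure are written as plot_00_00.png,
# plot_00_00.hires.png, plot_00_00.pdf, plot_01_00.png, ...; a single piece
# producing several figures yields plot_00.png, plot_01.png, and so on.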
def run(arguments, content, options, state_machine, state, lineno):
# The user may provide a filename *or* Python code content, but not both
if arguments and content:
raise RuntimeError("plot:: directive can't have both args and content")
document = state_machine.document
config = document.settings.env.config
nofigs = 'nofigs' in options
options.setdefault('include-source', config.plot_include_source)
context = 'context' in options
    context_reset = context and options['context'] == 'reset'
rst_file = document.attributes['source']
rst_dir = os.path.dirname(rst_file)
if len(arguments):
if not config.plot_basedir:
source_file_name = os.path.join(setup.app.builder.srcdir,
directives.uri(arguments[0]))
else:
source_file_name = os.path.join(setup.confdir, config.plot_basedir,
directives.uri(arguments[0]))
# If there is content, it will be passed as a caption.
caption = '\n'.join(content)
# If the optional function name is provided, use it
if len(arguments) == 2:
function_name = arguments[1]
else:
function_name = None
with io.open(source_file_name, 'r', encoding='utf-8') as fd:
code = fd.read()
output_base = os.path.basename(source_file_name)
else:
source_file_name = rst_file
code = textwrap.dedent("\n".join(map(str, content)))
counter = document.attributes.get('_plot_counter', 0) + 1
document.attributes['_plot_counter'] = counter
base, ext = os.path.splitext(os.path.basename(source_file_name))
output_base = '%s-%d.py' % (base, counter)
function_name = None
caption = ''
base, source_ext = os.path.splitext(output_base)
if source_ext in ('.py', '.rst', '.txt'):
output_base = base
else:
source_ext = ''
    # ensure that LaTeX includegraphics doesn't choke on foo.bar.pdf filenames
output_base = output_base.replace('.', '-')
# is it in doctest format?
is_doctest = contains_doctest(code)
if 'format' in options:
if options['format'] == 'python':
is_doctest = False
else:
is_doctest = True
# determine output directory name fragment
source_rel_name = relpath(source_file_name, setup.confdir)
source_rel_dir = os.path.dirname(source_rel_name)
while source_rel_dir.startswith(os.path.sep):
source_rel_dir = source_rel_dir[1:]
# build_dir: where to place output files (temporarily)
build_dir = os.path.join(os.path.dirname(setup.app.doctreedir),
'plot_directive',
source_rel_dir)
# get rid of .. in paths, also changes pathsep
# see note in Python docs for warning about symbolic links on Windows.
# need to compare source and dest paths at end
build_dir = os.path.normpath(build_dir)
if not os.path.exists(build_dir):
os.makedirs(build_dir)
# output_dir: final location in the builder's directory
dest_dir = os.path.abspath(os.path.join(setup.app.builder.outdir,
source_rel_dir))
if not os.path.exists(dest_dir):
        os.makedirs(dest_dir)
# how to link to files from the RST file
dest_dir_link = os.path.join(relpath(setup.confdir, rst_dir),
source_rel_dir).replace(os.path.sep, '/')
build_dir_link = relpath(build_dir, rst_dir).replace(os.path.sep, '/')
source_link = dest_dir_link + '/' + output_base + source_ext
# make figures
try:
results = render_figures(code, source_file_name, build_dir, output_base,
context, function_name, config,
context_reset=context_reset)
errors = []
except PlotError as err:
reporter = state.memo.reporter
sm = reporter.system_message(
2, "Exception occurred in plotting %s\n from %s:\n%s" % (output_base,
source_file_name, err),
line=lineno)
results = [(code, [])]
errors = [sm]
# Properly indent the caption
caption = '\n'.join(' ' + line.strip()
for line in caption.split('\n'))
# generate output restructuredtext
total_lines = []
for j, (code_piece, images) in enumerate(results):
if options['include-source']:
if is_doctest:
lines = ['']
lines += [row.rstrip() for row in code_piece.split('\n')]
else:
lines = ['.. code-block:: python', '']
lines += [' %s' % row.rstrip()
for row in code_piece.split('\n')]
source_code = "\n".join(lines)
else:
source_code = ""
if nofigs:
images = []
opts = [':%s: %s' % (key, val) for key, val in six.iteritems(options)
if key in ('alt', 'height', 'width', 'scale', 'align', 'class')]
only_html = ".. only:: html"
only_latex = ".. only:: latex"
only_texinfo = ".. only:: texinfo"
# Not-None src_link signals the need for a source link in the generated
# html
if j == 0 and config.plot_html_show_source_link:
src_link = source_link
else:
src_link = None
result = format_template(
config.plot_template or TEMPLATE,
dest_dir=dest_dir_link,
build_dir=build_dir_link,
source_link=src_link,
multi_image=len(images) > 1,
only_html=only_html,
only_latex=only_latex,
only_texinfo=only_texinfo,
options=opts,
images=images,
source_code=source_code,
html_show_formats=config.plot_html_show_formats and not nofigs,
caption=caption)
total_lines.extend(result.split("\n"))
total_lines.extend("\n")
if total_lines:
state_machine.insert_input(total_lines, source=source_file_name)
# copy image files to builder's output directory, if necessary
if not os.path.exists(dest_dir):
cbook.mkdirs(dest_dir)
for code_piece, images in results:
for img in images:
for fn in img.filenames():
destimg = os.path.join(dest_dir, os.path.basename(fn))
if fn != destimg:
shutil.copyfile(fn, destimg)
# copy script (if necessary)
target_name = os.path.join(dest_dir, output_base + source_ext)
with io.open(target_name, 'w', encoding="utf-8") as f:
if source_file_name == rst_file:
code_escaped = unescape_doctest(code)
else:
code_escaped = code
f.write(code_escaped)
return errors
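# Output sketch (illustrative): on success, run() splices generated reST back
# into the document via insert_input() -- an optional source code block, an
# html-only span with source/format links, one figure per image, and
# latex/texinfo sections pointing at the pdf/png variants.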
|
russellb/nova | refs/heads/master | nova/api/openstack/compute/server_metadata.py | 1 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from webob import exc
from nova.api.openstack import common
from nova.api.openstack import wsgi
from nova import compute
from nova import exception
class Controller(object):
""" The server metadata API controller for the Openstack API """
def __init__(self):
self.compute_api = compute.API()
super(Controller, self).__init__()
def _get_metadata(self, context, server_id):
try:
server = self.compute_api.get(context, server_id)
meta = self.compute_api.get_instance_metadata(context, server)
except exception.InstanceNotFound:
msg = _('Server does not exist')
raise exc.HTTPNotFound(explanation=msg)
        meta_dict = dict(meta.iteritems())
        return meta_dict
@wsgi.serializers(xml=common.MetadataTemplate)
def index(self, req, server_id):
""" Returns the list of metadata for a given instance """
context = req.environ['nova.context']
return {'metadata': self._get_metadata(context, server_id)}
@wsgi.serializers(xml=common.MetadataTemplate)
@wsgi.deserializers(xml=common.MetadataDeserializer)
def create(self, req, server_id, body):
try:
metadata = body['metadata']
except (KeyError, TypeError):
msg = _("Malformed request body")
raise exc.HTTPBadRequest(explanation=msg)
context = req.environ['nova.context']
new_metadata = self._update_instance_metadata(context,
server_id,
metadata,
delete=False)
return {'metadata': new_metadata}
@wsgi.serializers(xml=common.MetaItemTemplate)
@wsgi.deserializers(xml=common.MetaItemDeserializer)
def update(self, req, server_id, id, body):
try:
meta_item = body['meta']
except (TypeError, KeyError):
expl = _('Malformed request body')
raise exc.HTTPBadRequest(explanation=expl)
try:
meta_value = meta_item[id]
except (AttributeError, KeyError):
expl = _('Request body and URI mismatch')
raise exc.HTTPBadRequest(explanation=expl)
if len(meta_item) > 1:
expl = _('Request body contains too many items')
raise exc.HTTPBadRequest(explanation=expl)
context = req.environ['nova.context']
self._update_instance_metadata(context,
server_id,
meta_item,
delete=False)
return {'meta': meta_item}
@wsgi.serializers(xml=common.MetadataTemplate)
@wsgi.deserializers(xml=common.MetadataDeserializer)
def update_all(self, req, server_id, body):
try:
metadata = body['metadata']
except (TypeError, KeyError):
expl = _('Malformed request body')
raise exc.HTTPBadRequest(explanation=expl)
context = req.environ['nova.context']
new_metadata = self._update_instance_metadata(context,
server_id,
metadata,
delete=True)
return {'metadata': new_metadata}
def _update_instance_metadata(self, context, server_id, metadata,
delete=False):
try:
server = self.compute_api.get(context, server_id)
return self.compute_api.update_instance_metadata(context,
server,
metadata,
delete)
except exception.InstanceNotFound:
msg = _('Server does not exist')
raise exc.HTTPNotFound(explanation=msg)
except (ValueError, AttributeError):
msg = _("Malformed request body")
raise exc.HTTPBadRequest(explanation=msg)
except exception.QuotaError as error:
self._handle_quota_error(error)
@wsgi.serializers(xml=common.MetaItemTemplate)
def show(self, req, server_id, id):
""" Return a single metadata item """
context = req.environ['nova.context']
data = self._get_metadata(context, server_id)
try:
return {'meta': {id: data[id]}}
except KeyError:
msg = _("Metadata item was not found")
raise exc.HTTPNotFound(explanation=msg)
@wsgi.response(204)
def delete(self, req, server_id, id):
""" Deletes an existing metadata """
context = req.environ['nova.context']
metadata = self._get_metadata(context, server_id)
try:
meta_value = metadata[id]
except KeyError:
msg = _("Metadata item was not found")
raise exc.HTTPNotFound(explanation=msg)
try:
server = self.compute_api.get(context, server_id)
self.compute_api.delete_instance_metadata(context, server, id)
except exception.InstanceNotFound:
msg = _('Server does not exist')
raise exc.HTTPNotFound(explanation=msg)
def _handle_quota_error(self, error):
"""Reraise quota errors as api-specific http exceptions."""
if error.kwargs['code'] == "MetadataLimitExceeded":
raise exc.HTTPRequestEntityTooLarge(explanation=error.message,
headers={'Retry-After': 0})
raise error
def create_resource():
return wsgi.Resource(Controller())
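# Request/response sketch (illustrative, not part of the module): updating a
# single key routes through Controller.update(), e.g.
#
#     PUT /servers/<server_id>/metadata/color   {"meta": {"color": "red"}}
#
# returns {"meta": {"color": "red"}}, while a body key that does not match
# the URI id raises HTTPBadRequest ("Request body and URI mismatch").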
|
prune998/ansible | refs/heads/devel | lib/ansible/modules/cloud/misc/__init__.py | 12133432 | |
ThiagoGarciaAlves/intellij-community | refs/heads/master | python/testData/qualifiedName/topLevelFunctionReference/a/b/c/__init__.py | 12133432 | |
saurabh6790/aimobilize-app-backup | refs/heads/master | accounts/report/customer_account_head/__init__.py | 12133432 | |
1st/django | refs/heads/master | tests/pagination/models.py | 559 | from django.db import models
from django.utils.encoding import python_2_unicode_compatible
@python_2_unicode_compatible
class Article(models.Model):
headline = models.CharField(max_length=100, default='Default headline')
pub_date = models.DateTimeField()
def __str__(self):
return self.headline
|
shupelneker/gae_new_structure | refs/heads/master | boilerplate/external/httplib2/test/brokensocket/socket.py | 314 | from realsocket import gaierror, error, getaddrinfo, SOCK_STREAM
|
CyrilWaechter/pyRevitMEP | refs/heads/master | pyRevitMEP.tab/Manage.panel/Copy.pulldown/CopyPipeType.pushbutton/script.py | 1 | """
Copyright (c) 2017 Cyril Waechter
Python scripts for Autodesk Revit
This file is part of pypevitmep repository at https://github.com/CyrilWaechter/pypevitmep
pypevitmep is an extension for pyRevit. It contain free set of scripts for Autodesk Revit:
you can redistribute it and/or modify it under the terms of the GNU General Public License
version 3, as published by the Free Software Foundation.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
See this link for a copy of the GNU General Public License protecting this package.
https://github.com/CyrilWaechter/pypevitmep/blob/master/LICENSE
"""
# noinspection PyUnresolvedReferences
from Autodesk.Revit.DB import FilteredElementCollector, CopyPasteOptions, ElementTransformUtils, Element,\
ElementId, Transform, Transaction
# noinspection PyUnresolvedReferences
from Autodesk.Revit.DB.Plumbing import PipeType
# noinspection PyUnresolvedReferences
from System.Collections.Generic import List
import rpw
from rpw import doc
from pyrevit.forms import WPFWindow
__doc__ = "Copy pipe types from a selected opened document to active document"
__title__ = "PipeType"
__author__ = "Cyril Waechter"
opened_docs = {d.Title:d for d in rpw.revit.docs}
def copy(source_doc, elem):
"""
Copy elem from source_doc to active document
:param source_doc: Autodesk.Revit.DB.Document
:param elem: Autodesk.Revit.DB.Element
:return: None
"""
copypasteoptions = CopyPasteOptions()
id_list = List[ElementId]()
id_list.Add(elem.Id)
with rpw.db.Transaction("Copy pipe type", doc):
ElementTransformUtils.CopyElements(source_doc, id_list, doc, Transform.Identity, copypasteoptions)
class PipeTypeSelectionForm(WPFWindow):
"""
GUI used to select pipe type to copy
"""
def __init__(self, xaml_file_name):
WPFWindow.__init__(self, xaml_file_name)
self.source_docs.DataContext = rpw.revit.docs
# noinspection PyUnusedLocal
def source_doc_selection_changed(self, sender, e):
try:
self.source_doc = sender.SelectedItem
self.source_pipe.DataContext = FilteredElementCollector(self.source_doc).OfClass(PipeType)
        except Exception:
pass
# noinspection PyUnusedLocal
def button_copy_click(self, sender, e):
self.Close()
elem = self.source_pipe.SelectedItem
copy(self.source_doc, elem)
PipeTypeSelectionForm('PipeTypeSelection.xaml').ShowDialog()
|
Rungee/openrouteservice | refs/heads/master | build/tools/minimize.py | 297 | # Minimal Python Minimizer
# Copyright 2008, Christopher Schmidt
# Released under the MIT License
#
# Taken from: http://svn.crschmidt.net/personal/python/minimize.py
# $Id: minimize.py 6 2008-01-03 06:33:35Z crschmidt $
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import re
def strip_comments_helper(data):
"""remove all /* */ format comments and surrounding whitespace."""
p = re.compile(r'[\s]*/\*.*?\*/[\s]*', re.DOTALL)
return p.sub('',data)
def minimize(data, exclude=None):
"""Central function call. This will call all other compression
functions. To add further compression algorithms, simply add
functions whose names end in _helper which take a string as input
and return a more compressed string as output."""
for key, item in globals().iteritems():
if key.endswith("_helper"):
func_key = key[:-7]
if not exclude or not func_key in exclude:
data = item(data)
return data
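# Usage sketch: by default every *_helper runs, so CSS comments are stripped;
# pass exclude=["strip_comments"] to skip that helper by its name prefix:
#
#     minimize("a { color: red; } /* note */")  # -> "a { color: red; }"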
if __name__ == "__main__":
import sys
print minimize(open(sys.argv[1]).read())
|
v1bri/gnuradio | refs/heads/master | gr-vocoder/examples/codec2_audio_loopback.py | 47 | #!/usr/bin/env python
#
# Copyright 2005,2007,2011 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr
from gnuradio import audio
from gnuradio import blocks
from gnuradio import vocoder
from gnuradio.vocoder import codec2
def build_graph():
tb = gr.top_block()
src = audio.source(8000)
src_scale = blocks.multiply_const_ff(32767)
f2s = blocks.float_to_short()
enc = vocoder.codec2_encode_sp(codec2.MODE_2400)
dec = vocoder.codec2_decode_ps(codec2.MODE_2400)
s2f = blocks.short_to_float()
sink_scale = blocks.multiply_const_ff(1.0/32767.)
sink = audio.sink(8000)
tb.connect(src, src_scale, f2s, enc, dec, s2f, sink_scale, sink)
return tb
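# Signal-path sketch: microphone floats are scaled to 16-bit range, pushed
# through a codec2 encode/decode round trip at 2400 bit/s, then scaled back
# to floats for the speaker, so the audible output is the codec's artifact.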
if __name__ == '__main__':
tb = build_graph()
tb.start()
raw_input ('Press Enter to exit: ')
tb.stop()
tb.wait()
|
lcy-seso/Paddle | refs/heads/develop | python/paddle/fluid/tests/unittests/test_decayed_adagrad_op.py | 5 | # Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
from op_test import OpTest
class TestDecayedAdagradOp1(OpTest):
''' Test DecayedAdagrad operator with explicit attributes
'''
def setUp(self):
self.op_type = "decayed_adagrad"
param = np.random.random((123, 321)).astype("float32")
grad = np.random.random((123, 321)).astype("float32")
moment = np.zeros((123, 321)).astype("float32")
lr = 0.01
decay = 0.80
epsilon = 1e-8
self.inputs = {
'Param': param,
'Grad': grad,
'Moment': moment,
'LearningRate': np.array([lr]).astype("float32")
}
self.attrs = {'decay': decay, 'epsilon': epsilon}
moment_out = decay * moment + (1 - decay) * grad * grad
param_out = param - lr * grad / (np.sqrt(moment_out) + epsilon)
self.outputs = {'ParamOut': param_out, 'MomentOut': moment_out}
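        # Worked scalar check (illustrative): with moment=0, grad=2,
        # decay=0.8, lr=0.01, eps=1e-8 the update gives
        # moment_out = 0.8*0 + 0.2*4 = 0.8 and
        # param_out = param - 0.01*2/(sqrt(0.8) + 1e-8) ~ param - 0.02236.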
def test_check_output(self):
self.check_output()
class TestDecayedAdagradOp2(OpTest):
''' Test DecayedAdagrad operator with default attributes
'''
def setUp(self):
self.op_type = "decayed_adagrad"
param = np.random.random((123, 321)).astype("float32")
grad = np.random.random((123, 321)).astype("float32")
moment = np.zeros((123, 321)).astype("float32")
lr = 0.01
decay = 0.95
epsilon = 1e-6
self.inputs = {
'Param': param,
'Grad': grad,
'Moment': moment,
'LearningRate': np.array([lr]).astype("float32")
}
self.attrs = {'decay': decay, 'epsilon': epsilon}
moment_out = decay * moment + (1 - decay) * grad * grad
param_out = param - lr * grad / (np.sqrt(moment_out) + epsilon)
self.outputs = {'ParamOut': param_out, 'MomentOut': moment_out}
def test_check_output(self):
self.check_output()
if __name__ == "__main__":
unittest.main()
|
KristofferC/FeynSimul | refs/heads/master | lm2m2_npart.py | 1 | # This file is part of FeynSimul.
#
# FeynSimul is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# FeynSimul is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with FeynSimul. If not, see <http://www.gnu.org/licenses/>.
# Does a run with the lm2m2 potential for 3 particles.
import os
import sys
import math
import numpy as np
from datetime import datetime
from time import time
from time import sleep
import csv
from FeynSimul.kernel_args import *
from FeynSimul.kernel import *
from FeynSimul.pimc_utils import *
# Import the harmonic oscillator class
from FeynSimul.physical_systems.lm2m2_cluster import *
enableDoubleOuter = False
system = Lm2m2_cluster(3, 10e-3, verbose=False, enableDouble=enableDoubleOuter)
ka = KernelArgs(system = system,
nbrOfWalkers = 64,
N = 8,
beta = system.beta,
S = 6,
enableOperator = True,
enableCorrelator = False,
metroStepsPerOperatorRun = 40,
enableBisection = True,
enablePathShift = False,
enableSingleNodeMove = False,
enableParallelizePath = True,
enableGlobalPath = True,
enableGlobalOldPath = True,
enableBins = False,
enableDouble = enableDoubleOuter,
enableRanlux = False,
luxuaryFactor = 2,
ranluxIntMax = 2**32-1,
operatorRuns = 100,
nbrOfWalkersPerWorkGroup = 4,
operators = (system.energyOp, system.meanSquaredRadiusOp))
# Time to run simul
#endTime = 60 * 60 * 24 * 14
endTime = 0 # Stop when final N has reached 'runsThisN' amount of runs
# How often to save paths.
savePathsInterval = 3000
ka.operators = (ka.system.energyOp, ka.system.meanSquaredRadiusOp)
def opRunsFormula(N, S):
return max(2 ** 10 / 2 ** S, 1)
def mStepsPerOPRun(N, S):
return 10
def runsPerN(N, S):
return max(N / 8, 10)
# Random offsets scaled to physical units; the trailing * 0 zeroes them so
# every walker starts at the origin.
startXList = np.random.uniform(size=(ka.nbrOfWalkers, ka.system.DOF),
                               low=-1.0, high=1.0) * 0.1 * 1e-10 / ka.system.rUnit * np.sqrt(ka.system.m) * 0
# Run the simulation function
modN(ka, startXList, savePathsInterval, "lm2m2_cluster", opRunsFormula,
     mStepsPerOPRun, runsPerN, 512, simTime=endTime, finalN=1024*64,
     verbosity=2, continueRun=False, cont_S=12, cont_N=1024*16,
     cont_path='results/lm2m2_cluster/2013-02-20/13.51.01/pathsN16384episode1')
|
olivierb2/openchange | refs/heads/master | python/openchange/tests/test_provision.py | 1 | #!/usr/bin/python
# OpenChange provisioning
# Copyright (C) Jelmer Vernooij <jelmer@openchange.org> 2009
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from samba import param
from samba.credentials import Credentials
from samba.tests import TestCaseInTempDir
from samba.tests.samdb import SamDBTestCase
from openchange.provision import (
find_setup_dir,
guess_names_from_smbconf,
install_schemas,
openchangedb_provision,
)
import os
class OpenChangeDBProvisionTestCase(TestCaseInTempDir):
def test_provision(self):
lp = param.LoadParm()
lp.load_default()
lp.set("private dir", self.tempdir)
openchangedb_provision(lp)
os.unlink(os.path.join(self.tempdir, "openchange.ldb"))
|
pjg101/SickRage | refs/heads/master | lib/sqlalchemy/orm/identity.py | 78 | # orm/identity.py
# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
import weakref
from . import attributes
from .. import util
class IdentityMap(dict):
def __init__(self):
self._modified = set()
self._wr = weakref.ref(self)
def replace(self, state):
raise NotImplementedError()
def add(self, state):
raise NotImplementedError()
def update(self, dict):
raise NotImplementedError("IdentityMap uses add() to insert data")
def clear(self):
raise NotImplementedError("IdentityMap uses remove() to remove data")
def _manage_incoming_state(self, state):
state._instance_dict = self._wr
if state.modified:
self._modified.add(state)
def _manage_removed_state(self, state):
del state._instance_dict
self._modified.discard(state)
def _dirty_states(self):
return self._modified
def check_modified(self):
"""return True if any InstanceStates present have been marked
as 'modified'.
"""
return bool(self._modified)
def has_key(self, key):
return key in self
def popitem(self):
raise NotImplementedError("IdentityMap uses remove() to remove data")
def pop(self, key, *args):
raise NotImplementedError("IdentityMap uses remove() to remove data")
def setdefault(self, key, default=None):
raise NotImplementedError("IdentityMap uses add() to insert data")
def copy(self):
raise NotImplementedError()
def __setitem__(self, key, value):
raise NotImplementedError("IdentityMap uses add() to insert data")
def __delitem__(self, key):
raise NotImplementedError("IdentityMap uses remove() to remove data")
class WeakInstanceDict(IdentityMap):
def __init__(self):
IdentityMap.__init__(self)
def __getitem__(self, key):
state = dict.__getitem__(self, key)
o = state.obj()
if o is None:
raise KeyError(key)
return o
def __contains__(self, key):
try:
if dict.__contains__(self, key):
state = dict.__getitem__(self, key)
o = state.obj()
else:
return False
except KeyError:
return False
else:
return o is not None
def contains_state(self, state):
return dict.get(self, state.key) is state
def replace(self, state):
if dict.__contains__(self, state.key):
existing = dict.__getitem__(self, state.key)
if existing is not state:
self._manage_removed_state(existing)
else:
return
dict.__setitem__(self, state.key, state)
self._manage_incoming_state(state)
def add(self, state):
key = state.key
# inline of self.__contains__
if dict.__contains__(self, key):
try:
existing_state = dict.__getitem__(self, key)
if existing_state is not state:
o = existing_state.obj()
if o is not None:
raise AssertionError(
"A conflicting state is already "
"present in the identity map for key %r"
% (key, ))
else:
return
except KeyError:
pass
dict.__setitem__(self, key, state)
self._manage_incoming_state(state)
def get(self, key, default=None):
state = dict.get(self, key, default)
if state is default:
return default
o = state.obj()
if o is None:
return default
return o
def _items(self):
values = self.all_states()
result = []
for state in values:
value = state.obj()
if value is not None:
result.append((state.key, value))
return result
def _values(self):
values = self.all_states()
result = []
for state in values:
value = state.obj()
if value is not None:
result.append(value)
return result
if util.py2k:
items = _items
values = _values
def iteritems(self):
return iter(self.items())
def itervalues(self):
return iter(self.values())
else:
def items(self):
return iter(self._items())
def values(self):
return iter(self._values())
def all_states(self):
if util.py2k:
return dict.values(self)
else:
return list(dict.values(self))
def discard(self, state):
st = dict.get(self, state.key, None)
if st is state:
dict.pop(self, state.key, None)
self._manage_removed_state(state)
def prune(self):
return 0
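    # Behavior note: only weak references are held, so once the last strong
    # reference to an object dies, ``key in self`` becomes False and
    # ``self[key]`` raises KeyError even if the state entry still lingers
    # until it is discarded.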
class StrongInstanceDict(IdentityMap):
def all_states(self):
return [attributes.instance_state(o) for o in self.values()]
def contains_state(self, state):
return (
state.key in self and
attributes.instance_state(self[state.key]) is state)
def replace(self, state):
if dict.__contains__(self, state.key):
existing = dict.__getitem__(self, state.key)
existing = attributes.instance_state(existing)
if existing is not state:
self._manage_removed_state(existing)
else:
return
dict.__setitem__(self, state.key, state.obj())
self._manage_incoming_state(state)
def add(self, state):
if state.key in self:
if attributes.instance_state(dict.__getitem__(self,
state.key)) is not state:
raise AssertionError('A conflicting state is already '
'present in the identity map for key %r'
% (state.key, ))
else:
dict.__setitem__(self, state.key, state.obj())
self._manage_incoming_state(state)
def discard(self, state):
obj = dict.get(self, state.key, None)
if obj is not None:
st = attributes.instance_state(obj)
if st is state:
dict.pop(self, state.key, None)
self._manage_removed_state(state)
def prune(self):
"""prune unreferenced, non-dirty states."""
ref_count = len(self)
dirty = [s.obj() for s in self.all_states() if s.modified]
# work around http://bugs.python.org/issue6149
keepers = weakref.WeakValueDictionary()
keepers.update(self)
dict.clear(self)
dict.update(self, keepers)
self.modified = bool(dirty)
return ref_count - len(self)
|
fchu/hadoop-0.20.205 | refs/heads/master | contrib/hod/hodlib/ServiceProxy/serviceProxy.py | 182 | #Licensed to the Apache Software Foundation (ASF) under one
#or more contributor license agreements. See the NOTICE file
#distributed with this work for additional information
#regarding copyright ownership. The ASF licenses this file
#to you under the Apache License, Version 2.0 (the
#"License"); you may not use this file except in compliance
#with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
"""HOD Service Proxy Implementation"""
# -*- python -*-
import sys, time, signal, httplib, socket, threading
import sha, base64, hmac
import xml.dom.minidom
from hodlib.Common.socketServers import hodHTTPServer
from hodlib.Common.hodsvc import hodBaseService
from hodlib.Common.threads import loop
from hodlib.Common.tcp import tcpSocket
from hodlib.Common.util import get_exception_string
from hodlib.Common.AllocationManagerUtil import *
class svcpxy(hodBaseService):
def __init__(self, config):
hodBaseService.__init__(self, 'serviceProxy', config['service_proxy'],
xrtype='twisted')
self.amcfg=config['allocation_manager']
def _xr_method_isProjectUserValid(self, userid, project, ignoreErrors = False, timeOut = 15):
return self.isProjectUserValid(userid, project, ignoreErrors, timeOut)
def isProjectUserValid(self, userid, project, ignoreErrors, timeOut):
"""Method thats called upon by
the hodshell to verify if the
specified (user, project) combination
is valid"""
self.logs['main'].info("Begin isProjectUserValid()")
am = AllocationManagerUtil.getAllocationManager(self.amcfg['id'],
self.amcfg,
self.logs['main'])
self.logs['main'].info("End isProjectUserValid()")
return am.getQuote(userid, project)
|
krafczyk/spack | refs/heads/develop | var/spack/repos/builtin/packages/vpic/package.py | 2 | ##############################################################################
# Copyright (c) 2017, Los Alamos National Security, LLC
# Produced at the Los Alamos National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Vpic(CMakePackage):
"""VPIC is a general purpose particle-in-cell simulation code for modeling
kinetic plasmas in one, two, or three spatial dimensions. It employs a
second-order, explicit, leapfrog algorithm to update charged particle
positions and velocities in order to solve the relativistic kinetic
equation for each species in the plasma, along with a full Maxwell
description for the electric and magnetic fields evolved via a second-
order finite-difference-time-domain (FDTD) solve.
"""
homepage = "https://github.com/lanl/vpic"
git = "https://github.com/lanl/vpic.git"
version('develop', branch='master', submodules=True)
depends_on("cmake@3.1:", type='build')
depends_on('mpi')
def cmake_args(self):
options = ['-DENABLE_INTEGRATED_TESTS=ON', '-DENABLE_UNIT_TESTS=ON']
return options
|
josegom/training | refs/heads/master | ctf/utad2016/my_crypto-1dd6cf93be2c40a74364982847b35a30.py | 1 |
import random
import base64
import codecs
def demutate2(s):
demuted = ""
try:
demuted = base64.b64decode(bytes(s,"utf-8")).decode("utf-8")
    except Exception:
demuted = codecs.decode(s,'rot_13')
return demuted
def demutate(s):
demuted = ""
if s.endswith("="):
try:
demuted = base64.b64decode(bytes(s,"utf-8")).decode("utf-8")
except UnicodeDecodeError:
demuted = codecs.decode(s,'rot_13')
else:
demuted = codecs.decode(s,'rot_13')
return demuted
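# Decoding sketch: main() below strips one explicit base64 layer, then peels
# 64 more rounds, each applying whichever transform fits -- base64 when the
# blob decodes cleanly, otherwise ROT13.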
def main():
with open('enc-0a8be8ec687f8ce9c3f7862cf7544d22.txt','r') as ecnc2:
data=ecnc2.read()
data=base64.b64decode(bytes(data,"utf-8")).decode("utf-8")
for i in range(64):
data = demutate2(data)
with open('desc.txt','w') as desc:
desc.write(data)
print(data)
main()
|
Sudokeys/server-tools | refs/heads/8.0 | super_calendar/models/super_calendar_configurator.py | 21 | # -*- coding: utf-8 -*-
##############################################################################
#
# Odoo, Open Source Management Solution
#
# Copyright (c) All rights reserved:
# (c) 2012 Agile Business Group sagl (<http://www.agilebg.com>)
# (c) 2012 Domsense srl (<http://www.domsense.com>)
# (c) 2015 Anubía, soluciones en la nube,SL (http://www.anubia.es)
# Alejandro Santana <alejandrosantana@anubia.es>
# (c) 2015 Savoir-faire Linux <http://www.savoirfairelinux.com>)
# Agathe Mollé <agathe.molle@savoirfairelinux.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see http://www.gnu.org/licenses
#
##############################################################################
import logging
from datetime import datetime
from pytz import timezone, utc
from mako.template import Template
from openerp import _, api, exceptions, fields, models, tools
from openerp.tools.safe_eval import safe_eval
_logger = logging.getLogger(__name__)
class SuperCalendarConfigurator(models.Model):
_name = 'super.calendar.configurator'
name = fields.Char(
string='Name',
required=True,
)
line_ids = fields.One2many(
comodel_name='super.calendar.configurator.line',
inverse_name='configurator_id',
string='Lines',
)
def _clear_super_calendar_records(self):
"""
Remove old super_calendar records
"""
super_calendar_pool = self.env['super.calendar']
super_calendar_list = super_calendar_pool.search([])
super_calendar_list.unlink()
@api.multi
def generate_calendar_records(self):
"""
At every CRON execution, every 'super calendar' data is deleted and
regenerated again.
"""
# Remove old records
self._clear_super_calendar_records()
# Rebuild all calendar records
configurator_list = self.search([])
for configurator in configurator_list:
for line in configurator.line_ids:
configurator._generate_record_from_line(line)
_logger.info('Calendar generated')
return True
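    # Rebuild sketch: each cron run wipes every super.calendar record and
    # regenerates one record per matching source record of every
    # configurator line, so the model is always a full snapshot rather
    # than a delta.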
@api.multi
def _generate_record_from_line(self, line):
"""
Create super_calendar records from super_calendar_configurator_line
objects.
"""
super_calendar_pool = self.env['super.calendar']
values = self._get_record_values_from_line(line)
for record in values:
super_calendar_pool.create(values[record])
@api.multi
def _get_record_values_from_line(self, line):
"""
Get super_calendar fields values from super_calendar_configurator_line
objects.
Check if the User value is a res.users.
"""
res = {}
current_pool = self.env[line.name.model]
domain = line.domain and safe_eval(line.domain) or []
current_record_list = current_pool.search(domain)
for cur_rec in current_record_list:
f_user = line.user_field_id.name
f_descr = line.description_field_id.name
f_date_start = line.date_start_field_id.name
f_date_stop = line.date_stop_field_id.name
f_duration = line.duration_field_id.name
# Check if f_user refer to a res.users
if (f_user and cur_rec[f_user] and
cur_rec[f_user]._model._name != 'res.users'):
raise exceptions.ValidationError(
_("The 'User' field of record %s (%s) "
"does not refer to res.users")
% (cur_rec[f_descr], line.name.model))
if ((cur_rec[f_descr] or line.description_code) and
cur_rec[f_date_start]):
duration = False
if line.date_start_field_id.ttype == 'date':
date_format = tools.DEFAULT_SERVER_DATE_FORMAT
else:
date_format = tools.DEFAULT_SERVER_DATETIME_FORMAT
date_start = datetime.strptime(
cur_rec[f_date_start], date_format
)
if (not line.duration_field_id and
line.date_stop_field_id and
cur_rec[f_date_start] and
cur_rec[f_date_stop]):
if line.date_stop_field_id.ttype == 'date':
date_format = tools.DEFAULT_SERVER_DATE_FORMAT
else:
date_format = tools.DEFAULT_SERVER_DATETIME_FORMAT
date_stop = datetime.strptime(
cur_rec[f_date_stop], date_format
)
date_diff = (date_stop - date_start)
duration = date_diff.total_seconds() / 3600
elif line.duration_field_id:
duration = cur_rec[f_duration]
if line.description_type != 'code':
name = cur_rec[f_descr]
else:
parse_dict = {'o': cur_rec}
mytemplate = Template(line.description_code)
name = mytemplate.render(**parse_dict)
# Convert date_start to UTC timezone if it is a date field
# in order to be stored in UTC in the database
if line.date_start_field_id.ttype == 'date':
tz = timezone(self._context.get('tz')
or self.env.user.tz
or 'UTC')
local_date_start = tz.localize(date_start)
utc_date_start = local_date_start.astimezone(utc)
date_start = utc_date_start
date_start = datetime.strftime(
date_start,
tools.DEFAULT_SERVER_DATETIME_FORMAT
)
super_calendar_values = {
'name': name,
'date_start': date_start,
'duration': duration,
'user_id': (f_user and cur_rec[f_user].id),
'configurator_id': self.id,
'res_id': line.name.model + ',' + str(cur_rec['id']),
'model_id': line.name.id,
}
res[cur_rec] = super_calendar_values
return res
|
HenrikSolver/micropython | refs/heads/master | tests/basics/set_pop.py | 28 | s = {1}
print(s.pop())
try:
print(s.pop(), "!!!")
except KeyError:
pass
else:
print("Failed to raise KeyError")
# this tests an optimisation in mp_set_remove_first
# N must not be equal to one of the values in hash_allocation_sizes
N = 11
s = set(range(N))
while s:
print(s.pop()) # last pop() should trigger the optimisation
for i in range(N):
s.add(i) # check that we can add the numbers back to the set
print(list(s))
|
xme1226/horizon | refs/heads/master | openstack_dashboard/dashboards/project/data_processing/clusters/workflows/scale.py | 32 | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import base64
import json
import logging
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from openstack_dashboard.api import sahara as saharaclient
import openstack_dashboard.dashboards.project.data_processing. \
cluster_templates.workflows.create as clt_create_flow
import openstack_dashboard.dashboards.project.data_processing. \
clusters.workflows.create as cl_create_flow
from openstack_dashboard.dashboards.project.data_processing.utils \
import workflow_helpers
from saharaclient.api import base as api_base
LOG = logging.getLogger(__name__)
class NodeGroupsStep(clt_create_flow.ConfigureNodegroups):
pass
class ScaleCluster(cl_create_flow.ConfigureCluster,
workflow_helpers.StatusFormatMixin):
slug = "scale_cluster"
name = _("Scale Cluster")
finalize_button_name = _("Scale")
success_url = "horizon:project:data_processing.clusters:index"
default_steps = (NodeGroupsStep, )
def __init__(self, request, context_seed, entry_point, *args, **kwargs):
ScaleCluster._cls_registry = set([])
self.success_message = _("Scaled cluster successfully started.")
cluster_id = context_seed["cluster_id"]
try:
cluster = saharaclient.cluster_get(request, cluster_id)
plugin = cluster.plugin_name
hadoop_version = cluster.hadoop_version
# Initialize deletable node groups.
deletable = dict()
for group in cluster.node_groups:
deletable[group["name"]] = "false"
request.GET = request.GET.copy()
request.GET.update({
"cluster_id": cluster_id,
"plugin_name": plugin,
"hadoop_version": hadoop_version,
"deletable": deletable
})
super(ScaleCluster, self).__init__(request, context_seed,
entry_point, *args,
**kwargs)
# Initialize node groups.
for step in self.steps:
if not isinstance(step, clt_create_flow.ConfigureNodegroups):
continue
ng_action = step.action
template_ngs = cluster.node_groups
if 'forms_ids' in request.POST:
continue
ng_action.groups = []
for i, templ_ng in enumerate(template_ngs):
group_name = "group_name_%d" % i
template_id = "template_id_%d" % i
count = "count_%d" % i
serialized = "serialized_%d" % i
serialized_val = base64.urlsafe_b64encode(json.dumps(
workflow_helpers.clean_node_group(templ_ng)))
ng_action.groups.append({
"name": templ_ng["name"],
"template_id": templ_ng["node_group_template_id"],
"count": templ_ng["count"],
"id": i,
"deletable": "false",
"serialized": serialized_val
})
workflow_helpers.build_node_group_fields(ng_action,
group_name,
template_id,
count,
serialized)
except Exception:
exceptions.handle(request,
_("Unable to fetch cluster to scale"))
def format_status_message(self, message):
        # Scaling form requires special handling because it has no Cluster
        # name in its context.
error_description = getattr(self, 'error_description', None)
if error_description:
return error_description
else:
return self.success_message
def handle(self, request, context):
cluster_id = request.GET["cluster_id"]
try:
cluster = saharaclient.cluster_get(request, cluster_id)
existing_node_groups = set([])
for ng in cluster.node_groups:
existing_node_groups.add(ng["name"])
scale_object = dict()
ids = json.loads(context["ng_forms_ids"])
for _id in ids:
name = context["ng_group_name_%s" % _id]
template_id = context["ng_template_id_%s" % _id]
count = context["ng_count_%s" % _id]
if name not in existing_node_groups:
if "add_node_groups" not in scale_object:
scale_object["add_node_groups"] = []
scale_object["add_node_groups"].append(
{"name": name,
"node_group_template_id": template_id,
"count": int(count)})
else:
old_count = None
for ng in cluster.node_groups:
if name == ng["name"]:
old_count = ng["count"]
break
if old_count != count:
if "resize_node_groups" not in scale_object:
scale_object["resize_node_groups"] = []
scale_object["resize_node_groups"].append(
{"name": name,
"count": int(count)}
)
except Exception:
scale_object = {}
exceptions.handle(request,
_("Unable to fetch cluster to scale."))
try:
saharaclient.cluster_scale(request, cluster_id, scale_object)
return True
except api_base.APIException as e:
self.error_description = str(e)
return False
except Exception:
exceptions.handle(request,
_("Scale cluster operation failed"))
return False
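    # Payload sketch (illustrative values): handle() sends something like
    #     {"add_node_groups": [{"name": "workers2",
    #                           "node_group_template_id": "<id>",
    #                           "count": 3}],
    #      "resize_node_groups": [{"name": "workers", "count": 5}]}
    # to saharaclient.cluster_scale() above.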
|
smasala/phantomjs | refs/heads/master | src/qt/qtwebkit/Source/ThirdParty/gtest/scripts/pump.py | 233 | #!/usr/bin/env python
#
# Copyright 2008, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""pump v0.1 - Pretty Useful for Meta Programming.
A tool for preprocessor meta programming. Useful for generating
repetitive boilerplate code. Especially useful for writing C++
classes, functions, macros, and templates that need to work with
various number of arguments.
USAGE:
pump.py SOURCE_FILE
EXAMPLES:
pump.py foo.cc.pump
Converts foo.cc.pump to foo.cc.
GRAMMAR:
CODE ::= ATOMIC_CODE*
ATOMIC_CODE ::= $var ID = EXPRESSION
| $var ID = [[ CODE ]]
| $range ID EXPRESSION..EXPRESSION
| $for ID SEPARATOR [[ CODE ]]
| $($)
| $ID
| $(EXPRESSION)
| $if EXPRESSION [[ CODE ]] ELSE_BRANCH
| [[ CODE ]]
| RAW_CODE
SEPARATOR ::= RAW_CODE | EMPTY
ELSE_BRANCH ::= $else [[ CODE ]]
| $elif EXPRESSION [[ CODE ]] ELSE_BRANCH
| EMPTY
EXPRESSION has Python syntax.
"""
__author__ = 'wan@google.com (Zhanyong Wan)'
import os
import re
import sys
TOKEN_TABLE = [
(re.compile(r'\$var\s+'), '$var'),
(re.compile(r'\$elif\s+'), '$elif'),
(re.compile(r'\$else\s+'), '$else'),
(re.compile(r'\$for\s+'), '$for'),
(re.compile(r'\$if\s+'), '$if'),
(re.compile(r'\$range\s+'), '$range'),
(re.compile(r'\$[_A-Za-z]\w*'), '$id'),
(re.compile(r'\$\(\$\)'), '$($)'),
(re.compile(r'\$\$.*'), '$$'),
(re.compile(r'\$'), '$'),
(re.compile(r'\[\[\n?'), '[['),
(re.compile(r'\]\]\n?'), ']]'),
]
class Cursor:
"""Represents a position (line and column) in a text file."""
def __init__(self, line=-1, column=-1):
self.line = line
self.column = column
def __eq__(self, rhs):
return self.line == rhs.line and self.column == rhs.column
def __ne__(self, rhs):
return not self == rhs
def __lt__(self, rhs):
return self.line < rhs.line or (
self.line == rhs.line and self.column < rhs.column)
def __le__(self, rhs):
return self < rhs or self == rhs
def __gt__(self, rhs):
return rhs < self
def __ge__(self, rhs):
return rhs <= self
def __str__(self):
if self == Eof():
return 'EOF'
else:
return '%s(%s)' % (self.line + 1, self.column)
def __add__(self, offset):
return Cursor(self.line, self.column + offset)
def __sub__(self, offset):
return Cursor(self.line, self.column - offset)
def Clone(self):
"""Returns a copy of self."""
return Cursor(self.line, self.column)
# Special cursor to indicate the end-of-file.
def Eof():
"""Returns the special cursor to denote the end-of-file."""
return Cursor(-1, -1)
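# Example: Eof() is simply Cursor(-1, -1), so ``pos == Eof()`` tests for the
# sentinel; arithmetic stays on one line, e.g. Cursor(0, 3) + 2 == Cursor(0, 5).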
class Token:
"""Represents a token in a Pump source file."""
def __init__(self, start=None, end=None, value=None, token_type=None):
if start is None:
self.start = Eof()
else:
self.start = start
if end is None:
self.end = Eof()
else:
self.end = end
self.value = value
self.token_type = token_type
def __str__(self):
return 'Token @%s: \'%s\' type=%s' % (
self.start, self.value, self.token_type)
def Clone(self):
"""Returns a copy of self."""
return Token(self.start.Clone(), self.end.Clone(), self.value,
self.token_type)
def StartsWith(lines, pos, string):
"""Returns True iff the given position in lines starts with 'string'."""
return lines[pos.line][pos.column:].startswith(string)
def FindFirstInLine(line, token_table):
best_match_start = -1
for (regex, token_type) in token_table:
m = regex.search(line)
if m:
      # We found a match for this regex in the line.
if best_match_start < 0 or m.start() < best_match_start:
best_match_start = m.start()
best_match_length = m.end() - m.start()
best_match_token_type = token_type
if best_match_start < 0:
return None
return (best_match_start, best_match_length, best_match_token_type)
def FindFirst(lines, token_table, cursor):
"""Finds the first occurrence of any string in strings in lines."""
start = cursor.Clone()
cur_line_number = cursor.line
for line in lines[start.line:]:
if cur_line_number == start.line:
line = line[start.column:]
m = FindFirstInLine(line, token_table)
if m:
# We found a regex in line.
(start_column, length, token_type) = m
if cur_line_number == start.line:
start_column += start.column
found_start = Cursor(cur_line_number, start_column)
found_end = found_start + length
return MakeToken(lines, found_start, found_end, token_type)
cur_line_number += 1
  # We failed to find any token in lines.
return None
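# Illustrative example: with lines == ["x $var y"], FindFirst() returns a
# '$var' token starting at column 2 and ending at column 7 (exclusive) of
# line 0, since the '$var' regex also consumes the trailing whitespace
# after the keyword.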
def SubString(lines, start, end):
"""Returns a substring in lines."""
if end == Eof():
end = Cursor(len(lines) - 1, len(lines[-1]))
if start >= end:
return ''
if start.line == end.line:
return lines[start.line][start.column:end.column]
result_lines = ([lines[start.line][start.column:]] +
lines[start.line + 1:end.line] +
[lines[end.line][:end.column]])
return ''.join(result_lines)
def MakeToken(lines, start, end, token_type):
"""Creates a new instance of Token."""
return Token(start, end, SubString(lines, start, end), token_type)
def ParseToken(lines, pos, regex, token_type):
line = lines[pos.line][pos.column:]
m = regex.search(line)
if m and not m.start():
return MakeToken(lines, pos, pos + m.end(), token_type)
else:
print 'ERROR: %s expected at %s.' % (token_type, pos)
sys.exit(1)
ID_REGEX = re.compile(r'[_A-Za-z]\w*')
EQ_REGEX = re.compile(r'=')
REST_OF_LINE_REGEX = re.compile(r'.*?(?=$|\$\$)')
OPTIONAL_WHITE_SPACES_REGEX = re.compile(r'\s*')
WHITE_SPACE_REGEX = re.compile(r'\s')
DOT_DOT_REGEX = re.compile(r'\.\.')
def Skip(lines, pos, regex):
line = lines[pos.line][pos.column:]
m = re.search(regex, line)
if m and not m.start():
return pos + m.end()
else:
return pos
def SkipUntil(lines, pos, regex, token_type):
line = lines[pos.line][pos.column:]
m = re.search(regex, line)
if m:
return pos + m.start()
else:
print ('ERROR: %s expected on line %s after column %s.' %
(token_type, pos.line + 1, pos.column))
sys.exit(1)
def ParseExpTokenInParens(lines, pos):
def ParseInParens(pos):
pos = Skip(lines, pos, OPTIONAL_WHITE_SPACES_REGEX)
pos = Skip(lines, pos, r'\(')
pos = Parse(pos)
pos = Skip(lines, pos, r'\)')
return pos
def Parse(pos):
pos = SkipUntil(lines, pos, r'\(|\)', ')')
if SubString(lines, pos, pos + 1) == '(':
pos = Parse(pos + 1)
pos = Skip(lines, pos, r'\)')
return Parse(pos)
else:
return pos
start = pos.Clone()
pos = ParseInParens(pos)
return MakeToken(lines, start, pos, 'exp')
def RStripNewLineFromToken(token):
if token.value.endswith('\n'):
return Token(token.start, token.end, token.value[:-1], token.token_type)
else:
return token
def TokenizeLines(lines, pos):
while True:
found = FindFirst(lines, TOKEN_TABLE, pos)
if not found:
yield MakeToken(lines, pos, Eof(), 'code')
return
if found.start == pos:
prev_token = None
prev_token_rstripped = None
else:
prev_token = MakeToken(lines, pos, found.start, 'code')
prev_token_rstripped = RStripNewLineFromToken(prev_token)
if found.token_type == '$$': # A meta comment.
if prev_token_rstripped:
yield prev_token_rstripped
pos = Cursor(found.end.line + 1, 0)
elif found.token_type == '$var':
if prev_token_rstripped:
yield prev_token_rstripped
yield found
id_token = ParseToken(lines, found.end, ID_REGEX, 'id')
yield id_token
pos = Skip(lines, id_token.end, OPTIONAL_WHITE_SPACES_REGEX)
eq_token = ParseToken(lines, pos, EQ_REGEX, '=')
yield eq_token
pos = Skip(lines, eq_token.end, r'\s*')
if SubString(lines, pos, pos + 2) != '[[':
exp_token = ParseToken(lines, pos, REST_OF_LINE_REGEX, 'exp')
yield exp_token
pos = Cursor(exp_token.end.line + 1, 0)
elif found.token_type == '$for':
if prev_token_rstripped:
yield prev_token_rstripped
yield found
id_token = ParseToken(lines, found.end, ID_REGEX, 'id')
yield id_token
pos = Skip(lines, id_token.end, WHITE_SPACE_REGEX)
elif found.token_type == '$range':
if prev_token_rstripped:
yield prev_token_rstripped
yield found
id_token = ParseToken(lines, found.end, ID_REGEX, 'id')
yield id_token
pos = Skip(lines, id_token.end, OPTIONAL_WHITE_SPACES_REGEX)
dots_pos = SkipUntil(lines, pos, DOT_DOT_REGEX, '..')
yield MakeToken(lines, pos, dots_pos, 'exp')
yield MakeToken(lines, dots_pos, dots_pos + 2, '..')
pos = dots_pos + 2
new_pos = Cursor(pos.line + 1, 0)
yield MakeToken(lines, pos, new_pos, 'exp')
pos = new_pos
elif found.token_type == '$':
if prev_token:
yield prev_token
yield found
exp_token = ParseExpTokenInParens(lines, found.end)
yield exp_token
pos = exp_token.end
elif (found.token_type == ']]' or found.token_type == '$if' or
found.token_type == '$elif' or found.token_type == '$else'):
if prev_token_rstripped:
yield prev_token_rstripped
yield found
pos = found.end
else:
if prev_token:
yield prev_token
yield found
pos = found.end
def Tokenize(s):
lines = s.splitlines(True)
return TokenizeLines(lines, Cursor(0, 0))
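# Illustrative sketch, not part of the original file: the token stream
# produced for a tiny Pump snippet. The expected types assume the usual
# TOKEN_TABLE ordering, in which '$var' is tried before the generic '$id'.
def _TokenizeExample():
  types = [t.token_type for t in Tokenize('$var n = 3\nx$(n)\n')]
  assert types == ['$var', 'id', '=', 'exp', 'code', '$', 'exp', 'code']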
class CodeNode:
def __init__(self, atomic_code_list=None):
self.atomic_code = atomic_code_list
class VarNode:
def __init__(self, identifier=None, atomic_code=None):
self.identifier = identifier
self.atomic_code = atomic_code
class RangeNode:
def __init__(self, identifier=None, exp1=None, exp2=None):
self.identifier = identifier
self.exp1 = exp1
self.exp2 = exp2
class ForNode:
def __init__(self, identifier=None, sep=None, code=None):
self.identifier = identifier
self.sep = sep
self.code = code
class ElseNode:
def __init__(self, else_branch=None):
self.else_branch = else_branch
class IfNode:
def __init__(self, exp=None, then_branch=None, else_branch=None):
self.exp = exp
self.then_branch = then_branch
self.else_branch = else_branch
class RawCodeNode:
def __init__(self, token=None):
self.raw_code = token
class LiteralDollarNode:
def __init__(self, token):
self.token = token
class ExpNode:
def __init__(self, token, python_exp):
self.token = token
self.python_exp = python_exp
def PopFront(a_list):
head = a_list[0]
a_list[:1] = []
return head
def PushFront(a_list, elem):
a_list[:0] = [elem]
def PopToken(a_list, token_type=None):
token = PopFront(a_list)
if token_type is not None and token.token_type != token_type:
print 'ERROR: %s expected at %s' % (token_type, token.start)
print 'ERROR: %s found instead' % (token,)
sys.exit(1)
return token
def PeekToken(a_list):
if not a_list:
return None
return a_list[0]
def ParseExpNode(token):
python_exp = re.sub(r'([_A-Za-z]\w*)', r'self.GetValue("\1")', token.value)
return ExpNode(token, python_exp)
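# Illustrative sketch, not part of the original file: ParseExpNode() rewrites
# every identifier in a meta expression into an Env lookup, which is what
# Env.EvalExp() later eval()s with `self` bound to the environment.
def _ParseExpNodeExample():
  tok = Token(Cursor(0, 0), Cursor(0, 5), 'i + 1', 'exp')
  assert ParseExpNode(tok).python_exp == 'self.GetValue("i") + 1'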
def ParseElseNode(tokens):
def Pop(token_type=None):
return PopToken(tokens, token_type)
next = PeekToken(tokens)
if not next:
return None
if next.token_type == '$else':
Pop('$else')
Pop('[[')
code_node = ParseCodeNode(tokens)
Pop(']]')
return code_node
elif next.token_type == '$elif':
Pop('$elif')
exp = Pop('code')
Pop('[[')
code_node = ParseCodeNode(tokens)
Pop(']]')
inner_else_node = ParseElseNode(tokens)
return CodeNode([IfNode(ParseExpNode(exp), code_node, inner_else_node)])
elif not next.value.strip():
Pop('code')
return ParseElseNode(tokens)
else:
return None
def ParseAtomicCodeNode(tokens):
def Pop(token_type=None):
return PopToken(tokens, token_type)
head = PopFront(tokens)
t = head.token_type
if t == 'code':
return RawCodeNode(head)
elif t == '$var':
id_token = Pop('id')
Pop('=')
next = PeekToken(tokens)
if next.token_type == 'exp':
exp_token = Pop()
return VarNode(id_token, ParseExpNode(exp_token))
Pop('[[')
code_node = ParseCodeNode(tokens)
Pop(']]')
return VarNode(id_token, code_node)
elif t == '$for':
id_token = Pop('id')
next_token = PeekToken(tokens)
if next_token.token_type == 'code':
sep_token = next_token
Pop('code')
else:
sep_token = None
Pop('[[')
code_node = ParseCodeNode(tokens)
Pop(']]')
return ForNode(id_token, sep_token, code_node)
elif t == '$if':
exp_token = Pop('code')
Pop('[[')
code_node = ParseCodeNode(tokens)
Pop(']]')
else_node = ParseElseNode(tokens)
return IfNode(ParseExpNode(exp_token), code_node, else_node)
elif t == '$range':
id_token = Pop('id')
exp1_token = Pop('exp')
Pop('..')
exp2_token = Pop('exp')
return RangeNode(id_token, ParseExpNode(exp1_token),
ParseExpNode(exp2_token))
elif t == '$id':
return ParseExpNode(Token(head.start + 1, head.end, head.value[1:], 'id'))
elif t == '$($)':
return LiteralDollarNode(head)
elif t == '$':
exp_token = Pop('exp')
return ParseExpNode(exp_token)
elif t == '[[':
code_node = ParseCodeNode(tokens)
Pop(']]')
return code_node
else:
PushFront(tokens, head)
return None
def ParseCodeNode(tokens):
atomic_code_list = []
while True:
if not tokens:
break
atomic_code_node = ParseAtomicCodeNode(tokens)
if atomic_code_node:
atomic_code_list.append(atomic_code_node)
else:
break
return CodeNode(atomic_code_list)
def Convert(file_path):
s = file(file_path, 'r').read()
tokens = []
for token in Tokenize(s):
tokens.append(token)
code_node = ParseCodeNode(tokens)
return code_node
class Env:
def __init__(self):
self.variables = []
self.ranges = []
def Clone(self):
clone = Env()
clone.variables = self.variables[:]
clone.ranges = self.ranges[:]
return clone
def PushVariable(self, var, value):
# If value looks like an int, store it as an int.
try:
int_value = int(value)
if ('%s' % int_value) == value:
value = int_value
except Exception:
pass
self.variables[:0] = [(var, value)]
def PopVariable(self):
self.variables[:1] = []
def PushRange(self, var, lower, upper):
self.ranges[:0] = [(var, lower, upper)]
def PopRange(self):
self.ranges[:1] = []
def GetValue(self, identifier):
for (var, value) in self.variables:
if identifier == var:
return value
print 'ERROR: meta variable %s is undefined.' % (identifier,)
sys.exit(1)
def EvalExp(self, exp):
try:
result = eval(exp.python_exp)
except Exception, e:
print 'ERROR: caught exception %s: %s' % (e.__class__.__name__, e)
print ('ERROR: failed to evaluate meta expression %s at %s' %
(exp.python_exp, exp.token.start))
sys.exit(1)
return result
def GetRange(self, identifier):
for (var, lower, upper) in self.ranges:
if identifier == var:
return (lower, upper)
print 'ERROR: range %s is undefined.' % (identifier,)
sys.exit(1)
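# Illustrative sketch, not part of the original file: Env keeps variables in
# a stack-like list, so inner bindings shadow outer ones, and PushVariable()
# coerces values that print back identically as integers.
def _EnvExample():
  env = Env()
  env.PushVariable('i', '42')
  assert env.GetValue('i') == 42     # stored as an int, not the string '42'
  env.PushVariable('i', 'abc')
  assert env.GetValue('i') == 'abc'  # the inner binding shadows the outer one
  env.PopVariable()
  assert env.GetValue('i') == 42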
class Output:
def __init__(self):
self.string = ''
def GetLastLine(self):
index = self.string.rfind('\n')
if index < 0:
return ''
return self.string[index + 1:]
def Append(self, s):
self.string += s
def RunAtomicCode(env, node, output):
if isinstance(node, VarNode):
identifier = node.identifier.value.strip()
result = Output()
RunAtomicCode(env.Clone(), node.atomic_code, result)
value = result.string
env.PushVariable(identifier, value)
elif isinstance(node, RangeNode):
identifier = node.identifier.value.strip()
lower = int(env.EvalExp(node.exp1))
upper = int(env.EvalExp(node.exp2))
env.PushRange(identifier, lower, upper)
elif isinstance(node, ForNode):
identifier = node.identifier.value.strip()
if node.sep is None:
sep = ''
else:
sep = node.sep.value
(lower, upper) = env.GetRange(identifier)
for i in range(lower, upper + 1):
new_env = env.Clone()
new_env.PushVariable(identifier, i)
RunCode(new_env, node.code, output)
if i != upper:
output.Append(sep)
elif isinstance(node, RawCodeNode):
output.Append(node.raw_code.value)
elif isinstance(node, IfNode):
cond = env.EvalExp(node.exp)
if cond:
RunCode(env.Clone(), node.then_branch, output)
elif node.else_branch is not None:
RunCode(env.Clone(), node.else_branch, output)
elif isinstance(node, ExpNode):
value = env.EvalExp(node)
output.Append('%s' % (value,))
elif isinstance(node, LiteralDollarNode):
output.Append('$')
elif isinstance(node, CodeNode):
RunCode(env.Clone(), node, output)
else:
    print 'ERROR: unexpected node type:'
    print node
sys.exit(1)
def RunCode(env, code_node, output):
for atomic_code in code_node.atomic_code:
RunAtomicCode(env, atomic_code, output)
def IsComment(cur_line):
return '//' in cur_line
def IsInPreprocessorDirective(prev_lines, cur_line):
if cur_line.lstrip().startswith('#'):
return True
return prev_lines != [] and prev_lines[-1].endswith('\\')
def WrapComment(line, output):
loc = line.find('//')
before_comment = line[:loc].rstrip()
if before_comment == '':
indent = loc
else:
output.append(before_comment)
indent = len(before_comment) - len(before_comment.lstrip())
prefix = indent*' ' + '// '
max_len = 80 - len(prefix)
comment = line[loc + 2:].strip()
segs = [seg for seg in re.split(r'(\w+\W*)', comment) if seg != '']
cur_line = ''
for seg in segs:
if len((cur_line + seg).rstrip()) < max_len:
cur_line += seg
else:
if cur_line.strip() != '':
output.append(prefix + cur_line.rstrip())
cur_line = seg.lstrip()
if cur_line.strip() != '':
output.append(prefix + cur_line.strip())
def WrapCode(line, line_concat, output):
indent = len(line) - len(line.lstrip())
prefix = indent*' ' # Prefix of the current line
max_len = 80 - indent - len(line_concat) # Maximum length of the current line
new_prefix = prefix + 4*' ' # Prefix of a continuation line
new_max_len = max_len - 4 # Maximum length of a continuation line
# Prefers to wrap a line after a ',' or ';'.
segs = [seg for seg in re.split(r'([^,;]+[,;]?)', line.strip()) if seg != '']
cur_line = '' # The current line without leading spaces.
for seg in segs:
# If the line is still too long, wrap at a space.
while cur_line == '' and len(seg.strip()) > max_len:
seg = seg.lstrip()
split_at = seg.rfind(' ', 0, max_len)
output.append(prefix + seg[:split_at].strip() + line_concat)
seg = seg[split_at + 1:]
prefix = new_prefix
max_len = new_max_len
if len((cur_line + seg).rstrip()) < max_len:
cur_line = (cur_line + seg).lstrip()
else:
output.append(prefix + cur_line.rstrip() + line_concat)
prefix = new_prefix
max_len = new_max_len
cur_line = seg.lstrip()
if cur_line.strip() != '':
output.append(prefix + cur_line.strip())
def WrapPreprocessorDirective(line, output):
WrapCode(line, ' \\', output)
def WrapPlainCode(line, output):
WrapCode(line, '', output)
def IsHeaderGuardOrInclude(line):
return (re.match(r'^#(ifndef|define|endif\s*//)\s*[\w_]+\s*$', line) or
re.match(r'^#include\s', line))
def WrapLongLine(line, output):
line = line.rstrip()
if len(line) <= 80:
output.append(line)
elif IsComment(line):
if IsHeaderGuardOrInclude(line):
# The style guide made an exception to allow long header guard lines
# and includes.
output.append(line)
else:
WrapComment(line, output)
  elif IsInPreprocessorDirective(output, line):
if IsHeaderGuardOrInclude(line):
# The style guide made an exception to allow long header guard lines
# and includes.
output.append(line)
else:
      WrapPreprocessorDirective(line, output)
else:
WrapPlainCode(line, output)
def BeautifyCode(string):
lines = string.splitlines()
output = []
for line in lines:
WrapLongLine(line, output)
output2 = [line.rstrip() for line in output]
return '\n'.join(output2) + '\n'
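# Illustrative sketch, not part of the original file: BeautifyCode() wraps
# every line of the generated source to the 80-column limit.
def _BeautifyCodeExample():
  wrapped = BeautifyCode('// ' + 'word ' * 30)
  assert all(len(line) <= 80 for line in wrapped.splitlines())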
def main(argv):
if len(argv) == 1:
print __doc__
sys.exit(1)
file_path = argv[-1]
ast = Convert(file_path)
output = Output()
RunCode(Env(), ast, output)
output_str = BeautifyCode(output.string)
if file_path.endswith('.pump'):
output_file_path = file_path[:-5]
else:
output_file_path = '-'
if output_file_path == '-':
print output_str,
else:
output_file = file(output_file_path, 'w')
output_file.write('// This file was GENERATED by command:\n')
output_file.write('// %s %s\n' %
(os.path.basename(__file__), os.path.basename(file_path)))
output_file.write('// DO NOT EDIT BY HAND!!!\n\n')
output_file.write(output_str)
output_file.close()
if __name__ == '__main__':
main(sys.argv)
|
olexiim/edx-platform | refs/heads/master | common/lib/xmodule/xmodule/modulestore/tests/test_xml.py | 8 | """
Tests around our XML modulestore, including importing
well-formed and not-well-formed XML.
"""
import os.path
import unittest
from glob import glob
from mock import patch
from xmodule.modulestore.xml import XMLModuleStore
from xmodule.modulestore import ModuleStoreEnum
from xmodule.tests import DATA_DIR
from opaque_keys.edx.locations import SlashSeparatedCourseKey
from xmodule.modulestore.tests.test_modulestore import check_has_course_method
def glob_tildes_at_end(path):
"""
    A wrapper for the `glob.glob` function that always returns files
    ending in a tilde (~) at the end of the list of results.
"""
result = glob(path)
with_tildes = [f for f in result if f.endswith("~")]
no_tildes = [f for f in result if not f.endswith("~")]
return no_tildes + with_tildes
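# Illustrative note, not part of the original test: given glob results
# ["a.xml~", "a.xml"], glob_tildes_at_end returns ["a.xml", "a.xml~"], so
# the patched XMLModuleStore encounters each backup file after its source.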
class TestXMLModuleStore(unittest.TestCase):
"""
Test around the XML modulestore
"""
def test_xml_modulestore_type(self):
store = XMLModuleStore(DATA_DIR, course_dirs=[])
self.assertEqual(store.get_modulestore_type(), ModuleStoreEnum.Type.xml)
def test_unicode_chars_in_xml_content(self):
# edX/full/6.002_Spring_2012 has non-ASCII chars, and during
# uniquification of names, would raise a UnicodeError. It no longer does.
# Ensure that there really is a non-ASCII character in the course.
with open(os.path.join(DATA_DIR, "toy/sequential/vertical_sequential.xml")) as xmlf:
xml = xmlf.read()
with self.assertRaises(UnicodeDecodeError):
xml.decode('ascii')
# Load the course, but don't make error modules. This will succeed,
# but will record the errors.
modulestore = XMLModuleStore(DATA_DIR, course_dirs=['toy'], load_error_modules=False)
# Look up the errors during load. There should be none.
errors = modulestore.get_course_errors(SlashSeparatedCourseKey("edX", "toy", "2012_Fall"))
assert errors == []
@patch("xmodule.modulestore.xml.glob.glob", side_effect=glob_tildes_at_end)
def test_tilde_files_ignored(self, _fake_glob):
modulestore = XMLModuleStore(DATA_DIR, course_dirs=['tilde'], load_error_modules=False)
about_location = SlashSeparatedCourseKey('edX', 'tilde', '2012_Fall').make_usage_key(
'about', 'index',
)
about_module = modulestore.get_item(about_location)
self.assertIn("GREEN", about_module.data)
self.assertNotIn("RED", about_module.data)
def test_get_courses_for_wiki(self):
"""
Test the get_courses_for_wiki method
"""
store = XMLModuleStore(DATA_DIR, course_dirs=['toy', 'simple'])
for course in store.get_courses():
course_locations = store.get_courses_for_wiki(course.wiki_slug)
self.assertEqual(len(course_locations), 1)
self.assertIn(course.location.course_key, course_locations)
course_locations = store.get_courses_for_wiki('no_such_wiki')
self.assertEqual(len(course_locations), 0)
# now set toy course to share the wiki with simple course
toy_course = store.get_course(SlashSeparatedCourseKey('edX', 'toy', '2012_Fall'))
toy_course.wiki_slug = 'simple'
course_locations = store.get_courses_for_wiki('toy')
self.assertEqual(len(course_locations), 0)
course_locations = store.get_courses_for_wiki('simple')
self.assertEqual(len(course_locations), 2)
for course_number in ['toy', 'simple']:
self.assertIn(SlashSeparatedCourseKey('edX', course_number, '2012_Fall'), course_locations)
def test_has_course(self):
"""
Test the has_course method
"""
check_has_course_method(
XMLModuleStore(DATA_DIR, course_dirs=['toy', 'simple']),
SlashSeparatedCourseKey('edX', 'toy', '2012_Fall'),
locator_key_fields=SlashSeparatedCourseKey.KEY_FIELDS
)
def test_branch_setting(self):
"""
Test the branch setting context manager
"""
store = XMLModuleStore(DATA_DIR, course_dirs=['toy'])
course = store.get_courses()[0]
# XML store allows published_only branch setting
with store.branch_setting(ModuleStoreEnum.Branch.published_only, course.id):
store.get_item(course.location)
# XML store does NOT allow draft_preferred branch setting
with self.assertRaises(ValueError):
with store.branch_setting(ModuleStoreEnum.Branch.draft_preferred, course.id):
# verify that the above context manager raises a ValueError
pass # pragma: no cover
@patch('xmodule.modulestore.xml.log')
def test_dag_course(self, mock_logging):
"""
Test a course whose structure is not a tree.
"""
store = XMLModuleStore(DATA_DIR, course_dirs=['xml_dag'])
course_key = store.get_courses()[0].id
mock_logging.warning.assert_called_with(
"%s has more than one definition", course_key.make_usage_key('discussion', 'duplicate_def')
)
shared_item_loc = course_key.make_usage_key('html', 'toyhtml')
shared_item = store.get_item(shared_item_loc)
parent = shared_item.get_parent()
self.assertIsNotNone(parent, "get_parent failed to return a value")
parent_loc = course_key.make_usage_key('vertical', 'vertical_test')
self.assertEqual(parent.location, parent_loc)
self.assertIn(shared_item, parent.get_children())
        # ensure it's still a child of the other parent even though it doesn't claim the other parent as its parent
other_parent_loc = course_key.make_usage_key('vertical', 'zeta')
other_parent = store.get_item(other_parent_loc)
        # use `children` rather than `get_children` because the instance returned by get_children != shared_item
self.assertIn(shared_item_loc, other_parent.children)
|
hnoerdli/hussa | refs/heads/master | node_modules/gulp-sass/node_modules/node-sass/node_modules/node-gyp/gyp/tools/graphviz.py | 2679 | #!/usr/bin/env python
# Copyright (c) 2011 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Using the JSON dumped by the dump-dependency-json generator,
generate input suitable for graphviz to render a dependency graph of
targets."""
import collections
import json
import sys
def ParseTarget(target):
target, _, suffix = target.partition('#')
filename, _, target = target.partition(':')
return filename, target, suffix
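# Illustrative sketch, not part of the original tool: dump entries look like
# 'path/to/file.gyp:target#toolset', and the '#toolset' suffix is optional.
def _ParseTargetExample():
  assert ParseTarget('foo/bar.gyp:baz#host') == ('foo/bar.gyp', 'baz', 'host')
  assert ParseTarget('foo/bar.gyp:baz') == ('foo/bar.gyp', 'baz', '')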
def LoadEdges(filename, targets):
"""Load the edges map from the dump file, and filter it to only
  show targets in |targets| and their dependents."""
  f = open(filename)
  edges = json.load(f)
  f.close()
# Copy out only the edges we're interested in from the full edge list.
target_edges = {}
to_visit = targets[:]
while to_visit:
src = to_visit.pop()
if src in target_edges:
continue
target_edges[src] = edges[src]
to_visit.extend(edges[src])
return target_edges
def WriteGraph(edges):
"""Print a graphviz graph to stdout.
|edges| is a map of target to a list of other targets it depends on."""
# Bucket targets by file.
files = collections.defaultdict(list)
for src, dst in edges.items():
build_file, target_name, toolset = ParseTarget(src)
files[build_file].append(src)
print 'digraph D {'
print ' fontsize=8' # Used by subgraphs.
print ' node [fontsize=8]'
# Output nodes by file. We must first write out each node within
# its file grouping before writing out any edges that may refer
# to those nodes.
for filename, targets in files.items():
if len(targets) == 1:
# If there's only one node for this file, simplify
# the display by making it a box without an internal node.
target = targets[0]
build_file, target_name, toolset = ParseTarget(target)
print ' "%s" [shape=box, label="%s\\n%s"]' % (target, filename,
target_name)
else:
# Group multiple nodes together in a subgraph.
print ' subgraph "cluster_%s" {' % filename
print ' label = "%s"' % filename
for target in targets:
build_file, target_name, toolset = ParseTarget(target)
print ' "%s" [label="%s"]' % (target, target_name)
print ' }'
# Now that we've placed all the nodes within subgraphs, output all
# the edges between nodes.
for src, dsts in edges.items():
for dst in dsts:
print ' "%s" -> "%s"' % (src, dst)
print '}'
def main():
if len(sys.argv) < 2:
print >>sys.stderr, __doc__
print >>sys.stderr
print >>sys.stderr, 'usage: %s target1 target2...' % (sys.argv[0])
return 1
edges = LoadEdges('dump.json', sys.argv[1:])
WriteGraph(edges)
return 0
if __name__ == '__main__':
sys.exit(main())
|
KellenSunderland/sockeye | refs/heads/master | test/unit/test_callback.py | 1 | # Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not
# use this file except in compliance with the License. A copy of the License
# is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed on
# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
"""
Tests sockeye.callback.TrainingMonitor optimization logic
"""
import pytest
import numpy as np
import sockeye.callback
import tempfile
test_constants = [('perplexity', np.inf, True,
[{'perplexity': 100.0, '_': 42}, {'perplexity': 50.0}, {'perplexity': 60.0}, {'perplexity': 80.0}],
[{'perplexity': 200.0}, {'perplexity': 100.0}, {'perplexity': 100.001}, {'perplexity': 99.99}],
[True, True, False, True]),
('accuracy', -np.inf, False,
[{'accuracy': 100.0}, {'accuracy': 50.0}, {'accuracy': 60.0}, {'accuracy': 80.0}],
[{'accuracy': 200.0}, {'accuracy': 100.0}, {'accuracy': 100.001}, {'accuracy': 99.99}],
[True, False, False, False])]
class DummyMetric(object):
def __init__(self, metric_dict):
self.metric_dict = metric_dict
def get_name_value(self):
for metric_name, value in self.metric_dict.items():
yield metric_name, value
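# Illustrative note, not part of the original test: DummyMetric mimics the
# get_name_value() surface of an MXNet metric, e.g.
# list(DummyMetric({'perplexity': 100.0}).get_name_value()) evaluates to
# [('perplexity', 100.0)].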
@pytest.mark.parametrize("optimized_metric, initial_best, minimize, train_metrics, eval_metrics, improved_seq",
test_constants)
def test_callback(optimized_metric, initial_best, minimize, train_metrics, eval_metrics, improved_seq):
with tempfile.TemporaryDirectory() as tmpdir:
batch_size = 32
monitor = sockeye.callback.TrainingMonitor(batch_size=batch_size,
output_folder=tmpdir,
optimized_metric=optimized_metric)
assert monitor.optimized_metric == optimized_metric
assert monitor.get_best_validation_score() == initial_best
assert monitor.minimize == minimize
for checkpoint, (train_metric, eval_metric, expected_improved) in enumerate(
zip(train_metrics, eval_metrics, improved_seq), 1):
monitor.checkpoint_callback(checkpoint, DummyMetric(train_metric))
assert len(monitor.metrics) == checkpoint
assert monitor.metrics[-1] == {k + "-train": v for k, v in train_metric.items()}
improved, best_checkpoint = monitor.eval_end_callback(checkpoint, DummyMetric(eval_metric))
assert {k + "-val" for k in eval_metric.keys()} <= monitor.metrics[-1].keys()
assert improved == expected_improved
def test_bleu_requires_checkpoint_decoder():
with pytest.raises(AssertionError), tempfile.TemporaryDirectory() as tmpdir:
sockeye.callback.TrainingMonitor(batch_size=1,
output_folder=tmpdir,
optimized_metric='bleu',
checkpoint_decoder=None)
|
hfp/libxsmm | refs/heads/master | samples/deeplearning/sparse_training/fairseq/fairseq/model_parallel/modules/multihead_attention.py | 1 | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Dict, Optional, Tuple
import torch
import torch.nn.functional as F
from fairseq import utils
from torch import Tensor, nn
from fairseq.incremental_decoding_utils import with_incremental_state
from fairseq.modules.fairseq_dropout import FairseqDropout
try:
from fairseq.model_parallel.megatron.mpu import (
get_cuda_rng_tracker,
get_model_parallel_world_size,
ColumnParallelLinear,
RowParallelLinear,
)
has_megatron_submodule = True
except (ImportError, ModuleNotFoundError):
has_megatron_submodule = False
@with_incremental_state
class ModelParallelMultiheadAttention(nn.Module):
"""Model parallel Multi-headed attention.
This performs the Multi-headed attention over multiple gpus.
See "Megatron-LM: https://arxiv.org/pdf/1909.08053.pdf" for more details.
"""
def __init__(
self,
embed_dim,
num_heads,
kdim=None,
vdim=None,
dropout=0.0,
bias=True,
self_attention=False,
encoder_decoder_attention=False,
):
super().__init__()
if not has_megatron_submodule:
raise ImportError(
'\n\nPlease install the megatron submodule:'
'\n\n git submodule update --init '
'fairseq/model_parallel/megatron'
)
self.embed_dim = embed_dim
self.kdim = kdim if kdim is not None else embed_dim
self.vdim = vdim if vdim is not None else embed_dim
self.qkv_same_dim = self.kdim == embed_dim and self.vdim == embed_dim
self.model_parallel_size = get_model_parallel_world_size()
self.num_heads_partition = num_heads // self.model_parallel_size
assert (
self.num_heads_partition * self.model_parallel_size == num_heads
), "Number of heads must be divisble by model parallel size"
self.dropout_module = FairseqDropout(
dropout, module_name=self.__class__.__name__
)
self.head_dim = embed_dim // num_heads
assert (
self.head_dim * num_heads == self.embed_dim
), "embed_dim must be divisible by num_heads"
self.scaling = self.head_dim ** -0.5
self.self_attention = self_attention
self.encoder_decoder_attention = encoder_decoder_attention
assert not self.self_attention or self.qkv_same_dim, (
"Self-attention requires query, key and value to be of the same size"
)
self.k_proj = ColumnParallelLinear(self.kdim, embed_dim, bias=bias, gather_output=False)
self.v_proj = ColumnParallelLinear(self.vdim, embed_dim, bias=bias, gather_output=False)
self.q_proj = ColumnParallelLinear(embed_dim, embed_dim, bias=bias, gather_output=False)
self.out_proj = RowParallelLinear(embed_dim, embed_dim, bias=bias, input_is_parallel=True)
def forward(
self,
query,
key: Optional[Tensor],
value: Optional[Tensor],
key_padding_mask: Optional[Tensor] = None,
incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None,
static_kv: bool = False,
attn_mask: Optional[Tensor] = None,
**unused_kwargs,
) -> Tuple[Tensor, Optional[Tensor]]:
"""Input shape: Time x Batch x Channel
Args:
key_padding_mask (ByteTensor, optional): mask to exclude
keys that are pads, of shape `(batch, src_len)`, where
padding elements are indicated by 1s.
attn_mask (ByteTensor, optional): typically used to
implement causal attention, where the mask prevents the
attention from looking forward in time (default: None).
"""
tgt_len, bsz, embed_dim = query.size()
assert embed_dim == self.embed_dim
assert list(query.size()) == [tgt_len, bsz, embed_dim]
if incremental_state is not None:
saved_state = self._get_input_buffer(incremental_state)
if saved_state is not None and "prev_key" in saved_state:
# previous time steps are cached - no need to recompute
# key and value if they are static
if static_kv:
assert self.encoder_decoder_attention and not self.self_attention
key = value = None
else:
saved_state = None
if self.self_attention:
q = self.q_proj(query)
k = self.k_proj(query)
v = self.v_proj(query)
elif self.encoder_decoder_attention:
# encoder-decoder attention
q = self.q_proj(query)
if key is None:
assert value is None
k = v = None
else:
k = self.k_proj(key)
v = self.v_proj(key)
else:
assert key is not None and value is not None
q = self.q_proj(query)
k = self.k_proj(key)
v = self.v_proj(value)
q *= self.scaling
q = (
q.contiguous()
.view(tgt_len, bsz * self.num_heads_partition, self.head_dim)
.transpose(0, 1)
)
if k is not None:
k = (
k.contiguous()
.view(-1, bsz * self.num_heads_partition, self.head_dim)
.transpose(0, 1)
)
if v is not None:
v = (
v.contiguous()
.view(-1, bsz * self.num_heads_partition, self.head_dim)
.transpose(0, 1)
)
if saved_state is not None:
# saved states are stored with shape (bsz, num_heads_partition, seq_len, head_dim)
if "prev_key" in saved_state:
_prev_key = saved_state["prev_key"]
assert _prev_key is not None
prev_key = _prev_key.view(bsz * self.num_heads_partition, -1, self.head_dim)
if static_kv:
k = prev_key
else:
assert k is not None
k = torch.cat([prev_key, k], dim=1)
if "prev_value" in saved_state:
_prev_value = saved_state["prev_value"]
assert _prev_value is not None
prev_value = _prev_value.view(bsz * self.num_heads_partition, -1, self.head_dim)
if static_kv:
v = prev_value
else:
assert v is not None
v = torch.cat([prev_value, v], dim=1)
prev_key_padding_mask: Optional[Tensor] = None
if "prev_key_padding_mask" in saved_state:
prev_key_padding_mask = saved_state["prev_key_padding_mask"]
assert k is not None and v is not None
key_padding_mask = ModelParallelMultiheadAttention._append_prev_key_padding_mask(
key_padding_mask=key_padding_mask,
prev_key_padding_mask=prev_key_padding_mask,
batch_size=bsz,
src_len=k.size(1),
static_kv=static_kv,
)
saved_state["prev_key"] = k.view(bsz, self.num_heads_partition, -1, self.head_dim)
saved_state["prev_value"] = v.view(bsz, self.num_heads_partition, -1, self.head_dim)
saved_state["prev_key_padding_mask"] = key_padding_mask
# In this branch incremental_state is never None
assert incremental_state is not None
incremental_state = self._set_input_buffer(incremental_state, saved_state)
assert k is not None
src_len = k.size(1)
# This is part of a workaround to get around fork/join parallelism
# not supporting Optional types.
if key_padding_mask is not None and key_padding_mask.dim() == 0:
key_padding_mask = None
if key_padding_mask is not None:
assert key_padding_mask.size(0) == bsz
assert key_padding_mask.size(1) == src_len
attn_weights = torch.bmm(q, k.transpose(1, 2))
assert list(attn_weights.size()) == [bsz * self.num_heads_partition, tgt_len, src_len]
if attn_mask is not None:
attn_mask = attn_mask.unsqueeze(0)
attn_weights += attn_mask
if key_padding_mask is not None:
# don't attend to padding symbols
attn_weights = attn_weights.view(bsz, self.num_heads_partition, tgt_len, src_len)
attn_weights = attn_weights.masked_fill(
key_padding_mask.unsqueeze(1).unsqueeze(2).to(torch.bool), float("-inf")
)
attn_weights = attn_weights.view(bsz * self.num_heads_partition, tgt_len, src_len)
attn_weights_float = utils.softmax(
attn_weights, dim=-1
)
attn_weights = attn_weights_float.type_as(attn_weights)
with get_cuda_rng_tracker().fork():
attn_probs = self.dropout_module(attn_weights)
assert v is not None
attn = torch.bmm(attn_probs, v)
assert list(attn.size()) == [bsz * self.num_heads_partition, tgt_len, self.head_dim]
embed_dim_partition = embed_dim // self.model_parallel_size
attn = attn.transpose(0, 1).contiguous().view(tgt_len, bsz, embed_dim_partition)
attn = self.out_proj(attn)
        # Return attn_weights as None to keep the return type the same as the
        # single-GPU multihead attention. This will be deprecated.
attn_weights: Optional[Tensor] = None
return attn, attn_weights
@staticmethod
def _append_prev_key_padding_mask(
key_padding_mask: Optional[Tensor],
prev_key_padding_mask: Optional[Tensor],
batch_size: int,
src_len: int,
static_kv: bool,
) -> Optional[Tensor]:
# saved key padding masks have shape (bsz, seq_len)
if prev_key_padding_mask is not None and static_kv:
new_key_padding_mask = prev_key_padding_mask
elif prev_key_padding_mask is not None and key_padding_mask is not None:
new_key_padding_mask = torch.cat(
[prev_key_padding_mask.float(), key_padding_mask.float()], dim=1
)
# During incremental decoding, as the padding token enters and
# leaves the frame, there will be a time when prev or current
# is None
elif prev_key_padding_mask is not None:
filler = torch.zeros(batch_size, src_len - prev_key_padding_mask.size(1))
if prev_key_padding_mask.is_cuda:
filler = filler.cuda()
new_key_padding_mask = torch.cat(
[prev_key_padding_mask.float(), filler.float()], dim=1
)
elif key_padding_mask is not None:
filler = torch.zeros(batch_size, src_len - key_padding_mask.size(1))
if key_padding_mask.is_cuda:
filler = filler.cuda()
new_key_padding_mask = torch.cat(
[filler.float(), key_padding_mask.float()], dim=1
)
else:
new_key_padding_mask = prev_key_padding_mask
return new_key_padding_mask
def reorder_incremental_state(
self, incremental_state: Dict[str, Dict[str, Optional[Tensor]]], new_order
):
"""Reorder buffered internal state (for incremental generation)."""
input_buffer = self._get_input_buffer(incremental_state)
if input_buffer is not None:
for k in input_buffer.keys():
if input_buffer[k] is not None:
input_buffer[k] = input_buffer[k].index_select(0, new_order)
incremental_state = self._set_input_buffer(incremental_state, input_buffer)
return incremental_state
def _get_input_buffer(
self, incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]]
) -> Dict[str, Optional[Tensor]]:
result = self.get_incremental_state(incremental_state, "attn_state")
if result is not None:
return result
else:
empty_result: Dict[str, Optional[Tensor]] = {}
return empty_result
def _set_input_buffer(
self,
incremental_state: Dict[str, Dict[str, Optional[Tensor]]],
buffer: Dict[str, Optional[Tensor]],
):
return self.set_incremental_state(incremental_state, "attn_state", buffer)
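# Illustrative note, not part of the original module: with embed_dim=1024,
# num_heads=16, and a model-parallel world size of 4, each rank owns
# num_heads_partition = 16/4 = 4 heads, the column-parallel q/k/v
# projections each emit 1024/4 = 256 features per rank, and the
# row-parallel out_proj reduces the partial outputs back to the full
# 1024-dimensional embedding across ranks.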
|
benhc123/cjdns | refs/heads/master | node_build/dependencies/libuv/build/gyp/test/same-rule-output-file-name/src/touch.py | 679 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import sys
f = open(sys.argv[1], 'w+')
f.write('Hello from touch.py\n')
f.close()
|
mm-s/bitcoin | refs/heads/master | test/functional/combine_logs.py | 69 | #!/usr/bin/env python3
"""Combine logs from multiple bitcoin nodes as well as the test_framework log.
This streams the combined log output to stdout. Use combine_logs.py > outputfile
to write to an outputfile."""
import argparse
from collections import defaultdict, namedtuple
import heapq
import itertools
import os
import re
import sys
# Matches on the date format at the start of the log event
TIMESTAMP_PATTERN = re.compile(r"^\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}\.\d{6}")
LogEvent = namedtuple('LogEvent', ['timestamp', 'source', 'event'])
def main():
"""Main function. Parses args, reads the log files and renders them as text or html."""
parser = argparse.ArgumentParser(usage='%(prog)s [options] <test temporary directory>', description=__doc__)
parser.add_argument('-c', '--color', dest='color', action='store_true', help='outputs the combined log with events colored by source (requires posix terminal colors. Use less -r for viewing)')
parser.add_argument('--html', dest='html', action='store_true', help='outputs the combined log as html. Requires jinja2. pip install jinja2')
args, unknown_args = parser.parse_known_args()
if args.color and os.name != 'posix':
print("Color output requires posix terminal colors.")
sys.exit(1)
if args.html and args.color:
print("Only one out of --color or --html should be specified")
sys.exit(1)
# There should only be one unknown argument - the path of the temporary test directory
if len(unknown_args) != 1:
print("Unexpected arguments" + str(unknown_args))
sys.exit(1)
log_events = read_logs(unknown_args[0])
print_logs(log_events, color=args.color, html=args.html)
def read_logs(tmp_dir):
"""Reads log files.
Delegates to generator function get_log_events() to provide individual log events
for each of the input log files."""
files = [("test", "%s/test_framework.log" % tmp_dir)]
for i in itertools.count():
logfile = "{}/node{}/regtest/debug.log".format(tmp_dir, i)
if not os.path.isfile(logfile):
break
files.append(("node%d" % i, logfile))
return heapq.merge(*[get_log_events(source, f) for source, f in files])
def get_log_events(source, logfile):
"""Generator function that returns individual log events.
Log events may be split over multiple lines. We use the timestamp
regex match as the marker for a new log event."""
try:
with open(logfile, 'r') as infile:
event = ''
timestamp = ''
for line in infile:
# skip blank lines
if line == '\n':
continue
# if this line has a timestamp, it's the start of a new log event.
time_match = TIMESTAMP_PATTERN.match(line)
if time_match:
if event:
yield LogEvent(timestamp=timestamp, source=source, event=event.rstrip())
event = line
timestamp = time_match.group()
# if it doesn't have a timestamp, it's a continuation line of the previous log.
else:
event += "\n" + line
# Flush the final event
yield LogEvent(timestamp=timestamp, source=source, event=event.rstrip())
except FileNotFoundError:
print("File %s could not be opened. Continuing without it." % logfile, file=sys.stderr)
def print_logs(log_events, color=False, html=False):
"""Renders the iterator of log events into text or html."""
if not html:
colors = defaultdict(lambda: '')
if color:
colors["test"] = "\033[0;36m" # CYAN
colors["node0"] = "\033[0;34m" # BLUE
colors["node1"] = "\033[0;32m" # GREEN
colors["node2"] = "\033[0;31m" # RED
colors["node3"] = "\033[0;33m" # YELLOW
colors["reset"] = "\033[0m" # Reset font color
for event in log_events:
print("{0} {1: <5} {2} {3}".format(colors[event.source.rstrip()], event.source, event.event, colors["reset"]))
else:
try:
import jinja2
except ImportError:
print("jinja2 not found. Try `pip install jinja2`")
sys.exit(1)
print(jinja2.Environment(loader=jinja2.FileSystemLoader('./'))
.get_template('combined_log_template.html')
.render(title="Combined Logs from testcase", log_events=[event._asdict() for event in log_events]))
if __name__ == '__main__':
main()
|
camradal/ansible | refs/heads/devel | lib/ansible/plugins/terminal/sros.py | 24 | #
# (c) 2016 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import re
import json
from ansible.plugins.terminal import TerminalBase
from ansible.errors import AnsibleConnectionFailure
class TerminalModule(TerminalBase):
terminal_prompts_re = [
re.compile(r"[\r\n]?[\w+\-\.:\/\[\]]+(?:\([^\)]+\)){,3}(?:>|#) ?$"),
re.compile(r"\[\w+\@[\w\-\.]+(?: [^\]])\] ?[>#\$] ?$")
]
terminal_errors_re = [
re.compile(r"^\r\nError:"),
]
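    # Illustrative note, not part of the original plugin: the first prompt
    # pattern matches CLI prompts that end in '>' or '#' (e.g. 'A:sros1# '),
    # optionally preceded by up to three parenthesized context markers.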
supports_multiplexing = False
def on_open_shell(self):
try:
self._exec_cli_command('environment no more')
except AnsibleConnectionFailure:
raise AnsibleConnectionFailure('unable to set terminal parameters')
|
nicolaoun/NS3-AM-Proto-Simulation | refs/heads/master | src/point-to-point-layout/bindings/callbacks_list.py | 240 | callback_classes = [
['void', 'ns3::Ptr<ns3::Socket>', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['void', 'ns3::Ptr<ns3::Socket>', 'unsigned int', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['void', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['void', 'ns3::Ptr<ns3::Socket>', 'ns3::Address const&', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['bool', 'ns3::Ptr<ns3::Socket>', 'ns3::Address const&', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['void', 'ns3::Ptr<ns3::NetDevice>', 'ns3::Ptr<ns3::Packet const>', 'unsigned short', 'ns3::Address const&', 'ns3::Address const&', 'ns3::NetDevice::PacketType', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
['unsigned char', 'ns3::Ptr<ns3::QueueItem>', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty', 'ns3::empty'],
]
|
thdtjsdn/FreeCAD | refs/heads/master | src/Mod/Cam/Init.py | 55 | # FreeCAD init script of the Cam module
# (c) 2007 Juergen Riegel
#***************************************************************************
#* (c) Juergen Riegel (juergen.riegel@web.de) 2002 *
#* *
#* This file is Cam of the FreeCAD CAx development system. *
#* *
#* This program is free software; you can redistribute it and/or modify *
#* it under the terms of the GNU Lesser General Public License (LGPL) *
#* as published by the Free Software Foundation; either version 2 of *
#* the License, or (at your option) any later version. *
#* for detail see the LICENCE text file. *
#* *
#* FreeCAD is distributed in the hope that it will be useful, *
#* but WITHOUT ANY WARRANTY; without even the implied warranty of *
#* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
#* GNU Lesser General Public License for more details. *
#* *
#* You should have received a copy of the GNU Library General Public *
#* License along with FreeCAD; if not, write to the Free Software *
#* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 *
#* USA *
#* *
#* Juergen Riegel 2007 *
#***************************************************************************/
|
Pakoach/Sick-Beard-Animes | refs/heads/master | cherrypy/process/__init__.py | 36 | """Site container for an HTTP server.
A Web Site Process Bus object is used to connect applications, servers,
and frameworks with site-wide services such as daemonization, process
reload, signal handling, drop privileges, PID file management, logging
for all of these, and many more.
The 'plugins' module defines a few abstract and concrete services for
use with the bus. Some use tool-specific channels; see the documentation
for each class.
"""
from cherrypy.process.wspbus import bus
from cherrypy.process import plugins, servers
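# Illustrative sketch, not part of the original module: a minimal standalone
# use of the bus exported above. SignalHandler is one of the concrete
# services defined in the 'plugins' module.
#
# handler = plugins.SignalHandler(bus)
# handler.subscribe()
# bus.start()
# bus.block()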
|
kmoocdev2/edx-platform | refs/heads/real_2019 | lms/djangoapps/bulk_email/migrations/0002_data__load_course_email_template.py | 87 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.core.management import call_command
from django.db import migrations, models
def forwards(apps, schema_editor):
"""Load data from the fixture"""
CourseEmailTemplate = apps.get_model("bulk_email", "CourseEmailTemplate")
if not CourseEmailTemplate.objects.exists():
call_command("loaddata", "course_email_template.json")
def backwards(apps, schema_editor):
CourseEmailTemplate = apps.get_model("bulk_email", "CourseEmailTemplate")
CourseEmailTemplate.objects.all().delete()
class Migration(migrations.Migration):
dependencies = [
('bulk_email', '0001_initial'),
]
operations = [
migrations.RunPython(forwards, backwards),
]
|
klonage/nlt-gcs | refs/heads/master | packages/IronPython.StdLib.2.7.4/content/Lib/encodings/utf_16_be.py | 103 | """ Python 'utf-16-be' Codec
Written by Marc-Andre Lemburg (mal@lemburg.com).
(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
"""
import codecs
### Codec APIs
encode = codecs.utf_16_be_encode
def decode(input, errors='strict'):
return codecs.utf_16_be_decode(input, errors, True)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.utf_16_be_encode(input, self.errors)[0]
class IncrementalDecoder(codecs.BufferedIncrementalDecoder):
_buffer_decode = codecs.utf_16_be_decode
class StreamWriter(codecs.StreamWriter):
encode = codecs.utf_16_be_encode
class StreamReader(codecs.StreamReader):
decode = codecs.utf_16_be_decode
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='utf-16-be',
encode=encode,
decode=decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
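### Usage sketch (not part of the original module)
# Python normally reaches this codec through the registry, e.g.
# u'abc'.encode('utf-16-be'); the module-level pair can also be called
# directly:
def _example_roundtrip():
    assert encode(u'abc')[0] == b'\x00a\x00b\x00c'
    assert decode(b'\x00a\x00b\x00c')[0] == u'abc'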
|
praba230890/PYPOWER | refs/heads/master | pypower/case57.py | 2 | # Copyright (c) 1996-2015 PSERC. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
"""Power flow data for IEEE 57 bus test case.
"""
from numpy import array
def case57():
"""Power flow data for IEEE 57 bus test case.
Please see L{caseformat} for details on the case file format.
This data was converted from IEEE Common Data Format
(ieee57cdf.txt) on 20-Sep-2004 by cdf2matp, rev. 1.11
Converted from IEEE CDF file from:
U{http://www.ee.washington.edu/research/pstca/}
Manually modified C{Qmax}, C{Qmin} on generator 1 to 200, -140,
respectively.
08/25/93 UW ARCHIVE 100.0 1961 W IEEE 57 Bus Test Case
@return: Power flow data for IEEE 57 bus test case.
"""
ppc = {"version": '2'}
##----- Power Flow Data -----##
## system MVA base
ppc["baseMVA"] = 100.0
## bus data
# bus_i type Pd Qd Gs Bs area Vm Va baseKV zone Vmax Vmin
ppc["bus"] = array([
[1, 3, 55, 17, 0, 0, 1, 1.04, 0, 0, 1, 1.06, 0.94],
[2, 2, 3, 88, 0, 0, 1, 1.01, -1.18, 0, 1, 1.06, 0.94],
[3, 2, 41, 21, 0, 0, 1, 0.985, -5.97, 0, 1, 1.06, 0.94],
[4, 1, 0, 0, 0, 0, 1, 0.981, -7.32, 0, 1, 1.06, 0.94],
[5, 1, 13, 4, 0, 0, 1, 0.976, -8.52, 0, 1, 1.06, 0.94],
[6, 2, 75, 2, 0, 0, 1, 0.98, -8.65, 0, 1, 1.06, 0.94],
[7, 1, 0, 0, 0, 0, 1, 0.984, -7.58, 0, 1, 1.06, 0.94],
[8, 2, 150, 22, 0, 0, 1, 1.005, -4.45, 0, 1, 1.06, 0.94],
[9, 2, 121, 26, 0, 0, 1, 0.98, -9.56, 0, 1, 1.06, 0.94],
[10, 1, 5, 2, 0, 0, 1, 0.986, -11.43, 0, 1, 1.06, 0.94],
[11, 1, 0, 0, 0, 0, 1, 0.974, -10.17, 0, 1, 1.06, 0.94],
[12, 2, 377, 24, 0, 0, 1, 1.015, -10.46, 0, 1, 1.06, 0.94],
[13, 1, 18, 2.3, 0, 0, 1, 0.979, -9.79, 0, 1, 1.06, 0.94],
[14, 1, 10.5, 5.3, 0, 0, 1, 0.97, -9.33, 0, 1, 1.06, 0.94],
[15, 1, 22, 5, 0, 0, 1, 0.988, -7.18, 0, 1, 1.06, 0.94],
[16, 1, 43, 3, 0, 0, 1, 1.013, -8.85, 0, 1, 1.06, 0.94],
[17, 1, 42, 8, 0, 0, 1, 1.017, -5.39, 0, 1, 1.06, 0.94],
[18, 1, 27.2, 9.8, 0, 10, 1, 1.001, -11.71, 0, 1, 1.06, 0.94],
[19, 1, 3.3, 0.6, 0, 0, 1, 0.97, -13.2, 0, 1, 1.06, 0.94],
[20, 1, 2.3, 1, 0, 0, 1, 0.964, -13.41, 0, 1, 1.06, 0.94],
[21, 1, 0, 0, 0, 0, 1, 1.008, -12.89, 0, 1, 1.06, 0.94],
[22, 1, 0, 0, 0, 0, 1, 1.01, -12.84, 0, 1, 1.06, 0.94],
[23, 1, 6.3, 2.1, 0, 0, 1, 1.008, -12.91, 0, 1, 1.06, 0.94],
[24, 1, 0, 0, 0, 0, 1, 0.999, -13.25, 0, 1, 1.06, 0.94],
[25, 1, 6.3, 3.2, 0, 5.9, 1, 0.982, -18.13, 0, 1, 1.06, 0.94],
[26, 1, 0, 0, 0, 0, 1, 0.959, -12.95, 0, 1, 1.06, 0.94],
[27, 1, 9.3, 0.5, 0, 0, 1, 0.982, -11.48, 0, 1, 1.06, 0.94],
[28, 1, 4.6, 2.3, 0, 0, 1, 0.997, -10.45, 0, 1, 1.06, 0.94],
[29, 1, 17, 2.6, 0, 0, 1, 1.01, -9.75, 0, 1, 1.06, 0.94],
[30, 1, 3.6, 1.8, 0, 0, 1, 0.962, -18.68, 0, 1, 1.06, 0.94],
[31, 1, 5.8, 2.9, 0, 0, 1, 0.936, -19.34, 0, 1, 1.06, 0.94],
[32, 1, 1.6, 0.8, 0, 0, 1, 0.949, -18.46, 0, 1, 1.06, 0.94],
[33, 1, 3.8, 1.9, 0, 0, 1, 0.947, -18.5, 0, 1, 1.06, 0.94],
[34, 1, 0, 0, 0, 0, 1, 0.959, -14.1, 0, 1, 1.06, 0.94],
[35, 1, 6, 3, 0, 0, 1, 0.966, -13.86, 0, 1, 1.06, 0.94],
[36, 1, 0, 0, 0, 0, 1, 0.976, -13.59, 0, 1, 1.06, 0.94],
[37, 1, 0, 0, 0, 0, 1, 0.985, -13.41, 0, 1, 1.06, 0.94],
[38, 1, 14, 7, 0, 0, 1, 1.013, -12.71, 0, 1, 1.06, 0.94],
[39, 1, 0, 0, 0, 0, 1, 0.983, -13.46, 0, 1, 1.06, 0.94],
[40, 1, 0, 0, 0, 0, 1, 0.973, -13.62, 0, 1, 1.06, 0.94],
[41, 1, 6.3, 3, 0, 0, 1, 0.996, -14.05, 0, 1, 1.06, 0.94],
[42, 1, 7.1, 4.4, 0, 0, 1, 0.966, -15.5, 0, 1, 1.06, 0.94],
[43, 1, 2, 1, 0, 0, 1, 1.01, -11.33, 0, 1, 1.06, 0.94],
[44, 1, 12, 1.8, 0, 0, 1, 1.017, -11.86, 0, 1, 1.06, 0.94],
[45, 1, 0, 0, 0, 0, 1, 1.036, -9.25, 0, 1, 1.06, 0.94],
[46, 1, 0, 0, 0, 0, 1, 1.05, -11.89, 0, 1, 1.06, 0.94],
[47, 1, 29.7, 11.6, 0, 0, 1, 1.033, -12.49, 0, 1, 1.06, 0.94],
[48, 1, 0, 0, 0, 0, 1, 1.027, -12.59, 0, 1, 1.06, 0.94],
[49, 1, 18, 8.5, 0, 0, 1, 1.036, -12.92, 0, 1, 1.06, 0.94],
[50, 1, 21, 10.5, 0, 0, 1, 1.023, -13.39, 0, 1, 1.06, 0.94],
[51, 1, 18, 5.3, 0, 0, 1, 1.052, -12.52, 0, 1, 1.06, 0.94],
[52, 1, 4.9, 2.2, 0, 0, 1, 0.98, -11.47, 0, 1, 1.06, 0.94],
[53, 1, 20, 10, 0, 6.3, 1, 0.971, -12.23, 0, 1, 1.06, 0.94],
[54, 1, 4.1, 1.4, 0, 0, 1, 0.996, -11.69, 0, 1, 1.06, 0.94],
[55, 1, 6.8, 3.4, 0, 0, 1, 1.031, -10.78, 0, 1, 1.06, 0.94],
[56, 1, 7.6, 2.2, 0, 0, 1, 0.968, -16.04, 0, 1, 1.06, 0.94],
[57, 1, 6.7, 2, 0, 0, 1, 0.965, -16.56, 0, 1, 1.06, 0.94]
])
## generator data
# bus, Pg, Qg, Qmax, Qmin, Vg, mBase, status, Pmax, Pmin, Pc1, Pc2,
# Qc1min, Qc1max, Qc2min, Qc2max, ramp_agc, ramp_10, ramp_30, ramp_q, apf
ppc["gen"] = array([
[1, 128.9, -16.1, 200, -140, 1.04, 100, 1, 575.88, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[2, 0, -0.8, 50, -17, 1.01, 100, 1, 100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[3, 40, -1, 60, -10, 0.985, 100, 1, 140, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[6, 0, 0.8, 25, -8, 0.98, 100, 1, 100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[8, 450, 62.1, 200, -140, 1.005, 100, 1, 550, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[9, 0, 2.2, 9, -3, 0.98, 100, 1, 100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[12, 310, 128.5, 155, -150, 1.015, 100, 1, 410, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
])
## branch data
# fbus, tbus, r, x, b, rateA, rateB, rateC, ratio, angle, status, angmin, angmax
ppc["branch"] = array([
[1, 2, 0.0083, 0.028, 0.129, 9900, 0, 0, 0, 0, 1, -360, 360],
[2, 3, 0.0298, 0.085, 0.0818, 9900, 0, 0, 0, 0, 1, -360, 360],
[3, 4, 0.0112, 0.0366, 0.038, 9900, 0, 0, 0, 0, 1, -360, 360],
[4, 5, 0.0625, 0.132, 0.0258, 9900, 0, 0, 0, 0, 1, -360, 360],
[4, 6, 0.043, 0.148, 0.0348, 9900, 0, 0, 0, 0, 1, -360, 360],
[6, 7, 0.02, 0.102, 0.0276, 9900, 0, 0, 0, 0, 1, -360, 360],
[6, 8, 0.0339, 0.173, 0.047, 9900, 0, 0, 0, 0, 1, -360, 360],
[8, 9, 0.0099, 0.0505, 0.0548, 9900, 0, 0, 0, 0, 1, -360, 360],
[9, 10, 0.0369, 0.1679, 0.044, 9900, 0, 0, 0, 0, 1, -360, 360],
[9, 11, 0.0258, 0.0848, 0.0218, 9900, 0, 0, 0, 0, 1, -360, 360],
[9, 12, 0.0648, 0.295, 0.0772, 9900, 0, 0, 0, 0, 1, -360, 360],
[9, 13, 0.0481, 0.158, 0.0406, 9900, 0, 0, 0, 0, 1, -360, 360],
[13, 14, 0.0132, 0.0434, 0.011, 9900, 0, 0, 0, 0, 1, -360, 360],
[13, 15, 0.0269, 0.0869, 0.023, 9900, 0, 0, 0, 0, 1, -360, 360],
[1, 15, 0.0178, 0.091, 0.0988, 9900, 0, 0, 0, 0, 1, -360, 360],
[1, 16, 0.0454, 0.206, 0.0546, 9900, 0, 0, 0, 0, 1, -360, 360],
[1, 17, 0.0238, 0.108, 0.0286, 9900, 0, 0, 0, 0, 1, -360, 360],
[3, 15, 0.0162, 0.053, 0.0544, 9900, 0, 0, 0, 0, 1, -360, 360],
[4, 18, 0, 0.555, 0, 9900, 0, 0, 0.97, 0, 1, -360, 360],
[4, 18, 0, 0.43, 0, 9900, 0, 0, 0.978, 0, 1, -360, 360],
[5, 6, 0.0302, 0.0641, 0.0124, 9900, 0, 0, 0, 0, 1, -360, 360],
[7, 8, 0.0139, 0.0712, 0.0194, 9900, 0, 0, 0, 0, 1, -360, 360],
[10, 12, 0.0277, 0.1262, 0.0328, 9900, 0, 0, 0, 0, 1, -360, 360],
[11, 13, 0.0223, 0.0732, 0.0188, 9900, 0, 0, 0, 0, 1, -360, 360],
[12, 13, 0.0178, 0.058, 0.0604, 9900, 0, 0, 0, 0, 1, -360, 360],
[12, 16, 0.018, 0.0813, 0.0216, 9900, 0, 0, 0, 0, 1, -360, 360],
[12, 17, 0.0397, 0.179, 0.0476, 9900, 0, 0, 0, 0, 1, -360, 360],
[14, 15, 0.0171, 0.0547, 0.0148, 9900, 0, 0, 0, 0, 1, -360, 360],
[18, 19, 0.461, 0.685, 0, 9900, 0, 0, 0, 0, 1, -360, 360],
[19, 20, 0.283, 0.434, 0, 9900, 0, 0, 0, 0, 1, -360, 360],
[21, 20, 0, 0.7767, 0, 9900, 0, 0, 1.043, 0, 1, -360, 360],
[21, 22, 0.0736, 0.117, 0, 9900, 0, 0, 0, 0, 1, -360, 360],
[22, 23, 0.0099, 0.0152, 0, 9900, 0, 0, 0, 0, 1, -360, 360],
[23, 24, 0.166, 0.256, 0.0084, 9900, 0, 0, 0, 0, 1, -360, 360],
[24, 25, 0, 1.182, 0, 9900, 0, 0, 1, 0, 1, -360, 360],
[24, 25, 0, 1.23, 0, 9900, 0, 0, 1, 0, 1, -360, 360],
[24, 26, 0, 0.0473, 0, 9900, 0, 0, 1.043, 0, 1, -360, 360],
[26, 27, 0.165, 0.254, 0, 9900, 0, 0, 0, 0, 1, -360, 360],
[27, 28, 0.0618, 0.0954, 0, 9900, 0, 0, 0, 0, 1, -360, 360],
[28, 29, 0.0418, 0.0587, 0, 9900, 0, 0, 0, 0, 1, -360, 360],
[7, 29, 0, 0.0648, 0, 9900, 0, 0, 0.967, 0, 1, -360, 360],
[25, 30, 0.135, 0.202, 0, 9900, 0, 0, 0, 0, 1, -360, 360],
[30, 31, 0.326, 0.497, 0, 9900, 0, 0, 0, 0, 1, -360, 360],
[31, 32, 0.507, 0.755, 0, 9900, 0, 0, 0, 0, 1, -360, 360],
[32, 33, 0.0392, 0.036, 0, 9900, 0, 0, 0, 0, 1, -360, 360],
[34, 32, 0, 0.953, 0, 9900, 0, 0, 0.975, 0, 1, -360, 360],
[34, 35, 0.052, 0.078, 0.0032, 9900, 0, 0, 0, 0, 1, -360, 360],
[35, 36, 0.043, 0.0537, 0.0016, 9900, 0, 0, 0, 0, 1, -360, 360],
[36, 37, 0.029, 0.0366, 0, 9900, 0, 0, 0, 0, 1, -360, 360],
[37, 38, 0.0651, 0.1009, 0.002, 9900, 0, 0, 0, 0, 1, -360, 360],
[37, 39, 0.0239, 0.0379, 0, 9900, 0, 0, 0, 0, 1, -360, 360],
[36, 40, 0.03, 0.0466, 0, 9900, 0, 0, 0, 0, 1, -360, 360],
[22, 38, 0.0192, 0.0295, 0, 9900, 0, 0, 0, 0, 1, -360, 360],
[11, 41, 0, 0.749, 0, 9900, 0, 0, 0.955, 0, 1, -360, 360],
[41, 42, 0.207, 0.352, 0, 9900, 0, 0, 0, 0, 1, -360, 360],
[41, 43, 0, 0.412, 0, 9900, 0, 0, 0, 0, 1, -360, 360],
[38, 44, 0.0289, 0.0585, 0.002, 9900, 0, 0, 0, 0, 1, -360, 360],
[15, 45, 0, 0.1042, 0, 9900, 0, 0, 0.955, 0, 1, -360, 360],
[14, 46, 0, 0.0735, 0, 9900, 0, 0, 0.9, 0, 1, -360, 360],
[46, 47, 0.023, 0.068, 0.0032, 9900, 0, 0, 0, 0, 1, -360, 360],
[47, 48, 0.0182, 0.0233, 0, 9900, 0, 0, 0, 0, 1, -360, 360],
[48, 49, 0.0834, 0.129, 0.0048, 9900, 0, 0, 0, 0, 1, -360, 360],
[49, 50, 0.0801, 0.128, 0, 9900, 0, 0, 0, 0, 1, -360, 360],
[50, 51, 0.1386, 0.22, 0, 9900, 0, 0, 0, 0, 1, -360, 360],
[10, 51, 0, 0.0712, 0, 9900, 0, 0, 0.93, 0, 1, -360, 360],
[13, 49, 0, 0.191, 0, 9900, 0, 0, 0.895, 0, 1, -360, 360],
[29, 52, 0.1442, 0.187, 0, 9900, 0, 0, 0, 0, 1, -360, 360],
[52, 53, 0.0762, 0.0984, 0, 9900, 0, 0, 0, 0, 1, -360, 360],
[53, 54, 0.1878, 0.232, 0, 9900, 0, 0, 0, 0, 1, -360, 360],
[54, 55, 0.1732, 0.2265, 0, 9900, 0, 0, 0, 0, 1, -360, 360],
[11, 43, 0, 0.153, 0, 9900, 0, 0, 0.958, 0, 1, -360, 360],
[44, 45, 0.0624, 0.1242, 0.004, 9900, 0, 0, 0, 0, 1, -360, 360],
[40, 56, 0, 1.195, 0, 9900, 0, 0, 0.958, 0, 1, -360, 360],
[56, 41, 0.553, 0.549, 0, 9900, 0, 0, 0, 0, 1, -360, 360],
[56, 42, 0.2125, 0.354, 0, 9900, 0, 0, 0, 0, 1, -360, 360],
[39, 57, 0, 1.355, 0, 9900, 0, 0, 0.98, 0, 1, -360, 360],
[57, 56, 0.174, 0.26, 0, 9900, 0, 0, 0, 0, 1, -360, 360],
[38, 49, 0.115, 0.177, 0.003, 9900, 0, 0, 0, 0, 1, -360, 360],
[38, 48, 0.0312, 0.0482, 0, 9900, 0, 0, 0, 0, 1, -360, 360],
[9, 55, 0, 0.1205, 0, 9900, 0, 0, 0.94, 0, 1, -360, 360]
])
##----- OPF Data -----##
## generator cost data
# 1 startup shutdown n x1 y1 ... xn yn
# 2 startup shutdown n c(n-1) ... c0
ppc["gencost"] = array([
[2, 0, 0, 3, 0.0775795, 20, 0],
[2, 0, 0, 3, 0.01, 40, 0],
[2, 0, 0, 3, 0.25, 20, 0],
[2, 0, 0, 3, 0.01, 40, 0],
[2, 0, 0, 3, 0.0222222, 20, 0],
[2, 0, 0, 3, 0.01, 40, 0],
[2, 0, 0, 3, 0.0322581, 20, 0]
])
return ppc
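def _example_runpf():  # pragma: no cover
    """Illustrative sketch, not part of the original case file: one way to
    solve this case with PYPOWER, assuming the top-level ``runpf`` API is
    available as in other PYPOWER examples.
    """
    from pypower.api import runpf
    results, success = runpf(case57())
    return results, success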
|
AscSecTeam/knackered-runner | refs/heads/master | Knackered-Backend/ChartGenerator.py | 1 | # this class takes aggregated check data and generates an SVG chart using pygal
import pygal
import os
import shutil
class ChartGenerator:
def __init__(self, location):
self.config = pygal.Config()
self.custom_config(self.config)
self.chart_location = location + 'chart.svg'
self.backup_location = location + 'chart_files/backups/'
def custom_config(self, config):
config.show_legend = False
config.human_readable = True
config.fill = True
config.label_font_size = 24
config.major_label_font_size = 24
def generate_chart(self, check_round, teams):
# first, set the title dynamically
self.config.title = 'Scoring as of round ' + str(check_round)
# create a chart object with special settings declared in customConfig()
chart = pygal.Bar(self.config)
# add each team's score to chart
for team in teams:
bar_count = team.id
score = team.score
chart.add('Team ' + str(bar_count) + ' score', [{'value': score, 'label': 'Team ' + str(bar_count)}])
self.make_chart_backup(check_round)
# we have a chart! put it somewhere accessible.
chart.render_to_file(self.chart_location)
def make_chart_backup(self, check_round):
# create a backup directory
if not os.path.exists(self.backup_location):
os.makedirs(self.backup_location)
# move current chart to backups
if os.path.isfile(self.chart_location):
shutil.move(self.chart_location, self.backup_location + 'chart_' + str(check_round) + '.svg')
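# Minimal usage sketch (the `teams` iterable of objects with `id` and
# `score` attributes is a hypothetical stand-in for the aggregated check
# data mentioned above):
#
#   generator = ChartGenerator('/var/www/scoreboard/')
#   generator.generate_chart(check_round=3, teams=teams)
#
# Each call backs up the previous chart.svg before rendering the new one.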
|
sophilabs/pyuy | refs/heads/master | pyuy/main/cms_plugins.py | 2 | from cms.plugin_base import CMSPluginBase
from cms.plugin_pool import plugin_pool
from django.utils.translation import ugettext_lazy as _
from models import MenuPlugin
class Menu(CMSPluginBase):
model = MenuPlugin
name = _("Menu")
render_template = "menu.html"
def render(self, context, instance, placeholder):
if instance.url:
link = instance.url
elif instance.page:
link = instance.page.get_absolute_url()
else:
link = ""
user = context.get('user')
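# Condition codes, as inferred from the checks below: 'B' shows the item
# to everybody, 'A' only to authenticated users, 'U' only to anonymous
# (unauthenticated) users.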
show = instance.condition == 'B' or \
(instance.condition == 'A' and user.is_authenticated()) or \
(instance.condition == 'U' and not user.is_authenticated())
context.update({
'show': show,
'title': instance.title,
'link': link,
'class': instance.css_class,
'target':instance.target,
'placeholder': placeholder,
'object': instance
})
return context
plugin_pool.register_plugin(Menu) |
adobe/chromium | refs/heads/master | chrome/browser/resources/web_dev_style/css_checker.py | 24 | # Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Presubmit script for Chromium WebUI resources.
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
for more details about the presubmit API built into gcl/git cl, and see
http://www.chromium.org/developers/web-development-style-guide for the rules
we're checking against here.
"""
# TODO(dbeam): Real CSS parser? pycss? http://code.google.com/p/pycss/
class CSSChecker(object):
def __init__(self, input_api, output_api, file_filter=None):
self.input_api = input_api
self.output_api = output_api
self.file_filter = file_filter
def RunChecks(self):
# We use this a lot, so make a nickname variable.
re = self.input_api.re
def _collapseable_hex(s):
return (len(s) == 6 and s[0] == s[1] and s[2] == s[3] and s[4] == s[5])
def _is_gray(s):
return s[0] == s[1] == s[2] if len(s) == 3 else s[0:2] == s[2:4] == s[4:6]
def _remove_all(s):
return _remove_grit(_remove_ats(_remove_comments(s)))
def _remove_ats(s):
return re.sub(re.compile(r'@\w+.*?{(.*{.*?})+.*?}', re.DOTALL), '\\1', s)
def _remove_comments(s):
return re.sub(re.compile(r'/\*.*?\*/', re.DOTALL), '', s)
def _remove_grit(s):
grit_reg = r'<if[^>]+>.*?<\s*/\s*if[^>]*>|<include[^>]+>'
return re.sub(re.compile(grit_reg, re.DOTALL), '', s)
def _rgb_from_hex(s):
if len(s) == 3:
r, g, b = s[0] + s[0], s[1] + s[1], s[2] + s[2]
else:
r, g, b = s[0:2], s[2:4], s[4:6]
return int(r, base=16), int(g, base=16), int(b, base=16)
def alphabetize_props(contents):
errors = []
for rule in re.finditer(r'{(.*?)}', contents, re.DOTALL):
semis = map(lambda t: t.strip(), rule.group(1).split(';'))[:-1]
rules = filter(lambda r: ': ' in r, semis)
props = map(lambda r: r[0:r.find(':')], rules)
if props != sorted(props):
errors.append(' %s;\n' % (';\n '.join(rules)))
return errors
def braces_have_space_before_and_nothing_after(line):
return re.search(r'(?:^|\S){|{\s*\S+\s*$', line)
def classes_use_dashes(line):
# Intentionally dumbed down version of CSS 2.1 grammar for class without
# non-ASCII, escape chars, or whitespace.
m = re.search(r'\.(-?[_a-zA-Z0-9-]+).*[,{]\s*$', line)
return (m and (m.group(1).lower() != m.group(1) or
m.group(1).find('_') >= 0))
# Ignore single frames in a @keyframe, i.e. 0% { margin: 50px; }
frame_reg = r'\s*\d+%\s*{\s*[_a-zA-Z0-9-]+:(\s*[_a-zA-Z0-9-]+)+\s*;\s*}\s*'
def close_brace_on_new_line(line):
return (line.find('}') >= 0 and re.search(r'[^ }]', line) and
not re.match(frame_reg, line))
def colons_have_space_after(line):
return re.search(r'(?<!data):(?!//)\S[^;]+;\s*', line)
def favor_single_quotes(line):
return line.find('"') >= 0
# Shared between hex_could_be_shorter and rgb_if_not_gray.
hex_reg = (r'#([a-fA-F0-9]{3}|[a-fA-F0-9]{6})(?=[^_a-zA-Z0-9-]|$)'
r'(?!.*(?:{.*|,\s*)$)')
def hex_could_be_shorter(line):
m = re.search(hex_reg, line)
return (m and _is_gray(m.group(1)) and _collapseable_hex(m.group(1)))
small_seconds = r'(?:^|[^_a-zA-Z0-9-])(0?\.[0-9]+)s(?!-?[_a-zA-Z0-9-])'
def milliseconds_for_small_times(line):
return re.search(small_seconds, line)
def no_data_uris_in_source_files(line):
return re.search(r'\(\s*\'?\s*data:', line)
def one_rule_per_line(line):
return re.search(r'[_a-zA-Z0-9-](?<!data):(?!//)[^;]+;\s*[^ }]\s*', line)
any_reg = re.compile(r':(?:-webkit-)?any\(.*?\)', re.DOTALL)
multi_sels = re.compile(r'(?:}[\n\s]*)?([^,]+,(?=[^{}]+?{).*[,{])\s*$',
re.MULTILINE)
def one_selector_per_line(contents):
errors = []
for b in re.finditer(multi_sels, re.sub(any_reg, '', contents)):
errors.append(' ' + b.group(1).strip().splitlines()[-1:][0])
return errors
def rgb_if_not_gray(line):
m = re.search(hex_reg, line)
return (m and not _is_gray(m.group(1)))
def suggest_ms_from_s(line):
ms = int(float(re.search(small_seconds, line).group(1)) * 1000)
return ' (replace with %dms)' % ms
def suggest_rgb_from_hex(line):
suggestions = ['rgb(%d, %d, %d)' % _rgb_from_hex(h.group(1))
for h in re.finditer(hex_reg, line)]
return ' (replace with %s)' % ', '.join(suggestions)
def suggest_short_hex(line):
h = re.search(hex_reg, line).group(1)
return ' (replace with #%s)' % (h[0] + h[2] + h[4])
hsl = r'hsl\([^\)]*(?:[, ]|(?<=\())(?:0?\.?)?0%'
zeros = (r'^.*(?:^|\D)'
r'(?:\.0|0(?:\.0?|px|em|%|in|cm|mm|pc|pt|ex|deg|g?rad|m?s|k?hz))'
r'(?:\D|$)(?=[^{}]+?}).*$')
def zero_length_values(contents):
errors = []
for z in re.finditer(re.compile(zeros, re.MULTILINE), contents):
first_line = z.group(0).strip().splitlines()[0]
if not re.search(hsl, first_line):
errors.append(' ' + first_line)
return errors
added_or_modified_files_checks = [
{ 'desc': 'Alphabetize properties and list vendor specific (i.e. '
'-webkit) above standard.',
'test': alphabetize_props,
'multiline': True,
},
{ 'desc': 'Start braces ({) end a selector, have a space before them '
'and no rules after.',
'test': braces_have_space_before_and_nothing_after,
},
{ 'desc': 'Classes use .dash-form.',
'test': classes_use_dashes,
},
{ 'desc': 'Always put a rule closing brace (}) on a new line.',
'test': close_brace_on_new_line,
},
{ 'desc': 'Colons (:) should have a space after them.',
'test': colons_have_space_after,
},
{ 'desc': 'Use single quotes (\') instead of double quotes (") in '
'strings.',
'test': favor_single_quotes,
},
{ 'desc': 'Use abbreviated hex (#rgb) when in form #rrggbb.',
'test': hex_could_be_shorter,
'after': suggest_short_hex,
},
{ 'desc': 'Use milliseconds for time measurements under 1 second.',
'test': milliseconds_for_small_times,
'after': suggest_ms_from_s,
},
{ 'desc': 'Don\'t use data URIs in source files. Use grit instead.',
'test': no_data_uris_in_source_files,
},
{ 'desc': 'One rule per line (what not to do: color: red; margin: 0;).',
'test': one_rule_per_line,
},
{ 'desc': 'One selector per line (what not to do: a, b {}).',
'test': one_selector_per_line,
'multiline': True,
},
{ 'desc': 'Use rgb() over #hex when not a shade of gray (like #333).',
'test': rgb_if_not_gray,
'after': suggest_rgb_from_hex,
},
{ 'desc': 'Make all zero length terms (i.e. 0px) 0 unless inside of '
'hsl() or part of @keyframe.',
'test': zero_length_values,
'multiline': True,
},
]
results = []
affected_files = self.input_api.AffectedFiles(include_deletes=False,
file_filter=self.file_filter)
files = []
for f in affected_files:
# Remove all /*comments*/, @at-keywords, and grit <if|include> tags; we're
# not using a real parser. TODO(dbeam): Check alpha in <if> blocks.
file_contents = _remove_all('\n'.join(f.NewContents()))
files.append((f.LocalPath(), file_contents))
# Only look at CSS files for now.
for f in filter(lambda f: f[0].endswith('.css'), files):
file_errors = []
for check in added_or_modified_files_checks:
# If the check is multiline, it receives the whole file and gives us
# back a list of things wrong. If the check isn't multiline, we pass it
# each line and the check returns something truthy if there's an issue.
if ('multiline' in check and check['multiline']):
check_errors = check['test'](f[1])
if len(check_errors) > 0:
# There are currently no multiline checks with ['after'].
file_errors.append('- %s\n%s' %
(check['desc'], '\n'.join(check_errors).rstrip()))
else:
check_errors = []
lines = f[1].splitlines()
for lnum in range(0, len(lines)):
line = lines[lnum]
if check['test'](line):
error = ' ' + line.strip()
if 'after' in check:
error += check['after'](line)
check_errors.append(error)
if len(check_errors) > 0:
file_errors.append('- %s\n%s' %
(check['desc'], '\n'.join(check_errors)))
if file_errors:
results.append(self.output_api.PresubmitPromptWarning(
'%s:\n%s' % (f[0], '\n\n'.join(file_errors))))
if results:
# Add your name if you're here often mucking around in the code.
authors = ['dbeam@chromium.org']
results.append(self.output_api.PresubmitNotifyResult(
'Was the CSS checker useful? Send feedback or hate mail to %s.' %
', '.join(authors)))
return results
|
kobolabs/calibre | refs/heads/kobo | src/calibre/ebooks/rtf2xml/paragraph_def.py | 24 | #########################################################################
# #
# #
# copyright 2002 Paul Henry Tremblay #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU #
# General Public License for more details. #
# #
# #
#########################################################################
import sys, os
from calibre.ebooks.rtf2xml import copy, border_parse
from calibre.ptempfile import better_mktemp
class ParagraphDef:
"""
=================
Purpose
=================
Write paragraph definition tags.
States:
1. before_1st_para_def.
Before any para_def token is found. This means all the text in the preamble.
Look for the token 'cw<pf<par-def___'. This will change the state to collect_tokens.
2. collect_tokens.
Found a paragraph_def. Need to get all tokens.
Change with start of a paragraph ('mi<mk<para-start'). State then becomes
in_paragraphs
If another paragraph definition is found, the state does not change.
But the dictionary is reset.
3. in_paragraphs
State changes when 'mi<mk<para-end__', or end of paragraph is found.
State then becomes 'self.__state = 'after_para_end'
4. after_para_end
If 'mi<mk<para-start' (the start of a paragraph) or 'mi<mk<para-end__' (the end of a paragraph--must be empty paragraph?) are found:
state changes to 'in_paragraphs'
If 'cw<pf<par-def___' (paragraph_definition) is found:
state changes to collect_tokens
if 'mi<mk<body-close', 'mi<mk<par-in-fld', 'cw<tb<cell______','cw<tb<row-def___','cw<tb<row_______', 'mi<mk<sect-close', 'mi<mk<header-beg', 'mi<mk<header-end'
are found. (All these tokens mark the start of a bigger element. para_def must
be closed:
state changes to 'after_para_def'
5. after_para_def
'mi<mk<para-start' changes state to in_paragraphs
if another paragraph_def is found, the state changes to collect_tokens.
"""
def __init__(self,
in_file,
bug_handler,
default_font,
copy = None,
run_level = 1,):
"""
Required:
'file'--file to parse
'default_font' --document default font
Optional:
'copy'-- whether to make a copy of result for debugging
'temp_dir' --where to output temporary results (default is
directory from which the script is run.)
Returns:
nothing
"""
self.__file = in_file
self.__bug_handler = bug_handler
self.__default_font = default_font
self.__copy = copy
self.__run_level = run_level
self.__write_to = better_mktemp()
def __initiate_values(self):
"""
Initiate all values.
"""
# Dictionary needed to convert shortened style names to readable names
self.__token_dict={
# paragraph formatting => pf
'par-end___' : 'para',
'par-def___' : 'paragraph-definition',
'keep-w-nex' : 'keep-with-next',
'widow-cntl' : 'widow-control',
'adjust-rgt' : 'adjust-right',
'language__' : 'language',
'right-inde' : 'right-indent',
'fir-ln-ind' : 'first-line-indent',
'left-inden' : 'left-indent',
'space-befo' : 'space-before',
'space-afte' : 'space-after',
'line-space' : 'line-spacing',
'default-ta' : 'default-tab',
'align_____' : 'align',
'widow-cntr' : 'widow-control',
# stylesheet = > ss
'style-shet' : 'stylesheet',
'based-on__' : 'based-on-style',
'next-style' : 'next-style',
'char-style' : 'character-style',
# this is changed to get a nice attribute
'para-style' : 'name',
# graphics => gr
'picture___' : 'pict',
'obj-class_' : 'obj_class',
'mac-pic___' : 'mac-pict',
# section => sc
'section___' : 'section-new',
'sect-defin' : 'section-reset',
'sect-note_' : 'endnotes-in-section',
# list=> ls
'list-text_' : 'list-text',
'list______' : 'list',
'list-lev-d' : 'list-level-definition',
'list-cardi' : 'list-cardinal-numbering',
'list-decim' : 'list-decimal-numbering',
'list-up-al' : 'list-uppercase-alphabetic-numbering',
'list-up-ro' : 'list-uppercae-roman-numbering',
'list-ord__' : 'list-ordinal-numbering',
'list-ordte' : 'list-ordinal-text-numbering',
'list-bulli' : 'list-bullet',
'list-simpi' : 'list-simple',
'list-conti' : 'list-continue',
'list-hang_' : 'list-hang',
# 'list-tebef' : 'list-text-before',
'list-id___' : 'list-id',
'list-start' : 'list-start',
'nest-level' : 'nest-level',
'list-level' : 'list-level',
# notes => nt
'footnote__' : 'footnote',
'type______' : 'type',
# anchor => an
'toc_______' : 'anchor-toc',
'book-mk-st' : 'bookmark-start',
'book-mk-en' : 'bookmark-end',
'index-mark' : 'anchor-index',
'place_____' : 'place',
# field => fd
'field_____' : 'field',
'field-inst' : 'field-instruction',
'field-rslt' : 'field-result',
'datafield_' : 'data-field',
# info-tables => it
'font-table' : 'font-table',
'colr-table' : 'color-table',
'lovr-table' : 'list-override-table',
'listtable_' : 'list-table',
'revi-table' : 'revision-table',
# character info => ci
'hidden____' : 'hidden',
'italics___' : 'italics',
'bold______' : 'bold',
'strike-thr' : 'strike-through',
'shadow____' : 'shadow',
'outline___' : 'outline',
'small-caps' : 'small-caps',
'caps______' : 'caps',
'dbl-strike' : 'double-strike-through',
'emboss____' : 'emboss',
'engrave___' : 'engrave',
'subscript_' : 'subscript',
'superscrip' : 'superscipt',
'font-style' : 'font-style',
'font-color' : 'font-color',
'font-size_' : 'font-size',
'font-up___' : 'superscript',
'font-down_' : 'subscript',
'red_______' : 'red',
'blue______' : 'blue',
'green_____' : 'green',
# table => tb
'row-def___' : 'row-definition',
'cell______' : 'cell',
'row_______' : 'row',
'in-table__' : 'in-table',
'columns___' : 'columns',
'row-pos-le' : 'row-position-left',
'cell-posit' : 'cell-position',
# preamble => pr
# underline
'underlined' : 'underlined',
# border => bd
'bor-t-r-hi' : 'border-table-row-horizontal-inside',
'bor-t-r-vi' : 'border-table-row-vertical-inside',
'bor-t-r-to' : 'border-table-row-top',
'bor-t-r-le' : 'border-table-row-left',
'bor-t-r-bo' : 'border-table-row-bottom',
'bor-t-r-ri' : 'border-table-row-right',
'bor-cel-bo' : 'border-cell-bottom',
'bor-cel-to' : 'border-cell-top',
'bor-cel-le' : 'border-cell-left',
'bor-cel-ri' : 'border-cell-right',
'bor-par-bo' : 'border-paragraph-bottom',
'bor-par-to' : 'border-paragraph-top',
'bor-par-le' : 'border-paragraph-left',
'bor-par-ri' : 'border-paragraph-right',
'bor-par-bo' : 'border-paragraph-box',
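# NOTE: 'bor-par-bo' is also defined above as 'border-paragraph-bottom';
# in a dict literal the later entry wins, so the effective mapping here is
# 'border-paragraph-box'.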
'bor-for-ev' : 'border-for-every-paragraph',
'bor-outsid' : 'border-outisde',
'bor-none__' : 'border',
# border type => bt
'bdr-single' : 'single',
'bdr-doubtb' : 'double-thickness-border',
'bdr-shadow' : 'shadowed-border',
'bdr-double' : 'double-border',
'bdr-dotted' : 'dotted-border',
'bdr-dashed' : 'dashed',
'bdr-hair__' : 'hairline',
'bdr-inset_' : 'inset',
'bdr-das-sm' : 'dash-small',
'bdr-dot-sm' : 'dot-dash',
'bdr-dot-do' : 'dot-dot-dash',
'bdr-outset' : 'outset',
'bdr-trippl' : 'tripple',
'bdr-thsm__' : 'thick-thin-small',
'bdr-htsm__' : 'thin-thick-small',
'bdr-hthsm_' : 'thin-thick-thin-small',
'bdr-thm__' : 'thick-thin-medium',
'bdr-htm__' : 'thin-thick-medium',
'bdr-hthm_' : 'thin-thick-thin-medium',
'bdr-thl__' : 'thick-thin-large',
'bdr-hthl_' : 'think-thick-think-large',
'bdr-wavy_' : 'wavy',
'bdr-d-wav' : 'double-wavy',
'bdr-strip' : 'striped',
'bdr-embos' : 'emboss',
'bdr-engra' : 'engrave',
'bdr-frame' : 'frame',
'bdr-li-wid' : 'line-width',
}
self.__tabs_dict = {
'cw<pf<tab-stop__' : self.__tab_stop_func,
'cw<pf<tab-center' : self.__tab_type_func,
'cw<pf<tab-right_' : self.__tab_type_func,
'cw<pf<tab-dec___' : self.__tab_type_func,
'cw<pf<leader-dot' : self.__tab_leader_func,
'cw<pf<leader-hyp' : self.__tab_leader_func,
'cw<pf<leader-und' : self.__tab_leader_func,
'cw<pf<tab-bar-st' : self.__tab_bar_func,
}
self.__tab_type_dict = {
'cw<pf<tab-center' : 'center',
'cw<pf<tab-right_' : 'right',
'cw<pf<tab-dec___' : 'decimal',
'cw<pf<leader-dot' : 'leader-dot',
'cw<pf<leader-hyp' : 'leader-hyphen',
'cw<pf<leader-und' : 'leader-underline',
}
self.__border_obj = border_parse.BorderParse()
self.__style_num_strings = []
self.__body_style_strings = []
self.__state = 'before_1st_para_def'
self.__att_val_dict = {}
self.__start_marker = 'mi<mk<pard-start\n' # outside para tags
self.__start2_marker = 'mi<mk<pardstart_\n' # inside para tags
self.__end2_marker = 'mi<mk<pardend___\n' # inside para tags
self.__end_marker = 'mi<mk<pard-end__\n' # outside para tags
self.__text_string = ''
self.__state_dict = {
'before_1st_para_def' : self.__before_1st_para_def_func,
'collect_tokens' : self.__collect_tokens_func,
'after_para_def' : self.__after_para_def_func,
'in_paragraphs' : self.__in_paragraphs_func,
'after_para_end' : self.__after_para_end_func,
}
self.__collect_tokens_dict = {
'mi<mk<para-start' : self.__end_para_def_func,
'cw<pf<par-def___' : self.__para_def_in_para_def_func,
'cw<tb<cell______' : self.__empty_table_element_func,
'cw<tb<row_______' : self.__empty_table_element_func,
}
self.__after_para_def_dict = {
'mi<mk<para-start' : self.__start_para_after_def_func,
'cw<pf<par-def___' : self.__found_para_def_func,
'cw<tb<cell______' : self.__empty_table_element_func,
'cw<tb<row_______' : self.__empty_table_element_func,
}
self.__in_paragraphs_dict = {
'mi<mk<para-end__' : self.__found_para_end_func,
}
self.__after_para_end_dict = {
'mi<mk<para-start' : self.__continue_block_func,
'mi<mk<para-end__' : self.__continue_block_func,
'cw<pf<par-def___' : self.__new_para_def_func,
'mi<mk<body-close' : self.__stop_block_func,
'mi<mk<par-in-fld' : self.__stop_block_func,
'cw<tb<cell______' : self.__stop_block_func,
'cw<tb<row-def___' : self.__stop_block_func,
'cw<tb<row_______' : self.__stop_block_func,
'mi<mk<sect-close' : self.__stop_block_func,
'mi<mk<sect-start' : self.__stop_block_func,
'mi<mk<header-beg' : self.__stop_block_func,
'mi<mk<header-end' : self.__stop_block_func,
'mi<mk<head___clo' : self.__stop_block_func,
'mi<mk<fldbk-end_' : self.__stop_block_func,
'mi<mk<lst-txbeg_' : self.__stop_block_func,
}
def __before_1st_para_def_func(self, line):
"""
Required:
line -- line to parse
Returns:
nothing
Logic:
Look for the beginning of a paragraph definition
"""
##cw<pf<par-def___<nu<true
if self.__token_info == 'cw<pf<par-def___':
self.__found_para_def_func()
else:
self.__write_obj.write(line)
def __found_para_def_func(self):
self.__state = 'collect_tokens'
# not exactly right--have to reset the dictionary--give it default
# values
self.__reset_dict()
def __collect_tokens_func(self, line):
"""
Required:
line --line to parse
Returns:
nothing
Logic:
Check the collect_tokens_dict for either the beginning of a
paragraph or a new paragraph definition. Take the actions
according to the value in the dict.
Otherwise, check if the token is not a control word. If it is not,
change the state to after_para_def.
Otherwise, check if the token is a paragraph definition word; if
so, add it to the attributes and values dictionary.
"""
action = self.__collect_tokens_dict.get(self.__token_info)
if action:
action(line)
elif line[0:2] != 'cw':
self.__write_obj.write(line)
self.__state = 'after_para_def'
elif line[0:5] == 'cw<bd':
self.__parse_border(line)
else:
action = self.__tabs_dict.get(self.__token_info)
if action:
action(line)
else:
token = self.__token_dict.get(line[6:16])
if token:
self.__att_val_dict[token] = line[20:-1]
def __tab_stop_func(self, line):
"""
"""
self.__att_val_dict['tabs'] += '%s:' % self.__tab_type
self.__att_val_dict['tabs'] += '%s;' % line[20:-1]
self.__tab_type = 'left'
def __tab_type_func(self, line):
"""
"""
type = self.__tab_type_dict.get(self.__token_info)
if type != None:
self.__tab_type = type
else:
if self.__run_level > 3:
msg = 'no entry for %s\n' % self.__token_info
raise self.__bug_handler, msg
def __tab_leader_func(self, line):
"""
"""
leader = self.__tab_type_dict.get(self.__token_info)
if leader != None:
self.__att_val_dict['tabs'] += '%s^' % leader
else:
if self.__run_level > 3:
msg = 'no entry for %s\n' % self.__token_info
raise self.__bug_handler, msg
def __tab_bar_func(self, line):
"""
"""
# self.__att_val_dict['tabs-bar'] += '%s:' % line[20:-1]
self.__att_val_dict['tabs'] += 'bar:%s;' % (line[20:-1])
self.__tab_type = 'left'
def __parse_border(self, line):
"""
Requires:
line --line to parse
Returns:
nothing (updates dictionary)
Logic:
Uses the border_parse module to return a dictionary of attribute
value pairs for a border line.
"""
border_dict = self.__border_obj.parse_border(line)
self.__att_val_dict.update(border_dict)
def __para_def_in_para_def_func(self, line):
"""
Requires:
line --line to parse
Returns:
nothing
Logic:
I have found a \pard while I am collecting tokens. I want to reset
the dictionary and do nothing else.
"""
# Change this
self.__state = 'collect_tokens'
self.__reset_dict()
def __end_para_def_func(self, line):
"""
Requires:
Nothing
Returns:
Nothing
Logic:
The previous state was collect tokens, and I have found the start
of a paragraph. I want to output the definition tag; output the line
itself (telling me of the beginning of a paragraph); change the
state to 'in_paragraphs';
"""
self.__write_para_def_beg()
self.__write_obj.write(line)
self.__state = 'in_paragraphs'
def __start_para_after_def_func(self, line):
"""
Requires:
Nothing
Returns:
Nothing
Logic:
The state is after_para_def, and I have found the start of a
paragraph. I want to output the definition tag; output the line
itself (telling me of the beginning of a paragraph); change the
state to 'in_paragraphs'.
(I now realize that this is absolutely identical to the function above!)
"""
self.__write_para_def_beg()
self.__write_obj.write(line)
self.__state = 'in_paragraphs'
def __after_para_def_func(self, line):
"""
Requires:
line -- line to parse
Returns:
nothing
Logic:
Check if the token info is the start of a paragraph. If so, call
on the function found in the value of the dictionary.
"""
action = self.__after_para_def_dict.get(self.__token_info)
if self.__token_info == 'cw<pf<par-def___':
self.__found_para_def_func()
elif action:
action(line)
else:
self.__write_obj.write(line)
def __in_paragraphs_func(self, line):
"""
Requires:
line --current line
Returns:
nothing
Logic:
Look for the end of a paragraph, the start of a cell or row.
"""
action = self.__in_paragraphs_dict.get(self.__token_info)
if action:
action(line)
else:
self.__write_obj.write(line)
def __found_para_end_func(self,line):
"""
Requires:
line -- line to print out
Returns:
Nothing
Logic:
State is in paragraphs. You have found the end of a paragraph. You
need to print out the line and change the state to after
paragraphs.
"""
self.__state = 'after_para_end'
self.__write_obj.write(line)
def __after_para_end_func(self, line):
"""
Requires:
line -- line to output
Returns:
nothing
Logic:
The state is after the end of a paragraph. You are collecting all
the lines in a string and waiting to see if you need to write
out the paragraph definition. If you find another paragraph
definition, then you write out the old paragraph dictionary and
print out the string. You change the state to collect tokens.
If you find any larger block elements, such as cell, row,
field-block, or section, you write out the paragraph definition and
then the text string.
If you find the beginning of a paragraph, then you don't need to
write out the paragraph definition. Write out the string, and
change the state to in paragraphs.
"""
self.__text_string += line
action = self.__after_para_end_dict.get(self.__token_info)
if action:
action(line)
def __continue_block_func(self, line):
"""
Requires:
line --line to print out
Returns:
Nothing
Logic:
The state is after the end of a paragraph. You have found the
start of a paragraph, so you don't need to print out the paragraph
definition. Print out the string, the line, and change the state
to in paragraphs.
"""
self.__state = 'in_paragraphs'
self.__write_obj.write(self.__text_string)
self.__text_string = ''
# found a new paragraph definition after an end of a paragraph
def __new_para_def_func(self, line):
"""
Requires:
line -- line to output
Returns:
Nothing
Logic:
You have found a new paragraph definition at the end of a
paragraph. Output the end of the old paragraph definition. Output
the text string. Output the line. Change the state to collect
tokens. (And don't forget to set the text string to ''!)
"""
self.__write_para_def_end_func()
self.__found_para_def_func()
# after a paragraph and found reason to stop this block
def __stop_block_func(self, line):
"""
Requires:
line --(shouldn't be here?)
Returns:
nothing
Logic:
The state is after a paragraph, and you have found a larger block
than paragraph-definition. You want to write the end tag of the
old definition and reset the text string (handled by other
methods).
"""
self.__write_para_def_end_func()
self.__state = 'after_para_def'
def __write_para_def_end_func(self):
"""
Requires:
nothing
Returns:
nothing
Logic:
Print out the end of the paragraph definition tag, and the markers
that let me know when I have reached this tag. (These markers are
used for later parsing.)
"""
self.__write_obj.write(self.__end2_marker)
self.__write_obj.write('mi<tg<close_____<paragraph-definition\n')
self.__write_obj.write(self.__end_marker)
self.__write_obj.write(self.__text_string)
self.__text_string = ''
keys = self.__att_val_dict.keys()
if 'font-style' in keys:
self.__write_obj.write('mi<mk<font-end__\n')
if 'caps' in keys:
self.__write_obj.write('mi<mk<caps-end__\n')
def __get_num_of_style(self):
"""
Requires:
nothing
Returns:
nothing
Logic:
Get a unique value for each style.
"""
my_string = ''
new_style = 0
# when determining uniqueness for a style, ignore these values, since
# they don't tell us if the style is unique
ignore_values = ['style-num', 'nest-level', 'in-table']
keys = self.__att_val_dict.keys()
keys.sort()
for key in keys:
if key in ignore_values:
continue
my_string += '%s:%s' % (key, self.__att_val_dict[key])
if my_string in self.__style_num_strings:
num = self.__style_num_strings.index(my_string)
num += 1 # since indexing starts at zero, rather than 1
else:
self.__style_num_strings.append(my_string)
num = len(self.__style_num_strings)
new_style = 1
num = '%04d' % num
self.__att_val_dict['style-num'] = 's' + str(num)
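# e.g. the first unique style becomes 's0001', the second 's0002', etc.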
if new_style:
self.__write_body_styles()
def __write_body_styles(self):
style_string = ''
style_string += 'mi<tg<empty-att_<paragraph-style-in-body'
style_string += '<name>%s' % self.__att_val_dict['name']
style_string += '<style-number>%s' % self.__att_val_dict['style-num']
tabs_list = ['tabs-left', 'tabs-right', 'tabs-decimal', 'tabs-center',
'tabs-bar', 'tabs']
if self.__att_val_dict['tabs'] != '':
the_value = self.__att_val_dict['tabs']
# the_value = the_value[:-1]
style_string += ('<%s>%s' % ('tabs', the_value))
keys = self.__att_val_dict.keys()
keys.sort()
for key in keys:
if key != 'name' and key !='style-num' and key != 'in-table'\
and key not in tabs_list:
style_string += ('<%s>%s' % (key, self.__att_val_dict[key]))
style_string += '\n'
self.__body_style_strings.append(style_string)
def __write_para_def_beg(self):
"""
Requires:
nothing
Returns:
nothing
Logic:
Print out the beginning of the paragraph definition tag, and the markers
that let me know when I have reached this tag. (These markers are
used for later parsing.)
"""
self.__get_num_of_style()
table = self.__att_val_dict.get('in-table')
if table:
# del self.__att_val_dict['in-table']
self.__write_obj.write('mi<mk<in-table__\n')
else:
self.__write_obj.write('mi<mk<not-in-tbl\n')
left_indent = self.__att_val_dict.get('left-indent')
if left_indent:
self.__write_obj.write('mi<mk<left_inden<%s\n' % left_indent)
is_list = self.__att_val_dict.get('list-id')
if is_list:
self.__write_obj.write('mi<mk<list-id___<%s\n' % is_list)
else:
self.__write_obj.write('mi<mk<no-list___\n')
self.__write_obj.write('mi<mk<style-name<%s\n' % self.__att_val_dict['name'])
self.__write_obj.write(self.__start_marker)
self.__write_obj.write('mi<tg<open-att__<paragraph-definition')
self.__write_obj.write('<name>%s' % self.__att_val_dict['name'])
self.__write_obj.write('<style-number>%s' % self.__att_val_dict['style-num'])
tabs_list = ['tabs-left', 'tabs-right', 'tabs-decimal', 'tabs-center',
'tabs-bar', 'tabs']
"""
for tab_item in tabs_list:
if self.__att_val_dict[tab_item] != '':
the_value = self.__att_val_dict[tab_item]
the_value = the_value[:-1]
self.__write_obj.write('<%s>%s' % (tab_item, the_value))
"""
if self.__att_val_dict['tabs'] != '':
the_value = self.__att_val_dict['tabs']
# the_value = the_value[:-1]
self.__write_obj.write('<%s>%s' % ('tabs', the_value))
keys = self.__att_val_dict.keys()
keys.sort()
for key in keys:
if key != 'name' and key !='style-num' and key != 'in-table'\
and key not in tabs_list:
self.__write_obj.write('<%s>%s' % (key, self.__att_val_dict[key]))
self.__write_obj.write('\n')
self.__write_obj.write(self.__start2_marker)
if 'font-style' in keys:
face = self.__att_val_dict['font-style']
self.__write_obj.write('mi<mk<font______<%s\n' % face)
if 'caps' in keys:
value = self.__att_val_dict['caps']
self.__write_obj.write('mi<mk<caps______<%s\n' % value)
def __empty_table_element_func(self, line):
self.__write_obj.write('mi<mk<in-table__\n')
self.__write_obj.write(line)
self.__state = 'after_para_def'
def __reset_dict(self):
"""
Requires:
nothing
Returns:
nothing
Logic:
The dictionary containing values and attributes must be reset each
time a new paragraphs definition is found.
"""
self.__att_val_dict.clear()
self.__att_val_dict['name'] = 'Normal'
self.__att_val_dict['font-style'] = self.__default_font
self.__tab_type = 'left'
self.__att_val_dict['tabs-left'] = ''
self.__att_val_dict['tabs-right'] = ''
self.__att_val_dict['tabs-center'] = ''
self.__att_val_dict['tabs-decimal'] = ''
self.__att_val_dict['tabs-bar'] = ''
self.__att_val_dict['tabs'] = ''
def make_paragraph_def(self):
"""
Requires:
nothing
Returns:
nothing (changes the original file)
Logic:
Read one line in at a time. Determine what action to take based on
the state.
"""
self.__initiate_values()
read_obj = open(self.__file, 'r')
self.__write_obj = open(self.__write_to, 'w')
line_to_read = 1
while line_to_read:
line_to_read = read_obj.readline()
line = line_to_read
self.__token_info = line[:16]
action = self.__state_dict.get(self.__state)
if action == None:
sys.stderr.write('no matching state in module paragraph_def.py\n')
sys.stderr.write(self.__state + '\n')
action(line)
read_obj.close()
self.__write_obj.close()
copy_obj = copy.Copy(bug_handler = self.__bug_handler)
if self.__copy:
copy_obj.copy_file(self.__write_to, "paragraphs_def.data")
copy_obj.rename(self.__write_to, self.__file)
os.remove(self.__write_to)
return self.__body_style_strings
|
DynamicGravitySystems/DGP | refs/heads/develop | dgp/__main__.py | 1 | # -*- coding: utf-8 -*-
import sys
import time
import traceback
from PyQt5.QtCore import Qt
from PyQt5.QtGui import QPixmap
from PyQt5 import QtCore
from PyQt5.QtWidgets import QApplication, QSplashScreen
from dgp.gui.main import MainWindow
app = None
def excepthook(type_, value, traceback_):
"""This allows IDE to properly display unhandled exceptions which are
otherwise silently ignored as the application is terminated.
Override default excepthook with
>>> sys.excepthook = excepthook
See: http://pyqt.sourceforge.net/Docs/PyQt5/incompatibilities.html
"""
traceback.print_exception(type_, value, traceback_)
QtCore.qFatal('')
def main():
_align = Qt.AlignBottom | Qt.AlignHCenter
global app
sys.excepthook = excepthook
app = QApplication(sys.argv)
splash = QSplashScreen(QPixmap(":/icons/dgp_large"))
splash.showMessage("Loading Dynamic Gravity Processor", _align)
splash.show()
time.sleep(.5)
window = MainWindow()
splash.finish(window)
window.sigStatusMessage.connect(lambda msg: splash.showMessage(msg, _align))
window.load()
sys.exit(app.exec_())
if __name__ == "__main__":
main()
|
ProfessionalIT/maxigenios-website | refs/heads/master | sdk/google_appengine/lib/django-1.4/django/contrib/staticfiles/finders.py | 83 | import os
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.core.files.storage import default_storage, Storage, FileSystemStorage
from django.utils.datastructures import SortedDict
from django.utils.functional import empty, memoize, LazyObject
from django.utils.importlib import import_module
from django.utils._os import safe_join
from django.contrib.staticfiles import utils
from django.contrib.staticfiles.storage import AppStaticStorage
_finders = SortedDict()
class BaseFinder(object):
"""
A base file finder to be used for custom staticfiles finder classes.
"""
def find(self, path, all=False):
"""
Given a relative file path this ought to find an
absolute file path.
If the ``all`` parameter is ``False`` (default) only
the first found file path will be returned; if set
to ``True`` a list of all found files paths is returned.
"""
raise NotImplementedError()
def list(self, ignore_patterns):
"""
Given an optional list of paths to ignore, this should return
a two item iterable consisting of the relative path and storage
instance.
"""
raise NotImplementedError()
class FileSystemFinder(BaseFinder):
"""
A static files finder that uses the ``STATICFILES_DIRS`` setting
to locate files.
"""
def __init__(self, apps=None, *args, **kwargs):
# List of locations with static files
self.locations = []
# Maps dir paths to an appropriate storage instance
self.storages = SortedDict()
if not isinstance(settings.STATICFILES_DIRS, (list, tuple)):
raise ImproperlyConfigured(
"Your STATICFILES_DIRS setting is not a tuple or list; "
"perhaps you forgot a trailing comma?")
for root in settings.STATICFILES_DIRS:
if isinstance(root, (list, tuple)):
prefix, root = root
else:
prefix = ''
if os.path.abspath(settings.STATIC_ROOT) == os.path.abspath(root):
raise ImproperlyConfigured(
"The STATICFILES_DIRS setting should "
"not contain the STATIC_ROOT setting")
if (prefix, root) not in self.locations:
self.locations.append((prefix, root))
for prefix, root in self.locations:
filesystem_storage = FileSystemStorage(location=root)
filesystem_storage.prefix = prefix
self.storages[root] = filesystem_storage
super(FileSystemFinder, self).__init__(*args, **kwargs)
def find(self, path, all=False):
"""
Looks for files in the extra locations
as defined in ``STATICFILES_DIRS``.
"""
matches = []
for prefix, root in self.locations:
matched_path = self.find_location(root, path, prefix)
if matched_path:
if not all:
return matched_path
matches.append(matched_path)
return matches
def find_location(self, root, path, prefix=None):
"""
Finds a requested static file in a location, returning the found
absolute path (or ``None`` if no match).
"""
if prefix:
prefix = '%s%s' % (prefix, os.sep)
if not path.startswith(prefix):
return None
path = path[len(prefix):]
path = safe_join(root, path)
if os.path.exists(path):
return path
def list(self, ignore_patterns):
"""
List all files in all locations.
"""
for prefix, root in self.locations:
storage = self.storages[root]
for path in utils.get_files(storage, ignore_patterns):
yield path, storage
class AppDirectoriesFinder(BaseFinder):
"""
A static files finder that looks in the directory of each app as
specified in the source_dir attribute of the given storage class.
"""
storage_class = AppStaticStorage
def __init__(self, apps=None, *args, **kwargs):
# The list of apps that are handled
self.apps = []
# Mapping of app module paths to storage instances
self.storages = SortedDict()
if apps is None:
apps = settings.INSTALLED_APPS
for app in apps:
app_storage = self.storage_class(app)
if os.path.isdir(app_storage.location):
self.storages[app] = app_storage
if app not in self.apps:
self.apps.append(app)
super(AppDirectoriesFinder, self).__init__(*args, **kwargs)
def list(self, ignore_patterns):
"""
List all files in all app storages.
"""
for storage in self.storages.itervalues():
if storage.exists(''): # check if storage location exists
for path in utils.get_files(storage, ignore_patterns):
yield path, storage
def find(self, path, all=False):
"""
Looks for files in the app directories.
"""
matches = []
for app in self.apps:
match = self.find_in_app(app, path)
if match:
if not all:
return match
matches.append(match)
return matches
def find_in_app(self, app, path):
"""
Find a requested static file in an app's static locations.
"""
storage = self.storages.get(app, None)
if storage:
if storage.prefix:
prefix = '%s%s' % (storage.prefix, os.sep)
if not path.startswith(prefix):
return None
path = path[len(prefix):]
# only try to find a file if the source dir actually exists
if storage.exists(path):
matched_path = storage.path(path)
if matched_path:
return matched_path
class BaseStorageFinder(BaseFinder):
"""
A base static files finder to be used to extended
with an own storage class.
"""
storage = None
def __init__(self, storage=None, *args, **kwargs):
if storage is not None:
self.storage = storage
if self.storage is None:
raise ImproperlyConfigured("The staticfiles storage finder %r "
"doesn't have a storage class "
"assigned." % self.__class__)
# Make sure we have a storage instance here.
if not isinstance(self.storage, (Storage, LazyObject)):
self.storage = self.storage()
super(BaseStorageFinder, self).__init__(*args, **kwargs)
def find(self, path, all=False):
"""
Looks for files in the default file storage, if it's local.
"""
try:
self.storage.path('')
except NotImplementedError:
pass
else:
if self.storage.exists(path):
match = self.storage.path(path)
if all:
match = [match]
return match
return []
def list(self, ignore_patterns):
"""
List all files of the storage.
"""
for path in utils.get_files(self.storage, ignore_patterns):
yield path, self.storage
class DefaultStorageFinder(BaseStorageFinder):
"""
A static files finder that uses the default storage backend.
"""
storage = default_storage
def __init__(self, *args, **kwargs):
super(DefaultStorageFinder, self).__init__(*args, **kwargs)
base_location = getattr(self.storage, 'base_location', empty)
if not base_location:
raise ImproperlyConfigured("The storage backend of the "
"staticfiles finder %r doesn't have "
"a valid location." % self.__class__)
def find(path, all=False):
"""
Find a static file with the given path using all enabled finders.
If ``all`` is ``False`` (default), return the first matching
absolute path (or ``None`` if no match). Otherwise return a list.
"""
matches = []
for finder in get_finders():
result = finder.find(path, all=all)
if not all and result:
return result
if not isinstance(result, (list, tuple)):
result = [result]
matches.extend(result)
if matches:
return matches
# No match.
return all and [] or None
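# Example: find('css/base.css') returns the first matching absolute path
# from the enabled finders (or None); find('css/base.css', all=True)
# returns every match as a list.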
def get_finders():
for finder_path in settings.STATICFILES_FINDERS:
yield get_finder(finder_path)
def _get_finder(import_path):
"""
Imports the staticfiles finder class described by import_path, where
import_path is the full Python path to the class.
"""
module, attr = import_path.rsplit('.', 1)
try:
mod = import_module(module)
except ImportError, e:
raise ImproperlyConfigured('Error importing module %s: "%s"' %
(module, e))
try:
Finder = getattr(mod, attr)
except AttributeError:
raise ImproperlyConfigured('Module "%s" does not define a "%s" '
'class.' % (module, attr))
if not issubclass(Finder, BaseFinder):
raise ImproperlyConfigured('Finder "%s" is not a subclass of "%s"' %
(Finder, BaseFinder))
return Finder()
get_finder = memoize(_get_finder, _finders, 1)
|
DavidLKing/swe | refs/heads/master | mapping.py | 1 | import sys
import argparse
class mapping:
def __init__(self):
parser = argparse.ArgumentParser()
# parser.add_argument('-f', '--folds_dir', help="folds directory (e.g. folds/gold")
requiredNamed = parser.add_argument_group('required named arguments')
requiredNamed.add_argument('-i', '--input', help='Input file name', required=True)
# parser.parse_args(['-h'])
self.args = parser.parse_args()
def nouns(self, gender, string):
pass
def verbs(self):
pass
def main(self, file):
for line in open(file, 'r').readlines():
    pass  # TODO: apply the noun/verb mappings to each feature line
if __name__ == '__main__':
m = mapping()
m.main(m.args.input)
"""
pos=ADJ,case=ACC,comp=CMPR,gen=FEM,num=SG
pos=ADJ,case=ACC,comp=CMPR,gen=MASC,num=SG
pos=ADJ,case=ACC,comp=CMPR,gen=NEUT,num=SG
pos=ADJ,case=ACC,comp=CMPR,num=PL
pos=ADJ,case=ACC,comp=SPRL,gen=FEM,num=SG
pos=ADJ,case=ACC,comp=SPRL,gen=MASC,num=SG
pos=ADJ,case=ACC,comp=SPRL,gen=NEUT,num=SG
pos=ADJ,case=ACC,comp=SPRL,num=PL
pos=ADJ,case=ACC,gen=FEM,num=SG
pos=ADJ,case=ACC,gen=MASC,num=SG
pos=ADJ,case=ACC,gen=NEUT,num=SG
pos=ADJ,case=ACC,num=PL
pos=ADJ,case=DAT,comp=CMPR,gen=FEM,num=SG
pos=ADJ,case=DAT,comp=CMPR,gen=MASC,num=SG
pos=ADJ,case=DAT,comp=CMPR,gen=NEUT,num=SG
pos=ADJ,case=DAT,comp=SPRL,gen=FEM,num=SG
pos=ADJ,case=DAT,comp=SPRL,gen=MASC,num=SG
pos=ADJ,case=DAT,comp=SPRL,gen=NEUT,num=SG
pos=ADJ,case=DAT,gen=FEM,num=SG
pos=ADJ,case=DAT,gen=MASC,num=SG
pos=ADJ,case=DAT,gen=NEUT,num=SG
pos=ADJ,case=GEN,comp=CMPR,gen=FEM,num=SG
pos=ADJ,case=GEN,comp=CMPR,gen=MASC,num=SG
pos=ADJ,case=GEN,comp=CMPR,gen=NEUT,num=SG
pos=ADJ,case=GEN,comp=CMPR,num=PL
pos=ADJ,case=GEN,comp=SPRL,gen=FEM,num=SG
pos=ADJ,case=GEN,comp=SPRL,gen=MASC,num=SG
pos=ADJ,case=GEN,comp=SPRL,gen=NEUT,num=SG
pos=ADJ,case=GEN,comp=SPRL,num=PL
pos=ADJ,case=GEN,gen=FEM,num=SG
pos=ADJ,case=GEN,gen=MASC,num=SG
pos=ADJ,case=GEN,gen=NEUT,num=SG
pos=ADJ,case=GEN,num=PL
pos=ADJ,case=NOM,comp=CMPR,gen=FEM,num=SG
pos=ADJ,case=NOM,comp=CMPR,gen=MASC,num=SG
pos=ADJ,case=NOM,comp=CMPR,gen=NEUT,num=SG
pos=ADJ,case=NOM,comp=CMPR,num=PL
pos=ADJ,case=NOM,comp=SPRL,gen=FEM,num=SG
pos=ADJ,case=NOM,comp=SPRL,gen=MASC,num=SG
pos=ADJ,case=NOM,comp=SPRL,gen=NEUT,num=SG
pos=ADJ,case=NOM,comp=SPRL,num=PL
pos=ADJ,case=NOM,gen=FEM,num=SG
pos=ADJ,case=NOM,gen=MASC,num=SG
pos=ADJ,case=NOM,gen=NEUT,num=SG
pos=ADJ,case=NOM,num=PL
pos=ADJ,comp=CMPR
pos=ADJ,comp=SPRL,per=3,num=SG
pos=N,case=ACC,gen=FEM,num=PL
pos=N,case=ACC,gen=FEM,num=SG
pos=N,case=ACC,gen=MASC,num=PL
pos=N,case=ACC,gen=MASC,num=SG
pos=N,case=ACC,gen=NEUT,num=PL
pos=N,case=ACC,gen=NEUT,num=SG
pos=N,case=DAT,gen=FEM,num=PL
pos=N,case=DAT,gen=FEM,num=SG
pos=N,case=DAT,gen=MASC,num=PL
pos=N,case=DAT,gen=NEUT,num=PL
pos=N,case=GEN,gen=FEM,num=PL
pos=N,case=GEN,gen=FEM,num=SG
pos=N,case=GEN,gen=MASC,num=PL
pos=N,case=GEN,gen=MASC,num=SG
pos=N,case=GEN,gen=MASC,num=SG,alt=LGSPEC1
pos=N,case=GEN,gen=NEUT,num=PL
pos=N,case=GEN,gen=NEUT,num=SG
pos=N,case=GEN,gen=NEUT,num=SG,alt=LGSPEC1
pos=N,case=NOM,gen=FEM,num=PL
pos=N,case=NOM,gen=FEM,num=SG
pos=N,case=NOM,gen=MASC,num=PL
pos=N,case=NOM,gen=MASC,num=SG
pos=N,case=NOM,gen=NEUT,num=PL
pos=N,case=NOM,gen=NEUT,num=SG
pos=V,finite=NFIN
pos=V,mood=IND,tense=PRS,per=1,num=PL
pos=V,mood=IND,tense=PRS,per=1,num=SG
pos=V,mood=IND,tense=PRS,per=2,num=PL
pos=V,mood=IND,tense=PRS,per=2,num=SG
pos=V,mood=IND,tense=PRS,per=3,num=PL
pos=V,mood=IND,tense=PRS,per=3,num=SG
pos=V,mood=IND,tense=PST,aspect=PFV,per=1,num=PL
pos=V,mood=IND,tense=PST,aspect=PFV,per=1,num=SG
pos=V,mood=IND,tense=PST,aspect=PFV,per=2,num=PL
pos=V,mood=IND,tense=PST,aspect=PFV,per=2,num=SG
pos=V,mood=IND,tense=PST,aspect=PFV,per=3,num=PL
pos=V,mood=IND,tense=PST,aspect=PFV,per=3,num=SG
pos=V,mood={OPT/SBJV},tense=PRS,per=1,num=PL
pos=V,mood={OPT/SBJV},tense=PRS,per=1,num=SG
pos=V,mood={OPT/SBJV},tense=PRS,per=2,num=PL
pos=V,mood={OPT/SBJV},tense=PRS,per=2,num=SG
pos=V,mood={OPT/SBJV},tense=PRS,per=3,num=PL
pos=V,mood={OPT/SBJV},tense=PRS,per=3,num=SG
pos=V,mood={SBJV/COND},tense=PST,aspect=PFV,per=1,num=PL
pos=V,mood={SBJV/COND},tense=PST,aspect=PFV,per=1,num=SG
pos=V,mood={SBJV/COND},tense=PST,aspect=PFV,per=2,num=PL
pos=V,mood={SBJV/COND},tense=PST,aspect=PFV,per=2,num=SG
pos=V,mood={SBJV/COND},tense=PST,aspect=PFV,per=3,num=PL
pos=V,mood={SBJV/COND},tense=PST,aspect=PFV,per=3,num=SG
pos=V,tense=PRS
pos=V,tense=PST
""" |
BT-fgarbely/odoo | refs/heads/8.0 | addons/email_template/tests/__init__.py | 260 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (c) 2012-TODAY OpenERP S.A. <http://openerp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from . import test_mail, test_ir_actions
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
childsish/lhc-python | refs/heads/master | tests/test_io/test_txt/__init__.py | 12133432 | |
mudithkr/zamboni | refs/heads/master | mkt/recommendations/tests/__init__.py | 12133432 | |
programadorjc/django | refs/heads/master | tests/i18n/other/__init__.py | 12133432 | |
ESS-LLP/erpnext-healthcare | refs/heads/master | erpnext/buying/print_format/__init__.py | 12133432 | |
rdbwebster/umsamples | refs/heads/master | src/__init__.py | 12133432 | |
leakim/svtplay-dl | refs/heads/master | lib/svtplay_dl/service/picsearch.py | 1 | # ex:ts=4:sw=4:sts=4:et
# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
from __future__ import absolute_import
import re
import json
import copy
from svtplay_dl.service import Service, OpenGraphThumbMixin
from svtplay_dl.fetcher.rtmp import RTMP
from svtplay_dl.fetcher.hds import hdsparse
from svtplay_dl.error import ServiceError
class Picsearch(Service, OpenGraphThumbMixin):
supported_domains = ['dn.se', 'mobil.dn.se', 'di.se']
def get(self, options):
data = self.get_urldata()
if self.exclude(options):
yield ServiceError("Excluding video")
return
ajax_auth = re.search(r"picsearch_ajax_auth = '(\w+)'", data)
if not ajax_auth:
ajax_auth = re.search(r'screen9-ajax-auth="([^"]+)"', data)
if not ajax_auth:
yield ServiceError("Cant find token for video")
return
mediaid = re.search(r"mediaId = '([^']+)';", self.get_urldata())
if not mediaid:
mediaid = re.search(r'media-id="([^"]+)"', self.get_urldata())
if not mediaid:
mediaid = re.search(r'screen9-mid="([^"]+)"', self.get_urldata())
if not mediaid:
yield ServiceError("Cant find media id")
return
jsondata = self.http.request("get", "http://csp.picsearch.com/rest?jsonp=&eventParam=1&auth=%s&method=embed&mediaid=%s" % (ajax_auth.group(1), mediaid.group(1))).text
jsondata = json.loads(jsondata)
playlist = jsondata["media"]["playerconfig"]["playlist"][1]
if "bitrates" in playlist:
files = playlist["bitrates"]
server = jsondata["media"]["playerconfig"]["plugins"]["bwcheck"]["netConnectionUrl"]
for i in files:
options.other = "-y '%s'" % i["url"]
yield RTMP(copy.copy(options), server, i["height"])
if "provider" in playlist:
if playlist["provider"] != "rtmp":
if "live" in playlist:
options.live = playlist["live"]
if playlist["url"].endswith(".f4m"):
streams = hdsparse(copy.copy(options), self.http.request("get", playlist["url"], params={"hdcore": "3.7.0"}).text, playlist["url"])
if streams:
for n in list(streams.keys()):
yield streams[n]
|
MediaSapiens/autonormix | refs/heads/master | django/utils/text.py | 69 | import re
from django.utils.encoding import force_unicode
from django.utils.functional import allow_lazy
from django.utils.translation import ugettext_lazy
from htmlentitydefs import name2codepoint
# Capitalizes the first letter of a string.
capfirst = lambda x: x and force_unicode(x)[0].upper() + force_unicode(x)[1:]
capfirst = allow_lazy(capfirst, unicode)
def wrap(text, width):
"""
A word-wrap function that preserves existing line breaks and most spaces in
the text. Expects that existing line breaks are posix newlines.
"""
text = force_unicode(text)
def _generator():
it = iter(text.split(' '))
word = it.next()
yield word
pos = len(word) - word.rfind('\n') - 1
for word in it:
if "\n" in word:
lines = word.split('\n')
else:
lines = (word,)
pos += len(lines[0]) + 1
if pos > width:
yield '\n'
pos = len(lines[-1])
else:
yield ' '
if len(lines) > 1:
pos = len(lines[-1])
yield word
return u''.join(_generator())
wrap = allow_lazy(wrap, unicode)
def truncate_words(s, num, end_text='...'):
"""Truncates a string after a certain number of words. Takes an optional
argument of what should be used to notify that the string has been
truncated, defaulting to ellipsis (...)
Newlines in the string will be stripped.
"""
s = force_unicode(s)
length = int(num)
words = s.split()
if len(words) > length:
words = words[:length]
if not words[-1].endswith(end_text):
words.append(end_text)
return u' '.join(words)
truncate_words = allow_lazy(truncate_words, unicode)
def truncate_html_words(s, num, end_text='...'):
"""Truncates HTML to a certain number of words (not counting tags and
comments). Closes opened tags if they were correctly closed in the given
html. Takes an optional argument of what should be used to notify that the
string has been truncated, defaulting to ellipsis (...).
Newlines in the HTML are preserved.
"""
s = force_unicode(s)
length = int(num)
if length <= 0:
return u''
html4_singlets = ('br', 'col', 'link', 'base', 'img', 'param', 'area', 'hr', 'input')
# Set up regular expressions
re_words = re.compile(r'&.*?;|<.*?>|(\w[\w-]*)', re.U)
re_tag = re.compile(r'<(/)?([^ ]+?)(?: (/)| .*?)?>')
# Count non-HTML words and keep note of open tags
pos = 0
end_text_pos = 0
words = 0
open_tags = []
while words <= length:
m = re_words.search(s, pos)
if not m:
# Checked through whole string
break
pos = m.end(0)
if m.group(1):
# It's an actual non-HTML word
words += 1
if words == length:
end_text_pos = pos
continue
# Check for tag
tag = re_tag.match(m.group(0))
if not tag or end_text_pos:
# Don't worry about non tags or tags after our truncate point
continue
closing_tag, tagname, self_closing = tag.groups()
tagname = tagname.lower() # Element names are always case-insensitive
if self_closing or tagname in html4_singlets:
pass
elif closing_tag:
# Check for match in open tags list
try:
i = open_tags.index(tagname)
except ValueError:
pass
else:
# SGML: An end tag closes, back to the matching start tag, all unclosed intervening start tags with omitted end tags
open_tags = open_tags[i+1:]
else:
# Add it to the start of the open tags list
open_tags.insert(0, tagname)
if words <= length:
# Don't try to close tags if we don't need to truncate
return s
out = s[:end_text_pos]
if end_text:
out += ' ' + end_text
# Close any tags still open
for tag in open_tags:
out += '</%s>' % tag
# Return string
return out
truncate_html_words = allow_lazy(truncate_html_words, unicode)
def get_valid_filename(s):
"""
Returns the given string converted to a string that can be used for a clean
filename. Specifically, leading and trailing spaces are removed; other
spaces are converted to underscores; and anything that is not a unicode
alphanumeric, dash, underscore, or dot, is removed.
>>> get_valid_filename("john's portrait in 2004.jpg")
u'johns_portrait_in_2004.jpg'
"""
s = force_unicode(s).strip().replace(' ', '_')
return re.sub(r'(?u)[^-\w.]', '', s)
get_valid_filename = allow_lazy(get_valid_filename, unicode)
def get_text_list(list_, last_word=ugettext_lazy(u'or')):
"""
>>> get_text_list(['a', 'b', 'c', 'd'])
u'a, b, c or d'
>>> get_text_list(['a', 'b', 'c'], 'and')
u'a, b and c'
>>> get_text_list(['a', 'b'], 'and')
u'a and b'
>>> get_text_list(['a'])
u'a'
>>> get_text_list([])
u''
"""
if len(list_) == 0: return u''
if len(list_) == 1: return force_unicode(list_[0])
return u'%s %s %s' % (', '.join([force_unicode(i) for i in list_][:-1]), force_unicode(last_word), force_unicode(list_[-1]))
get_text_list = allow_lazy(get_text_list, unicode)
def normalize_newlines(text):
return force_unicode(re.sub(r'\r\n|\r|\n', '\n', text))
normalize_newlines = allow_lazy(normalize_newlines, unicode)
def recapitalize(text):
"Recapitalizes text, placing caps after end-of-sentence punctuation."
text = force_unicode(text).lower()
capsRE = re.compile(r'(?:^|(?<=[\.\?\!] ))([a-z])')
text = capsRE.sub(lambda x: x.group(1).upper(), text)
return text
recapitalize = allow_lazy(recapitalize)
def phone2numeric(phone):
"Converts a phone number with letters into its numeric equivalent."
letters = re.compile(r'[A-Z]', re.I)
char2number = lambda m: {'a': '2', 'b': '2', 'c': '2', 'd': '3', 'e': '3',
'f': '3', 'g': '4', 'h': '4', 'i': '4', 'j': '5', 'k': '5', 'l': '5',
'm': '6', 'n': '6', 'o': '6', 'p': '7', 'q': '7', 'r': '7', 's': '7',
't': '8', 'u': '8', 'v': '8', 'w': '9', 'x': '9', 'y': '9', 'z': '9',
}.get(m.group(0).lower())
return letters.sub(char2number, phone)
phone2numeric = allow_lazy(phone2numeric)
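# Example: phone2numeric('1-800-FLOWERS') returns '1-800-3569377'.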
# From http://www.xhaus.com/alan/python/httpcomp.html#gzip
# Used with permission.
def compress_string(s):
import cStringIO, gzip
zbuf = cStringIO.StringIO()
zfile = gzip.GzipFile(mode='wb', compresslevel=6, fileobj=zbuf)
zfile.write(s)
zfile.close()
return zbuf.getvalue()
ustring_re = re.compile(u"([\u0080-\uffff])")
def javascript_quote(s, quote_double_quotes=False):
def fix(match):
return r"\u%04x" % ord(match.group(1))
if type(s) == str:
s = s.decode('utf-8')
elif type(s) != unicode:
raise TypeError(s)
s = s.replace('\\', '\\\\')
s = s.replace('\r', '\\r')
s = s.replace('\n', '\\n')
s = s.replace('\t', '\\t')
s = s.replace("'", "\\'")
if quote_double_quotes:
s = s.replace('"', '"')
return str(ustring_re.sub(fix, s))
javascript_quote = allow_lazy(javascript_quote, unicode)
# Expression to match some_token and some_token="with spaces" (and similarly
# for single-quoted strings).
smart_split_re = re.compile(r"""
((?:
[^\s'"]*
(?:
(?:"(?:[^"\\]|\\.)*" | '(?:[^'\\]|\\.)*')
[^\s'"]*
)+
) | \S+)
""", re.VERBOSE)
def smart_split(text):
r"""
Generator that splits a string by spaces, leaving quoted phrases together.
Supports both single and double quotes, and supports escaping quotes with
backslashes. In the output, strings will keep their initial and trailing
quote marks and escaped quotes will remain escaped (the results can then
be further processed with unescape_string_literal()).
>>> list(smart_split(r'This is "a person\'s" test.'))
[u'This', u'is', u'"a person\\\'s"', u'test.']
>>> list(smart_split(r"Another 'person\'s' test."))
[u'Another', u"'person\\'s'", u'test.']
>>> list(smart_split(r'A "\"funky\" style" test.'))
[u'A', u'"\\"funky\\" style"', u'test.']
"""
text = force_unicode(text)
for bit in smart_split_re.finditer(text):
yield bit.group(0)
smart_split = allow_lazy(smart_split, unicode)
def _replace_entity(match):
text = match.group(1)
if text[0] == u'#':
text = text[1:]
try:
if text[0] in u'xX':
c = int(text[1:], 16)
else:
c = int(text)
return unichr(c)
except ValueError:
return match.group(0)
else:
try:
return unichr(name2codepoint[text])
except (ValueError, KeyError):
return match.group(0)
_entity_re = re.compile(r"&(#?[xX]?(?:[0-9a-fA-F]+|\w{1,8}));")
def unescape_entities(text):
return _entity_re.sub(_replace_entity, text)
unescape_entities = allow_lazy(unescape_entities, unicode)
def unescape_string_literal(s):
r"""
Convert quoted string literals to unquoted strings with escaped quotes and
backslashes unquoted::
>>> unescape_string_literal('"abc"')
'abc'
>>> unescape_string_literal("'abc'")
'abc'
>>> unescape_string_literal('"a \"bc\""')
'a "bc"'
>>> unescape_string_literal("'\'ab\' c'")
"'ab' c"
"""
if s[0] not in "\"'" or s[-1] != s[0]:
raise ValueError("Not a string literal: %r" % s)
quote = s[0]
return s[1:-1].replace(r'\%s' % quote, quote).replace(r'\\', '\\')
unescape_string_literal = allow_lazy(unescape_string_literal)
|
AaronTao1990/scrapy | refs/heads/master | scrapy/contrib/downloadermiddleware/downloadtimeout.py | 144 | import warnings
from scrapy.exceptions import ScrapyDeprecationWarning
warnings.warn("Module `scrapy.contrib.downloadermiddleware.downloadtimeout` is deprecated, "
"use `scrapy.downloadermiddlewares.downloadtimeout` instead",
ScrapyDeprecationWarning, stacklevel=2)
from scrapy.downloadermiddlewares.downloadtimeout import *
|
subodhchhabra/airflow | refs/heads/master | airflow/ti_deps/deps/dag_unpaused_dep.py | 20 | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from airflow.ti_deps.deps.base_ti_dep import BaseTIDep
from airflow.utils.db import provide_session
class DagUnpausedDep(BaseTIDep):
NAME = "Dag Not Paused"
IGNOREABLE = True
@provide_session
def _get_dep_statuses(self, ti, session, dep_context):
if ti.task.dag.is_paused:
yield self._failing_status(
reason="Task's DAG '{0}' is paused.".format(ti.dag_id))
|
zzeleznick/zDjango | refs/heads/master | venv/lib/python2.7/site-packages/django/conf/locale/en/__init__.py | 12133432 |