| repo_name (stringlengths 5–100) | ref (stringlengths 12–67) | path (stringlengths 4–244) | copies (stringlengths 1–8) | content (stringlengths 0–1.05M, ⌀) |
|---|---|---|---|---|
otlet/JestemGraczem.pl | refs/heads/master | service/admin.py | 1 |
from django.contrib import admin
from JestemGraczem.widgets import HtmlEditor
from .models import GamesServersList, AppSettings, LinkBlog, RSS
@admin.register(GamesServersList)
class GameServersListAdmin(admin.ModelAdmin):
list_display = ('name', 'official')
@admin.register(LinkBlog)
class LinkBlogAdmin(admin.ModelAdmin):
list_display = ('title', 'accepted', 'partner', 'sponsored', 'url')
@admin.register(RSS)
class RSSAdmin(admin.ModelAdmin):
list_display = ('title', 'url')
@admin.register(AppSettings)
class AppSettingsAdmin(admin.ModelAdmin):
list_display = ('name', 'description')
class Meta:
fields = 'variable'
widgets = {
'code': HtmlEditor(attrs={'style': 'width: 90%; height: 100%;'}),
}
|
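Editorial aside for the admin.py entry above: a `class Meta` with `widgets` on a `ModelAdmin` has no effect in stock Django, since `widgets` is a `ModelForm.Meta` option. A hedged sketch of the usual wiring, assuming the `code` field name taken from the widgets dict above:

```python
# Hedged sketch, not part of the dumped file: widget overrides are normally
# declared on a ModelForm and handed to the ModelAdmin via its `form` attribute.
from django import forms
from django.contrib import admin

from JestemGraczem.widgets import HtmlEditor
from .models import AppSettings


class AppSettingsForm(forms.ModelForm):
    class Meta:
        model = AppSettings
        fields = '__all__'
        widgets = {
            # 'code' is assumed from the widgets dict in the dumped file
            'code': HtmlEditor(attrs={'style': 'width: 90%; height: 100%;'}),
        }


class AppSettingsAdmin(admin.ModelAdmin):
    form = AppSettingsForm
    list_display = ('name', 'description')
```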
GitHublong/hue | refs/heads/master | desktop/core/ext-py/django-auth-ldap-1.2.0/docs/ext/daldocs.py | 42 |
"""
Extra stuff for the django-auth-ldap Sphinx docs.
"""
def setup(app):
app.add_crossref_type(
directivename="setting",
rolename="setting",
indextemplate="pair: %s; setting",
)
|
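Editorial aside for the daldocs.py entry above: `add_crossref_type` registers a `setting` directive/role pair so the docs can define and cross-reference settings. A hedged illustration (reST shown as Python comments; the setting name is just an example from django-auth-ldap):

```python
# With the crossref type registered by setup() above, the Sphinx sources can do:
#
#   .. setting:: AUTH_LDAP_SERVER_URI
#
#   (definition text here)
#
# and link back to it from anywhere with :setting:`AUTH_LDAP_SERVER_URI`.
```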
blowmage/gcloud-python | refs/heads/master | gcloud/storage/test_batch.py | 2 |
# Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest2
class TestMIMEApplicationHTTP(unittest2.TestCase):
def _getTargetClass(self):
from gcloud.storage.batch import MIMEApplicationHTTP
return MIMEApplicationHTTP
def _makeOne(self, *args, **kw):
return self._getTargetClass()(*args, **kw)
def test_ctor_body_None(self):
METHOD = 'DELETE'
PATH = '/path/to/api'
LINES = [
"DELETE /path/to/api HTTP/1.1",
"",
]
mah = self._makeOne(METHOD, PATH, {}, None)
self.assertEqual(mah.get_content_type(), 'application/http')
self.assertEqual(mah.get_payload().splitlines(), LINES)
def test_ctor_body_str(self):
METHOD = 'GET'
PATH = '/path/to/api'
BODY = 'ABC'
HEADERS = {'Content-Length': len(BODY), 'Content-Type': 'text/plain'}
LINES = [
"GET /path/to/api HTTP/1.1",
"Content-Length: 3",
"Content-Type: text/plain",
"",
"ABC",
]
mah = self._makeOne(METHOD, PATH, HEADERS, BODY)
self.assertEqual(mah.get_payload().splitlines(), LINES)
def test_ctor_body_dict(self):
METHOD = 'GET'
PATH = '/path/to/api'
BODY = {'foo': 'bar'}
HEADERS = {}
LINES = [
'GET /path/to/api HTTP/1.1',
'Content-Length: 14',
'Content-Type: application/json',
'',
'{"foo": "bar"}',
]
mah = self._makeOne(METHOD, PATH, HEADERS, BODY)
self.assertEqual(mah.get_payload().splitlines(), LINES)
class TestBatch(unittest2.TestCase):
def setUp(self):
from gcloud.storage._testing import _setup_defaults
_setup_defaults(self)
def tearDown(self):
from gcloud.storage._testing import _tear_down_defaults
_tear_down_defaults(self)
def _getTargetClass(self):
from gcloud.storage.batch import Batch
return Batch
def _makeOne(self, *args, **kw):
return self._getTargetClass()(*args, **kw)
def test_ctor_w_explicit_connection(self):
http = _HTTP()
connection = _Connection(http=http)
batch = self._makeOne(connection)
self.assertTrue(batch._connection is connection)
self.assertEqual(len(batch._requests), 0)
self.assertEqual(len(batch._target_objects), 0)
def test_ctor_w_implicit_connection(self):
from gcloud.storage._testing import _monkey_defaults
http = _HTTP()
connection = _Connection(http=http)
with _monkey_defaults(connection=connection):
batch = self._makeOne()
self.assertTrue(batch._connection is connection)
self.assertEqual(len(batch._requests), 0)
self.assertEqual(len(batch._target_objects), 0)
def test__make_request_GET_normal(self):
from gcloud.storage.batch import _FutureDict
URL = 'http://example.com/api'
expected = _Response()
http = _HTTP((expected, ''))
connection = _Connection(http=http)
batch = self._makeOne(connection)
target = _MockObject()
response, content = batch._make_request('GET', URL,
target_object=target)
self.assertEqual(response.status, 204)
self.assertTrue(isinstance(content, _FutureDict))
self.assertTrue(target._properties is content)
self.assertEqual(http._requests, [])
EXPECTED_HEADERS = [
('Accept-Encoding', 'gzip'),
('Content-Length', 0),
]
solo_request, = batch._requests
self.assertEqual(solo_request[0], 'GET')
self.assertEqual(solo_request[1], URL)
headers = solo_request[2]
for key, value in EXPECTED_HEADERS:
self.assertEqual(headers[key], value)
self.assertEqual(solo_request[3], None)
def test__make_request_POST_normal(self):
from gcloud.storage.batch import _FutureDict
URL = 'http://example.com/api'
http = _HTTP() # no requests expected
connection = _Connection(http=http)
batch = self._makeOne(connection)
target = _MockObject()
response, content = batch._make_request('POST', URL, data={'foo': 1},
target_object=target)
self.assertEqual(response.status, 204)
self.assertTrue(isinstance(content, _FutureDict))
self.assertTrue(target._properties is content)
self.assertEqual(http._requests, [])
EXPECTED_HEADERS = [
('Accept-Encoding', 'gzip'),
('Content-Length', 10),
]
solo_request, = batch._requests
self.assertEqual(solo_request[0], 'POST')
self.assertEqual(solo_request[1], URL)
headers = solo_request[2]
for key, value in EXPECTED_HEADERS:
self.assertEqual(headers[key], value)
self.assertEqual(solo_request[3], {'foo': 1})
def test__make_request_PATCH_normal(self):
from gcloud.storage.batch import _FutureDict
URL = 'http://example.com/api'
http = _HTTP() # no requests expected
connection = _Connection(http=http)
batch = self._makeOne(connection)
target = _MockObject()
response, content = batch._make_request('PATCH', URL, data={'foo': 1},
target_object=target)
self.assertEqual(response.status, 204)
self.assertTrue(isinstance(content, _FutureDict))
self.assertTrue(target._properties is content)
self.assertEqual(http._requests, [])
EXPECTED_HEADERS = [
('Accept-Encoding', 'gzip'),
('Content-Length', 10),
]
solo_request, = batch._requests
self.assertEqual(solo_request[0], 'PATCH')
self.assertEqual(solo_request[1], URL)
headers = solo_request[2]
for key, value in EXPECTED_HEADERS:
self.assertEqual(headers[key], value)
self.assertEqual(solo_request[3], {'foo': 1})
def test__make_request_DELETE_normal(self):
from gcloud.storage.batch import _FutureDict
URL = 'http://example.com/api'
http = _HTTP() # no requests expected
connection = _Connection(http=http)
batch = self._makeOne(connection)
target = _MockObject()
response, content = batch._make_request('DELETE', URL,
target_object=target)
self.assertEqual(response.status, 204)
self.assertTrue(isinstance(content, _FutureDict))
self.assertTrue(target._properties is content)
self.assertEqual(http._requests, [])
EXPECTED_HEADERS = [
('Accept-Encoding', 'gzip'),
('Content-Length', 0),
]
solo_request, = batch._requests
self.assertEqual(solo_request[0], 'DELETE')
self.assertEqual(solo_request[1], URL)
headers = solo_request[2]
for key, value in EXPECTED_HEADERS:
self.assertEqual(headers[key], value)
self.assertEqual(solo_request[3], None)
def test__make_request_POST_too_many_requests(self):
URL = 'http://example.com/api'
http = _HTTP() # no requests expected
connection = _Connection(http=http)
batch = self._makeOne(connection)
batch._MAX_BATCH_SIZE = 1
batch._requests.append(('POST', URL, {}, {'bar': 2}))
self.assertRaises(ValueError,
batch._make_request, 'POST', URL, data={'foo': 1})
self.assertTrue(connection.http is http)
def test_finish_empty(self):
http = _HTTP() # no requests expected
connection = _Connection(http=http)
batch = self._makeOne(connection)
self.assertRaises(ValueError, batch.finish)
self.assertTrue(connection.http is http)
def _check_subrequest_no_payload(self, chunk, method, url):
lines = chunk.splitlines()
# blank + 2 headers + blank + request + blank + blank
self.assertEqual(len(lines), 7)
self.assertEqual(lines[0], '')
self.assertEqual(lines[1], 'Content-Type: application/http')
self.assertEqual(lines[2], 'MIME-Version: 1.0')
self.assertEqual(lines[3], '')
self.assertEqual(lines[4], '%s %s HTTP/1.1' % (method, url))
self.assertEqual(lines[5], '')
self.assertEqual(lines[6], '')
def _check_subrequest_payload(self, chunk, method, url, payload):
import json
lines = chunk.splitlines()
# blank + 2 headers + blank + request + 2 headers + blank + body
payload_str = json.dumps(payload)
self.assertEqual(lines[0], '')
self.assertEqual(lines[1], 'Content-Type: application/http')
self.assertEqual(lines[2], 'MIME-Version: 1.0')
self.assertEqual(lines[3], '')
self.assertEqual(lines[4], '%s %s HTTP/1.1' % (method, url))
if method == 'GET':
self.assertEqual(len(lines), 7)
self.assertEqual(lines[5], '')
self.assertEqual(lines[6], '')
else:
self.assertEqual(len(lines), 9)
self.assertEqual(lines[5], 'Content-Length: %d' % len(payload_str))
self.assertEqual(lines[6], 'Content-Type: application/json')
self.assertEqual(lines[7], '')
self.assertEqual(json.loads(lines[8]), payload)
def test_finish_nonempty(self):
import httplib2
URL = 'http://api.example.com/other_api'
expected = _Response()
expected['content-type'] = 'multipart/mixed; boundary="DEADBEEF="'
http = _HTTP((expected, _THREE_PART_MIME_RESPONSE))
connection = _Connection(http=http)
batch = self._makeOne(connection)
batch.API_BASE_URL = 'http://api.example.com'
batch._do_request('POST', URL, {}, {'foo': 1, 'bar': 2}, None)
batch._do_request('PATCH', URL, {}, {'bar': 3}, None)
batch._do_request('DELETE', URL, {}, None, None)
result = batch.finish()
self.assertEqual(len(result), len(batch._requests))
response0 = httplib2.Response({
'content-length': '20',
'content-type': 'application/json; charset=UTF-8',
'status': '200',
})
self.assertEqual(result[0], (response0, {'foo': 1, 'bar': 2}))
response1 = response0
self.assertEqual(result[1], (response1, {u'foo': 1, u'bar': 3}))
response2 = httplib2.Response({
'content-length': '0',
'status': '204',
})
self.assertEqual(result[2], (response2, ''))
self.assertEqual(len(http._requests), 1)
method, uri, headers, body = http._requests[0]
self.assertEqual(method, 'POST')
self.assertEqual(uri, 'http://api.example.com/batch')
self.assertEqual(len(headers), 2)
ctype, boundary = [x.strip()
for x in headers['Content-Type'].split(';')]
self.assertEqual(ctype, 'multipart/mixed')
self.assertTrue(boundary.startswith('boundary="=='))
self.assertTrue(boundary.endswith('=="'))
self.assertEqual(headers['MIME-Version'], '1.0')
divider = '--' + boundary[len('boundary="'):-1]
chunks = body.split(divider)[1:-1] # discard prolog / epilog
self.assertEqual(len(chunks), 3)
self._check_subrequest_payload(chunks[0], 'POST', URL,
{'foo': 1, 'bar': 2})
self._check_subrequest_payload(chunks[1], 'PATCH', URL, {'bar': 3})
self._check_subrequest_no_payload(chunks[2], 'DELETE', URL)
def test_finish_responses_mismatch(self):
URL = 'http://api.example.com/other_api'
expected = _Response()
expected['content-type'] = 'multipart/mixed; boundary="DEADBEEF="'
http = _HTTP((expected, _TWO_PART_MIME_RESPONSE_WITH_FAIL))
connection = _Connection(http=http)
batch = self._makeOne(connection)
batch.API_BASE_URL = 'http://api.example.com'
batch._requests.append(('GET', URL, {}, None))
self.assertRaises(ValueError, batch.finish)
def test_finish_nonempty_with_status_failure(self):
from gcloud.exceptions import NotFound
URL = 'http://api.example.com/other_api'
expected = _Response()
expected['content-type'] = 'multipart/mixed; boundary="DEADBEEF="'
http = _HTTP((expected, _TWO_PART_MIME_RESPONSE_WITH_FAIL))
connection = _Connection(http=http)
batch = self._makeOne(connection)
batch.API_BASE_URL = 'http://api.example.com'
target1 = _MockObject()
target2 = _MockObject()
batch._do_request('GET', URL, {}, None, target1)
batch._do_request('GET', URL, {}, None, target2)
# Make sure futures are not populated.
self.assertEqual([future for future in batch._target_objects],
[target1, target2])
target2_future_before = target2._properties
self.assertRaises(NotFound, batch.finish)
self.assertEqual(target1._properties,
{'foo': 1, 'bar': 2})
self.assertTrue(target2._properties is target2_future_before)
self.assertEqual(len(http._requests), 1)
method, uri, headers, body = http._requests[0]
self.assertEqual(method, 'POST')
self.assertEqual(uri, 'http://api.example.com/batch')
self.assertEqual(len(headers), 2)
ctype, boundary = [x.strip()
for x in headers['Content-Type'].split(';')]
self.assertEqual(ctype, 'multipart/mixed')
self.assertTrue(boundary.startswith('boundary="=='))
self.assertTrue(boundary.endswith('=="'))
self.assertEqual(headers['MIME-Version'], '1.0')
divider = '--' + boundary[len('boundary="'):-1]
chunks = body.split(divider)[1:-1] # discard prolog / epilog
self.assertEqual(len(chunks), 2)
self._check_subrequest_payload(chunks[0], 'GET', URL, {})
self._check_subrequest_payload(chunks[1], 'GET', URL, {})
def test_finish_nonempty_non_multipart_response(self):
URL = 'http://api.example.com/other_api'
expected = _Response()
expected['content-type'] = 'text/plain'
http = _HTTP((expected, 'NOT A MIME_RESPONSE'))
connection = _Connection(http=http)
batch = self._makeOne(connection)
batch._requests.append(('POST', URL, {}, {'foo': 1, 'bar': 2}))
batch._requests.append(('PATCH', URL, {}, {'bar': 3}))
batch._requests.append(('DELETE', URL, {}, None))
self.assertRaises(ValueError, batch.finish)
def test_as_context_mgr_wo_error(self):
from gcloud.storage.batch import _BATCHES
URL = 'http://example.com/api'
expected = _Response()
expected['content-type'] = 'multipart/mixed; boundary="DEADBEEF="'
http = _HTTP((expected, _THREE_PART_MIME_RESPONSE))
connection = _Connection(http=http)
self.assertEqual(list(_BATCHES), [])
target1 = _MockObject()
target2 = _MockObject()
target3 = _MockObject()
with self._makeOne(connection) as batch:
self.assertEqual(list(_BATCHES), [batch])
batch._make_request('POST', URL, {'foo': 1, 'bar': 2},
target_object=target1)
batch._make_request('PATCH', URL, {'bar': 3},
target_object=target2)
batch._make_request('DELETE', URL, target_object=target3)
self.assertEqual(list(_BATCHES), [])
self.assertEqual(len(batch._requests), 3)
self.assertEqual(batch._requests[0][0], 'POST')
self.assertEqual(batch._requests[1][0], 'PATCH')
self.assertEqual(batch._requests[2][0], 'DELETE')
self.assertEqual(batch._target_objects, [target1, target2, target3])
self.assertEqual(target1._properties,
{'foo': 1, 'bar': 2})
self.assertEqual(target2._properties,
{'foo': 1, 'bar': 3})
self.assertEqual(target3._properties, '')
def test_as_context_mgr_w_error(self):
from gcloud.storage.batch import _FutureDict
from gcloud.storage.batch import _BATCHES
URL = 'http://example.com/api'
http = _HTTP()
connection = _Connection(http=http)
self.assertEqual(list(_BATCHES), [])
target1 = _MockObject()
target2 = _MockObject()
target3 = _MockObject()
try:
with self._makeOne(connection) as batch:
self.assertEqual(list(_BATCHES), [batch])
batch._make_request('POST', URL, {'foo': 1, 'bar': 2},
target_object=target1)
batch._make_request('PATCH', URL, {'bar': 3},
target_object=target2)
batch._make_request('DELETE', URL, target_object=target3)
raise ValueError()
except ValueError:
pass
self.assertEqual(list(_BATCHES), [])
self.assertEqual(len(http._requests), 0)
self.assertEqual(len(batch._requests), 3)
self.assertEqual(batch._target_objects, [target1, target2, target3])
# Since the context manager fails, finish will not get called and
# the _properties will still be futures.
self.assertTrue(isinstance(target1._properties, _FutureDict))
self.assertTrue(isinstance(target2._properties, _FutureDict))
self.assertTrue(isinstance(target3._properties, _FutureDict))
class Test__unpack_batch_response(unittest2.TestCase):
def _callFUT(self, response, content):
from gcloud.storage.batch import _unpack_batch_response
return _unpack_batch_response(response, content)
def _unpack_helper(self, response, content):
import httplib2
result = list(self._callFUT(response, content))
self.assertEqual(len(result), 3)
response0 = httplib2.Response({
'content-length': '20',
'content-type': 'application/json; charset=UTF-8',
'status': '200',
})
self.assertEqual(result[0], (response0, {u'bar': 2, u'foo': 1}))
response1 = response0
self.assertEqual(result[1], (response1, {u'foo': 1, u'bar': 3}))
response2 = httplib2.Response({
'content-length': '0',
'status': '204',
})
self.assertEqual(result[2], (response2, ''))
def test_bytes(self):
RESPONSE = {'content-type': b'multipart/mixed; boundary="DEADBEEF="'}
CONTENT = _THREE_PART_MIME_RESPONSE
self._unpack_helper(RESPONSE, CONTENT)
def test_unicode(self):
RESPONSE = {'content-type': u'multipart/mixed; boundary="DEADBEEF="'}
CONTENT = _THREE_PART_MIME_RESPONSE.decode('utf-8')
self._unpack_helper(RESPONSE, CONTENT)
_TWO_PART_MIME_RESPONSE_WITH_FAIL = b"""\
--DEADBEEF=
Content-Type: application/http
Content-ID: <response-8a09ca85-8d1d-4f45-9eb0-da8e8b07ec83+1>
HTTP/1.1 200 OK
Content-Type: application/json; charset=UTF-8
Content-Length: 20
{"foo": 1, "bar": 2}
--DEADBEEF=
Content-Type: application/http
Content-ID: <response-8a09ca85-8d1d-4f45-9eb0-da8e8b07ec83+2>
HTTP/1.1 404 Not Found
Content-Type: application/json; charset=UTF-8
Content-Length: 35
{"error": {"message": "Not Found"}}
--DEADBEEF=--
"""
_THREE_PART_MIME_RESPONSE = b"""\
--DEADBEEF=
Content-Type: application/http
Content-ID: <response-8a09ca85-8d1d-4f45-9eb0-da8e8b07ec83+1>
HTTP/1.1 200 OK
Content-Type: application/json; charset=UTF-8
Content-Length: 20
{"foo": 1, "bar": 2}
--DEADBEEF=
Content-Type: application/http
Content-ID: <response-8a09ca85-8d1d-4f45-9eb0-da8e8b07ec83+2>
HTTP/1.1 200 OK
Content-Type: application/json; charset=UTF-8
Content-Length: 20
{"foo": 1, "bar": 3}
--DEADBEEF=
Content-Type: application/http
Content-ID: <response-8a09ca85-8d1d-4f45-9eb0-da8e8b07ec83+3>
HTTP/1.1 204 No Content
Content-Length: 0
--DEADBEEF=--
"""
class Test__FutureDict(unittest2.TestCase):
def _makeOne(self, *args, **kw):
from gcloud.storage.batch import _FutureDict
return _FutureDict(*args, **kw)
def test_get(self):
future = self._makeOne()
self.assertRaises(KeyError, future.get, None)
def test___getitem__(self):
future = self._makeOne()
value = orig_value = object()
with self.assertRaises(KeyError):
value = future[None]
self.assertTrue(value is orig_value)
def test___setitem__(self):
future = self._makeOne()
with self.assertRaises(KeyError):
future[None] = None
class _Connection(object):
project = 'TESTING'
def __init__(self, **kw):
self.__dict__.update(kw)
def build_api_url(self, path, **_): # pragma: NO COVER
return 'http://api.example.com%s' % path
def _make_request(self, method, url, data=None, content_type=None,
headers=None):
if content_type is not None: # pragma: NO COVER
headers['Content-Type'] = content_type
return self.http.request(method, uri=url, headers=headers, body=data)
def api_request(self, method, path, query_params=None,
data=None, content_type=None,
api_base_url=None, api_version=None,
expect_json=True): # pragma: NO COVER
pass
class _Response(dict):
def __init__(self, status=200, **kw):
self.status = status
super(_Response, self).__init__(**kw)
class _HTTP(object):
def __init__(self, *responses):
self._requests = []
self._responses = list(responses)
def request(self, method, uri, headers, body):
self._requests.append((method, uri, headers, body))
response, self._responses = self._responses[0], self._responses[1:]
return response
class _MockObject(object):
pass
|
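Editorial aside for the test_batch.py entry above: the `_THREE_PART_MIME_RESPONSE` fixture is a `multipart/mixed` body whose parts are `application/http` sub-responses. A minimal sketch (not the gcloud implementation) of splitting such a body with the standard-library `email` parser:

```python
# Hedged sketch: reattach the multipart Content-Type header and let the stdlib
# email parser split the batch body into its application/http sub-responses.
import email

body = (
    'Content-Type: multipart/mixed; boundary="DEADBEEF="\r\n'
    '\r\n'
    '--DEADBEEF=\r\n'
    'Content-Type: application/http\r\n'
    '\r\n'
    'HTTP/1.1 200 OK\r\n'
    'Content-Type: application/json; charset=UTF-8\r\n'
    '\r\n'
    '{"foo": 1, "bar": 2}\r\n'
    '--DEADBEEF=--\r\n'
)

message = email.message_from_string(body)
for part in message.get_payload():               # one Message per sub-response
    assert part.get_content_type() == 'application/http'
    status_line = part.get_payload().splitlines()[0]
    print(status_line)                           # -> HTTP/1.1 200 OK
```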
OneBitSoftware/jwtSample | refs/heads/master | src/Spa/env1/Lib/site-packages/werkzeug/http.py | 148 |
# -*- coding: utf-8 -*-
"""
werkzeug.http
~~~~~~~~~~~~~
Werkzeug comes with a bunch of utilities that help Werkzeug to deal with
HTTP data. Most of the classes and functions provided by this module are
used by the wrappers, but they are useful on their own, too, especially if
the response and request objects are not used.
This covers some of the more HTTP centric features of WSGI, some other
utilities such as cookie handling are documented in the `werkzeug.utils`
module.
:copyright: (c) 2014 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import re
from time import time, gmtime
try:
from email.utils import parsedate_tz
except ImportError: # pragma: no cover
from email.Utils import parsedate_tz
try:
from urllib2 import parse_http_list as _parse_list_header
except ImportError: # pragma: no cover
from urllib.request import parse_http_list as _parse_list_header
from datetime import datetime, timedelta
from hashlib import md5
import base64
from werkzeug._internal import _cookie_quote, _make_cookie_domain, \
_cookie_parse_impl
from werkzeug._compat import to_unicode, iteritems, text_type, \
string_types, try_coerce_native, to_bytes, PY2, \
integer_types
_cookie_charset = 'latin1'
# for explanation of "media-range", etc. see Sections 5.3.{1,2} of RFC 7231
_accept_re = re.compile(
r'''( # media-range capturing-parenthesis
[^\s;,]+ # type/subtype
(?:[ \t]*;[ \t]* # ";"
(?: # parameter non-capturing-parenthesis
[^\s;,q][^\s;,]* # token that doesn't start with "q"
| # or
q[^\s;,=][^\s;,]* # token that is more than just "q"
)
)* # zero or more parameters
) # end of media-range
(?:[ \t]*;[ \t]*q= # weight is a "q" parameter
(\d*(?:\.\d+)?) # qvalue capturing-parentheses
[^,]* # "extension" accept params: who cares?
)? # accept params are optional
''', re.VERBOSE)
_token_chars = frozenset("!#$%&'*+-.0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
'^_`abcdefghijklmnopqrstuvwxyz|~')
_etag_re = re.compile(r'([Ww]/)?(?:"(.*?)"|(.*?))(?:\s*,\s*|$)')
_unsafe_header_chars = set('()<>@,;:\"/[]?={} \t')
_quoted_string_re = r'"[^"\\]*(?:\\.[^"\\]*)*"'
_option_header_piece_re = re.compile(r';\s*(%s|[^\s;=]+)\s*(?:=\s*(%s|[^;]+))?\s*' %
(_quoted_string_re, _quoted_string_re))
_entity_headers = frozenset([
'allow', 'content-encoding', 'content-language', 'content-length',
'content-location', 'content-md5', 'content-range', 'content-type',
'expires', 'last-modified'
])
_hop_by_hop_headers = frozenset([
'connection', 'keep-alive', 'proxy-authenticate',
'proxy-authorization', 'te', 'trailer', 'transfer-encoding',
'upgrade'
])
HTTP_STATUS_CODES = {
100: 'Continue',
101: 'Switching Protocols',
102: 'Processing',
200: 'OK',
201: 'Created',
202: 'Accepted',
203: 'Non Authoritative Information',
204: 'No Content',
205: 'Reset Content',
206: 'Partial Content',
207: 'Multi Status',
226: 'IM Used', # see RFC 3229
300: 'Multiple Choices',
301: 'Moved Permanently',
302: 'Found',
303: 'See Other',
304: 'Not Modified',
305: 'Use Proxy',
307: 'Temporary Redirect',
400: 'Bad Request',
401: 'Unauthorized',
402: 'Payment Required', # unused
403: 'Forbidden',
404: 'Not Found',
405: 'Method Not Allowed',
406: 'Not Acceptable',
407: 'Proxy Authentication Required',
408: 'Request Timeout',
409: 'Conflict',
410: 'Gone',
411: 'Length Required',
412: 'Precondition Failed',
413: 'Request Entity Too Large',
414: 'Request URI Too Long',
415: 'Unsupported Media Type',
416: 'Requested Range Not Satisfiable',
417: 'Expectation Failed',
418: 'I\'m a teapot', # see RFC 2324
422: 'Unprocessable Entity',
423: 'Locked',
424: 'Failed Dependency',
426: 'Upgrade Required',
428: 'Precondition Required', # see RFC 6585
429: 'Too Many Requests',
431: 'Request Header Fields Too Large',
449: 'Retry With', # proprietary MS extension
500: 'Internal Server Error',
501: 'Not Implemented',
502: 'Bad Gateway',
503: 'Service Unavailable',
504: 'Gateway Timeout',
505: 'HTTP Version Not Supported',
507: 'Insufficient Storage',
510: 'Not Extended'
}
def wsgi_to_bytes(data):
"""coerce wsgi unicode represented bytes to real ones
"""
if isinstance(data, bytes):
return data
return data.encode('latin1') #XXX: utf8 fallback?
def bytes_to_wsgi(data):
assert isinstance(data, bytes), 'data must be bytes'
if isinstance(data, str):
return data
else:
return data.decode('latin1')
def quote_header_value(value, extra_chars='', allow_token=True):
"""Quote a header value if necessary.
.. versionadded:: 0.5
:param value: the value to quote.
:param extra_chars: a list of extra characters to skip quoting.
:param allow_token: if this is enabled token values are returned
unchanged.
"""
if isinstance(value, bytes):
value = bytes_to_wsgi(value)
value = str(value)
if allow_token:
token_chars = _token_chars | set(extra_chars)
if set(value).issubset(token_chars):
return value
return '"%s"' % value.replace('\\', '\\\\').replace('"', '\\"')
def unquote_header_value(value, is_filename=False):
r"""Unquotes a header value. (Reversal of :func:`quote_header_value`).
This does not use the real unquoting but what browsers are actually
using for quoting.
.. versionadded:: 0.5
:param value: the header value to unquote.
"""
if value and value[0] == value[-1] == '"':
# this is not the real unquoting, but fixing this so that the
# RFC is met will result in bugs with internet explorer and
# probably some other browsers as well. IE for example is
# uploading files with "C:\foo\bar.txt" as filename
value = value[1:-1]
# if this is a filename and the starting characters look like
# a UNC path, then just return the value without quotes. Using the
# replace sequence below on a UNC path has the effect of turning
# the leading double slash into a single slash and then
# _fix_ie_filename() doesn't work correctly. See #458.
if not is_filename or value[:2] != '\\\\':
return value.replace('\\\\', '\\').replace('\\"', '"')
return value
def dump_options_header(header, options):
"""The reverse function to :func:`parse_options_header`.
:param header: the header to dump
:param options: a dict of options to append.
"""
segments = []
if header is not None:
segments.append(header)
for key, value in iteritems(options):
if value is None:
segments.append(key)
else:
segments.append('%s=%s' % (key, quote_header_value(value)))
return '; '.join(segments)
def dump_header(iterable, allow_token=True):
"""Dump an HTTP header again. This is the reversal of
:func:`parse_list_header`, :func:`parse_set_header` and
:func:`parse_dict_header`. This also quotes strings that include an
equals sign unless you pass it as dict of key, value pairs.
>>> dump_header({'foo': 'bar baz'})
'foo="bar baz"'
>>> dump_header(('foo', 'bar baz'))
'foo, "bar baz"'
:param iterable: the iterable or dict of values to quote.
:param allow_token: if set to `False` tokens as values are disallowed.
See :func:`quote_header_value` for more details.
"""
if isinstance(iterable, dict):
items = []
for key, value in iteritems(iterable):
if value is None:
items.append(key)
else:
items.append('%s=%s' % (
key,
quote_header_value(value, allow_token=allow_token)
))
else:
items = [quote_header_value(x, allow_token=allow_token)
for x in iterable]
return ', '.join(items)
def parse_list_header(value):
"""Parse lists as described by RFC 2068 Section 2.
In particular, parse comma-separated lists where the elements of
the list may include quoted-strings. A quoted-string could
contain a comma. A non-quoted string could have quotes in the
middle. Quotes are removed automatically after parsing.
It basically works like :func:`parse_set_header` just that items
may appear multiple times and case sensitivity is preserved.
The return value is a standard :class:`list`:
>>> parse_list_header('token, "quoted value"')
['token', 'quoted value']
To create a header from the :class:`list` again, use the
:func:`dump_header` function.
:param value: a string with a list header.
:return: :class:`list`
"""
result = []
for item in _parse_list_header(value):
if item[:1] == item[-1:] == '"':
item = unquote_header_value(item[1:-1])
result.append(item)
return result
def parse_dict_header(value, cls=dict):
"""Parse lists of key, value pairs as described by RFC 2068 Section 2 and
convert them into a python dict (or any other mapping object created from
the type with a dict-like interface provided by the `cls` argument):
>>> d = parse_dict_header('foo="is a fish", bar="as well"')
>>> type(d) is dict
True
>>> sorted(d.items())
[('bar', 'as well'), ('foo', 'is a fish')]
If there is no value for a key it will be `None`:
>>> parse_dict_header('key_without_value')
{'key_without_value': None}
To create a header from the :class:`dict` again, use the
:func:`dump_header` function.
.. versionchanged:: 0.9
Added support for `cls` argument.
:param value: a string with a dict header.
:param cls: callable to use for storage of parsed results.
:return: an instance of `cls`
"""
result = cls()
if not isinstance(value, text_type):
#XXX: validate
value = bytes_to_wsgi(value)
for item in _parse_list_header(value):
if '=' not in item:
result[item] = None
continue
name, value = item.split('=', 1)
if value[:1] == value[-1:] == '"':
value = unquote_header_value(value[1:-1])
result[name] = value
return result
def parse_options_header(value):
"""Parse a ``Content-Type`` like header into a tuple with the content
type and the options:
>>> parse_options_header('text/html; charset=utf8')
('text/html', {'charset': 'utf8'})
This should not be used to parse ``Cache-Control`` like headers that use
a slightly different format. For these headers use the
:func:`parse_dict_header` function.
.. versionadded:: 0.5
:param value: the header to parse.
:return: (str, options)
"""
def _tokenize(string):
for match in _option_header_piece_re.finditer(string):
key, value = match.groups()
key = unquote_header_value(key)
if value is not None:
value = unquote_header_value(value, key == 'filename')
yield key, value
if not value:
return '', {}
parts = _tokenize(';' + value)
name = next(parts)[0]
extra = dict(parts)
return name, extra
def parse_accept_header(value, cls=None):
"""Parses an HTTP Accept-* header. This does not implement a complete
valid algorithm but one that supports at least value and quality
extraction.
Returns a new :class:`Accept` object (basically a list of ``(value, quality)``
tuples sorted by the quality with some additional accessor methods).
The second parameter can be a subclass of :class:`Accept` that is created
with the parsed values and returned.
:param value: the accept header string to be parsed.
:param cls: the wrapper class for the return value (can be
:class:`Accept` or a subclass thereof)
:return: an instance of `cls`.
"""
if cls is None:
cls = Accept
if not value:
return cls(None)
result = []
for match in _accept_re.finditer(value):
quality = match.group(2)
if not quality:
quality = 1
else:
quality = max(min(float(quality), 1), 0)
result.append((match.group(1), quality))
return cls(result)
def parse_cache_control_header(value, on_update=None, cls=None):
"""Parse a cache control header. The RFC differs between response and
request cache control, this method does not. It's your responsibility
to not use the wrong control statements.
.. versionadded:: 0.5
The `cls` was added. If not specified an immutable
:class:`~werkzeug.datastructures.RequestCacheControl` is returned.
:param value: a cache control header to be parsed.
:param on_update: an optional callable that is called every time a value
on the :class:`~werkzeug.datastructures.CacheControl`
object is changed.
:param cls: the class for the returned object. By default
:class:`~werkzeug.datastructures.RequestCacheControl` is used.
:return: a `cls` object.
"""
if cls is None:
cls = RequestCacheControl
if not value:
return cls(None, on_update)
return cls(parse_dict_header(value), on_update)
def parse_set_header(value, on_update=None):
"""Parse a set-like header and return a
:class:`~werkzeug.datastructures.HeaderSet` object:
>>> hs = parse_set_header('token, "quoted value"')
The return value is an object that treats the items case-insensitively
and keeps the order of the items:
>>> 'TOKEN' in hs
True
>>> hs.index('quoted value')
1
>>> hs
HeaderSet(['token', 'quoted value'])
To create a header from the :class:`HeaderSet` again, use the
:func:`dump_header` function.
:param value: a set header to be parsed.
:param on_update: an optional callable that is called every time a
value on the :class:`~werkzeug.datastructures.HeaderSet`
object is changed.
:return: a :class:`~werkzeug.datastructures.HeaderSet`
"""
if not value:
return HeaderSet(None, on_update)
return HeaderSet(parse_list_header(value), on_update)
def parse_authorization_header(value):
"""Parse an HTTP basic/digest authorization header transmitted by the web
browser. The return value is either `None` if the header was invalid or
not given, otherwise an :class:`~werkzeug.datastructures.Authorization`
object.
:param value: the authorization header to parse.
:return: a :class:`~werkzeug.datastructures.Authorization` object or `None`.
"""
if not value:
return
value = wsgi_to_bytes(value)
try:
auth_type, auth_info = value.split(None, 1)
auth_type = auth_type.lower()
except ValueError:
return
if auth_type == b'basic':
try:
username, password = base64.b64decode(auth_info).split(b':', 1)
except Exception as e:
return
return Authorization('basic', {'username': bytes_to_wsgi(username),
'password': bytes_to_wsgi(password)})
elif auth_type == b'digest':
auth_map = parse_dict_header(auth_info)
for key in 'username', 'realm', 'nonce', 'uri', 'response':
if not key in auth_map:
return
if 'qop' in auth_map:
if not auth_map.get('nc') or not auth_map.get('cnonce'):
return
return Authorization('digest', auth_map)
def parse_www_authenticate_header(value, on_update=None):
"""Parse an HTTP WWW-Authenticate header into a
:class:`~werkzeug.datastructures.WWWAuthenticate` object.
:param value: a WWW-Authenticate header to parse.
:param on_update: an optional callable that is called every time a value
on the :class:`~werkzeug.datastructures.WWWAuthenticate`
object is changed.
:return: a :class:`~werkzeug.datastructures.WWWAuthenticate` object.
"""
if not value:
return WWWAuthenticate(on_update=on_update)
try:
auth_type, auth_info = value.split(None, 1)
auth_type = auth_type.lower()
except (ValueError, AttributeError):
return WWWAuthenticate(value.strip().lower(), on_update=on_update)
return WWWAuthenticate(auth_type, parse_dict_header(auth_info),
on_update)
def parse_if_range_header(value):
"""Parses an if-range header which can be an etag or a date. Returns
a :class:`~werkzeug.datastructures.IfRange` object.
.. versionadded:: 0.7
"""
if not value:
return IfRange()
date = parse_date(value)
if date is not None:
return IfRange(date=date)
# drop weakness information
return IfRange(unquote_etag(value)[0])
def parse_range_header(value, make_inclusive=True):
"""Parses a range header into a :class:`~werkzeug.datastructures.Range`
object. If the header is missing or malformed `None` is returned.
`ranges` is a list of ``(start, stop)`` tuples where the ranges are
non-inclusive.
.. versionadded:: 0.7
"""
if not value or '=' not in value:
return None
ranges = []
last_end = 0
units, rng = value.split('=', 1)
units = units.strip().lower()
for item in rng.split(','):
item = item.strip()
if '-' not in item:
return None
if item.startswith('-'):
if last_end < 0:
return None
begin = int(item)
end = None
last_end = -1
elif '-' in item:
begin, end = item.split('-', 1)
begin = int(begin)
if begin < last_end or last_end < 0:
return None
if end:
end = int(end) + 1
if begin >= end:
return None
else:
end = None
last_end = end
ranges.append((begin, end))
return Range(units, ranges)
def parse_content_range_header(value, on_update=None):
"""Parses a range header into a
:class:`~werkzeug.datastructures.ContentRange` object or `None` if
parsing is not possible.
.. versionadded:: 0.7
:param value: a content range header to be parsed.
:param on_update: an optional callable that is called every time a value
on the :class:`~werkzeug.datastructures.ContentRange`
object is changed.
"""
if value is None:
return None
try:
units, rangedef = (value or '').strip().split(None, 1)
except ValueError:
return None
if '/' not in rangedef:
return None
rng, length = rangedef.split('/', 1)
if length == '*':
length = None
elif length.isdigit():
length = int(length)
else:
return None
if rng == '*':
return ContentRange(units, None, None, length, on_update=on_update)
elif '-' not in rng:
return None
start, stop = rng.split('-', 1)
try:
start = int(start)
stop = int(stop) + 1
except ValueError:
return None
if is_byte_range_valid(start, stop, length):
return ContentRange(units, start, stop, length, on_update=on_update)
def quote_etag(etag, weak=False):
"""Quote an etag.
:param etag: the etag to quote.
:param weak: set to `True` to tag it "weak".
"""
if '"' in etag:
raise ValueError('invalid etag')
etag = '"%s"' % etag
if weak:
etag = 'w/' + etag
return etag
def unquote_etag(etag):
"""Unquote a single etag:
>>> unquote_etag('w/"bar"')
('bar', True)
>>> unquote_etag('"bar"')
('bar', False)
:param etag: the etag identifier to unquote.
:return: a ``(etag, weak)`` tuple.
"""
if not etag:
return None, None
etag = etag.strip()
weak = False
if etag[:2] in ('w/', 'W/'):
weak = True
etag = etag[2:]
if etag[:1] == etag[-1:] == '"':
etag = etag[1:-1]
return etag, weak
def parse_etags(value):
"""Parse an etag header.
:param value: the tag header to parse
:return: an :class:`~werkzeug.datastructures.ETags` object.
"""
if not value:
return ETags()
strong = []
weak = []
end = len(value)
pos = 0
while pos < end:
match = _etag_re.match(value, pos)
if match is None:
break
is_weak, quoted, raw = match.groups()
if raw == '*':
return ETags(star_tag=True)
elif quoted:
raw = quoted
if is_weak:
weak.append(raw)
else:
strong.append(raw)
pos = match.end()
return ETags(strong, weak)
def generate_etag(data):
"""Generate an etag for some data."""
return md5(data).hexdigest()
def parse_date(value):
"""Parse one of the following date formats into a datetime object:
.. sourcecode:: text
Sun, 06 Nov 1994 08:49:37 GMT ; RFC 822, updated by RFC 1123
Sunday, 06-Nov-94 08:49:37 GMT ; RFC 850, obsoleted by RFC 1036
Sun Nov 6 08:49:37 1994 ; ANSI C's asctime() format
If parsing fails the return value is `None`.
:param value: a string with a supported date format.
:return: a :class:`datetime.datetime` object.
"""
if value:
t = parsedate_tz(value.strip())
if t is not None:
try:
year = t[0]
# unfortunately that function does not tell us if two digit
# years were part of the string, or if they were prefixed
# with two zeroes. So what we do is to assume that 69-99
# refer to 1900, and everything below to 2000
if year >= 0 and year <= 68:
year += 2000
elif year >= 69 and year <= 99:
year += 1900
return datetime(*((year,) + t[1:7])) - \
timedelta(seconds=t[-1] or 0)
except (ValueError, OverflowError):
return None
def _dump_date(d, delim):
"""Used for `http_date` and `cookie_date`."""
if d is None:
d = gmtime()
elif isinstance(d, datetime):
d = d.utctimetuple()
elif isinstance(d, (integer_types, float)):
d = gmtime(d)
return '%s, %02d%s%s%s%s %02d:%02d:%02d GMT' % (
('Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun')[d.tm_wday],
d.tm_mday, delim,
('Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep',
'Oct', 'Nov', 'Dec')[d.tm_mon - 1],
delim, str(d.tm_year), d.tm_hour, d.tm_min, d.tm_sec
)
def cookie_date(expires=None):
"""Formats the time to ensure compatibility with Netscape's cookie
standard.
Accepts a floating point number expressed in seconds since the epoch, a
datetime object or a timetuple. All times in UTC. The :func:`parse_date`
function can be used to parse such a date.
Outputs a string in the format ``Wdy, DD-Mon-YYYY HH:MM:SS GMT``.
:param expires: If provided that date is used, otherwise the current.
"""
return _dump_date(expires, '-')
def http_date(timestamp=None):
"""Formats the time to match the RFC1123 date format.
Accepts a floating point number expressed in seconds since the epoch, a
datetime object or a timetuple. All times in UTC. The :func:`parse_date`
function can be used to parse such a date.
Outputs a string in the format ``Wdy, DD Mon YYYY HH:MM:SS GMT``.
:param timestamp: If provided that date is used, otherwise the current.
"""
return _dump_date(timestamp, ' ')
def is_resource_modified(environ, etag=None, data=None, last_modified=None):
"""Convenience method for conditional requests.
:param environ: the WSGI environment of the request to be checked.
:param etag: the etag for the response for comparison.
:param data: or alternatively the data of the response to automatically
generate an etag using :func:`generate_etag`.
:param last_modified: an optional date of the last modification.
:return: `True` if the resource was modified, otherwise `False`.
"""
if etag is None and data is not None:
etag = generate_etag(data)
elif data is not None:
raise TypeError('both data and etag given')
if environ['REQUEST_METHOD'] not in ('GET', 'HEAD'):
return False
unmodified = False
if isinstance(last_modified, string_types):
last_modified = parse_date(last_modified)
# ensure that microsecond is zero because the HTTP spec does not transmit
# that either and we might have some false positives. See issue #39
if last_modified is not None:
last_modified = last_modified.replace(microsecond=0)
modified_since = parse_date(environ.get('HTTP_IF_MODIFIED_SINCE'))
if modified_since and last_modified and last_modified <= modified_since:
unmodified = True
if etag:
if_none_match = parse_etags(environ.get('HTTP_IF_NONE_MATCH'))
if if_none_match:
unmodified = if_none_match.contains_raw(etag)
return not unmodified
def remove_entity_headers(headers, allowed=('expires', 'content-location')):
"""Remove all entity headers from a list or :class:`Headers` object. This
operation works in-place. `Expires` and `Content-Location` headers are
by default not removed. The reason for this is :rfc:`2616` section
10.3.5 which specifies some entity headers that should be sent.
.. versionchanged:: 0.5
added `allowed` parameter.
:param headers: a list or :class:`Headers` object.
:param allowed: a list of headers that should still be allowed even though
they are entity headers.
"""
allowed = set(x.lower() for x in allowed)
headers[:] = [(key, value) for key, value in headers if
not is_entity_header(key) or key.lower() in allowed]
def remove_hop_by_hop_headers(headers):
"""Remove all HTTP/1.1 "Hop-by-Hop" headers from a list or
:class:`Headers` object. This operation works in-place.
.. versionadded:: 0.5
:param headers: a list or :class:`Headers` object.
"""
headers[:] = [(key, value) for key, value in headers if
not is_hop_by_hop_header(key)]
def is_entity_header(header):
"""Check if a header is an entity header.
.. versionadded:: 0.5
:param header: the header to test.
:return: `True` if it's an entity header, `False` otherwise.
"""
return header.lower() in _entity_headers
def is_hop_by_hop_header(header):
"""Check if a header is an HTTP/1.1 "Hop-by-Hop" header.
.. versionadded:: 0.5
:param header: the header to test.
:return: `True` if it's an entity header, `False` otherwise.
"""
return header.lower() in _hop_by_hop_headers
def parse_cookie(header, charset='utf-8', errors='replace', cls=None):
"""Parse a cookie. Either from a string or WSGI environ.
By default encoding errors are replaced. If you want a different behavior
you can set `errors` to ``'ignore'`` or ``'strict'``. In strict mode a
:exc:`HTTPUnicodeError` is raised.
.. versionchanged:: 0.5
This function now returns a :class:`TypeConversionDict` instead of a
regular dict. The `cls` parameter was added.
:param header: the header to be used to parse the cookie. Alternatively
this can be a WSGI environment.
:param charset: the charset for the cookie values.
:param errors: the error behavior for the charset decoding.
:param cls: an optional dict class to use. If this is not specified
or `None` the default :class:`TypeConversionDict` is
used.
"""
if isinstance(header, dict):
header = header.get('HTTP_COOKIE', '')
elif header is None:
header = ''
# If the value is a unicode string it's mangled through latin1. This
# is done because per PEP 3333 on Python 3 all headers are assumed latin1,
# which however is incorrect for cookies, which are sent in page encoding.
# As a result we re-encode the header to latin1 bytes here so that the real
# charset can be applied when the individual values are decoded below.
if isinstance(header, text_type):
header = header.encode('latin1', 'replace')
if cls is None:
cls = TypeConversionDict
def _parse_pairs():
for key, val in _cookie_parse_impl(header):
key = to_unicode(key, charset, errors, allow_none_charset=True)
val = to_unicode(val, charset, errors, allow_none_charset=True)
yield try_coerce_native(key), val
return cls(_parse_pairs())
def dump_cookie(key, value='', max_age=None, expires=None, path='/',
domain=None, secure=False, httponly=False,
charset='utf-8', sync_expires=True):
"""Creates a new Set-Cookie header without the ``Set-Cookie`` prefix
The parameters are the same as in the cookie Morsel object in the
Python standard library but it accepts unicode data, too.
On Python 3 the return value of this function will be a unicode
string, on Python 2 it will be a native string. In both cases the
return value is usually restricted to ascii as the vast majority of
values are properly escaped, but that is no guarantee. If a unicode
string is returned it's tunneled through latin1 as required by
PEP 3333.
The return value is not ASCII safe if the key contains unicode
characters. This is technically against the specification but
happens in the wild. It's strongly recommended to not use
non-ASCII values for the keys.
:param max_age: should be a number of seconds, or `None` (default) if
the cookie should last only as long as the client's
browser session. Additionally `timedelta` objects
are accepted, too.
:param expires: should be a `datetime` object or unix timestamp.
:param path: limits the cookie to a given path, per default it will
span the whole domain.
:param domain: Use this if you want to set a cross-domain cookie. For
example, ``domain=".example.com"`` will set a cookie
that is readable by the domain ``www.example.com``,
``foo.example.com`` etc. Otherwise, a cookie will only
be readable by the domain that set it.
:param secure: The cookie will only be available via HTTPS
:param httponly: disallow JavaScript to access the cookie. This is an
extension to the cookie standard and probably not
supported by all browsers.
:param charset: the encoding for unicode values.
:param sync_expires: automatically set expires if max_age is defined
but expires not.
"""
key = to_bytes(key, charset)
value = to_bytes(value, charset)
if path is not None:
path = iri_to_uri(path, charset)
domain = _make_cookie_domain(domain)
if isinstance(max_age, timedelta):
max_age = (max_age.days * 60 * 60 * 24) + max_age.seconds
if expires is not None:
if not isinstance(expires, string_types):
expires = cookie_date(expires)
elif max_age is not None and sync_expires:
expires = to_bytes(cookie_date(time() + max_age))
buf = [key + b'=' + _cookie_quote(value)]
# XXX: In theory all of these parameters that are not marked with `None`
# should be quoted. Because stdlib did not quote it before I did not
# want to introduce quoting there now.
for k, v, q in ((b'Domain', domain, True),
(b'Expires', expires, False,),
(b'Max-Age', max_age, False),
(b'Secure', secure, None),
(b'HttpOnly', httponly, None),
(b'Path', path, False)):
if q is None:
if v:
buf.append(k)
continue
if v is None:
continue
tmp = bytearray(k)
if not isinstance(v, (bytes, bytearray)):
v = to_bytes(text_type(v), charset)
if q:
v = _cookie_quote(v)
tmp += b'=' + v
buf.append(bytes(tmp))
# The return value will be an incorrectly encoded latin1 header on
# Python 3 for consistency with the headers object and a bytestring
# on Python 2 because that's how the API makes more sense.
rv = b'; '.join(buf)
if not PY2:
rv = rv.decode('latin1')
return rv
def is_byte_range_valid(start, stop, length):
"""Checks if a given byte content range is valid for the given length.
.. versionadded:: 0.7
"""
if (start is None) != (stop is None):
return False
elif start is None:
return length is None or length >= 0
elif length is None:
return 0 <= start < stop
elif start >= stop:
return False
return 0 <= start < length
# circular dependency fun
from werkzeug.datastructures import Accept, HeaderSet, ETags, Authorization, \
WWWAuthenticate, TypeConversionDict, IfRange, Range, ContentRange, \
RequestCacheControl
# DEPRECATED
# backwards compatible imports
from werkzeug.datastructures import MIMEAccept, CharsetAccept, \
LanguageAccept, Headers
from werkzeug.urls import iri_to_uri
|
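Editorial aside for the werkzeug http.py entry above: a short, hedged usage sketch of the parsing helpers documented in the module, mirroring what the docstrings describe (exclusive range stops, token vs. quoted values):

```python
# Hedged usage sketch based on the docstrings above; not part of the dumped file.
from werkzeug.http import (
    dump_options_header, parse_options_header, parse_range_header,
)

ctype, options = parse_options_header('text/html; charset=utf8')
assert (ctype, options) == ('text/html', {'charset': 'utf8'})
# dump_options_header is the documented inverse and leaves token values unquoted.
assert dump_options_header(ctype, options) == 'text/html; charset=utf8'

# parse_range_header returns non-inclusive stops, so bytes=0-499 becomes (0, 500).
rng = parse_range_header('bytes=0-499')
assert rng.units == 'bytes'
assert rng.ranges == [(0, 500)]
```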
stackforge/ospurge | refs/heads/master | ospurge/tests/resources/__init__.py | 12133432 | |
tadek-project/tadek-ui | refs/heads/master | src/result/__init__.py | 12133432 | |
sassoftware/mint | refs/heads/master | mint_test/resttest/apitest/modeltest/imagestest.py | 1 |
#!/usr/bin/python
#
# Copyright (c) SAS Institute Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from mint_test import mint_rephelp
from mint.rest.api import models
from mint.rest.modellib import converter
class ImageModelTest(mint_rephelp.MintDatabaseHelper):
def testImageListModel(self):
images = converter.fromText('xml', imageList, models.ImageList, None,
None)
assert(images.images[0].imageId == 1)
def testImageFileURL(self):
xml = """\
<image>
<name>my image</name>
<files>
<file>
<title>image title</title>
<size>1234</size>
<sha1>abc</sha1>
<fileName>file name</fileName>
<url urlType="0">http://localhost:1234</url>
<url>http://localhost:12345</url>
<url urlType="1"/>
</file>
</files>
<imageId>1</imageId>
</image>
"""
image = converter.fromText("xml", xml, models.Image, None, None)
self.failUnlessEqual(image.name, "my image")
self.failUnlessEqual([ x.title for x in image.files.files ],
['image title'])
self.failUnlessEqual([ x.size for x in image.files.files ],
[1234])
self.failUnlessEqual([ x.sha1 for x in image.files.files ],
['abc'])
self.failUnlessEqual([ x.fileName for x in image.files.files ],
['file name'])
self.failUnlessEqual(
[ [ (y.url, y.urlType) for y in x.urls ] for x in image.files.files ],
[ [
('http://localhost:1234', 0),
('http://localhost:12345', None),
(None, 1),
] ],
)
class Controller(object):
def url(slf, request, *args, **kwargs):
return args[0]
class Request(object):
def __init__(slf, baseUrl):
slf.baseUrl = baseUrl
# Need to set the file ID
counter = 42
for f in image.files.files:
for url in f.urls:
url.urlType = 0
url.fileId = counter
counter += 1
# Now make sure we can dump the data back in xml, and the url
# attribute/text field doesn't cause problems.
newxml = converter.toText("xml", image, Controller(),
Request("irc://goo/"))
tmpl = '<url urlType="0">irc://goo/downloadImage?fileId=%s&urlType=0</url>'
for fileId in [42, 43, 44]:
self.assertIn(tmpl % fileId, newxml)
data = """<?xml version='1.0' encoding='UTF-8'?>
<release id="http://%(server)s:%(port)s/api/products/testproject/releases/1">
<hostname>testproject</hostname>
<name>Release Name</name>
<imageIds>
<imageId>1</imageId>
</imageIds>
</release>
"""
imageList = """<?xml version='1.0' encoding='UTF-8'?>
<images>
<image>
<imageId>1</imageId>
</image>
</images>
"""
|
MAPC/warren-st-development-database | refs/heads/master | development/migrations/0004_auto__chg_field_project_status.py | 2 |
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Changing field 'Project.status'
db.alter_column('development_project', 'status_id', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['development.ProjectStatus'], null=True))
def backwards(self, orm):
# User chose to not deal with backwards NULL issues for 'Project.status'
raise RuntimeError("Cannot reverse this migration. 'Project.status' and its values cannot be restored.")
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'development.communitytype': {
'Meta': {'object_name': 'CommunityType'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'development.municipality': {
'Meta': {'ordering': "['name']", 'object_name': 'Municipality'},
'communitytype': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['development.CommunityType']", 'null': 'True', 'blank': 'True'}),
'geometry': ('django.contrib.gis.db.models.fields.MultiPolygonField', [], {}),
'muni_id': ('django.db.models.fields.IntegerField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '50'})
},
'development.project': {
'Meta': {'ordering': "['dd_id']", 'object_name': 'Project'},
'affordable_comment': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'as_of_right': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'ch40': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['development.ZoningTool']", 'null': 'True', 'blank': 'True'}),
'clustosrd': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'commsf': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'complyr': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'project_created_by'", 'null': 'True', 'to': "orm['auth.User']"}),
'dd_id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ddname': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'dev_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'draft': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'edinstpct': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'emploss': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'gqpop': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'hotelrms': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'indmfpct': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'last_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'last_modified_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'project_last_modified_by'", 'null': 'True', 'to': "orm['auth.User']"}),
'lgmultifam': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'location': ('django.contrib.gis.db.models.fields.PointField', [], {'null': 'True', 'blank': 'True'}),
'mapcintrnl': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'mfdisc': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'mxduse': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'ofcmdpct': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'otheremprat2': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'othpct': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'ovr55': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'parking_spaces': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'pctaffall': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'phased': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'prjacrs': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'projecttype': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['development.ProjectType']", 'null': 'True'}),
'projecttype_detail': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'rdv': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'removed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'retpct': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'rndpct': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'rptdemp': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'singfamhu': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'stalled': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'status': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['development.ProjectStatus']", 'null': 'True'}),
'taz': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['development.Taz']", 'null': 'True', 'blank': 'True'}),
'total_cost': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'total_cost_allocated_pct': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'totemp': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'tothu': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'twnhsmmult': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'url_add': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'walkscore': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'whspct': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'xcoord': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'ycoord': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'})
},
'development.projectstatus': {
'Meta': {'object_name': 'ProjectStatus'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '20'})
},
'development.projecttype': {
'Meta': {'ordering': "['order']", 'object_name': 'ProjectType'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'order': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'development.taz': {
'Meta': {'ordering': "['taz_id']", 'object_name': 'Taz'},
'geometry': ('django.contrib.gis.db.models.fields.MultiPolygonField', [], {}),
'municipality': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['development.Municipality']"}),
'taz_id': ('django.db.models.fields.IntegerField', [], {'primary_key': 'True'})
},
'development.zoningtool': {
'Meta': {'object_name': 'ZoningTool'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '3'})
}
}
complete_apps = ['development']
|
tkingless/webtesting
|
refs/heads/master
|
venvs/tutorials/lib/python2.7/site-packages/pip/_vendor/requests/packages/urllib3/util/connection.py
|
353
|
from __future__ import absolute_import
import socket
try:
from select import poll, POLLIN
except ImportError: # `poll` doesn't exist on OSX and other platforms
poll = False
try:
from select import select
except ImportError: # `select` doesn't exist on AppEngine.
select = False
def is_connection_dropped(conn): # Platform-specific
"""
Returns True if the connection is dropped and should be closed.
:param conn:
:class:`httplib.HTTPConnection` object.
Note: For platforms like AppEngine, this will always return ``False`` to
let the platform handle connection recycling transparently for us.
"""
sock = getattr(conn, 'sock', False)
if sock is False: # Platform-specific: AppEngine
return False
if sock is None: # Connection already closed (such as by httplib).
return True
if not poll:
if not select: # Platform-specific: AppEngine
return False
try:
return select([sock], [], [], 0.0)[0]
except socket.error:
return True
# This version is better on platforms that support it.
p = poll()
p.register(sock, POLLIN)
for (fno, ev) in p.poll(0.0):
if fno == sock.fileno():
# Either data is buffered (bad), or the connection is dropped.
return True
# This function is copied from socket.py in the Python 2.7 standard
# library test suite. Added to its signature is only `socket_options`.
def create_connection(address, timeout=socket._GLOBAL_DEFAULT_TIMEOUT,
source_address=None, socket_options=None):
"""Connect to *address* and return the socket object.
Convenience function. Connect to *address* (a 2-tuple ``(host,
port)``) and return the socket object. Passing the optional
*timeout* parameter will set the timeout on the socket instance
before attempting to connect. If no *timeout* is supplied, the
global default timeout setting returned by :func:`getdefaulttimeout`
is used. If *source_address* is set it must be a tuple of (host, port)
for the socket to bind as a source address before making the connection.
    A host of '' or port 0 tells the OS to use the default.
"""
host, port = address
if host.startswith('['):
host = host.strip('[]')
err = None
for res in socket.getaddrinfo(host, port, 0, socket.SOCK_STREAM):
af, socktype, proto, canonname, sa = res
sock = None
try:
sock = socket.socket(af, socktype, proto)
# If provided, set socket level options before connecting.
# This is the only addition urllib3 makes to this function.
_set_socket_options(sock, socket_options)
if timeout is not socket._GLOBAL_DEFAULT_TIMEOUT:
sock.settimeout(timeout)
if source_address:
sock.bind(source_address)
sock.connect(sa)
return sock
except socket.error as e:
err = e
if sock is not None:
sock.close()
sock = None
if err is not None:
raise err
raise socket.error("getaddrinfo returns an empty list")
def _set_socket_options(sock, options):
if options is None:
return
for opt in options:
sock.setsockopt(*opt)
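# --- Illustrative usage sketch (not part of the original urllib3 module) ---
# A minimal, hedged example of how `create_connection` accepts the extra
# `socket_options` argument and how `is_connection_dropped` inspects a
# connection-like object. The host, port and timeout are illustrative
# assumptions, and `_FakeConn` is a hypothetical stand-in for an
# `httplib.HTTPConnection`.
if __name__ == "__main__":
    example_options = [(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)]
    try:
        example_sock = create_connection(("example.org", 80), timeout=3.0,
                                         socket_options=example_options)
        print("connected to %r" % (example_sock.getpeername(),))

        class _FakeConn(object):
            # Only the `sock` attribute is inspected by is_connection_dropped().
            sock = example_sock

        print("dropped? %r" % bool(is_connection_dropped(_FakeConn())))
        example_sock.close()
    except socket.error as exc:
        print("connection failed: %r" % exc)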
|
caesar2164/edx-platform
|
refs/heads/master
|
lms/djangoapps/badges/__init__.py
|
12133432
| |
MattRijk/django-ecomsite
|
refs/heads/master
|
lib/python2.7/site-packages/django/conf/locale/et/__init__.py
|
12133432
| |
broferek/ansible
|
refs/heads/devel
|
lib/ansible/modules/messaging/rabbitmq/__init__.py
|
12133432
| |
acbecker/pyhm
|
refs/heads/master
|
pyhm/distributions.py
|
12133432
| |
zhouyejoe/spark
|
refs/heads/master
|
python/pyspark/sql/readwriter.py
|
5
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
if sys.version >= '3':
basestring = unicode = str
from py4j.java_gateway import JavaClass
from pyspark import RDD, since
from pyspark.rdd import ignore_unicode_prefix
from pyspark.sql.column import _to_seq
from pyspark.sql.types import *
from pyspark.sql import utils
__all__ = ["DataFrameReader", "DataFrameWriter"]
def to_str(value):
"""
A wrapper over str(), but converts bool values to lower case strings.
If None is given, just returns None, instead of converting it to string "None".
"""
if isinstance(value, bool):
return str(value).lower()
elif value is None:
return value
else:
return str(value)
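# Illustrative note (not part of the original source): `to_str` normalizes option
# values before they are passed to the JVM-side reader/writer, for example
#   to_str(True) -> 'true', to_str(None) -> None, to_str(1) -> '1'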
class OptionUtils(object):
def _set_opts(self, schema=None, **options):
"""
        Set named options (filtering out those whose value is None)
"""
if schema is not None:
self.schema(schema)
for k, v in options.items():
if v is not None:
self.option(k, v)
class DataFrameReader(OptionUtils):
"""
Interface used to load a :class:`DataFrame` from external storage systems
(e.g. file systems, key-value stores, etc). Use :func:`spark.read`
to access this.
.. versionadded:: 1.4
"""
def __init__(self, spark):
self._jreader = spark._ssql_ctx.read()
self._spark = spark
def _df(self, jdf):
from pyspark.sql.dataframe import DataFrame
return DataFrame(jdf, self._spark)
@since(1.4)
def format(self, source):
"""Specifies the input data source format.
:param source: string, name of the data source, e.g. 'json', 'parquet'.
>>> df = spark.read.format('json').load('python/test_support/sql/people.json')
>>> df.dtypes
[('age', 'bigint'), ('name', 'string')]
"""
self._jreader = self._jreader.format(source)
return self
@since(1.4)
def schema(self, schema):
"""Specifies the input schema.
Some data sources (e.g. JSON) can infer the input schema automatically from data.
By specifying the schema here, the underlying data source can skip the schema
inference step, and thus speed up data loading.
:param schema: a :class:`pyspark.sql.types.StructType` object or a DDL-formatted string
(For example ``col0 INT, col1 DOUBLE``).
>>> s = spark.read.schema("col0 INT, col1 DOUBLE")
"""
from pyspark.sql import SparkSession
spark = SparkSession.builder.getOrCreate()
if isinstance(schema, StructType):
jschema = spark._jsparkSession.parseDataType(schema.json())
self._jreader = self._jreader.schema(jschema)
elif isinstance(schema, basestring):
self._jreader = self._jreader.schema(schema)
else:
raise TypeError("schema should be StructType or string")
return self
@since(1.5)
def option(self, key, value):
"""Adds an input option for the underlying data source.
You can set the following option(s) for reading files:
* ``timeZone``: sets the string that indicates a timezone to be used to parse timestamps
in the JSON/CSV datasources or partition values.
If it isn't set, it uses the default value, session local timezone.
"""
self._jreader = self._jreader.option(key, to_str(value))
return self
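    # Illustrative usage sketch (hypothetical option values, not part of the
    # original source): options are simply chained before the terminal read call,
    #   reader = spark.read.option("timeZone", "UTC").option("multiLine", True)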
@since(1.4)
def options(self, **options):
"""Adds input options for the underlying data source.
You can set the following option(s) for reading files:
* ``timeZone``: sets the string that indicates a timezone to be used to parse timestamps
in the JSON/CSV datasources or partition values.
If it isn't set, it uses the default value, session local timezone.
"""
for k in options:
self._jreader = self._jreader.option(k, to_str(options[k]))
return self
@since(1.4)
def load(self, path=None, format=None, schema=None, **options):
"""Loads data from a data source and returns it as a :class`DataFrame`.
:param path: optional string or a list of string for file-system backed data sources.
:param format: optional string for format of the data source. Default to 'parquet'.
:param schema: optional :class:`pyspark.sql.types.StructType` for the input schema
or a DDL-formatted string (For example ``col0 INT, col1 DOUBLE``).
:param options: all other string options
>>> df = spark.read.format("parquet").load('python/test_support/sql/parquet_partitioned',
... opt1=True, opt2=1, opt3='str')
>>> df.dtypes
[('name', 'string'), ('year', 'int'), ('month', 'int'), ('day', 'int')]
>>> df = spark.read.format('json').load(['python/test_support/sql/people.json',
... 'python/test_support/sql/people1.json'])
>>> df.dtypes
[('age', 'bigint'), ('aka', 'string'), ('name', 'string')]
"""
if format is not None:
self.format(format)
if schema is not None:
self.schema(schema)
self.options(**options)
if isinstance(path, basestring):
return self._df(self._jreader.load(path))
elif path is not None:
if type(path) != list:
path = [path]
return self._df(self._jreader.load(self._spark._sc._jvm.PythonUtils.toSeq(path)))
else:
return self._df(self._jreader.load())
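    # Illustrative usage sketch (hypothetical path and schema, not part of the
    # original source): the generic `load` above is interchangeable with the
    # format-specific helpers defined below, e.g.
    #   df = spark.read.load("/data/events", format="json",
    #                        schema="ts TIMESTAMP, user STRING")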
@since(1.4)
def json(self, path, schema=None, primitivesAsString=None, prefersDecimal=None,
allowComments=None, allowUnquotedFieldNames=None, allowSingleQuotes=None,
allowNumericLeadingZero=None, allowBackslashEscapingAnyCharacter=None,
mode=None, columnNameOfCorruptRecord=None, dateFormat=None, timestampFormat=None,
multiLine=None, allowUnquotedControlChars=None, lineSep=None, samplingRatio=None,
dropFieldIfAllNull=None, encoding=None):
"""
Loads JSON files and returns the results as a :class:`DataFrame`.
`JSON Lines <http://jsonlines.org/>`_ (newline-delimited JSON) is supported by default.
For JSON (one record per file), set the ``multiLine`` parameter to ``true``.
If the ``schema`` parameter is not specified, this function goes
through the input once to determine the input schema.
:param path: string represents path to the JSON dataset, or a list of paths,
or RDD of Strings storing JSON objects.
:param schema: an optional :class:`pyspark.sql.types.StructType` for the input schema or
a DDL-formatted string (For example ``col0 INT, col1 DOUBLE``).
:param primitivesAsString: infers all primitive values as a string type. If None is set,
it uses the default value, ``false``.
:param prefersDecimal: infers all floating-point values as a decimal type. If the values
do not fit in decimal, then it infers them as doubles. If None is
set, it uses the default value, ``false``.
:param allowComments: ignores Java/C++ style comment in JSON records. If None is set,
it uses the default value, ``false``.
:param allowUnquotedFieldNames: allows unquoted JSON field names. If None is set,
it uses the default value, ``false``.
:param allowSingleQuotes: allows single quotes in addition to double quotes. If None is
set, it uses the default value, ``true``.
:param allowNumericLeadingZero: allows leading zeros in numbers (e.g. 00012). If None is
set, it uses the default value, ``false``.
:param allowBackslashEscapingAnyCharacter: allows accepting quoting of all character
using backslash quoting mechanism. If None is
set, it uses the default value, ``false``.
:param mode: allows a mode for dealing with corrupt records during parsing. If None is
set, it uses the default value, ``PERMISSIVE``.
* ``PERMISSIVE`` : when it meets a corrupted record, puts the malformed string \
into a field configured by ``columnNameOfCorruptRecord``, and sets other \
                  fields to ``null``. To keep corrupt records, a user can set a string type \
                  field named ``columnNameOfCorruptRecord`` in a user-defined schema. If a \
schema does not have the field, it drops corrupt records during parsing. \
When inferring a schema, it implicitly adds a ``columnNameOfCorruptRecord`` \
field in an output schema.
* ``DROPMALFORMED`` : ignores the whole corrupted records.
* ``FAILFAST`` : throws an exception when it meets corrupted records.
:param columnNameOfCorruptRecord: allows renaming the new field having malformed string
created by ``PERMISSIVE`` mode. This overrides
``spark.sql.columnNameOfCorruptRecord``. If None is set,
it uses the value specified in
``spark.sql.columnNameOfCorruptRecord``.
:param dateFormat: sets the string that indicates a date format. Custom date formats
follow the formats at ``java.text.SimpleDateFormat``. This
applies to date type. If None is set, it uses the
default value, ``yyyy-MM-dd``.
:param timestampFormat: sets the string that indicates a timestamp format. Custom date
formats follow the formats at ``java.text.SimpleDateFormat``.
This applies to timestamp type. If None is set, it uses the
default value, ``yyyy-MM-dd'T'HH:mm:ss.SSSXXX``.
:param multiLine: parse one record, which may span multiple lines, per file. If None is
set, it uses the default value, ``false``.
:param allowUnquotedControlChars: allows JSON Strings to contain unquoted control
characters (ASCII characters with value less than 32,
including tab and line feed characters) or not.
        :param encoding: allows forcibly setting one of the standard basic or extended encodings for
the JSON files. For example UTF-16BE, UTF-32LE. If None is set,
the encoding of input JSON will be detected automatically
when the multiLine option is set to ``true``.
:param lineSep: defines the line separator that should be used for parsing. If None is
set, it covers all ``\\r``, ``\\r\\n`` and ``\\n``.
:param samplingRatio: defines fraction of input JSON objects used for schema inferring.
If None is set, it uses the default value, ``1.0``.
        :param dropFieldIfAllNull: whether to ignore columns of all null values or empty
array/struct during schema inference. If None is set, it
uses the default value, ``false``.
>>> df1 = spark.read.json('python/test_support/sql/people.json')
>>> df1.dtypes
[('age', 'bigint'), ('name', 'string')]
>>> rdd = sc.textFile('python/test_support/sql/people.json')
>>> df2 = spark.read.json(rdd)
>>> df2.dtypes
[('age', 'bigint'), ('name', 'string')]
"""
self._set_opts(
schema=schema, primitivesAsString=primitivesAsString, prefersDecimal=prefersDecimal,
allowComments=allowComments, allowUnquotedFieldNames=allowUnquotedFieldNames,
allowSingleQuotes=allowSingleQuotes, allowNumericLeadingZero=allowNumericLeadingZero,
allowBackslashEscapingAnyCharacter=allowBackslashEscapingAnyCharacter,
mode=mode, columnNameOfCorruptRecord=columnNameOfCorruptRecord, dateFormat=dateFormat,
timestampFormat=timestampFormat, multiLine=multiLine,
allowUnquotedControlChars=allowUnquotedControlChars, lineSep=lineSep,
samplingRatio=samplingRatio, dropFieldIfAllNull=dropFieldIfAllNull, encoding=encoding)
if isinstance(path, basestring):
path = [path]
if type(path) == list:
return self._df(self._jreader.json(self._spark._sc._jvm.PythonUtils.toSeq(path)))
elif isinstance(path, RDD):
def func(iterator):
for x in iterator:
if not isinstance(x, basestring):
x = unicode(x)
if isinstance(x, unicode):
x = x.encode("utf-8")
yield x
keyed = path.mapPartitions(func)
keyed._bypass_serializer = True
jrdd = keyed._jrdd.map(self._spark._jvm.BytesToString())
return self._df(self._jreader.json(jrdd))
else:
raise TypeError("path can be only string, list or RDD")
@since(1.4)
def table(self, tableName):
"""Returns the specified table as a :class:`DataFrame`.
:param tableName: string, name of the table.
>>> df = spark.read.parquet('python/test_support/sql/parquet_partitioned')
>>> df.createOrReplaceTempView('tmpTable')
>>> spark.read.table('tmpTable').dtypes
[('name', 'string'), ('year', 'int'), ('month', 'int'), ('day', 'int')]
"""
return self._df(self._jreader.table(tableName))
@since(1.4)
def parquet(self, *paths):
"""Loads Parquet files, returning the result as a :class:`DataFrame`.
You can set the following Parquet-specific option(s) for reading Parquet files:
* ``mergeSchema``: sets whether we should merge schemas collected from all \
Parquet part-files. This will override ``spark.sql.parquet.mergeSchema``. \
The default value is specified in ``spark.sql.parquet.mergeSchema``.
>>> df = spark.read.parquet('python/test_support/sql/parquet_partitioned')
>>> df.dtypes
[('name', 'string'), ('year', 'int'), ('month', 'int'), ('day', 'int')]
"""
return self._df(self._jreader.parquet(_to_seq(self._spark._sc, paths)))
@ignore_unicode_prefix
@since(1.6)
def text(self, paths, wholetext=False, lineSep=None):
"""
Loads text files and returns a :class:`DataFrame` whose schema starts with a
        string column named "value", followed by partitioned columns if there
are any.
By default, each line in the text file is a new row in the resulting DataFrame.
:param paths: string, or list of strings, for input path(s).
:param wholetext: if true, read each file from input path(s) as a single row.
:param lineSep: defines the line separator that should be used for parsing. If None is
set, it covers all ``\\r``, ``\\r\\n`` and ``\\n``.
>>> df = spark.read.text('python/test_support/sql/text-test.txt')
>>> df.collect()
[Row(value=u'hello'), Row(value=u'this')]
>>> df = spark.read.text('python/test_support/sql/text-test.txt', wholetext=True)
>>> df.collect()
[Row(value=u'hello\\nthis')]
"""
self._set_opts(wholetext=wholetext, lineSep=lineSep)
if isinstance(paths, basestring):
paths = [paths]
return self._df(self._jreader.text(self._spark._sc._jvm.PythonUtils.toSeq(paths)))
@since(2.0)
def csv(self, path, schema=None, sep=None, encoding=None, quote=None, escape=None,
comment=None, header=None, inferSchema=None, ignoreLeadingWhiteSpace=None,
ignoreTrailingWhiteSpace=None, nullValue=None, nanValue=None, positiveInf=None,
negativeInf=None, dateFormat=None, timestampFormat=None, maxColumns=None,
maxCharsPerColumn=None, maxMalformedLogPerPartition=None, mode=None,
columnNameOfCorruptRecord=None, multiLine=None, charToEscapeQuoteEscaping=None,
samplingRatio=None, enforceSchema=None, emptyValue=None):
r"""Loads a CSV file and returns the result as a :class:`DataFrame`.
This function will go through the input once to determine the input schema if
``inferSchema`` is enabled. To avoid going through the entire data once, disable
``inferSchema`` option or specify the schema explicitly using ``schema``.
:param path: string, or list of strings, for input path(s),
or RDD of Strings storing CSV rows.
:param schema: an optional :class:`pyspark.sql.types.StructType` for the input schema
or a DDL-formatted string (For example ``col0 INT, col1 DOUBLE``).
:param sep: sets a single character as a separator for each field and value.
If None is set, it uses the default value, ``,``.
:param encoding: decodes the CSV files by the given encoding type. If None is set,
it uses the default value, ``UTF-8``.
:param quote: sets a single character used for escaping quoted values where the
separator can be part of the value. If None is set, it uses the default
value, ``"``. If you would like to turn off quotations, you need to set an
empty string.
:param escape: sets a single character used for escaping quotes inside an already
quoted value. If None is set, it uses the default value, ``\``.
:param comment: sets a single character used for skipping lines beginning with this
character. By default (None), it is disabled.
:param header: uses the first line as names of columns. If None is set, it uses the
default value, ``false``.
:param inferSchema: infers the input schema automatically from data. It requires one extra
pass over the data. If None is set, it uses the default value, ``false``.
:param enforceSchema: If it is set to ``true``, the specified or inferred schema will be
forcibly applied to datasource files, and headers in CSV files will be
ignored. If the option is set to ``false``, the schema will be
validated against all headers in CSV files or the first header in RDD
if the ``header`` option is set to ``true``. Field names in the schema
and column names in CSV headers are checked by their positions
taking into account ``spark.sql.caseSensitive``. If None is set,
``true`` is used by default. Though the default value is ``true``,
it is recommended to disable the ``enforceSchema`` option
to avoid incorrect results.
:param ignoreLeadingWhiteSpace: A flag indicating whether or not leading whitespaces from
values being read should be skipped. If None is set, it
uses the default value, ``false``.
:param ignoreTrailingWhiteSpace: A flag indicating whether or not trailing whitespaces from
values being read should be skipped. If None is set, it
uses the default value, ``false``.
:param nullValue: sets the string representation of a null value. If None is set, it uses
the default value, empty string. Since 2.0.1, this ``nullValue`` param
applies to all supported types including the string type.
:param nanValue: sets the string representation of a non-number value. If None is set, it
uses the default value, ``NaN``.
:param positiveInf: sets the string representation of a positive infinity value. If None
is set, it uses the default value, ``Inf``.
:param negativeInf: sets the string representation of a negative infinity value. If None
is set, it uses the default value, ``Inf``.
:param dateFormat: sets the string that indicates a date format. Custom date formats
follow the formats at ``java.text.SimpleDateFormat``. This
applies to date type. If None is set, it uses the
default value, ``yyyy-MM-dd``.
:param timestampFormat: sets the string that indicates a timestamp format. Custom date
formats follow the formats at ``java.text.SimpleDateFormat``.
This applies to timestamp type. If None is set, it uses the
default value, ``yyyy-MM-dd'T'HH:mm:ss.SSSXXX``.
:param maxColumns: defines a hard limit of how many columns a record can have. If None is
set, it uses the default value, ``20480``.
:param maxCharsPerColumn: defines the maximum number of characters allowed for any given
value being read. If None is set, it uses the default value,
``-1`` meaning unlimited length.
:param maxMalformedLogPerPartition: this parameter is no longer used since Spark 2.2.0.
If specified, it is ignored.
:param mode: allows a mode for dealing with corrupt records during parsing. If None is
set, it uses the default value, ``PERMISSIVE``.
* ``PERMISSIVE`` : when it meets a corrupted record, puts the malformed string \
into a field configured by ``columnNameOfCorruptRecord``, and sets other \
                  fields to ``null``. To keep corrupt records, a user can set a string type \
                  field named ``columnNameOfCorruptRecord`` in a user-defined schema. If a \
                  schema does not have the field, it drops corrupt records during parsing. \
                  A record with fewer or more tokens than the schema is not a corrupted record to CSV. \
When it meets a record having fewer tokens than the length of the schema, \
sets ``null`` to extra fields. When the record has more tokens than the \
length of the schema, it drops extra tokens.
* ``DROPMALFORMED`` : ignores the whole corrupted records.
* ``FAILFAST`` : throws an exception when it meets corrupted records.
:param columnNameOfCorruptRecord: allows renaming the new field having malformed string
created by ``PERMISSIVE`` mode. This overrides
``spark.sql.columnNameOfCorruptRecord``. If None is set,
it uses the value specified in
``spark.sql.columnNameOfCorruptRecord``.
:param multiLine: parse records, which may span multiple lines. If None is
set, it uses the default value, ``false``.
:param charToEscapeQuoteEscaping: sets a single character used for escaping the escape for
the quote character. If None is set, the default value is
escape character when escape and quote characters are
different, ``\0`` otherwise.
:param samplingRatio: defines fraction of rows used for schema inferring.
If None is set, it uses the default value, ``1.0``.
:param emptyValue: sets the string representation of an empty value. If None is set, it uses
the default value, empty string.
>>> df = spark.read.csv('python/test_support/sql/ages.csv')
>>> df.dtypes
[('_c0', 'string'), ('_c1', 'string')]
>>> rdd = sc.textFile('python/test_support/sql/ages.csv')
>>> df2 = spark.read.csv(rdd)
>>> df2.dtypes
[('_c0', 'string'), ('_c1', 'string')]
"""
self._set_opts(
schema=schema, sep=sep, encoding=encoding, quote=quote, escape=escape, comment=comment,
header=header, inferSchema=inferSchema, ignoreLeadingWhiteSpace=ignoreLeadingWhiteSpace,
ignoreTrailingWhiteSpace=ignoreTrailingWhiteSpace, nullValue=nullValue,
nanValue=nanValue, positiveInf=positiveInf, negativeInf=negativeInf,
dateFormat=dateFormat, timestampFormat=timestampFormat, maxColumns=maxColumns,
maxCharsPerColumn=maxCharsPerColumn,
maxMalformedLogPerPartition=maxMalformedLogPerPartition, mode=mode,
columnNameOfCorruptRecord=columnNameOfCorruptRecord, multiLine=multiLine,
charToEscapeQuoteEscaping=charToEscapeQuoteEscaping, samplingRatio=samplingRatio,
enforceSchema=enforceSchema, emptyValue=emptyValue)
if isinstance(path, basestring):
path = [path]
if type(path) == list:
return self._df(self._jreader.csv(self._spark._sc._jvm.PythonUtils.toSeq(path)))
elif isinstance(path, RDD):
def func(iterator):
for x in iterator:
if not isinstance(x, basestring):
x = unicode(x)
if isinstance(x, unicode):
x = x.encode("utf-8")
yield x
keyed = path.mapPartitions(func)
keyed._bypass_serializer = True
jrdd = keyed._jrdd.map(self._spark._jvm.BytesToString())
# see SPARK-22112
# There aren't any jvm api for creating a dataframe from rdd storing csv.
# We can do it through creating a jvm dataset firstly and using the jvm api
# for creating a dataframe from dataset storing csv.
jdataset = self._spark._ssql_ctx.createDataset(
jrdd.rdd(),
self._spark._jvm.Encoders.STRING())
return self._df(self._jreader.csv(jdataset))
else:
raise TypeError("path can be only string, list or RDD")
@since(1.5)
def orc(self, path):
"""Loads ORC files, returning the result as a :class:`DataFrame`.
.. note:: Currently ORC support is only available together with Hive support.
>>> df = spark.read.orc('python/test_support/sql/orc_partitioned')
>>> df.dtypes
[('a', 'bigint'), ('b', 'int'), ('c', 'int')]
"""
if isinstance(path, basestring):
path = [path]
return self._df(self._jreader.orc(_to_seq(self._spark._sc, path)))
@since(1.4)
def jdbc(self, url, table, column=None, lowerBound=None, upperBound=None, numPartitions=None,
predicates=None, properties=None):
"""
Construct a :class:`DataFrame` representing the database table named ``table``
accessible via JDBC URL ``url`` and connection ``properties``.
Partitions of the table will be retrieved in parallel if either ``column`` or
        ``predicates`` is specified. ``lowerBound``, ``upperBound`` and ``numPartitions``
        are needed when ``column`` is specified.
If both ``column`` and ``predicates`` are specified, ``column`` will be used.
.. note:: Don't create too many partitions in parallel on a large cluster;
otherwise Spark might crash your external database systems.
:param url: a JDBC URL of the form ``jdbc:subprotocol:subname``
:param table: the name of the table
:param column: the name of an integer column that will be used for partitioning;
if this parameter is specified, then ``numPartitions``, ``lowerBound``
(inclusive), and ``upperBound`` (exclusive) will form partition strides
for generated WHERE clause expressions used to split the column
``column`` evenly
:param lowerBound: the minimum value of ``column`` used to decide partition stride
:param upperBound: the maximum value of ``column`` used to decide partition stride
:param numPartitions: the number of partitions
:param predicates: a list of expressions suitable for inclusion in WHERE clauses;
each one defines one partition of the :class:`DataFrame`
:param properties: a dictionary of JDBC database connection arguments. Normally at
least properties "user" and "password" with their corresponding values.
For example { 'user' : 'SYSTEM', 'password' : 'mypassword' }
:return: a DataFrame
"""
if properties is None:
properties = dict()
jprop = JavaClass("java.util.Properties", self._spark._sc._gateway._gateway_client)()
for k in properties:
jprop.setProperty(k, properties[k])
if column is not None:
assert lowerBound is not None, "lowerBound can not be None when ``column`` is specified"
assert upperBound is not None, "upperBound can not be None when ``column`` is specified"
assert numPartitions is not None, \
"numPartitions can not be None when ``column`` is specified"
return self._df(self._jreader.jdbc(url, table, column, int(lowerBound), int(upperBound),
int(numPartitions), jprop))
if predicates is not None:
gateway = self._spark._sc._gateway
jpredicates = utils.toJArray(gateway, gateway.jvm.java.lang.String, predicates)
return self._df(self._jreader.jdbc(url, table, jpredicates, jprop))
return self._df(self._jreader.jdbc(url, table, jprop))
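# Illustrative sketch (not part of the original source): a partitioned JDBC read
# as described in the docstring above; the URL, table, bounds and credentials are
# hypothetical placeholders.
#   df = spark.read.jdbc(
#       url="jdbc:postgresql://localhost/mydb", table="events",
#       column="event_id", lowerBound=1, upperBound=1000000, numPartitions=8,
#       properties={"user": "reader", "password": "secret"})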
class DataFrameWriter(OptionUtils):
"""
Interface used to write a :class:`DataFrame` to external storage systems
(e.g. file systems, key-value stores, etc). Use :func:`DataFrame.write`
to access this.
.. versionadded:: 1.4
"""
def __init__(self, df):
self._df = df
self._spark = df.sql_ctx
self._jwrite = df._jdf.write()
def _sq(self, jsq):
from pyspark.sql.streaming import StreamingQuery
return StreamingQuery(jsq)
@since(1.4)
def mode(self, saveMode):
"""Specifies the behavior when data or table already exists.
Options include:
* `append`: Append contents of this :class:`DataFrame` to existing data.
* `overwrite`: Overwrite existing data.
* `error` or `errorifexists`: Throw an exception if data already exists.
* `ignore`: Silently ignore this operation if data already exists.
>>> df.write.mode('append').parquet(os.path.join(tempfile.mkdtemp(), 'data'))
"""
# At the JVM side, the default value of mode is already set to "error".
# So, if the given saveMode is None, we will not call JVM-side's mode method.
if saveMode is not None:
self._jwrite = self._jwrite.mode(saveMode)
return self
@since(1.4)
def format(self, source):
"""Specifies the underlying output data source.
:param source: string, name of the data source, e.g. 'json', 'parquet'.
>>> df.write.format('json').save(os.path.join(tempfile.mkdtemp(), 'data'))
"""
self._jwrite = self._jwrite.format(source)
return self
@since(1.5)
def option(self, key, value):
"""Adds an output option for the underlying data source.
You can set the following option(s) for writing files:
* ``timeZone``: sets the string that indicates a timezone to be used to format
timestamps in the JSON/CSV datasources or partition values.
If it isn't set, it uses the default value, session local timezone.
"""
self._jwrite = self._jwrite.option(key, to_str(value))
return self
@since(1.4)
def options(self, **options):
"""Adds output options for the underlying data source.
You can set the following option(s) for writing files:
* ``timeZone``: sets the string that indicates a timezone to be used to format
timestamps in the JSON/CSV datasources or partition values.
If it isn't set, it uses the default value, session local timezone.
"""
for k in options:
self._jwrite = self._jwrite.option(k, to_str(options[k]))
return self
@since(1.4)
def partitionBy(self, *cols):
"""Partitions the output by the given columns on the file system.
If specified, the output is laid out on the file system similar
to Hive's partitioning scheme.
:param cols: name of columns
>>> df.write.partitionBy('year', 'month').parquet(os.path.join(tempfile.mkdtemp(), 'data'))
"""
if len(cols) == 1 and isinstance(cols[0], (list, tuple)):
cols = cols[0]
self._jwrite = self._jwrite.partitionBy(_to_seq(self._spark._sc, cols))
return self
@since(2.3)
def bucketBy(self, numBuckets, col, *cols):
"""Buckets the output by the given columns.If specified,
the output is laid out on the file system similar to Hive's bucketing scheme.
:param numBuckets: the number of buckets to save
:param col: a name of a column, or a list of names.
:param cols: additional names (optional). If `col` is a list it should be empty.
.. note:: Applicable for file-based data sources in combination with
:py:meth:`DataFrameWriter.saveAsTable`.
>>> (df.write.format('parquet') # doctest: +SKIP
... .bucketBy(100, 'year', 'month')
... .mode("overwrite")
... .saveAsTable('bucketed_table'))
"""
if not isinstance(numBuckets, int):
raise TypeError("numBuckets should be an int, got {0}.".format(type(numBuckets)))
if isinstance(col, (list, tuple)):
if cols:
raise ValueError("col is a {0} but cols are not empty".format(type(col)))
col, cols = col[0], col[1:]
if not all(isinstance(c, basestring) for c in cols) or not(isinstance(col, basestring)):
raise TypeError("all names should be `str`")
self._jwrite = self._jwrite.bucketBy(numBuckets, col, _to_seq(self._spark._sc, cols))
return self
@since(2.3)
def sortBy(self, col, *cols):
"""Sorts the output in each bucket by the given columns on the file system.
:param col: a name of a column, or a list of names.
:param cols: additional names (optional). If `col` is a list it should be empty.
>>> (df.write.format('parquet') # doctest: +SKIP
... .bucketBy(100, 'year', 'month')
... .sortBy('day')
... .mode("overwrite")
... .saveAsTable('sorted_bucketed_table'))
"""
if isinstance(col, (list, tuple)):
if cols:
raise ValueError("col is a {0} but cols are not empty".format(type(col)))
col, cols = col[0], col[1:]
if not all(isinstance(c, basestring) for c in cols) or not(isinstance(col, basestring)):
raise TypeError("all names should be `str`")
self._jwrite = self._jwrite.sortBy(col, _to_seq(self._spark._sc, cols))
return self
@since(1.4)
def save(self, path=None, format=None, mode=None, partitionBy=None, **options):
"""Saves the contents of the :class:`DataFrame` to a data source.
The data source is specified by the ``format`` and a set of ``options``.
If ``format`` is not specified, the default data source configured by
``spark.sql.sources.default`` will be used.
:param path: the path in a Hadoop supported file system
:param format: the format used to save
:param mode: specifies the behavior of the save operation when data already exists.
* ``append``: Append contents of this :class:`DataFrame` to existing data.
* ``overwrite``: Overwrite existing data.
* ``ignore``: Silently ignore this operation if data already exists.
* ``error`` or ``errorifexists`` (default case): Throw an exception if data already \
exists.
:param partitionBy: names of partitioning columns
:param options: all other string options
>>> df.write.mode('append').parquet(os.path.join(tempfile.mkdtemp(), 'data'))
"""
self.mode(mode).options(**options)
if partitionBy is not None:
self.partitionBy(partitionBy)
if format is not None:
self.format(format)
if path is None:
self._jwrite.save()
else:
self._jwrite.save(path)
@since(1.4)
def insertInto(self, tableName, overwrite=False):
"""Inserts the content of the :class:`DataFrame` to the specified table.
        It requires that the schema of the :class:`DataFrame` is the same as the
        schema of the table.
        Optionally overwrites any existing data.
"""
self._jwrite.mode("overwrite" if overwrite else "append").insertInto(tableName)
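    # Illustrative usage sketch (hypothetical table name, not part of the original
    # source): append to, or overwrite, an existing table whose schema matches `df`:
    #   df.write.insertInto("analytics.daily_events")
    #   df.write.insertInto("analytics.daily_events", overwrite=True)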
@since(1.4)
def saveAsTable(self, name, format=None, mode=None, partitionBy=None, **options):
"""Saves the content of the :class:`DataFrame` as the specified table.
In the case the table already exists, behavior of this function depends on the
        save mode, specified by the `mode` function (defaulting to throwing an exception).
When `mode` is `Overwrite`, the schema of the :class:`DataFrame` does not need to be
the same as that of the existing table.
* `append`: Append contents of this :class:`DataFrame` to existing data.
* `overwrite`: Overwrite existing data.
* `error` or `errorifexists`: Throw an exception if data already exists.
* `ignore`: Silently ignore this operation if data already exists.
:param name: the table name
:param format: the format used to save
:param mode: one of `append`, `overwrite`, `error`, `errorifexists`, `ignore` \
(default: error)
:param partitionBy: names of partitioning columns
:param options: all other string options
"""
self.mode(mode).options(**options)
if partitionBy is not None:
self.partitionBy(partitionBy)
if format is not None:
self.format(format)
self._jwrite.saveAsTable(name)
@since(1.4)
def json(self, path, mode=None, compression=None, dateFormat=None, timestampFormat=None,
lineSep=None, encoding=None):
"""Saves the content of the :class:`DataFrame` in JSON format
(`JSON Lines text format or newline-delimited JSON <http://jsonlines.org/>`_) at the
specified path.
:param path: the path in any Hadoop supported file system
:param mode: specifies the behavior of the save operation when data already exists.
* ``append``: Append contents of this :class:`DataFrame` to existing data.
* ``overwrite``: Overwrite existing data.
* ``ignore``: Silently ignore this operation if data already exists.
* ``error`` or ``errorifexists`` (default case): Throw an exception if data already \
exists.
:param compression: compression codec to use when saving to file. This can be one of the
                            known case-insensitive shortened names (none, bzip2, gzip, lz4,
snappy and deflate).
:param dateFormat: sets the string that indicates a date format. Custom date formats
follow the formats at ``java.text.SimpleDateFormat``. This
applies to date type. If None is set, it uses the
default value, ``yyyy-MM-dd``.
:param timestampFormat: sets the string that indicates a timestamp format. Custom date
formats follow the formats at ``java.text.SimpleDateFormat``.
This applies to timestamp type. If None is set, it uses the
default value, ``yyyy-MM-dd'T'HH:mm:ss.SSSXXX``.
:param encoding: specifies encoding (charset) of saved json files. If None is set,
the default UTF-8 charset will be used.
:param lineSep: defines the line separator that should be used for writing. If None is
set, it uses the default value, ``\\n``.
>>> df.write.json(os.path.join(tempfile.mkdtemp(), 'data'))
"""
self.mode(mode)
self._set_opts(
compression=compression, dateFormat=dateFormat, timestampFormat=timestampFormat,
lineSep=lineSep, encoding=encoding)
self._jwrite.json(path)
@since(1.4)
def parquet(self, path, mode=None, partitionBy=None, compression=None):
"""Saves the content of the :class:`DataFrame` in Parquet format at the specified path.
:param path: the path in any Hadoop supported file system
:param mode: specifies the behavior of the save operation when data already exists.
* ``append``: Append contents of this :class:`DataFrame` to existing data.
* ``overwrite``: Overwrite existing data.
* ``ignore``: Silently ignore this operation if data already exists.
* ``error`` or ``errorifexists`` (default case): Throw an exception if data already \
exists.
:param partitionBy: names of partitioning columns
:param compression: compression codec to use when saving to file. This can be one of the
                            known case-insensitive shortened names (none, uncompressed, snappy, gzip,
lzo, brotli, lz4, and zstd). This will override
``spark.sql.parquet.compression.codec``. If None is set, it uses the
value specified in ``spark.sql.parquet.compression.codec``.
>>> df.write.parquet(os.path.join(tempfile.mkdtemp(), 'data'))
"""
self.mode(mode)
if partitionBy is not None:
self.partitionBy(partitionBy)
self._set_opts(compression=compression)
self._jwrite.parquet(path)
@since(1.6)
def text(self, path, compression=None, lineSep=None):
"""Saves the content of the DataFrame in a text file at the specified path.
:param path: the path in any Hadoop supported file system
:param compression: compression codec to use when saving to file. This can be one of the
                            known case-insensitive shortened names (none, bzip2, gzip, lz4,
snappy and deflate).
:param lineSep: defines the line separator that should be used for writing. If None is
set, it uses the default value, ``\\n``.
The DataFrame must have only one column that is of string type.
Each row becomes a new line in the output file.
"""
self._set_opts(compression=compression, lineSep=lineSep)
self._jwrite.text(path)
@since(2.0)
def csv(self, path, mode=None, compression=None, sep=None, quote=None, escape=None,
header=None, nullValue=None, escapeQuotes=None, quoteAll=None, dateFormat=None,
timestampFormat=None, ignoreLeadingWhiteSpace=None, ignoreTrailingWhiteSpace=None,
charToEscapeQuoteEscaping=None, encoding=None, emptyValue=None):
r"""Saves the content of the :class:`DataFrame` in CSV format at the specified path.
:param path: the path in any Hadoop supported file system
:param mode: specifies the behavior of the save operation when data already exists.
* ``append``: Append contents of this :class:`DataFrame` to existing data.
* ``overwrite``: Overwrite existing data.
* ``ignore``: Silently ignore this operation if data already exists.
* ``error`` or ``errorifexists`` (default case): Throw an exception if data already \
exists.
:param compression: compression codec to use when saving to file. This can be one of the
                            known case-insensitive shortened names (none, bzip2, gzip, lz4,
snappy and deflate).
:param sep: sets a single character as a separator for each field and value. If None is
set, it uses the default value, ``,``.
:param quote: sets a single character used for escaping quoted values where the
separator can be part of the value. If None is set, it uses the default
value, ``"``. If an empty string is set, it uses ``u0000`` (null character).
:param escape: sets a single character used for escaping quotes inside an already
quoted value. If None is set, it uses the default value, ``\``
:param escapeQuotes: a flag indicating whether values containing quotes should always
be enclosed in quotes. If None is set, it uses the default value
``true``, escaping all values containing a quote character.
:param quoteAll: a flag indicating whether all values should always be enclosed in
quotes. If None is set, it uses the default value ``false``,
only escaping values containing a quote character.
:param header: writes the names of columns as the first line. If None is set, it uses
the default value, ``false``.
:param nullValue: sets the string representation of a null value. If None is set, it uses
the default value, empty string.
:param dateFormat: sets the string that indicates a date format. Custom date formats
follow the formats at ``java.text.SimpleDateFormat``. This
applies to date type. If None is set, it uses the
default value, ``yyyy-MM-dd``.
:param timestampFormat: sets the string that indicates a timestamp format. Custom date
formats follow the formats at ``java.text.SimpleDateFormat``.
This applies to timestamp type. If None is set, it uses the
default value, ``yyyy-MM-dd'T'HH:mm:ss.SSSXXX``.
:param ignoreLeadingWhiteSpace: a flag indicating whether or not leading whitespaces from
values being written should be skipped. If None is set, it
uses the default value, ``true``.
:param ignoreTrailingWhiteSpace: a flag indicating whether or not trailing whitespaces from
values being written should be skipped. If None is set, it
uses the default value, ``true``.
:param charToEscapeQuoteEscaping: sets a single character used for escaping the escape for
the quote character. If None is set, the default value is
escape character when escape and quote characters are
                                          different, ``\0`` otherwise.
:param encoding: sets the encoding (charset) of saved csv files. If None is set,
the default UTF-8 charset will be used.
:param emptyValue: sets the string representation of an empty value. If None is set, it uses
the default value, ``""``.
>>> df.write.csv(os.path.join(tempfile.mkdtemp(), 'data'))
"""
self.mode(mode)
self._set_opts(compression=compression, sep=sep, quote=quote, escape=escape, header=header,
nullValue=nullValue, escapeQuotes=escapeQuotes, quoteAll=quoteAll,
dateFormat=dateFormat, timestampFormat=timestampFormat,
ignoreLeadingWhiteSpace=ignoreLeadingWhiteSpace,
ignoreTrailingWhiteSpace=ignoreTrailingWhiteSpace,
charToEscapeQuoteEscaping=charToEscapeQuoteEscaping,
encoding=encoding, emptyValue=emptyValue)
self._jwrite.csv(path)
@since(1.5)
def orc(self, path, mode=None, partitionBy=None, compression=None):
"""Saves the content of the :class:`DataFrame` in ORC format at the specified path.
.. note:: Currently ORC support is only available together with Hive support.
:param path: the path in any Hadoop supported file system
:param mode: specifies the behavior of the save operation when data already exists.
* ``append``: Append contents of this :class:`DataFrame` to existing data.
* ``overwrite``: Overwrite existing data.
* ``ignore``: Silently ignore this operation if data already exists.
* ``error`` or ``errorifexists`` (default case): Throw an exception if data already \
exists.
:param partitionBy: names of partitioning columns
:param compression: compression codec to use when saving to file. This can be one of the
                            known case-insensitive shortened names (none, snappy, zlib, and lzo).
This will override ``orc.compress`` and
``spark.sql.orc.compression.codec``. If None is set, it uses the value
specified in ``spark.sql.orc.compression.codec``.
>>> orc_df = spark.read.orc('python/test_support/sql/orc_partitioned')
>>> orc_df.write.orc(os.path.join(tempfile.mkdtemp(), 'data'))
"""
self.mode(mode)
if partitionBy is not None:
self.partitionBy(partitionBy)
self._set_opts(compression=compression)
self._jwrite.orc(path)
@since(1.4)
def jdbc(self, url, table, mode=None, properties=None):
"""Saves the content of the :class:`DataFrame` to an external database table via JDBC.
.. note:: Don't create too many partitions in parallel on a large cluster;
otherwise Spark might crash your external database systems.
:param url: a JDBC URL of the form ``jdbc:subprotocol:subname``
:param table: Name of the table in the external database.
:param mode: specifies the behavior of the save operation when data already exists.
* ``append``: Append contents of this :class:`DataFrame` to existing data.
* ``overwrite``: Overwrite existing data.
* ``ignore``: Silently ignore this operation if data already exists.
* ``error`` or ``errorifexists`` (default case): Throw an exception if data already \
exists.
:param properties: a dictionary of JDBC database connection arguments. Normally at
least properties "user" and "password" with their corresponding values.
For example { 'user' : 'SYSTEM', 'password' : 'mypassword' }
"""
if properties is None:
properties = dict()
jprop = JavaClass("java.util.Properties", self._spark._sc._gateway._gateway_client)()
for k in properties:
jprop.setProperty(k, properties[k])
self.mode(mode)._jwrite.jdbc(url, table, jprop)
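# Illustrative sketch (not part of the original source): writing a DataFrame to a
# database table over JDBC; the URL, table name and credentials are hypothetical.
#   (df.write.mode("append")
#      .jdbc("jdbc:postgresql://localhost/mydb", "events",
#            properties={"user": "writer", "password": "secret"}))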
def _test():
import doctest
import os
import tempfile
import py4j
from pyspark.context import SparkContext
from pyspark.sql import SparkSession, Row
import pyspark.sql.readwriter
os.chdir(os.environ["SPARK_HOME"])
globs = pyspark.sql.readwriter.__dict__.copy()
sc = SparkContext('local[4]', 'PythonTest')
try:
spark = SparkSession.builder.getOrCreate()
except py4j.protocol.Py4JError:
spark = SparkSession(sc)
globs['tempfile'] = tempfile
globs['os'] = os
globs['sc'] = sc
globs['spark'] = spark
globs['df'] = spark.read.parquet('python/test_support/sql/parquet_partitioned')
(failure_count, test_count) = doctest.testmod(
pyspark.sql.readwriter, globs=globs,
optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE | doctest.REPORT_NDIFF)
sc.stop()
if failure_count:
sys.exit(-1)
if __name__ == "__main__":
_test()
|
mhvk/numpy
|
refs/heads/placeholder
|
numpy/core/code_generators/generate_numpy_api.py
|
11
|
import os
import genapi
from genapi import \
TypeApi, GlobalVarApi, FunctionApi, BoolValuesApi
import numpy_api
# use annotated api when running under cpychecker
h_template = r"""
#if defined(_MULTIARRAYMODULE) || defined(WITH_CPYCHECKER_STEALS_REFERENCE_TO_ARG_ATTRIBUTE)
typedef struct {
PyObject_HEAD
npy_bool obval;
} PyBoolScalarObject;
extern NPY_NO_EXPORT PyTypeObject PyArrayMapIter_Type;
extern NPY_NO_EXPORT PyTypeObject PyArrayNeighborhoodIter_Type;
extern NPY_NO_EXPORT PyBoolScalarObject _PyArrayScalar_BoolValues[2];
%s
#else
#if defined(PY_ARRAY_UNIQUE_SYMBOL)
#define PyArray_API PY_ARRAY_UNIQUE_SYMBOL
#endif
#if defined(NO_IMPORT) || defined(NO_IMPORT_ARRAY)
extern void **PyArray_API;
#else
#if defined(PY_ARRAY_UNIQUE_SYMBOL)
void **PyArray_API;
#else
static void **PyArray_API=NULL;
#endif
#endif
%s
#if !defined(NO_IMPORT_ARRAY) && !defined(NO_IMPORT)
static int
_import_array(void)
{
int st;
PyObject *numpy = PyImport_ImportModule("numpy.core._multiarray_umath");
PyObject *c_api = NULL;
if (numpy == NULL) {
return -1;
}
c_api = PyObject_GetAttrString(numpy, "_ARRAY_API");
Py_DECREF(numpy);
if (c_api == NULL) {
PyErr_SetString(PyExc_AttributeError, "_ARRAY_API not found");
return -1;
}
if (!PyCapsule_CheckExact(c_api)) {
PyErr_SetString(PyExc_RuntimeError, "_ARRAY_API is not PyCapsule object");
Py_DECREF(c_api);
return -1;
}
PyArray_API = (void **)PyCapsule_GetPointer(c_api, NULL);
Py_DECREF(c_api);
if (PyArray_API == NULL) {
PyErr_SetString(PyExc_RuntimeError, "_ARRAY_API is NULL pointer");
return -1;
}
/* Perform runtime check of C API version */
if (NPY_VERSION != PyArray_GetNDArrayCVersion()) {
PyErr_Format(PyExc_RuntimeError, "module compiled against "\
"ABI version 0x%%x but this version of numpy is 0x%%x", \
(int) NPY_VERSION, (int) PyArray_GetNDArrayCVersion());
return -1;
}
if (NPY_FEATURE_VERSION > PyArray_GetNDArrayCFeatureVersion()) {
PyErr_Format(PyExc_RuntimeError, "module compiled against "\
"API version 0x%%x but this version of numpy is 0x%%x", \
(int) NPY_FEATURE_VERSION, (int) PyArray_GetNDArrayCFeatureVersion());
return -1;
}
/*
* Perform runtime check of endianness and check it matches the one set by
* the headers (npy_endian.h) as a safeguard
*/
st = PyArray_GetEndianness();
if (st == NPY_CPU_UNKNOWN_ENDIAN) {
PyErr_Format(PyExc_RuntimeError, "FATAL: module compiled as unknown endian");
return -1;
}
#if NPY_BYTE_ORDER == NPY_BIG_ENDIAN
if (st != NPY_CPU_BIG) {
PyErr_Format(PyExc_RuntimeError, "FATAL: module compiled as "\
"big endian, but detected different endianness at runtime");
return -1;
}
#elif NPY_BYTE_ORDER == NPY_LITTLE_ENDIAN
if (st != NPY_CPU_LITTLE) {
PyErr_Format(PyExc_RuntimeError, "FATAL: module compiled as "\
"little endian, but detected different endianness at runtime");
return -1;
}
#endif
return 0;
}
#define import_array() {if (_import_array() < 0) {PyErr_Print(); PyErr_SetString(PyExc_ImportError, "numpy.core.multiarray failed to import"); return NULL; } }
#define import_array1(ret) {if (_import_array() < 0) {PyErr_Print(); PyErr_SetString(PyExc_ImportError, "numpy.core.multiarray failed to import"); return ret; } }
#define import_array2(msg, ret) {if (_import_array() < 0) {PyErr_Print(); PyErr_SetString(PyExc_ImportError, msg); return ret; } }
#endif
#endif
"""
c_template = r"""
/* These pointers will be stored in the C-object for use in other
extension modules
*/
void *PyArray_API[] = {
%s
};
"""
c_api_header = """
===========
NumPy C-API
===========
"""
def generate_api(output_dir, force=False):
basename = 'multiarray_api'
h_file = os.path.join(output_dir, '__%s.h' % basename)
c_file = os.path.join(output_dir, '__%s.c' % basename)
d_file = os.path.join(output_dir, '%s.txt' % basename)
targets = (h_file, c_file, d_file)
sources = numpy_api.multiarray_api
if (not force and not genapi.should_rebuild(targets, [numpy_api.__file__, __file__])):
return targets
else:
do_generate_api(targets, sources)
return targets
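# Illustrative note (not part of the original file): during a NumPy build this
# function is driven by the build machinery with the generated-sources directory
# as ``output_dir`` (the exact path below is an assumption for illustration):
#   generate_api(os.path.join('build', 'src', 'numpy', 'core', 'include', 'numpy'))
# which writes __multiarray_api.h, __multiarray_api.c and multiarray_api.txt there.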
def do_generate_api(targets, sources):
header_file = targets[0]
c_file = targets[1]
doc_file = targets[2]
global_vars = sources[0]
scalar_bool_values = sources[1]
types_api = sources[2]
multiarray_funcs = sources[3]
multiarray_api = sources[:]
module_list = []
extension_list = []
init_list = []
# Check multiarray api indexes
multiarray_api_index = genapi.merge_api_dicts(multiarray_api)
genapi.check_api_dict(multiarray_api_index)
numpyapi_list = genapi.get_api_functions('NUMPY_API',
multiarray_funcs)
# FIXME: ordered_funcs_api is unused
ordered_funcs_api = genapi.order_dict(multiarray_funcs)
# Create dict name -> *Api instance
api_name = 'PyArray_API'
multiarray_api_dict = {}
for f in numpyapi_list:
name = f.name
index = multiarray_funcs[name][0]
annotations = multiarray_funcs[name][1:]
multiarray_api_dict[f.name] = FunctionApi(f.name, index, annotations,
f.return_type,
f.args, api_name)
for name, val in global_vars.items():
index, type = val
multiarray_api_dict[name] = GlobalVarApi(name, index, type, api_name)
for name, val in scalar_bool_values.items():
index = val[0]
multiarray_api_dict[name] = BoolValuesApi(name, index, api_name)
for name, val in types_api.items():
index = val[0]
internal_type = None if len(val) == 1 else val[1]
multiarray_api_dict[name] = TypeApi(
name, index, 'PyTypeObject', api_name, internal_type)
if len(multiarray_api_dict) != len(multiarray_api_index):
keys_dict = set(multiarray_api_dict.keys())
keys_index = set(multiarray_api_index.keys())
raise AssertionError(
"Multiarray API size mismatch - "
"index has extra keys {}, dict has extra keys {}"
.format(keys_index - keys_dict, keys_dict - keys_index)
)
extension_list = []
for name, index in genapi.order_dict(multiarray_api_index):
api_item = multiarray_api_dict[name]
extension_list.append(api_item.define_from_array_api_string())
init_list.append(api_item.array_api_define())
module_list.append(api_item.internal_define())
# Write to header
s = h_template % ('\n'.join(module_list), '\n'.join(extension_list))
genapi.write_file(header_file, s)
# Write to c-code
s = c_template % ',\n'.join(init_list)
genapi.write_file(c_file, s)
# write to documentation
s = c_api_header
for func in numpyapi_list:
s += func.to_ReST()
s += '\n\n'
genapi.write_file(doc_file, s)
return targets
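# --- Hedged usage sketch (not part of the original generator) --------------
# A minimal illustration of how generate_api() above could be driven by hand;
# the temporary output directory is an assumption, not a path used by the
# real numpy build, which normally invokes this module from its setup scripts.
if __name__ == "__main__":
    import tempfile
    _out_dir = tempfile.mkdtemp()                    # hypothetical output location
    _header, _c_src, _doc = generate_api(_out_dir, force=True)
    print("generated: %s, %s, %s" % (_header, _c_src, _doc))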
|
Viktor-Evst/luigi
|
refs/heads/master
|
test/contrib/hive_test.py
|
9
|
# -*- coding: utf-8 -*-
#
# Copyright 2012-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
import os
import sys
import tempfile
from helpers import unittest
import luigi.contrib.hive
import mock
from luigi import LocalTarget
class HiveTest(unittest.TestCase):
count = 0
def mock_hive_cmd(self, args, check_return=True):
self.last_hive_cmd = args
self.count += 1
return "statement{0}".format(self.count)
def setUp(self):
self.run_hive_cmd_saved = luigi.contrib.hive.run_hive
luigi.contrib.hive.run_hive = self.mock_hive_cmd
def tearDown(self):
luigi.contrib.hive.run_hive = self.run_hive_cmd_saved
def test_run_hive_command(self):
pre_count = self.count
res = luigi.contrib.hive.run_hive_cmd("foo")
self.assertEqual(["-e", "foo"], self.last_hive_cmd)
self.assertEqual("statement{0}".format(pre_count + 1), res)
def test_run_hive_script_not_exists(self):
def test():
            luigi.contrib.hive.run_hive_script("/tmp/some-non-existent-file______")
self.assertRaises(RuntimeError, test)
def test_run_hive_script_exists(self):
with tempfile.NamedTemporaryFile(delete=True) as f:
pre_count = self.count
res = luigi.contrib.hive.run_hive_script(f.name)
self.assertEqual(["-f", f.name], self.last_hive_cmd)
self.assertEqual("statement{0}".format(pre_count + 1), res)
def test_create_parent_dirs(self):
dirname = "/tmp/hive_task_test_dir"
class FooHiveTask(object):
def output(self):
return LocalTarget(os.path.join(dirname, "foo"))
runner = luigi.contrib.hive.HiveQueryRunner()
runner.prepare_outputs(FooHiveTask())
self.assertTrue(os.path.exists(dirname))
class HiveCommandClientTest(unittest.TestCase):
"""Note that some of these tests are really for the CDH releases of Hive, to which I do not currently have access.
Hopefully there are no significant differences in the expected output"""
def setUp(self):
self.client = luigi.contrib.hive.HiveCommandClient()
self.apacheclient = luigi.contrib.hive.ApacheHiveCommandClient()
self.metastoreclient = luigi.contrib.hive.MetastoreClient()
@mock.patch("luigi.contrib.hive.run_hive_cmd")
def test_default_table_location(self, run_command):
run_command.return_value = "Protect Mode: None \n" \
"Retention: 0 \n" \
"Location: hdfs://localhost:9000/user/hive/warehouse/mytable \n" \
"Table Type: MANAGED_TABLE \n"
returned = self.client.table_location("mytable")
self.assertEqual('hdfs://localhost:9000/user/hive/warehouse/mytable', returned)
@mock.patch("luigi.contrib.hive.run_hive_cmd")
def test_table_exists(self, run_command):
run_command.return_value = "OK"
returned = self.client.table_exists("mytable")
self.assertFalse(returned)
run_command.return_value = "OK\n" \
"mytable"
returned = self.client.table_exists("mytable")
self.assertTrue(returned)
# Issue #896 test case insensitivity
returned = self.client.table_exists("MyTable")
self.assertTrue(returned)
run_command.return_value = "day=2013-06-28/hour=3\n" \
"day=2013-06-28/hour=4\n" \
"day=2013-07-07/hour=2\n"
self.client.partition_spec = mock.Mock(name="partition_spec")
self.client.partition_spec.return_value = "somepart"
returned = self.client.table_exists("mytable", partition={'a': 'b'})
self.assertTrue(returned)
run_command.return_value = ""
returned = self.client.table_exists("mytable", partition={'a': 'b'})
self.assertFalse(returned)
@mock.patch("luigi.contrib.hive.run_hive_cmd")
def test_table_schema(self, run_command):
run_command.return_value = "FAILED: SemanticException [Error 10001]: blah does not exist\nSome other stuff"
returned = self.client.table_schema("mytable")
self.assertFalse(returned)
run_command.return_value = "OK\n" \
"col1 string None \n" \
"col2 string None \n" \
"col3 string None \n" \
"day string None \n" \
"hour smallint None \n\n" \
"# Partition Information \n" \
"# col_name data_type comment \n\n" \
"day string None \n" \
"hour smallint None \n" \
"Time taken: 2.08 seconds, Fetched: 34 row(s)\n"
expected = [('OK',),
('col1', 'string', 'None'),
('col2', 'string', 'None'),
('col3', 'string', 'None'),
('day', 'string', 'None'),
('hour', 'smallint', 'None'),
('',),
('# Partition Information',),
('# col_name', 'data_type', 'comment'),
('',),
('day', 'string', 'None'),
('hour', 'smallint', 'None'),
('Time taken: 2.08 seconds, Fetched: 34 row(s)',)]
returned = self.client.table_schema("mytable")
self.assertEqual(expected, returned)
def test_partition_spec(self):
returned = self.client.partition_spec({'a': 'b', 'c': 'd'})
self.assertEqual("a='b',c='d'", returned)
@mock.patch("luigi.contrib.hive.run_hive_cmd")
def test_apacheclient_table_exists(self, run_command):
run_command.return_value = "OK"
returned = self.apacheclient.table_exists("mytable")
self.assertFalse(returned)
run_command.return_value = "OK\n" \
"mytable"
returned = self.apacheclient.table_exists("mytable")
self.assertTrue(returned)
# Issue #896 test case insensitivity
returned = self.apacheclient.table_exists("MyTable")
self.assertTrue(returned)
run_command.return_value = "day=2013-06-28/hour=3\n" \
"day=2013-06-28/hour=4\n" \
"day=2013-07-07/hour=2\n"
self.apacheclient.partition_spec = mock.Mock(name="partition_spec")
self.apacheclient.partition_spec.return_value = "somepart"
returned = self.apacheclient.table_exists("mytable", partition={'a': 'b'})
self.assertTrue(returned)
run_command.return_value = ""
returned = self.apacheclient.table_exists("mytable", partition={'a': 'b'})
self.assertFalse(returned)
@mock.patch("luigi.contrib.hive.run_hive_cmd")
def test_apacheclient_table_schema(self, run_command):
run_command.return_value = "FAILED: SemanticException [Error 10001]: Table not found mytable\nSome other stuff"
returned = self.apacheclient.table_schema("mytable")
self.assertFalse(returned)
run_command.return_value = "OK\n" \
"col1 string None \n" \
"col2 string None \n" \
"col3 string None \n" \
"day string None \n" \
"hour smallint None \n\n" \
"# Partition Information \n" \
"# col_name data_type comment \n\n" \
"day string None \n" \
"hour smallint None \n" \
"Time taken: 2.08 seconds, Fetched: 34 row(s)\n"
expected = [('OK',),
('col1', 'string', 'None'),
('col2', 'string', 'None'),
('col3', 'string', 'None'),
('day', 'string', 'None'),
('hour', 'smallint', 'None'),
('',),
('# Partition Information',),
('# col_name', 'data_type', 'comment'),
('',),
('day', 'string', 'None'),
('hour', 'smallint', 'None'),
('Time taken: 2.08 seconds, Fetched: 34 row(s)',)]
returned = self.apacheclient.table_schema("mytable")
self.assertEqual(expected, returned)
@mock.patch("luigi.contrib.hive.HiveThriftContext")
def test_metastoreclient_partition_existence_regardless_of_order(self, thrift_context):
thrift_context.return_value = thrift_context
client_mock = mock.Mock(name="clientmock")
client_mock.return_value = client_mock
thrift_context.__enter__ = client_mock
client_mock.get_partition_names = mock.Mock(return_value=["p1=x/p2=y", "p1=a/p2=b"])
partition_spec = OrderedDict([("p1", "a"), ("p2", "b")])
self.assertTrue(self.metastoreclient.table_exists("table", "default", partition_spec))
partition_spec = OrderedDict([("p2", "b"), ("p1", "a")])
self.assertTrue(self.metastoreclient.table_exists("table", "default", partition_spec))
def test_metastore_partition_spec_has_the_same_order(self):
partition_spec = OrderedDict([("p1", "a"), ("p2", "b")])
spec_string = luigi.contrib.hive.MetastoreClient().partition_spec(partition_spec)
self.assertEqual(spec_string, "p1=a/p2=b")
partition_spec = OrderedDict([("p2", "b"), ("p1", "a")])
spec_string = luigi.contrib.hive.MetastoreClient().partition_spec(partition_spec)
self.assertEqual(spec_string, "p1=a/p2=b")
@mock.patch("luigi.configuration")
def test_client_def(self, hive_syntax):
hive_syntax.get_config.return_value.get.return_value = "cdh4"
client = luigi.contrib.hive.get_default_client()
self.assertEqual(luigi.contrib.hive.HiveCommandClient, type(client))
hive_syntax.get_config.return_value.get.return_value = "cdh3"
client = luigi.contrib.hive.get_default_client()
self.assertEqual(luigi.contrib.hive.HiveCommandClient, type(client))
hive_syntax.get_config.return_value.get.return_value = "apache"
client = luigi.contrib.hive.get_default_client()
self.assertEqual(luigi.contrib.hive.ApacheHiveCommandClient, type(client))
@mock.patch('subprocess.Popen')
def test_run_hive_command(self, popen):
# I'm testing this again to check the return codes
# I didn't want to tear up all the existing tests to change how run_hive is mocked
comm = mock.Mock(name='communicate_mock')
comm.return_value = "some return stuff", ""
preturn = mock.Mock(name='open_mock')
preturn.returncode = 0
preturn.communicate = comm
popen.return_value = preturn
returned = luigi.contrib.hive.run_hive(["blah", "blah"])
self.assertEqual("some return stuff", returned)
preturn.returncode = 17
self.assertRaises(luigi.contrib.hive.HiveCommandError, luigi.contrib.hive.run_hive, ["blah", "blah"])
comm.return_value = "", "some stderr stuff"
returned = luigi.contrib.hive.run_hive(["blah", "blah"], False)
self.assertEqual("", returned)
class TestHiveMisc(unittest.TestCase):
def test_import_old(self):
import luigi.hive
self.assertEqual(luigi.hive.HiveQueryTask, luigi.contrib.hive.HiveQueryTask)
class MyHiveTask(luigi.contrib.hive.HiveQueryTask):
param = luigi.Parameter()
def query(self):
return 'banana banana %s' % self.param
class TestHiveTask(unittest.TestCase):
@mock.patch('luigi.contrib.hadoop.run_and_track_hadoop_job')
def test_run(self, run_and_track_hadoop_job):
success = luigi.run(['MyHiveTask', '--param', 'foo', '--local-scheduler', '--no-lock'])
self.assertTrue(success)
self.assertEqual('hive', run_and_track_hadoop_job.call_args[0][0][0])
class TestHiveTarget(unittest.TestCase):
def test_hive_table_target(self):
client = mock.Mock()
target = luigi.contrib.hive.HiveTableTarget(database='db', table='foo', client=client)
target.exists()
client.table_exists.assert_called_with('foo', 'db')
def test_hive_partition_target(self):
client = mock.Mock()
target = luigi.contrib.hive.HivePartitionTarget(database='db', table='foo', partition='bar', client=client)
target.exists()
client.table_exists.assert_called_with('foo', 'db', 'bar')
if __name__ == '__main__':
unittest.main()
|
vikt0rs/six
|
refs/heads/master
|
test_six.py
|
64
|
import operator
import sys
import types
import unittest
import py
import six
def test_add_doc():
def f():
"""Icky doc"""
pass
six._add_doc(f, """New doc""")
assert f.__doc__ == "New doc"
def test_import_module():
from logging import handlers
m = six._import_module("logging.handlers")
assert m is handlers
def test_integer_types():
assert isinstance(1, six.integer_types)
assert isinstance(-1, six.integer_types)
assert isinstance(six.MAXSIZE + 23, six.integer_types)
assert not isinstance(.1, six.integer_types)
def test_string_types():
assert isinstance("hi", six.string_types)
assert isinstance(six.u("hi"), six.string_types)
assert issubclass(six.text_type, six.string_types)
def test_class_types():
class X:
pass
class Y(object):
pass
assert isinstance(X, six.class_types)
assert isinstance(Y, six.class_types)
assert not isinstance(X(), six.class_types)
def test_text_type():
assert type(six.u("hi")) is six.text_type
def test_binary_type():
assert type(six.b("hi")) is six.binary_type
def test_MAXSIZE():
try:
# This shouldn't raise an overflow error.
six.MAXSIZE.__index__()
except AttributeError:
# Before Python 2.6.
pass
py.test.raises(
(ValueError, OverflowError),
operator.mul, [None], six.MAXSIZE + 1)
def test_lazy():
if six.PY3:
html_name = "html.parser"
else:
html_name = "HTMLParser"
assert html_name not in sys.modules
mod = six.moves.html_parser
assert sys.modules[html_name] is mod
assert "htmlparser" not in six._MovedItems.__dict__
try:
import _tkinter
except ImportError:
have_tkinter = False
else:
have_tkinter = True
have_gdbm = True
try:
import gdbm
except ImportError:
try:
import dbm.gnu
except ImportError:
have_gdbm = False
@py.test.mark.parametrize("item_name",
[item.name for item in six._moved_attributes])
def test_move_items(item_name):
"""Ensure that everything loads correctly."""
try:
item = getattr(six.moves, item_name)
if isinstance(item, types.ModuleType):
__import__("six.moves." + item_name)
except AttributeError:
if item_name == "zip_longest" and sys.version_info < (2, 6):
py.test.skip("zip_longest only available on 2.6+")
except ImportError:
if item_name == "winreg" and not sys.platform.startswith("win"):
py.test.skip("Windows only module")
if item_name.startswith("tkinter"):
if not have_tkinter:
py.test.skip("requires tkinter")
if item_name == "tkinter_ttk" and sys.version_info[:2] <= (2, 6):
py.test.skip("ttk only available on 2.7+")
if item_name.startswith("dbm_gnu") and not have_gdbm:
py.test.skip("requires gdbm")
raise
if sys.version_info[:2] >= (2, 6):
assert item_name in dir(six.moves)
@py.test.mark.parametrize("item_name",
[item.name for item in six._urllib_parse_moved_attributes])
def test_move_items_urllib_parse(item_name):
"""Ensure that everything loads correctly."""
if item_name == "ParseResult" and sys.version_info < (2, 5):
py.test.skip("ParseResult is only found on 2.5+")
if item_name in ("parse_qs", "parse_qsl") and sys.version_info < (2, 6):
py.test.skip("parse_qs[l] is new in 2.6")
if sys.version_info[:2] >= (2, 6):
assert item_name in dir(six.moves.urllib.parse)
getattr(six.moves.urllib.parse, item_name)
@py.test.mark.parametrize("item_name",
[item.name for item in six._urllib_error_moved_attributes])
def test_move_items_urllib_error(item_name):
"""Ensure that everything loads correctly."""
if sys.version_info[:2] >= (2, 6):
assert item_name in dir(six.moves.urllib.error)
getattr(six.moves.urllib.error, item_name)
@py.test.mark.parametrize("item_name",
[item.name for item in six._urllib_request_moved_attributes])
def test_move_items_urllib_request(item_name):
"""Ensure that everything loads correctly."""
if sys.version_info[:2] >= (2, 6):
assert item_name in dir(six.moves.urllib.request)
getattr(six.moves.urllib.request, item_name)
@py.test.mark.parametrize("item_name",
[item.name for item in six._urllib_response_moved_attributes])
def test_move_items_urllib_response(item_name):
"""Ensure that everything loads correctly."""
if sys.version_info[:2] >= (2, 6):
assert item_name in dir(six.moves.urllib.response)
getattr(six.moves.urllib.response, item_name)
@py.test.mark.parametrize("item_name",
[item.name for item in six._urllib_robotparser_moved_attributes])
def test_move_items_urllib_robotparser(item_name):
"""Ensure that everything loads correctly."""
if sys.version_info[:2] >= (2, 6):
assert item_name in dir(six.moves.urllib.robotparser)
getattr(six.moves.urllib.robotparser, item_name)
def test_import_moves_error_1():
from six.moves.urllib.parse import urljoin
from six import moves
# In 1.4.1: AttributeError: 'Module_six_moves_urllib_parse' object has no attribute 'urljoin'
assert moves.urllib.parse.urljoin
def test_import_moves_error_2():
from six import moves
assert moves.urllib.parse.urljoin
# In 1.4.1: ImportError: cannot import name urljoin
from six.moves.urllib.parse import urljoin
def test_import_moves_error_3():
from six.moves.urllib.parse import urljoin
# In 1.4.1: ImportError: cannot import name urljoin
from six.moves.urllib_parse import urljoin
def test_from_imports():
from six.moves.queue import Queue
assert isinstance(Queue, six.class_types)
from six.moves.configparser import ConfigParser
assert isinstance(ConfigParser, six.class_types)
def test_filter():
from six.moves import filter
f = filter(lambda x: x % 2, range(10))
assert six.advance_iterator(f) == 1
def test_filter_false():
from six.moves import filterfalse
f = filterfalse(lambda x: x % 3, range(10))
assert six.advance_iterator(f) == 0
assert six.advance_iterator(f) == 3
assert six.advance_iterator(f) == 6
def test_map():
from six.moves import map
assert six.advance_iterator(map(lambda x: x + 1, range(2))) == 1
def test_zip():
from six.moves import zip
assert six.advance_iterator(zip(range(2), range(2))) == (0, 0)
@py.test.mark.skipif("sys.version_info < (2, 6)")
def test_zip_longest():
from six.moves import zip_longest
it = zip_longest(range(2), range(1))
assert six.advance_iterator(it) == (0, 0)
assert six.advance_iterator(it) == (1, None)
class TestCustomizedMoves:
def teardown_method(self, meth):
try:
del six._MovedItems.spam
except AttributeError:
pass
try:
del six.moves.__dict__["spam"]
except KeyError:
pass
def test_moved_attribute(self):
attr = six.MovedAttribute("spam", "foo", "bar")
if six.PY3:
assert attr.mod == "bar"
else:
assert attr.mod == "foo"
assert attr.attr == "spam"
attr = six.MovedAttribute("spam", "foo", "bar", "lemma")
assert attr.attr == "lemma"
attr = six.MovedAttribute("spam", "foo", "bar", "lemma", "theorm")
if six.PY3:
assert attr.attr == "theorm"
else:
assert attr.attr == "lemma"
def test_moved_module(self):
attr = six.MovedModule("spam", "foo")
if six.PY3:
assert attr.mod == "spam"
else:
assert attr.mod == "foo"
attr = six.MovedModule("spam", "foo", "bar")
if six.PY3:
assert attr.mod == "bar"
else:
assert attr.mod == "foo"
def test_custom_move_module(self):
attr = six.MovedModule("spam", "six", "six")
six.add_move(attr)
six.remove_move("spam")
assert not hasattr(six.moves, "spam")
attr = six.MovedModule("spam", "six", "six")
six.add_move(attr)
from six.moves import spam
assert spam is six
six.remove_move("spam")
assert not hasattr(six.moves, "spam")
def test_custom_move_attribute(self):
attr = six.MovedAttribute("spam", "six", "six", "u", "u")
six.add_move(attr)
six.remove_move("spam")
assert not hasattr(six.moves, "spam")
attr = six.MovedAttribute("spam", "six", "six", "u", "u")
six.add_move(attr)
from six.moves import spam
assert spam is six.u
six.remove_move("spam")
assert not hasattr(six.moves, "spam")
def test_empty_remove(self):
py.test.raises(AttributeError, six.remove_move, "eggs")
def test_get_unbound_function():
class X(object):
def m(self):
pass
assert six.get_unbound_function(X.m) is X.__dict__["m"]
def test_get_method_self():
class X(object):
def m(self):
pass
x = X()
assert six.get_method_self(x.m) is x
py.test.raises(AttributeError, six.get_method_self, 42)
def test_get_method_function():
class X(object):
def m(self):
pass
x = X()
assert six.get_method_function(x.m) is X.__dict__["m"]
py.test.raises(AttributeError, six.get_method_function, hasattr)
def test_get_function_closure():
def f():
x = 42
def g():
return x
return g
cell = six.get_function_closure(f())[0]
assert type(cell).__name__ == "cell"
def test_get_function_code():
def f():
pass
assert isinstance(six.get_function_code(f), types.CodeType)
if not hasattr(sys, "pypy_version_info"):
py.test.raises(AttributeError, six.get_function_code, hasattr)
def test_get_function_defaults():
def f(x, y=3, b=4):
pass
assert six.get_function_defaults(f) == (3, 4)
def test_get_function_globals():
def f():
pass
assert six.get_function_globals(f) is globals()
def test_dictionary_iterators(monkeypatch):
def stock_method_name(iterwhat):
"""Given a method suffix like "lists" or "values", return the name
of the dict method that delivers those on the version of Python
we're running in."""
if six.PY3:
return iterwhat
return 'iter' + iterwhat
class MyDict(dict):
if not six.PY3:
def lists(self, **kw):
return [1, 2, 3]
def iterlists(self, **kw):
return iter([1, 2, 3])
f = MyDict.iterlists
del MyDict.iterlists
setattr(MyDict, stock_method_name('lists'), f)
d = MyDict(zip(range(10), reversed(range(10))))
for name in "keys", "values", "items", "lists":
meth = getattr(six, "iter" + name)
it = meth(d)
assert not isinstance(it, list)
assert list(it) == list(getattr(d, name)())
py.test.raises(StopIteration, six.advance_iterator, it)
record = []
def with_kw(*args, **kw):
record.append(kw["kw"])
return old(*args)
old = getattr(MyDict, stock_method_name(name))
monkeypatch.setattr(MyDict, stock_method_name(name), with_kw)
meth(d, kw=42)
assert record == [42]
monkeypatch.undo()
@py.test.mark.skipif(sys.version_info[:2] < (2, 7),
reason="view methods on dictionaries only available on 2.7+")
def test_dictionary_views():
def stock_method_name(viewwhat):
"""Given a method suffix like "keys" or "values", return the name
of the dict method that delivers those on the version of Python
we're running in."""
if six.PY3:
return viewwhat
return 'view' + viewwhat
d = dict(zip(range(10), (range(11, 20))))
for name in "keys", "values", "items":
meth = getattr(six, "view" + name)
view = meth(d)
assert set(view) == set(getattr(d, name)())
def test_advance_iterator():
assert six.next is six.advance_iterator
l = [1, 2]
it = iter(l)
assert six.next(it) == 1
assert six.next(it) == 2
py.test.raises(StopIteration, six.next, it)
py.test.raises(StopIteration, six.next, it)
def test_iterator():
class myiter(six.Iterator):
def __next__(self):
return 13
assert six.advance_iterator(myiter()) == 13
class myitersub(myiter):
def __next__(self):
return 14
assert six.advance_iterator(myitersub()) == 14
def test_callable():
class X:
def __call__(self):
pass
def method(self):
pass
assert six.callable(X)
assert six.callable(X())
assert six.callable(test_callable)
assert six.callable(hasattr)
assert six.callable(X.method)
assert six.callable(X().method)
assert not six.callable(4)
assert not six.callable("string")
def test_create_bound_method():
class X(object):
pass
def f(self):
return self
x = X()
b = six.create_bound_method(f, x)
assert isinstance(b, types.MethodType)
assert b() is x
if six.PY3:
def test_b():
data = six.b("\xff")
assert isinstance(data, bytes)
assert len(data) == 1
assert data == bytes([255])
def test_u():
s = six.u("hi \u0439 \U00000439 \\ \\\\ \n")
assert isinstance(s, str)
assert s == "hi \u0439 \U00000439 \\ \\\\ \n"
else:
def test_b():
data = six.b("\xff")
assert isinstance(data, str)
assert len(data) == 1
assert data == "\xff"
def test_u():
s = six.u("hi \u0439 \U00000439 \\ \\\\ \n")
assert isinstance(s, unicode)
assert s == "hi \xd0\xb9 \xd0\xb9 \\ \\\\ \n".decode("utf8")
def test_u_escapes():
s = six.u("\u1234")
assert len(s) == 1
def test_unichr():
assert six.u("\u1234") == six.unichr(0x1234)
assert type(six.u("\u1234")) is type(six.unichr(0x1234))
def test_int2byte():
assert six.int2byte(3) == six.b("\x03")
py.test.raises((OverflowError, ValueError), six.int2byte, 256)
def test_byte2int():
assert six.byte2int(six.b("\x03")) == 3
assert six.byte2int(six.b("\x03\x04")) == 3
py.test.raises(IndexError, six.byte2int, six.b(""))
def test_bytesindex():
assert six.indexbytes(six.b("hello"), 3) == ord("l")
def test_bytesiter():
it = six.iterbytes(six.b("hi"))
assert six.next(it) == ord("h")
assert six.next(it) == ord("i")
py.test.raises(StopIteration, six.next, it)
def test_StringIO():
fp = six.StringIO()
fp.write(six.u("hello"))
assert fp.getvalue() == six.u("hello")
def test_BytesIO():
fp = six.BytesIO()
fp.write(six.b("hello"))
assert fp.getvalue() == six.b("hello")
def test_exec_():
def f():
l = []
six.exec_("l.append(1)")
assert l == [1]
f()
ns = {}
six.exec_("x = 42", ns)
assert ns["x"] == 42
glob = {}
loc = {}
six.exec_("global y; y = 42; x = 12", glob, loc)
assert glob["y"] == 42
assert "x" not in glob
assert loc["x"] == 12
assert "y" not in loc
def test_reraise():
def get_next(tb):
if six.PY3:
return tb.tb_next.tb_next
else:
return tb.tb_next
e = Exception("blah")
try:
raise e
except Exception:
tp, val, tb = sys.exc_info()
try:
six.reraise(tp, val, tb)
except Exception:
tp2, value2, tb2 = sys.exc_info()
assert tp2 is Exception
assert value2 is e
assert tb is get_next(tb2)
try:
six.reraise(tp, val)
except Exception:
tp2, value2, tb2 = sys.exc_info()
assert tp2 is Exception
assert value2 is e
assert tb2 is not tb
try:
six.reraise(tp, val, tb2)
except Exception:
tp2, value2, tb3 = sys.exc_info()
assert tp2 is Exception
assert value2 is e
assert get_next(tb3) is tb2
try:
six.reraise(tp, None, tb)
except Exception:
tp2, value2, tb2 = sys.exc_info()
assert tp2 is Exception
assert value2 is not val
assert isinstance(value2, Exception)
assert tb is get_next(tb2)
def test_raise_from():
try:
try:
raise Exception("blah")
except Exception:
ctx = sys.exc_info()[1]
f = Exception("foo")
six.raise_from(f, None)
except Exception:
tp, val, tb = sys.exc_info()
if sys.version_info[:2] > (3, 0):
# We should have done a raise f from None equivalent.
assert val.__cause__ is None
assert val.__context__ is ctx
if sys.version_info[:2] >= (3, 3):
# And that should suppress the context on the exception.
assert val.__suppress_context__
# For all versions the outer exception should have raised successfully.
assert str(val) == "foo"
def test_print_():
save = sys.stdout
out = sys.stdout = six.moves.StringIO()
try:
six.print_("Hello,", "person!")
finally:
sys.stdout = save
assert out.getvalue() == "Hello, person!\n"
out = six.StringIO()
six.print_("Hello,", "person!", file=out)
assert out.getvalue() == "Hello, person!\n"
out = six.StringIO()
six.print_("Hello,", "person!", file=out, end="")
assert out.getvalue() == "Hello, person!"
out = six.StringIO()
six.print_("Hello,", "person!", file=out, sep="X")
assert out.getvalue() == "Hello,Xperson!\n"
out = six.StringIO()
six.print_(six.u("Hello,"), six.u("person!"), file=out)
result = out.getvalue()
assert isinstance(result, six.text_type)
assert result == six.u("Hello, person!\n")
six.print_("Hello", file=None) # This works.
out = six.StringIO()
six.print_(None, file=out)
assert out.getvalue() == "None\n"
class FlushableStringIO(six.StringIO):
def __init__(self):
six.StringIO.__init__(self)
self.flushed = False
def flush(self):
self.flushed = True
out = FlushableStringIO()
six.print_("Hello", file=out)
assert not out.flushed
six.print_("Hello", file=out, flush=True)
assert out.flushed
@py.test.mark.skipif("sys.version_info[:2] >= (2, 6)")
def test_print_encoding(monkeypatch):
# Fool the type checking in print_.
monkeypatch.setattr(six, "file", six.BytesIO, raising=False)
out = six.BytesIO()
out.encoding = "utf-8"
out.errors = None
six.print_(six.u("\u053c"), end="", file=out)
assert out.getvalue() == six.b("\xd4\xbc")
out = six.BytesIO()
out.encoding = "ascii"
out.errors = "strict"
py.test.raises(UnicodeEncodeError, six.print_, six.u("\u053c"), file=out)
out.errors = "backslashreplace"
six.print_(six.u("\u053c"), end="", file=out)
assert out.getvalue() == six.b("\\u053c")
def test_print_exceptions():
py.test.raises(TypeError, six.print_, x=3)
py.test.raises(TypeError, six.print_, end=3)
py.test.raises(TypeError, six.print_, sep=42)
def test_with_metaclass():
class Meta(type):
pass
class X(six.with_metaclass(Meta)):
pass
assert type(X) is Meta
assert issubclass(X, object)
class Base(object):
pass
class X(six.with_metaclass(Meta, Base)):
pass
assert type(X) is Meta
assert issubclass(X, Base)
class Base2(object):
pass
class X(six.with_metaclass(Meta, Base, Base2)):
pass
assert type(X) is Meta
assert issubclass(X, Base)
assert issubclass(X, Base2)
assert X.__mro__ == (X, Base, Base2, object)
def test_wraps():
def f(g):
@six.wraps(g)
def w():
return 42
return w
def k():
pass
original_k = k
k = f(f(k))
assert hasattr(k, '__wrapped__')
k = k.__wrapped__
assert hasattr(k, '__wrapped__')
k = k.__wrapped__
assert k is original_k
assert not hasattr(k, '__wrapped__')
def f(g, assign, update):
def w():
return 42
w.glue = {"foo" : "bar"}
return six.wraps(g, assign, update)(w)
k.glue = {"melon" : "egg"}
k.turnip = 43
k = f(k, ["turnip"], ["glue"])
assert k.__name__ == "w"
assert k.turnip == 43
assert k.glue == {"melon" : "egg", "foo" : "bar"}
def test_add_metaclass():
class Meta(type):
pass
class X:
"success"
X = six.add_metaclass(Meta)(X)
assert type(X) is Meta
assert issubclass(X, object)
assert X.__module__ == __name__
assert X.__doc__ == "success"
class Base(object):
pass
class X(Base):
pass
X = six.add_metaclass(Meta)(X)
assert type(X) is Meta
assert issubclass(X, Base)
class Base2(object):
pass
class X(Base, Base2):
pass
X = six.add_metaclass(Meta)(X)
assert type(X) is Meta
assert issubclass(X, Base)
assert issubclass(X, Base2)
# Test a second-generation subclass of a type.
class Meta1(type):
m1 = "m1"
class Meta2(Meta1):
m2 = "m2"
class Base:
b = "b"
Base = six.add_metaclass(Meta1)(Base)
class X(Base):
x = "x"
X = six.add_metaclass(Meta2)(X)
assert type(X) is Meta2
assert issubclass(X, Base)
assert type(Base) is Meta1
assert "__dict__" not in vars(X)
instance = X()
instance.attr = "test"
assert vars(instance) == {"attr": "test"}
assert instance.b == Base.b
assert instance.x == X.x
# Test a class with slots.
class MySlots(object):
__slots__ = ["a", "b"]
MySlots = six.add_metaclass(Meta1)(MySlots)
assert MySlots.__slots__ == ["a", "b"]
instance = MySlots()
instance.a = "foo"
py.test.raises(AttributeError, setattr, instance, "c", "baz")
# Test a class with string for slots.
class MyStringSlots(object):
__slots__ = "ab"
MyStringSlots = six.add_metaclass(Meta1)(MyStringSlots)
assert MyStringSlots.__slots__ == "ab"
instance = MyStringSlots()
instance.ab = "foo"
py.test.raises(AttributeError, setattr, instance, "a", "baz")
py.test.raises(AttributeError, setattr, instance, "b", "baz")
class MySlotsWeakref(object):
__slots__ = "__weakref__",
MySlotsWeakref = six.add_metaclass(Meta)(MySlotsWeakref)
assert type(MySlotsWeakref) is Meta
@py.test.mark.skipif("sys.version_info[:2] < (2, 7)")
def test_assertCountEqual():
class TestAssertCountEqual(unittest.TestCase):
def test(self):
with self.assertRaises(AssertionError):
six.assertCountEqual(self, (1, 2), [3, 4, 5])
six.assertCountEqual(self, (1, 2), [2, 1])
TestAssertCountEqual('test').test()
@py.test.mark.skipif("sys.version_info[:2] < (2, 7)")
def test_assertRegex():
class TestAssertRegex(unittest.TestCase):
def test(self):
with self.assertRaises(AssertionError):
six.assertRegex(self, 'test', r'^a')
six.assertRegex(self, 'test', r'^t')
TestAssertRegex('test').test()
@py.test.mark.skipif("sys.version_info[:2] < (2, 7)")
def test_assertRaisesRegex():
class TestAssertRaisesRegex(unittest.TestCase):
def test(self):
with six.assertRaisesRegex(self, AssertionError, '^Foo'):
raise AssertionError('Foo')
with self.assertRaises(AssertionError):
with six.assertRaisesRegex(self, AssertionError, r'^Foo'):
raise AssertionError('Bar')
TestAssertRaisesRegex('test').test()
def test_python_2_unicode_compatible():
@six.python_2_unicode_compatible
class MyTest(object):
def __str__(self):
return six.u('hello')
def __bytes__(self):
return six.b('hello')
my_test = MyTest()
if six.PY2:
assert str(my_test) == six.b("hello")
assert unicode(my_test) == six.u("hello")
elif six.PY3:
assert bytes(my_test) == six.b("hello")
assert str(my_test) == six.u("hello")
assert getattr(six.moves.builtins, 'bytes', str)(my_test) == six.b("hello")
|
nazeehshoura/crawler
|
refs/heads/master
|
env/lib/python2.7/site-packages/django/core/handlers/base.py
|
49
|
from __future__ import unicode_literals
import logging
import sys
import types
from django import http
from django.conf import settings
from django.core import urlresolvers
from django.core import signals
from django.core.exceptions import MiddlewareNotUsed, PermissionDenied, SuspiciousOperation
from django.db import connections, transaction
from django.utils.encoding import force_text
from django.utils.module_loading import import_string
from django.utils import six
from django.views import debug
logger = logging.getLogger('django.request')
class BaseHandler(object):
# Changes that are always applied to a response (in this order).
response_fixes = [
http.fix_location_header,
http.conditional_content_removal,
]
def __init__(self):
self._request_middleware = self._view_middleware = self._template_response_middleware = self._response_middleware = self._exception_middleware = None
def load_middleware(self):
"""
Populate middleware lists from settings.MIDDLEWARE_CLASSES.
Must be called after the environment is fixed (see __call__ in subclasses).
"""
self._view_middleware = []
self._template_response_middleware = []
self._response_middleware = []
self._exception_middleware = []
request_middleware = []
for middleware_path in settings.MIDDLEWARE_CLASSES:
mw_class = import_string(middleware_path)
try:
mw_instance = mw_class()
except MiddlewareNotUsed:
continue
if hasattr(mw_instance, 'process_request'):
request_middleware.append(mw_instance.process_request)
if hasattr(mw_instance, 'process_view'):
self._view_middleware.append(mw_instance.process_view)
if hasattr(mw_instance, 'process_template_response'):
self._template_response_middleware.insert(0, mw_instance.process_template_response)
if hasattr(mw_instance, 'process_response'):
self._response_middleware.insert(0, mw_instance.process_response)
if hasattr(mw_instance, 'process_exception'):
self._exception_middleware.insert(0, mw_instance.process_exception)
# We only assign to this when initialization is complete as it is used
# as a flag for initialization being complete.
self._request_middleware = request_middleware
def make_view_atomic(self, view):
non_atomic_requests = getattr(view, '_non_atomic_requests', set())
for db in connections.all():
if (db.settings_dict['ATOMIC_REQUESTS']
and db.alias not in non_atomic_requests):
view = transaction.atomic(using=db.alias)(view)
return view
def get_response(self, request):
"Returns an HttpResponse object for the given HttpRequest"
        # Set up the default URL resolver for this thread.  This code sits
        # outside the try/except so that we don't get a spurious "unbound
        # local variable" exception if an exception is raised before
        # resolver is set.
urlconf = settings.ROOT_URLCONF
urlresolvers.set_urlconf(urlconf)
resolver = urlresolvers.RegexURLResolver(r'^/', urlconf)
try:
response = None
# Apply request middleware
for middleware_method in self._request_middleware:
response = middleware_method(request)
if response:
break
if response is None:
if hasattr(request, 'urlconf'):
# Reset url resolver with a custom urlconf.
urlconf = request.urlconf
urlresolvers.set_urlconf(urlconf)
resolver = urlresolvers.RegexURLResolver(r'^/', urlconf)
resolver_match = resolver.resolve(request.path_info)
callback, callback_args, callback_kwargs = resolver_match
request.resolver_match = resolver_match
# Apply view middleware
for middleware_method in self._view_middleware:
response = middleware_method(request, callback, callback_args, callback_kwargs)
if response:
break
if response is None:
wrapped_callback = self.make_view_atomic(callback)
try:
response = wrapped_callback(request, *callback_args, **callback_kwargs)
except Exception as e:
# If the view raised an exception, run it through exception
# middleware, and if the exception middleware returns a
# response, use that. Otherwise, reraise the exception.
for middleware_method in self._exception_middleware:
response = middleware_method(request, e)
if response:
break
if response is None:
raise
# Complain if the view returned None (a common error).
if response is None:
if isinstance(callback, types.FunctionType): # FBV
view_name = callback.__name__
else: # CBV
view_name = callback.__class__.__name__ + '.__call__'
raise ValueError("The view %s.%s didn't return an HttpResponse object. It returned None instead."
% (callback.__module__, view_name))
# If the response supports deferred rendering, apply template
# response middleware and then render the response
if hasattr(response, 'render') and callable(response.render):
for middleware_method in self._template_response_middleware:
response = middleware_method(request, response)
response = response.render()
except http.Http404 as e:
logger.warning('Not Found: %s', request.path,
extra={
'status_code': 404,
'request': request
})
if settings.DEBUG:
response = debug.technical_404_response(request, e)
else:
try:
callback, param_dict = resolver.resolve404()
response = callback(request, **param_dict)
except:
signals.got_request_exception.send(sender=self.__class__, request=request)
response = self.handle_uncaught_exception(request, resolver, sys.exc_info())
except PermissionDenied:
logger.warning(
'Forbidden (Permission denied): %s', request.path,
extra={
'status_code': 403,
'request': request
})
try:
callback, param_dict = resolver.resolve403()
response = callback(request, **param_dict)
except:
signals.got_request_exception.send(
sender=self.__class__, request=request)
response = self.handle_uncaught_exception(request,
resolver, sys.exc_info())
except SuspiciousOperation as e:
# The request logger receives events for any problematic request
# The security logger receives events for all SuspiciousOperations
security_logger = logging.getLogger('django.security.%s' %
e.__class__.__name__)
security_logger.error(
force_text(e),
extra={
'status_code': 400,
'request': request
})
try:
callback, param_dict = resolver.resolve400()
response = callback(request, **param_dict)
except:
signals.got_request_exception.send(
sender=self.__class__, request=request)
response = self.handle_uncaught_exception(request,
resolver, sys.exc_info())
except SystemExit:
# Allow sys.exit() to actually exit. See tickets #1023 and #4701
raise
except: # Handle everything else.
# Get the exception info now, in case another exception is thrown later.
signals.got_request_exception.send(sender=self.__class__, request=request)
response = self.handle_uncaught_exception(request, resolver, sys.exc_info())
try:
# Apply response middleware, regardless of the response
for middleware_method in self._response_middleware:
response = middleware_method(request, response)
response = self.apply_response_fixes(request, response)
except: # Any exception should be gathered and handled
signals.got_request_exception.send(sender=self.__class__, request=request)
response = self.handle_uncaught_exception(request, resolver, sys.exc_info())
response._closable_objects.append(request)
return response
def handle_uncaught_exception(self, request, resolver, exc_info):
"""
Processing for any otherwise uncaught exceptions (those that will
generate HTTP 500 responses). Can be overridden by subclasses who want
customised 500 handling.
Be *very* careful when overriding this because the error could be
caused by anything, so assuming something like the database is always
available would be an error.
"""
if settings.DEBUG_PROPAGATE_EXCEPTIONS:
raise
logger.error('Internal Server Error: %s', request.path,
exc_info=exc_info,
extra={
'status_code': 500,
'request': request
}
)
if settings.DEBUG:
return debug.technical_500_response(request, *exc_info)
# If Http500 handler is not installed, re-raise last exception
if resolver.urlconf_module is None:
six.reraise(*exc_info)
# Return an HttpResponse that displays a friendly error message.
callback, param_dict = resolver.resolve500()
return callback(request, **param_dict)
def apply_response_fixes(self, request, response):
"""
Applies each of the functions in self.response_fixes to the request and
response, modifying the response in the process. Returns the new
response.
"""
for func in self.response_fixes:
response = func(request, response)
return response
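# --- Hedged illustration (not part of Django itself) -----------------------
# load_middleware() above discovers middleware purely by the presence of the
# five hook methods, so a middleware class only needs to define the hooks it
# cares about.  ExampleLoggingMiddleware is a hypothetical class added here
# for illustration; it is not provided by Django.
class ExampleLoggingMiddleware(object):
    def process_request(self, request):
        logger.debug('request started: %s', request.path)
        return None  # returning None lets normal request processing continue
    def process_response(self, request, response):
        logger.debug('request finished: %s (%s)', request.path, response.status_code)
        return response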
|
xlhtc007/osqf2015
|
refs/heads/master
|
model.py
|
2
|
import pandas as pd
import numpy as np
import datetime
import blaze as bz
from cytoolz import sliding_window, count
from scipy.stats import chi2
import logging
from bokeh.models import ColumnDataSource
# def quantile(scenarios, level):
# return np.percentile(scenarios, 100-level, interpolation='linear')
class VaR(object):
"""docstring for VaR"""
def __init__(self, confidence_level):
self.level = confidence_level
def __call__(self, scenarios, neutral_scenario=0):
pnls = scenarios - neutral_scenario
return - np.percentile(pnls, 100-self.level, interpolation='linear'), pnls
def likelihood_statistic(self, n_outliers, n_obs):
p_obs = n_outliers * 1.0 / n_obs
p_expected = 1. - self.level
stat_expected = p_expected ** n_outliers * (1-p_expected) ** (n_obs-n_outliers)
stat_obs = p_obs ** n_outliers * (1-p_obs) ** (n_obs - n_outliers)
return -2 * np.log(stat_expected / stat_obs)
def confidence(self, likelihood_stat):
p_value = chi2.cdf(likelihood_stat, 1)
return p_value
@classmethod
def traffic_light(cls, x, upper=0.99, lower=0.95):
lights = np.ceil(np.clip((x - lower) / (upper-lower), 0., 1.01)).astype('int')
return lights
class RiskFactor(object):
"""docstring for RiskFactor"""
def __init__(self, ts):
# super(RiskFactor, self).__init__()
self.ts = ts
def logreturns(self, n_days=1):
self.ts['LogReturns'] = np.log( self.ts.Value.pct_change(periods=n_days) + 1)
def devol(self, _lambda=0.06):
_com = (1 - _lambda) / _lambda
self.ts['Vola'] = pd.ewmstd( self.ts.LogReturns, com=_com, ignore_na=True)
self.ts['DevolLogReturns'] = self.ts.LogReturns / self.ts.Vola
def fhs(self, n_scenarios=250, start_date=None, end_date=None):
x = sliding_window(n_scenarios+1, range(len(self.ts.index)))
scenarios = np.zeros((len(self.ts.index), n_scenarios+1))
for i, el in enumerate(x):
l = list(el)
cur_idx, hist_idx = l[-1], l[:-1]
neutral = self.ts.Value.values[cur_idx]
ret = self.ts.DevolLogReturns.values[hist_idx]
vol = self.ts.Vola.values[cur_idx]
scenarios[cur_idx, 1:] = self.scenario_values(ret, neutral, vol)
scenarios[cur_idx, 0] = neutral
return scenarios
@classmethod
def from_blaze(cls, filename, date_col='Date', value_col='Close'):
df = bz.odo(filename, pd.DataFrame)[[date_col, value_col]] #[1000:1100]
df = df.rename(columns = {value_col: 'Value'})
ts = df.set_index(date_col)
return cls(ts)
@classmethod
def scenario_values(cls, returns, neutral, current_vola):
scenarios = neutral * np.exp(current_vola * returns)
return scenarios
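# --- Hedged usage sketch (not part of the original module) -----------------
# Ties the classes above together: build a risk factor from a local data
# file, devolatilise its returns, run filtered historical simulation, then
# read a 99% VaR for the most recent date.  The file name is an assumption.
def _example_fhs_var(filename='notebooks/db2.bcolz'):
    rf = RiskFactor.from_blaze(filename)
    rf.logreturns()
    rf.devol()
    scenarios = rf.fhs(n_scenarios=250)    # column 0 holds the neutral scenario
    var99 = VaR(99.)
    value_at_risk, pnls = var99(scenarios[-1, 1:], scenarios[-1, 0])
    return value_at_risk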
class CurrencyRiskFactor(RiskFactor):
"""docstring for CurrencyRiskFactor"""
def __init__(self, *args):
super(CurrencyRiskFactor, self).__init__(*args)
@classmethod
def from_blaze(clz, filename, date_col='Date', value_col='Rate'):
return super(CurrencyRiskFactor, clz).from_blaze(filename, date_col=date_col, value_col=value_col)
class Future(object):
"""docstring for Future"""
def __init__(self, ul, ccy):
super(Future, self).__init__()
self.ccy = ccy
self.underlying = ul
# TODO align risk factors
def pv(self):
pass
class StockModel(object):
"""docstring for StockModel"""
def __init__(self):
super(StockModel, self).__init__()
file_name = "notebooks/db2.bcolz"
self.df = bz.odo(file_name, pd.DataFrame)[['Date', 'Close']] #[1000:1100]
self.devol()
self.returns_df = None
def devol(self, _lambda=0.06, n_days=1):
_com = (1 - _lambda) / _lambda
self.df['LogReturns'] = np.log(self.df.Close.pct_change(periods=n_days) + 1)
self.df['Vola'] = pd.ewmstd( self.df.LogReturns, com=_com, ignore_na=True)[2:]
self.df['DevolLogReturns'] = self.df.LogReturns / self.df.Vola
self.df.set_index('Date', inplace=True)
def compute_scenarios(self, d, n_scenarios=750):
# identify returns
dates = pd.to_datetime(d, unit='ms')
max_date = dates[0].date()
min_date = max_date.replace(year=max_date.year-3)
logging.info('Computing returns between ') #, str(max_date), ' and ', str(min_date))
self.returns_df = self.df[min_date:max_date].ix[-n_scenarios-1:]
neutral, vola = self.returns_df.ix[max_date][['Close', 'Vola']]
scenarios = neutral * np.exp( vola * self.returns_df.ix[:-1].DevolLogReturns )
return scenarios, neutral
def compute_var(self, scenarios, neutral_scenario, level=99.):
pnls = scenarios - neutral_scenario
return - np.percentile(pnls, 100-level, interpolation='linear'), pnls
def compute_data_source(self):
source = ColumnDataSource(self.df.reset_index()[2:])
source.add(self.df[2:].LogReturns.ge(0).map(lambda x: "steelblue" if x else "red"), 'LogReturnsColor')
source.add(self.df[2:].DevolLogReturns / 2., 'y_mids')
return source
def compute_histo_source(self, source, scenarios, bins=30):
hist, edges = np.histogram(scenarios, density=True, bins=bins)
source.data = dict(top=hist, bottom=0, left=edges[:-1], right = edges[1:])
|
benoitsteiner/tensorflow-xsmm
|
refs/heads/master
|
tensorflow/contrib/signal/python/kernel_tests/window_ops_test.py
|
48
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for window_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import numpy as np
from tensorflow.contrib.signal.python.kernel_tests import test_util
from tensorflow.contrib.signal.python.ops import window_ops
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.platform import test
def _scipy_raised_cosine(length, symmetric=True, a=0.5, b=0.5):
"""A simple implementation of a raised cosine window that matches SciPy.
https://en.wikipedia.org/wiki/Window_function#Hann_window
https://github.com/scipy/scipy/blob/v0.14.0/scipy/signal/windows.py#L615
Args:
length: The window length.
symmetric: Whether to create a symmetric window.
a: The alpha parameter of the raised cosine window.
b: The beta parameter of the raised cosine window.
Returns:
A raised cosine window of length `length`.
"""
if length == 1:
return np.ones(1)
odd = length % 2
if not symmetric and not odd:
length += 1
window = a - b * np.cos(2.0 * np.pi * np.arange(length) / (length - 1))
if not symmetric and not odd:
window = window[:-1]
return window
class WindowOpsTest(test.TestCase):
def setUp(self):
self._window_lengths = [1, 2, 3, 4, 5, 31, 64, 128]
self._dtypes = [(dtypes.float16, 1e-2),
(dtypes.float32, 1e-6),
(dtypes.float64, 1e-9)]
def _compare_window_fns(self, np_window_fn, tf_window_fn):
with self.test_session(use_gpu=True):
for window_length in self._window_lengths:
for periodic in [False, True]:
for tf_dtype, tol in self._dtypes:
np_dtype = tf_dtype.as_numpy_dtype
expected = np_window_fn(window_length,
symmetric=not periodic).astype(np_dtype)
actual = tf_window_fn(window_length, periodic=periodic,
dtype=tf_dtype).eval()
self.assertAllClose(expected, actual, tol, tol)
def test_hann_window(self):
"""Check that hann_window matches scipy.signal.hann behavior."""
# The Hann window is a raised cosine window with parameters alpha=0.5 and
# beta=0.5.
# https://en.wikipedia.org/wiki/Window_function#Hann_window
self._compare_window_fns(
functools.partial(_scipy_raised_cosine, a=0.5, b=0.5),
window_ops.hann_window)
def test_hamming_window(self):
"""Check that hamming_window matches scipy.signal.hamming's behavior."""
# The Hamming window is a raised cosine window with parameters alpha=0.54
# and beta=0.46.
# https://en.wikipedia.org/wiki/Window_function#Hamming_window
self._compare_window_fns(
functools.partial(_scipy_raised_cosine, a=0.54, b=0.46),
window_ops.hamming_window)
def test_constant_folding(self):
"""Window functions should be constant foldable for constant inputs."""
for window_fn in (window_ops.hann_window, window_ops.hamming_window):
for dtype, _ in self._dtypes:
for periodic in [False, True]:
g = ops.Graph()
with g.as_default():
window = window_fn(100, periodic=periodic, dtype=dtype)
rewritten_graph = test_util.grappler_optimize(g, [window])
self.assertEqual(1, len(rewritten_graph.node))
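# --- Hedged usage sketch (not part of the test suite) ----------------------
# Evaluates one of the ops exercised above outside of a TestCase.  The
# contrib.signal API here is graph-based, so the window tensor is run through
# an explicit Session; the helper name below is illustrative only.
def _example_hann_window(length=64):
    from tensorflow.python.client import session
    g = ops.Graph()
    with g.as_default():
        window = window_ops.hann_window(length, periodic=True,
                                        dtype=dtypes.float32)
    with session.Session(graph=g) as sess:
        return sess.run(window)  # numpy array of shape (length,)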
if __name__ == '__main__':
test.main()
|
MaximeGLegault/StrategyIA
|
refs/heads/dev
|
RULEngine/Util/kalman_filter/__init__.py
|
12133432
| |
ramiro/scrapy
|
refs/heads/master
|
scrapy/utils/__init__.py
|
12133432
| |
Smarsh/django
|
refs/heads/master
|
tests/modeltests/m2o_recursive/__init__.py
|
12133432
| |
jgravois/ArcREST
|
refs/heads/master
|
src/arcrest/manageorg/_content.py
|
1
|
from ..security.security import OAuthSecurityHandler, AGOLTokenSecurityHandler, PortalTokenSecurityHandler
from .._abstract.abstract import BaseAGOLClass
from parameters import ItemParameter, BaseParameters, AnalyzeParameters, PublishCSVParameters
import urllib
import urlparse
import json
import os
import mmap
import tempfile
from os.path import splitext, basename
########################################################################
class Content(BaseAGOLClass):
"""
The Portal Content Root operation consists of items, user and group
content, and feature operations. All resources and operations (other
than publicly accessible items) under this URI require an authenticated
user.
"""
_baseURL = None
_url = None
_securityHandler = None
_proxy_port = None
_proxy_url = None
#----------------------------------------------------------------------
def __init__(self, url, securityHandler,
proxy_url=None,
proxy_port=None):
"""Constructor"""
if url.lower().find("/content") < 0:
self._url = url + "/content"
else:
self._url = url
self._securityHandler = securityHandler
if not securityHandler is None:
self._referer_url = securityHandler.referer_url
self._proxy_port = proxy_port
self._proxy_url = proxy_url
#----------------------------------------------------------------------
@property
def contentRoot(self):
""" returns the content root """
return self._url
#----------------------------------------------------------------------
@property
def featureContent(self):
""" returns an instance of the feature class """
return FeatureContent(url=self._url + "/features",
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
def __getCurrentUsername(self):
"""gets the current username"""
from . import Administration, _portals
admin = Administration(url=self._securityHandler.org_url,
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
return admin.portals().portalSelf().user['username']
#----------------------------------------------------------------------
def getUserContent(self, username=None, folderId=None):
"""
        The user's content consists of items either in the home folder for the user
e.g. /content/users/<username>, or in a subfolder of the home
folder with the given folder ID. Multilevel folders are not
supported. You can also see the Quick reference topic for
additional information on this.
Items in a folder are stored by reference and are not physically in
a folder. Rather, they're stored as links to the original item, e.g.
/content/items/<itemId>.
Inputs:
            username - name of user to query
            folderId - optional ID of the subfolder to query
"""
if username is None:
username = self.__getCurrentUsername()
url = self._url + "/users/%s" % username
if folderId is not None:
url += "/%s" % folderId
params = {"f" : "json"}
return self._do_get(url=url,
param_dict=params,
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
#----------------------------------------------------------------------
def getFolderID(self, name, userContent=None):
"""
        This function retrieves the ID of an existing folder
        Inputs:
            name - the name of the folder
            userContent - a list of user content
        Output:
            string - ID of the folder, None if no folder name is specified
            or the folder is not found
"""
if not name == None and not name == '':
if userContent is None:
userContent = self.getUserContent()
folderID = None
if 'folders' in userContent:
folders = userContent['folders']
for folder in folders:
if folder['title'] == name:
folderID = folder['id']
break
del folders
return folderID
else:
return None
#----------------------------------------------------------------------
def getItemID(self,title=None, name=None, itemType=None,userContent=None,folderId=None,username=None):
"""
        This function retrieves the item ID if the item exists
        Inputs:
            title - the title of the item
            name - the name of the item
            itemType - optional type used to filter matching items
            userContent - a list of user content
        Output:
            string - ID of the item, None if the item does not exist
"""
itemID = None
if name == None and title == None:
raise AttributeError('Name or Title needs to be specified')
if userContent is None:
userContent = self.getUserContent(username=username,folderId=folderId)
if 'items' in userContent:
items = userContent['items']
for item in items:
if title is None and not name is None:
if item['name'] == name and (itemType is None or item['type'] == itemType):
itemID = item['id']
break
elif not title is None and name is None:
if item['title'] == title and (itemType is None or item['type'] == itemType):
itemID = item['id']
break
else:
if item['name'] == name and item['title'] == title and (itemType is None or item['type'] == itemType):
itemID = item['id']
break
del items
return itemID
#----------------------------------------------------------------------
def getItem(self,itemId, username,folderId=None):
"""
        This function retrieves an item owned by a user
        Inputs:
            itemId - the ID of the item to retrieve
            username - the owner of the item
            folderId - optional ID of the folder containing the item
        Output:
            Item - the requested item object
"""
if folderId is not None:
url = "%s/users/%s/%s/items" % (self._url, username, folderId)
else:
url = "%s/users/%s/items" % (self._url, username)
return Item(itemId=itemId,
url=url,
securityHandler=self._securityHandler)
#----------------------------------------------------------------------
def groupContent(self, groupId):
"""
The group's content provides access to the items that are shared
with the group.
Group items are stored by reference and are not physically stored
in a group. Rather, they are stored as links to the original item
in the item resource (/content/items/<itemId>).
Available only to the users of the group and the administrator of
the organization to which the group belongs, if any.
Inputs:
groupId - unique group identifier
"""
url = self._url + "/groups/%s" % groupId
params = {
"f" : "json",
"token" : self._securityHandler.token
}
return self._do_get(url=url,
param_dict=params,
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
#----------------------------------------------------------------------
def item(self, itemId):
""" returns the Item class for a given item id """
return Item(itemId=itemId,
url=self._url,
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port,
initialize=True)
#----------------------------------------------------------------------
def usercontent(self, username=None):
"""
returns the user content class for a given user
"""
if username is None:
username = self.__getCurrentUsername()
return UserContent(username=username,
url=self._url,
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
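# --- Hedged usage sketch (not part of the original package) -----------------
# How the Content root above might be driven.  The org URL and credentials are
# placeholders, and AGOLTokenSecurityHandler's signature (username/password)
# is assumed from typical usage rather than confirmed by this file.
def _example_list_user_content():
    sh = AGOLTokenSecurityHandler(username="someuser", password="secret")
    content = Content(url="https://www.arcgis.com/sharing/rest",
                      securityHandler=sh)
    return content.getUserContent()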
########################################################################
class FeatureContent(BaseAGOLClass):
"""
Feature Content Root is the parent resource for feature operations such
as Analyze and Generate.
"""
_baseURL = None
_url = None
_securityHandler = None
_proxy_port = None
_proxy_url = None
#----------------------------------------------------------------------
def __init__(self, url, securityHandler,
proxy_url=None,
proxy_port=None):
"""Constructor"""
if url.lower().find("/features") < 0:
self._url = url + "/features"
else:
self._url = url
self._securityHandler = securityHandler
if not securityHandler is None:
self._referer_url = securityHandler.referer_url
self._proxy_port = proxy_port
self._proxy_url = proxy_url
#----------------------------------------------------------------------
@property
def featureContentRoot(self):
""" returns the feature content root url """
return self._url
#----------------------------------------------------------------------
def analyze(self,
itemId=None,
filePath=None,
text=None,
fileType="csv",
analyzeParameters=None):
"""
The Analyze call helps a client analyze a CSV file prior to
publishing or generating features using the Publish or Generate
operation, respectively.
Analyze returns information about the file including the fields
present as well as sample records. Analyze attempts to detect the
presence of location fields that may be present as either X,Y
fields or address fields.
Analyze packages its result so that publishParameters within the
JSON response contains information that can be passed back to the
server in a subsequent call to Publish or Generate. The
publishParameters subobject contains properties that describe the
resulting layer after publishing, including its fields, the desired
renderer, and so on. Analyze will suggest defaults for the renderer.
In a typical workflow, the client will present portions of the
Analyze results to the user for editing before making the call to
Publish or Generate.
If the file to be analyzed currently exists in the portal as an
item, callers can pass in its itemId. Callers can also directly
post the file. In this case, the request must be a multipart post
request pursuant to IETF RFC1867. The third option for text files
is to pass the text in as the value of the text parameter.
Inputs:
           itemId - The ID of the item to be analyzed.
           filePath - The file to be analyzed.
           text - The text in the file to be analyzed.
           fileType - The type of input file.
           analyzeParameters - An AnalyzeParameters object that provides
                               geocoding information
"""
files = []
url = self._url + "/analyze"
params = {
"f" : "json"
}
fileType = "csv"
params["fileType"] = fileType
if analyzeParameters is not None and\
isinstance(analyzeParameters, AnalyzeParameters):
params['analyzeParameters'] = analyzeParameters.value
if not (filePath is None) and \
os.path.isfile(filePath):
params['text'] = open(filePath, 'rb').read()
return self._do_post(url=url, param_dict=params,
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
elif itemId is not None:
params["fileType"] = fileType
params['itemId'] = itemId
return self._do_post(url=url, param_dict=params,
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
else:
raise AttributeError("either an Item ID or a file path must be given.")
#----------------------------------------------------------------------
def generate(self,
publishParameters,
itemId=None,
filePath=None,
fileType=None
):
"""
The Generate call helps a client generate features from a CSV file
or a shapefile.
CSV files that contain location fields (either address fields or X,
Y fields) are spatially enabled by the Generate operation.
The result of Generate is a JSON feature collection.
If the file to be analyzed already exists in the portal as an item,
callers can pass in its itemId. Callers can also directly post the
file. In this case, the request must be a multipart post request
pursuant to IETF RFC1867. The third option for text files is to
pass the text in as the value of the text parameter.
Generate requires that the caller pass in publish parameters that
describe the layers in the feature collection to be generated.
Inputs:
publishParameters - A JSON object describing the layer and
service to be created as part of the Publish
operation. The appropriate value for publish
parameters depends on the file type being
published. (Python Dictionary)
itemId - unique id of item to generate
filePath - path to zipped shapefile or csv
fileType - either shapefile or csv
"""
allowedFileTypes = ['shapefile', 'csv']
files = []
url = self._url + "/generate"
params = {
"f" : "json"
}
params['publishParameters'] = publishParameters
parsed = urlparse.urlparse(url)
if fileType.lower() not in allowedFileTypes and \
filePath is not None:
raise AttributeError("fileType must be either shapefile or csv when specifying a file")
if filePath is not None:
params['type'] = fileType
if fileType.lower() == "csv":
params['text'] = open(filePath,'rb').read()
return self._do_post(url=url, param_dict=params,
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
else:
files.append(('file', filePath, os.path.basename(filePath)))
res = self._post_multipart(host=parsed.hostname,
securityHandler=self._securityHandler,
port=parsed.port,
selector=parsed.path,
fields=params,
files=files,
ssl=parsed.scheme.lower() == 'https',
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
return res
elif itemId is not None:
params["fileType"] = fileType
params['itemId'] = itemId
return self._do_post(url=url, param_dict=params,
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
########################################################################
class Item(BaseAGOLClass):
"""
Modifies existing items based on an item Id.
    This class is meant for administrators/owners to modify existing
contents on the AGOL/Portal site.
"""
_baseURL = None
_url = None
_securityHandler = None
_proxy_port = None
_proxy_url = None
_itemId = None
# From Service
_appCategories = None
_uploaded = None
_properties = None
_documentation = None
_item = None
_id = None
_owner = None
_created = None
_modified = None
_lastModified = None
_name = None
_title = None
_url = None
_itemType = None
_guid = None
_typeKeywords = None
_description = None
_tags = None
_snippet = None
_thumbnail = None
_extent = None
_spatialReference = None
_accessInformation = None
_licenseInfo = None
_culture = None
_access = None
_industries = None
_languages = None
_largeThumbnail = None
_banner = None
_screenshots = None
_listed = None
_ownerFolder = None
_size = None
_protected = None
_commentsEnabled = None
_numComments = None
_numRatings = None
_avgRating = None
_numViews = None
_orgId = None
_type = None
_json = None
_json_dict = None
_sourceUrl = None
_itemControl = None
_sharing = None
#----------------------------------------------------------------------
def __init__(self, itemId, url,
securityHandler,
proxy_url=None,
proxy_port=None,
initialize=False):
"""Constructor"""
if url.lower().endswith("/items") == False:
self._baseUrl = url + "/items"
else:
self._baseUrl = url
self._itemId = itemId
self._securityHandler = securityHandler
        if securityHandler is not None:
self._referer_url = securityHandler.referer_url
self._proxy_port = proxy_port
self._proxy_url = proxy_url
if initialize:
self.__init()
#----------------------------------------------------------------------
def __init(self):
""" loads the data into the class """
param_dict = {"f": "json"
}
json_dict = self._do_get(self._baseUrl + "/%s" % self._itemId, param_dict,
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
self._json = json.dumps(json_dict)
self._json_dict = json_dict
attributes = [attr for attr in dir(self)
if not attr.startswith('__') and \
not attr.startswith('_')]
for k,v in json_dict.iteritems():
if k in attributes:
setattr(self, "_"+ k, json_dict[k])
elif k == 'error':
print json_dict[k]
else:
print k, " - attribute not implemented in the class _content.Item."
del k,v
#----------------------------------------------------------------------
def __str__(self):
"""returns the object as json"""
if self._json is None:
self.__init()
return self._json
#----------------------------------------------------------------------
def __iter__(self):
"""returns iterable object for class"""
if self._json_dict is None:
self.__init()
for k,v in self._json_dict.iteritems():
yield (k,v)
#----------------------------------------------------------------------
@property
def itemParameters(self):
""" returns the current Item's ItemParameter object """
ip = ItemParameter()
ip.accessInformation = self.accessInformation
ip.culture = self.culture
ip.description = self.description
#ip.extent = self.extent
ip.licenseInfo = self.licenseInfo
ip.snippet = self.snippet
ip.spatialReference = self.spatialReference
ip.tags = ",".join(self.tags)
ip.metadata = self._baseUrl.replace("http://", "https://") + \
"/%s/info/metadata/metadata.xml?token=%s" % (self._itemId, self._securityHandler.token)
if self.thumbnail is not None:
ip.thumbnailurl = self._baseUrl.replace("http://", "https://") + \
"/%s/info/%s?token=%s" % (self._itemId,
self.thumbnail,
self._securityHandler.token)
ip.title = self.title
ip.type = self.type
ip.typeKeywords = self.typeKeywords
return ip
#----------------------------------------------------------------------
@property
def itemId(self):
""" get/set id passed by the user """
return self._itemId
#----------------------------------------------------------------------
@property
def sharing(self):
""" get/set sharing """
return self._sharing
#----------------------------------------------------------------------
@itemId.setter
def itemId(self, value):
""" get/set id passed by the user """
if value != self._itemId and \
value is not None:
self._itemId = value
self.__init()
#----------------------------------------------------------------------
@property
def type(self):
""" returns the item type """
if self._type is None:
self.__init()
return self._type
#----------------------------------------------------------------------
@property
def modified(self):
""" returns last modified in UNIX time """
if self._modified is None:
self.__init()
return self._modified
#----------------------------------------------------------------------
@property
def guid(self):
""" returns the guid of the item """
if self._guid is None:
self.__init()
return self._guid
#----------------------------------------------------------------------
@property
def uploaded(self):
""" date the item is uploaded in UNIX time """
if self._uploaded is None:
self.__init()
return self._uploaded
#----------------------------------------------------------------------
@property
def properties(self):
""" returns the items properties """
if self._properties is None:
self.__init()
return self._properties
#----------------------------------------------------------------------
@property
def documentation(self):
""" returns the items documentation """
if self._documentation is None:
self.__init()
return self._documentation
#----------------------------------------------------------------------
@property
def item(self):
""" returns the item """
if self._item is None:
self.__init()
return self._item
#----------------------------------------------------------------------
@property
def id(self):
""" the unique ID for this item """
if self._id is None:
self.__init()
return self._id
#----------------------------------------------------------------------
@property
def owner(self):
"""the username of the owner """
if self._owner is None:
self.__init()
return self._owner
#----------------------------------------------------------------------
@property
def created(self):
""" date the item was created in UNIX time (milliseconds) """
if self._created is None:
self.__init()
return self._created
#----------------------------------------------------------------------
@property
def lastModified(self):
""" the date the item was last modified in UNIX time """
if self._lastModified is None:
self.__init()
return self._lastModified
#----------------------------------------------------------------------
@property
def appCategories(self):
""" displays the application category"""
if self._appCategories is None:
self.__init()
return self._appCategories
#----------------------------------------------------------------------
@property
def name(self):
""" file name of the item for file types """
if self._name is None:
self.__init()
return self._name
#----------------------------------------------------------------------
@property
def title(self):
""" title of the item """
if self._title is None:
self.__init()
return self._title
#----------------------------------------------------------------------
@property
def url(self):
"""" the URL for the resource """
if self._url is None:
self.__init()
return self._url
#----------------------------------------------------------------------
@property
def itemType(self):
""" GIS content type of the item """
if self._type is None:
self.__init()
return self._type
#----------------------------------------------------------------------
@property
def typeKeywords(self):
"""
a set of keywords that further describes the type of this item
"""
if self._typeKeywords is None:
self.__init()
return self._typeKeywords
#----------------------------------------------------------------------
@property
def description(self):
"""
item description
"""
if self._description is None:
self.__init()
return self._description
#----------------------------------------------------------------------
@property
def tags(self):
""" user defined tags that describe the item """
if self._tags is None:
self.__init()
return self._tags
#----------------------------------------------------------------------
@property
def snippet(self):
""" a short summary description of item """
if self._snippet is None:
self.__init()
return self._snippet
#----------------------------------------------------------------------
@property
def thumbnail(self):
""" URL to the thumbnail used for the item """
if self._thumbnail is None:
self.__init()
return self._thumbnail
#----------------------------------------------------------------------
@property
def sourceUrl(self):
""" Source url for this item """
if self._sourceUrl is None:
self.__init()
return self._sourceUrl
#----------------------------------------------------------------------
def saveThumbnail(self,fileName,filePath):
""" URL to the thumbnail used for the item """
if self._thumbnail is None:
self.__init()
param_dict = {}
if self._thumbnail is not None:
imgUrl = self._baseUrl + "/" + self._itemId + "/info/" + self._thumbnail
disassembled = urlparse.urlparse(imgUrl)
onlineFileName, file_ext = splitext(basename(disassembled.path))
fileNameSafe = "".join(x for x in fileName if x.isalnum()) + file_ext
result = self._download_file(self._baseUrl + "/" + self._itemId + "/info/" + self._thumbnail,
save_path=filePath, file_name=fileNameSafe, param_dict=param_dict,
securityHandler=self._securityHandler,
proxy_url=None,
proxy_port=None)
return result
else:
return None
#----------------------------------------------------------------------
@property
def extent(self):
""" bounding rectangle for the item in WGS84 """
if self._extent is None:
self.__init()
return self._extent
#----------------------------------------------------------------------
@property
def spatialReference(self):
"""The coordinate system of the item."""
if self._spatialReference is None:
self.__init()
return self._spatialReference
#----------------------------------------------------------------------
@property
def accessInformation(self):
"""Information on the source of the item."""
if self._accessInformation is None:
self.__init()
return self._accessInformation
#----------------------------------------------------------------------
@property
def licenseInfo(self):
"""license information or restrictions"""
if self._licenseInfo is None:
self.__init()
return self._licenseInfo
#----------------------------------------------------------------------
@property
def culture(self):
""" item locale information """
if self._culture is None:
self.__init()
return self._culture
#----------------------------------------------------------------------
@property
def access(self):
""" indicates the level of access to the item """
if self._access is None:
self.__init()
return self._access
#----------------------------------------------------------------------
@property
def industries(self):
"""primarily applies to industries associated with the app """
if self._industries is None:
self.__init()
return self._industries
#----------------------------------------------------------------------
@property
def languages(self):
""" languages assocaited with application """
if self._languages is None:
self.__init()
return self._languages
#----------------------------------------------------------------------
@property
def largeThumbnail(self):
""" URL to thumbnail for application """
if self._largeThumbnail is None:
self.__init()
return self._largeThumbnail
#----------------------------------------------------------------------
@property
def banner(self):
""" URL to the banner used for the application """
if self._banner is None:
self.__init()
return self._banner
#----------------------------------------------------------------------
@property
def screenshots(self):
""" URL to the screenshots used by the application """
if self._screenshots is None:
self.__init()
return self._screenshots
#----------------------------------------------------------------------
@property
def listed(self):
""" if true, item is in the marketplace """
if self._listed is None:
self.__init()
return self._listed
#----------------------------------------------------------------------
@property
def ownerFolder(self):
""" ID of the folder in which the owner stored the item """
if self._ownerFolder is None:
self.__init()
return self._ownerFolder
#----------------------------------------------------------------------
@property
def size(self):
""" size of the item """
if self._size is None:
self.__init()
return self._size
#----------------------------------------------------------------------
@property
def protected(self):
""" proctects the item from deletion """
if self._protected is None:
self.__init()
return self._protected
#----------------------------------------------------------------------
@property
def commentsEnabled(self):
""" indicates if comments are allowed on the item """
if self._commentsEnabled is None:
self.__init()
return self._commentsEnabled
#----------------------------------------------------------------------
@property
def numComments(self):
"""Number of comments on the item."""
if self._numComments is None:
self.__init()
return self._numComments
#----------------------------------------------------------------------
@property
def numRatings(self):
""" number of ratings on the item """
if self._numRatings is None:
self.__init()
return self._numRatings
#----------------------------------------------------------------------
@property
def avgRating(self):
""" average rating """
if self._avgRating is None:
self.__init()
return self._avgRating
#----------------------------------------------------------------------
@property
def numViews(self):
""" numbers of views of the item """
if self._numViews is None:
self.__init()
return self._numViews
    #----------------------------------------------------------------------
    @property
def orgId(self):
""" organization ID of the item """
if self._orgId is None:
self.__init()
return self._orgId
    #----------------------------------------------------------------------
    @property
def itemControl(self):
""" item control """
if self._itemControl is None:
self.__init()
return self._itemControl
#----------------------------------------------------------------------
def addComment(self, comment):
""" adds a comment to a given item. Must be authenticated """
url = self._baseUrl + "/%s/addComment" % self._itemId
params = {
"f" : "json",
"comment" : comment
}
return self._do_post(url, params, proxy_port=self._proxy_port,
securityHandler=self._securityHandler,
proxy_url=self._proxy_url)
#----------------------------------------------------------------------
def addRating(self, rating=5.0):
"""Adds a rating to an item between 1.0 and 5.0"""
if rating > 5.0:
rating = 5.0
elif rating < 1.0:
rating = 1.0
url = self._baseUrl + "/%s/addRating" % self._itemId
params = {
"f": "json",
"rating" : "%s" % rating
}
return self._do_post(url,
params,
proxy_port=self._proxy_port,
securityHandler=self._securityHandler,
proxy_url=self._proxy_url)
#----------------------------------------------------------------------
def deleteComment(self, commentId):
""" removes a comment from an Item
Inputs:
commentId - unique id of comment to remove
"""
url = self._baseUrl + "/%s/comments/%s/delete" % (self._itemId, commentId)
params = {
"f": "json",
}
return self._do_post(url,
params,
securityHandler=self._securityHandler,
proxy_port=self._proxy_port,
proxy_url=self._proxy_url)
#----------------------------------------------------------------------
def deleteRating(self):
""""""
url = self._baseUrl + "/%s/deleteRating" % self._itemId
params = {
"f": "json",
}
return self._do_post(url,
params,
securityHandler=self._securityHandler,
proxy_port=self._proxy_port,
proxy_url=self._proxy_url)
#----------------------------------------------------------------------
@property
def groups(self):
""" returns a list of groups the item is shared with. """
url = self._baseUrl + "/%s/groups" % self._itemId
params = {
"f": "json",
}
return self._do_get(url,
params,
securityHandler=self._securityHandler,
proxy_port=self._proxy_port,
proxy_url=self._proxy_url)
#----------------------------------------------------------------------
def itemComment(self, commentId):
""" returns details of a single comment """
url = self._baseUrl + "/%s/comments/%s" % (self._itemId, commentId)
params = {
"f": "json"
}
return self._do_get(url,
params,
securityHandler=self._securityHandler,
proxy_port=self._proxy_port,
proxy_url=self._proxy_url)
#----------------------------------------------------------------------
@property
def itemComments(self):
""" returns all comments for a given item """
url = self._baseUrl + "/%s/comments/" % self._itemId
params = {
"f": "json"
}
return self._do_get(url,
params,
securityHandler=self._securityHandler,
proxy_port=self._proxy_port,
proxy_url=self._proxy_url)
#----------------------------------------------------------------------
def itemData(self, f=None, savePath=None):
""" returns data for an item on agol/portal
Inputs:
f - output format either zip of json
savePath - location to save the file
Output:
either JSON/text or filepath
"""
params = {
}
if f is not None and \
f.lower() in ['zip', 'json']:
params['f'] = f
url = self._baseUrl + "/%s/data" % self._itemId
if self.type in ["Shapefile", "CityEngine Web Scene", "Web Scene", "KML",
"Code Sample",
"Code Attachment", "Operations Dashboard Add In",
"CSV", "CAD Drawing", "Service Definition",
"Microsoft Word", "Microsoft Powerpoint",
"Microsoft Excel", "PDF", "Image",
"Visio Document", "iWork Keynote", "iWork Pages",
"iWork Numbers", "Map Document", "Map Package",
"Basemap Package", "Tile Package", "Project Package",
"Task File", "ArcPad Package", "Explorer Map",
"Globe Document", "Scene Document", "Published Map",
"Map Template", "Windows Mobile Package", "Pro Map",
"Layout", "Layer", "Layer Package", "File Geodatabase",
"Explorer Layer", "Geoprocessing Package", "Geoprocessing Sample",
"Locator Package", "Rule Package", "Workflow Manager Package",
"Desktop Application", "Desktop Application Template",
"Code Sample", "Desktop Add In", "Explorer Add In",
"ArcGIS Desktop Add-In", "ArcGIS Explorer Add-In",
"ArcGIS Explorer application configuration", "ArcGIS Explorer document",
]:
if savePath is None:
                raise AttributeError('savePath must be provided for an item of type: %s' % self.type)
if os.path.isdir(savePath) == False:
os.makedirs(savePath)
return self._download_file(url,
save_path=savePath,
file_name=self.name,
securityHandler=self._securityHandler,
proxy_port=self._proxy_port,
proxy_url=self._proxy_url)
else:
results = self._do_get(url, params,
proxy_port=self._proxy_port,
securityHandler=self._securityHandler,
proxy_url=self._proxy_url)
return results
#----------------------------------------------------------------------
def itemInfoFile(self):
""" """
url = self._baseUrl + "/%s/info/iteminfo.xml" % self._itemId
xml = self._download_file(
url=url,
param_dict={},
save_path=os.environ['TEMP'],
file_name="iteminfo.xml",
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port
)
text = open(xml, 'rb').read()
os.remove(xml)
return text
#----------------------------------------------------------------------
def metadata(self, exportFormat="default", output=None):
"""
        exports metadata to one of the supported formats
Inputs:
           exportFormat - export metadata to the following formats: fgdc,
inspire, iso19139, iso19139-3.2, iso19115, and default.
default means the value will be the default ArcGIS metadata
format.
output - html or none. Html returns values as html text.
Output:
path to file or string
"""
url = self._baseUrl + "/%s" % self._itemId + "/info/metadata/metadata.xml"
allowedFormats = ["fgdc", "inspire", "iso19139",
"iso19139-3.2", "iso19115", "default"]
if not exportFormat.lower() in allowedFormats:
raise Exception("Invalid exportFormat")
params = {
"format" : exportFormat
}
if output is not None:
params['output'] = output
if output is None:
return self._download_file(url=url,
save_path=tempfile.gettempdir(),
securityHandler=self._securityHandler,
param_dict=params,
file_name="metadata.xml",
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
else:
return self._do_post(url=url,
param_dict=params,
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
#----------------------------------------------------------------------
def deleteInfo(self):
"""deletes the info information for a given item."""
#TODO: - fix delete metadata
url = self._baseUrl.replace("/items", "/users") + \
"/%s/items/%s/deleteInfo" % (self.owner, self.itemId)
params = {
"f" : "json"
}
#print self._do_post(url=url, param_dict=params,
#securityHandler=self._securityHandler,
#proxy_url=self._proxy_url,
#proxy_port=self._proxy_port)
return None
#----------------------------------------------------------------------
def updateMetadata(self, metadataFile):
"""
updates or adds the current item's metadata
metadataFile is the path to the XML file to upload.
Output:
dictionary
"""
url = self._baseUrl.replace("/items", "/users")
uc = UserContent(url=url,
username=self.owner,
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
ip = ItemParameter()
ip.metadata = metadataFile
res = uc.updateItem(itemId=self.itemId,
updateItemParameters=ip)
del uc
del ip
return res
#----------------------------------------------------------------------
@property
def itemRating(self):
""" returns the item's rating """
url = self._baseUrl + "/%s/rating" % self._itemId
params = {
"f": "json"
}
return self._do_get(url,
params,
securityHandler=self._securityHandler,
proxy_port=self._proxy_port,
proxy_url=self._proxy_url)
#----------------------------------------------------------------------
def packageInfoFile(self, saveFolder):
"""
The Package Info File for the uploaded item is only available for
items that are ArcGIS packages (for example, Layer Package, Map
Package). It contains information that is used by clients (ArcGIS
Desktop, ArcGIS Explorer, and so on) to work appropriately with
downloaded packages.
Inputs:
saveFolder - location to save the package file
"""
saveFile = saveFolder + os.sep + "item.pkinfo"
if os.path.isfile(saveFile):
os.remove(saveFile)
param_dict = {}
url = self._baseUrl + "/%s/item.pkinfo" % self._itemId
xml = self._download_file(
url=url,
save_path=saveFolder,
file_name=os.path.basename(saveFile),
proxy_url=self._proxy_url,
proxy_port=self._proxy_port,
securityHandler=self._securityHandler,
param_dict=param_dict
)
return xml
#----------------------------------------------------------------------
def relatedItems(self, relationshipType, direction=None):
"""
Gets all the related items of a certain relationship type for that
item. An optional direction can be specified if the direction of
the relationship is ambiguous. Otherwise, the service will try to
infer it.
Inputs:
relationshipType - The type of relationship between the two items
direction - The direction of the relationship. Either forward
(from origin -> destination) or reverse (from
destination -> origin).
"""
url = self._baseUrl + "/%s/relatedItems" % self._itemId
params = {
"f": "json",
"relationshipType" : relationshipType
}
if direction is not None:
params['direction'] = direction
return self._do_get(url,
params,
securityHandler=self._securityHandler,
proxy_port=self._proxy_port,
proxy_url=self._proxy_url)
#----------------------------------------------------------------------
def shareItem(self,
groups="",
everyone=False,
org=False):
"""
        Shares the item with the specified list of groups. Users can
        only share items with groups to which they belong.
This operation also allows a user to share items with everyone, in
which case the items are publicly accessible, or with everyone in
their organization.
Inputs:
           groups - comma-separated list of group Ids
           everyone - boolean, true means share with everyone (public)
           org - boolean, true means share with the organization
"""
params = {
"f": "json",
"everyone" : everyone,
"org" : org
}
if groups != "" and groups is not None:
params['groups'] = groups
url = self._baseUrl + "/%s/share" % self._itemId
return self._do_post(
url = url,
securityHandler=self._securityHandler,
param_dict=params,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
#----------------------------------------------------------------------
def unshareItem(self, groups):
"""
Stops sharing the item with the specified list of groups
Inputs:
           groups - comma-separated list of group Ids
"""
params = {
"f": "json",
"groups" : groups
}
url = self._baseUrl + "/%s/unshare" % self._itemId
return self._do_post(
url = url,
param_dict=params,
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
########################################################################
class UserItems(BaseAGOLClass):
"""
Helps manage a given owner's content
"""
_username = None
_itemId = None
_baseUrl = None
_securityHandler = None
_proxy_url = None
_proxy_port = None
#----------------------------------------------------------------------
def __init__(self,
itemId,
url,
securityHandler,
username=None,
proxy_url=None,
proxy_port=None):
"""Constructor"""
if username is None:
username = securityHandler.username
self._username = username
self._itemId = itemId
if url.lower().endswith("/users") == False:
self._baseUrl = url + "/users/%s" % username
else:
self._baseUrl = url
self._securityHandler = securityHandler
        if securityHandler is not None:
self._referer_url = securityHandler.referer_url
self._proxy_url = proxy_url
self._proxy_port = proxy_port
#----------------------------------------------------------------------
@property
def itemId(self):
"""gets/sets the item id"""
return self._itemId
#----------------------------------------------------------------------
@itemId.setter
def itemId(self, value):
"""gets/sets the item id"""
if self._itemId != value:
self._itemId = value
#----------------------------------------------------------------------
@property
def username(self):
"""gets/sets the username"""
return self._username
#----------------------------------------------------------------------
@username.setter
def username(self, value):
"""gets/sets the username"""
if self._username != value:
self._username = value
#----------------------------------------------------------------------
def deleteItem(self):
"""
The Delete Item operation (POST only) removes both the item and its
link from the user's folder by default. This operation is available
to the user and to the administrator of the organization to which
the user belongs.
"""
params = {
"f" : "json"
}
url = self._baseUrl + "/%s/items/%s/delete" % (self._username, self._itemId)
return self._do_post(
url = url,
param_dict=params,
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
#----------------------------------------------------------------------
def moveItem(self, folder="/"):
"""
The Move Item operation moves the item (link) from the current
folder to the specified target folder. Moving an item link does not
change the URI of the item itself, which continues to be
/content/items/<itemId>.
Inputs:
folder - the destination folder ID for the item. If the item is
to be moved to the root folder, specify the value as /
(forward slash).
Example 1: folder=/ (move to root folder)
Example 2: folder=1a9ad803da604628b08c968ce602a231
(move to folder with ID 1a9ad803da604628b08c968ce602a231)
"""
params = {
"f" : "json",
"folder" : folder
}
url = self._baseUrl + "/%s/items/%s/move" % (self._username, self._itemId)
return self._do_post(
url = url,
param_dict=params,
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
#----------------------------------------------------------------------
def protect(self):
"""
The Protect operation protects the items from deletion. This
operation is available to the user and to the administrator of the
organization to which the user belongs
"""
params = {
"f" : "json"
}
url = self._baseUrl + "/%s/items/%s/protect" % (self._username, self._itemId)
return self._do_post(
url = url,
securityHandler=self._securityHandler,
param_dict=params,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
#----------------------------------------------------------------------
def reassignItem(self,
targetUsername,
targetFoldername):
"""
The Reassign Item operation allows the administrator of an
organization to reassign a member's item to another member of the
organization.
Inputs:
targetUsername - The target username of the new owner of the
item
targetFoldername - The destination folder for the item. If the
item is to be moved to the root folder,
specify the value as "/" (forward slash). If
the target folder doesn't exist, it will be
created automatically.
"""
params = {
"f" : "json",
"targetUsername" : targetUsername,
"targetFoldername" : targetFoldername
}
url = self._baseUrl + "/%s/items/%s/reassign" % (self._username, self._itemId)
return self._do_post(
url = url,
param_dict=params,
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
#----------------------------------------------------------------------
def shareItem(self,
everyone=False,
org=False,
groups=""):
"""
        Shares the item with the specified list of groups. Users can
        only share items with groups to which they belong.
This operation also allows a user to share items with everyone, in
which case the items are publicly accessible, or with everyone in
their organization.
Inputs:
           groups - comma-separated list of group Ids
           everyone - boolean, true means share with everyone (public)
           org - boolean, true means share with the organization
"""
params = {
"f": "json",
"everyone" : everyone,
"org" : org
}
if groups != "" and groups is not None:
params['groups'] = groups
url = self._baseUrl + "/%s/items/%s/share" % (self._username, self._itemId)
return self._do_post(
url = url,
param_dict=params,
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
#----------------------------------------------------------------------
def unprotect(self):
"""
The Unprotect operation disables the item protection from deletion.
"""
params = {
"f": "json"
}
url = self._baseUrl + "/%s/items/%s/unprotect" % (self._username, self._itemId)
return self._do_post(
url = url,
param_dict=params,
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
#----------------------------------------------------------------------
def unshareItem(self, groups):
"""
Stops sharing the item with the specified list of groups. Available
to the user and the administrator of the organization to which the
user belongs, if any.
Inputs:
            groups - comma-separated list of group Ids
"""
params = {
"f": "json",
"groups": groups
}
url = self._baseUrl + "/%s/items/%s/unshare" % (self._username, self._itemId)
return self._do_post(
url = url,
param_dict=params,
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
#----------------------------------------------------------------------
def updateItem(self,
itemParameters,
clearEmptyFields=False,
data=None,
metadata=None,
):
"""
updates an item's properties using the ItemParameter class.
Inputs:
itemParameters - property class to update
clearEmptyFields - boolean, cleans up empty values
data - updates the file property of the service like a .sd file
"""
thumbnail = None
files = []
params = {
"f": "json",
"clearEmptyFields": clearEmptyFields
}
if isinstance(itemParameters, ItemParameter) == False:
raise AttributeError("itemParameters must be of type parameter.ItemParameter")
keys_to_delete = ['id', 'owner', 'size', 'numComments',
'numRatings', 'avgRating', 'numViews' ]
dictItem = itemParameters.value
for key in keys_to_delete:
if key in dictItem:
del dictItem[key]
for key in dictItem:
if key == "thumbnail":
thumbnail = dictItem['thumbnail']
files.append(('thumbnail', thumbnail, os.path.basename(thumbnail)))
elif key == "metadata":
files.append(('metadata', metadata, 'metadata.xml'))
else:
params[key] = dictItem[key]
if data is not None:
files.append(('file', data, os.path.basename(data)))
url = self._baseUrl + "/items/%s/update" % (self._itemId)
parsed = urlparse.urlparse(url)
res = self._post_multipart(host=parsed.hostname,
port=parsed.port,
selector=parsed.path,
fields=params,
files=files,
securityHandler=self._securityHandler,
ssl=parsed.scheme.lower() == 'https',
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
return res
########################################################################
class UserContent(BaseAGOLClass):
"""
manages a given user's content
"""
_username = None
_baseUrl = None
_securityHandler = None
_proxy_url = None
_proxy_port = None
_currentFolder = None
_folders = None
_items = None
#----------------------------------------------------------------------
def __init__(self,
url,
securityHandler,
username,
proxy_url=None,
proxy_port=None,
initialize=False):
"""Constructor"""
if username is None:
username = self.__getCurrentUsername()
if username is None or username == '':
raise AttributeError("Username is required")
self._username = username
self._securityHandler = securityHandler
        if securityHandler is not None:
self._referer_url = securityHandler.referer_url
self._proxy_url = proxy_url
self._proxy_port = proxy_port
if url.lower().endswith("/users") == False:
url += "/users"
self._baseUrl = url
else:
self._baseUrl = url
    #----------------------------------------------------------------------
    def __getCurrentUsername(self):
"""gets the current username"""
from . import Administration, _portals
admin = Administration(url=self._securityHandler.org_url,
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
return admin.portals().portalSelf().user['username']
#----------------------------------------------------------------------
def listUserFolders(self, username):
"""
Gets a user's folders.
Inputs:
username - name of the user to query
"""
res = self.listUserContent(username=username)
if "folders" in res:
return res.get("folders")
else:
return []
#----------------------------------------------------------------------
def listUserContent(self, username=None, folderId=None):
"""
        Gets the user's content in the given folder.
        If folderId is None, the root content is returned as a
        dictionary object.
Input:
            username - name of the user whose content is listed
folderId - unique folder Id
Output:
JSON object as dictionary
"""
if username is None:
username = self._username
url = self._baseUrl + "/%s" % username
if folderId is not None:
url += "/%s" % folderId
params = {
"f" : "json"
}
return self._do_get(url=url, param_dict=params,
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
@property
def username(self):
"""gets/sets the username"""
return self._username
#----------------------------------------------------------------------
@username.setter
def username(self, value):
"""gets/sets the username"""
if self._username != value:
self._username = value
#----------------------------------------------------------------------
def addByPart(self, filePath, itemId, folder=None):
"""
        Allows large file uploads to be split into 50 MB chunks and
        sent to AGOL/Portal. This works around the multi-part POST
        running out of memory in Python for very large files.
To use this function, an addItem() must be run first and that
item id must be passed into this function.
Once the file is uploaded, a commit() must be performed on the
data to have all the parts merged together.
No item properties will be inherited from the initial AddItem()
call unless it is an sd file. Therefore you must call
updateItem() on the recently updated item.
Example:
fp = r"c:\temp\big.zip"
#.... AGOL init stuff
#....
usercontent = agol.content.usercontent(username)
res = usercontent.addItem(itemParameters=None,
filePath=fp,
overwrite=True,
multipart=True)
res = usercontent.addByPart(filePath=fp, itemId=res['id'])
res = usercontent.commit(itemId)
usercontent.updateItem(itemId=res['id'],
updateItemParameters=ip)
# Item added and updated.
Inputs:
filePath - location of the file on disk
itemId - empty item added to AGOL/Portal
folder - folder id
"""
params = {
"f" : "json",
'itemType' : 'file'
}
url = self._baseUrl + "/%s" % self._username
url = url.replace("http://", "https://" )
if folder is not None:
url += '/' + folder
url += '/items/%s/addPart' % itemId
parsed = urlparse.urlparse(url)
with open(filePath, 'rb') as f:
mm = mmap.mmap(f.fileno(), 0, access=mmap.ACCESS_READ)
size = 50000000
steps = int(os.fstat(f.fileno()).st_size / size)
if os.fstat(f.fileno()).st_size % size > 0:
steps += 1
for i in range(steps):
files = []
tempFile = os.path.join(os.environ['TEMP'], "split.part%s" % i)
if os.path.isfile(tempFile):
os.remove(tempFile)
with open(tempFile, 'wb') as writer:
writer.write(mm.read(size))
writer.flush()
writer.close()
del writer
files.append(('file', tempFile, os.path.basename(tempFile)))
params['partNum'] = i + 1
res = self._post_multipart(host=parsed.hostname,
selector=parsed.path,
files = files,
fields=params,
port=parsed.port,
securityHandler=self._securityHandler,
ssl=parsed.scheme.lower() == 'https',
proxy_port=self._proxy_port,
proxy_url=self._proxy_url)
os.remove(tempFile)
del mm
return res
#----------------------------------------------------------------------
def addItem(self,
itemParameters,
filePath=None,
overwrite=False,
folder=None,
url=None,
text=None,
relationshipType=None,
originItemId=None,
destinationItemId=None,
serviceProxyParams=None,
metadata=None,
multipart=False):
"""
Adds an item to ArcGIS Online or Portal.
        The Add Item operation (POST only) is used to upload an item file,
submit text content, or submit the item URL to the specified user
folder depending on documented items and item types. This operation
is available only to the specified user.
Inputs:
itemParameters - required except for when multipart = True or SD
file. This contains all the information
regarding type, tags, etc...
filePath - if updating the item's content
overwrite - if the item exists, it overwrites it
folder - id of the folder to place the item
url - The URL of the item to be submitted. The URL can be a URL
to a service, a web mapping application, or any other
content available at that URL.
text - The text content for the item to be submitted.
relationshipType - The type of relationship between the two
items. See Relationship types for a complete
listing of types.
originItemId - The item ID of the origin item of the
relationship
destinationItemId - item ID of the destination item of the
relationship.
serviceProxyParams - JSON object that provides rate limiting
and referrer checks when accessing secured
services.
metadata - XML meta data file.
multipart - If true, the file is uploaded in multiple parts. Used
for files over 100 MBs in size.
"""
params = {
"f" : "json",
"token" : self._securityHandler.token,
}
res = ""
if itemParameters is not None:
params.update(itemParameters.value)
if itemParameters.overwrite is None:
params['overwrite'] = json.dumps(overwrite)
if itemParameters.overwrite != overwrite:
params['overwrite'] = overwrite
if url is not None:
params['url'] = url
if text is not None:
params['text'] = text
if relationshipType is not None:
params['relationshipType'] = relationshipType
if originItemId is not None:
params['originItemId'] = originItemId
if destinationItemId is not None:
params['destinationItemId'] = destinationItemId
if serviceProxyParams is not None:
params['serviceProxyParams'] = serviceProxyParams
url = self._baseUrl + "/%s" % self._username
url = url.replace("http://", "https://" )
if folder is not None:
url += '/' + folder
url += '/addItem'
parsed = urlparse.urlparse(url)
files = []
if multipart:
params['multipart'] = multipart
params["filename"] = os.path.basename(filePath)
params['itemType'] = 'file'
res = self._do_post(url,
param_dict=params,
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
if 'id' in res.keys():
itemId = res['id']
res = self.addByPart(filePath=filePath,
itemId=itemId,
folder=folder)
itemId = res['id']
# need to pass 'type' on commit
res = self.commit(itemId=itemId, folderId=folder,
wait=True, additionalParams=\
{'type' : itemParameters.type }
)
itemId = res['itemId']
if itemParameters is not None:
res = self.updateItem(itemId=itemId,
updateItemParameters=itemParameters)
return self._unicode_convert(res)
else:
if filePath is not None and os.path.isfile(filePath):
files.append(('file', filePath, os.path.basename(filePath)))
params["filename"] = os.path.basename(filePath)
elif filePath is not None and multipart:
params["filename"] = os.path.basename(filePath)
if 'thumbnail' in params:
v = params['thumbnail']
del params['thumbnail']
files.append(('thumbnail', v, os.path.basename(v)))
if metadata is not None and os.path.isfile(metadata):
files.append(('metadata', metadata, 'metadata.xml'))
if len(files) < 1:
res = self._do_post(url,
param_dict=params,
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
else:
params['itemType'] = 'file'
params['async'] = False
res = self._post_multipart(host=parsed.hostname,
selector=parsed.path,
files = files,
fields=params,
securityHandler=self._securityHandler,
port=parsed.port,
ssl=parsed.scheme.lower() == 'https',
proxy_port=self._proxy_port,
proxy_url=self._proxy_url)
return self._unicode_convert(res)
return self._unicode_convert(res)
#----------------------------------------------------------------------
def addRelationship(self,
originItemId,
destinationItemId,
relationshipType):
"""
Adds a relationship of a certain type between two items.
Inputs:
originItemId - The item ID of the origin item of the
relationship
destinationItemId - The item ID of the destination item of the
relationship.
relationshipType - The type of relationship between the two
items. Must be defined in Relationship types.
"""
url = self._baseUrl + "/%s/addRelationship" % self._username
params = {
"originItemId" : originItemId,
"destinationItemId": destinationItemId,
"relationshipType" : relationshipType,
"f" : "json"
}
return self._do_post(url=url,
param_dict=params,
securityHandler=self._securityHandler,
proxy_port=self._proxy_port,
proxy_url=self._proxy_url)
#----------------------------------------------------------------------
def cancel(self, itemId):
"""
Cancels a multipart upload on an item. Can be called after the
multipart Add Item or Update Item call is made but cannot be called
after a Commit operation.
Inputs:
itemId - unique item id
"""
url = self._baseUrl + "/%s/%s/cancel" % (self._username, itemId)
params = {
"f" : "json",
}
return self._do_post(url=url,
param_dict=params,
securityHandler=self._securityHandler,
proxy_port=self._proxy_port,
proxy_url=self._proxy_url)
#----------------------------------------------------------------------
def commit(self, itemId, folderId=None, wait=False, additionalParams={}):
"""
Commit is called once all parts are uploaded during a multipart Add
Item or Update Item operation. The parts are combined into a file,
and the original file is overwritten during an Update Item
operation. This is an asynchronous call and returns immediately.
Status can be used to check the status of the operation until it is
completed.
Inputs:
itemId - unique item id
folderId - folder id value, optional
wait - stops the thread and waits for the commit to finish or fail.
additionalParams - optional key/value pair like
type : "File Geodatabase". This is mainly used
when multipart uploads occur.
"""
if folderId is None:
url = self._baseUrl + "/%s/items/%s/commit" % (self._username, itemId)
else:
url = self._baseUrl + "/%s/%s/items/%s/commit" % (self._username, folderId, itemId)
params = {
"f" : "json",
}
for key, value in additionalParams.iteritems():
params[key] = value
if wait:
res = self._do_post(url=url,
param_dict=params,
securityHandler=self._securityHandler,
proxy_port=self._proxy_port,
proxy_url=self._proxy_url)
res = self.status(itemId=res['id'])
import time
while res['status'].lower() in ["partial", "processing"]:
time.sleep(5)
res = self.status(itemId=res['itemId'])
return res
else:
return self._do_post(url=url,
param_dict=params,
securityHandler=self._securityHandler,
proxy_port=self._proxy_port,
proxy_url=self._proxy_url)
#----------------------------------------------------------------------
def createFolder(self, name):
"""
Creates a folder in which items can be placed. Folders are only
visible to a user and solely used for organizing content within
that user's content space.
"""
url = self._baseUrl + "/%s/createFolder" % self._username
params = {
"f" : "json",
"title" : name
}
return self._do_post(url=url,
param_dict=params,
securityHandler=self._securityHandler,
proxy_port=self._proxy_port,
proxy_url=self._proxy_url)
#----------------------------------------------------------------------
def createService(self, createServiceParameter):
"""
The Create Service operation allows users to create a hosted
feature service. You can use the API to create an empty hosted
        feature service from feature service metadata JSON.
Inputs:
createServiceParameter - create service object
"""
url = self._baseUrl + "/%s/createService" % self._username
val = createServiceParameter.value
params = {
"f" : "json",
"outputType" : "featureService",
"createParameters" : json.dumps(val)
}
return self._do_post(url=url,
param_dict=params,
securityHandler=self._securityHandler,
proxy_port=self._proxy_port,
proxy_url=self._proxy_url)
#----------------------------------------------------------------------
def deleteFolder(self, folderId):
"""
The delete user folder operation (POST only) is available only on
the user's non-root folders. The user's root folder cannot be
deleted.
Deleting a folder also deletes all items that it contains (both the
items and the links are removed).
Inputs:
folderId - id of folder to remove
"""
url = self._baseUrl + "/%s/%s/delete" % (self._username,
folderId)
params = {
"f" : "json"
}
return self._do_post(url=url,
param_dict=params,
securityHandler=self._securityHandler,
proxy_port=self._proxy_port,
proxy_url=self._proxy_url)
#----------------------------------------------------------------------
def deleteItem(self, item_id,folder=None,force_delete=False):
""" deletes an agol item by it's ID """
url = '{}/{}'.format(self._baseUrl, self._username )
if folder:
url += '/' + folder
url += '/items/{}/delete'.format(item_id)
params = {'f': 'json'}
jres = self._do_post(url=url,
param_dict=params,
securityHandler=self._securityHandler,
proxy_port=self._proxy_port,
proxy_url=self._proxy_url)
if 'error' in jres:
if force_delete:
dis_res = self.disableProtect(item_id,folder)
if 'success' in dis_res:
return self.deleteItem(item_id=item_id,folder=folder,force_delete=False)
else:
return jres
return jres
#----------------------------------------------------------------------
def disableProtect(self, item_id,folder=None):
""" Disables an items protection """
url = '{}/{}'.format(self._baseUrl, self._username )
if folder:
url += '/' + folder
url += '/items/{}/unprotect'.format(item_id)
params = {'f': 'json'}
jres = self._do_post(url=url,
param_dict=params,
securityHandler=self._securityHandler,
proxy_port=self._proxy_port,
proxy_url=self._proxy_url)
return jres
#----------------------------------------------------------------------
def deleteItems(self, items):
"""
Deletes a batch of items owned or administered by the calling user.
Inputs:
items - A comma separated list of items to be deleted.
"""
url = self._baseUrl + "/%s/deleteItems" % self._username
params = {
"f" : "json",
"items" : items
}
return self._do_post(url=url,
param_dict=params,
securityHandler=self._securityHandler,
proxy_port=self._proxy_port,
proxy_url=self._proxy_url)
#----------------------------------------------------------------------
def deleteRelationship(self,
originItemId,
destinationItemId,
relationshipType
):
"""
Deletes a relationship of a certain type between two items. The
current user must have created the relationship to delete it. If
the relationship does not exist, an error is thrown.
Inputs:
originItemId - The item ID of the origin item of the
relationship.
destinationItemId - item ID of the destination item of the
relationship.
relationshipType - type of relationship between the two items.
"""
url = self._baseUrl + "/%s/deleteRelationship" % self._username
params = {
"f" : "json",
"originItemId" : originItemId,
"destinationItemid" : destinationItemId,
"relationshipType" : relationshipType
}
return self._do_post(url=url,
param_dict=params,
securityHandler=self._securityHandler,
proxy_port=self._proxy_port,
proxy_url=self._proxy_url)
#----------------------------------------------------------------------
def exportItem(self, title,
itemId,
exportFormat,
exportParameters=None):
"""
Exports a service item (POST only) to the specified output format.
Available only to users with an organizational subscription.
Invokable only by the service item owner or an administrator.
Inputs:
title - name of export item
itemId - id of the item to export
           exportFormat - output format. Values: Shapefile, CSV, File
                          Geodatabase, Feature Collection, GeoJson
exportParameters - A JSON object describing the layers to be
exported and the export parameters for each
layer.
"""
url = self._baseUrl + '/%s/export' % self._securityHandler._username
params = {
"f" : "json",
"title" : title,
"itemId" : itemId,
"exportFormat" : exportFormat,
}
if exportParameters is not None:
params["exportParameters"] = json.dumps(exportParameters)
return self._do_post(url=url,
param_dict=params,
securityHandler=self._securityHandler,
proxy_port=self._proxy_port,
proxy_url=self._proxy_url)
#----------------------------------------------------------------------
def moveItems(self, items, folder="/"):
"""
Moves a batch of items from their current folder to the specified
target folder.
Inputs:
           items - comma-separated list of items to move
folder - destination folder id. "/" means root
"""
url = self._baseUrl + "/%s/moveItems" % self._username
params = {
"f" : "json",
"items" : items,
"folder" : folder
}
return self._do_post(url=url,
param_dict=params,
securityHandler=self._securityHandler,
proxy_port=self._proxy_port,
proxy_url=self._proxy_url)
#----------------------------------------------------------------------
def publishItem(self,
fileType,
publishParameters=None,
itemId=None,
filePath=None,
text=None,
outputType=None,
buildIntialCache=False
):
"""
Publishes a hosted service based on an existing source item.
Publishers can create feature services as well as tiled map
services.
Feature services can be created using input files of type csv,
shapefile, serviceDefinition, featureCollection, and
fileGeodatabase.
Inputs:
fileType - Item type.
Values: serviceDefinition | shapefile | csv |
tilePackage | featureService | featureCollection |
                       fileGeodatabase
publishParameters - object describing the service to be created
as part of the publish operation. Only
required for CSV, Shapefiles, feature
collection, and file geodatabase.
itemId - The ID of the item to be published.
text - The text in the file to be published. This ONLY applies
to CSV files.
filePath - The file to be uploaded.
outputType - Only used when a feature service is published as a
tile service.
buildIntialCache - default false. Allows the user to prevent
the creation of the initial cache for tiled
services.
"""
_allowed_types = ["serviceDefinition", "shapefile", "csv",
"tilePackage", "featureService",
"featureCollection", "fileGeodatabase"]
if fileType.lower() not in [t.lower() for t in _allowed_types]:
raise AttributeError("Invalid fileType: %s" % fileType)
url = self._baseUrl
url = url + "/%s" % self._username
url = url + "/publish"
params = {
"f" : "json",
'fileType': fileType
}
if isinstance(buildIntialCache, bool):
params['buildInitialCache'] = buildIntialCache
if publishParameters is not None and \
isinstance(publishParameters, PublishCSVParameters) == False:
params['publishParameters'] = json.dumps(publishParameters.value)
elif isinstance(publishParameters, PublishCSVParameters):
params['publishParameters'] = json.dumps(publishParameters.value)
if itemId is not None:
params['itemId'] = itemId
if text is not None and fileType.lower() == 'csv':
params['text'] = text
if filePath is not None:
parsed = urlparse.urlparse(url)
files = []
files.append(('file', filePath, os.path.basename(filePath)))
res = self._post_multipart(host=parsed.hostname,
selector=parsed.path,
files = files,
fields=params,
port=parsed.port,
securityHandler=self._securityHandler,
ssl=parsed.scheme.lower() == 'https',
proxy_port=self._proxy_port,
proxy_url=self._proxy_url)
res = self._unicode_convert(res)
return res
else:
return self._do_post(url=url,
param_dict=params,
securityHandler=self._securityHandler,
proxy_port=self._proxy_port,
proxy_url=self._proxy_url)
#----------------------------------------------------------------------
def status(self, itemId, jobId=None, jobType=None):
"""
Inquire about status when publishing an item, adding an item in
async mode, or adding with a multipart upload. "Partial" is
available for Add Item Multipart, when only a part is uploaded
and the item is not committed.
Input:
jobType The type of asynchronous job for which the status has
                   to be checked. Default is None, which checks the
item's status. This parameter is optional unless
used with the operations listed below.
Values: publish, generateFeatures, export,
and createService
jobId - The job ID returned during publish, generateFeatures,
export, and createService calls.
"""
params = {
"f" : "json"
}
if jobType is not None:
params['jobType'] = jobType
if jobId is not None:
params["jobId"] = jobId
url = self._baseUrl + "/%s/items/%s/status" % (self._username, itemId)
return self._do_get(url=url,
param_dict=params,
securityHandler=self._securityHandler,
proxy_port=self._proxy_port,
proxy_url=self._proxy_url)
#----------------------------------------------------------------------
def shareItems(self, items, groups="", everyone=False,
org=False):
"""
Shares a batch of items with the specified list of groups. Users
can only share items with groups to which they belong. This
operation also allows a user to share items with everyone, in which
case the items are publicly accessible, or with everyone in their
organization.
Inputs:
everyone - boolean, makes item public
org - boolean, shares with all of the organization
           items - comma-separated list of items to share
           groups - comma-separated list of groups to share with
"""
url = self._baseUrl + "/%s/shareItems" % self._username
params = {
"f" : "json",
"items" : items,
"everyone" : everyone,
"org" : org,
"groups" : groups
}
return self._do_post(url=url,
param_dict=params,
securityHandler=self._securityHandler,
proxy_port=self._proxy_port,
proxy_url=self._proxy_url)
#----------------------------------------------------------------------
def unshareItems(self, items, groups):
"""
Unshares a batch of items with the specified list of groups
Inputs:
           items - comma-separated list of items to unshare
           groups - comma-separated list of groups from which to unshare the items
"""
url = self._baseUrl + "/%s/unshareItems" % self._username
params = {
"f" : "json",
"items" : items,
"groups" : groups
}
return self._do_post(url=url,
param_dict=params,
securityHandler=self._securityHandler,
proxy_port=self._proxy_port,
proxy_url=self._proxy_url)
#----------------------------------------------------------------------
def updateItem(self,
itemId,
updateItemParameters,
folderId=None,
clearEmptyFields=True,
filePath=None,
multipart=False,
url=None,
text=None
):
"""
The Update Item operation allows users to update item information
and their file, URL, or text depending on type. Users can use this
operation to update item information such as title, description,
tags, and so on, or use it to update an item's file, URL, or text.
This call is available to the user and the administrator of the
organization.
Inputs:
itemId - id of item to update
updateItemParameters - ItemsParameter Object
clearEmptyFields - boolean, Clears any fields that are passed in
empty
filePath - path of the file that will update the online item
multipart - If true, the file is uploaded in multiple parts. Used
for files over 100 MBs in size.
url - The URL of the item to be updated.
text - The text content for the item to be updated.
"""
files = []
res = ""
params = {
"f" : "json",
"clearEmptyFields" : clearEmptyFields
}
if updateItemParameters is not None:
params.update(updateItemParameters.value)
if "overwrite" in params.keys() and params['overwrite'] == False:
del params['overwrite']
if url is not None:
params['url'] = url
if text is not None:
params['text'] = text
if filePath is not None and \
os.path.isfile(filePath):
files.append(('file', filePath, os.path.basename(filePath)))
if 'thumbnail' in params:
v = params['thumbnail']
del params['thumbnail']
files.append(('thumbnail', v, os.path.basename(v)))
if 'largeThumbnail' in params:
v = params['largeThumbnail']
del params['largeThumbnail']
files.append(('largeThumbnail', v, os.path.basename(v)))
if 'metadata' in params:
v = params['metadata']
del params['metadata']
files.append(('metadata', v, os.path.basename(v)))
url = self._baseUrl + "/%s" % (self._username)
if folderId is not None:
url += '/' + folderId
url = url + "/items/%s/update" % itemId
if multipart and len(files) > 0:
params['multipart'] = multipart
params["filename"] = os.path.basename(filePath)
params['itemType'] = 'file'
res = self._do_post(url,
param_dict=params,
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
if 'id' in res:
itemId = res['id']
res = self.addByPart(filePath=filePath,
itemId=itemId,
folder=folderId)
itemId = res['id']
# need to pass 'type' on commit
res = self.commit(itemId=itemId,
folderId=folderId,
wait=True
)
itemId = res['itemId']
else:
if len(files) > 0:
parsed = urlparse.urlparse(url)
res = self._post_multipart(host=parsed.hostname,
selector=parsed.path,
files = files,
fields=params,
port=parsed.port,
ssl=parsed.scheme.lower() == 'https',
securityHandler=self._securityHandler,
proxy_port=self._proxy_port,
proxy_url=self._proxy_url)
else:
header = {"Content-Type": "application/x-www-form-urlencoded",
"Accept": "*/*"
}
res = self._do_post(url, param_dict=params,
proxy_port=self._proxy_port,
proxy_url=self._proxy_url,
securityHandler=self._securityHandler,
header=header)
res = self._unicode_convert(res)
return res
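    #----------------------------------------------------------------------
    # Illustrative usage sketch (comments only; the UserContent class name and
    # constructor below are assumptions inferred from the calls above, not a
    # confirmed ArcREST API):
    #
    #   usercontent = UserContent(url=baseUrl, securityHandler=handler)
    #   res = usercontent.publishItem(fileType="csv",
    #                                 publishParameters=PublishCSVParameters(...),
    #                                 itemId="<existing item id>")
    #   # publishing runs asynchronously; poll the item until the job finishes
    #   job = usercontent.status(itemId="<service item id>", jobType="publish")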
|
rishita/mxnet
|
refs/heads/master
|
example/bi-lstm-sort/rnn_model.py
|
15
|
# pylint: disable=C0111,too-many-arguments,too-many-instance-attributes,too-many-locals,redefined-outer-name,fixme
# pylint: disable=superfluous-parens, no-member, invalid-name
import sys
sys.path.insert(0, "../../python")
import numpy as np
import mxnet as mx
from lstm import LSTMState, LSTMParam, lstm, bi_lstm_inference_symbol
class BiLSTMInferenceModel(object):
def __init__(self,
seq_len,
input_size,
num_hidden,
num_embed,
num_label,
arg_params,
ctx=mx.cpu(),
dropout=0.):
self.sym = bi_lstm_inference_symbol(input_size, seq_len,
num_hidden,
num_embed,
num_label,
dropout)
batch_size = 1
init_c = [('l%d_init_c'%l, (batch_size, num_hidden)) for l in range(2)]
init_h = [('l%d_init_h'%l, (batch_size, num_hidden)) for l in range(2)]
data_shape = [("data", (batch_size, seq_len, ))]
input_shapes = dict(init_c + init_h + data_shape)
        self.executor = self.sym.simple_bind(ctx=ctx, **input_shapes)
for key in self.executor.arg_dict.keys():
if key in arg_params:
arg_params[key].copyto(self.executor.arg_dict[key])
state_name = []
for i in range(2):
state_name.append("l%d_init_c" % i)
state_name.append("l%d_init_h" % i)
self.states_dict = dict(zip(state_name, self.executor.outputs[1:]))
self.input_arr = mx.nd.zeros(data_shape[0][1])
def forward(self, input_data, new_seq=False):
        if new_seq:  # reset the recurrent state at the start of a new sequence
for key in self.states_dict.keys():
self.executor.arg_dict[key][:] = 0.
input_data.copyto(self.executor.arg_dict["data"])
self.executor.forward()
for key in self.states_dict.keys():
self.states_dict[key].copyto(self.executor.arg_dict[key])
prob = self.executor.outputs[0].asnumpy()
return prob
|
acsone/mozaik
|
refs/heads/8.0
|
mozaik_membership/wizard/change_main_address.py
|
1
|
# -*- coding: utf-8 -*-
##############################################################################
#
# This file is part of mozaik_membership, an Odoo module.
#
# Copyright (c) 2015 ACSONE SA/NV (<http://acsone.eu>)
#
# mozaik_membership is free software:
# you can redistribute it and/or
# modify it under the terms of the GNU Affero General Public License
# as published by the Free Software Foundation, either version 3 of
# the License, or (at your option) any later version.
#
# mozaik_membership is distributed in the hope that it will
# be useful but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the
# GNU Affero General Public License
# along with mozaik_membership.
# If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import orm, fields
class change_main_address(orm.TransientModel):
_inherit = 'change.main.address'
_columns = {
'keeping_mode': fields.integer(string='Mode'),
# 1: mandatory
# 2: user's choice
        # 3: forbidden
'keep_instance': fields.boolean(
string='Keep Previous Internal Instance?'),
'old_int_instance_id': fields.many2one(
'int.instance', string='Previous Internal Instance',
ondelete='cascade'),
'new_int_instance_id': fields.many2one(
'int.instance', string='New Internal Instance',
ondelete='cascade'),
}
def default_get(self, cr, uid, fields, context):
"""
To get default values for the object.
"""
res = super(change_main_address, self).default_get(cr, uid, fields,
context=context)
context = context or {}
ids = context.get('active_ids') or context.get('active_id') and \
[context.get('active_id')] or []
res['keeping_mode'] = 1
res['keep_instance'] = False
if len(ids) == 1:
res['keeping_mode'] = 1
for partner in self.pool['res.partner'].browse(cr, uid, ids,
context=context):
if partner.int_instance_id:
res['keep_instance'] = partner.is_company
res['old_int_instance_id'] = partner.int_instance_id.id
res['keeping_mode'] = 3
return res
# view methods: onchange, button
def onchange_address_id(self, cr, uid, ids, address_id,
old_int_instance_id, context=None):
res = {}
new_int_instance_id = False
keeping_mode = 3
if not old_int_instance_id:
keeping_mode = 1
elif address_id:
adr = self.pool['address.address'].browse(cr, uid, address_id,
context=context)
if adr.address_local_zip_id:
new_int_instance_id = \
adr.address_local_zip_id.int_instance_id.id
else:
new_int_instance_id = self.pool['int.instance'].\
get_default(cr, uid, context=None)
if old_int_instance_id != new_int_instance_id:
keeping_mode = 2
res.update({'new_int_instance_id': new_int_instance_id,
'keeping_mode': keeping_mode})
return {'value': res}
# public methods
def button_change_main_coordinate(self, cr, uid, ids, context=None):
"""
Change main coordinate for a list of partners
* a new main coordinate is created for each partner
        * the previous main coordinate is invalidated or not depending on
the option ``invalidate_previous_coordinate``
:raise: ERROR if no partner selected
**Note**
        When launched from the partner form, the partner id is taken from ``res_id``
"""
context = context or {}
wizard = self.browse(cr, uid, ids, context=context)[0]
if wizard.keeping_mode == 2 and wizard.keep_instance:
context.update({'keep_current_instance': True})
return super(change_main_address, self).button_change_main_coordinate(
cr, uid, ids, context=context)
|
derAndreas/pyGtk3Docs
|
refs/heads/develop
|
pyGtk3Docs/ast/Function.py
|
1
|
from Base import Base
class Function(Base):
""" Function nodes <function>
    This class is also the base class for <method>, <virtual-method> and
    <constructor> nodes, as they all have the same signature """
def parse_node(self):
""" Parse the node"""
self.parse_attributes_from_map({
'name' : 'name',
'cidentifier' : (self.NS_C, 'identifier'),
'version' : 'version',
'deprecated' : 'deprecated',
'deprecated_version' : 'deprecated-version',
'invoker' : 'invoker',
'throws' : 'throws',
'shadows' : 'shadows',
'shadowed-by' : 'shadowed-by'
})
self.parse_doc()
self.parse_parameters()
self.parse_returnvalue()
def toObjectRepr(self):
""" Collect all informations about the function or derived ast element """
params = None
doc = None
if len(self.parameters) > 0:
#paramsPlaceholder = []
params = []
for param in self.parameters:
#paramsPlaceholder.append("%s %s" % (param.getType().translate(), param.name))
params.append(param.toObjectRepr())
if hasattr(self.returnValue, 'doc'):
doc = self.returnValue.doc.toObjectRepr()
result = {
'name' : self.getName(),
'version' : self.getVersion(),
'description' : self.getDoc(True),
'returns' : self.returnValue.toObjectRepr(),
'parameters' : params
}
return result
|
charanpald/sppy
|
refs/heads/master
|
sppy/linalg/GeneralLinearOperator.py
|
1
|
class GeneralLinearOperator(object):
"""
A more general form of LinearOperator in scipy.linalg. Can be used
with many of the scipy functions.
The new operation is rmatmat which is X.T V.
"""
def __init__(self, shape, matvec, rmatvec=None, matmat=None, rmatmat=None, add=None, dtype=None):
self.shape = shape
self.matvec = matvec
self.rmatvec = rmatvec
self.matmat = matmat
self.rmatmat = rmatmat
self.add = add
self.dtype = dtype
@staticmethod
def asLinearOperator(X, parallel=False):
"""
Make a general linear operator from csarray X.
"""
if not parallel:
def matvec(v):
return X.dot(v)
def rmatvec(v):
return X.T.dot(v)
def matmat(V):
return X.dot(V)
def rmatmat(V):
return X.T.dot(V)
def add(V):
return X + V
else:
def matvec(v):
return X.pdot(v)
def rmatvec(v):
return X.T.pdot(v)
def matmat(V):
return X.pdot(V)
def rmatmat(V):
return X.T.pdot(V)
def add(V):
return X + V
return GeneralLinearOperator(X.shape, matvec, rmatvec, matmat, rmatmat, add, X.dtype)
@staticmethod
def asLinearOperatorSum(X, Y):
"""
Take two linear operators X and Y, and operate on their sum, using lazy
evaluation.
"""
if X.shape != Y.shape:
raise ValueError("Shapes of X and Y do not match: " + str(X.shape) + " " + str(Y.shape))
def matvec(v):
return X.matvec(v) + Y.matvec(v)
def rmatvec(v):
return X.rmatvec(v) + Y.rmatvec(v)
def matmat(V):
return X.matmat(V) + Y.matmat(V)
def rmatmat(V):
return X.rmatmat(V) + Y.rmatmat(V)
def add(V):
return Y.add(X.add(V))
return GeneralLinearOperator(X.shape, matvec, rmatvec, matmat, rmatmat, add, X.dtype)
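# A minimal usage sketch (not part of the original module): dense numpy arrays
# expose the same dot/.T/shape/dtype interface that asLinearOperator expects from
# csarray in the non-parallel case, so they can stand in for a quick check.
if __name__ == "__main__":
    import numpy
    X = numpy.random.rand(5, 3)
    Y = numpy.random.rand(5, 3)
    opX = GeneralLinearOperator.asLinearOperator(X)
    opY = GeneralLinearOperator.asLinearOperator(Y)
    opSum = GeneralLinearOperator.asLinearOperatorSum(opX, opY)
    v = numpy.random.rand(3)
    w = numpy.ones(5)
    # the sum (X + Y) is evaluated lazily through the two wrapped operators
    assert numpy.allclose(opSum.matvec(v), (X + Y).dot(v))
    assert numpy.allclose(opSum.rmatvec(w), (X + Y).T.dot(w))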
|
LICEF/edx-platform
|
refs/heads/master
|
common/lib/capa/capa/safe_exec/lazymod.py
|
68
|
"""A module proxy for delayed importing of modules.
From http://barnesc.blogspot.com/2006/06/automatic-python-imports-with-autoimp.html,
in the public domain.
"""
import sys
class LazyModule(object):
"""A lazy module proxy."""
def __init__(self, modname):
self.__dict__['__name__'] = modname
self._set_mod(None)
def _set_mod(self, mod):
if mod is not None:
self.__dict__ = mod.__dict__
self.__dict__['_lazymod_mod'] = mod
def _load_mod(self):
__import__(self.__name__)
self._set_mod(sys.modules[self.__name__])
def __getattr__(self, name):
if self.__dict__['_lazymod_mod'] is None:
self._load_mod()
mod = self.__dict__['_lazymod_mod']
if hasattr(mod, name):
return getattr(mod, name)
else:
try:
subname = '%s.%s' % (self.__name__, name)
__import__(subname)
submod = getattr(mod, name)
except ImportError:
raise AttributeError("'module' object has no attribute %r" % name)
            self.__dict__[name] = LazyModule(subname)
return self.__dict__[name]
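# A small usage sketch (illustrative only): the real import is deferred until the
# first attribute access on the proxy object.
if __name__ == "__main__":
    lazy_math = LazyModule("math")      # nothing is imported yet
    print(lazy_math.sqrt(16.0))         # first attribute access triggers the import
    lazy_os = LazyModule("os")
    print(lazy_os.path.join("a", "b"))  # submodules resolve through __getattr__ too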
|
lthurlow/Network-Grapher
|
refs/heads/master
|
proj/external/numpy-1.7.0/numpy/core/tests/test_getlimits.py
|
10
|
""" Test functions for limits module.
"""
from numpy.testing import *
from numpy.core import finfo, iinfo
from numpy import half, single, double, longdouble
import numpy as np
##################################################
class TestPythonFloat(TestCase):
def test_singleton(self):
ftype = finfo(float)
ftype2 = finfo(float)
assert_equal(id(ftype),id(ftype2))
class TestHalf(TestCase):
def test_singleton(self):
ftype = finfo(half)
ftype2 = finfo(half)
assert_equal(id(ftype),id(ftype2))
class TestSingle(TestCase):
def test_singleton(self):
ftype = finfo(single)
ftype2 = finfo(single)
assert_equal(id(ftype),id(ftype2))
class TestDouble(TestCase):
def test_singleton(self):
ftype = finfo(double)
ftype2 = finfo(double)
assert_equal(id(ftype),id(ftype2))
class TestLongdouble(TestCase):
def test_singleton(self,level=2):
ftype = finfo(longdouble)
ftype2 = finfo(longdouble)
assert_equal(id(ftype),id(ftype2))
class TestIinfo(TestCase):
def test_basic(self):
dts = zip(['i1', 'i2', 'i4', 'i8',
'u1', 'u2', 'u4', 'u8'],
[np.int8, np.int16, np.int32, np.int64,
np.uint8, np.uint16, np.uint32, np.uint64])
for dt1, dt2 in dts:
assert_equal(iinfo(dt1).min, iinfo(dt2).min)
assert_equal(iinfo(dt1).max, iinfo(dt2).max)
self.assertRaises(ValueError, iinfo, 'f4')
def test_unsigned_max(self):
types = np.sctypes['uint']
for T in types:
assert_equal(iinfo(T).max, T(-1))
class TestRepr(TestCase):
def test_iinfo_repr(self):
expected = "iinfo(min=-32768, max=32767, dtype=int16)"
assert_equal(repr(np.iinfo(np.int16)), expected)
def test_finfo_repr(self):
expected = "finfo(resolution=1e-06, min=-3.4028235e+38," + \
" max=3.4028235e+38, dtype=float32)"
# Python 2.5 float formatting on Windows adds an extra 0 to the
# exponent. So test for both. Once 2.5 compatibility is dropped, this
# can simply use `assert_equal(repr(np.finfo(np.float32)), expected)`.
expected_win25 = "finfo(resolution=1e-006, min=-3.4028235e+038," + \
" max=3.4028235e+038, dtype=float32)"
actual = repr(np.finfo(np.float32))
if not actual == expected:
if not actual == expected_win25:
                msg = build_err_msg([actual, expected], verbose=True)
raise AssertionError(msg)
def test_instances():
iinfo(10)
finfo(3.0)
if __name__ == "__main__":
run_module_suite()
|
HSAnet/glimpse_client
|
refs/heads/develop
|
3rdparty/breakpad/src/third_party/protobuf/protobuf/python/mox.py
|
603
|
#!/usr/bin/python2.4
#
# Copyright 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file is used for testing. The original is at:
# http://code.google.com/p/pymox/
"""Mox, an object-mocking framework for Python.
Mox works in the record-replay-verify paradigm. When you first create
a mock object, it is in record mode. You then programmatically set
the expected behavior of the mock object (what methods are to be
called on it, with what parameters, what they should return, and in
what order).
Once you have set up the expected mock behavior, you put it in replay
mode. Now the mock responds to method calls just as you told it to.
If an unexpected method (or an expected method with unexpected
parameters) is called, then an exception will be raised.
Once you are done interacting with the mock, you need to verify that
all the expected interactions occurred. (Maybe your code exited
prematurely without calling some cleanup method!) The verify phase
ensures that every expected method was called; otherwise, an exception
will be raised.
Suggested usage / workflow:
# Create Mox factory
my_mox = Mox()
# Create a mock data access object
mock_dao = my_mox.CreateMock(DAOClass)
# Set up expected behavior
mock_dao.RetrievePersonWithIdentifier('1').AndReturn(person)
mock_dao.DeletePerson(person)
# Put mocks in replay mode
my_mox.ReplayAll()
# Inject mock object and run test
controller.SetDao(mock_dao)
controller.DeletePersonById('1')
# Verify all methods were called as expected
my_mox.VerifyAll()
"""
from collections import deque
import re
import types
import unittest
import stubout
class Error(AssertionError):
"""Base exception for this module."""
pass
class ExpectedMethodCallsError(Error):
"""Raised when Verify() is called before all expected methods have been called
"""
def __init__(self, expected_methods):
"""Init exception.
Args:
# expected_methods: A sequence of MockMethod objects that should have been
# called.
expected_methods: [MockMethod]
Raises:
ValueError: if expected_methods contains no methods.
"""
if not expected_methods:
raise ValueError("There must be at least one expected method")
Error.__init__(self)
self._expected_methods = expected_methods
def __str__(self):
calls = "\n".join(["%3d. %s" % (i, m)
for i, m in enumerate(self._expected_methods)])
return "Verify: Expected methods never called:\n%s" % (calls,)
class UnexpectedMethodCallError(Error):
"""Raised when an unexpected method is called.
This can occur if a method is called with incorrect parameters, or out of the
specified order.
"""
def __init__(self, unexpected_method, expected):
"""Init exception.
Args:
# unexpected_method: MockMethod that was called but was not at the head of
# the expected_method queue.
# expected: MockMethod or UnorderedGroup the method should have
# been in.
unexpected_method: MockMethod
expected: MockMethod or UnorderedGroup
"""
Error.__init__(self)
self._unexpected_method = unexpected_method
self._expected = expected
def __str__(self):
return "Unexpected method call: %s. Expecting: %s" % \
(self._unexpected_method, self._expected)
class UnknownMethodCallError(Error):
"""Raised if an unknown method is requested of the mock object."""
def __init__(self, unknown_method_name):
"""Init exception.
Args:
# unknown_method_name: Method call that is not part of the mocked class's
# public interface.
unknown_method_name: str
"""
Error.__init__(self)
self._unknown_method_name = unknown_method_name
def __str__(self):
return "Method called is not a member of the object: %s" % \
self._unknown_method_name
class Mox(object):
"""Mox: a factory for creating mock objects."""
# A list of types that should be stubbed out with MockObjects (as
# opposed to MockAnythings).
_USE_MOCK_OBJECT = [types.ClassType, types.InstanceType, types.ModuleType,
types.ObjectType, types.TypeType]
def __init__(self):
"""Initialize a new Mox."""
self._mock_objects = []
self.stubs = stubout.StubOutForTesting()
def CreateMock(self, class_to_mock):
"""Create a new mock object.
Args:
# class_to_mock: the class to be mocked
class_to_mock: class
Returns:
MockObject that can be used as the class_to_mock would be.
"""
new_mock = MockObject(class_to_mock)
self._mock_objects.append(new_mock)
return new_mock
def CreateMockAnything(self):
"""Create a mock that will accept any method calls.
This does not enforce an interface.
"""
new_mock = MockAnything()
self._mock_objects.append(new_mock)
return new_mock
def ReplayAll(self):
"""Set all mock objects to replay mode."""
for mock_obj in self._mock_objects:
mock_obj._Replay()
def VerifyAll(self):
"""Call verify on all mock objects created."""
for mock_obj in self._mock_objects:
mock_obj._Verify()
def ResetAll(self):
"""Call reset on all mock objects. This does not unset stubs."""
for mock_obj in self._mock_objects:
mock_obj._Reset()
def StubOutWithMock(self, obj, attr_name, use_mock_anything=False):
"""Replace a method, attribute, etc. with a Mock.
This will replace a class or module with a MockObject, and everything else
(method, function, etc) with a MockAnything. This can be overridden to
always use a MockAnything by setting use_mock_anything to True.
Args:
obj: A Python object (class, module, instance, callable).
attr_name: str. The name of the attribute to replace with a mock.
use_mock_anything: bool. True if a MockAnything should be used regardless
of the type of attribute.
"""
attr_to_replace = getattr(obj, attr_name)
if type(attr_to_replace) in self._USE_MOCK_OBJECT and not use_mock_anything:
stub = self.CreateMock(attr_to_replace)
else:
stub = self.CreateMockAnything()
self.stubs.Set(obj, attr_name, stub)
def UnsetStubs(self):
"""Restore stubs to their original state."""
self.stubs.UnsetAll()
def Replay(*args):
"""Put mocks into Replay mode.
Args:
# args is any number of mocks to put into replay mode.
"""
for mock in args:
mock._Replay()
def Verify(*args):
"""Verify mocks.
Args:
# args is any number of mocks to be verified.
"""
for mock in args:
mock._Verify()
def Reset(*args):
"""Reset mocks.
Args:
# args is any number of mocks to be reset.
"""
for mock in args:
mock._Reset()
class MockAnything:
"""A mock that can be used to mock anything.
This is helpful for mocking classes that do not provide a public interface.
"""
def __init__(self):
""" """
self._Reset()
def __getattr__(self, method_name):
"""Intercept method calls on this object.
A new MockMethod is returned that is aware of the MockAnything's
state (record or replay). The call will be recorded or replayed
by the MockMethod's __call__.
Args:
# method name: the name of the method being called.
method_name: str
Returns:
A new MockMethod aware of MockAnything's state (record or replay).
"""
return self._CreateMockMethod(method_name)
def _CreateMockMethod(self, method_name):
"""Create a new mock method call and return it.
Args:
# method name: the name of the method being called.
method_name: str
Returns:
A new MockMethod aware of MockAnything's state (record or replay).
"""
return MockMethod(method_name, self._expected_calls_queue,
self._replay_mode)
def __nonzero__(self):
"""Return 1 for nonzero so the mock can be used as a conditional."""
return 1
def __eq__(self, rhs):
"""Provide custom logic to compare objects."""
return (isinstance(rhs, MockAnything) and
self._replay_mode == rhs._replay_mode and
self._expected_calls_queue == rhs._expected_calls_queue)
def __ne__(self, rhs):
"""Provide custom logic to compare objects."""
return not self == rhs
def _Replay(self):
"""Start replaying expected method calls."""
self._replay_mode = True
def _Verify(self):
"""Verify that all of the expected calls have been made.
Raises:
ExpectedMethodCallsError: if there are still more method calls in the
expected queue.
"""
# If the list of expected calls is not empty, raise an exception
if self._expected_calls_queue:
# The last MultipleTimesGroup is not popped from the queue.
if (len(self._expected_calls_queue) == 1 and
isinstance(self._expected_calls_queue[0], MultipleTimesGroup) and
self._expected_calls_queue[0].IsSatisfied()):
pass
else:
raise ExpectedMethodCallsError(self._expected_calls_queue)
def _Reset(self):
"""Reset the state of this mock to record mode with an empty queue."""
# Maintain a list of method calls we are expecting
self._expected_calls_queue = deque()
# Make sure we are in setup mode, not replay mode
self._replay_mode = False
class MockObject(MockAnything, object):
"""A mock object that simulates the public/protected interface of a class."""
def __init__(self, class_to_mock):
"""Initialize a mock object.
This determines the methods and properties of the class and stores them.
Args:
# class_to_mock: class to be mocked
class_to_mock: class
"""
# This is used to hack around the mixin/inheritance of MockAnything, which
# is not a proper object (it can be anything. :-)
MockAnything.__dict__['__init__'](self)
# Get a list of all the public and special methods we should mock.
self._known_methods = set()
self._known_vars = set()
self._class_to_mock = class_to_mock
for method in dir(class_to_mock):
if callable(getattr(class_to_mock, method)):
self._known_methods.add(method)
else:
self._known_vars.add(method)
def __getattr__(self, name):
"""Intercept attribute request on this object.
If the attribute is a public class variable, it will be returned and not
recorded as a call.
If the attribute is not a variable, it is handled like a method
call. The method name is checked against the set of mockable
methods, and a new MockMethod is returned that is aware of the
MockObject's state (record or replay). The call will be recorded
or replayed by the MockMethod's __call__.
Args:
# name: the name of the attribute being requested.
name: str
Returns:
Either a class variable or a new MockMethod that is aware of the state
of the mock (record or replay).
Raises:
UnknownMethodCallError if the MockObject does not mock the requested
method.
"""
if name in self._known_vars:
return getattr(self._class_to_mock, name)
if name in self._known_methods:
return self._CreateMockMethod(name)
raise UnknownMethodCallError(name)
def __eq__(self, rhs):
"""Provide custom logic to compare objects."""
return (isinstance(rhs, MockObject) and
self._class_to_mock == rhs._class_to_mock and
self._replay_mode == rhs._replay_mode and
self._expected_calls_queue == rhs._expected_calls_queue)
def __setitem__(self, key, value):
"""Provide custom logic for mocking classes that support item assignment.
Args:
key: Key to set the value for.
value: Value to set.
Returns:
Expected return value in replay mode. A MockMethod object for the
__setitem__ method that has already been called if not in replay mode.
Raises:
TypeError if the underlying class does not support item assignment.
UnexpectedMethodCallError if the object does not expect the call to
__setitem__.
"""
setitem = self._class_to_mock.__dict__.get('__setitem__', None)
# Verify the class supports item assignment.
if setitem is None:
raise TypeError('object does not support item assignment')
# If we are in replay mode then simply call the mock __setitem__ method.
if self._replay_mode:
return MockMethod('__setitem__', self._expected_calls_queue,
self._replay_mode)(key, value)
# Otherwise, create a mock method __setitem__.
return self._CreateMockMethod('__setitem__')(key, value)
def __getitem__(self, key):
"""Provide custom logic for mocking classes that are subscriptable.
Args:
key: Key to return the value for.
Returns:
Expected return value in replay mode. A MockMethod object for the
__getitem__ method that has already been called if not in replay mode.
Raises:
TypeError if the underlying class is not subscriptable.
UnexpectedMethodCallError if the object does not expect the call to
__setitem__.
"""
getitem = self._class_to_mock.__dict__.get('__getitem__', None)
# Verify the class supports item assignment.
if getitem is None:
raise TypeError('unsubscriptable object')
# If we are in replay mode then simply call the mock __getitem__ method.
if self._replay_mode:
return MockMethod('__getitem__', self._expected_calls_queue,
self._replay_mode)(key)
# Otherwise, create a mock method __getitem__.
return self._CreateMockMethod('__getitem__')(key)
def __call__(self, *params, **named_params):
"""Provide custom logic for mocking classes that are callable."""
# Verify the class we are mocking is callable
callable = self._class_to_mock.__dict__.get('__call__', None)
if callable is None:
raise TypeError('Not callable')
# Because the call is happening directly on this object instead of a method,
# the call on the mock method is made right here
mock_method = self._CreateMockMethod('__call__')
return mock_method(*params, **named_params)
@property
def __class__(self):
"""Return the class that is being mocked."""
return self._class_to_mock
class MockMethod(object):
"""Callable mock method.
A MockMethod should act exactly like the method it mocks, accepting parameters
and returning a value, or throwing an exception (as specified). When this
method is called, it can optionally verify whether the called method (name and
signature) matches the expected method.
"""
def __init__(self, method_name, call_queue, replay_mode):
"""Construct a new mock method.
Args:
# method_name: the name of the method
# call_queue: deque of calls, verify this call against the head, or add
# this call to the queue.
# replay_mode: False if we are recording, True if we are verifying calls
# against the call queue.
method_name: str
call_queue: list or deque
replay_mode: bool
"""
self._name = method_name
self._call_queue = call_queue
if not isinstance(call_queue, deque):
self._call_queue = deque(self._call_queue)
self._replay_mode = replay_mode
self._params = None
self._named_params = None
self._return_value = None
self._exception = None
self._side_effects = None
def __call__(self, *params, **named_params):
"""Log parameters and return the specified return value.
If the Mock(Anything/Object) associated with this call is in record mode,
this MockMethod will be pushed onto the expected call queue. If the mock
is in replay mode, this will pop a MockMethod off the top of the queue and
verify this call is equal to the expected call.
Raises:
UnexpectedMethodCall if this call is supposed to match an expected method
call and it does not.
"""
self._params = params
self._named_params = named_params
if not self._replay_mode:
self._call_queue.append(self)
return self
expected_method = self._VerifyMethodCall()
if expected_method._side_effects:
expected_method._side_effects(*params, **named_params)
if expected_method._exception:
raise expected_method._exception
return expected_method._return_value
def __getattr__(self, name):
"""Raise an AttributeError with a helpful message."""
raise AttributeError('MockMethod has no attribute "%s". '
'Did you remember to put your mocks in replay mode?' % name)
def _PopNextMethod(self):
"""Pop the next method from our call queue."""
try:
return self._call_queue.popleft()
except IndexError:
raise UnexpectedMethodCallError(self, None)
def _VerifyMethodCall(self):
"""Verify the called method is expected.
This can be an ordered method, or part of an unordered set.
Returns:
The expected mock method.
Raises:
UnexpectedMethodCall if the method called was not expected.
"""
expected = self._PopNextMethod()
# Loop here, because we might have a MethodGroup followed by another
# group.
while isinstance(expected, MethodGroup):
expected, method = expected.MethodCalled(self)
if method is not None:
return method
# This is a mock method, so just check equality.
if expected != self:
raise UnexpectedMethodCallError(self, expected)
return expected
def __str__(self):
params = ', '.join(
[repr(p) for p in self._params or []] +
['%s=%r' % x for x in sorted((self._named_params or {}).items())])
desc = "%s(%s) -> %r" % (self._name, params, self._return_value)
return desc
def __eq__(self, rhs):
"""Test whether this MockMethod is equivalent to another MockMethod.
Args:
# rhs: the right hand side of the test
rhs: MockMethod
"""
return (isinstance(rhs, MockMethod) and
self._name == rhs._name and
self._params == rhs._params and
self._named_params == rhs._named_params)
def __ne__(self, rhs):
"""Test whether this MockMethod is not equivalent to another MockMethod.
Args:
# rhs: the right hand side of the test
rhs: MockMethod
"""
return not self == rhs
def GetPossibleGroup(self):
"""Returns a possible group from the end of the call queue or None if no
other methods are on the stack.
"""
# Remove this method from the tail of the queue so we can add it to a group.
this_method = self._call_queue.pop()
assert this_method == self
# Determine if the tail of the queue is a group, or just a regular ordered
# mock method.
group = None
try:
group = self._call_queue[-1]
except IndexError:
pass
return group
def _CheckAndCreateNewGroup(self, group_name, group_class):
"""Checks if the last method (a possible group) is an instance of our
group_class. Adds the current method to this group or creates a new one.
Args:
group_name: the name of the group.
group_class: the class used to create instance of this new group
"""
group = self.GetPossibleGroup()
# If this is a group, and it is the correct group, add the method.
if isinstance(group, group_class) and group.group_name() == group_name:
group.AddMethod(self)
return self
# Create a new group and add the method.
new_group = group_class(group_name)
new_group.AddMethod(self)
self._call_queue.append(new_group)
return self
def InAnyOrder(self, group_name="default"):
"""Move this method into a group of unordered calls.
A group of unordered calls must be defined together, and must be executed
in full before the next expected method can be called. There can be
multiple groups that are expected serially, if they are given
different group names. The same group name can be reused if there is a
standard method call, or a group with a different name, spliced between
usages.
Args:
group_name: the name of the unordered group.
Returns:
self
"""
return self._CheckAndCreateNewGroup(group_name, UnorderedGroup)
def MultipleTimes(self, group_name="default"):
"""Move this method into group of calls which may be called multiple times.
A group of repeating calls must be defined together, and must be executed in
    full before the next expected method can be called.
Args:
group_name: the name of the unordered group.
Returns:
self
"""
return self._CheckAndCreateNewGroup(group_name, MultipleTimesGroup)
def AndReturn(self, return_value):
"""Set the value to return when this method is called.
Args:
# return_value can be anything.
"""
self._return_value = return_value
return return_value
def AndRaise(self, exception):
"""Set the exception to raise when this method is called.
Args:
# exception: the exception to raise when this method is called.
exception: Exception
"""
self._exception = exception
def WithSideEffects(self, side_effects):
"""Set the side effects that are simulated when this method is called.
Args:
side_effects: A callable which modifies the parameters or other relevant
state which a given test case depends on.
Returns:
Self for chaining with AndReturn and AndRaise.
"""
self._side_effects = side_effects
return self
class Comparator:
"""Base class for all Mox comparators.
A Comparator can be used as a parameter to a mocked method when the exact
value is not known. For example, the code you are testing might build up a
long SQL string that is passed to your mock DAO. You're only interested that
the IN clause contains the proper primary keys, so you can set your mock
up as follows:
mock_dao.RunQuery(StrContains('IN (1, 2, 4, 5)')).AndReturn(mock_result)
Now whatever query is passed in must contain the string 'IN (1, 2, 4, 5)'.
A Comparator may replace one or more parameters, for example:
# return at most 10 rows
mock_dao.RunQuery(StrContains('SELECT'), 10)
or
# Return some non-deterministic number of rows
mock_dao.RunQuery(StrContains('SELECT'), IsA(int))
"""
def equals(self, rhs):
"""Special equals method that all comparators must implement.
Args:
rhs: any python object
"""
raise NotImplementedError, 'method must be implemented by a subclass.'
def __eq__(self, rhs):
return self.equals(rhs)
def __ne__(self, rhs):
return not self.equals(rhs)
class IsA(Comparator):
"""This class wraps a basic Python type or class. It is used to verify
that a parameter is of the given type or class.
Example:
mock_dao.Connect(IsA(DbConnectInfo))
"""
def __init__(self, class_name):
"""Initialize IsA
Args:
class_name: basic python type or a class
"""
self._class_name = class_name
def equals(self, rhs):
"""Check to see if the RHS is an instance of class_name.
Args:
# rhs: the right hand side of the test
rhs: object
Returns:
bool
"""
try:
return isinstance(rhs, self._class_name)
except TypeError:
# Check raw types if there was a type error. This is helpful for
# things like cStringIO.StringIO.
return type(rhs) == type(self._class_name)
def __repr__(self):
return str(self._class_name)
class IsAlmost(Comparator):
"""Comparison class used to check whether a parameter is nearly equal
to a given value. Generally useful for floating point numbers.
  Example: mock_dao.SetTimeout(IsAlmost(3.9))
"""
def __init__(self, float_value, places=7):
"""Initialize IsAlmost.
Args:
float_value: The value for making the comparison.
places: The number of decimal places to round to.
"""
self._float_value = float_value
self._places = places
def equals(self, rhs):
"""Check to see if RHS is almost equal to float_value
Args:
rhs: the value to compare to float_value
Returns:
bool
"""
try:
return round(rhs-self._float_value, self._places) == 0
except TypeError:
# This is probably because either float_value or rhs is not a number.
return False
def __repr__(self):
return str(self._float_value)
class StrContains(Comparator):
"""Comparison class used to check whether a substring exists in a
string parameter. This can be useful in mocking a database with SQL
passed in as a string parameter, for example.
Example:
mock_dao.RunQuery(StrContains('IN (1, 2, 4, 5)')).AndReturn(mock_result)
"""
def __init__(self, search_string):
"""Initialize.
Args:
# search_string: the string you are searching for
search_string: str
"""
self._search_string = search_string
def equals(self, rhs):
"""Check to see if the search_string is contained in the rhs string.
Args:
# rhs: the right hand side of the test
rhs: object
Returns:
bool
"""
try:
return rhs.find(self._search_string) > -1
except Exception:
return False
def __repr__(self):
return '<str containing \'%s\'>' % self._search_string
class Regex(Comparator):
"""Checks if a string matches a regular expression.
This uses a given regular expression to determine equality.
"""
def __init__(self, pattern, flags=0):
"""Initialize.
Args:
# pattern is the regular expression to search for
pattern: str
# flags passed to re.compile function as the second argument
flags: int
"""
self.regex = re.compile(pattern, flags=flags)
def equals(self, rhs):
"""Check to see if rhs matches regular expression pattern.
Returns:
bool
"""
return self.regex.search(rhs) is not None
def __repr__(self):
s = '<regular expression \'%s\'' % self.regex.pattern
if self.regex.flags:
s += ', flags=%d' % self.regex.flags
s += '>'
return s
class In(Comparator):
"""Checks whether an item (or key) is in a list (or dict) parameter.
Example:
mock_dao.GetUsersInfo(In('expectedUserName')).AndReturn(mock_result)
"""
def __init__(self, key):
"""Initialize.
Args:
# key is any thing that could be in a list or a key in a dict
"""
self._key = key
def equals(self, rhs):
"""Check to see whether key is in rhs.
Args:
rhs: dict
Returns:
bool
"""
return self._key in rhs
def __repr__(self):
return '<sequence or map containing \'%s\'>' % self._key
class ContainsKeyValue(Comparator):
"""Checks whether a key/value pair is in a dict parameter.
Example:
mock_dao.UpdateUsers(ContainsKeyValue('stevepm', stevepm_user_info))
"""
def __init__(self, key, value):
"""Initialize.
Args:
# key: a key in a dict
# value: the corresponding value
"""
self._key = key
self._value = value
def equals(self, rhs):
"""Check whether the given key/value pair is in the rhs dict.
Returns:
bool
"""
try:
return rhs[self._key] == self._value
except Exception:
return False
def __repr__(self):
return '<map containing the entry \'%s: %s\'>' % (self._key, self._value)
class SameElementsAs(Comparator):
"""Checks whether iterables contain the same elements (ignoring order).
Example:
mock_dao.ProcessUsers(SameElementsAs('stevepm', 'salomaki'))
"""
def __init__(self, expected_seq):
"""Initialize.
Args:
expected_seq: a sequence
"""
self._expected_seq = expected_seq
def equals(self, actual_seq):
"""Check to see whether actual_seq has same elements as expected_seq.
Args:
actual_seq: sequence
Returns:
bool
"""
try:
expected = dict([(element, None) for element in self._expected_seq])
actual = dict([(element, None) for element in actual_seq])
except TypeError:
# Fall back to slower list-compare if any of the objects are unhashable.
expected = list(self._expected_seq)
actual = list(actual_seq)
expected.sort()
actual.sort()
return expected == actual
def __repr__(self):
return '<sequence with same elements as \'%s\'>' % self._expected_seq
class And(Comparator):
"""Evaluates one or more Comparators on RHS and returns an AND of the results.
"""
def __init__(self, *args):
"""Initialize.
Args:
*args: One or more Comparator
"""
self._comparators = args
def equals(self, rhs):
"""Checks whether all Comparators are equal to rhs.
Args:
# rhs: can be anything
Returns:
bool
"""
for comparator in self._comparators:
if not comparator.equals(rhs):
return False
return True
def __repr__(self):
return '<AND %s>' % str(self._comparators)
class Or(Comparator):
"""Evaluates one or more Comparators on RHS and returns an OR of the results.
"""
def __init__(self, *args):
"""Initialize.
Args:
*args: One or more Mox comparators
"""
self._comparators = args
def equals(self, rhs):
"""Checks whether any Comparator is equal to rhs.
Args:
# rhs: can be anything
Returns:
bool
"""
for comparator in self._comparators:
if comparator.equals(rhs):
return True
return False
def __repr__(self):
return '<OR %s>' % str(self._comparators)
class Func(Comparator):
"""Call a function that should verify the parameter passed in is correct.
You may need the ability to perform more advanced operations on the parameter
in order to validate it. You can use this to have a callable validate any
parameter. The callable should return either True or False.
Example:
def myParamValidator(param):
# Advanced logic here
return True
mock_dao.DoSomething(Func(myParamValidator), true)
"""
def __init__(self, func):
"""Initialize.
Args:
func: callable that takes one parameter and returns a bool
"""
self._func = func
def equals(self, rhs):
"""Test whether rhs passes the function test.
rhs is passed into func.
Args:
rhs: any python object
Returns:
the result of func(rhs)
"""
return self._func(rhs)
def __repr__(self):
return str(self._func)
class IgnoreArg(Comparator):
"""Ignore an argument.
This can be used when we don't care about an argument of a method call.
Example:
# Check if CastMagic is called with 3 as first arg and 'disappear' as third.
mymock.CastMagic(3, IgnoreArg(), 'disappear')
"""
def equals(self, unused_rhs):
"""Ignores arguments and returns True.
Args:
unused_rhs: any python object
Returns:
always returns True
"""
return True
def __repr__(self):
return '<IgnoreArg>'
class MethodGroup(object):
"""Base class containing common behaviour for MethodGroups."""
def __init__(self, group_name):
self._group_name = group_name
def group_name(self):
return self._group_name
def __str__(self):
return '<%s "%s">' % (self.__class__.__name__, self._group_name)
def AddMethod(self, mock_method):
raise NotImplementedError
def MethodCalled(self, mock_method):
raise NotImplementedError
def IsSatisfied(self):
raise NotImplementedError
class UnorderedGroup(MethodGroup):
"""UnorderedGroup holds a set of method calls that may occur in any order.
This construct is helpful for non-deterministic events, such as iterating
over the keys of a dict.
"""
def __init__(self, group_name):
super(UnorderedGroup, self).__init__(group_name)
self._methods = []
def AddMethod(self, mock_method):
"""Add a method to this group.
Args:
mock_method: A mock method to be added to this group.
"""
self._methods.append(mock_method)
def MethodCalled(self, mock_method):
"""Remove a method call from the group.
If the method is not in the set, an UnexpectedMethodCallError will be
raised.
Args:
mock_method: a mock method that should be equal to a method in the group.
Returns:
The mock method from the group
Raises:
UnexpectedMethodCallError if the mock_method was not in the group.
"""
# Check to see if this method exists, and if so, remove it from the set
# and return it.
for method in self._methods:
if method == mock_method:
# Remove the called mock_method instead of the method in the group.
# The called method will match any comparators when equality is checked
# during removal. The method in the group could pass a comparator to
# another comparator during the equality check.
self._methods.remove(mock_method)
# If this group is not empty, put it back at the head of the queue.
if not self.IsSatisfied():
mock_method._call_queue.appendleft(self)
return self, method
raise UnexpectedMethodCallError(mock_method, self)
def IsSatisfied(self):
"""Return True if there are not any methods in this group."""
return len(self._methods) == 0
class MultipleTimesGroup(MethodGroup):
"""MultipleTimesGroup holds methods that may be called any number of times.
Note: Each method must be called at least once.
This is helpful, if you don't know or care how many times a method is called.
"""
def __init__(self, group_name):
super(MultipleTimesGroup, self).__init__(group_name)
self._methods = set()
self._methods_called = set()
def AddMethod(self, mock_method):
"""Add a method to this group.
Args:
mock_method: A mock method to be added to this group.
"""
self._methods.add(mock_method)
def MethodCalled(self, mock_method):
"""Remove a method call from the group.
If the method is not in the set, an UnexpectedMethodCallError will be
raised.
Args:
mock_method: a mock method that should be equal to a method in the group.
Returns:
The mock method from the group
Raises:
UnexpectedMethodCallError if the mock_method was not in the group.
"""
# Check to see if this method exists, and if so add it to the set of
# called methods.
for method in self._methods:
if method == mock_method:
self._methods_called.add(mock_method)
# Always put this group back on top of the queue, because we don't know
# when we are done.
mock_method._call_queue.appendleft(self)
return self, method
if self.IsSatisfied():
next_method = mock_method._PopNextMethod();
return next_method, None
else:
raise UnexpectedMethodCallError(mock_method, self)
def IsSatisfied(self):
"""Return True if all methods in this group are called at least once."""
# NOTE(psycho): We can't use the simple set difference here because we want
# to match different parameters which are considered the same e.g. IsA(str)
# and some string. This solution is O(n^2) but n should be small.
tmp = self._methods.copy()
for called in self._methods_called:
for expected in tmp:
if called == expected:
tmp.remove(expected)
if not tmp:
return True
break
return False
class MoxMetaTestBase(type):
"""Metaclass to add mox cleanup and verification to every test.
As the mox unit testing class is being constructed (MoxTestBase or a
subclass), this metaclass will modify all test functions to call the
CleanUpMox method of the test class after they finish. This means that
unstubbing and verifying will happen for every test with no additional code,
and any failures will result in test failures as opposed to errors.
"""
def __init__(cls, name, bases, d):
type.__init__(cls, name, bases, d)
# also get all the attributes from the base classes to account
# for a case when test class is not the immediate child of MoxTestBase
for base in bases:
for attr_name in dir(base):
d[attr_name] = getattr(base, attr_name)
for func_name, func in d.items():
if func_name.startswith('test') and callable(func):
setattr(cls, func_name, MoxMetaTestBase.CleanUpTest(cls, func))
@staticmethod
def CleanUpTest(cls, func):
"""Adds Mox cleanup code to any MoxTestBase method.
Always unsets stubs after a test. Will verify all mocks for tests that
otherwise pass.
Args:
cls: MoxTestBase or subclass; the class whose test method we are altering.
func: method; the method of the MoxTestBase test class we wish to alter.
Returns:
The modified method.
"""
def new_method(self, *args, **kwargs):
mox_obj = getattr(self, 'mox', None)
cleanup_mox = False
if mox_obj and isinstance(mox_obj, Mox):
cleanup_mox = True
try:
func(self, *args, **kwargs)
finally:
if cleanup_mox:
mox_obj.UnsetStubs()
if cleanup_mox:
mox_obj.VerifyAll()
new_method.__name__ = func.__name__
new_method.__doc__ = func.__doc__
new_method.__module__ = func.__module__
return new_method
class MoxTestBase(unittest.TestCase):
"""Convenience test class to make stubbing easier.
Sets up a "mox" attribute which is an instance of Mox - any mox tests will
want this. Also automatically unsets any stubs and verifies that all mock
methods have been called at the end of each test, eliminating boilerplate
code.
"""
__metaclass__ = MoxMetaTestBase
def setUp(self):
self.mox = Mox()
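# A minimal record-replay-verify smoke test (an illustrative sketch; it assumes the
# companion stubout module imported at the top of this file is importable).
if __name__ == '__main__':
  m = Mox()
  dao = m.CreateMockAnything()
  dao.RetrieveRow(7).AndReturn({'id': 7})   # record the expected call
  m.ReplayAll()                             # switch the mock into replay mode
  assert dao.RetrieveRow(7) == {'id': 7}    # the replayed call returns the recorded value
  m.VerifyAll()                             # every recorded expectation was satisfied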
|
jeremysanders/taskproc
|
refs/heads/master
|
taskproc/common.py
|
1
|
# Copyright 2016 Jeremy Sanders
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#############################################################################
from __future__ import print_function
class TaskProcError(RuntimeError):
"""Exception raised if error encountered in this module.
A subclass of `RuntimeError`.
"""
pass
|
west2554/fofix
|
refs/heads/master
|
src/midi/RawInstreamFile.py
|
7
|
# -*- coding: ISO-8859-1 -*-
# standard library imports
from types import StringType
from struct import unpack
# custom import
from DataTypeConverters import readBew, readVar, varLen
class RawInstreamFile:
"""
It parses and reads data from an input file. It takes care of big
    endianness, and keeps track of the cursor position. The midi parser
only reads from this object. Never directly from the file.
"""
def __init__(self, infile=''):
"""
        If 'infile' is a string we assume it is a path and read from
that file.
If it is a file descriptor we read from the file, but we don't
close it.
Midi files are usually pretty small, so it should be safe to
copy them into memory.
"""
if infile:
if type(infile) in [str, unicode]:
infile = open(infile, 'rb')
self.data = infile.read()
infile.close()
else:
                # don't close the file; the caller owns it
self.data = infile.read()
else:
self.data = ''
# start at beginning ;-)
self.cursor = 0
# setting up data manually
def setData(self, data=''):
"Sets the data from a string."
self.data = data
# cursor operations
def setCursor(self, position=0):
"Sets the absolute position if the cursor"
self.cursor = position
def getCursor(self):
"Returns the value of the cursor"
return self.cursor
def moveCursor(self, relative_position=0):
"Moves the cursor to a new relative position"
self.cursor += relative_position
# native data reading functions
def nextSlice(self, length, move_cursor=1):
"Reads the next text slice from the raw data, with length"
c = self.cursor
slc = self.data[c:c+length]
if move_cursor:
self.moveCursor(length)
return slc
def readBew(self, n_bytes=1, move_cursor=1):
"""
        Reads n bytes of data from the current cursor position.
Moves cursor if move_cursor is true
"""
return readBew(self.nextSlice(n_bytes, move_cursor))
def readVarLen(self):
"""
Reads a variable length value from the current cursor position.
        The cursor is advanced past the bytes actually consumed.
"""
        MAX_VARLEN = 4 # maximum number of bytes a variable-length value can span
var = readVar(self.nextSlice(MAX_VARLEN, 0))
# only move cursor the actual bytes in varlen
self.moveCursor(varLen(var))
return var
if __name__ == '__main__':
test_file = 'test/midifiles/minimal.mid'
fis = RawInstreamFile(test_file)
print fis.nextSlice(len(fis.data))
test_file = 'test/midifiles/cubase-minimal.mid'
cubase_minimal = open(test_file, 'rb')
fis2 = RawInstreamFile(cubase_minimal)
print fis2.nextSlice(len(fis2.data))
cubase_minimal.close()
|
mindnervestech/mnrp
|
refs/heads/master
|
addons/crm/report/__init__.py
|
313
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import crm_lead_report
import crm_opportunity_report
import crm_phonecall_report
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
monikagrabowska/osf.io
|
refs/heads/develop
|
api/preprints/__init__.py
|
12133432
| |
saurabh6790/medsyn-app
|
refs/heads/master
|
manufacturing/report/production_orders_in_progress/__init__.py
|
12133432
| |
suyashphadtare/gd-erp
|
refs/heads/develop
|
erpnext/accounts/report/payment_period_based_on_invoice_date/__init__.py
|
12133432
| |
beneckart/future-robotics
|
refs/heads/master
|
Marquee/layouts/make_grid.py
|
1
|
import json
pitch = 5.0/25.0
rows = 20
cols = 25
xoffset = 2.5
yoffset = 2.5*rows/cols
pixels = []
xpos, ypos = 0, 0
for i, r in enumerate(range(rows)):
for j, c in enumerate(range(cols)):
fwd = i % 2 == 0
if fwd:
xpos = c*pitch
ypos = r*pitch
else:
xpos = (cols-c-1)*pitch
ypos = r*pitch
pixels.append({"point":[xpos-xoffset, 0, ypos-yoffset]})
# Write the layout as valid JSON; str(pixels) would produce Python repr with
# single quotes, which strict JSON parsers reject.
with open('marquee.json', 'w') as f:
    json.dump(pixels, f)
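# Optional sanity check (a sketch, not in the original script): with the
# serpentine ordering above, the last pixel of one row and the first pixel of
# the next row share the same x coordinate, so indices that are adjacent in
# the data stream stay physically adjacent on the grid.
assert pixels[cols - 1]["point"][0] == pixels[cols]["point"][0]
assert pixels[0]["point"][0] == pixels[2 * cols - 1]["point"][0]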
|
RuudBurger/CouchPotatoServer
|
refs/heads/master
|
libs/tornado/ioloop.py
|
65
|
#!/usr/bin/env python
#
# Copyright 2009 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""An I/O event loop for non-blocking sockets.
Typical applications will use a single `IOLoop` object, in the
`IOLoop.instance` singleton. The `IOLoop.start` method should usually
be called at the end of the ``main()`` function. Atypical applications may
use more than one `IOLoop`, such as one `IOLoop` per thread, or per `unittest`
case.
In addition to I/O events, the `IOLoop` can also schedule time-based events.
`IOLoop.add_timeout` is a non-blocking alternative to `time.sleep`.
"""
from __future__ import absolute_import, division, print_function, with_statement
import datetime
import errno
import functools
import heapq
import itertools
import logging
import numbers
import os
import select
import sys
import threading
import time
import traceback
from tornado.concurrent import TracebackFuture, is_future
from tornado.log import app_log, gen_log
from tornado import stack_context
from tornado.util import Configurable, errno_from_exception, timedelta_to_seconds
try:
import signal
except ImportError:
signal = None
try:
import thread # py2
except ImportError:
import _thread as thread # py3
from tornado.platform.auto import set_close_exec, Waker
_POLL_TIMEOUT = 3600.0
class TimeoutError(Exception):
pass
class IOLoop(Configurable):
"""A level-triggered I/O loop.
We use ``epoll`` (Linux) or ``kqueue`` (BSD and Mac OS X) if they
are available, or else we fall back on select(). If you are
implementing a system that needs to handle thousands of
simultaneous connections, you should use a system that supports
either ``epoll`` or ``kqueue``.
Example usage for a simple TCP server::
import errno
import functools
import ioloop
import socket
def connection_ready(sock, fd, events):
while True:
try:
connection, address = sock.accept()
except socket.error, e:
if e.args[0] not in (errno.EWOULDBLOCK, errno.EAGAIN):
raise
return
connection.setblocking(0)
handle_connection(connection, address)
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.setblocking(0)
sock.bind(("", port))
sock.listen(128)
io_loop = ioloop.IOLoop.instance()
callback = functools.partial(connection_ready, sock)
io_loop.add_handler(sock.fileno(), callback, io_loop.READ)
io_loop.start()
"""
# Constants from the epoll module
_EPOLLIN = 0x001
_EPOLLPRI = 0x002
_EPOLLOUT = 0x004
_EPOLLERR = 0x008
_EPOLLHUP = 0x010
_EPOLLRDHUP = 0x2000
_EPOLLONESHOT = (1 << 30)
_EPOLLET = (1 << 31)
# Our events map exactly to the epoll events
NONE = 0
READ = _EPOLLIN
WRITE = _EPOLLOUT
ERROR = _EPOLLERR | _EPOLLHUP
# Global lock for creating global IOLoop instance
_instance_lock = threading.Lock()
_current = threading.local()
@staticmethod
def instance():
"""Returns a global `IOLoop` instance.
Most applications have a single, global `IOLoop` running on the
main thread. Use this method to get this instance from
another thread. To get the current thread's `IOLoop`, use `current()`.
"""
if not hasattr(IOLoop, "_instance"):
with IOLoop._instance_lock:
if not hasattr(IOLoop, "_instance"):
# New instance after double check
IOLoop._instance = IOLoop()
return IOLoop._instance
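# Illustrative cross-thread usage (a sketch; ``do_work`` and ``on_result`` are
# hypothetical application callables, not part of this module). A worker
# thread hands its result back to the main loop via ``add_callback``, the
# only IOLoop method that is thread-safe:
#
#     def worker():
#         result = do_work()
#         IOLoop.instance().add_callback(on_result, result)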
@staticmethod
def initialized():
"""Returns true if the singleton instance has been created."""
return hasattr(IOLoop, "_instance")
def install(self):
"""Installs this `IOLoop` object as the singleton instance.
This is normally not necessary as `instance()` will create
an `IOLoop` on demand, but you may want to call `install` to use
a custom subclass of `IOLoop`.
"""
assert not IOLoop.initialized()
IOLoop._instance = self
@staticmethod
def clear_instance():
"""Clear the global `IOLoop` instance.
.. versionadded:: 4.0
"""
if hasattr(IOLoop, "_instance"):
del IOLoop._instance
@staticmethod
def current(instance=True):
"""Returns the current thread's `IOLoop`.
If an `IOLoop` is currently running or has been marked as
current by `make_current`, returns that instance. If there is
no current `IOLoop`, returns `IOLoop.instance()` (i.e. the
main thread's `IOLoop`, creating one if necessary) if ``instance``
is true.
In general you should use `IOLoop.current` as the default when
constructing an asynchronous object, and use `IOLoop.instance`
when you mean to communicate to the main thread from a different
one.
.. versionchanged:: 4.1
Added ``instance`` argument to control the fallback to `IOLoop.instance()`.
"""
current = getattr(IOLoop._current, "instance", None)
if current is None and instance:
return IOLoop.instance()
return current
def make_current(self):
"""Makes this the `IOLoop` for the current thread.
An `IOLoop` automatically becomes current for its thread
when it is started, but it is sometimes useful to call
`make_current` explicitly before starting the `IOLoop`,
so that code run at startup time can find the right
instance.
.. versionchanged:: 4.1
An `IOLoop` created while there is no current `IOLoop`
will automatically become current.
"""
IOLoop._current.instance = self
@staticmethod
def clear_current():
IOLoop._current.instance = None
@classmethod
def configurable_base(cls):
return IOLoop
@classmethod
def configurable_default(cls):
if hasattr(select, "epoll"):
from tornado.platform.epoll import EPollIOLoop
return EPollIOLoop
if hasattr(select, "kqueue"):
# Python 2.6+ on BSD or Mac
from tornado.platform.kqueue import KQueueIOLoop
return KQueueIOLoop
from tornado.platform.select import SelectIOLoop
return SelectIOLoop
def initialize(self):
if IOLoop.current(instance=False) is None:
self.make_current()
def close(self, all_fds=False):
"""Closes the `IOLoop`, freeing any resources used.
If ``all_fds`` is true, all file descriptors registered on the
IOLoop will be closed (not just the ones created by the
`IOLoop` itself).
Many applications will only use a single `IOLoop` that runs for the
entire lifetime of the process. In that case closing the `IOLoop`
is not necessary since everything will be cleaned up when the
process exits. `IOLoop.close` is provided mainly for scenarios
such as unit tests, which create and destroy a large number of
``IOLoops``.
An `IOLoop` must be completely stopped before it can be closed. This
means that `IOLoop.stop()` must be called *and* `IOLoop.start()` must
be allowed to return before attempting to call `IOLoop.close()`.
Therefore the call to `close` will usually appear just after
the call to `start` rather than near the call to `stop`.
.. versionchanged:: 3.1
If the `IOLoop` implementation supports non-integer objects
for "file descriptors", those objects will have their
``close`` method called when ``all_fds`` is true.
"""
raise NotImplementedError()
def add_handler(self, fd, handler, events):
"""Registers the given handler to receive the given events for ``fd``.
The ``fd`` argument may either be an integer file descriptor or
a file-like object with a ``fileno()`` method (and optionally a
``close()`` method, which may be called when the `IOLoop` is shut
down).
The ``events`` argument is a bitwise or of the constants
``IOLoop.READ``, ``IOLoop.WRITE``, and ``IOLoop.ERROR``.
When an event occurs, ``handler(fd, events)`` will be run.
.. versionchanged:: 4.0
Added the ability to pass file-like objects in addition to
raw file descriptors.
"""
raise NotImplementedError()
def update_handler(self, fd, events):
"""Changes the events we listen for ``fd``.
.. versionchanged:: 4.0
Added the ability to pass file-like objects in addition to
raw file descriptors.
"""
raise NotImplementedError()
def remove_handler(self, fd):
"""Stop listening for events on ``fd``.
.. versionchanged:: 4.0
Added the ability to pass file-like objects in addition to
raw file descriptors.
"""
raise NotImplementedError()
def set_blocking_signal_threshold(self, seconds, action):
"""Sends a signal if the `IOLoop` is blocked for more than
``seconds`` seconds.
Pass ``seconds=None`` to disable. Requires Python 2.6 on a unixy
platform.
The action parameter is a Python signal handler. Read the
documentation for the `signal` module for more information.
If ``action`` is None, the process will be killed if it is
blocked for too long.
"""
raise NotImplementedError()
def set_blocking_log_threshold(self, seconds):
"""Logs a stack trace if the `IOLoop` is blocked for more than
``seconds`` seconds.
Equivalent to ``set_blocking_signal_threshold(seconds,
self.log_stack)``
"""
self.set_blocking_signal_threshold(seconds, self.log_stack)
def log_stack(self, signal, frame):
"""Signal handler to log the stack trace of the current thread.
For use with `set_blocking_signal_threshold`.
"""
gen_log.warning('IOLoop blocked for %f seconds in\n%s',
self._blocking_signal_threshold,
''.join(traceback.format_stack(frame)))
def start(self):
"""Starts the I/O loop.
The loop will run until one of the callbacks calls `stop()`, which
will make the loop stop after the current event iteration completes.
"""
raise NotImplementedError()
def _setup_logging(self):
"""The IOLoop catches and logs exceptions, so it's
important that log output be visible. However, python's
default behavior for non-root loggers (prior to python
3.2) is to print an unhelpful "no handlers could be
found" message rather than the actual log entry, so we
must explicitly configure logging if we've made it this
far without anything.
This method should be called from start() in subclasses.
"""
if not any([logging.getLogger().handlers,
logging.getLogger('tornado').handlers,
logging.getLogger('tornado.application').handlers]):
logging.basicConfig()
def stop(self):
"""Stop the I/O loop.
If the event loop is not currently running, the next call to `start()`
will return immediately.
To use asynchronous methods from otherwise-synchronous code (such as
unit tests), you can start and stop the event loop like this::
ioloop = IOLoop()
async_method(ioloop=ioloop, callback=ioloop.stop)
ioloop.start()
``ioloop.start()`` will return after ``async_method`` has run
its callback, whether that callback was invoked before or
after ``ioloop.start``.
Note that even after `stop` has been called, the `IOLoop` is not
completely stopped until `IOLoop.start` has also returned.
Some work that was scheduled before the call to `stop` may still
be run before the `IOLoop` shuts down.
"""
raise NotImplementedError()
def run_sync(self, func, timeout=None):
"""Starts the `IOLoop`, runs the given function, and stops the loop.
If the function returns a `.Future`, the `IOLoop` will run
until the future is resolved. If it raises an exception, the
`IOLoop` will stop and the exception will be re-raised to the
caller.
The keyword-only argument ``timeout`` may be used to set
a maximum duration for the function. If the timeout expires,
a `TimeoutError` is raised.
This method is useful in conjunction with `tornado.gen.coroutine`
to allow asynchronous calls in a ``main()`` function::
@gen.coroutine
def main():
# do stuff...
if __name__ == '__main__':
IOLoop.instance().run_sync(main)
"""
future_cell = [None]
def run():
try:
result = func()
except Exception:
future_cell[0] = TracebackFuture()
future_cell[0].set_exc_info(sys.exc_info())
else:
if is_future(result):
future_cell[0] = result
else:
future_cell[0] = TracebackFuture()
future_cell[0].set_result(result)
self.add_future(future_cell[0], lambda future: self.stop())
self.add_callback(run)
if timeout is not None:
timeout_handle = self.add_timeout(self.time() + timeout, self.stop)
self.start()
if timeout is not None:
self.remove_timeout(timeout_handle)
if not future_cell[0].done():
raise TimeoutError('Operation timed out after %s seconds' % timeout)
return future_cell[0].result()
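# Illustrative run_sync usage with a timeout (a sketch; ``fetch_status`` is a
# hypothetical coroutine, not defined in this module):
#
#     from tornado import gen
#
#     @gen.coroutine
#     def fetch_status():
#         raise gen.Return(200)
#
#     status = IOLoop.instance().run_sync(fetch_status, timeout=5)
#     # raises TimeoutError if fetch_status does not finish within 5 seconds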
def time(self):
"""Returns the current time according to the `IOLoop`'s clock.
The return value is a floating-point number relative to an
unspecified time in the past.
By default, the `IOLoop`'s time function is `time.time`. However,
it may be configured to use e.g. `time.monotonic` instead.
Calls to `add_timeout` that pass a number instead of a
`datetime.timedelta` should use this function to compute the
appropriate time, so they can work no matter what time function
is chosen.
"""
return time.time()
def add_timeout(self, deadline, callback, *args, **kwargs):
"""Runs the ``callback`` at the time ``deadline`` from the I/O loop.
Returns an opaque handle that may be passed to
`remove_timeout` to cancel.
``deadline`` may be a number denoting a time (on the same
scale as `IOLoop.time`, normally `time.time`), or a
`datetime.timedelta` object for a deadline relative to the
current time. Since Tornado 4.0, `call_later` is a more
convenient alternative for the relative case since it does not
require a timedelta object.
Note that it is not safe to call `add_timeout` from other threads.
Instead, you must use `add_callback` to transfer control to the
`IOLoop`'s thread, and then call `add_timeout` from there.
Subclasses of IOLoop must implement either `add_timeout` or
`call_at`; the default implementations of each will call
the other. `call_at` is usually easier to implement, but
subclasses that wish to maintain compatibility with Tornado
versions prior to 4.0 must use `add_timeout` instead.
.. versionchanged:: 4.0
Now passes through ``*args`` and ``**kwargs`` to the callback.
"""
if isinstance(deadline, numbers.Real):
return self.call_at(deadline, callback, *args, **kwargs)
elif isinstance(deadline, datetime.timedelta):
return self.call_at(self.time() + timedelta_to_seconds(deadline),
callback, *args, **kwargs)
else:
raise TypeError("Unsupported deadline %r" % deadline)
def call_later(self, delay, callback, *args, **kwargs):
"""Runs the ``callback`` after ``delay`` seconds have passed.
Returns an opaque handle that may be passed to `remove_timeout`
to cancel. Note that unlike the `asyncio` method of the same
name, the returned object does not have a ``cancel()`` method.
See `add_timeout` for comments on thread-safety and subclassing.
.. versionadded:: 4.0
"""
return self.call_at(self.time() + delay, callback, *args, **kwargs)
def call_at(self, when, callback, *args, **kwargs):
"""Runs the ``callback`` at the absolute time designated by ``when``.
``when`` must be a number using the same reference point as
`IOLoop.time`.
Returns an opaque handle that may be passed to `remove_timeout`
to cancel. Note that unlike the `asyncio` method of the same
name, the returned object does not have a ``cancel()`` method.
See `add_timeout` for comments on thread-safety and subclassing.
.. versionadded:: 4.0
"""
return self.add_timeout(when, callback, *args, **kwargs)
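# The scheduling entry points side by side (a sketch; ``cb`` is a hypothetical
# callable). Each call runs ``cb`` roughly ten seconds from now and returns a
# handle that can be passed to ``remove_timeout``:
#
#     io_loop = IOLoop.current()
#     h1 = io_loop.add_timeout(io_loop.time() + 10, cb)
#     h2 = io_loop.add_timeout(datetime.timedelta(seconds=10), cb)
#     h3 = io_loop.call_later(10, cb)
#     h4 = io_loop.call_at(io_loop.time() + 10, cb)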
def remove_timeout(self, timeout):
"""Cancels a pending timeout.
The argument is a handle as returned by `add_timeout`. It is
safe to call `remove_timeout` even if the callback has already
been run.
"""
raise NotImplementedError()
def add_callback(self, callback, *args, **kwargs):
"""Calls the given callback on the next I/O loop iteration.
It is safe to call this method from any thread at any time,
except from a signal handler. Note that this is the **only**
method in `IOLoop` that makes this thread-safety guarantee; all
other interaction with the `IOLoop` must be done from that
`IOLoop`'s thread. `add_callback()` may be used to transfer
control from other threads to the `IOLoop`'s thread.
To add a callback from a signal handler, see
`add_callback_from_signal`.
"""
raise NotImplementedError()
def add_callback_from_signal(self, callback, *args, **kwargs):
"""Calls the given callback on the next I/O loop iteration.
Safe for use from a Python signal handler; should not be used
otherwise.
Callbacks added with this method will be run without any
`.stack_context`, to avoid picking up the context of the function
that was interrupted by the signal.
"""
raise NotImplementedError()
def spawn_callback(self, callback, *args, **kwargs):
"""Calls the given callback on the next IOLoop iteration.
Unlike all other callback-related methods on IOLoop,
``spawn_callback`` does not associate the callback with its caller's
``stack_context``, so it is suitable for fire-and-forget callbacks
that should not interfere with the caller.
.. versionadded:: 4.0
"""
with stack_context.NullContext():
self.add_callback(callback, *args, **kwargs)
def add_future(self, future, callback):
"""Schedules a callback on the ``IOLoop`` when the given
`.Future` is finished.
The callback is invoked with one argument, the
`.Future`.
"""
assert is_future(future)
callback = stack_context.wrap(callback)
future.add_done_callback(
lambda future: self.add_callback(callback, future))
def _run_callback(self, callback):
"""Runs a callback with error handling.
For use in subclasses.
"""
try:
ret = callback()
if ret is not None and is_future(ret):
# Functions that return Futures typically swallow all
# exceptions and store them in the Future. If a Future
# makes it out to the IOLoop, ensure its exception (if any)
# gets logged too.
self.add_future(ret, lambda f: f.result())
except Exception:
self.handle_callback_exception(callback)
def handle_callback_exception(self, callback):
"""This method is called whenever a callback run by the `IOLoop`
throws an exception.
By default simply logs the exception as an error. Subclasses
may override this method to customize reporting of exceptions.
The exception itself is not passed explicitly, but is available
in `sys.exc_info`.
"""
app_log.error("Exception in callback %r", callback, exc_info=True)
def split_fd(self, fd):
"""Returns an (fd, obj) pair from an ``fd`` parameter.
We accept both raw file descriptors and file-like objects as
input to `add_handler` and related methods. When a file-like
object is passed, we must retain the object itself so we can
close it correctly when the `IOLoop` shuts down, but the
poller interfaces favor file descriptors (they will accept
file-like objects and call ``fileno()`` for you, but they
always return the descriptor itself).
This method is provided for use by `IOLoop` subclasses and should
not generally be used by application code.
.. versionadded:: 4.0
"""
try:
return fd.fileno(), fd
except AttributeError:
return fd, fd
def close_fd(self, fd):
"""Utility method to close an ``fd``.
If ``fd`` is a file-like object, we close it directly; otherwise
we use `os.close`.
This method is provided for use by `IOLoop` subclasses (in
implementations of ``IOLoop.close(all_fds=True)``) and should
not generally be used by application code.
.. versionadded:: 4.0
"""
try:
try:
fd.close()
except AttributeError:
os.close(fd)
except OSError:
pass
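# Behaviour of split_fd in the two supported cases (a sketch; ``sock`` is a
# hypothetical socket object):
#
#     loop = IOLoop.current()
#     loop.split_fd(sock) # -> (sock.fileno(), sock): keep the object for close_fd
#     loop.split_fd(7) # -> (7, 7): plain integers pass through unchanged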
class PollIOLoop(IOLoop):
"""Base class for IOLoops built around a select-like function.
For concrete implementations, see `tornado.platform.epoll.EPollIOLoop`
(Linux), `tornado.platform.kqueue.KQueueIOLoop` (BSD and Mac), or
`tornado.platform.select.SelectIOLoop` (all platforms).
"""
def initialize(self, impl, time_func=None):
super(PollIOLoop, self).initialize()
self._impl = impl
if hasattr(self._impl, 'fileno'):
set_close_exec(self._impl.fileno())
self.time_func = time_func or time.time
self._handlers = {}
self._events = {}
self._callbacks = []
self._callback_lock = threading.Lock()
self._timeouts = []
self._cancellations = 0
self._running = False
self._stopped = False
self._closing = False
self._thread_ident = None
self._blocking_signal_threshold = None
self._timeout_counter = itertools.count()
# Create a pipe that we send bogus data to when we want to wake
# the I/O loop when it is idle
self._waker = Waker()
self.add_handler(self._waker.fileno(),
lambda fd, events: self._waker.consume(),
self.READ)
def close(self, all_fds=False):
with self._callback_lock:
self._closing = True
self.remove_handler(self._waker.fileno())
if all_fds:
for fd, handler in self._handlers.values():
self.close_fd(fd)
self._waker.close()
self._impl.close()
self._callbacks = None
self._timeouts = None
def add_handler(self, fd, handler, events):
fd, obj = self.split_fd(fd)
self._handlers[fd] = (obj, stack_context.wrap(handler))
self._impl.register(fd, events | self.ERROR)
def update_handler(self, fd, events):
fd, obj = self.split_fd(fd)
self._impl.modify(fd, events | self.ERROR)
def remove_handler(self, fd):
fd, obj = self.split_fd(fd)
self._handlers.pop(fd, None)
self._events.pop(fd, None)
try:
self._impl.unregister(fd)
except Exception:
gen_log.debug("Error deleting fd from IOLoop", exc_info=True)
def set_blocking_signal_threshold(self, seconds, action):
if not hasattr(signal, "setitimer"):
gen_log.error("set_blocking_signal_threshold requires a signal module "
"with the setitimer method")
return
self._blocking_signal_threshold = seconds
if seconds is not None:
signal.signal(signal.SIGALRM,
action if action is not None else signal.SIG_DFL)
def start(self):
if self._running:
raise RuntimeError("IOLoop is already running")
self._setup_logging()
if self._stopped:
self._stopped = False
return
old_current = getattr(IOLoop._current, "instance", None)
IOLoop._current.instance = self
self._thread_ident = thread.get_ident()
self._running = True
# signal.set_wakeup_fd closes a race condition in event loops:
# a signal may arrive at the beginning of select/poll/etc
# before it goes into its interruptible sleep, so the signal
# will be consumed without waking the select. The solution is
# for the (C, synchronous) signal handler to write to a pipe,
# which will then be seen by select.
#
# In python's signal handling semantics, this only matters on the
# main thread (fortunately, set_wakeup_fd only works on the main
# thread and will raise a ValueError otherwise).
#
# If someone has already set a wakeup fd, we don't want to
# disturb it. This is an issue for twisted, which does its
# SIGCHLD processing in response to its own wakeup fd being
# written to. As long as the wakeup fd is registered on the IOLoop,
# the loop will still wake up and everything should work.
old_wakeup_fd = None
if hasattr(signal, 'set_wakeup_fd') and os.name == 'posix':
# requires python 2.6+, unix. set_wakeup_fd exists but crashes
# the python process on windows.
try:
old_wakeup_fd = signal.set_wakeup_fd(self._waker.write_fileno())
if old_wakeup_fd != -1:
# Already set, restore previous value. This is a little racy,
# but there's no clean get_wakeup_fd and in real use the
# IOLoop is just started once at the beginning.
signal.set_wakeup_fd(old_wakeup_fd)
old_wakeup_fd = None
except ValueError: # non-main thread
pass
try:
while True:
# Prevent IO event starvation by delaying new callbacks
# to the next iteration of the event loop.
with self._callback_lock:
callbacks = self._callbacks
self._callbacks = []
# Add any timeouts that have come due to the callback list.
# Do not run anything until we have determined which ones
# are ready, so timeouts that call add_timeout cannot
# schedule anything in this iteration.
due_timeouts = []
if self._timeouts:
now = self.time()
while self._timeouts:
if self._timeouts[0].callback is None:
# The timeout was cancelled. Note that the
# cancellation check is repeated below for timeouts
# that are cancelled by another timeout or callback.
heapq.heappop(self._timeouts)
self._cancellations -= 1
elif self._timeouts[0].deadline <= now:
due_timeouts.append(heapq.heappop(self._timeouts))
else:
break
if (self._cancellations > 512
and self._cancellations > (len(self._timeouts) >> 1)):
# Clean up the timeout queue when it gets large and it's
# more than half cancellations.
self._cancellations = 0
self._timeouts = [x for x in self._timeouts
if x.callback is not None]
heapq.heapify(self._timeouts)
for callback in callbacks:
self._run_callback(callback)
for timeout in due_timeouts:
if timeout.callback is not None:
self._run_callback(timeout.callback)
# Closures may be holding on to a lot of memory, so allow
# them to be freed before we go into our poll wait.
callbacks = callback = due_timeouts = timeout = None
if self._callbacks:
# If any callbacks or timeouts called add_callback,
# we don't want to wait in poll() before we run them.
poll_timeout = 0.0
elif self._timeouts:
# If there are any timeouts, schedule the first one.
# Use self.time() instead of 'now' to account for time
# spent running callbacks.
poll_timeout = self._timeouts[0].deadline - self.time()
poll_timeout = max(0, min(poll_timeout, _POLL_TIMEOUT))
else:
# No timeouts and no callbacks, so use the default.
poll_timeout = _POLL_TIMEOUT
if not self._running:
break
if self._blocking_signal_threshold is not None:
# clear alarm so it doesn't fire while poll is waiting for
# events.
signal.setitimer(signal.ITIMER_REAL, 0, 0)
try:
event_pairs = self._impl.poll(poll_timeout)
except Exception as e:
# Depending on python version and IOLoop implementation,
# different exception types may be thrown and there are
# two ways EINTR might be signaled:
# * e.errno == errno.EINTR
# * e.args is like (errno.EINTR, 'Interrupted system call')
if errno_from_exception(e) == errno.EINTR:
continue
else:
raise
if self._blocking_signal_threshold is not None:
signal.setitimer(signal.ITIMER_REAL,
self._blocking_signal_threshold, 0)
# Pop one fd at a time from the set of pending fds and run
# its handler. Since that handler may perform actions on
# other file descriptors, there may be reentrant calls to
# this IOLoop that update self._events
self._events.update(event_pairs)
while self._events:
fd, events = self._events.popitem()
try:
fd_obj, handler_func = self._handlers[fd]
handler_func(fd_obj, events)
except (OSError, IOError) as e:
if errno_from_exception(e) == errno.EPIPE:
# Happens when the client closes the connection
pass
else:
self.handle_callback_exception(self._handlers.get(fd))
except Exception:
self.handle_callback_exception(self._handlers.get(fd))
fd_obj = handler_func = None
finally:
# reset the stopped flag so another start/stop pair can be issued
self._stopped = False
if self._blocking_signal_threshold is not None:
signal.setitimer(signal.ITIMER_REAL, 0, 0)
IOLoop._current.instance = old_current
if old_wakeup_fd is not None:
signal.set_wakeup_fd(old_wakeup_fd)
def stop(self):
self._running = False
self._stopped = True
self._waker.wake()
def time(self):
return self.time_func()
def call_at(self, deadline, callback, *args, **kwargs):
timeout = _Timeout(
deadline,
functools.partial(stack_context.wrap(callback), *args, **kwargs),
self)
heapq.heappush(self._timeouts, timeout)
return timeout
def remove_timeout(self, timeout):
# Removing from a heap is complicated, so just leave the defunct
# timeout object in the queue (see discussion in
# http://docs.python.org/library/heapq.html).
# If this turns out to be a problem, we could add a garbage
# collection pass whenever there are too many dead timeouts.
timeout.callback = None
self._cancellations += 1
def add_callback(self, callback, *args, **kwargs):
with self._callback_lock:
if self._closing:
raise RuntimeError("IOLoop is closing")
list_empty = not self._callbacks
self._callbacks.append(functools.partial(
stack_context.wrap(callback), *args, **kwargs))
if list_empty and thread.get_ident() != self._thread_ident:
# If we're in the IOLoop's thread, we know it's not currently
# polling. If we're not, and we added the first callback to an
# empty list, we may need to wake it up (it may wake up on its
# own, but an occasional extra wake is harmless). Waking
# up a polling IOLoop is relatively expensive, so we try to
# avoid it when we can.
self._waker.wake()
def add_callback_from_signal(self, callback, *args, **kwargs):
with stack_context.NullContext():
if thread.get_ident() != self._thread_ident:
# if the signal is handled on another thread, we can add
# it normally (modulo the NullContext)
self.add_callback(callback, *args, **kwargs)
else:
# If we're on the IOLoop's thread, we cannot use
# the regular add_callback because it may deadlock on
# _callback_lock. Blindly insert into self._callbacks.
# This is safe because the GIL makes list.append atomic.
# One subtlety is that if the signal interrupted the
# _callback_lock block in IOLoop.start, we may modify
# either the old or new version of self._callbacks,
# but either way will work.
self._callbacks.append(functools.partial(
stack_context.wrap(callback), *args, **kwargs))
class _Timeout(object):
"""An IOLoop timeout, a UNIX timestamp and a callback"""
# Reduce memory overhead when there are lots of pending callbacks
__slots__ = ['deadline', 'callback', 'tiebreaker']
def __init__(self, deadline, callback, io_loop):
if not isinstance(deadline, numbers.Real):
raise TypeError("Unsupported deadline %r" % deadline)
self.deadline = deadline
self.callback = callback
self.tiebreaker = next(io_loop._timeout_counter)
# Comparison methods to sort by deadline, with object id as a tiebreaker
# to guarantee a consistent ordering. The heapq module uses __le__
# in python2.5, and __lt__ in 2.6+ (sort() and most other comparisons
# use __lt__).
def __lt__(self, other):
return ((self.deadline, self.tiebreaker) <
(other.deadline, other.tiebreaker))
def __le__(self, other):
return ((self.deadline, self.tiebreaker) <=
(other.deadline, other.tiebreaker))
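# Ordering sketch (``callback_a`` and ``callback_b`` are hypothetical): two
# timeouts with equal deadlines compare by the monotonically increasing
# tiebreaker, so heapq pops them in creation order and never has to compare
# the callback objects themselves:
#
#     loop = IOLoop.current()
#     a = _Timeout(5.0, callback_a, loop) # gets tiebreaker N
#     b = _Timeout(5.0, callback_b, loop) # gets tiebreaker N + 1
#     a < b # True: same deadline, a was scheduled first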
class PeriodicCallback(object):
"""Schedules the given callback to be called periodically.
The callback is called every ``callback_time`` milliseconds.
`start` must be called after the `PeriodicCallback` is created.
.. versionchanged:: 4.1
The ``io_loop`` argument is deprecated.
"""
def __init__(self, callback, callback_time, io_loop=None):
self.callback = callback
if callback_time <= 0:
raise ValueError("Periodic callback must have a positive callback_time")
self.callback_time = callback_time
self.io_loop = io_loop or IOLoop.current()
self._running = False
self._timeout = None
def start(self):
"""Starts the timer."""
self._running = True
self._next_timeout = self.io_loop.time()
self._schedule_next()
def stop(self):
"""Stops the timer."""
self._running = False
if self._timeout is not None:
self.io_loop.remove_timeout(self._timeout)
self._timeout = None
def is_running(self):
"""Return True if this `.PeriodicCallback` has been started.
.. versionadded:: 4.1
"""
return self._running
def _run(self):
if not self._running:
return
try:
return self.callback()
except Exception:
self.io_loop.handle_callback_exception(self.callback)
finally:
self._schedule_next()
def _schedule_next(self):
if self._running:
current_time = self.io_loop.time()
while self._next_timeout <= current_time:
self._next_timeout += self.callback_time / 1000.0
self._timeout = self.io_loop.add_timeout(self._next_timeout, self._run)
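# Worked example of the skip-ahead loop in _schedule_next (a sketch): with
# callback_time=1000 ms and _next_timeout at t=10.0, a callback that does not
# finish until t=12.7 advances _next_timeout 10.0 -> 11.0 -> 12.0 -> 13.0, so
# the next run is scheduled for t=13.0. Missed ticks are dropped instead of
# fired in a burst, and the schedule stays aligned to the original start time.
#
#     pc = PeriodicCallback(poll_sensors, 1000) # poll_sensors: hypothetical callable
#     pc.start()
#     IOLoop.instance().start()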
|
capchu/TextRPGOnline
|
refs/heads/master
|
rpgonline/env/lib/python2.7/site-packages/pip/req.py
|
74
|
from email.parser import FeedParser
import os
import imp
import pkg_resources
import re
import sys
import shutil
import tempfile
import textwrap
import zipfile
from distutils.util import change_root
from pip.locations import (bin_py, running_under_virtualenv, PIP_DELETE_MARKER_FILENAME,
write_delete_marker_file)
from pip.exceptions import (InstallationError, UninstallationError,
BestVersionAlreadyInstalled,
DistributionNotFound, PreviousBuildDirError)
from pip.vcs import vcs
from pip.log import logger
from pip.util import (display_path, rmtree, ask, ask_path_exists, backup_dir,
is_installable_dir, is_local, dist_is_local,
dist_in_usersite, dist_in_site_packages, renames,
normalize_path, egg_link_path, make_path_relative,
call_subprocess, is_prerelease, normalize_name)
from pip.backwardcompat import (urlparse, urllib, uses_pycache,
ConfigParser, string_types, HTTPError,
get_python_version, b)
from pip.index import Link
from pip.locations import build_prefix
from pip.download import (get_file_content, is_url, url_to_path,
path_to_url, is_archive_file,
unpack_vcs_link, is_vcs_url, is_file_url,
unpack_file_url, unpack_http_url)
import pip.wheel
from pip.wheel import move_wheel_files
class InstallRequirement(object):
def __init__(self, req, comes_from, source_dir=None, editable=False,
url=None, as_egg=False, update=True, prereleases=None,
from_bundle=False):
self.extras = ()
if isinstance(req, string_types):
req = pkg_resources.Requirement.parse(req)
self.extras = req.extras
self.req = req
self.comes_from = comes_from
self.source_dir = source_dir
self.editable = editable
self.url = url
self.as_egg = as_egg
self._egg_info_path = None
# This holds the pkg_resources.Distribution object if this requirement
# is already available:
self.satisfied_by = None
# This holds the pkg_resources.Distribution object if this requirement
# conflicts with another installed distribution:
self.conflicts_with = None
self._temp_build_dir = None
self._is_bundle = None
# True if the editable should be updated:
self.update = update
# Set to True after successful installation
self.install_succeeded = None
# UninstallPathSet of uninstalled distribution (for possible rollback)
self.uninstalled = None
self.use_user_site = False
self.target_dir = None
self.from_bundle = from_bundle
# True if pre-releases are acceptable
if prereleases:
self.prereleases = True
elif self.req is not None:
self.prereleases = any([is_prerelease(x[1]) and x[0] != "!=" for x in self.req.specs])
else:
self.prereleases = False
@classmethod
def from_editable(cls, editable_req, comes_from=None, default_vcs=None):
name, url, extras_override = parse_editable(editable_req, default_vcs)
if url.startswith('file:'):
source_dir = url_to_path(url)
else:
source_dir = None
res = cls(name, comes_from, source_dir=source_dir, editable=True, url=url, prereleases=True)
if extras_override is not None:
res.extras = extras_override
return res
@classmethod
def from_line(cls, name, comes_from=None, prereleases=None):
"""Creates an InstallRequirement from a name, which might be a
requirement, directory containing 'setup.py', filename, or URL.
"""
url = None
name = name.strip()
req = None
path = os.path.normpath(os.path.abspath(name))
link = None
if is_url(name):
link = Link(name)
elif os.path.isdir(path) and (os.path.sep in name or name.startswith('.')):
if not is_installable_dir(path):
raise InstallationError("Directory %r is not installable. File 'setup.py' not found." % name)
link = Link(path_to_url(name))
elif is_archive_file(path):
if not os.path.isfile(path):
logger.warn('Requirement %r looks like a filename, but the file does not exist', name)
link = Link(path_to_url(name))
# If the line has an egg= definition, but isn't editable, pull the requirement out.
# Otherwise, assume the name is the req for the non URL/path/archive case.
if link and req is None:
url = link.url_without_fragment
req = link.egg_fragment #when fragment is None, this will become an 'unnamed' requirement
# Handle relative file URLs
if link.scheme == 'file' and re.search(r'\.\./', url):
url = path_to_url(os.path.normpath(os.path.abspath(link.path)))
else:
req = name
return cls(req, comes_from, url=url, prereleases=prereleases)
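# Illustrative from_line inputs (a sketch; the names, paths and URL below are
# hypothetical examples, not test fixtures):
#
#     InstallRequirement.from_line('requests>=1.2') # plain specifier
#     InstallRequirement.from_line('./my_pkg') # local dir containing setup.py
#     InstallRequirement.from_line('my_pkg-1.0.tar.gz') # local archive file
#     InstallRequirement.from_line('http://example.com/my_pkg-1.0.tar.gz#egg=my_pkg')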
def __str__(self):
if self.req:
s = str(self.req)
if self.url:
s += ' from %s' % self.url
else:
s = self.url
if self.satisfied_by is not None:
s += ' in %s' % display_path(self.satisfied_by.location)
if self.comes_from:
if isinstance(self.comes_from, string_types):
comes_from = self.comes_from
else:
comes_from = self.comes_from.from_path()
if comes_from:
s += ' (from %s)' % comes_from
return s
def from_path(self):
if self.req is None:
return None
s = str(self.req)
if self.comes_from:
if isinstance(self.comes_from, string_types):
comes_from = self.comes_from
else:
comes_from = self.comes_from.from_path()
if comes_from:
s += '->' + comes_from
return s
def build_location(self, build_dir, unpack=True):
if self._temp_build_dir is not None:
return self._temp_build_dir
if self.req is None:
self._temp_build_dir = tempfile.mkdtemp('-build', 'pip-')
self._ideal_build_dir = build_dir
return self._temp_build_dir
if self.editable:
name = self.name.lower()
else:
name = self.name
# FIXME: Is there a better place to create the build_dir? (hg and bzr need this)
if not os.path.exists(build_dir):
_make_build_dir(build_dir)
return os.path.join(build_dir, name)
def correct_build_location(self):
"""If the build location was a temporary directory, this will move it
to a new more permanent location"""
if self.source_dir is not None:
return
assert self.req is not None
assert self._temp_build_dir
old_location = self._temp_build_dir
new_build_dir = self._ideal_build_dir
del self._ideal_build_dir
if self.editable:
name = self.name.lower()
else:
name = self.name
new_location = os.path.join(new_build_dir, name)
if not os.path.exists(new_build_dir):
logger.debug('Creating directory %s' % new_build_dir)
_make_build_dir(new_build_dir)
if os.path.exists(new_location):
raise InstallationError(
'A package already exists in %s; please remove it to continue'
% display_path(new_location))
logger.debug('Moving package %s from %s to new location %s'
% (self, display_path(old_location), display_path(new_location)))
shutil.move(old_location, new_location)
self._temp_build_dir = new_location
self.source_dir = new_location
self._egg_info_path = None
@property
def name(self):
if self.req is None:
return None
return self.req.project_name
@property
def url_name(self):
if self.req is None:
return None
return urllib.quote(self.req.unsafe_name)
@property
def setup_py(self):
return os.path.join(self.source_dir, 'setup.py')
def run_egg_info(self, force_root_egg_info=False):
assert self.source_dir
if self.name:
logger.notify('Running setup.py egg_info for package %s' % self.name)
else:
logger.notify('Running setup.py egg_info for package from %s' % self.url)
logger.indent += 2
try:
# if it's distribute>=0.7, it won't contain an importable
# setuptools, and having an egg-info dir blocks the ability of
# setup.py to find setuptools plugins, so delete the egg-info dir if
# no setuptools. it will get recreated by the run of egg_info
# NOTE: this self.name check only works when installing from a specifier
# (not archive path/urls)
# TODO: take this out later
if self.name == 'distribute' and not os.path.isdir(os.path.join(self.source_dir, 'setuptools')):
rmtree(os.path.join(self.source_dir, 'distribute.egg-info'))
script = self._run_setup_py
script = script.replace('__SETUP_PY__', repr(self.setup_py))
script = script.replace('__PKG_NAME__', repr(self.name))
egg_info_cmd = [sys.executable, '-c', script, 'egg_info']
# We can't put the .egg-info files at the root, because then the source code will be mistaken
# for an installed egg, causing problems
if self.editable or force_root_egg_info:
egg_base_option = []
else:
egg_info_dir = os.path.join(self.source_dir, 'pip-egg-info')
if not os.path.exists(egg_info_dir):
os.makedirs(egg_info_dir)
egg_base_option = ['--egg-base', 'pip-egg-info']
call_subprocess(
egg_info_cmd + egg_base_option,
cwd=self.source_dir, filter_stdout=self._filter_install, show_stdout=False,
command_level=logger.VERBOSE_DEBUG,
command_desc='python setup.py egg_info')
finally:
logger.indent -= 2
if not self.req:
self.req = pkg_resources.Requirement.parse(
"%(Name)s==%(Version)s" % self.pkg_info())
self.correct_build_location()
## FIXME: This is a lame hack, entirely for PasteScript which has
## a self-provided entry point that causes this awkwardness
_run_setup_py = """
__file__ = __SETUP_PY__
from setuptools.command import egg_info
import pkg_resources
import os
def replacement_run(self):
self.mkpath(self.egg_info)
installer = self.distribution.fetch_build_egg
for ep in pkg_resources.iter_entry_points('egg_info.writers'):
# require=False is the change we're making:
writer = ep.load(require=False)
if writer:
writer(self, ep.name, os.path.join(self.egg_info,ep.name))
self.find_sources()
egg_info.egg_info.run = replacement_run
exec(compile(open(__file__).read().replace('\\r\\n', '\\n'), __file__, 'exec'))
"""
def egg_info_data(self, filename):
if self.satisfied_by is not None:
if not self.satisfied_by.has_metadata(filename):
return None
return self.satisfied_by.get_metadata(filename)
assert self.source_dir
filename = self.egg_info_path(filename)
if not os.path.exists(filename):
return None
fp = open(filename, 'r')
data = fp.read()
fp.close()
return data
def egg_info_path(self, filename):
if self._egg_info_path is None:
if self.editable:
base = self.source_dir
else:
base = os.path.join(self.source_dir, 'pip-egg-info')
filenames = os.listdir(base)
if self.editable:
filenames = []
for root, dirs, files in os.walk(base):
for dir in vcs.dirnames:
if dir in dirs:
dirs.remove(dir)
# Iterate over a copy of ``dirs``, since mutating
# a list while iterating over it can cause trouble.
# (See https://github.com/pypa/pip/pull/462.)
for dir in list(dirs):
# Don't search in anything that looks like a virtualenv environment
if (os.path.exists(os.path.join(root, dir, 'bin', 'python'))
or os.path.exists(os.path.join(root, dir, 'Scripts', 'Python.exe'))):
dirs.remove(dir)
# Also don't search through tests
if dir == 'test' or dir == 'tests':
dirs.remove(dir)
filenames.extend([os.path.join(root, dir)
for dir in dirs])
filenames = [f for f in filenames if f.endswith('.egg-info')]
if not filenames:
raise InstallationError('No files/directories in %s (from %s)' % (base, filename))
assert filenames, "No files/directories in %s (from %s)" % (base, filename)
# if we have more than one match, we pick the toplevel one. This can
# easily be the case if there is a dist folder which contains an
# extracted tarball for testing purposes.
if len(filenames) > 1:
filenames.sort(key=lambda x: x.count(os.path.sep) +
(os.path.altsep and
x.count(os.path.altsep) or 0))
self._egg_info_path = os.path.join(base, filenames[0])
return os.path.join(self._egg_info_path, filename)
def egg_info_lines(self, filename):
data = self.egg_info_data(filename)
if not data:
return []
result = []
for line in data.splitlines():
line = line.strip()
if not line or line.startswith('#'):
continue
result.append(line)
return result
def pkg_info(self):
p = FeedParser()
data = self.egg_info_data('PKG-INFO')
if not data:
logger.warn('No PKG-INFO file found in %s' % display_path(self.egg_info_path('PKG-INFO')))
p.feed(data or '')
return p.close()
@property
def dependency_links(self):
return self.egg_info_lines('dependency_links.txt')
_requirements_section_re = re.compile(r'\[(.*?)\]')
def requirements(self, extras=()):
in_extra = None
for line in self.egg_info_lines('requires.txt'):
match = self._requirements_section_re.match(line.lower())
if match:
in_extra = match.group(1)
continue
if in_extra and in_extra not in extras:
logger.debug('skipping extra %s' % in_extra)
# Skip requirement for an extra we aren't requiring
continue
yield line
@property
def absolute_versions(self):
for qualifier, version in self.req.specs:
if qualifier == '==':
yield version
@property
def installed_version(self):
return self.pkg_info()['version']
def assert_source_matches_version(self):
assert self.source_dir
version = self.installed_version
if version not in self.req:
logger.warn('Requested %s, but installing version %s' % (self, self.installed_version))
else:
logger.debug('Source in %s has version %s, which satisfies requirement %s'
% (display_path(self.source_dir), version, self))
def update_editable(self, obtain=True):
if not self.url:
logger.info("Cannot update repository at %s; repository location is unknown" % self.source_dir)
return
assert self.editable
assert self.source_dir
if self.url.startswith('file:'):
# Static paths don't get updated
return
assert '+' in self.url, "bad url: %r" % self.url
if not self.update:
return
vc_type, url = self.url.split('+', 1)
backend = vcs.get_backend(vc_type)
if backend:
vcs_backend = backend(self.url)
if obtain:
vcs_backend.obtain(self.source_dir)
else:
vcs_backend.export(self.source_dir)
else:
assert 0, (
'Unexpected version control type (in %s): %s'
% (self.url, vc_type))
def uninstall(self, auto_confirm=False):
"""
Uninstall the distribution currently satisfying this requirement.
Prompts before removing or modifying files unless
``auto_confirm`` is True.
Refuses to delete or modify files outside of ``sys.prefix`` -
thus uninstallation within a virtual environment can only
modify that virtual environment, even if the virtualenv is
linked to global site-packages.
"""
if not self.check_if_exists():
raise UninstallationError("Cannot uninstall requirement %s, not installed" % (self.name,))
dist = self.satisfied_by or self.conflicts_with
paths_to_remove = UninstallPathSet(dist)
pip_egg_info_path = os.path.join(dist.location,
dist.egg_name()) + '.egg-info'
dist_info_path = os.path.join(dist.location,
'-'.join(dist.egg_name().split('-')[:2])
) + '.dist-info'
# workaround for http://bugs.debian.org/cgi-bin/bugreport.cgi?bug=618367
debian_egg_info_path = pip_egg_info_path.replace(
'-py%s' % pkg_resources.PY_MAJOR, '')
easy_install_egg = dist.egg_name() + '.egg'
develop_egg_link = egg_link_path(dist)
pip_egg_info_exists = os.path.exists(pip_egg_info_path)
debian_egg_info_exists = os.path.exists(debian_egg_info_path)
dist_info_exists = os.path.exists(dist_info_path)
if pip_egg_info_exists or debian_egg_info_exists:
# package installed by pip
if pip_egg_info_exists:
egg_info_path = pip_egg_info_path
else:
egg_info_path = debian_egg_info_path
paths_to_remove.add(egg_info_path)
if dist.has_metadata('installed-files.txt'):
for installed_file in dist.get_metadata('installed-files.txt').splitlines():
path = os.path.normpath(os.path.join(egg_info_path, installed_file))
paths_to_remove.add(path)
#FIXME: need a test for this elif block
#occurs with --single-version-externally-managed/--record outside of pip
elif dist.has_metadata('top_level.txt'):
if dist.has_metadata('namespace_packages.txt'):
namespaces = dist.get_metadata('namespace_packages.txt')
else:
namespaces = []
for top_level_pkg in [p for p
in dist.get_metadata('top_level.txt').splitlines()
if p and p not in namespaces]:
path = os.path.join(dist.location, top_level_pkg)
paths_to_remove.add(path)
paths_to_remove.add(path + '.py')
paths_to_remove.add(path + '.pyc')
elif dist.location.endswith(easy_install_egg):
# package installed by easy_install
paths_to_remove.add(dist.location)
easy_install_pth = os.path.join(os.path.dirname(dist.location),
'easy-install.pth')
paths_to_remove.add_pth(easy_install_pth, './' + easy_install_egg)
elif develop_egg_link:
# develop egg
fh = open(develop_egg_link, 'r')
link_pointer = os.path.normcase(fh.readline().strip())
fh.close()
assert (link_pointer == dist.location), 'Egg-link %s does not match installed location of %s (at %s)' % (link_pointer, self.name, dist.location)
paths_to_remove.add(develop_egg_link)
easy_install_pth = os.path.join(os.path.dirname(develop_egg_link),
'easy-install.pth')
paths_to_remove.add_pth(easy_install_pth, dist.location)
elif dist_info_exists:
for path in pip.wheel.uninstallation_paths(dist):
paths_to_remove.add(path)
# find distutils scripts= scripts
if dist.has_metadata('scripts') and dist.metadata_isdir('scripts'):
for script in dist.metadata_listdir('scripts'):
paths_to_remove.add(os.path.join(bin_py, script))
if sys.platform == 'win32':
paths_to_remove.add(os.path.join(bin_py, script) + '.bat')
# find console_scripts
if dist.has_metadata('entry_points.txt'):
config = ConfigParser.SafeConfigParser()
config.readfp(FakeFile(dist.get_metadata_lines('entry_points.txt')))
if config.has_section('console_scripts'):
for name, value in config.items('console_scripts'):
paths_to_remove.add(os.path.join(bin_py, name))
if sys.platform == 'win32':
paths_to_remove.add(os.path.join(bin_py, name) + '.exe')
paths_to_remove.add(os.path.join(bin_py, name) + '.exe.manifest')
paths_to_remove.add(os.path.join(bin_py, name) + '-script.py')
paths_to_remove.remove(auto_confirm)
self.uninstalled = paths_to_remove
def rollback_uninstall(self):
if self.uninstalled:
self.uninstalled.rollback()
else:
logger.error("Can't rollback %s, nothing uninstalled."
% (self.name,))
def commit_uninstall(self):
if self.uninstalled:
self.uninstalled.commit()
else:
logger.error("Can't commit %s, nothing uninstalled."
% (self.name,))
def archive(self, build_dir):
assert self.source_dir
create_archive = True
archive_name = '%s-%s.zip' % (self.name, self.installed_version)
archive_path = os.path.join(build_dir, archive_name)
if os.path.exists(archive_path):
response = ask_path_exists(
'The file %s exists. (i)gnore, (w)ipe, (b)ackup ' %
display_path(archive_path), ('i', 'w', 'b'))
if response == 'i':
create_archive = False
elif response == 'w':
logger.warn('Deleting %s' % display_path(archive_path))
os.remove(archive_path)
elif response == 'b':
dest_file = backup_dir(archive_path)
logger.warn('Backing up %s to %s'
% (display_path(archive_path), display_path(dest_file)))
shutil.move(archive_path, dest_file)
if create_archive:
zip = zipfile.ZipFile(archive_path, 'w', zipfile.ZIP_DEFLATED)
dir = os.path.normcase(os.path.abspath(self.source_dir))
for dirpath, dirnames, filenames in os.walk(dir):
if 'pip-egg-info' in dirnames:
dirnames.remove('pip-egg-info')
for dirname in dirnames:
dirname = os.path.join(dirpath, dirname)
name = self._clean_zip_name(dirname, dir)
zipdir = zipfile.ZipInfo(self.name + '/' + name + '/')
zipdir.external_attr = 0x1ED << 16 # 0o755
zip.writestr(zipdir, '')
for filename in filenames:
if filename == PIP_DELETE_MARKER_FILENAME:
continue
filename = os.path.join(dirpath, filename)
name = self._clean_zip_name(filename, dir)
zip.write(filename, self.name + '/' + name)
zip.close()
logger.indent -= 2
logger.notify('Saved %s' % display_path(archive_path))
def _clean_zip_name(self, name, prefix):
assert name.startswith(prefix+os.path.sep), (
"name %r doesn't start with prefix %r" % (name, prefix))
name = name[len(prefix)+1:]
name = name.replace(os.path.sep, '/')
return name
def install(self, install_options, global_options=(), root=None):
if self.editable:
self.install_editable(install_options, global_options)
return
if self.is_wheel:
self.move_wheel_files(self.source_dir)
self.install_succeeded = True
return
temp_location = tempfile.mkdtemp('-record', 'pip-')
record_filename = os.path.join(temp_location, 'install-record.txt')
try:
install_args = [sys.executable]
install_args.append('-c')
install_args.append(
"import setuptools;__file__=%r;"\
"exec(compile(open(__file__).read().replace('\\r\\n', '\\n'), __file__, 'exec'))" % self.setup_py)
install_args += list(global_options) + ['install','--record', record_filename]
if not self.as_egg:
install_args += ['--single-version-externally-managed']
if root is not None:
install_args += ['--root', root]
if running_under_virtualenv():
## FIXME: I'm not sure if this is a reasonable location; probably not
## but we can't put it in the default location, as that is a virtualenv symlink that isn't writable
install_args += ['--install-headers',
os.path.join(sys.prefix, 'include', 'site',
'python' + get_python_version())]
logger.notify('Running setup.py install for %s' % self.name)
logger.indent += 2
try:
call_subprocess(install_args + install_options,
cwd=self.source_dir, filter_stdout=self._filter_install, show_stdout=False)
finally:
logger.indent -= 2
if not os.path.exists(record_filename):
logger.notify('Record file %s not found' % record_filename)
return
self.install_succeeded = True
if self.as_egg:
# there's no --always-unzip option we can pass to install command
# so we are unable to save the installed-files.txt
return
def prepend_root(path):
if root is None or not os.path.isabs(path):
return path
else:
return change_root(root, path)
f = open(record_filename)
for line in f:
line = line.strip()
if line.endswith('.egg-info'):
egg_info_dir = prepend_root(line)
break
else:
logger.warn('Could not find .egg-info directory in install record for %s' % self)
## FIXME: put the record somewhere
## FIXME: should this be an error?
return
f.close()
new_lines = []
f = open(record_filename)
for line in f:
filename = line.strip()
if os.path.isdir(filename):
filename += os.path.sep
new_lines.append(make_path_relative(prepend_root(filename), egg_info_dir))
f.close()
f = open(os.path.join(egg_info_dir, 'installed-files.txt'), 'w')
f.write('\n'.join(new_lines)+'\n')
f.close()
finally:
if os.path.exists(record_filename):
os.remove(record_filename)
os.rmdir(temp_location)
def remove_temporary_source(self):
"""Remove the source files from this requirement, if they are marked
for deletion"""
if self.is_bundle or os.path.exists(self.delete_marker_filename):
logger.info('Removing source in %s' % self.source_dir)
if self.source_dir:
rmtree(self.source_dir)
self.source_dir = None
if self._temp_build_dir and os.path.exists(self._temp_build_dir):
rmtree(self._temp_build_dir)
self._temp_build_dir = None
def install_editable(self, install_options, global_options=()):
logger.notify('Running setup.py develop for %s' % self.name)
logger.indent += 2
try:
## FIXME: should we do --install-headers here too?
call_subprocess(
[sys.executable, '-c',
"import setuptools; __file__=%r; exec(compile(open(__file__).read().replace('\\r\\n', '\\n'), __file__, 'exec'))" % self.setup_py]
+ list(global_options) + ['develop', '--no-deps'] + list(install_options),
cwd=self.source_dir, filter_stdout=self._filter_install,
show_stdout=False)
finally:
logger.indent -= 2
self.install_succeeded = True
def _filter_install(self, line):
level = logger.NOTIFY
for regex in [r'^running .*', r'^writing .*', '^creating .*', '^[Cc]opying .*',
r'^reading .*', r"^removing .*\.egg-info' \(and everything under it\)$",
r'^byte-compiling ',
# Not sure what this warning is, but it seems harmless:
r"^warning: manifest_maker: standard file '-c' not found$"]:
if re.search(regex, line.strip()):
level = logger.INFO
break
return (level, line)
def check_if_exists(self):
"""Find an installed distribution that satisfies or conflicts
with this requirement, and set self.satisfied_by or
self.conflicts_with appropriately."""
if self.req is None:
return False
try:
# DISTRIBUTE TO SETUPTOOLS UPGRADE HACK (1 of 3 parts)
# if we've already set distribute as a conflict to setuptools
# then this check has already run before. we don't want it to
# run again, and return False, since it would block the uninstall
# TODO: remove this later
if (self.req.project_name == 'setuptools'
and self.conflicts_with
and self.conflicts_with.project_name == 'distribute'):
return True
else:
self.satisfied_by = pkg_resources.get_distribution(self.req)
except pkg_resources.DistributionNotFound:
return False
except pkg_resources.VersionConflict:
existing_dist = pkg_resources.get_distribution(self.req.project_name)
if self.use_user_site:
if dist_in_usersite(existing_dist):
self.conflicts_with = existing_dist
elif running_under_virtualenv() and dist_in_site_packages(existing_dist):
raise InstallationError("Will not install to the user site because it will lack sys.path precedence to %s in %s"
%(existing_dist.project_name, existing_dist.location))
else:
self.conflicts_with = existing_dist
return True
@property
def is_wheel(self):
return self.url and '.whl' in self.url
@property
def is_bundle(self):
if self._is_bundle is not None:
return self._is_bundle
base = self._temp_build_dir
if not base:
## FIXME: this doesn't seem right:
return False
self._is_bundle = (os.path.exists(os.path.join(base, 'pip-manifest.txt'))
or os.path.exists(os.path.join(base, 'pyinstall-manifest.txt')))
return self._is_bundle
def bundle_requirements(self):
for dest_dir in self._bundle_editable_dirs:
package = os.path.basename(dest_dir)
## FIXME: svnism:
for vcs_backend in vcs.backends:
url = rev = None
vcs_bundle_file = os.path.join(
dest_dir, vcs_backend.bundle_file)
if os.path.exists(vcs_bundle_file):
vc_type = vcs_backend.name
fp = open(vcs_bundle_file)
content = fp.read()
fp.close()
url, rev = vcs_backend().parse_vcs_bundle_file(content)
break
if url:
url = '%s+%s@%s' % (vc_type, url, rev)
else:
url = None
yield InstallRequirement(
package, self, editable=True, url=url,
update=False, source_dir=dest_dir, from_bundle=True)
for dest_dir in self._bundle_build_dirs:
package = os.path.basename(dest_dir)
yield InstallRequirement(package, self,source_dir=dest_dir, from_bundle=True)
def move_bundle_files(self, dest_build_dir, dest_src_dir):
base = self._temp_build_dir
assert base
src_dir = os.path.join(base, 'src')
build_dir = os.path.join(base, 'build')
bundle_build_dirs = []
bundle_editable_dirs = []
for source_dir, dest_dir, dir_collection in [
(src_dir, dest_src_dir, bundle_editable_dirs),
(build_dir, dest_build_dir, bundle_build_dirs)]:
if os.path.exists(source_dir):
for dirname in os.listdir(source_dir):
dest = os.path.join(dest_dir, dirname)
dir_collection.append(dest)
if os.path.exists(dest):
logger.warn('The directory %s (containing package %s) already exists; cannot move source from bundle %s'
% (dest, dirname, self))
continue
if not os.path.exists(dest_dir):
logger.info('Creating directory %s' % dest_dir)
os.makedirs(dest_dir)
shutil.move(os.path.join(source_dir, dirname), dest)
if not os.listdir(source_dir):
os.rmdir(source_dir)
self._temp_build_dir = None
self._bundle_build_dirs = bundle_build_dirs
self._bundle_editable_dirs = bundle_editable_dirs
def move_wheel_files(self, wheeldir):
move_wheel_files(self.name, self.req, wheeldir, user=self.use_user_site, home=self.target_dir)
@property
def delete_marker_filename(self):
assert self.source_dir
return os.path.join(self.source_dir, PIP_DELETE_MARKER_FILENAME)
class Requirements(object):
def __init__(self):
self._keys = []
self._dict = {}
def keys(self):
return self._keys
def values(self):
return [self._dict[key] for key in self._keys]
def __contains__(self, item):
return item in self._keys
def __setitem__(self, key, value):
if key not in self._keys:
self._keys.append(key)
self._dict[key] = value
def __getitem__(self, key):
return self._dict[key]
def __repr__(self):
values = ['%s: %s' % (repr(k), repr(self[k])) for k in self.keys()]
return 'Requirements({%s})' % ', '.join(values)
class RequirementSet(object):
def __init__(self, build_dir, src_dir, download_dir, download_cache=None,
upgrade=False, ignore_installed=False, as_egg=False, target_dir=None,
ignore_dependencies=False, force_reinstall=False, use_user_site=False):
self.build_dir = build_dir
self.src_dir = src_dir
self.download_dir = download_dir
self.download_cache = download_cache
self.upgrade = upgrade
self.ignore_installed = ignore_installed
self.force_reinstall = force_reinstall
self.requirements = Requirements()
# Mapping of alias: real_name
self.requirement_aliases = {}
self.unnamed_requirements = []
self.ignore_dependencies = ignore_dependencies
self.successfully_downloaded = []
self.successfully_installed = []
self.reqs_to_cleanup = []
self.as_egg = as_egg
self.use_user_site = use_user_site
self.target_dir = target_dir #set from --target option
def __str__(self):
reqs = [req for req in self.requirements.values()
if not req.comes_from]
reqs.sort(key=lambda req: req.name.lower())
return ' '.join([str(req.req) for req in reqs])
def add_requirement(self, install_req):
name = install_req.name
install_req.as_egg = self.as_egg
install_req.use_user_site = self.use_user_site
install_req.target_dir = self.target_dir
if not name:
#url or path requirement w/o an egg fragment
self.unnamed_requirements.append(install_req)
else:
if self.has_requirement(name):
raise InstallationError(
'Double requirement given: %s (already in %s, name=%r)'
% (install_req, self.get_requirement(name), name))
self.requirements[name] = install_req
## FIXME: what about other normalizations? E.g., _ vs. -?
if name.lower() != name:
self.requirement_aliases[name.lower()] = name
def has_requirement(self, project_name):
for name in project_name, project_name.lower():
if name in self.requirements or name in self.requirement_aliases:
return True
return False
@property
def has_requirements(self):
return list(self.requirements.values()) or self.unnamed_requirements
@property
def has_editables(self):
if any(req.editable for req in self.requirements.values()):
return True
if any(req.editable for req in self.unnamed_requirements):
return True
return False
@property
def is_download(self):
if self.download_dir:
self.download_dir = os.path.expanduser(self.download_dir)
if os.path.exists(self.download_dir):
return True
else:
logger.fatal('Could not find download directory')
raise InstallationError(
"Could not find or access download directory '%s'"
% display_path(self.download_dir))
return False
def get_requirement(self, project_name):
for name in project_name, project_name.lower():
if name in self.requirements:
return self.requirements[name]
if name in self.requirement_aliases:
return self.requirements[self.requirement_aliases[name]]
raise KeyError("No project with the name %r" % project_name)
def uninstall(self, auto_confirm=False):
for req in self.requirements.values():
req.uninstall(auto_confirm=auto_confirm)
req.commit_uninstall()
def locate_files(self):
## FIXME: duplicates code from prepare_files; relevant code should
## probably be factored out into a separate method
unnamed = list(self.unnamed_requirements)
reqs = list(self.requirements.values())
while reqs or unnamed:
if unnamed:
req_to_install = unnamed.pop(0)
else:
req_to_install = reqs.pop(0)
install_needed = True
if not self.ignore_installed and not req_to_install.editable:
req_to_install.check_if_exists()
if req_to_install.satisfied_by:
if self.upgrade:
                        #don't uninstall conflict if user install and conflict is not user install
if not (self.use_user_site and not dist_in_usersite(req_to_install.satisfied_by)):
req_to_install.conflicts_with = req_to_install.satisfied_by
req_to_install.satisfied_by = None
else:
install_needed = False
if req_to_install.satisfied_by:
logger.notify('Requirement already satisfied '
'(use --upgrade to upgrade): %s'
% req_to_install)
if req_to_install.editable:
if req_to_install.source_dir is None:
req_to_install.source_dir = req_to_install.build_location(self.src_dir)
elif install_needed:
req_to_install.source_dir = req_to_install.build_location(self.build_dir, not self.is_download)
if req_to_install.source_dir is not None and not os.path.isdir(req_to_install.source_dir):
raise InstallationError('Could not install requirement %s '
'because source folder %s does not exist '
'(perhaps --no-download was used without first running '
'an equivalent install with --no-install?)'
% (req_to_install, req_to_install.source_dir))
def prepare_files(self, finder, force_root_egg_info=False, bundle=False):
"""Prepare process. Create temp directories, download and/or unpack files."""
unnamed = list(self.unnamed_requirements)
reqs = list(self.requirements.values())
while reqs or unnamed:
if unnamed:
req_to_install = unnamed.pop(0)
else:
req_to_install = reqs.pop(0)
install = True
best_installed = False
not_found = None
if not self.ignore_installed and not req_to_install.editable:
req_to_install.check_if_exists()
if req_to_install.satisfied_by:
if self.upgrade:
if not self.force_reinstall and not req_to_install.url:
try:
url = finder.find_requirement(
req_to_install, self.upgrade)
except BestVersionAlreadyInstalled:
best_installed = True
install = False
except DistributionNotFound:
not_found = sys.exc_info()[1]
else:
# Avoid the need to call find_requirement again
req_to_install.url = url.url
if not best_installed:
#don't uninstall conflict if user install and conflict is not user install
if not (self.use_user_site and not dist_in_usersite(req_to_install.satisfied_by)):
req_to_install.conflicts_with = req_to_install.satisfied_by
req_to_install.satisfied_by = None
else:
install = False
if req_to_install.satisfied_by:
if best_installed:
logger.notify('Requirement already up-to-date: %s'
% req_to_install)
else:
logger.notify('Requirement already satisfied '
'(use --upgrade to upgrade): %s'
% req_to_install)
if req_to_install.editable:
logger.notify('Obtaining %s' % req_to_install)
elif install:
if req_to_install.url and req_to_install.url.lower().startswith('file:'):
logger.notify('Unpacking %s' % display_path(url_to_path(req_to_install.url)))
else:
logger.notify('Downloading/unpacking %s' % req_to_install)
logger.indent += 2
try:
is_bundle = False
is_wheel = False
if req_to_install.editable:
if req_to_install.source_dir is None:
location = req_to_install.build_location(self.src_dir)
req_to_install.source_dir = location
else:
location = req_to_install.source_dir
if not os.path.exists(self.build_dir):
_make_build_dir(self.build_dir)
req_to_install.update_editable(not self.is_download)
if self.is_download:
req_to_install.run_egg_info()
req_to_install.archive(self.download_dir)
else:
req_to_install.run_egg_info()
elif install:
##@@ if filesystem packages are not marked
##editable in a req, a non deterministic error
##occurs when the script attempts to unpack the
##build directory
# NB: This call can result in the creation of a temporary build directory
location = req_to_install.build_location(self.build_dir, not self.is_download)
unpack = True
url = None
# In the case where the req comes from a bundle, we should
# assume a build dir exists and move on
if req_to_install.from_bundle:
pass
# If a checkout exists, it's unwise to keep going. version
# inconsistencies are logged later, but do not fail the
# installation.
elif os.path.exists(os.path.join(location, 'setup.py')):
msg = textwrap.dedent("""
pip can't proceed with requirement '%s' due to a pre-existing build directory.
location: %s
This is likely due to a previous installation that failed.
pip is being responsible and not assuming it can delete this.
Please delete it and try again.
""" % (req_to_install, location))
e = PreviousBuildDirError(msg)
logger.fatal(msg)
raise e
else:
## FIXME: this won't upgrade when there's an existing package unpacked in `location`
if req_to_install.url is None:
if not_found:
raise not_found
url = finder.find_requirement(req_to_install, upgrade=self.upgrade)
else:
## FIXME: should req_to_install.url already be a link?
url = Link(req_to_install.url)
assert url
if url:
try:
self.unpack_url(url, location, self.is_download)
except HTTPError:
e = sys.exc_info()[1]
logger.fatal('Could not install requirement %s because of error %s'
% (req_to_install, e))
raise InstallationError(
'Could not install requirement %s because of HTTP error %s for URL %s'
% (req_to_install, e, url))
else:
unpack = False
if unpack:
is_bundle = req_to_install.is_bundle
is_wheel = url and url.filename.endswith('.whl')
if is_bundle:
req_to_install.move_bundle_files(self.build_dir, self.src_dir)
for subreq in req_to_install.bundle_requirements():
reqs.append(subreq)
self.add_requirement(subreq)
elif self.is_download:
req_to_install.source_dir = location
if not is_wheel:
# FIXME: see https://github.com/pypa/pip/issues/1112
req_to_install.run_egg_info()
if url and url.scheme in vcs.all_schemes:
req_to_install.archive(self.download_dir)
elif is_wheel:
req_to_install.source_dir = location
req_to_install.url = url.url
dist = list(pkg_resources.find_distributions(location))[0]
if not req_to_install.req:
req_to_install.req = dist.as_requirement()
self.add_requirement(req_to_install)
if not self.ignore_dependencies:
for subreq in dist.requires(req_to_install.extras):
if self.has_requirement(subreq.project_name):
continue
subreq = InstallRequirement(str(subreq),
req_to_install)
reqs.append(subreq)
self.add_requirement(subreq)
else:
req_to_install.source_dir = location
req_to_install.run_egg_info()
if force_root_egg_info:
# We need to run this to make sure that the .egg-info/
# directory is created for packing in the bundle
req_to_install.run_egg_info(force_root_egg_info=True)
req_to_install.assert_source_matches_version()
#@@ sketchy way of identifying packages not grabbed from an index
if bundle and req_to_install.url:
self.copy_to_build_dir(req_to_install)
install = False
# req_to_install.req is only avail after unpack for URL pkgs
# repeat check_if_exists to uninstall-on-upgrade (#14)
req_to_install.check_if_exists()
if req_to_install.satisfied_by:
if self.upgrade or self.ignore_installed:
                            #don't uninstall conflict if user install and conflict is not user install
if not (self.use_user_site and not dist_in_usersite(req_to_install.satisfied_by)):
req_to_install.conflicts_with = req_to_install.satisfied_by
req_to_install.satisfied_by = None
else:
install = False
if not (is_bundle or is_wheel):
## FIXME: shouldn't be globally added:
finder.add_dependency_links(req_to_install.dependency_links)
if (req_to_install.extras):
logger.notify("Installing extra requirements: %r" % ','.join(req_to_install.extras))
if not self.ignore_dependencies:
for req in req_to_install.requirements(req_to_install.extras):
try:
name = pkg_resources.Requirement.parse(req).project_name
except ValueError:
e = sys.exc_info()[1]
## FIXME: proper warning
logger.error('Invalid requirement: %r (%s) in requirement %s' % (req, e, req_to_install))
continue
if self.has_requirement(name):
## FIXME: check for conflict
continue
subreq = InstallRequirement(req, req_to_install)
reqs.append(subreq)
self.add_requirement(subreq)
if not self.has_requirement(req_to_install.name):
#'unnamed' requirements will get added here
self.add_requirement(req_to_install)
                # Every requirement is scheduled for temporary-source cleanup,
                # whether it was downloaded or built in a temporary directory.
                self.reqs_to_cleanup.append(req_to_install)
if install:
self.successfully_downloaded.append(req_to_install)
if bundle and (req_to_install.url and req_to_install.url.startswith('file:///')):
self.copy_to_build_dir(req_to_install)
finally:
logger.indent -= 2
def cleanup_files(self, bundle=False):
"""Clean up files, remove builds."""
logger.notify('Cleaning up...')
logger.indent += 2
for req in self.reqs_to_cleanup:
req.remove_temporary_source()
remove_dir = []
if self._pip_has_created_build_dir():
remove_dir.append(self.build_dir)
# The source dir of a bundle can always be removed.
# FIXME: not if it pre-existed the bundle!
if bundle:
remove_dir.append(self.src_dir)
for dir in remove_dir:
if os.path.exists(dir):
logger.info('Removing temporary dir %s...' % dir)
rmtree(dir)
logger.indent -= 2
def _pip_has_created_build_dir(self):
return (self.build_dir == build_prefix and
os.path.exists(os.path.join(self.build_dir, PIP_DELETE_MARKER_FILENAME)))
def copy_to_build_dir(self, req_to_install):
target_dir = req_to_install.editable and self.src_dir or self.build_dir
logger.info("Copying %s to %s" % (req_to_install.name, target_dir))
dest = os.path.join(target_dir, req_to_install.name)
shutil.copytree(req_to_install.source_dir, dest)
call_subprocess(["python", "%s/setup.py" % dest, "clean"], cwd=dest,
command_desc='python setup.py clean')
def unpack_url(self, link, location, only_download=False):
if only_download:
loc = self.download_dir
else:
loc = location
if is_vcs_url(link):
return unpack_vcs_link(link, loc, only_download)
# a local file:// index could have links with hashes
elif not link.hash and is_file_url(link):
return unpack_file_url(link, loc)
else:
if self.download_cache:
self.download_cache = os.path.expanduser(self.download_cache)
retval = unpack_http_url(link, location, self.download_cache, self.download_dir)
if only_download:
write_delete_marker_file(location)
return retval
def install(self, install_options, global_options=(), *args, **kwargs):
"""Install everything in this set (after having downloaded and unpacked the packages)"""
to_install = [r for r in self.requirements.values()
if not r.satisfied_by]
        # DISTRIBUTE TO SETUPTOOLS UPGRADE HACK (2 of 3 parts)
# move the distribute-0.7.X wrapper to the end because it does not
        # install a setuptools package. by moving it to the end, we ensure its
# setuptools dependency is handled first, which will provide the
# setuptools package
# TODO: take this out later
distribute_req = pkg_resources.Requirement.parse("distribute>=0.7")
for req in to_install:
if req.name == 'distribute' and req.installed_version in distribute_req:
to_install.remove(req)
to_install.append(req)
if to_install:
logger.notify('Installing collected packages: %s' % ', '.join([req.name for req in to_install]))
logger.indent += 2
try:
for requirement in to_install:
                # DISTRIBUTE TO SETUPTOOLS UPGRADE HACK (3 of 3 parts)
# when upgrading from distribute-0.6.X to the new merged
# setuptools in py2, we need to force setuptools to uninstall
# distribute. In py3, which is always using distribute, this
# conversion is already happening in distribute's pkg_resources.
# It's ok *not* to check if setuptools>=0.7 because if someone
                # were actually trying to upgrade from distribute to setuptools
                # 0.6.X, then all this could do is actually help, although that
                # upgrade path was certainly never "supported"
# TODO: remove this later
if requirement.name == 'setuptools':
try:
# only uninstall distribute<0.7. For >=0.7, setuptools
# will also be present, and that's what we need to
# uninstall
distribute_requirement = pkg_resources.Requirement.parse("distribute<0.7")
existing_distribute = pkg_resources.get_distribution("distribute")
if existing_distribute in distribute_requirement:
requirement.conflicts_with = existing_distribute
except pkg_resources.DistributionNotFound:
# distribute wasn't installed, so nothing to do
pass
if requirement.conflicts_with:
logger.notify('Found existing installation: %s'
% requirement.conflicts_with)
logger.indent += 2
try:
requirement.uninstall(auto_confirm=True)
finally:
logger.indent -= 2
try:
requirement.install(install_options, global_options, *args, **kwargs)
except:
# if install did not succeed, rollback previous uninstall
if requirement.conflicts_with and not requirement.install_succeeded:
requirement.rollback_uninstall()
raise
else:
if requirement.conflicts_with and requirement.install_succeeded:
requirement.commit_uninstall()
requirement.remove_temporary_source()
finally:
logger.indent -= 2
self.successfully_installed = to_install
def create_bundle(self, bundle_filename):
## FIXME: can't decide which is better; zip is easier to read
## random files from, but tar.bz2 is smaller and not as lame a
## format.
## FIXME: this file should really include a manifest of the
## packages, maybe some other metadata files. It would make
## it easier to detect as well.
zip = zipfile.ZipFile(bundle_filename, 'w', zipfile.ZIP_DEFLATED)
vcs_dirs = []
for dir, basename in (self.build_dir, 'build'), (self.src_dir, 'src'):
dir = os.path.normcase(os.path.abspath(dir))
for dirpath, dirnames, filenames in os.walk(dir):
for backend in vcs.backends:
vcs_backend = backend()
vcs_url = vcs_rev = None
if vcs_backend.dirname in dirnames:
for vcs_dir in vcs_dirs:
if dirpath.startswith(vcs_dir):
# vcs bundle file already in parent directory
break
else:
vcs_url, vcs_rev = vcs_backend.get_info(
os.path.join(dir, dirpath))
vcs_dirs.append(dirpath)
vcs_bundle_file = vcs_backend.bundle_file
vcs_guide = vcs_backend.guide % {'url': vcs_url,
'rev': vcs_rev}
dirnames.remove(vcs_backend.dirname)
break
if 'pip-egg-info' in dirnames:
dirnames.remove('pip-egg-info')
for dirname in dirnames:
dirname = os.path.join(dirpath, dirname)
name = self._clean_zip_name(dirname, dir)
zip.writestr(basename + '/' + name + '/', '')
for filename in filenames:
if filename == PIP_DELETE_MARKER_FILENAME:
continue
filename = os.path.join(dirpath, filename)
name = self._clean_zip_name(filename, dir)
zip.write(filename, basename + '/' + name)
if vcs_url:
name = os.path.join(dirpath, vcs_bundle_file)
name = self._clean_zip_name(name, dir)
zip.writestr(basename + '/' + name, vcs_guide)
zip.writestr('pip-manifest.txt', self.bundle_requirements())
zip.close()
BUNDLE_HEADER = '''\
# This is a pip bundle file, that contains many source packages
# that can be installed as a group. You can install this like:
# pip this_file.zip
# The rest of the file contains a list of all the packages included:
'''
def bundle_requirements(self):
parts = [self.BUNDLE_HEADER]
for req in [req for req in self.requirements.values()
if not req.comes_from]:
parts.append('%s==%s\n' % (req.name, req.installed_version))
parts.append('# These packages were installed to satisfy the above requirements:\n')
for req in [req for req in self.requirements.values()
if req.comes_from]:
parts.append('%s==%s\n' % (req.name, req.installed_version))
## FIXME: should we do something with self.unnamed_requirements?
return ''.join(parts)
def _clean_zip_name(self, name, prefix):
assert name.startswith(prefix+os.path.sep), (
"name %r doesn't start with prefix %r" % (name, prefix))
name = name[len(prefix)+1:]
name = name.replace(os.path.sep, '/')
return name
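    # For illustration (assuming POSIX separators), a call such as
    #     self._clean_zip_name('/tmp/build/foo/bar.py', '/tmp/build')
    # strips the prefix and normalizes separators, returning 'foo/bar.py'.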
def _make_build_dir(build_dir):
os.makedirs(build_dir)
write_delete_marker_file(build_dir)
_scheme_re = re.compile(r'^(http|https|file):', re.I)
def parse_requirements(filename, finder=None, comes_from=None, options=None):
skip_match = None
skip_regex = options.skip_requirements_regex if options else None
if skip_regex:
skip_match = re.compile(skip_regex)
reqs_file_dir = os.path.dirname(os.path.abspath(filename))
filename, content = get_file_content(filename, comes_from=comes_from)
for line_number, line in enumerate(content.splitlines()):
line_number += 1
line = line.strip()
if not line or line.startswith('#'):
continue
if skip_match and skip_match.search(line):
continue
if line.startswith('-r') or line.startswith('--requirement'):
if line.startswith('-r'):
req_url = line[2:].strip()
else:
req_url = line[len('--requirement'):].strip().strip('=')
if _scheme_re.search(filename):
# Relative to a URL
req_url = urlparse.urljoin(filename, req_url)
elif not _scheme_re.search(req_url):
req_url = os.path.join(os.path.dirname(filename), req_url)
for item in parse_requirements(req_url, finder, comes_from=filename, options=options):
yield item
elif line.startswith('-Z') or line.startswith('--always-unzip'):
# No longer used, but previously these were used in
# requirement files, so we'll ignore.
pass
elif line.startswith('-f') or line.startswith('--find-links'):
if line.startswith('-f'):
line = line[2:].strip()
else:
line = line[len('--find-links'):].strip().lstrip('=')
## FIXME: it would be nice to keep track of the source of
## the find_links:
# support a find-links local path relative to a requirements file
relative_to_reqs_file = os.path.join(reqs_file_dir, line)
if os.path.exists(relative_to_reqs_file):
line = relative_to_reqs_file
if finder:
finder.find_links.append(line)
elif line.startswith('-i') or line.startswith('--index-url'):
if line.startswith('-i'):
line = line[2:].strip()
else:
line = line[len('--index-url'):].strip().lstrip('=')
if finder:
finder.index_urls = [line]
elif line.startswith('--extra-index-url'):
line = line[len('--extra-index-url'):].strip().lstrip('=')
if finder:
finder.index_urls.append(line)
elif line.startswith('--use-wheel'):
finder.use_wheel = True
elif line.startswith('--no-index'):
finder.index_urls = []
elif line.startswith("--allow-external"):
line = line[len("--allow-external"):].strip().lstrip("=")
finder.allow_external |= set([normalize_name(line).lower()])
elif line.startswith("--allow-all-external"):
finder.allow_all_external = True
elif line.startswith("--no-allow-external"):
finder.allow_external = False
elif line.startswith("--no-allow-insecure"):
finder.allow_all_insecure = False
elif line.startswith("--allow-insecure"):
line = line[len("--allow-insecure"):].strip().lstrip("=")
finder.allow_insecure |= set([normalize_name(line).lower()])
else:
comes_from = '-r %s (line %s)' % (filename, line_number)
if line.startswith('-e') or line.startswith('--editable'):
if line.startswith('-e'):
line = line[2:].strip()
else:
line = line[len('--editable'):].strip().lstrip('=')
req = InstallRequirement.from_editable(
line, comes_from=comes_from, default_vcs=options.default_vcs if options else None)
else:
req = InstallRequirement.from_line(line, comes_from, prereleases=getattr(options, "pre", None))
yield req
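# For illustration, a requirements file consumed by parse_requirements() might
# contain lines such as the following (URLs and names are hypothetical):
#     -i https://pypi.example.org/simple
#     -f ./local-wheels
#     -r more-requirements.txt
#     -e git+https://example.com/pkg.git#egg=pkg
#     SomePackage==1.0
# Option lines configure the finder in place; the remaining lines are yielded
# as InstallRequirement objects.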
def parse_editable(editable_req, default_vcs=None):
"""Parses svn+http://blahblah@rev#egg=Foobar into a requirement
(Foobar) and a URL"""
url = editable_req
extras = None
# If a file path is specified with extras, strip off the extras.
m = re.match(r'^(.+)(\[[^\]]+\])$', url)
if m:
url_no_extras = m.group(1)
extras = m.group(2)
else:
url_no_extras = url
if os.path.isdir(url_no_extras):
if not os.path.exists(os.path.join(url_no_extras, 'setup.py')):
raise InstallationError("Directory %r is not installable. File 'setup.py' not found." % url_no_extras)
# Treating it as code that has already been checked out
url_no_extras = path_to_url(url_no_extras)
if url_no_extras.lower().startswith('file:'):
if extras:
return None, url_no_extras, pkg_resources.Requirement.parse('__placeholder__' + extras).extras
else:
return None, url_no_extras, None
for version_control in vcs:
if url.lower().startswith('%s:' % version_control):
url = '%s+%s' % (version_control, url)
if '+' not in url:
if default_vcs:
url = default_vcs + '+' + url
else:
raise InstallationError(
                '%s should either be a path to a local project or a VCS url beginning with svn+, git+, hg+, or bzr+' % editable_req)
vc_type = url.split('+', 1)[0].lower()
if not vcs.get_backend(vc_type):
error_message = 'For --editable=%s only ' % editable_req + \
', '.join([backend.name + '+URL' for backend in vcs.backends]) + \
' is currently supported'
raise InstallationError(error_message)
match = re.search(r'(?:#|#.*?&)egg=([^&]*)', editable_req)
if (not match or not match.group(1)) and vcs.get_backend(vc_type):
parts = [p for p in editable_req.split('#', 1)[0].split('/') if p]
if parts[-2] in ('tags', 'branches', 'tag', 'branch'):
req = parts[-3]
elif parts[-1] == 'trunk':
req = parts[-2]
else:
raise InstallationError(
'--editable=%s is not the right format; it must have #egg=Package'
% editable_req)
else:
req = match.group(1)
## FIXME: use package_to_requirement?
match = re.search(r'^(.*?)(?:-dev|-\d.*)$', req)
if match:
# Strip off -dev, -0.2, etc.
req = match.group(1)
return req, url, None
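# For illustration (hypothetical URL), parse_editable() maps an editable
# specifier to a (name, url, extras) triple:
#     parse_editable('git+https://example.com/repo.git#egg=Foobar')
#         -> ('Foobar', 'git+https://example.com/repo.git#egg=Foobar', None)
# A bare local directory containing setup.py is converted to a file: URL and
# returned with the name set to None.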
class UninstallPathSet(object):
"""A set of file paths to be removed in the uninstallation of a
requirement."""
def __init__(self, dist):
self.paths = set()
self._refuse = set()
self.pth = {}
self.dist = dist
self.save_dir = None
self._moved_paths = []
def _permitted(self, path):
"""
Return True if the given path is one we are permitted to
remove/modify, False otherwise.
"""
return is_local(path)
def _can_uninstall(self):
if not dist_is_local(self.dist):
logger.notify("Not uninstalling %s at %s, outside environment %s"
% (self.dist.project_name, normalize_path(self.dist.location), sys.prefix))
return False
return True
def add(self, path):
path = normalize_path(path)
if not os.path.exists(path):
return
if self._permitted(path):
self.paths.add(path)
else:
self._refuse.add(path)
# __pycache__ files can show up after 'installed-files.txt' is created, due to imports
if os.path.splitext(path)[1] == '.py' and uses_pycache:
self.add(imp.cache_from_source(path))
def add_pth(self, pth_file, entry):
pth_file = normalize_path(pth_file)
if self._permitted(pth_file):
if pth_file not in self.pth:
self.pth[pth_file] = UninstallPthEntries(pth_file)
self.pth[pth_file].add(entry)
else:
self._refuse.add(pth_file)
def compact(self, paths):
"""Compact a path set to contain the minimal number of paths
necessary to contain all paths in the set. If /a/path/ and
/a/path/to/a/file.txt are both in the set, leave only the
shorter path."""
short_paths = set()
for path in sorted(paths, key=len):
if not any([(path.startswith(shortpath) and
path[len(shortpath.rstrip(os.path.sep))] == os.path.sep)
for shortpath in short_paths]):
short_paths.add(path)
return short_paths
def _stash(self, path):
return os.path.join(
self.save_dir, os.path.splitdrive(path)[1].lstrip(os.path.sep))
def remove(self, auto_confirm=False):
"""Remove paths in ``self.paths`` with confirmation (unless
``auto_confirm`` is True)."""
if not self._can_uninstall():
return
if not self.paths:
logger.notify("Can't uninstall '%s'. No files were found to uninstall." % self.dist.project_name)
return
logger.notify('Uninstalling %s:' % self.dist.project_name)
logger.indent += 2
paths = sorted(self.compact(self.paths))
try:
if auto_confirm:
response = 'y'
else:
for path in paths:
logger.notify(path)
response = ask('Proceed (y/n)? ', ('y', 'n'))
if self._refuse:
logger.notify('Not removing or modifying (outside of prefix):')
for path in self.compact(self._refuse):
logger.notify(path)
if response == 'y':
self.save_dir = tempfile.mkdtemp(suffix='-uninstall',
prefix='pip-')
for path in paths:
new_path = self._stash(path)
logger.info('Removing file or directory %s' % path)
self._moved_paths.append(path)
renames(path, new_path)
for pth in self.pth.values():
pth.remove()
logger.notify('Successfully uninstalled %s' % self.dist.project_name)
finally:
logger.indent -= 2
def rollback(self):
"""Rollback the changes previously made by remove()."""
if self.save_dir is None:
logger.error("Can't roll back %s; was not uninstalled" % self.dist.project_name)
return False
logger.notify('Rolling back uninstall of %s' % self.dist.project_name)
for path in self._moved_paths:
tmp_path = self._stash(path)
logger.info('Replacing %s' % path)
renames(tmp_path, path)
for pth in self.pth:
pth.rollback()
def commit(self):
"""Remove temporary save dir: rollback will no longer be possible."""
if self.save_dir is not None:
rmtree(self.save_dir)
self.save_dir = None
self._moved_paths = []
class UninstallPthEntries(object):
def __init__(self, pth_file):
if not os.path.isfile(pth_file):
raise UninstallationError("Cannot remove entries from nonexistent file %s" % pth_file)
self.file = pth_file
self.entries = set()
self._saved_lines = None
def add(self, entry):
entry = os.path.normcase(entry)
# On Windows, os.path.normcase converts the entry to use
# backslashes. This is correct for entries that describe absolute
# paths outside of site-packages, but all the others use forward
# slashes.
if sys.platform == 'win32' and not os.path.splitdrive(entry)[0]:
entry = entry.replace('\\', '/')
self.entries.add(entry)
def remove(self):
logger.info('Removing pth entries from %s:' % self.file)
fh = open(self.file, 'rb')
# windows uses '\r\n' with py3k, but uses '\n' with py2.x
lines = fh.readlines()
self._saved_lines = lines
fh.close()
if any(b('\r\n') in line for line in lines):
endline = '\r\n'
else:
endline = '\n'
for entry in self.entries:
try:
logger.info('Removing entry: %s' % entry)
lines.remove(b(entry + endline))
except ValueError:
pass
fh = open(self.file, 'wb')
fh.writelines(lines)
fh.close()
def rollback(self):
if self._saved_lines is None:
logger.error('Cannot roll back changes to %s, none were made' % self.file)
return False
logger.info('Rolling %s back to previous state' % self.file)
fh = open(self.file, 'wb')
fh.writelines(self._saved_lines)
fh.close()
return True
class FakeFile(object):
"""Wrap a list of lines in an object with readline() to make
ConfigParser happy."""
def __init__(self, lines):
self._gen = (l for l in lines)
def readline(self):
try:
try:
return next(self._gen)
except NameError:
return self._gen.next()
except StopIteration:
return ''
def __iter__(self):
return self._gen
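# For illustration, FakeFile lets ConfigParser-style readers consume content
# that only exists as a list of lines (section and key names are hypothetical):
#     parser = ConfigParser.RawConfigParser()
#     parser.readfp(FakeFile(['[section]\n', 'key = value\n']))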
|
Hybrid-Cloud/cinder
|
refs/heads/master
|
cinder/volume/drivers/falconstor/fss_common.py
|
1
|
# Copyright (c) 2016 FalconStor, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Volume driver for FalconStor FSS storage system.
This driver requires FSS-8.00-8865 or later.
"""
import math
import re
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import excutils
from oslo_utils import units
import six
from cinder import exception
from cinder.i18n import _, _LE, _LI, _LW
from cinder.image import image_utils
from cinder.volume.drivers.falconstor import rest_proxy
from cinder.volume.drivers.san import san
LOG = logging.getLogger(__name__)
FSS_OPTS = [
cfg.IntOpt('fss_pool',
default='',
help='FSS pool id in which FalconStor volumes are stored.'),
cfg.BoolOpt('fss_debug',
default=False,
help="Enable HTTP debugging to FSS"),
cfg.StrOpt('additional_retry_list',
default='',
               help='FSS additional retry list, separated by ;')
]
CONF = cfg.CONF
CONF.register_opts(FSS_OPTS)
class FalconstorBaseDriver(san.SanDriver):
def __init__(self, *args, **kwargs):
super(FalconstorBaseDriver, self).__init__(*args, **kwargs)
if self.configuration:
self.configuration.append_config_values(FSS_OPTS)
self.proxy = rest_proxy.RESTProxy(self.configuration)
self._backend_name = (
self.configuration.safe_get('volume_backend_name') or 'FalconStor')
self._storage_protocol = 'iSCSI'
def do_setup(self, context):
self.proxy.do_setup()
LOG.info(_LI('Activate FalconStor cinder volume driver.'))
def check_for_setup_error(self):
if self.proxy.session_id is None:
msg = (_('FSS cinder volume driver not ready: Unable to determine '
'session id.'))
raise exception.VolumeBackendAPIException(data=msg)
if not self.configuration.fss_pool:
msg = _('Pool is not available in the cinder configuration '
'fields.')
raise exception.InvalidHost(reason=msg)
self._pool_checking(self.configuration.fss_pool)
def _pool_checking(self, pool_id):
pool_count = 0
try:
output = self.proxy.list_pool_info(pool_id)
if "name" in output['data']:
pool_count = len(re.findall(rest_proxy.GROUP_PREFIX,
output['data']['name']))
            if pool_count == 0:
                msg = (_('The given pool info must include the storage pool '
                         'and its name must start with OpenStack-'))
raise exception.VolumeBackendAPIException(data=msg)
except Exception:
msg = (_('Unexpected exception during pool checking.'))
LOG.exception(msg)
raise exception.VolumeBackendAPIException(data=msg)
def _check_multipath(self):
if self.configuration.use_multipath_for_image_xfer:
if not self.configuration.san_secondary_ip:
msg = (_('The san_secondary_ip param is null.'))
raise exception.VolumeBackendAPIException(data=msg)
output = self.proxy._check_iocluster_state()
if not output:
                msg = (_('FSS does not support multipathing.'))
raise exception.VolumeBackendAPIException(data=msg)
return output
else:
return False
def create_volume(self, volume):
"""Creates a volume.
        We use the metadata of the volume to create a variety of volumes.
        Create a thin provisioned volume:
[Usage] create --volume-type FSS --metadata thinprovisioned=true
thinsize=<thin-volume-size>
Create a LUN that is a Timeview of another LUN at a specified CDP tag:
[Usage] create --volume-type FSS --metadata timeview=<vid>
cdptag=<tag> volume-size
Create a LUN that is a Timeview of another LUN at a specified Timemark:
[Usage] create --volume-type FSS --metadata timeview=<vid>
rawtimestamp=<rawtimestamp> volume-size
"""
volume_metadata = self._get_volume_metadata(volume)
if not volume_metadata:
volume_name, fss_metadata = self.proxy.create_vdev(volume)
else:
            if ("timeview" in volume_metadata and
                    ("cdptag" in volume_metadata or
                     "rawtimestamp" in volume_metadata)):
volume_name, fss_metadata = self.proxy.create_tv_from_cdp_tag(
volume_metadata, volume)
elif ("thinprovisioned" in volume_metadata and
"thinsize" in volume_metadata):
volume_name, fss_metadata = self.proxy.create_thin_vdev(
volume_metadata, volume)
else:
volume_name, fss_metadata = self.proxy.create_vdev(volume)
fss_metadata.update(volume_metadata)
if type(volume['metadata']) is dict:
fss_metadata.update(volume['metadata'])
if volume['consistencygroup_id']:
self.proxy._add_volume_to_consistency_group(
volume['consistencygroup_id'],
volume_name
)
return {'metadata': fss_metadata}
def _get_volume_metadata(self, volume):
volume_metadata = {}
if 'volume_metadata' in volume:
for metadata in volume['volume_metadata']:
volume_metadata[metadata['key']] = metadata['value']
return volume_metadata
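    # For illustration, a volume whose 'volume_metadata' list contains entries
    # like {'key': 'thinprovisioned', 'value': 'true'} and
    # {'key': 'thinsize', 'value': '10'} is flattened here to
    # {'thinprovisioned': 'true', 'thinsize': '10'}, which create_volume()
    # inspects to choose the creation path (values are hypothetical).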
def create_cloned_volume(self, volume, src_vref):
"""Creates a clone of the specified volume."""
new_vol_name = self.proxy._get_fss_volume_name(volume)
src_name = self.proxy._get_fss_volume_name(src_vref)
vol_size = volume["size"]
src_size = src_vref["size"]
fss_metadata = self.proxy.clone_volume(new_vol_name, src_name)
self.proxy.extend_vdev(new_vol_name, src_size, vol_size)
if volume['consistencygroup_id']:
self.proxy._add_volume_to_consistency_group(
volume['consistencygroup_id'],
new_vol_name
)
volume_metadata = self._get_volume_metadata(volume)
fss_metadata.update(volume_metadata)
if type(volume['metadata']) is dict:
fss_metadata.update(volume['metadata'])
return {'metadata': fss_metadata}
def extend_volume(self, volume, new_size):
"""Extend volume to new_size."""
volume_name = self.proxy._get_fss_volume_name(volume)
self.proxy.extend_vdev(volume_name, volume["size"], new_size)
def delete_volume(self, volume):
"""Disconnect all hosts and delete the volume"""
try:
self.proxy.delete_vdev(volume)
except rest_proxy.FSSHTTPError as err:
with excutils.save_and_reraise_exception(reraise=False):
LOG.warning(_LW("Volume deletion failed with message: %s"),
err.reason)
def create_snapshot(self, snapshot):
"""Creates a snapshot."""
snap_metadata = snapshot["metadata"]
metadata = self.proxy.create_snapshot(snapshot)
snap_metadata.update(metadata)
return {'metadata': snap_metadata}
def delete_snapshot(self, snapshot):
"""Deletes a snapshot."""
try:
self.proxy.delete_snapshot(snapshot)
except rest_proxy.FSSHTTPError as err:
with excutils.save_and_reraise_exception(reraise=False):
LOG.error(
_LE("Snapshot deletion failed with message: %s"),
err.reason)
def create_volume_from_snapshot(self, volume, snapshot):
"""Creates a volume from a snapshot."""
vol_size = volume['size']
snap_size = snapshot['volume_size']
volume_name, fss_metadata = self.proxy.create_volume_from_snapshot(
volume, snapshot)
if vol_size != snap_size:
try:
extend_volume_name = self.proxy._get_fss_volume_name(volume)
self.proxy.extend_vdev(extend_volume_name, snap_size, vol_size)
except rest_proxy.FSSHTTPError as err:
with excutils.save_and_reraise_exception(reraise=False):
LOG.error(_LE(
"Resizing %(id)s failed with message: %(msg)s. "
"Cleaning volume."), {'id': volume["id"],
'msg': err.reason})
if type(volume['metadata']) is dict:
fss_metadata.update(volume['metadata'])
if volume['consistencygroup_id']:
self.proxy._add_volume_to_consistency_group(
volume['consistencygroup_id'],
volume_name)
return {'metadata': fss_metadata}
def ensure_export(self, context, volume):
pass
def create_export(self, context, volume, connector):
pass
def remove_export(self, context, volume):
pass
# Attach/detach volume to instance/host
def attach_volume(self, context, volume, instance_uuid, host_name,
mountpoint):
pass
def detach_volume(self, context, volume, attachment=None):
pass
def get_volume_stats(self, refresh=False):
total_capacity = 0
free_space = 0
if refresh:
try:
info = self.proxy._get_pools_info()
if info:
total_capacity = int(info['total_capacity_gb'])
used_space = int(info['used_gb'])
free_space = int(total_capacity - used_space)
data = {"vendor_name": "FalconStor",
"volume_backend_name": self._backend_name,
"driver_version": self.VERSION,
"storage_protocol": self._storage_protocol,
"total_capacity_gb": total_capacity,
"free_capacity_gb": free_space,
"reserved_percentage": 0,
"consistencygroup_support": True
}
self._stats = data
except Exception as exc:
LOG.error(_LE('Cannot get volume status %(exc)s.'),
{'exc': exc})
return self._stats
def create_consistencygroup(self, context, group):
"""Creates a consistencygroup."""
self.proxy.create_group(group)
model_update = {'status': 'available'}
return model_update
def delete_consistencygroup(self, context, group, volumes):
"""Deletes a consistency group."""
self.proxy.destroy_group(group)
volume_updates = []
for volume in volumes:
self.delete_volume(volume)
volume_updates.append({
'id': volume.id,
'status': 'deleted'
})
model_update = {'status': group['status']}
return model_update, volume_updates
def update_consistencygroup(self, context, group,
add_volumes=None, remove_volumes=None):
addvollist = []
remvollist = []
if add_volumes:
for volume in add_volumes:
addvollist.append(self.proxy._get_fss_volume_name(volume))
if remove_volumes:
for volume in remove_volumes:
remvollist.append(self.proxy._get_fss_volume_name(volume))
self.proxy.set_group(group['id'], addvollist=addvollist,
remvollist=remvollist)
return None, None, None
def create_cgsnapshot(self, context, cgsnapshot, snapshots):
"""Creates a cgsnapshot."""
cgsnapshot_id = cgsnapshot['id']
try:
self.proxy.create_cgsnapshot(cgsnapshot)
except Exception as e:
msg = _('Failed to create cg snapshot %(id)s '
'due to %(reason)s.') % {'id': cgsnapshot_id,
'reason': six.text_type(e)}
raise exception.VolumeBackendAPIException(data=msg)
snapshot_updates = []
for snapshot in snapshots:
snapshot_updates.append({
'id': snapshot.id,
'status': 'available'
})
model_update = {'status': 'available'}
return model_update, snapshot_updates
def delete_cgsnapshot(self, context, cgsnapshot, snapshots):
"""Deletes a cgsnapshot."""
cgsnapshot_id = cgsnapshot.id
try:
self.proxy.delete_cgsnapshot(cgsnapshot)
except Exception as e:
msg = _('Failed to delete cgsnapshot %(id)s '
'due to %(reason)s.') % {'id': cgsnapshot_id,
'reason': six.text_type(e)}
raise exception.VolumeBackendAPIException(data=msg)
snapshot_updates = []
for snapshot in snapshots:
snapshot_updates.append({
'id': snapshot.id,
'status': 'deleted',
})
model_update = {'status': cgsnapshot.status}
return model_update, snapshot_updates
def manage_existing(self, volume, existing_ref):
"""Convert an existing FSS volume to a Cinder volume.
We expect a volume id in the existing_ref that matches one in FSS.
"""
volume_metadata = {}
self.proxy._get_existing_volume_ref_vid(existing_ref)
self.proxy._manage_existing_volume(existing_ref['source-id'], volume)
volume_metadata['FSS-vid'] = existing_ref['source-id']
updates = {'metadata': volume_metadata}
return updates
def manage_existing_get_size(self, volume, existing_ref):
"""Get size of an existing FSS volume.
We expect a volume id in the existing_ref that matches one in FSS.
"""
sizemb = self.proxy._get_existing_volume_ref_vid(existing_ref)
size = int(math.ceil(float(sizemb) / units.Ki))
return size
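    # For illustration: the proxy reports the size in MB, so an existing
    # 2500 MB volume is returned to Cinder as int(math.ceil(2500.0 / 1024)),
    # i.e. 3 (GB).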
def unmanage(self, volume):
"""Remove Cinder management from FSS volume"""
self.proxy.unmanage(volume)
def copy_image_to_volume(self, context, volume, image_service, image_id):
with image_utils.temporary_file() as tmp:
image_utils.fetch_verify_image(context, image_service,
image_id, tmp)
image_utils.fetch_to_raw(context,
image_service,
image_id,
tmp,
self.configuration.volume_dd_blocksize,
size=volume['size'])
|
Alberto-Beralix/Beralix
|
refs/heads/master
|
i386-squashfs-root/usr/lib/python2.7/_abcoll.py
|
218
|
# Copyright 2007 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""Abstract Base Classes (ABCs) for collections, according to PEP 3119.
DON'T USE THIS MODULE DIRECTLY! The classes here should be imported
via collections; they are defined here only to alleviate certain
bootstrapping issues. Unit tests are in test_collections.
"""
from abc import ABCMeta, abstractmethod
import sys
__all__ = ["Hashable", "Iterable", "Iterator",
"Sized", "Container", "Callable",
"Set", "MutableSet",
"Mapping", "MutableMapping",
"MappingView", "KeysView", "ItemsView", "ValuesView",
"Sequence", "MutableSequence",
]
### ONE-TRICK PONIES ###
def _hasattr(C, attr):
try:
return any(attr in B.__dict__ for B in C.__mro__)
except AttributeError:
# Old-style class
return hasattr(C, attr)
class Hashable:
__metaclass__ = ABCMeta
@abstractmethod
def __hash__(self):
return 0
@classmethod
def __subclasshook__(cls, C):
if cls is Hashable:
try:
for B in C.__mro__:
if "__hash__" in B.__dict__:
if B.__dict__["__hash__"]:
return True
break
except AttributeError:
# Old-style class
if getattr(C, "__hash__", None):
return True
return NotImplemented
class Iterable:
__metaclass__ = ABCMeta
@abstractmethod
def __iter__(self):
while False:
yield None
@classmethod
def __subclasshook__(cls, C):
if cls is Iterable:
if _hasattr(C, "__iter__"):
return True
return NotImplemented
Iterable.register(str)
class Iterator(Iterable):
@abstractmethod
def next(self):
raise StopIteration
def __iter__(self):
return self
@classmethod
def __subclasshook__(cls, C):
if cls is Iterator:
if _hasattr(C, "next") and _hasattr(C, "__iter__"):
return True
return NotImplemented
class Sized:
__metaclass__ = ABCMeta
@abstractmethod
def __len__(self):
return 0
@classmethod
def __subclasshook__(cls, C):
if cls is Sized:
if _hasattr(C, "__len__"):
return True
return NotImplemented
class Container:
__metaclass__ = ABCMeta
@abstractmethod
def __contains__(self, x):
return False
@classmethod
def __subclasshook__(cls, C):
if cls is Container:
if _hasattr(C, "__contains__"):
return True
return NotImplemented
class Callable:
__metaclass__ = ABCMeta
@abstractmethod
def __call__(self, *args, **kwds):
return False
@classmethod
def __subclasshook__(cls, C):
if cls is Callable:
if _hasattr(C, "__call__"):
return True
return NotImplemented
### SETS ###
class Set(Sized, Iterable, Container):
"""A set is a finite, iterable container.
This class provides concrete generic implementations of all
methods except for __contains__, __iter__ and __len__.
To override the comparisons (presumably for speed, as the
semantics are fixed), all you have to do is redefine __le__ and
then the other operations will automatically follow suit.
"""
def __le__(self, other):
if not isinstance(other, Set):
return NotImplemented
if len(self) > len(other):
return False
for elem in self:
if elem not in other:
return False
return True
def __lt__(self, other):
if not isinstance(other, Set):
return NotImplemented
return len(self) < len(other) and self.__le__(other)
def __gt__(self, other):
if not isinstance(other, Set):
return NotImplemented
return other < self
def __ge__(self, other):
if not isinstance(other, Set):
return NotImplemented
return other <= self
def __eq__(self, other):
if not isinstance(other, Set):
return NotImplemented
return len(self) == len(other) and self.__le__(other)
def __ne__(self, other):
return not (self == other)
@classmethod
def _from_iterable(cls, it):
'''Construct an instance of the class from any iterable input.
Must override this method if the class constructor signature
does not accept an iterable for an input.
'''
return cls(it)
def __and__(self, other):
if not isinstance(other, Iterable):
return NotImplemented
return self._from_iterable(value for value in other if value in self)
def isdisjoint(self, other):
for value in other:
if value in self:
return False
return True
def __or__(self, other):
if not isinstance(other, Iterable):
return NotImplemented
chain = (e for s in (self, other) for e in s)
return self._from_iterable(chain)
def __sub__(self, other):
if not isinstance(other, Set):
if not isinstance(other, Iterable):
return NotImplemented
other = self._from_iterable(other)
return self._from_iterable(value for value in self
if value not in other)
def __xor__(self, other):
if not isinstance(other, Set):
if not isinstance(other, Iterable):
return NotImplemented
other = self._from_iterable(other)
return (self - other) | (other - self)
# Sets are not hashable by default, but subclasses can change this
__hash__ = None
def _hash(self):
"""Compute the hash value of a set.
Note that we don't define __hash__: not all sets are hashable.
But if you define a hashable set type, its __hash__ should
call this function.
        This must be compatible with __eq__.
All sets ought to compare equal if they contain the same
elements, regardless of how they are implemented, and
regardless of the order of the elements; so there's not much
freedom for __eq__ or __hash__. We match the algorithm used
by the built-in frozenset type.
"""
MAX = sys.maxint
MASK = 2 * MAX + 1
n = len(self)
h = 1927868237 * (n + 1)
h &= MASK
for x in self:
hx = hash(x)
h ^= (hx ^ (hx << 16) ^ 89869747) * 3644798167
h &= MASK
h = h * 69069 + 907133923
h &= MASK
if h > MAX:
h -= MASK + 1
if h == -1:
h = 590923713
return h
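# Illustrative sketch (hypothetical class, not defined here): a hashable set
# type can reuse the Set mixin methods and _hash(), for example:
#
#     class FrozenListSet(Set, Hashable):
#         def __init__(self, iterable=()):
#             self._items = list(iterable)
#         def __contains__(self, value):
#             return value in self._items
#         def __iter__(self):
#             return iter(self._items)
#         def __len__(self):
#             return len(self._items)
#         __hash__ = Set._hash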
Set.register(frozenset)
class MutableSet(Set):
@abstractmethod
def add(self, value):
"""Add an element."""
raise NotImplementedError
@abstractmethod
def discard(self, value):
"""Remove an element. Do not raise an exception if absent."""
raise NotImplementedError
def remove(self, value):
"""Remove an element. If not a member, raise a KeyError."""
if value not in self:
raise KeyError(value)
self.discard(value)
def pop(self):
"""Return the popped value. Raise KeyError if empty."""
it = iter(self)
try:
value = next(it)
except StopIteration:
raise KeyError
self.discard(value)
return value
def clear(self):
"""This is slow (creates N new iterators!) but effective."""
try:
while True:
self.pop()
except KeyError:
pass
def __ior__(self, it):
for value in it:
self.add(value)
return self
def __iand__(self, it):
for value in (self - it):
self.discard(value)
return self
def __ixor__(self, it):
if it is self:
self.clear()
else:
if not isinstance(it, Set):
it = self._from_iterable(it)
for value in it:
if value in self:
self.discard(value)
else:
self.add(value)
return self
def __isub__(self, it):
if it is self:
self.clear()
else:
for value in it:
self.discard(value)
return self
MutableSet.register(set)
### MAPPINGS ###
class Mapping(Sized, Iterable, Container):
@abstractmethod
def __getitem__(self, key):
raise KeyError
def get(self, key, default=None):
try:
return self[key]
except KeyError:
return default
def __contains__(self, key):
try:
self[key]
except KeyError:
return False
else:
return True
def iterkeys(self):
return iter(self)
def itervalues(self):
for key in self:
yield self[key]
def iteritems(self):
for key in self:
yield (key, self[key])
def keys(self):
return list(self)
def items(self):
return [(key, self[key]) for key in self]
def values(self):
return [self[key] for key in self]
# Mappings are not hashable by default, but subclasses can change this
__hash__ = None
def __eq__(self, other):
if not isinstance(other, Mapping):
return NotImplemented
return dict(self.items()) == dict(other.items())
def __ne__(self, other):
return not (self == other)
class MappingView(Sized):
def __init__(self, mapping):
self._mapping = mapping
def __len__(self):
return len(self._mapping)
def __repr__(self):
return '{0.__class__.__name__}({0._mapping!r})'.format(self)
class KeysView(MappingView, Set):
@classmethod
def _from_iterable(self, it):
return set(it)
def __contains__(self, key):
return key in self._mapping
def __iter__(self):
for key in self._mapping:
yield key
class ItemsView(MappingView, Set):
@classmethod
def _from_iterable(self, it):
return set(it)
def __contains__(self, item):
key, value = item
try:
v = self._mapping[key]
except KeyError:
return False
else:
return v == value
def __iter__(self):
for key in self._mapping:
yield (key, self._mapping[key])
class ValuesView(MappingView):
def __contains__(self, value):
for key in self._mapping:
if value == self._mapping[key]:
return True
return False
def __iter__(self):
for key in self._mapping:
yield self._mapping[key]
class MutableMapping(Mapping):
@abstractmethod
def __setitem__(self, key, value):
raise KeyError
@abstractmethod
def __delitem__(self, key):
raise KeyError
__marker = object()
def pop(self, key, default=__marker):
try:
value = self[key]
except KeyError:
if default is self.__marker:
raise
return default
else:
del self[key]
return value
def popitem(self):
try:
key = next(iter(self))
except StopIteration:
raise KeyError
value = self[key]
del self[key]
return key, value
def clear(self):
try:
while True:
self.popitem()
except KeyError:
pass
def update(*args, **kwds):
if len(args) > 2:
raise TypeError("update() takes at most 2 positional "
"arguments ({} given)".format(len(args)))
elif not args:
raise TypeError("update() takes at least 1 argument (0 given)")
self = args[0]
other = args[1] if len(args) >= 2 else ()
if isinstance(other, Mapping):
for key in other:
self[key] = other[key]
elif hasattr(other, "keys"):
for key in other.keys():
self[key] = other[key]
else:
for key, value in other:
self[key] = value
for key, value in kwds.items():
self[key] = value
def setdefault(self, key, default=None):
try:
return self[key]
except KeyError:
self[key] = default
return default
MutableMapping.register(dict)
### SEQUENCES ###
class Sequence(Sized, Iterable, Container):
"""All the operations on a read-only sequence.
Concrete subclasses must override __new__ or __init__,
__getitem__, and __len__.
"""
@abstractmethod
def __getitem__(self, index):
raise IndexError
def __iter__(self):
i = 0
try:
while True:
v = self[i]
yield v
i += 1
except IndexError:
return
def __contains__(self, value):
for v in self:
if v == value:
return True
return False
def __reversed__(self):
for i in reversed(range(len(self))):
yield self[i]
def index(self, value):
for i, v in enumerate(self):
if v == value:
return i
raise ValueError
def count(self, value):
return sum(1 for v in self if v == value)
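# Illustrative sketch (hypothetical class, not defined here): a concrete
# read-only Sequence only needs __getitem__ and __len__; the mixin then
# supplies iteration, containment, reversal, index() and count():
#
#     class Chars(Sequence):
#         def __init__(self, text):
#             self._text = text
#         def __getitem__(self, index):
#             return self._text[index]
#         def __len__(self):
#             return len(self._text)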
Sequence.register(tuple)
Sequence.register(basestring)
Sequence.register(buffer)
Sequence.register(xrange)
class MutableSequence(Sequence):
@abstractmethod
def __setitem__(self, index, value):
raise IndexError
@abstractmethod
def __delitem__(self, index):
raise IndexError
@abstractmethod
def insert(self, index, value):
raise IndexError
def append(self, value):
self.insert(len(self), value)
def reverse(self):
n = len(self)
for i in range(n//2):
self[i], self[n-i-1] = self[n-i-1], self[i]
def extend(self, values):
for v in values:
self.append(v)
def pop(self, index=-1):
v = self[index]
del self[index]
return v
def remove(self, value):
del self[self.index(value)]
def __iadd__(self, values):
self.extend(values)
return self
MutableSequence.register(list)
|
tsingjinyun/experiments
|
refs/heads/master
|
word2vec_tw/cut.py
|
3
|
import jieba
import sys
if __name__ == '__main__':
jieba.set_dictionary('jieba/extra_dict/dict.txt.big')
for l in sys.stdin:
words = jieba.cut(l.strip())
sys.stdout.write((u' '.join(words) + u'\n').encode('utf8'))
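# For illustration, the script segments whatever is piped through stdin, e.g.
# (hypothetical file names):
#     python cut.py < corpus_zhtw.txt > corpus_segmented.txt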
|
pplatek/odoo
|
refs/heads/8.0
|
addons/account/wizard/account_report_common_account.py
|
371
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
class account_common_account_report(osv.osv_memory):
_name = 'account.common.account.report'
_description = 'Account Common Account Report'
_inherit = "account.common.report"
_columns = {
'display_account': fields.selection([('all','All'), ('movement','With movements'),
('not_zero','With balance is not equal to 0'),
],'Display Accounts', required=True),
}
_defaults = {
'display_account': 'movement',
}
def pre_print_report(self, cr, uid, ids, data, context=None):
if context is None:
context = {}
data['form'].update(self.read(cr, uid, ids, ['display_account'], context=context)[0])
return data
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
srm912/servo
|
refs/heads/master
|
tests/wpt/web-platform-tests/old-tests/webdriver/runtests.py
|
212
|
import unittest
from unittest import TestLoader, TextTestRunner, TestSuite
if __name__ == "__main__":
loader = TestLoader()
suite = TestSuite((
loader.discover(".", pattern="*.py")
))
runner = TextTestRunner(verbosity=2)
runner.run(suite)
unittest.main()
|
kb8u/ZenPacks.Merit.AdvaFSP3000R7
|
refs/heads/master
|
ZenPacks/Merit/AdvaFSP3000R7/datasources/__init__.py
|
504
|
# __init__.py
|
roth1002/infer
|
refs/heads/master
|
infer/bin/utils.py
|
2
|
#
# Copyright (c) 2013- Facebook. All rights reserved.
#
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import argparse
import csv
import fnmatch
import gzip
import json
import logging
import os
import re
import subprocess
import sys
import tempfile
import time
BIN_DIRECTORY = os.path.dirname(os.path.realpath(__file__))
LIB_DIRECTORY = os.path.join(BIN_DIRECTORY, '..', 'lib', 'java')
TMP_DIRECTORY = tempfile.gettempdir()
MODELS_JAR = os.path.join(LIB_DIRECTORY, 'models.jar')
DEFAULT_INFER_OUT = os.path.join(os.getcwd(), 'infer-out')
CSV_PERF_FILENAME = 'performances.csv'
STATS_FILENAME = 'stats.json'
CSV_REPORT_FILENAME = 'report.csv'
JSON_REPORT_FILENAME = 'report.json'
BUGS_FILENAME = 'bugs.txt'
CSV_INDEX_KIND = 1
CSV_INDEX_TYPE = 2
CSV_INDEX_QUALIFIER = 3
CSV_INDEX_LINE = 5
CSV_INDEX_FILENAME = 8
CSV_INDEX_QUALIFIER_TAGS = 11
QUALIFIER_TAGS = 'qualifier_tags'
BUCKET_TAGS = 'bucket'
IOS_CAPTURE_ERRORS = 'errors'
IOS_BUILD_OUTPUT = 'build_output'
BUCK_INFER_OUT = 'infer'
FORMAT = '[%(levelname)s] %(message)s'
DEBUG_FORMAT = '[%(levelname)s:%(filename)s:%(lineno)03d] %(message)s'
# Monkey patching subprocess (I'm so sorry!).
if "check_output" not in dir(subprocess):
def f(*popenargs, **kwargs):
if 'stdout' in kwargs:
raise ValueError('stdout not supported')
process = subprocess.Popen(
stdout=subprocess.PIPE,
*popenargs,
**kwargs)
output, unused_err = process.communicate()
retcode = process.poll()
if retcode:
cmd = kwargs.get("args")
if cmd is None:
cmd = popenargs[0]
raise subprocess.CalledProcessError(retcode, cmd)
return output
subprocess.check_output = f
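# Illustrative use of the backport (a sketch, not part of the original module):
# after the patch above, Python 2.6 callers can use the 2.7-style call
#   out = subprocess.check_output(['echo', 'hello'])
# and a non-zero exit status raises subprocess.CalledProcessError, as in 2.7.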
def configure_logging(debug, quiet=False):
"""Configures the default logger. This can be called only once and has to
be called before any logging is done.
"""
logging.TIMING = logging.ERROR + 5
logging.addLevelName(logging.TIMING, "TIMING")
def timing(msg, *args, **kwargs):
logging.log(logging.TIMING, msg, *args, **kwargs)
logging.timing = timing
if quiet:
logging.basicConfig(level=logging.TIMING, format=FORMAT)
elif not debug:
logging.basicConfig(level=logging.INFO, format=FORMAT)
else:
logging.basicConfig(level=logging.DEBUG, format=DEBUG_FORMAT)
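# Illustrative usage (sketch): configure_logging(debug=False) enables INFO-level
# output, after which the custom TIMING level registered above is available as
#   logging.timing('analysis finished in %.2fs', 12.3)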
def elapsed_time(start_time):
return time.time() - start_time
def error(msg):
print(msg, file=sys.stderr)
def remove_bucket(bug_message):
""" Remove anything from the beginning if the message that
looks like a bucket """
return re.sub(r'(^\[[a-zA-Z0-9]*\])', '', bug_message, 1)
def get_cmd_in_bin_dir(binary_name):
# this relies on the fact that utils.py is located in infer/bin
return os.path.join(
os.path.dirname(os.path.realpath(__file__)),
binary_name)
def write_cmd_streams_to_file(logfile, cmd=None, out=None, err=None):
with open(logfile, 'w') as log_filedesc:
if cmd:
log_filedesc.write(' '.join(cmd) + '\n')
if err is not None:
errors = str(err)
log_filedesc.write('\nSTDERR:\n')
log_filedesc.write(errors)
if out is not None:
output = str(out)
log_filedesc.write('\n\nSTDOUT:\n')
log_filedesc.write(output)
def save_failed_command(
infer_out,
cmd,
message,
prefix='failed_',
out=None,
err=None):
cmd_filename = tempfile.mktemp(
'_' + message + ".txt",
prefix, infer_out
)
write_cmd_streams_to_file(cmd_filename, cmd=cmd, out=out, err=err)
logging.error('\n' + message + ' error saved in ' + cmd_filename)
def run_command(cmd, debug_mode, infer_out, message, env=os.environ):
if debug_mode:
print('\n{0}\n'.format(' '.join(cmd)))
try:
return subprocess.check_call(cmd, env=env)
except subprocess.CalledProcessError as e:
save_failed_command(infer_out, cmd, message)
raise e
def print_exit(s):
print(s)
exit(os.EX_OK)
def infer_version():
version = json.loads(subprocess.check_output([
get_cmd_in_bin_dir('InferAnalyze'),
'-version_json',
]).decode())
return version['commit']
def infer_branch():
version = json.loads(subprocess.check_output([
get_cmd_in_bin_dir('InferAnalyze'),
'-version_json',
]).decode())
return version['branch']
def infer_key(analyzer):
return os.pathsep.join([analyzer, infer_version()])
def vcs_branch(dir='.'):
cwd = os.getcwd()
devnull = open(os.devnull, 'w')
try:
os.chdir(dir)
branch = subprocess.check_output(
['git',
'rev-parse',
'--abbrev-ref',
'HEAD'],
stderr=devnull).decode().strip()
except subprocess.CalledProcessError:
try:
branch = subprocess.check_output(
['hg',
'id',
'-B'],
stderr=devnull).decode().strip()
except subprocess.CalledProcessError:
branch = 'not-versioned'
finally:
devnull.close()
os.chdir(cwd)
return branch
def vcs_revision(dir='.'):
cwd = os.getcwd()
devnull = open(os.devnull, 'w')
try:
os.chdir(dir)
revision = subprocess.check_output(
['git',
'rev-parse',
'HEAD'],
stderr=devnull).decode().strip()
except subprocess.CalledProcessError:
try:
revision = subprocess.check_output(
['hg',
'id',
'-i'],
stderr=devnull).decode().strip()
except subprocess.CalledProcessError:
revision = 'not-versioned'
finally:
devnull.close()
os.chdir(cwd)
return revision
class Timer:
"""Simple logging timer. Initialize with a printf like logging function."""
def __init__(self, logger=lambda x: None):
self._logger = logger
self._start = 0
def start(self, message=None, *args):
self._start = time.time()
if message:
self._logger(message, *args)
def stop(self, message=None, *args):
self._stop = time.time()
self._dt = self._stop - self._start
if message:
self._logger(message + ' (%.2fs)', *(args + (self._dt,)))
return self._dt
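# Illustrative Timer usage (sketch, assumes configure_logging was called first):
#   t = Timer(logging.timing)
#   t.start('capture started')
#   ...                       # timed work
#   t.stop('capture done')    # logs "capture done (1.23s)" and returns the delta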
def interact():
"""Start interactive mode. Useful for debugging.
"""
import code
code.interact(local=locals())
def search_files(root_dir, extension):
# Input:
    # - root directory where to start a recursive search of files
# - file extension to search from the root
# Output:
# - list of absolute filepaths
files = []
if not os.path.isabs(root_dir):
root_dir = os.path.abspath(root_dir)
for dirpath, _, filenames in os.walk(root_dir):
for filename in fnmatch.filter(filenames, "*" + extension):
files.append(os.path.join(dirpath, filename))
return files
def uncompress_gzip_file(gzip_file, out_dir):
# This is python2.6 compliant, gzip.open doesn't support 'with' statement
# Input:
# - gzip file path
# - output directory where uncompress the file
# Output:
# - path of the uncompressed file
    # NOTE: the file is permanently created; it is the responsibility of the
    #   caller to delete it
uncompressed_path = None
uncompressed_fd = None
compressed_fd = None
try:
# the uncompressed filename loses its final extension
# (for example abc.gz -> abc)
uncompressed_path = os.path.join(
out_dir,
os.path.splitext(gzip_file)[0],
)
uncompressed_fd = open(uncompressed_path, 'wb')
compressed_fd = gzip.open(gzip_file, 'rb')
uncompressed_fd.write(compressed_fd.read())
return uncompressed_path
except IOError as exc:
# delete the uncompressed file (if exists)
if uncompressed_path is not None and os.path.exists(uncompressed_path):
os.remove(uncompressed_path)
raise exc
finally:
if compressed_fd is not None:
compressed_fd.close()
if uncompressed_fd is not None:
uncompressed_fd.close()
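# Illustrative usage (sketch): uncompress_gzip_file('report.csv.gz', '/tmp')
# writes '/tmp/report.csv' and returns that path; as noted above, deleting the
# uncompressed file afterwards is the caller's responsibility.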
def run_process(cmd, cwd=None, logfile=None):
# Input:
# - command to execute
# - current working directory to cd before running the cmd
# - logfile where to dump stdout/stderr
# Output:
# - exitcode of the executed process
p = subprocess.Popen(
cmd,
cwd=cwd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
(out, err) = p.communicate()
if logfile:
write_cmd_streams_to_file(logfile, cmd=cmd, out=out, err=err)
return p.returncode
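# Illustrative usage (sketch; command, paths and logfile are hypothetical):
#   rc = run_process(['make', 'test'], cwd='/path/to/project', logfile='/tmp/make.log')
# returns the exit code and dumps stdout/stderr to the logfile when one is given.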
def invoke_function_with_callbacks(
func,
args,
on_terminate=None,
on_exception=None):
try:
res = func(*args)
if on_terminate:
on_terminate(res)
return res
except Exception as exc:
if on_exception:
return on_exception(exc)
raise
def create_json_report(out_dir):
csv_report_filename = os.path.join(out_dir, CSV_REPORT_FILENAME)
json_report_filename = os.path.join(out_dir, JSON_REPORT_FILENAME)
rows = []
with open(csv_report_filename, 'r') as file_in:
reader = csv.reader(file_in)
rows = [row for row in reader]
with open(json_report_filename, 'w') as file_out:
headers = rows[0]
issues = rows[1:]
json.dump([dict(zip(headers, row)) for row in issues], file_out)
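# Illustrative transformation (sketch, hypothetical two-column report.csv):
#   kind,type
#   ERROR,NULL_DEREFERENCE
# becomes report.json as
#   [{"kind": "ERROR", "type": "NULL_DEREFERENCE"}]
# i.e. each CSV row is turned into a dict keyed by the header row.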
class AbsolutePathAction(argparse.Action):
"""Convert a path from relative to absolute in the arg parser"""
def __call__(self, parser, namespace, values, option_string=None):
setattr(namespace, self.dest, os.path.abspath(values))
# vim: set sw=4 ts=4 et:
|
Acidburn0zzz/archiso-gui
|
refs/heads/master
|
releng/root-image/usr/share/cnchi/src/i18n.py
|
1
|
#!/usr/bin/python
# -*- coding: UTF-8 -*-
#
# Copyright (C) 2012 Canonical Ltd.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
import gettext
import misc
def utf8(s, errors="strict"):
"""Decode a string as UTF-8 if it isn't already Unicode."""
if isinstance(s, str):
return s
else:
return str(s, "utf-8", errors)
# Returns a tuple of (current language, sorted choices, display map).
def get_languages(language_list="data/languagelist.data.gz", current_language_index=-1, only_installable=False):
import gzip
#import icu
current_language = "English"
if only_installable:
from apt.cache import Cache
#workaround for an issue where euid != uid and the
#apt cache has not yet been loaded causing a SystemError
#when libapt-pkg tries to load the Cache the first time.
with misc.raised_privileges():
cache = Cache()
languagelist = gzip.open(language_list)
language_display_map = {}
i = 0
for line in languagelist:
line = utf8(line)
if line == '' or line == '\n':
continue
code, name, trans = line.strip('\n').split(':')[1:]
if code in ('C', 'dz', 'km'):
i += 1
continue
# KDE fails to round-trip strings containing U+FEFF ZERO WIDTH
# NO-BREAK SPACE, and we don't care about the NBSP anyway, so strip
# it.
# https://bugs.launchpad.net/bugs/1001542
# (comment #5 and on)
trans = trans.strip(" \ufeff")
if only_installable:
pkg_name = 'language-pack-%s' % code
#special case these
if pkg_name.endswith('_CN'):
pkg_name = 'language-pack-zh-hans'
elif pkg_name.endswith('_TW'):
pkg_name = 'language-pack-zh-hant'
elif pkg_name.endswith('_NO'):
pkg_name = pkg_name.split('_NO')[0]
elif pkg_name.endswith('_BR'):
pkg_name = pkg_name.split('_BR')[0]
try:
pkg = cache[pkg_name]
if not (pkg.installed or pkg.candidate):
i += 1
continue
except KeyError:
i += 1
continue
language_display_map[trans] = (name, code)
if i == current_language_index:
current_language = trans
i += 1
languagelist.close()
if only_installable:
del cache
#try:
# Note that we always collate with the 'C' locale. This is far
# from ideal. But proper collation always requires a specific
# language for its collation rules (languages frequently have
# custom sorting). This at least gives us common sorting rules,
# like stripping accents.
#collator = icu.Collator.createInstance(icu.Locale('C'))
#except:
# collator = None
collator = None
def compare_choice(x):
if language_display_map[x][1] == 'C':
return None # place C first
if collator:
try:
return collator.getCollationKey(x).getByteArray()
except:
pass
# Else sort by unicode code point, which isn't ideal either,
# but also has the virtue of sorting like-glyphs together
return x
sorted_choices = sorted(language_display_map, key=compare_choice)
return current_language, sorted_choices, language_display_map
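# Illustrative usage (sketch, not part of the original module):
#   current, choices, display_map = get_languages()
# 'choices' is the sorted list of display names and 'display_map' maps each
# display name to its (name, code) pair, as built in the loop above.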
|
USGSDenverPychron/pychron
|
refs/heads/develop
|
pychron/experiment/utilities/identifier.py
|
1
|
# ===============================================================================
# Copyright 2012 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
# ============= standard library imports ========================
import os
import re
import yaml
# ============= local library imports ==========================
from pychron.file_defaults import IDENTIFIERS_DEFAULT
from pychron.pychron_constants import LINE_STR, ALPHAS
from pychron.paths import paths
IDENTIFIER_REGEX = re.compile(r'(?P<identifier>\d+)-(?P<aliquot>\d+)(?P<step>\w*)')
SPECIAL_IDENTIFIER_REGEX = re.compile(r'(?P<identifier>\w{1,2}-[\d\w]+-\w{1})-(?P<aliquot>\d+)')
ANALYSIS_MAPPING_UNDERSCORE_KEY = dict() # blank_air: ba
ANALYSIS_MAPPING = dict() # ba: 'Blank Air'
NON_EXTRACTABLE = dict() # ba: 'Blank Air'
ANALYSIS_MAPPING_INTS = dict() # blank_air: 0
SPECIAL_MAPPING = dict() # blank_air: ba
SPECIAL_NAMES = ['Special Labnumber', LINE_STR] # 'Blank Air'
SPECIAL_KEYS = [] # ba
# AGE_TESTABLE = []
try:
p = os.path.join(paths.hidden_dir, 'identifiers.yaml')
with open(p, 'r') as rfile:
yd = yaml.load(rfile)
except BaseException:
yd = yaml.load(IDENTIFIERS_DEFAULT)
for i, idn_d in enumerate(yd):
key = idn_d['shortname']
value = idn_d['name']
ANALYSIS_MAPPING[key] = value
underscore_name = value.lower().replace(' ', '_')
ANALYSIS_MAPPING_INTS[underscore_name] = i
ANALYSIS_MAPPING_UNDERSCORE_KEY[underscore_name] = key
if not idn_d['extractable']:
NON_EXTRACTABLE[key] = value
# if idn_d['ageable']:
# AGE_TESTABLE.append(value.lower())
if idn_d['special']:
SPECIAL_MAPPING[underscore_name] = key
SPECIAL_NAMES.append(value)
SPECIAL_KEYS.append(key)
# ANALYSIS_MAPPING = dict(ba='Blank Air', bc='Blank Cocktail', bu='Blank Unknown',
# bg='Background', u='Unknown', c='Cocktail', a='Air',
# pa='Pause', ic='Detector IC')
#
# ANALYSIS_MAPPING_INTS = dict(unknown=0, background=1,
# air=2, cocktail=3,
# blank_air=4,
# blank_cocktail=5,
# blank_unknown=6,
# detector_ic=7)
#
#
# # "labnumbers" where extract group is disabled
# NON_EXTRACTABLE = dict(ba='Blank Air', bc='Blank Cocktail', bu='Blank Unknown',
# bg='Background', c='Cocktail', a='Air', ic='Detector IC', be='Blank ExtractionLine')
#
# AGE_TESTABLE = ('unknown','cocktail')
# SPECIAL_NAMES = ['Special Labnumber', LINE_STR, 'Air', 'Cocktail', 'Blank Unknown',
# 'Blank Air', 'Blank Cocktail', 'Background', 'Pause', 'Degas', 'Detector IC']
#
# SPECIAL_MAPPING = dict(background='bg',
# blank_air='ba',
# blank_cocktail='bc',
# blank_unknown='bu',
# pause='pa',
# degas='dg',
# detector_ic='ic',
# air='a',
# cocktail='c',
# unknown='u')
#
# p = os.path.join(paths.setup_dir, 'identifiers.yaml')
# differed = []
# if os.path.isfile(p):
# with open(p, 'r') as rfile:
# yd = yaml.load(rfile)
# for i, (k, v) in enumerate(yd.items()):
# ANALYSIS_MAPPING[k] = v
#
# #if : assume '01:Value' where 01 is used for preserving order
# if ':' in v:
# a, v = v.split(':')
# c = int(a)
# differed.append((c, v))
# ANALYSIS_MAPPING_INTS[v.lower()] = 7 + c
# else:
# SPECIAL_NAMES.append(v)
# ANALYSIS_MAPPING_INTS[v.lower()] = 7 + i
# SPECIAL_MAPPING[v.lower()] = k
#
# if differed:
# ds = sorted(differed, key=lambda x: x[0])
# SPECIAL_NAMES.extend([di[1] for di in ds])
#
# SPECIAL_KEYS = map(str.lower, SPECIAL_MAPPING.values())
def convert_identifier_to_int(ln):
m = {'ba': 1, 'bc': 2, 'bu': 3, 'bg': 4, 'u': 5, 'c': 6, 'ic': 7}
try:
return int(ln)
except ValueError:
return m[ln]
def convert_special_name(name, output='shortname'):
"""
input name output shortname
name='Background'
returns:
if output=='shortname'
return 'bg'
else
return 4 #identifier
"""
if isinstance(name, str):
name = name.lower()
name = name.replace(' ', '_')
if name in SPECIAL_MAPPING:
sn = SPECIAL_MAPPING[name]
if output == 'labnumber':
sn = convert_identifier(sn)
return sn
else:
return name
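# Illustrative behaviour (sketch, mirroring the docstring above): with the
# default identifiers loaded earlier, convert_special_name('Background')
# returns 'bg'.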
def convert_identifier(identifier):
"""
old:
identifier=='bg, a, ...'
return 1
identifier== bu-FD-J, 51234, 13212-01
return bu-FD-J, 51234, 13212
"""
if '-' in identifier:
ln = identifier.split('-')[0]
try:
ln = int(ln)
identifier = str(ln)
except ValueError:
return identifier
# identifier=identifier.split('-')[0]
# if identifier in ANALYSIS_MAPPING:
# sname = ANALYSIS_MAPPING[identifier]
# identifier = next((k for k, v in SPECIAL_IDS.iteritems() if v == sname), identifier)
return identifier
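# Illustrative behaviour (sketch, per the docstring): identifiers with a numeric
# prefix are truncated, others pass through unchanged, e.g.
#   convert_identifier('13212-01')   # -> '13212'
#   convert_identifier('bu-FD-J')    # -> 'bu-FD-J'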
def get_analysis_type(idn):
"""
idn: str like 'a-...' or '43513'
"""
idn = idn.lower()
for atype, tag in SPECIAL_MAPPING.iteritems():
if idn.startswith(tag):
return atype
else:
return 'unknown'
# if idn.startswith('bg'):
# return 'background'
# elif idn.startswith('ba'):
# return 'blank_air'
# elif idn.startswith('bc'):
# return 'blank_cocktail'
# elif idn.startswith('b'):
# return 'blank_unknown'
# elif idn.startswith('a'):
# return 'air'
# elif idn.startswith('c'):
# return 'cocktail'
# elif idn.startswith('dg'):
# return 'degas'
# elif idn.startswith('pa'):
# return 'pause'
# else:
# return 'unknown'
def make_runid(ln, a, s=''):
_as = make_aliquot_step(a, s)
return '{}-{}'.format(ln, _as)
def strip_runid(r):
l, x = r.split('-')
a = ''
for i, xi in enumerate(x):
a += xi
try:
int(a)
except ValueError:
a = x[:i]
s = x[i:]
break
else:
s = ''
return l, int(a), s
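# Illustrative round trip (sketch): make_runid and strip_runid are inverses for
# ordinary run ids, e.g.
#   make_runid('12345', 1, 'A')   # -> '12345-01A'
#   strip_runid('12345-01A')      # -> ('12345', 1, 'A')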
def make_step(s):
if isinstance(s, (float, int, long)):
s = ALPHAS[int(s)]
return s or ''
def make_aliquot_step(a, s):
if not isinstance(a, str):
a = '{:02d}'.format(int(a))
s = make_step(s)
return '{}{}'.format(a, s)
def make_identifier(ln, ed, ms):
try:
_ = int(ln)
return ln
except ValueError:
return make_special_identifier(ln, ed, ms)
def make_standard_identifier(ln, modifier, ms, aliquot=None):
"""
ln: str or int
a: int
modifier: str or int. if int zero pad
ms: int or str
"""
if isinstance(ms, int):
ms = '{:02d}'.format(ms)
try:
modifier = '{:02d}'.format(modifier)
except ValueError:
pass
d = '{}-{}-{}'.format(ln, modifier, ms)
if aliquot:
d = '{}-{:02d}'.format(d, aliquot)
return d
def make_special_identifier(ln, ed, ms, aliquot=None):
"""
ln: str or int
a: int aliquot
ms: int mass spectrometer id
ed: int extract device id
"""
if isinstance(ed, int):
ed = '{:02d}'.format(ed)
if isinstance(ms, int):
ms = '{:02d}'.format(ms)
d = '{}-{}-{}'.format(ln, ed, ms)
if aliquot:
if not isinstance(aliquot, str):
aliquot = '{:02d}'.format(aliquot)
d = '{}-{}'.format(d, aliquot)
return d
def make_rid(ln, a, step=''):
"""
if ln can be converted to integer return runid
else return ln-a
"""
try:
_ = int(ln)
return make_runid(ln, a, step)
except ValueError:
if not isinstance(a, str):
a = '{:02d}'.format(a)
return '{}-{}'.format(ln, a)
def is_special(ln):
special = False
if '-' in ln:
special = ln.split('-')[0] in ANALYSIS_MAPPING
return special
# return make_special_identifier(ln, ed, ms, aliquot=a)
# ===============================================================================
# deprecated
# ===============================================================================
# SPECIAL_IDS = {1: 'Blank Air', 2: 'Blank Cocktail', 3: 'Blank Unknown',
# 4: 'Background', 5: 'Air', 6: 'Cocktail'
# }
# # @deprecated
# def convert_labnumber(ln):
# """
# ln is a str but only special labnumbers cannot be converted to int
# convert number to name
#
# """
# try:
# ln = int(ln)
#
# if ln in SPECIAL_IDS:
# ln = SPECIAL_IDS[ln]
# except ValueError:
# pass
#
# return ln
#
#
# # @deprecated
# def convert_shortname(ln):
# """
# convert number to shortname (a for air, bg for background...)
# """
# name = convert_labnumber(ln)
# if name is not None:
# ln = next((k for k, v in ANALYSIS_MAPPING.iteritems()
# if v == name), ln)
# return ln
def convert_extract_device(name):
"""
change Fusions UV to FusionsUV, etc
"""
n = ''
if name:
n = name.replace(' ', '')
return n
def pretty_extract_device(ident):
"""
change fusions_uv to Fusions UV, etc
"""
n = ''
if ident:
args = ident.split('_')
        if args[-1] in ('uv', 'co2'):
n = ' '.join(map(str.capitalize, args[:-1]))
n = '{} {}'.format(n, args[-1].upper())
else:
n = ' '.join(map(str.capitalize, args))
# n=ident.replace(' ', '_')
return n
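# Illustrative behaviour (sketch, per the docstrings above):
#   convert_extract_device('Fusions UV')   # -> 'FusionsUV'
#   pretty_extract_device('fusions_uv')    # -> 'Fusions UV'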
# ============= EOF =============================================
|
fairbird/OpenPLI-BlackHole
|
refs/heads/master
|
lib/python/Screens/Wizard.py
|
1
|
from Screen import Screen
from Screens.HelpMenu import HelpableScreen
from Screens.MessageBox import MessageBox
from Components.config import config, ConfigText, ConfigPassword, KEY_LEFT, KEY_RIGHT, KEY_HOME, KEY_END, KEY_0, KEY_DELETE, KEY_BACKSPACE, KEY_OK, KEY_TOGGLEOW, KEY_ASCII, KEY_TIMEOUT, KEY_NUMBERS
from Components.Label import Label
from Components.Sources.StaticText import StaticText
from Components.Slider import Slider
from Components.ActionMap import NumberActionMap
from Components.MenuList import MenuList
from Components.ConfigList import ConfigList
from Components.Sources.List import List
from enigma import eTimer, eEnv
from xml.sax import make_parser
from xml.sax.handler import ContentHandler
class WizardSummary(Screen):
def __init__(self, session, parent):
Screen.__init__(self, session, parent)
self["text"] = StaticText("")
self.onShow.append(self.setCallback)
def setCallback(self):
self.parent.setLCDTextCallback(self.setText)
def setText(self, text):
self["text"].setText(text)
class Wizard(Screen):
instance = None
def createSummary(self):
print "WizardCreateSummary"
return WizardSummary
class parseWizard(ContentHandler):
def __init__(self, wizard):
self.isPointsElement, self.isReboundsElement = 0, 0
self.wizard = wizard
self.currContent = ""
self.lastStep = 0
def startElement(self, name, attrs):
#print "startElement", name
self.currContent = name
if (name == "step"):
self.lastStep += 1
if attrs.has_key('id'):
id = str(attrs.get('id'))
else:
id = ""
#print "id:", id
if attrs.has_key('nextstep'):
nextstep = str(attrs.get('nextstep'))
else:
nextstep = None
if attrs.has_key('timeout'):
timeout = int(attrs.get('timeout'))
else:
timeout = None
if attrs.has_key('timeoutaction'):
timeoutaction = str(attrs.get('timeoutaction'))
else:
timeoutaction = 'nextpage'
if attrs.has_key('timeoutstep'):
timeoutstep = str(attrs.get('timeoutstep'))
else:
timeoutstep = ''
self.wizard[self.lastStep] = {"id": id, "condition": "", "text": "", "timeout": timeout, "timeoutaction": timeoutaction, "timeoutstep": timeoutstep, "list": [], "config": {"screen": None, "args": None, "type": "" }, "code": "", "codeafter": "", "code_async": "", "codeafter_async": "", "nextstep": nextstep}
if attrs.has_key('laststep'):
self.wizard[self.lastStep]["laststep"] = str(attrs.get('laststep'))
elif (name == "text"):
self.wizard[self.lastStep]["text"] = str(attrs.get('value')).replace("\\n", "\n")
elif (name == "displaytext"):
self.wizard[self.lastStep]["displaytext"] = str(attrs.get('value')).replace("\\n", "\n")
elif (name == "list"):
if (attrs.has_key('type')):
if attrs["type"] == "dynamic":
self.wizard[self.lastStep]["dynamiclist"] = attrs.get("source")
#self.wizard[self.lastStep]["list"].append(("Hallo", "test"))
if (attrs.has_key("evaluation")):
#print "evaluation"
self.wizard[self.lastStep]["listevaluation"] = attrs.get("evaluation")
if (attrs.has_key("onselect")):
self.wizard[self.lastStep]["onselect"] = attrs.get("onselect")
elif (name == "listentry"):
self.wizard[self.lastStep]["list"].append((str(attrs.get('caption')), str(attrs.get('step'))))
elif (name == "config"):
type = str(attrs.get('type'))
self.wizard[self.lastStep]["config"]["type"] = type
if type == "ConfigList" or type == "standalone":
try:
exec "from Screens." + str(attrs.get('module')) + " import *"
except:
exec "from " + str(attrs.get('module')) + " import *"
self.wizard[self.lastStep]["config"]["screen"] = eval(str(attrs.get('screen')))
if (attrs.has_key('args')):
#print "has args"
self.wizard[self.lastStep]["config"]["args"] = str(attrs.get('args'))
elif type == "dynamic":
self.wizard[self.lastStep]["config"]["source"] = str(attrs.get('source'))
if (attrs.has_key('evaluation')):
self.wizard[self.lastStep]["config"]["evaluation"] = str(attrs.get('evaluation'))
elif (name == "code"):
self.async_code = attrs.has_key('async') and str(attrs.get('async')) == "yes"
if attrs.has_key('pos') and str(attrs.get('pos')) == "after":
self.codeafter = True
else:
self.codeafter = False
elif (name == "condition"):
pass
def endElement(self, name):
self.currContent = ""
if name == 'code':
if self.async_code:
if self.codeafter:
self.wizard[self.lastStep]["codeafter_async"] = self.wizard[self.lastStep]["codeafter_async"].strip()
else:
self.wizard[self.lastStep]["code_async"] = self.wizard[self.lastStep]["code_async"].strip()
else:
if self.codeafter:
self.wizard[self.lastStep]["codeafter"] = self.wizard[self.lastStep]["codeafter"].strip()
else:
self.wizard[self.lastStep]["code"] = self.wizard[self.lastStep]["code"].strip()
elif name == 'condition':
self.wizard[self.lastStep]["condition"] = self.wizard[self.lastStep]["condition"].strip()
elif name == 'step':
#print "Step number", self.lastStep, ":", self.wizard[self.lastStep]
pass
def characters(self, ch):
if self.currContent == "code":
if self.async_code:
if self.codeafter:
self.wizard[self.lastStep]["codeafter_async"] = self.wizard[self.lastStep]["codeafter_async"] + ch
else:
self.wizard[self.lastStep]["code_async"] = self.wizard[self.lastStep]["code_async"] + ch
else:
if self.codeafter:
self.wizard[self.lastStep]["codeafter"] = self.wizard[self.lastStep]["codeafter"] + ch
else:
self.wizard[self.lastStep]["code"] = self.wizard[self.lastStep]["code"] + ch
elif self.currContent == "condition":
self.wizard[self.lastStep]["condition"] = self.wizard[self.lastStep]["condition"] + ch
def __init__(self, session, showSteps = True, showStepSlider = True, showList = True, showConfig = True):
Screen.__init__(self, session)
self.isLastWizard = False # can be used to skip a "goodbye"-screen in a wizard
self.stepHistory = []
self.wizard = {}
parser = make_parser()
if not isinstance(self.xmlfile, list):
self.xmlfile = [self.xmlfile]
print "Reading ", self.xmlfile
wizardHandler = self.parseWizard(self.wizard)
parser.setContentHandler(wizardHandler)
for xmlfile in self.xmlfile:
if xmlfile[0] != '/':
parser.parse(eEnv.resolve('${datadir}/enigma2/') + xmlfile)
else:
parser.parse(xmlfile)
self.showSteps = showSteps
self.showStepSlider = showStepSlider
self.showList = showList
self.showConfig = showConfig
self.numSteps = len(self.wizard)
self.currStep = self.getStepWithID("start") + 1
self.timeoutTimer = eTimer()
self.timeoutTimer.callback.append(self.timeoutCounterFired)
self["text"] = Label()
if showConfig:
self["config"] = ConfigList([], session = session)
if self.showSteps:
self["step"] = Label()
if self.showStepSlider:
self["stepslider"] = Slider(1, self.numSteps)
if self.showList:
self.list = []
self["list"] = List(self.list, enableWrapAround = True)
self["list"].onSelectionChanged.append(self.selChanged)
#self["list"] = MenuList(self.list, enableWrapAround = True)
self.onShown.append(self.updateValues)
self.configInstance = None
self.currentConfigIndex = None
Wizard.instance = self
self.lcdCallbacks = []
self.disableKeys = False
self["actions"] = NumberActionMap(["WizardActions", "NumberActions", "ColorActions", "SetupActions", "InputAsciiActions", "KeyboardInputActions"],
{
"gotAsciiCode": self.keyGotAscii,
"ok": self.ok,
"back": self.back,
"left": self.left,
"right": self.right,
"up": self.up,
"down": self.down,
"red": self.red,
"green": self.green,
"yellow": self.yellow,
"blue":self.blue,
"deleteBackward": self.deleteBackward,
"deleteForward": self.deleteForward,
"1": self.keyNumberGlobal,
"2": self.keyNumberGlobal,
"3": self.keyNumberGlobal,
"4": self.keyNumberGlobal,
"5": self.keyNumberGlobal,
"6": self.keyNumberGlobal,
"7": self.keyNumberGlobal,
"8": self.keyNumberGlobal,
"9": self.keyNumberGlobal,
"0": self.keyNumberGlobal
}, -1)
self["VirtualKB"] = NumberActionMap(["VirtualKeyboardActions"],
{
"showVirtualKeyboard": self.KeyText,
}, -2)
self["VirtualKB"].setEnabled(False)
def red(self):
print "red"
pass
def green(self):
print "green"
pass
def yellow(self):
print "yellow"
pass
def blue(self):
print "blue"
pass
def deleteForward(self):
self.resetCounter()
if (self.wizard[self.currStep]["config"]["screen"] != None):
self.configInstance.keyDelete()
elif (self.wizard[self.currStep]["config"]["type"] == "dynamic"):
self["config"].handleKey(KEY_DELETE)
print "deleteForward"
def deleteBackward(self):
self.resetCounter()
if (self.wizard[self.currStep]["config"]["screen"] != None):
self.configInstance.keyBackspace()
elif (self.wizard[self.currStep]["config"]["type"] == "dynamic"):
self["config"].handleKey(KEY_BACKSPACE)
print "deleteBackward"
def setLCDTextCallback(self, callback):
self.lcdCallbacks.append(callback)
def back(self):
if self.disableKeys:
return
print "getting back..."
print "stepHistory:", self.stepHistory
if len(self.stepHistory) > 1:
self.currStep = self.stepHistory[-2]
self.stepHistory = self.stepHistory[:-2]
else:
self.session.openWithCallback(self.exitWizardQuestion, MessageBox, (_("Are you sure you want to exit this wizard?") ) )
if self.currStep < 1:
self.currStep = 1
print "currStep:", self.currStep
print "new stepHistory:", self.stepHistory
self.updateValues()
print "after updateValues stepHistory:", self.stepHistory
def exitWizardQuestion(self, ret = False):
if (ret):
self.markDone()
self.exit()
def markDone(self):
pass
def exit(self):
Wizard.instance = None
self.close()
def getStepWithID(self, id):
print "getStepWithID:", id
count = 0
for x in self.wizard.keys():
if self.wizard[x]["id"] == id:
print "result:", count
return count
count += 1
print "result: nothing"
return 0
def finished(self, gotoStep = None, *args, **kwargs):
print "finished"
currStep = self.currStep
if self.updateValues not in self.onShown:
self.onShown.append(self.updateValues)
if self.showConfig:
if self.wizard[currStep]["config"]["type"] == "dynamic":
eval("self." + self.wizard[currStep]["config"]["evaluation"])()
if self.showList:
if (len(self.wizard[currStep]["evaluatedlist"]) > 0):
print "current:", self["list"].current
nextStep = self["list"].current[1]
if (self.wizard[currStep].has_key("listevaluation")):
exec("self." + self.wizard[self.currStep]["listevaluation"] + "('" + nextStep + "')")
else:
self.currStep = self.getStepWithID(nextStep)
print_now = True
if ((currStep == self.numSteps and self.wizard[currStep]["nextstep"] is None) or self.wizard[currStep]["id"] == "end"): # wizard finished
print "wizard finished"
self.markDone()
self.exit()
else:
self.codeafter = True
self.runCode(self.wizard[currStep]["codeafter"])
self.prevStep = currStep
self.gotoStep = gotoStep
if not self.runCode(self.wizard[currStep]["codeafter_async"]):
self.afterAsyncCode()
else:
if self.updateValues in self.onShown:
self.onShown.remove(self.updateValues)
if print_now:
print "Now: " + str(self.currStep)
def ok(self):
print "OK"
if self.disableKeys:
return
currStep = self.currStep
if self.showConfig:
if (self.wizard[currStep]["config"]["screen"] != None):
# TODO: don't die, if no run() is available
# there was a try/except here, but i can't see a reason
# for this. If there is one, please do a more specific check
# and/or a comment in which situation there is no run()
if callable(getattr(self.configInstance, "runAsync", None)):
if self.updateValues in self.onShown:
self.onShown.remove(self.updateValues)
self.configInstance.runAsync(self.finished)
return
else:
self.configInstance.run()
self.finished()
def keyNumberGlobal(self, number):
if (self.wizard[self.currStep]["config"]["screen"] != None):
self.configInstance.keyNumberGlobal(number)
elif (self.wizard[self.currStep]["config"]["type"] == "dynamic"):
self["config"].handleKey(KEY_0 + number)
def keyGotAscii(self):
if (self.wizard[self.currStep]["config"]["screen"] != None):
self["config"].handleKey(KEY_ASCII)
elif (self.wizard[self.currStep]["config"]["type"] == "dynamic"):
self["config"].handleKey(KEY_ASCII)
def left(self):
self.resetCounter()
if (self.wizard[self.currStep]["config"]["screen"] != None):
self.configInstance.keyLeft()
elif (self.wizard[self.currStep]["config"]["type"] == "dynamic"):
self["config"].handleKey(KEY_LEFT)
print "left"
def right(self):
self.resetCounter()
if (self.wizard[self.currStep]["config"]["screen"] != None):
self.configInstance.keyRight()
elif (self.wizard[self.currStep]["config"]["type"] == "dynamic"):
self["config"].handleKey(KEY_RIGHT)
print "right"
def up(self):
self.resetCounter()
if (self.showConfig and self.wizard[self.currStep]["config"]["screen"] != None or self.wizard[self.currStep]["config"]["type"] == "dynamic"):
self["config"].instance.moveSelection(self["config"].instance.moveUp)
self.handleInputHelpers()
elif (self.showList and len(self.wizard[self.currStep]["evaluatedlist"]) > 0):
self["list"].selectPrevious()
if self.wizard[self.currStep].has_key("onselect"):
print "current:", self["list"].current
self.selection = self["list"].current[-1]
#self.selection = self.wizard[self.currStep]["evaluatedlist"][self["list"].l.getCurrentSelectionIndex()][1]
exec("self." + self.wizard[self.currStep]["onselect"] + "()")
print "up"
def down(self):
self.resetCounter()
if (self.showConfig and self.wizard[self.currStep]["config"]["screen"] != None or self.wizard[self.currStep]["config"]["type"] == "dynamic"):
self["config"].instance.moveSelection(self["config"].instance.moveDown)
self.handleInputHelpers()
elif (self.showList and len(self.wizard[self.currStep]["evaluatedlist"]) > 0):
#self["list"].instance.moveSelection(self["list"].instance.moveDown)
self["list"].selectNext()
if self.wizard[self.currStep].has_key("onselect"):
print "current:", self["list"].current
#self.selection = self.wizard[self.currStep]["evaluatedlist"][self["list"].l.getCurrentSelectionIndex()][1]
#exec("self." + self.wizard[self.currStep]["onselect"] + "()")
self.selection = self["list"].current[-1]
#self.selection = self.wizard[self.currStep]["evaluatedlist"][self["list"].l.getCurrentSelectionIndex()][1]
exec("self." + self.wizard[self.currStep]["onselect"] + "()")
print "down"
def selChanged(self):
self.resetCounter()
if (self.showConfig and self.wizard[self.currStep]["config"]["screen"] != None):
self["config"].instance.moveSelection(self["config"].instance.moveUp)
elif (self.showList and len(self.wizard[self.currStep]["evaluatedlist"]) > 0):
if self.wizard[self.currStep].has_key("onselect"):
self.selection = self["list"].current[-1]
print "self.selection:", self.selection
exec("self." + self.wizard[self.currStep]["onselect"] + "()")
def resetCounter(self):
self.timeoutCounter = self.wizard[self.currStep]["timeout"]
def runCode(self, code):
if code != "":
print "code", code
exec(code)
return True
return False
def getTranslation(self, text):
return _(text)
def updateText(self, firstset = False):
text = self.getTranslation(self.wizard[self.currStep]["text"])
if "[timeout]" in text:
text = text.replace("[timeout]", str(self.timeoutCounter))
self["text"].setText(text)
else:
if firstset:
self["text"].setText(text)
def updateValues(self):
print "Updating values in step " + str(self.currStep)
# calling a step which doesn't exist can only happen if the condition in the last step is not fulfilled
# if a non-existing step is called, end the wizard
if self.currStep > len(self.wizard):
self.markDone()
self.exit()
return
self.timeoutTimer.stop()
if self.configInstance is not None:
# remove callbacks
self.configInstance["config"].onSelectionChanged = []
del self.configInstance["config"]
self.configInstance.doClose()
self.configInstance = None
self.condition = True
exec (self.wizard[self.currStep]["condition"])
if not self.condition:
print "keys*******************:", self.wizard[self.currStep].keys()
if self.wizard[self.currStep].has_key("laststep"): # exit wizard, if condition of laststep doesn't hold
self.markDone()
self.exit()
return
else:
self.currStep += 1
self.updateValues()
else:
if self.wizard[self.currStep].has_key("displaytext"):
displaytext = self.wizard[self.currStep]["displaytext"]
print "set LCD text"
for x in self.lcdCallbacks:
x(displaytext)
if len(self.stepHistory) == 0 or self.stepHistory[-1] != self.currStep:
self.stepHistory.append(self.currStep)
print "wizard step:", self.wizard[self.currStep]
if self.showSteps:
self["step"].setText(self.getTranslation("Step ") + str(self.currStep) + "/" + str(self.numSteps))
if self.showStepSlider:
self["stepslider"].setValue(self.currStep)
if self.wizard[self.currStep]["timeout"] is not None:
self.resetCounter()
self.timeoutTimer.start(1000)
print "wizard text", self.getTranslation(self.wizard[self.currStep]["text"])
self.updateText(firstset = True)
if self.wizard[self.currStep].has_key("displaytext"):
displaytext = self.wizard[self.currStep]["displaytext"]
print "set LCD text"
for x in self.lcdCallbacks:
x(displaytext)
self.codeafter=False
self.runCode(self.wizard[self.currStep]["code"])
if self.runCode(self.wizard[self.currStep]["code_async"]):
if self.updateValues in self.onShown:
self.onShown.remove(self.updateValues)
else:
self.afterAsyncCode()
def afterAsyncCode(self):
if not self.updateValues in self.onShown:
self.onShown.append(self.updateValues)
if self.codeafter:
if self.wizard[self.prevStep]["nextstep"] is not None:
self.currStep = self.getStepWithID(self.wizard[self.prevStep]["nextstep"])
if self.gotoStep is not None:
self.currStep = self.getStepWithID(self.gotoStep)
self.currStep += 1
self.updateValues()
print "Now: " + str(self.currStep)
else:
if self.showList:
print "showing list,", self.currStep
for renderer in self.renderer:
rootrenderer = renderer
while renderer.source is not None:
print "self.list:", self["list"]
if renderer.source is self["list"]:
print "setZPosition"
rootrenderer.instance.setZPosition(1)
renderer = renderer.source
#self["list"].instance.setZPosition(1)
self.list = []
if (self.wizard[self.currStep].has_key("dynamiclist")):
print "dynamic list, calling", self.wizard[self.currStep]["dynamiclist"]
newlist = eval("self." + self.wizard[self.currStep]["dynamiclist"] + "()")
#self.wizard[self.currStep]["evaluatedlist"] = []
for entry in newlist:
#self.wizard[self.currStep]["evaluatedlist"].append(entry)
self.list.append(entry)
#del self.wizard[self.currStep]["dynamiclist"]
if (len(self.wizard[self.currStep]["list"]) > 0):
#self["list"].instance.setZPosition(2)
for x in self.wizard[self.currStep]["list"]:
self.list.append((self.getTranslation(x[0]), x[1]))
self.wizard[self.currStep]["evaluatedlist"] = self.list
self["list"].list = self.list
self["list"].index = 0
else:
self["list"].hide()
if self.showConfig:
print "showing config"
self["config"].instance.setZPosition(1)
if self.wizard[self.currStep]["config"]["type"] == "dynamic":
print "config type is dynamic"
self["config"].instance.setZPosition(2)
self["config"].l.setList(eval("self." + self.wizard[self.currStep]["config"]["source"])())
elif (self.wizard[self.currStep]["config"]["screen"] != None):
if self.wizard[self.currStep]["config"]["type"] == "standalone":
print "Type is standalone"
self.session.openWithCallback(self.ok, self.wizard[self.currStep]["config"]["screen"])
else:
self["config"].instance.setZPosition(2)
print "wizard screen", self.wizard[self.currStep]["config"]["screen"]
if self.wizard[self.currStep]["config"]["args"] == None:
self.configInstance = self.session.instantiateDialog(self.wizard[self.currStep]["config"]["screen"])
else:
self.configInstance = self.session.instantiateDialog(self.wizard[self.currStep]["config"]["screen"], eval(self.wizard[self.currStep]["config"]["args"]))
self.configInstance.setAnimationMode(0)
self["config"].l.setList(self.configInstance["config"].list)
callbacks = self.configInstance["config"].onSelectionChanged
self.configInstance["config"].destroy()
print "clearConfigList", self.configInstance["config"], self["config"]
self.configInstance["config"] = self["config"]
self.configInstance["config"].onSelectionChanged = callbacks
print "clearConfigList", self.configInstance["config"], self["config"]
self["config"].setCurrentIndex(0)
else:
self["config"].l.setList([])
self.handleInputHelpers()
else:
if self.has_key("config"):
self["config"].hide()
def timeoutCounterFired(self):
self.timeoutCounter -= 1
print "timeoutCounter:", self.timeoutCounter
if self.timeoutCounter == 0:
if self.wizard[self.currStep]["timeoutaction"] == "selectnext":
print "selection next item"
self.down()
else:
if self.wizard[self.currStep]["timeoutaction"] == "changestep":
self.finished(gotoStep = self.wizard[self.currStep]["timeoutstep"])
self.updateText()
def handleInputHelpers(self):
if self["config"].getCurrent() is not None:
if isinstance(self["config"].getCurrent()[1], ConfigText) or isinstance(self["config"].getCurrent()[1], ConfigPassword):
if self.has_key("VKeyIcon"):
self["VirtualKB"].setEnabled(True)
self["VKeyIcon"].boolean = True
if self.has_key("HelpWindow"):
if self["config"].getCurrent()[1].help_window.instance is not None:
helpwindowpos = self["HelpWindow"].getPosition()
from enigma import ePoint
self["config"].getCurrent()[1].help_window.instance.move(ePoint(helpwindowpos[0],helpwindowpos[1]))
else:
if self.has_key("VKeyIcon"):
self["VirtualKB"].setEnabled(False)
self["VKeyIcon"].boolean = False
else:
if self.has_key("VKeyIcon"):
self["VirtualKB"].setEnabled(False)
self["VKeyIcon"].boolean = False
def KeyText(self):
from Screens.VirtualKeyBoard import VirtualKeyBoard
self.currentConfigIndex = self["config"].getCurrentIndex()
self.session.openWithCallback(self.VirtualKeyBoardCallback, VirtualKeyBoard, title = self["config"].getCurrent()[0], text = self["config"].getCurrent()[1].getValue())
def VirtualKeyBoardCallback(self, callback = None):
if callback is not None and len(callback):
if isinstance(self["config"].getCurrent()[1], ConfigText) or isinstance(self["config"].getCurrent()[1], ConfigPassword):
if self.has_key("HelpWindow"):
if self["config"].getCurrent()[1].help_window.instance is not None:
helpwindowpos = self["HelpWindow"].getPosition()
from enigma import ePoint
self["config"].getCurrent()[1].help_window.instance.move(ePoint(helpwindowpos[0],helpwindowpos[1]))
self["config"].instance.moveSelectionTo(self.currentConfigIndex)
self["config"].setCurrentIndex(self.currentConfigIndex)
self["config"].getCurrent()[1].setValue(callback)
self["config"].invalidate(self["config"].getCurrent())
class WizardManager:
def __init__(self):
self.wizards = []
def registerWizard(self, wizard, precondition, priority = 0):
self.wizards.append((wizard, precondition, priority))
def getWizards(self):
# x[1] is precondition
for wizard in self.wizards:
wizard[0].isLastWizard = False
if len(self.wizards) > 0:
self.wizards[-1][0].isLastWizard = True
return [(x[2], x[0]) for x in self.wizards if x[1] == 1]
wizardManager = WizardManager()
|
rohitwaghchaure/alec_frappe5_erpnext
|
refs/heads/develop
|
erpnext/stock/doctype/delivery_note/delivery_note.py
|
2
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe.utils import flt, cint
from frappe import msgprint, _
import frappe.defaults
from frappe.model.mapper import get_mapped_doc
from erpnext.controllers.selling_controller import SellingController
form_grid_templates = {
"items": "templates/form_grid/item_grid.html"
}
class DeliveryNote(SellingController):
def __init__(self, arg1, arg2=None):
super(DeliveryNote, self).__init__(arg1, arg2)
self.status_updater = [{
'source_dt': 'Delivery Note Item',
'target_dt': 'Sales Order Item',
'join_field': 'so_detail',
'target_field': 'delivered_qty',
'target_parent_dt': 'Sales Order',
'target_parent_field': 'per_delivered',
'target_ref_field': 'qty',
'source_field': 'qty',
'percent_join_field': 'against_sales_order',
'status_field': 'delivery_status',
'keyword': 'Delivered',
'second_source_dt': 'Sales Invoice Item',
'second_source_field': 'qty',
'second_join_field': 'so_detail',
'overflow_type': 'delivery',
'second_source_extra_cond': """ and exists(select name from `tabSales Invoice`
where name=`tabSales Invoice Item`.parent and ifnull(update_stock, 0) = 1)"""
},
{
'source_dt': 'Delivery Note Item',
'target_dt': 'Sales Invoice Item',
'join_field': 'si_detail',
'target_field': 'delivered_qty',
'target_parent_dt': 'Sales Invoice',
'target_ref_field': 'qty',
'source_field': 'qty',
'percent_join_field': 'against_sales_invoice',
'overflow_type': 'delivery'
}]
def onload(self):
billed_qty = frappe.db.sql("""select sum(ifnull(qty, 0)) from `tabSales Invoice Item`
where docstatus=1 and delivery_note=%s""", self.name)
if billed_qty:
total_qty = sum((item.qty for item in self.get("items")))
self.get("__onload").billing_complete = (billed_qty[0][0] == total_qty)
def before_print(self):
def toggle_print_hide(meta, fieldname):
df = meta.get_field(fieldname)
if self.get("print_without_amount"):
df.set("__print_hide", 1)
else:
df.delete_key("__print_hide")
item_meta = frappe.get_meta("Delivery Note Item")
print_hide_fields = {
"parent": ["grand_total", "rounded_total", "in_words", "currency", "net_total"],
"items": ["rate", "amount", "price_list_rate", "discount_percentage"]
}
for key, fieldname in print_hide_fields.items():
for f in fieldname:
toggle_print_hide(self.meta if key == "parent" else item_meta, f)
def set_actual_qty(self):
for d in self.get('items'):
if d.item_code and d.warehouse:
actual_qty = frappe.db.sql("""select actual_qty from `tabBin`
where item_code = %s and warehouse = %s""", (d.item_code, d.warehouse))
d.actual_qty = actual_qty and flt(actual_qty[0][0]) or 0
def so_required(self):
"""check in manage account if sales order required or not"""
if not self.is_return and frappe.db.get_value("Selling Settings", None, 'so_required') == 'Yes':
for d in self.get('items'):
if not d.against_sales_order:
frappe.throw(_("Sales Order required for Item {0}").format(d.item_code))
def validate(self):
super(DeliveryNote, self).validate()
self.set_status()
self.so_required()
self.validate_proj_cust()
self.check_stop_sales_order("against_sales_order")
self.validate_for_items()
self.validate_warehouse()
self.validate_uom_is_integer("stock_uom", "qty")
self.validate_with_previous_doc()
from erpnext.stock.doctype.packed_item.packed_item import make_packing_list
make_packing_list(self, 'items')
self.update_current_stock()
if not self.installation_status: self.installation_status = 'Not Installed'
def validate_with_previous_doc(self):
for fn in (("Sales Order", "against_sales_order", "so_detail"),
("Sales Invoice", "against_sales_invoice", "si_detail")):
if filter(None, [getattr(d, fn[1], None) for d in self.get("items")]):
super(DeliveryNote, self).validate_with_previous_doc({
fn[0]: {
"ref_dn_field": fn[1],
"compare_fields": [["customer", "="], ["company", "="], ["project_name", "="],
["currency", "="]],
},
})
if cint(frappe.db.get_single_value('Selling Settings', 'maintain_same_sales_rate')):
self.validate_rate_with_reference_doc([["Sales Order", "sales_order", "so_detail"],
["Sales Invoice", "sales_invoice", "si_detail"]])
def validate_proj_cust(self):
"""check for does customer belong to same project as entered.."""
if self.project_name and self.customer:
res = frappe.db.sql("""select name from `tabProject`
where name = %s and (customer = %s or
ifnull(customer,'')='')""", (self.project_name, self.customer))
if not res:
frappe.throw(_("Customer {0} does not belong to project {1}").format(self.customer, self.project_name))
def validate_for_items(self):
check_list, chk_dupl_itm = [], []
for d in self.get('items'):
e = [d.item_code, d.description, d.warehouse, d.against_sales_order or d.against_sales_invoice, d.batch_no or '']
f = [d.item_code, d.description, d.against_sales_order or d.against_sales_invoice]
if frappe.db.get_value("Item", d.item_code, "is_stock_item") == 1:
if e in check_list:
msgprint(_("Note: Item {0} entered multiple times").format(d.item_code))
else:
check_list.append(e)
else:
if f in chk_dupl_itm:
msgprint(_("Note: Item {0} entered multiple times").format(d.item_code))
else:
chk_dupl_itm.append(f)
def validate_warehouse(self):
for d in self.get_item_list():
if frappe.db.get_value("Item", d['item_code'], "is_stock_item") == 1:
if not d['warehouse']:
frappe.throw(_("Warehouse required for stock Item {0}").format(d["item_code"]))
def update_current_stock(self):
if self.get("_action") and self._action != "update_after_submit":
for d in self.get('items'):
d.actual_qty = frappe.db.get_value("Bin", {"item_code": d.item_code,
"warehouse": d.warehouse}, "actual_qty")
for d in self.get('packed_items'):
bin_qty = frappe.db.get_value("Bin", {"item_code": d.item_code,
"warehouse": d.warehouse}, ["actual_qty", "projected_qty"], as_dict=True)
if bin_qty:
d.actual_qty = flt(bin_qty.actual_qty)
d.projected_qty = flt(bin_qty.projected_qty)
def on_submit(self):
self.validate_packed_qty()
# Check for Approving Authority
frappe.get_doc('Authorization Control').validate_approving_authority(self.doctype, self.company, self.base_grand_total, self)
if not self.is_return:
# update delivered qty in sales order
self.update_prevdoc_status()
self.check_credit_limit()
self.update_stock_ledger()
self.make_gl_entries()
frappe.db.set(self, 'status', 'Submitted')
def on_cancel(self):
self.check_stop_sales_order("against_sales_order")
self.check_next_docstatus()
if not self.is_return:
self.update_prevdoc_status()
self.update_stock_ledger()
frappe.db.set(self, 'status', 'Cancelled')
self.cancel_packing_slips()
self.make_gl_entries_on_cancel()
def validate_packed_qty(self):
"""
Validate that if packed qty exists, it should be equal to qty
"""
if not any([flt(d.get('packed_qty')) for d in self.get("items")]):
return
has_error = False
for d in self.get("items"):
if flt(d.get('qty')) != flt(d.get('packed_qty')):
frappe.msgprint(_("Packed quantity must equal quantity for Item {0} in row {1}").format(d.item_code, d.idx))
has_error = True
if has_error:
raise frappe.ValidationError
def check_next_docstatus(self):
submit_rv = frappe.db.sql("""select t1.name
from `tabSales Invoice` t1,`tabSales Invoice Item` t2
where t1.name = t2.parent and t2.delivery_note = %s and t1.docstatus = 1""",
(self.name))
if submit_rv:
frappe.throw(_("Sales Invoice {0} has already been submitted").format(submit_rv[0][0]))
submit_in = frappe.db.sql("""select t1.name
from `tabInstallation Note` t1, `tabInstallation Note Item` t2
where t1.name = t2.parent and t2.prevdoc_docname = %s and t1.docstatus = 1""",
(self.name))
if submit_in:
frappe.throw(_("Installation Note {0} has already been submitted").format(submit_in[0][0]))
def cancel_packing_slips(self):
"""
Cancel submitted packing slips related to this delivery note
"""
res = frappe.db.sql("""SELECT name FROM `tabPacking Slip` WHERE delivery_note = %s
AND docstatus = 1""", self.name)
if res:
for r in res:
ps = frappe.get_doc('Packing Slip', r[0])
ps.cancel()
frappe.msgprint(_("Packing Slip(s) cancelled"))
def update_stock_ledger(self):
sl_entries = []
for d in self.get_item_list():
if frappe.db.get_value("Item", d.item_code, "is_stock_item") == 1 \
and d.warehouse and flt(d['qty']):
self.update_reserved_qty(d)
incoming_rate = 0
if cint(self.is_return) and self.return_against and self.docstatus==1:
incoming_rate = self.get_incoming_rate_for_sales_return(d.item_code, self.return_against)
sl_entries.append(self.get_sl_entries(d, {
"actual_qty": -1*flt(d['qty']),
"incoming_rate": incoming_rate
}))
self.make_sl_entries(sl_entries)
def get_list_context(context=None):
from erpnext.controllers.website_list_for_contact import get_list_context
list_context = get_list_context(context)
list_context["title"] = _("My Shipments")
return list_context
def get_invoiced_qty_map(delivery_note):
"""returns a map: {dn_detail: invoiced_qty}"""
invoiced_qty_map = {}
for dn_detail, qty in frappe.db.sql("""select dn_detail, qty from `tabSales Invoice Item`
where delivery_note=%s and docstatus=1""", delivery_note):
if not invoiced_qty_map.get(dn_detail):
invoiced_qty_map[dn_detail] = 0
invoiced_qty_map[dn_detail] += qty
return invoiced_qty_map
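# Illustrative shape of the returned map (sketch; the row names are hypothetical):
#   {'dn-item-0001': 5.0, 'dn-item-0002': 2.0}
# i.e. the invoiced quantity accumulated per Delivery Note Item row.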
@frappe.whitelist()
def make_sales_invoice(source_name, target_doc=None):
invoiced_qty_map = get_invoiced_qty_map(source_name)
def update_accounts(source, target):
target.is_pos = 0
target.ignore_pricing_rule = 1
target.run_method("set_missing_values")
if len(target.get("items")) == 0:
frappe.throw(_("All these items have already been invoiced"))
target.run_method("calculate_taxes_and_totals")
def update_item(source_doc, target_doc, source_parent):
target_doc.qty = source_doc.qty - invoiced_qty_map.get(source_doc.name, 0)
doc = get_mapped_doc("Delivery Note", source_name, {
"Delivery Note": {
"doctype": "Sales Invoice",
"validation": {
"docstatus": ["=", 1]
}
},
"Delivery Note Item": {
"doctype": "Sales Invoice Item",
"field_map": {
"name": "dn_detail",
"parent": "delivery_note",
"so_detail": "so_detail",
"against_sales_order": "sales_order",
"serial_no": "serial_no"
},
"postprocess": update_item,
"filter": lambda d: d.qty - invoiced_qty_map.get(d.name, 0)<=0
},
"Sales Taxes and Charges": {
"doctype": "Sales Taxes and Charges",
"add_if_empty": True
},
"Sales Team": {
"doctype": "Sales Team",
"field_map": {
"incentives": "incentives"
},
"add_if_empty": True
}
}, target_doc, update_accounts)
return doc
@frappe.whitelist()
def make_installation_note(source_name, target_doc=None):
def update_item(obj, target, source_parent):
target.qty = flt(obj.qty) - flt(obj.installed_qty)
target.serial_no = obj.serial_no
doclist = get_mapped_doc("Delivery Note", source_name, {
"Delivery Note": {
"doctype": "Installation Note",
"validation": {
"docstatus": ["=", 1]
}
},
"Delivery Note Item": {
"doctype": "Installation Note Item",
"field_map": {
"name": "prevdoc_detail_docname",
"parent": "prevdoc_docname",
"parenttype": "prevdoc_doctype",
},
"postprocess": update_item,
"condition": lambda doc: doc.installed_qty < doc.qty
}
}, target_doc)
return doclist
@frappe.whitelist()
def make_packing_slip(source_name, target_doc=None):
doclist = get_mapped_doc("Delivery Note", source_name, {
"Delivery Note": {
"doctype": "Packing Slip",
"field_map": {
"name": "delivery_note",
"letter_head": "letter_head"
},
"validation": {
"docstatus": ["=", 0]
}
}
}, target_doc)
return doclist
@frappe.whitelist()
def make_sales_return(source_name, target_doc=None):
from erpnext.controllers.sales_and_purchase_return import make_return_doc
return make_return_doc("Delivery Note", source_name, target_doc)
|
etherkit/OpenBeacon2
|
refs/heads/master
|
client/linux-x86/venv/lib/python3.8/site-packages/serial/tools/list_ports_windows.py
|
12
|
#! python
#
# Enumerate serial ports on Windows including a human readable description
# and hardware information.
#
# This file is part of pySerial. https://github.com/pyserial/pyserial
# (C) 2001-2016 Chris Liechti <cliechti@gmx.net>
#
# SPDX-License-Identifier: BSD-3-Clause
# pylint: disable=invalid-name,too-few-public-methods
import re
import ctypes
from ctypes.wintypes import BOOL
from ctypes.wintypes import HWND
from ctypes.wintypes import DWORD
from ctypes.wintypes import WORD
from ctypes.wintypes import LONG
from ctypes.wintypes import ULONG
from ctypes.wintypes import HKEY
from ctypes.wintypes import BYTE
import serial
from serial.win32 import ULONG_PTR
from serial.tools import list_ports_common
def ValidHandle(value, func, arguments):
if value == 0:
raise ctypes.WinError()
return value
NULL = 0
HDEVINFO = ctypes.c_void_p
LPCTSTR = ctypes.c_wchar_p
PCTSTR = ctypes.c_wchar_p
PTSTR = ctypes.c_wchar_p
LPDWORD = PDWORD = ctypes.POINTER(DWORD)
#~ LPBYTE = PBYTE = ctypes.POINTER(BYTE)
LPBYTE = PBYTE = ctypes.c_void_p # XXX avoids error about types
ACCESS_MASK = DWORD
REGSAM = ACCESS_MASK
class GUID(ctypes.Structure):
_fields_ = [
('Data1', DWORD),
('Data2', WORD),
('Data3', WORD),
('Data4', BYTE * 8),
]
def __str__(self):
return "{{{:08x}-{:04x}-{:04x}-{}-{}}}".format(
self.Data1,
self.Data2,
self.Data3,
''.join(["{:02x}".format(d) for d in self.Data4[:2]]),
''.join(["{:02x}".format(d) for d in self.Data4[2:]]),
)
class SP_DEVINFO_DATA(ctypes.Structure):
_fields_ = [
('cbSize', DWORD),
('ClassGuid', GUID),
('DevInst', DWORD),
('Reserved', ULONG_PTR),
]
def __str__(self):
return "ClassGuid:{} DevInst:{}".format(self.ClassGuid, self.DevInst)
PSP_DEVINFO_DATA = ctypes.POINTER(SP_DEVINFO_DATA)
PSP_DEVICE_INTERFACE_DETAIL_DATA = ctypes.c_void_p
setupapi = ctypes.windll.LoadLibrary("setupapi")
SetupDiDestroyDeviceInfoList = setupapi.SetupDiDestroyDeviceInfoList
SetupDiDestroyDeviceInfoList.argtypes = [HDEVINFO]
SetupDiDestroyDeviceInfoList.restype = BOOL
SetupDiClassGuidsFromName = setupapi.SetupDiClassGuidsFromNameW
SetupDiClassGuidsFromName.argtypes = [PCTSTR, ctypes.POINTER(GUID), DWORD, PDWORD]
SetupDiClassGuidsFromName.restype = BOOL
SetupDiEnumDeviceInfo = setupapi.SetupDiEnumDeviceInfo
SetupDiEnumDeviceInfo.argtypes = [HDEVINFO, DWORD, PSP_DEVINFO_DATA]
SetupDiEnumDeviceInfo.restype = BOOL
SetupDiGetClassDevs = setupapi.SetupDiGetClassDevsW
SetupDiGetClassDevs.argtypes = [ctypes.POINTER(GUID), PCTSTR, HWND, DWORD]
SetupDiGetClassDevs.restype = HDEVINFO
SetupDiGetClassDevs.errcheck = ValidHandle
SetupDiGetDeviceRegistryProperty = setupapi.SetupDiGetDeviceRegistryPropertyW
SetupDiGetDeviceRegistryProperty.argtypes = [HDEVINFO, PSP_DEVINFO_DATA, DWORD, PDWORD, PBYTE, DWORD, PDWORD]
SetupDiGetDeviceRegistryProperty.restype = BOOL
SetupDiGetDeviceInstanceId = setupapi.SetupDiGetDeviceInstanceIdW
SetupDiGetDeviceInstanceId.argtypes = [HDEVINFO, PSP_DEVINFO_DATA, PTSTR, DWORD, PDWORD]
SetupDiGetDeviceInstanceId.restype = BOOL
SetupDiOpenDevRegKey = setupapi.SetupDiOpenDevRegKey
SetupDiOpenDevRegKey.argtypes = [HDEVINFO, PSP_DEVINFO_DATA, DWORD, DWORD, DWORD, REGSAM]
SetupDiOpenDevRegKey.restype = HKEY
advapi32 = ctypes.windll.LoadLibrary("Advapi32")
RegCloseKey = advapi32.RegCloseKey
RegCloseKey.argtypes = [HKEY]
RegCloseKey.restype = LONG
RegQueryValueEx = advapi32.RegQueryValueExW
RegQueryValueEx.argtypes = [HKEY, LPCTSTR , LPDWORD, LPDWORD, LPBYTE, LPDWORD]
RegQueryValueEx.restype = LONG
DIGCF_PRESENT = 2
DIGCF_DEVICEINTERFACE = 16
INVALID_HANDLE_VALUE = 0
ERROR_INSUFFICIENT_BUFFER = 122
SPDRP_HARDWAREID = 1
SPDRP_FRIENDLYNAME = 12
SPDRP_LOCATION_PATHS = 35
SPDRP_MFG = 11
DICS_FLAG_GLOBAL = 1
DIREG_DEV = 0x00000001
KEY_READ = 0x20019
def iterate_comports():
"""Return a generator that yields descriptions for serial ports"""
GUIDs = (GUID * 8)() # so far only seen one used, so hope 8 are enough...
guids_size = DWORD()
if not SetupDiClassGuidsFromName(
"Ports",
GUIDs,
ctypes.sizeof(GUIDs),
ctypes.byref(guids_size)):
raise ctypes.WinError()
# repeat for all possible GUIDs
for index in range(guids_size.value):
bInterfaceNumber = None
g_hdi = SetupDiGetClassDevs(
ctypes.byref(GUIDs[index]),
None,
NULL,
DIGCF_PRESENT) # was DIGCF_PRESENT|DIGCF_DEVICEINTERFACE which misses CDC ports
devinfo = SP_DEVINFO_DATA()
devinfo.cbSize = ctypes.sizeof(devinfo)
index = 0
while SetupDiEnumDeviceInfo(g_hdi, index, ctypes.byref(devinfo)):
index += 1
# get the real com port name
hkey = SetupDiOpenDevRegKey(
g_hdi,
ctypes.byref(devinfo),
DICS_FLAG_GLOBAL,
0,
DIREG_DEV, # DIREG_DRV for SW info
KEY_READ)
port_name_buffer = ctypes.create_unicode_buffer(250)
port_name_length = ULONG(ctypes.sizeof(port_name_buffer))
RegQueryValueEx(
hkey,
"PortName",
None,
None,
ctypes.byref(port_name_buffer),
ctypes.byref(port_name_length))
RegCloseKey(hkey)
            # unfortunately this method also includes parallel ports.
# we could check for names starting with COM or just exclude LPT
# and hope that other "unknown" names are serial ports...
if port_name_buffer.value.startswith('LPT'):
continue
# hardware ID
szHardwareID = ctypes.create_unicode_buffer(250)
# try to get ID that includes serial number
if not SetupDiGetDeviceInstanceId(
g_hdi,
ctypes.byref(devinfo),
#~ ctypes.byref(szHardwareID),
szHardwareID,
ctypes.sizeof(szHardwareID) - 1,
None):
                # fall back to the more generic hardware ID if that fails
if not SetupDiGetDeviceRegistryProperty(
g_hdi,
ctypes.byref(devinfo),
SPDRP_HARDWAREID,
None,
ctypes.byref(szHardwareID),
ctypes.sizeof(szHardwareID) - 1,
None):
# Ignore ERROR_INSUFFICIENT_BUFFER
if ctypes.GetLastError() != ERROR_INSUFFICIENT_BUFFER:
raise ctypes.WinError()
# stringify
szHardwareID_str = szHardwareID.value
info = list_ports_common.ListPortInfo(port_name_buffer.value)
            # in case of USB, make a more readable string, similar to the form
            # we also generate on other platforms
if szHardwareID_str.startswith('USB'):
m = re.search(r'VID_([0-9a-f]{4})(&PID_([0-9a-f]{4}))?(&MI_(\d{2}))?(\\(\w+))?', szHardwareID_str, re.I)
if m:
info.vid = int(m.group(1), 16)
if m.group(3):
info.pid = int(m.group(3), 16)
if m.group(5):
bInterfaceNumber = int(m.group(5))
if m.group(7):
info.serial_number = m.group(7)
# calculate a location string
loc_path_str = ctypes.create_unicode_buffer(250)
if SetupDiGetDeviceRegistryProperty(
g_hdi,
ctypes.byref(devinfo),
SPDRP_LOCATION_PATHS,
None,
ctypes.byref(loc_path_str),
ctypes.sizeof(loc_path_str) - 1,
None):
m = re.finditer(r'USBROOT\((\w+)\)|#USB\((\w+)\)', loc_path_str.value)
location = []
for g in m:
if g.group(1):
location.append('{:d}'.format(int(g.group(1)) + 1))
else:
if len(location) > 1:
location.append('.')
else:
location.append('-')
location.append(g.group(2))
if bInterfaceNumber is not None:
location.append(':{}.{}'.format(
'x', # XXX how to determine correct bConfigurationValue?
bInterfaceNumber))
if location:
info.location = ''.join(location)
info.hwid = info.usb_info()
elif szHardwareID_str.startswith('FTDIBUS'):
m = re.search(r'VID_([0-9a-f]{4})\+PID_([0-9a-f]{4})(\+(\w+))?', szHardwareID_str, re.I)
if m:
info.vid = int(m.group(1), 16)
info.pid = int(m.group(2), 16)
if m.group(4):
info.serial_number = m.group(4)
                # USB location is hidden by FTDI driver :(
info.hwid = info.usb_info()
else:
info.hwid = szHardwareID_str
# friendly name
szFriendlyName = ctypes.create_unicode_buffer(250)
if SetupDiGetDeviceRegistryProperty(
g_hdi,
ctypes.byref(devinfo),
SPDRP_FRIENDLYNAME,
#~ SPDRP_DEVICEDESC,
None,
ctypes.byref(szFriendlyName),
ctypes.sizeof(szFriendlyName) - 1,
None):
info.description = szFriendlyName.value
#~ else:
# Ignore ERROR_INSUFFICIENT_BUFFER
#~ if ctypes.GetLastError() != ERROR_INSUFFICIENT_BUFFER:
#~ raise IOError("failed to get details for %s (%s)" % (devinfo, szHardwareID.value))
# ignore errors and still include the port in the list, friendly name will be same as port name
# manufacturer
szManufacturer = ctypes.create_unicode_buffer(250)
if SetupDiGetDeviceRegistryProperty(
g_hdi,
ctypes.byref(devinfo),
SPDRP_MFG,
#~ SPDRP_DEVICEDESC,
None,
ctypes.byref(szManufacturer),
ctypes.sizeof(szManufacturer) - 1,
None):
info.manufacturer = szManufacturer.value
yield info
SetupDiDestroyDeviceInfoList(g_hdi)
def comports(include_links=False):
"""Return a list of info objects about serial ports"""
return list(iterate_comports())
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# test
if __name__ == '__main__':
for port, desc, hwid in sorted(comports()):
print("{}: {} [{}]".format(port, desc, hwid))
|
prmtl/fuel-web
|
refs/heads/master
|
fuel_agent/fuel_agent/tests/test_nailgun.py
|
2
|
# Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import mock
from oslotest import base as test_base
import yaml
from fuel_agent.drivers import nailgun
from fuel_agent import errors
from fuel_agent.objects import image
from fuel_agent.utils import hardware as hu
from fuel_agent.utils import utils
CEPH_JOURNAL = {
"partition_guid": "45b0969e-9b03-4f30-b4c6-b4b80ceff106",
"name": "cephjournal",
"mount": "none",
"disk_label": "",
"type": "partition",
"file_system": "none",
"size": 0
}
CEPH_DATA = {
"partition_guid": "4fbd7e29-9d25-41b8-afd0-062c0ceff05d",
"name": "ceph",
"mount": "none",
"disk_label": "",
"type": "partition",
"file_system": "none",
"size": 3333
}
PROVISION_SAMPLE_DATA = {
"profile": "pro_fi-le",
"name_servers_search": "\"domain.tld\"",
"uid": "1",
"interfaces": {
"eth2": {
"static": "0",
"mac_address": "08:00:27:b1:d7:15"
},
"eth1": {
"static": "0",
"mac_address": "08:00:27:46:43:60"
},
"eth0": {
"ip_address": "10.20.0.3",
"dns_name": "node-1.domain.tld",
"netmask": "255.255.255.0",
"static": "0",
"mac_address": "08:00:27:79:da:80"
}
},
"interfaces_extra": {
"eth2": {
"onboot": "no",
"peerdns": "no"
},
"eth1": {
"onboot": "no",
"peerdns": "no"
},
"eth0": {
"onboot": "yes",
"peerdns": "no"
}
},
"power_type": "ssh",
"power_user": "root",
"kernel_options": {
"udevrules": "08:00:27:79:da:80_eth0,08:00:27:46:43:60_eth1,"
"08:00:27:b1:d7:15_eth2",
"netcfg/choose_interface": "08:00:27:79:da:80"
},
"power_address": "10.20.0.253",
"name_servers": "\"10.20.0.2\"",
"ks_meta": {
"gw": "10.20.0.1",
"image_data": {
"/": {
"uri": "http://fake.host.org:123/imgs/fake_image.img.gz",
"format": "ext4",
"container": "gzip"
}
},
"timezone": "America/Los_Angeles",
"master_ip": "10.20.0.2",
"mco_enable": 1,
"mco_vhost": "mcollective",
"mco_pskey": "unset",
"mco_user": "mcollective",
"puppet_enable": 0,
"fuel_version": "5.0.1",
"install_log_2_syslog": 1,
"mco_password": "marionette",
"puppet_auto_setup": 1,
"puppet_master": "fuel.domain.tld",
"mco_auto_setup": 1,
"auth_key": "fake_auth_key",
"authorized_keys": ["fake_authorized_key1", "fake_authorized_key2"],
"repo_setup": {
"repos": [
{
"name": "repo1",
"type": "deb",
"uri": "uri1",
"suite": "suite",
"section": "section",
"priority": 1001
},
{
"name": "repo2",
"type": "deb",
"uri": "uri2",
"suite": "suite",
"section": "section",
"priority": 1001
}
]
},
"pm_data": {
"kernel_params": "console=ttyS0,9600 console=tty0 rootdelay=90 "
"nomodeset",
"ks_spaces": [
{
"name": "sda",
"extra": [
"disk/by-id/scsi-SATA_VBOX_HARDDISK_VB69050467-"
"b385c7cd",
"disk/by-id/ata-VBOX_HARDDISK_VB69050467-b385c7cd"
],
"free_space": 64907,
"volumes": [
{
"type": "boot",
"size": 300
},
{
"mount": "/boot",
"size": 200,
"type": "raid",
"file_system": "ext2",
"name": "Boot"
},
{
"mount": "/tmp",
"size": 200,
"type": "partition",
"file_system": "ext2",
"partition_guid": "fake_guid",
"name": "TMP"
},
{
"type": "lvm_meta_pool",
"size": 0
},
{
"size": 19438,
"type": "pv",
"lvm_meta_size": 64,
"vg": "os"
},
{
"size": 45597,
"type": "pv",
"lvm_meta_size": 64,
"vg": "image"
}
],
"type": "disk",
"id": "sda",
"size": 65535
},
{
"name": "sdb",
"extra": [
"disk/by-id/scsi-SATA_VBOX_HARDDISK_VBf2923215-"
"708af674",
"disk/by-id/ata-VBOX_HARDDISK_VBf2923215-708af674"
],
"free_space": 64907,
"volumes": [
{
"type": "boot",
"size": 300
},
{
"mount": "/boot",
"size": 200,
"type": "raid",
"file_system": "ext2",
"name": "Boot"
},
{
"type": "lvm_meta_pool",
"size": 64
},
{
"size": 0,
"type": "pv",
"lvm_meta_size": 0,
"vg": "os"
},
{
"size": 64971,
"type": "pv",
"lvm_meta_size": 64,
"vg": "image"
}
],
"type": "disk",
"id": "sdb",
"size": 65535
},
{
"name": "sdc",
"extra": [
"disk/by-id/scsi-SATA_VBOX_HARDDISK_VB50ee61eb-"
"84e74fdf",
"disk/by-id/ata-VBOX_HARDDISK_VB50ee61eb-84e74fdf"
],
"free_space": 64907,
"volumes": [
{
"type": "boot",
"size": 300
},
{
"mount": "/boot",
"size": 200,
"type": "raid",
"file_system": "ext2",
"name": "Boot"
},
{
"type": "lvm_meta_pool",
"size": 64
},
{
"size": 0,
"type": "pv",
"lvm_meta_size": 0,
"vg": "os"
},
{
"size": 64971,
"type": "pv",
"lvm_meta_size": 64,
"vg": "image"
}
],
"type": "disk",
"id": "disk/by-path/pci-0000:00:0d.0-scsi-0:0:0:0",
"size": 65535
},
{
"_allocate_size": "min",
"label": "Base System",
"min_size": 19374,
"volumes": [
{
"mount": "/",
"size": 15360,
"type": "lv",
"name": "root",
"file_system": "ext4"
},
{
"mount": "swap",
"size": 4014,
"type": "lv",
"name": "swap",
"file_system": "swap"
}
],
"type": "vg",
"id": "os"
},
{
"_allocate_size": "min",
"label": "Zero size volume",
"min_size": 0,
"volumes": [
{
"mount": "none",
"size": 0,
"type": "lv",
"name": "zero_size",
"file_system": "xfs"
}
],
"type": "vg",
"id": "zero_size"
},
{
"_allocate_size": "all",
"label": "Image Storage",
"min_size": 5120,
"volumes": [
{
"mount": "/var/lib/glance",
"size": 175347,
"type": "lv",
"name": "glance",
"file_system": "xfs"
}
],
"type": "vg",
"id": "image"
}
]
},
"mco_connector": "rabbitmq",
"mco_host": "10.20.0.2"
},
"name": "node-1",
"hostname": "node-1.domain.tld",
"slave_name": "node-1",
"power_pass": "/root/.ssh/bootstrap.rsa",
"netboot_enabled": "1"
}
LIST_BLOCK_DEVICES_SAMPLE = [
{'uspec':
{'DEVLINKS': [
'disk/by-id/scsi-SATA_VBOX_HARDDISK_VB69050467-b385c7cd',
'/dev/disk/by-id/ata-VBOX_HARDDISK_VB69050467-b385c7cd',
'/dev/disk/by-id/wwn-fake_wwn_1',
'/dev/disk/by-path/pci-0000:00:1f.2-scsi-0:0:0:0'],
'ID_SERIAL_SHORT': 'fake_serial_1',
'ID_WWN': 'fake_wwn_1',
'DEVPATH': '/devices/pci0000:00/0000:00:1f.2/ata1/host0/'
'target0:0:0/0:0:0:0/block/sda',
'ID_MODEL': 'fake_id_model',
'DEVNAME': '/dev/sda',
'MAJOR': '8',
'DEVTYPE': 'disk', 'MINOR': '0', 'ID_BUS': 'ata'
},
'startsec': '0',
'device': '/dev/sda',
'espec': {'state': 'running', 'timeout': '30', 'removable': '0'},
'bspec': {
'sz': '976773168', 'iomin': '4096', 'size64': '500107862016',
'ss': '512', 'ioopt': '0', 'alignoff': '0', 'pbsz': '4096',
'ra': '256', 'ro': '0', 'maxsect': '1024'
},
'size': 500107862016},
{'uspec':
{'DEVLINKS': [
'/dev/disk/by-id/ata-VBOX_HARDDISK_VBf2923215-708af674',
'/dev/disk/by-id/scsi-SATA_VBOX_HARDDISK_VBf2923215-708af674',
'/dev/disk/by-id/wwn-fake_wwn_2'],
'ID_SERIAL_SHORT': 'fake_serial_2',
'ID_WWN': 'fake_wwn_2',
'DEVPATH': '/devices/pci0000:00/0000:00:3f.2/ata2/host0/'
'target0:0:0/0:0:0:0/block/sdb',
'ID_MODEL': 'fake_id_model',
'DEVNAME': '/dev/sdb',
'MAJOR': '8',
'DEVTYPE': 'disk', 'MINOR': '0', 'ID_BUS': 'ata'
},
'startsec': '0',
'device': '/dev/sdb',
'espec': {'state': 'running', 'timeout': '30', 'removable': '0'},
'bspec': {
'sz': '976773168', 'iomin': '4096', 'size64': '500107862016',
'ss': '512', 'ioopt': '0', 'alignoff': '0', 'pbsz': '4096',
'ra': '256', 'ro': '0', 'maxsect': '1024'},
'size': 500107862016},
{'uspec':
{'DEVLINKS': [
'/dev/disk/by-id/ata-VBOX_HARDDISK_VB50ee61eb-84e74fdf',
'/dev/disk/by-id/scsi-SATA_VBOX_HARDDISK_VB50ee61eb-84e74fdf',
'/dev/disk/by-id/wwn-fake_wwn_3',
'/dev/disk/by-path/pci-0000:00:0d.0-scsi-0:0:0:0'],
'ID_SERIAL_SHORT': 'fake_serial_3',
'ID_WWN': 'fake_wwn_3',
'DEVPATH': '/devices/pci0000:00/0000:00:0d.0/ata4/host0/target0:0:0/'
'0:0:0:0/block/sdc',
'ID_MODEL': 'fake_id_model',
'DEVNAME': '/dev/sdc',
'MAJOR': '8',
'DEVTYPE': 'disk', 'MINOR': '0', 'ID_BUS': 'ata'},
'startsec': '0',
'device': '/dev/sdc',
'espec': {'state': 'running', 'timeout': '30', 'removable': '0'},
'bspec': {
'sz': '976773168', 'iomin': '4096', 'size64': '500107862016',
'ss': '512', 'ioopt': '0', 'alignoff': '0', 'pbsz': '4096',
'ra': '256', 'ro': '0', 'maxsect': '1024'},
'size': 500107862016},
]
SINGLE_DISK_KS_SPACES = [
{
"name": "sda",
"extra": ["sda"],
"free_space": 1024,
"volumes": [
{
"type": "boot",
"size": 300
},
{
"mount": "/boot",
"size": 200,
"type": "partition",
"file_system": "ext2",
"name": "Boot"
},
{
"mount": "/",
"size": 200,
"type": "partition",
"file_system": "ext4",
"name": "Root"
},
],
"type": "disk",
"id": "sda",
"size": 102400
}
]
NO_BOOT_KS_SPACES = [
{
"name": "sda",
"extra": ["sda"],
"free_space": 1024,
"volumes": [
{
"type": "boot",
"size": 300
},
{
"mount": "/",
"size": 200,
"type": "partition",
"file_system": "ext4",
"name": "Root"
},
],
"type": "disk",
"id": "sda",
"size": 102400
}
]
FIRST_DISK_HUGE_KS_SPACES = [
{
"name": "sda",
"extra": ["sda"],
"free_space": 1024,
"volumes": [
{
"type": "boot",
"size": 300
},
{
"mount": "/boot",
"size": 200,
"type": "raid",
"file_system": "ext2",
"name": "Boot"
},
{
"mount": "/",
"size": 200,
"type": "partition",
"file_system": "ext4",
"name": "Root"
},
],
"type": "disk",
"id": "sda",
"size": 2097153
},
{
"name": "sdb",
"extra": ["sdb"],
"free_space": 1024,
"volumes": [
{
"type": "boot",
"size": 300
},
{
"mount": "/boot",
"size": 200,
"type": "raid",
"file_system": "ext2",
"name": "Boot"
},
{
"mount": "/tmp",
"size": 200,
"type": "partition",
"file_system": "ext2",
"name": "TMP"
},
],
"type": "disk",
"id": "sdb",
"size": 65535
}
]
MANY_HUGE_DISKS_KS_SPACES = [
{
"name": "sda",
"extra": ["sda"],
"free_space": 1024,
"volumes": [
{
"type": "boot",
"size": 300
},
{
"mount": "/boot",
"size": 200,
"type": "raid",
"file_system": "ext2",
"name": "Boot"
},
{
"mount": "/",
"size": 200,
"type": "partition",
"file_system": "ext4",
"name": "Root"
},
],
"type": "disk",
"id": "sda",
"size": 2097153
},
{
"name": "sdb",
"extra": ["sdb"],
"free_space": 1024,
"volumes": [
{
"type": "boot",
"size": 300
},
{
"mount": "/boot",
"size": 200,
"type": "raid",
"file_system": "ext2",
"name": "Boot"
},
{
"mount": "/tmp",
"size": 200,
"type": "partition",
"file_system": "ext2",
"name": "TMP"
},
],
"type": "disk",
"id": "sdb",
"size": 2097153
}
]
class TestNailgun(test_base.BaseTestCase):
def test_match_device_by_id_matches(self):
# matches by 'by-id' links
fake_ks_disk = {
"extra": [
"disk/by-id/fake_scsi_matches",
"disk/by-id/fake_ata_dont_matches"
]
}
fake_hu_disk = {
"uspec": {
"DEVLINKS": [
"/dev/disk/by-id/fake_scsi_matches",
"/dev/disk/by-path/fake_path"
]
}
}
self.assertTrue(nailgun.match_device(fake_hu_disk, fake_ks_disk))
def test_match_device_id_dont_matches_non_empty_extra(self):
        # Shouldn't match. If a non-empty `extra` is present, matching is done
        # only against the `extra` entries and `id` is ignored entirely,
        # e.g. on VirtualBox.
fake_ks_disk = {
"extra": [
"disk/by-id/fake_scsi_dont_matches",
"disk/by-id/fake_ata_dont_matches"
],
"id": "sdd"
}
fake_hu_disk = {
"uspec": {
"DEVLINKS": [
"/dev/disk/by-id/fake_scsi_matches",
"/dev/disk/by-path/fake_path",
"/dev/sdd"
]
}
}
self.assertFalse(nailgun.match_device(fake_hu_disk, fake_ks_disk))
def test_match_device_id_matches_empty_extra(self):
# since `extra` is empty, it will match by `id`
fake_ks_disk = {
"extra": [],
"id": "sdd"
}
fake_hu_disk = {
"uspec": {
"DEVLINKS": [
"/dev/disk/by-id/fake_scsi_matches",
"/dev/disk/by-path/fake_path",
"/dev/sdd"
]
}
}
self.assertTrue(nailgun.match_device(fake_hu_disk, fake_ks_disk))
def test_match_device_id_matches_missing_extra(self):
        # when `extra` is empty or missing entirely, matching falls back to `id`
fake_ks_disk = {"id": "sdd"}
fake_hu_disk = {
"uspec": {
"DEVLINKS": [
"/dev/disk/by-id/fake_scsi_matches",
"/dev/disk/by-path/fake_path",
"/dev/sdd"
]
}
}
self.assertTrue(nailgun.match_device(fake_hu_disk, fake_ks_disk))
    def test_match_device_dont_matches(self):
# Mismatches totally
fake_ks_disk = {
"extra": [
"disk/by-id/fake_scsi_dont_matches",
"disk/by-id/fake_ata_dont_matches"
],
"id": "sda"
}
fake_hu_disk = {
"uspec": {
"DEVLINKS": [
"/dev/disk/by-id/fake_scsi_matches",
"/dev/disk/by-path/fake_path",
"/dev/sdd"
]
}
}
self.assertFalse(nailgun.match_device(fake_hu_disk, fake_ks_disk))
    def test_match_device_dont_matches_by_id(self):
        # the disks are different but both have the same `by-path` link;
        # matching uses `extra` (ignoring `id`), so they should not match
fake_ks_disk = {
"extra": [
"disk/by-id/fake_scsi_dont_matches",
"disk/by-id/fake_ata_dont_matches"
],
"id": "disk/by-path/pci-fake_path"
}
fake_hu_disk = {
"uspec": {
"DEVLINKS": [
"/dev/disk/by-id/fake_scsi_matches",
"/dev/disk/by-path/pci-fake_path",
"/dev/sdd"
]
}
}
self.assertFalse(nailgun.match_device(fake_hu_disk, fake_ks_disk))
@mock.patch('yaml.load')
@mock.patch.object(utils, 'init_http_request')
@mock.patch.object(hu, 'list_block_devices')
def test_configdrive_scheme(self, mock_lbd, mock_http, mock_yaml):
mock_lbd.return_value = LIST_BLOCK_DEVICES_SAMPLE
cd_scheme = nailgun.Nailgun(PROVISION_SAMPLE_DATA).configdrive_scheme
self.assertEqual(['fake_authorized_key1', 'fake_authorized_key2',
'fake_auth_key'], cd_scheme.common.ssh_auth_keys)
self.assertEqual('node-1.domain.tld', cd_scheme.common.hostname)
self.assertEqual('node-1.domain.tld', cd_scheme.common.fqdn)
self.assertEqual('"10.20.0.2"', cd_scheme.common.name_servers)
self.assertEqual('"domain.tld"', cd_scheme.common.search_domain)
self.assertEqual('10.20.0.2', cd_scheme.common.master_ip)
self.assertEqual('http://10.20.0.2:8000/api',
cd_scheme.common.master_url)
self.assertEqual('08:00:27:79:da:80_eth0,08:00:27:46:43:60_eth1,'
'08:00:27:b1:d7:15_eth2', cd_scheme.common.udevrules)
self.assertEqual('08:00:27:79:da:80', cd_scheme.common.admin_mac)
self.assertEqual('10.20.0.3', cd_scheme.common.admin_ip)
self.assertEqual('255.255.255.0', cd_scheme.common.admin_mask)
self.assertEqual('eth0', cd_scheme.common.admin_iface_name)
self.assertEqual('America/Los_Angeles', cd_scheme.common.timezone)
self.assertEqual('fuel.domain.tld', cd_scheme.puppet.master)
self.assertEqual('unset', cd_scheme.mcollective.pskey)
self.assertEqual('mcollective', cd_scheme.mcollective.vhost)
self.assertEqual('10.20.0.2', cd_scheme.mcollective.host)
self.assertEqual('mcollective', cd_scheme.mcollective.user)
self.assertEqual('marionette', cd_scheme.mcollective.password)
self.assertEqual('rabbitmq', cd_scheme.mcollective.connector)
self.assertEqual('pro_fi-le', cd_scheme.profile)
self.assertEqual(
[
{
"name": "repo1",
"type": "deb",
"uri": "uri1",
"suite": "suite",
"section": "section",
"priority": 1001
},
{
"name": "repo2",
"type": "deb",
"uri": "uri2",
"suite": "suite",
"section": "section",
"priority": 1001
}
],
cd_scheme.common.ks_repos)
@mock.patch('yaml.load')
@mock.patch.object(utils, 'init_http_request')
@mock.patch.object(hu, 'list_block_devices')
def test_partition_scheme(self, mock_lbd, mock_http_req, mock_yaml):
mock_lbd.return_value = LIST_BLOCK_DEVICES_SAMPLE
drv = nailgun.Nailgun(PROVISION_SAMPLE_DATA)
p_scheme = drv.partition_scheme
self.assertEqual(5, len(p_scheme.fss))
self.assertEqual(4, len(p_scheme.pvs))
self.assertEqual(3, len(p_scheme.lvs))
self.assertEqual(2, len(p_scheme.vgs))
self.assertEqual(3, len(p_scheme.parteds))
@mock.patch('yaml.load')
@mock.patch.object(utils, 'init_http_request')
@mock.patch.object(hu, 'list_block_devices')
def test_image_scheme(self, mock_lbd, mock_http_req, mock_yaml):
mock_lbd.return_value = LIST_BLOCK_DEVICES_SAMPLE
drv = nailgun.Nailgun(PROVISION_SAMPLE_DATA)
p_scheme = drv.partition_scheme
i_scheme = drv.image_scheme
expected_images = []
for fs in p_scheme.fss:
if fs.mount not in PROVISION_SAMPLE_DATA['ks_meta']['image_data']:
continue
i_data = PROVISION_SAMPLE_DATA['ks_meta']['image_data'][fs.mount]
expected_images.append(image.Image(
uri=i_data['uri'],
target_device=fs.device,
format=i_data['format'],
container=i_data['container'],
))
expected_images = sorted(expected_images, key=lambda x: x.uri)
for i, img in enumerate(sorted(i_scheme.images, key=lambda x: x.uri)):
self.assertEqual(img.uri, expected_images[i].uri)
self.assertEqual(img.target_device,
expected_images[i].target_device)
self.assertEqual(img.format,
expected_images[i].format)
self.assertEqual(img.container,
expected_images[i].container)
self.assertIsNone(img.size)
self.assertIsNone(img.md5)
@mock.patch.object(utils, 'init_http_request')
@mock.patch.object(hu, 'list_block_devices')
def test_image_scheme_with_checksums(self, mock_lbd, mock_http_req):
fake_image_meta = {'images': [{'raw_md5': 'fakeroot', 'raw_size': 1,
'container_name': 'fake_image.img.gz'}]}
prop_mock = mock.PropertyMock(return_value=yaml.dump(fake_image_meta))
type(mock_http_req.return_value).text = prop_mock
mock_lbd.return_value = LIST_BLOCK_DEVICES_SAMPLE
p_data = PROVISION_SAMPLE_DATA.copy()
drv = nailgun.Nailgun(p_data)
p_scheme = drv.partition_scheme
i_scheme = drv.image_scheme
mock_http_req.assert_called_once_with(
'http://fake.host.org:123/imgs/fake_image.yaml')
expected_images = []
for fs in p_scheme.fss:
if fs.mount not in PROVISION_SAMPLE_DATA['ks_meta']['image_data']:
continue
i_data = PROVISION_SAMPLE_DATA['ks_meta']['image_data'][fs.mount]
expected_images.append(image.Image(
uri=i_data['uri'],
target_device=fs.device,
format=i_data['format'],
container=i_data['container'],
))
expected_images = sorted(expected_images, key=lambda x: x.uri)
for i, img in enumerate(sorted(i_scheme.images, key=lambda x: x.uri)):
self.assertEqual(img.uri, expected_images[i].uri)
self.assertEqual(img.target_device,
expected_images[i].target_device)
self.assertEqual(img.format,
expected_images[i].format)
self.assertEqual(img.container,
expected_images[i].container)
self.assertEqual(
img.size, fake_image_meta['images'][0]['raw_size'])
self.assertEqual(img.md5, fake_image_meta['images'][0]['raw_md5'])
@mock.patch('yaml.load')
@mock.patch.object(utils, 'init_http_request')
@mock.patch.object(hu, 'list_block_devices')
def test_getlabel(self, mock_lbd, mock_http_req, mock_yaml):
mock_lbd.return_value = LIST_BLOCK_DEVICES_SAMPLE
drv = nailgun.Nailgun(PROVISION_SAMPLE_DATA)
self.assertEqual('', drv._getlabel(None))
long_label = '1234567890ABCDEFGHIJKLMNOPQRSTUVWXYZ'
self.assertEqual(' -L %s ' % long_label[:12],
drv._getlabel(long_label))
@mock.patch('yaml.load')
@mock.patch.object(utils, 'init_http_request')
@mock.patch.object(hu, 'list_block_devices')
def test_disk_dev_not_found(self, mock_lbd, mock_http_req, mock_yaml):
mock_lbd.return_value = LIST_BLOCK_DEVICES_SAMPLE
drv = nailgun.Nailgun(PROVISION_SAMPLE_DATA)
fake_ks_disk = {
"name": "fake",
"extra": [
"disk/by-id/fake_scsi_matches",
"disk/by-id/fake_ata_dont_matches"
]
}
self.assertRaises(errors.DiskNotFoundError, drv._disk_dev,
fake_ks_disk)
@mock.patch('yaml.load')
@mock.patch.object(utils, 'init_http_request')
@mock.patch.object(hu, 'list_block_devices')
def test_get_partition_count(self, mock_lbd, mock_http_req, mock_yaml):
mock_lbd.return_value = LIST_BLOCK_DEVICES_SAMPLE
drv = nailgun.Nailgun(PROVISION_SAMPLE_DATA)
self.assertEqual(3, drv._get_partition_count('Boot'))
self.assertEqual(1, drv._get_partition_count('TMP'))
@mock.patch('yaml.load')
@mock.patch.object(utils, 'init_http_request')
@mock.patch.object(hu, 'list_block_devices')
def test_partition_scheme_ceph(self, mock_lbd, mock_http_req, mock_yaml):
# TODO(agordeev): perform better testing of ceph logic
p_data = copy.deepcopy(PROVISION_SAMPLE_DATA)
for i in range(0, 3):
p_data['ks_meta']['pm_data']['ks_spaces'][i]['volumes'].append(
CEPH_JOURNAL)
p_data['ks_meta']['pm_data']['ks_spaces'][i]['volumes'].append(
CEPH_DATA)
mock_lbd.return_value = LIST_BLOCK_DEVICES_SAMPLE
drv = nailgun.Nailgun(p_data)
p_scheme = drv.partition_scheme
self.assertEqual(5, len(p_scheme.fss))
self.assertEqual(4, len(p_scheme.pvs))
self.assertEqual(3, len(p_scheme.lvs))
self.assertEqual(2, len(p_scheme.vgs))
self.assertEqual(3, len(p_scheme.parteds))
self.assertEqual(3, drv._get_partition_count('ceph'))
# NOTE(agordeev): (-2, -1, -1) is the list of ceph data partition
        # indexes corresponding to (sda, sdb, sdc) disks respectively.
for disk, part in enumerate((-2, -1, -1)):
self.assertEqual(CEPH_DATA['partition_guid'],
p_scheme.parteds[disk].partitions[part].guid)
@mock.patch('fuel_agent.drivers.nailgun.yaml.load')
@mock.patch('fuel_agent.drivers.nailgun.utils.init_http_request')
@mock.patch('fuel_agent.drivers.nailgun.hu.list_block_devices')
def test_grub_centos_26(self, mock_lbd, mock_http_req, mock_yaml):
data = copy.deepcopy(PROVISION_SAMPLE_DATA)
data['profile'] = 'centos'
data['ks_meta']['kernel_lt'] = 0
mock_lbd.return_value = LIST_BLOCK_DEVICES_SAMPLE
drv = nailgun.Nailgun(data)
self.assertEqual(drv.grub.kernel_params,
' ' + data['ks_meta']['pm_data']['kernel_params'])
self.assertEqual(drv.grub.kernel_regexp, r'^vmlinuz-2\.6.*')
self.assertEqual(drv.grub.initrd_regexp, r'^initramfs-2\.6.*')
self.assertIsNone(drv.grub.version)
self.assertIsNone(drv.grub.kernel_name)
self.assertIsNone(drv.grub.initrd_name)
@mock.patch('fuel_agent.drivers.nailgun.yaml.load')
@mock.patch('fuel_agent.drivers.nailgun.utils.init_http_request')
@mock.patch('fuel_agent.drivers.nailgun.hu.list_block_devices')
def test_grub_centos_lt(self, mock_lbd, mock_http_req, mock_yaml):
data = copy.deepcopy(PROVISION_SAMPLE_DATA)
data['profile'] = 'centos'
data['ks_meta']['kernel_lt'] = 1
mock_lbd.return_value = LIST_BLOCK_DEVICES_SAMPLE
drv = nailgun.Nailgun(data)
self.assertEqual(drv.grub.kernel_params,
' ' + data['ks_meta']['pm_data']['kernel_params'])
self.assertIsNone(drv.grub.kernel_regexp)
self.assertIsNone(drv.grub.initrd_regexp)
self.assertIsNone(drv.grub.version)
self.assertIsNone(drv.grub.kernel_name)
self.assertIsNone(drv.grub.initrd_name)
@mock.patch('fuel_agent.drivers.nailgun.yaml.load')
@mock.patch('fuel_agent.drivers.nailgun.utils.init_http_request')
@mock.patch('fuel_agent.drivers.nailgun.hu.list_block_devices')
def test_grub_ubuntu(self, mock_lbd, mock_http_req, mock_yaml):
data = copy.deepcopy(PROVISION_SAMPLE_DATA)
data['profile'] = 'ubuntu'
data['ks_meta']['kernel_lt'] = 0
mock_lbd.return_value = LIST_BLOCK_DEVICES_SAMPLE
drv = nailgun.Nailgun(data)
self.assertEqual(drv.grub.kernel_params,
' ' + data['ks_meta']['pm_data']['kernel_params'])
self.assertIsNone(drv.grub.version)
self.assertIsNone(drv.grub.kernel_regexp)
self.assertIsNone(drv.grub.initrd_regexp)
self.assertIsNone(drv.grub.kernel_name)
self.assertIsNone(drv.grub.initrd_name)
@mock.patch('fuel_agent.drivers.nailgun.yaml.load')
@mock.patch('fuel_agent.drivers.nailgun.utils.init_http_request')
@mock.patch('fuel_agent.drivers.nailgun.hu.list_block_devices')
def test_boot_partition_ok_single_disk(self, mock_lbd,
mock_http_req, mock_yaml):
data = copy.deepcopy(PROVISION_SAMPLE_DATA)
data['ks_meta']['pm_data']['ks_spaces'] = SINGLE_DISK_KS_SPACES
mock_lbd.return_value = LIST_BLOCK_DEVICES_SAMPLE
drv = nailgun.Nailgun(data)
self.assertEqual(
drv.partition_scheme.fs_by_mount('/boot').device,
'/dev/sda3')
@mock.patch('fuel_agent.drivers.nailgun.yaml.load')
@mock.patch('fuel_agent.drivers.nailgun.utils.init_http_request')
@mock.patch('fuel_agent.drivers.nailgun.hu.list_block_devices')
def test_boot_partition_ok_many_normal_disks(self, mock_lbd,
mock_http_req, mock_yaml):
data = copy.deepcopy(PROVISION_SAMPLE_DATA)
mock_lbd.return_value = LIST_BLOCK_DEVICES_SAMPLE
drv = nailgun.Nailgun(data)
self.assertEqual(
drv.partition_scheme.fs_by_mount('/boot').device,
'/dev/sda3')
@mock.patch('fuel_agent.drivers.nailgun.yaml.load')
@mock.patch('fuel_agent.drivers.nailgun.utils.init_http_request')
@mock.patch('fuel_agent.drivers.nailgun.hu.list_block_devices')
def test_boot_partition_ok_first_disk_huge(self, mock_lbd,
mock_http_req, mock_yaml):
data = copy.deepcopy(PROVISION_SAMPLE_DATA)
data['ks_meta']['pm_data']['ks_spaces'] = FIRST_DISK_HUGE_KS_SPACES
mock_lbd.return_value = LIST_BLOCK_DEVICES_SAMPLE
drv = nailgun.Nailgun(data)
self.assertEqual(
drv.partition_scheme.fs_by_mount('/boot').device,
'/dev/sdb3')
@mock.patch('fuel_agent.drivers.nailgun.yaml.load')
@mock.patch('fuel_agent.drivers.nailgun.utils.init_http_request')
@mock.patch('fuel_agent.drivers.nailgun.hu.list_block_devices')
def test_boot_partition_ok_many_huge_disks(self, mock_lbd,
mock_http_req, mock_yaml):
data = copy.deepcopy(PROVISION_SAMPLE_DATA)
data['ks_meta']['pm_data']['ks_spaces'] = MANY_HUGE_DISKS_KS_SPACES
mock_lbd.return_value = LIST_BLOCK_DEVICES_SAMPLE
drv = nailgun.Nailgun(data)
self.assertEqual(
drv.partition_scheme.fs_by_mount('/boot').device,
'/dev/sda3')
@mock.patch('fuel_agent.drivers.nailgun.yaml.load')
@mock.patch('fuel_agent.drivers.nailgun.utils.init_http_request')
@mock.patch('fuel_agent.drivers.nailgun.hu.list_block_devices')
def test_boot_partition_no_boot(self, mock_lbd,
mock_http_req, mock_yaml):
data = copy.deepcopy(PROVISION_SAMPLE_DATA)
data['ks_meta']['pm_data']['ks_spaces'] = NO_BOOT_KS_SPACES
mock_lbd.return_value = LIST_BLOCK_DEVICES_SAMPLE
self.assertRaises(errors.WrongPartitionSchemeError,
nailgun.Nailgun, data)
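

# The comments in the match_device tests above describe the matching rule:
# a non-empty ks_disk "extra" list is matched against the udev DEVLINKS,
# otherwise the ks_disk "id" is used. A rough, hypothetical sketch of such a
# matcher (not the actual fuel_agent.drivers.nailgun implementation) might be:
#
#     def match_device_sketch(hu_disk, ks_disk):
#         devlinks = hu_disk.get('uspec', {}).get('DEVLINKS', [])
#         if ks_disk.get('extra'):
#             return any(link.endswith(extra)
#                        for extra in ks_disk['extra'] for link in devlinks)
#         return any(link.endswith(ks_disk['id']) for link in devlinks)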
|
Comcast/neutron
|
refs/heads/master
|
neutron/tests/__init__.py
|
80
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
|
nmalkin/crawl
|
refs/heads/master
|
hook.py
|
1
|
# This file contains custom hooks for wpull with the following functionality:
# - a domain whitelist
# - a regex-based URL blacklist
# - completion notification
import os
import re
WHITELIST_LOCATION = os.environ.get('WHITELIST', '/data/whitelist.txt')
BLACKLIST_LOCATION = os.environ.get('BLACKLIST', '/data/blacklist.txt')
def load_whitelist():
"""
Load whitelist of allowed domains
"""
    whitelist = set()
if os.path.isfile(WHITELIST_LOCATION):
with open(WHITELIST_LOCATION, 'r') as f:
lines = f.readlines()
whitelist = {line.rstrip() for line in lines}
print('Registered whitelist with %d entries' % len(whitelist))
return whitelist
def load_blacklist():
"""
Load regular expressions to exclude URLs
"""
    blacklist = set()
if os.path.isfile(BLACKLIST_LOCATION):
with open(BLACKLIST_LOCATION, 'r') as f:
lines = f.readlines()
blacklist = {re.compile(line.rstrip()) for line in lines}
print('Registered blacklist with %d entries' % len(blacklist))
return blacklist
def validate_urls():
"""
Apply rules for URL inclusion/exclusion
"""
whitelist = load_whitelist()
blacklist = load_blacklist()
def accept_url(url_info, record_info, verdict, reasons):
# If our whitelist isn't empty, only allow domains it includes
if len(whitelist) > 0 and url_info['hostname'] not in whitelist:
return False
# Exclude any URL that matches the pattern in the blacklist
for rule in blacklist:
if rule.search(url_info['url']):
return False
# Otherwise, defer to wpull's decision
return verdict
wpull_hook.callbacks.accept_url = accept_url
def completion_hook():
"""
Trigger an optional hook when the crawl completes
For the hook to be triggered, the script's original working directory must
contain a file named "complete.py" (or a module of the same name) with a
function named "on_complete" defined.
on_complete must have the following signature:
on_complete(start_time, end_time, num_urls, bytes_downloaded)
"""
if os.path.isfile('../complete.py'):
import sys
sys.path.append('..')
import complete
wpull_hook.callbacks.finishing_statistics = complete.on_complete
print('Registered completion hook')
validate_urls()
completion_hook()
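
# A hypothetical complete.py matching the contract described in
# completion_hook() above; only the module name, function name and signature
# come from the docstring, the notification body is made up:
#
#     def on_complete(start_time, end_time, num_urls, bytes_downloaded):
#         duration = end_time - start_time
#         print('Crawl finished: %d URLs, %d bytes in %.1f seconds'
#               % (num_urls, bytes_downloaded, duration))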
|
ojengwa/odoo
|
refs/heads/8.0
|
addons/document/__openerp__.py
|
260
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Document Management System',
'version': '2.1',
'category': 'Knowledge Management',
'description': """
This is a complete document management system.
==============================================
* User Authentication
    * Document Indexation: .pptx and .docx files are not supported on the Windows platform.
* Dashboard for Document that includes:
* New Files (list)
* Files by Resource Type (graph)
* Files by Partner (graph)
* Files Size by Month (graph)
""",
'author': 'OpenERP SA',
'website': 'https://www.odoo.com',
'depends': ['knowledge', 'mail'],
'data': [
'security/document_security.xml',
'document_view.xml',
'document_data.xml',
'wizard/document_configuration_view.xml',
'security/ir.model.access.csv',
'report/document_report_view.xml',
'views/document.xml',
],
'demo': [ 'document_demo.xml' ],
'test': ['test/document_test2.yml'],
'installable': True,
'auto_install': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
eliasdesousa/indico
|
refs/heads/master
|
indico/modules/events/papers/controllers/display.py
|
2
|
# This file is part of Indico.
# Copyright (C) 2002 - 2017 European Organization for Nuclear Research (CERN).
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# Indico is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Indico; if not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
from flask import flash, request, session
from werkzeug.exceptions import Forbidden
from indico.modules.events.papers.controllers.base import RHPaperBase, RHPapersBase
from indico.modules.events.papers.forms import (PaperCommentForm, PaperJudgmentForm, PaperSubmissionForm,
build_review_form)
from indico.modules.events.papers.models.comments import PaperReviewComment
from indico.modules.events.papers.models.files import PaperFile
from indico.modules.events.papers.models.papers import Paper
from indico.modules.events.papers.models.reviews import PaperReview, PaperReviewType, PaperTypeProxy
from indico.modules.events.papers.models.revisions import PaperRevisionState
from indico.modules.events.papers.operations import (create_comment, create_paper_revision, create_review,
delete_comment, judge_paper, reset_paper_state, update_comment,
update_review)
from indico.modules.events.papers.util import (get_contributions_with_paper_submitted_by_user,
get_user_contributions_to_review, get_user_reviewed_contributions,
get_user_submittable_contributions)
from indico.modules.events.papers.views import WPDisplayCallForPapers, WPDisplayReviewingArea, render_paper_page
from indico.util.i18n import _
from indico.web.flask.templating import get_template_module
from indico.web.util import jsonify, jsonify_data, jsonify_form, jsonify_template
class RHSubmitPaper(RHPaperBase):
PAPER_REQUIRED = False
ALLOW_LOCKED = True
def _check_paper_protection(self):
if not RHPaperBase._check_paper_protection(self):
return False
if not self.contribution.is_user_associated(session.user, check_abstract=True):
return False
paper = self.contribution.paper
return paper is None or paper.state == PaperRevisionState.to_be_corrected
def _process(self):
form = PaperSubmissionForm()
if form.validate_on_submit():
if self.paper is None:
paper = Paper(self.contribution)
create_paper_revision(paper, session.user, form.files.data)
return jsonify_data(flash=False)
else:
create_paper_revision(self.paper, session.user, form.files.data)
return jsonify_data(flash=False, html=render_paper_page(self.paper))
return jsonify_form(form, form_header_kwargs={'action': request.relative_url}, disable_if_locked=False)
class RHPaperTimeline(RHPaperBase):
def _process(self):
return render_paper_page(self.paper, view_class=WPDisplayCallForPapers)
def _check_paper_protection(self):
return (self.contribution.is_user_associated(session.user, check_abstract=True) or
self.event.cfp.is_manager(session.user) or
self.paper.can_review(session.user) or
self.paper.can_judge(session.user))
class RHDownloadPaperFile(RHPaperBase):
"""Download a paper file"""
normalize_url_spec = {
'locators': {
lambda self: self.file
}
}
def _process_args(self):
RHPaperBase._process_args(self)
self.file = PaperFile.get_one(request.view_args['file_id'])
def _process(self):
return self.file.send()
class RHSubmitPaperReview(RHPaperBase):
"""Review an paper in a specific reviewing type"""
normalize_url_spec = {
'locators': {
lambda self: self.paper,
lambda self: self.type
}
}
def _check_paper_protection(self):
if self.paper.last_revision.get_reviews(user=session.user, group=self.type.instance):
return False
return self.paper.can_review(session.user, check_state=True)
def _process_args(self):
RHPaperBase._process_args(self)
self.type = PaperTypeProxy(PaperReviewType[request.view_args['review_type']])
def _process(self):
form = build_review_form(self.paper.last_revision, self.type)
if form.validate_on_submit():
create_review(self.paper, self.type, session.user, **form.split_data)
return jsonify_data(flash=False, html=render_paper_page(self.paper))
tpl = get_template_module('events/reviews/forms.html')
return jsonify(html=tpl.render_review_form(form, proposal=self.paper, group=self.type))
class RHEditPaperReview(RHPaperBase):
normalize_url_spec = {
'locators': {
lambda self: self.review
}
}
def _check_paper_protection(self):
return self.review.can_edit(session.user, check_state=True)
def _process_args(self):
RHPaperBase._process_args(self)
self.review = PaperReview.get_one(request.view_args['review_id'])
def _process(self):
form = build_review_form(review=self.review)
if form.validate_on_submit():
update_review(self.review, **form.split_data)
return jsonify_data(flash=False, html=render_paper_page(self.paper))
tpl = get_template_module('events/reviews/forms.html')
return jsonify(html=tpl.render_review_form(form, review=self.review))
class RHSubmitPaperComment(RHPaperBase):
def _check_paper_protection(self):
return self.paper.can_comment(session.user)
def _process(self):
form = PaperCommentForm(paper=self.paper, user=session.user)
if form.validate_on_submit():
visibility = form.visibility.data if form.visibility else None
create_comment(self.paper, form.text.data, visibility, session.user)
return jsonify_data(flash=False, html=render_paper_page(self.paper))
tpl = get_template_module('events/reviews/forms.html')
return jsonify(html=tpl.render_comment_form(form, proposal=self.paper))
class RHPaperCommentBase(RHPaperBase):
normalize_url_spec = {
'locators': {
lambda self: self.comment
}
}
def _process_args(self):
RHPaperBase._process_args(self)
self.comment = PaperReviewComment.get_one(request.view_args['comment_id'], is_deleted=False)
def _check_access(self):
RHPaperBase._check_access(self)
if not self.comment.can_edit(session.user):
raise Forbidden
class RHEditPaperComment(RHPaperCommentBase):
def _process(self):
form = PaperCommentForm(obj=self.comment, paper=self.paper, user=session.user,
prefix='edit-comment-{}-'.format(self.comment.id))
if form.validate_on_submit():
visibility = form.visibility.data if form.visibility else self.comment.visibility
update_comment(self.comment, form.text.data, visibility)
return jsonify_data(flash=False, html=render_paper_page(self.paper))
tpl = get_template_module('events/reviews/forms.html')
return jsonify(html=tpl.render_comment_form(form, proposal=self.paper, comment=self.comment, edit=True))
class RHDeletePaperComment(RHPaperCommentBase):
def _process(self):
delete_comment(self.comment)
return jsonify_data(flash=False)
class RHReviewingArea(RHPapersBase):
def _check_access(self):
if not session.user:
raise Forbidden
if not self.event.cfp.can_access_reviewing_area(session.user):
raise Forbidden
RHPapersBase._check_access(self)
def _process(self):
contribs_to_review = get_user_contributions_to_review(self.event, session.user)
reviewed_contribs = get_user_reviewed_contributions(self.event, session.user)
return WPDisplayReviewingArea.render_template('display/reviewing_area.html', self.event,
contribs_to_review=contribs_to_review,
reviewed_contribs=reviewed_contribs)
class RHJudgePaper(RHPaperBase):
def _check_paper_protection(self):
return self.paper.can_judge(session.user, check_state=True)
def _process(self):
form = PaperJudgmentForm(paper=self.paper)
if form.validate_on_submit():
judge_paper(self.paper, form.judgment.data, form.judgment_comment.data, judge=session.user)
return jsonify_data(flash=False, html=render_paper_page(self.paper))
class RHResetPaperState(RHPaperBase):
def _check_paper_protection(self):
if self.paper.state == PaperRevisionState.submitted:
return False
# managers and judges can always reset
if self.paper.event.can_manage(session.user) or self.paper.can_judge(session.user):
return True
def _process(self):
if self.paper.state != PaperRevisionState.submitted:
reset_paper_state(self.paper)
flash(_("The paper judgment has been reset"), 'success')
return jsonify_data(html=render_paper_page(self.paper))
class RHCallForPapers(RHPapersBase):
"""Show the main CFP page"""
def _check_access(self):
if not session.user:
raise Forbidden
RHPapersBase._check_access(self)
def _process_args(self):
RHPapersBase._process_args(self)
if not session.user:
# _check_access aborts in this case, but the functions below fail with a None user
return
self.papers = set(get_contributions_with_paper_submitted_by_user(self.event, session.user))
contribs = set(get_user_submittable_contributions(self.event, session.user))
self.contribs = contribs - self.papers
def _process(self):
return WPDisplayCallForPapers.render_template('display/call_for_papers.html', self.event,
contributions=self.contribs, papers=self.papers)
class RHSelectContribution(RHCallForPapers):
"""Select a contribution for which the user wants to submit a paper"""
def _process(self):
return jsonify_template('events/papers/display/select_contribution.html', contributions=self.contribs)
|
glessard/swift
|
refs/heads/snapshot
|
test/SourceKit/Inputs/sourcekitd_path_sanitize.py
|
20
|
#!/usr/bin/env python
# sourcekitd_path_sanitize.py - Cleans up paths from sourcekitd-test output
#
# This source file is part of the Swift.org open source project
#
# Copyright (c) 2014 - 2019 Apple Inc. and the Swift project authors
# Licensed under Apache License v2.0 with Runtime Library Exception
#
# See https://swift.org/LICENSE.txt for license information
# See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
import re
import sys
SWIFTMODULE_BUNDLE_RE = re.compile(
r'key.filepath: ".*[/\\](.*)\.swiftmodule[/\\].*\.swiftmodule"')
SWIFTMODULE_RE = re.compile(r'key.filepath: ".*[/\\](.*)\.swiftmodule"')
SWIFT_RE = re.compile(r'key.filepath: ".*[/\\](.*)\.swift"')
PCM_RE = re.compile(r'key.filepath: ".*[/\\](.*)-[0-9A-Z]*\.pcm"')
HEADER_RE = re.compile(r' file=\\".*[/\\](.*)\.h\\"')
try:
for line in sys.stdin.readlines():
line = re.sub(SWIFTMODULE_BUNDLE_RE,
r'key.filepath: \1.swiftmodule', line)
line = re.sub(SWIFTMODULE_RE, r'key.filepath: \1.swiftmodule', line)
line = re.sub(SWIFT_RE, r'key.filepath: \1.swift', line)
line = re.sub(PCM_RE, r'key.filepath: \1.pcm', line)
line = re.sub(HEADER_RE, r' file=\1.h', line)
sys.stdout.write(line)
except KeyboardInterrupt:
sys.stdout.flush()
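
# For illustration (made-up path): an input line such as
#   key.filepath: "/build/obj/Foo.swiftmodule/x86_64.swiftmodule"
# would be rewritten by SWIFTMODULE_BUNDLE_RE above to
#   key.filepath: Foo.swiftmodule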
|
IV-GII/SocialCookies
|
refs/heads/master
|
ENV1/lib/python2.7/site-packages/django/http/utils.py
|
40
|
"""
Functions that modify an HTTP request or response in some way.
"""
# This group of functions are run as part of the response handling, after
# everything else, including all response middleware. Think of them as
# "compulsory response middleware". Be careful about what goes here, because
# it's a little fiddly to override this behavior, so they should be truly
# universally applicable.
def fix_location_header(request, response):
"""
Ensures that we always use an absolute URI in any location header in the
response. This is required by RFC 2616, section 14.30.
Code constructing response objects is free to insert relative paths, as
this function converts them to absolute paths.
"""
if 'Location' in response and request.get_host():
response['Location'] = request.build_absolute_uri(response['Location'])
return response
def conditional_content_removal(request, response):
"""
Removes the content of responses for HEAD requests, 1xx, 204 and 304
responses. Ensures compliance with RFC 2616, section 4.3.
"""
if 100 <= response.status_code < 200 or response.status_code in (204, 304):
if response.streaming:
response.streaming_content = []
else:
response.content = b''
response['Content-Length'] = '0'
if request.method == 'HEAD':
if response.streaming:
response.streaming_content = []
else:
response.content = b''
return response
def fix_IE_for_attach(request, response):
"""
    On IE, serving a Content-Disposition header together with caching disabled
    (no-cache/no-store) prevents the client from downloading the file, so this
    function strips the offending cache headers from such responses.
"""
useragent = request.META.get('HTTP_USER_AGENT', '').upper()
if 'MSIE' not in useragent and 'CHROMEFRAME' not in useragent:
return response
offending_headers = ('no-cache', 'no-store')
if response.has_header('Content-Disposition'):
try:
del response['Pragma']
except KeyError:
pass
if response.has_header('Cache-Control'):
cache_control_values = [value.strip() for value in
response['Cache-Control'].split(',')
if value.strip().lower() not in offending_headers]
if not len(cache_control_values):
del response['Cache-Control']
else:
response['Cache-Control'] = ', '.join(cache_control_values)
return response
def fix_IE_for_vary(request, response):
"""
This function will fix the bug reported at
http://support.microsoft.com/kb/824847/en-us?spid=8722&sid=global
by clearing the Vary header whenever the mime-type is not safe
enough for Internet Explorer to handle. Poor thing.
"""
useragent = request.META.get('HTTP_USER_AGENT', '').upper()
if 'MSIE' not in useragent and 'CHROMEFRAME' not in useragent:
return response
    # These are the mime-types decreed "Vary-safe" for IE:
safe_mime_types = ('text/html', 'text/plain', 'text/sgml')
# The first part of the Content-Type field will be the MIME type,
# everything after ';', such as character-set, can be ignored.
mime_type = response.get('Content-Type', '').partition(';')[0]
if mime_type not in safe_mime_types:
try:
del response['Vary']
except KeyError:
pass
return response
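

# A rough sketch of how a handler might chain these fixes in order; Django's
# actual wiring lives elsewhere (core.handlers), so treat this only as an
# illustration of the intended call pattern, not as Django API:
def _apply_response_fixes_example(request, response):
    for fix in (fix_location_header, conditional_content_removal,
                fix_IE_for_attach, fix_IE_for_vary):
        response = fix(request, response)
    return response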
|
sidartaoliveira/ansible
|
refs/heads/devel
|
lib/ansible/plugins/action/net_config.py
|
137
|
#
# Copyright 2015 Peter Sprygada <psprygada@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import re
import time
import glob
from ansible.plugins.action.normal import ActionModule as _ActionModule
from ansible.module_utils._text import to_text
from ansible.module_utils.six.moves.urllib.parse import urlsplit
from ansible.utils.vars import merge_hash
PRIVATE_KEYS_RE = re.compile('__.+__')
class ActionModule(_ActionModule):
def run(self, tmp=None, task_vars=None):
if self._task.args.get('src'):
try:
self._handle_template()
except ValueError as exc:
return dict(failed=True, msg=exc.message)
result = super(ActionModule, self).run(tmp, task_vars)
if self._task.args.get('backup') and result.get('__backup__'):
# User requested backup and no error occurred in module.
# NOTE: If there is a parameter error, _backup key may not be in results.
filepath = self._write_backup(task_vars['inventory_hostname'],
result['__backup__'])
result['backup_path'] = filepath
# strip out any keys that have two leading and two trailing
# underscore characters
        for key in list(result.keys()):
if PRIVATE_KEYS_RE.match(key):
del result[key]
return result
def _get_working_path(self):
cwd = self._loader.get_basedir()
if self._task._role is not None:
cwd = self._task._role._role_path
return cwd
def _write_backup(self, host, contents):
backup_path = self._get_working_path() + '/backup'
if not os.path.exists(backup_path):
os.mkdir(backup_path)
for fn in glob.glob('%s/%s*' % (backup_path, host)):
os.remove(fn)
tstamp = time.strftime("%Y-%m-%d@%H:%M:%S", time.localtime(time.time()))
filename = '%s/%s_config.%s' % (backup_path, host, tstamp)
open(filename, 'w').write(contents)
return filename
def _handle_template(self):
src = self._task.args.get('src')
working_path = self._get_working_path()
        if os.path.isabs(src) or urlsplit(src).scheme:
source = src
else:
source = self._loader.path_dwim_relative(working_path, 'templates', src)
if not source:
source = self._loader.path_dwim_relative(working_path, src)
if not os.path.exists(source):
raise ValueError('path specified in src not found')
try:
with open(source, 'r') as f:
template_data = to_text(f.read())
except IOError:
return dict(failed=True, msg='unable to load src file')
# Create a template search path in the following order:
# [working_path, self_role_path, dependent_role_paths, dirname(source)]
searchpath = [working_path]
if self._task._role is not None:
searchpath.append(self._task._role._role_path)
        if hasattr(self._task, "_block"):
dep_chain = self._task._block.get_dep_chain()
if dep_chain is not None:
for role in dep_chain:
searchpath.append(role._role_path)
searchpath.append(os.path.dirname(source))
self._templar.environment.loader.searchpath = searchpath
self._task.args['src'] = self._templar.template(template_data)
|
branto1/ceph-deploy
|
refs/heads/master
|
ceph_deploy/hosts/suse/uninstall.py
|
12
|
def uninstall(distro, purge=False):
packages = [
'ceph',
'ceph-common',
'libcephfs1',
'librados2',
'librbd1',
'ceph-radosgw',
]
distro.packager.remove(packages)
|
junix/powerline
|
refs/heads/develop
|
tests/setup_statusline_catcher.py
|
28
|
# vim:fileencoding=utf-8:noet
import json
import vim
from powerline.lib.unicode import u
_powerline_old_render = powerline.render # NOQA
def _powerline_test_render_function(*args, **kwargs):
ret = _powerline_old_render(*args, **kwargs)
vim.eval('add(g:statusline_values, %s)' % json.dumps(u(ret)))
return ret
powerline.render = _powerline_test_render_function # NOQA
|
mtagle/airflow
|
refs/heads/master
|
airflow/operators/hive_to_druid.py
|
4
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module is deprecated. Please use `airflow.providers.apache.druid.operators.hive_to_druid`."""
import warnings
# pylint: disable=unused-import
from airflow.providers.apache.druid.operators.hive_to_druid import HiveToDruidTransfer # noqa
warnings.warn(
"This module is deprecated. Please use `airflow.providers.apache.druid.operators.hive_to_druid`.",
DeprecationWarning, stacklevel=2
)
|
darron/dd-agent
|
refs/heads/master
|
tests/checks/mock/test_cacti.py
|
27
|
# stdlib
import logging
import os
import shutil
import unittest
# project
from tests.checks.common import Fixtures, get_check
log = logging.getLogger()
CONFIG = """
init_config:
instances:
- mysql_host: localhost
mysql_user: root
rrd_path: /tmp/cacti_test/rrds
rrd_whitelist: %s
""" % Fixtures.file('whitelist.txt')
class TestCacti(unittest.TestCase):
def setUp(self):
self.tmp_dir = '/tmp/cacti_test'
self.rrd_dir = os.path.join(os.path.dirname(__file__), "cacti")
# Create our temporary RRD path, if needed
try:
os.mkdir(self.tmp_dir)
except Exception:
# Ignore, directory already exists
pass
def tearDown(self):
# Clean up the temp directory
shutil.rmtree(self.tmp_dir)
def _copy_rrds(self, xml_dir):
if os.access("/usr/bin/rrdtool", os.R_OK | os.X_OK):
# Copy the latest RRDs from /var/lib/rra/ to the test location
shutil.copytree("/var/lib/cacti/rra/", os.path.join(self.tmp_dir, 'rrds'))
return True
else:
return False
def testChecks(self):
check, instances = get_check('cacti', CONFIG)
rrd_dir = os.path.join(self.tmp_dir, 'rrds')
# Restore the RRDs from the XML dumps
if not self._copy_rrds(self.rrd_dir):
return
# Do a check to establish the last timestamps
check.check(instances[0])
check.get_metrics()
# Bump the last timestamps back 20 minutes so we have some actual data
twenty_min = 20 * 60
        for k, v in check.last_ts.items():
check.last_ts[k] = v - twenty_min
# Do a first check
check.check(instances[0])
results1 = check.get_metrics()
# Check again and make sure no new metrics are picked up
# But we will still have the payload stats
check.check(instances[0])
results2 = check.get_metrics()
last_ts1 = check.last_ts[rrd_dir + '/localhost_hdd_free_10.rrd.AVERAGE']
# Check once more to make sure last_ts ignores None vals when calculating
# where to start from
check.check(instances[0])
results3 = check.get_metrics()
last_ts2 = check.last_ts[rrd_dir + '/localhost_hdd_free_10.rrd.AVERAGE']
self.assertEquals(last_ts1, last_ts2)
metrics = [r[0] for r in results2]
# make sure diagnostic metrics are included
assert 'cacti.metrics.count' in metrics
assert 'cacti.rrd.count' in metrics
assert 'cacti.hosts.count' in metrics
metrics_count = [r for r in results2 if r[0] == 'cacti.metrics.count'][0][2]
hosts_count = [r for r in results2 if r[0] == 'cacti.hosts.count'][0][2]
rrd_count = [r for r in results2 if r[0] == 'cacti.rrd.count'][0][2]
assert metrics_count == 0
assert hosts_count == 1
assert rrd_count == 3
load1 = [m[2] for m in results1 if m[0] == 'system.load.1' and m[2]]
# Make sure some load metrics were returned
assert len(load1) > 0
# Should not have any - not included in the whitelist
current_users = [m[2] for m in results1 if m[0] == 'system.users.current' and m[2]]
self.assertEquals(len(current_users), 0)
disk_used = [m for m in results1 if m[0] == 'system.disk.used' and m[2]]
assert len(disk_used) > 0
# Make sure no None values are picked up
none_metrics = [m[2] for m in results1 if m[2] is None]
self.assertEquals(len(none_metrics), 0)
|
SPKian/Testing
|
refs/heads/master
|
erpnext/config/desktop.py
|
27
|
from __future__ import unicode_literals
from frappe import _
def get_data():
return {
"Accounts": {
"color": "#3498db",
"icon": "octicon octicon-repo",
"type": "module"
},
"Buying": {
"color": "#c0392b",
"icon": "icon-shopping-cart",
"icon": "octicon octicon-briefcase",
"type": "module"
},
"HR": {
"color": "#2ecc71",
"icon": "icon-group",
"icon": "octicon octicon-organization",
"label": _("Human Resources"),
"type": "module"
},
"Manufacturing": {
"color": "#7f8c8d",
"icon": "icon-cogs",
"icon": "octicon octicon-tools",
"type": "module"
},
"POS": {
"color": "#589494",
"icon": "icon-th",
"icon": "octicon octicon-credit-card",
"type": "page",
"link": "pos"
},
"Projects": {
"color": "#8e44ad",
"icon": "icon-puzzle-piece",
"icon": "octicon octicon-rocket",
"type": "module"
},
"Selling": {
"color": "#1abc9c",
"icon": "icon-tag",
"icon": "octicon octicon-tag",
"type": "module"
},
"CRM": {
"color": "#EF4DB6",
"icon": "octicon octicon-broadcast",
"type": "module"
},
"Stock": {
"color": "#f39c12",
"icon": "icon-truck",
"icon": "octicon octicon-package",
"type": "module"
},
"Support": {
"color": "#2c3e50",
"icon": "icon-phone",
"icon": "octicon octicon-issue-opened",
"type": "module"
},
"Learn": {
"color": "#FCB868",
"force_show": True,
"icon": "icon-facetime-video",
"type": "module",
"is_help": True
}
}
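# Illustrative helper (not part of ERPNext / Frappe): one way to inspect the map
# returned by get_data(), e.g. when debugging which desktop entries register as
# modules. The helper name is hypothetical and is not referenced anywhere else.
def _module_names():
    return sorted(name for name, cfg in get_data().items()
                  if cfg.get("type") == "module")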
|
iychoi/syndicate-core
|
refs/heads/master
|
ms/google/protobuf/json_format.py
|
3
|
# Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# https://developers.google.com/protocol-buffers/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Contains routines for printing protocol messages in JSON format.
Simple usage example:
# Create a proto object and serialize it to a json format string.
message = my_proto_pb2.MyMessage(foo='bar')
json_string = json_format.MessageToJson(message)
# Parse a json format string to proto object.
message = json_format.Parse(json_string, my_proto_pb2.MyMessage())
"""
__author__ = 'jieluo@google.com (Jie Luo)'
try:
from collections import OrderedDict
except ImportError:
from ordereddict import OrderedDict #PY26
import base64
import json
import math
import re
import six
import sys
from operator import methodcaller
from google.protobuf import descriptor
from google.protobuf import symbol_database
_TIMESTAMPFORMAT = '%Y-%m-%dT%H:%M:%S'
_INT_TYPES = frozenset([descriptor.FieldDescriptor.CPPTYPE_INT32,
descriptor.FieldDescriptor.CPPTYPE_UINT32,
descriptor.FieldDescriptor.CPPTYPE_INT64,
descriptor.FieldDescriptor.CPPTYPE_UINT64])
_INT64_TYPES = frozenset([descriptor.FieldDescriptor.CPPTYPE_INT64,
descriptor.FieldDescriptor.CPPTYPE_UINT64])
_FLOAT_TYPES = frozenset([descriptor.FieldDescriptor.CPPTYPE_FLOAT,
descriptor.FieldDescriptor.CPPTYPE_DOUBLE])
_INFINITY = 'Infinity'
_NEG_INFINITY = '-Infinity'
_NAN = 'NaN'
_UNPAIRED_SURROGATE_PATTERN = re.compile(six.u(
r'[\ud800-\udbff](?![\udc00-\udfff])|(?<![\ud800-\udbff])[\udc00-\udfff]'
))
class Error(Exception):
"""Top-level module error for json_format."""
class SerializeToJsonError(Error):
"""Thrown if serialization to JSON fails."""
class ParseError(Error):
"""Thrown in case of parsing error."""
def MessageToJson(message, including_default_value_fields=False):
"""Converts protobuf message to JSON format.
Args:
message: The protocol buffers message instance to serialize.
including_default_value_fields: If True, singular primitive fields,
repeated fields, and map fields will always be serialized. If
False, only serialize non-empty fields. Singular message fields
and oneof fields are not affected by this option.
Returns:
A string containing the JSON formatted protocol buffer message.
"""
printer = _Printer(including_default_value_fields)
return printer.ToJsonString(message)
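# Hedged, illustrative sketch only (not part of the upstream module). It uses the
# well-known Struct type so it stays runnable without a user-compiled .proto; any
# generated message class works the same way with MessageToJson above.
def _example_message_to_json():  # pragma: no cover - illustration only
  from google.protobuf import struct_pb2
  msg = struct_pb2.Struct()
  # Struct is a map<string, Value>; accessing a key auto-creates the entry.
  msg.fields['foo'].string_value = 'bar'
  msg.fields['count'].number_value = 3
  # Returns an indented JSON object string containing "foo" and "count".
  return MessageToJson(msg)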
def _IsMapEntry(field):
return (field.type == descriptor.FieldDescriptor.TYPE_MESSAGE and
field.message_type.has_options and
field.message_type.GetOptions().map_entry)
class _Printer(object):
"""JSON format printer for protocol message."""
def __init__(self,
including_default_value_fields=False):
self.including_default_value_fields = including_default_value_fields
def ToJsonString(self, message):
js = self._MessageToJsonObject(message)
return json.dumps(js, indent=2)
def _MessageToJsonObject(self, message):
"""Converts message to an object according to Proto3 JSON Specification."""
message_descriptor = message.DESCRIPTOR
full_name = message_descriptor.full_name
if _IsWrapperMessage(message_descriptor):
return self._WrapperMessageToJsonObject(message)
if full_name in _WKTJSONMETHODS:
return methodcaller(_WKTJSONMETHODS[full_name][0], message)(self)
js = {}
return self._RegularMessageToJsonObject(message, js)
def _RegularMessageToJsonObject(self, message, js):
"""Converts normal message according to Proto3 JSON Specification."""
fields = message.ListFields()
try:
for field, value in fields:
name = field.camelcase_name
if _IsMapEntry(field):
# Convert a map field.
v_field = field.message_type.fields_by_name['value']
js_map = {}
for key in value:
if isinstance(key, bool):
if key:
recorded_key = 'true'
else:
recorded_key = 'false'
else:
recorded_key = key
js_map[recorded_key] = self._FieldToJsonObject(
v_field, value[key])
js[name] = js_map
elif field.label == descriptor.FieldDescriptor.LABEL_REPEATED:
# Convert a repeated field.
js[name] = [self._FieldToJsonObject(field, k)
for k in value]
else:
js[name] = self._FieldToJsonObject(field, value)
# Serialize default value if including_default_value_fields is True.
if self.including_default_value_fields:
message_descriptor = message.DESCRIPTOR
for field in message_descriptor.fields:
# Singular message fields and oneof fields will not be affected.
if ((field.label != descriptor.FieldDescriptor.LABEL_REPEATED and
field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_MESSAGE) or
field.containing_oneof):
continue
name = field.camelcase_name
if name in js:
            # Skip the field which has been serialized already.
continue
if _IsMapEntry(field):
js[name] = {}
elif field.label == descriptor.FieldDescriptor.LABEL_REPEATED:
js[name] = []
else:
js[name] = self._FieldToJsonObject(field, field.default_value)
except ValueError as e:
raise SerializeToJsonError(
'Failed to serialize {0} field: {1}.'.format(field.name, e))
return js
def _FieldToJsonObject(self, field, value):
"""Converts field value according to Proto3 JSON Specification."""
if field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_MESSAGE:
return self._MessageToJsonObject(value)
elif field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_ENUM:
enum_value = field.enum_type.values_by_number.get(value, None)
if enum_value is not None:
return enum_value.name
else:
raise SerializeToJsonError('Enum field contains an integer value '
                                   'which cannot be mapped to an enum value.')
elif field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_STRING:
if field.type == descriptor.FieldDescriptor.TYPE_BYTES:
# Use base64 Data encoding for bytes
return base64.b64encode(value).decode('utf-8')
else:
return value
elif field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_BOOL:
return bool(value)
elif field.cpp_type in _INT64_TYPES:
return str(value)
elif field.cpp_type in _FLOAT_TYPES:
if math.isinf(value):
if value < 0.0:
return _NEG_INFINITY
else:
return _INFINITY
if math.isnan(value):
return _NAN
return value
def _AnyMessageToJsonObject(self, message):
"""Converts Any message according to Proto3 JSON Specification."""
if not message.ListFields():
return {}
# Must print @type first, use OrderedDict instead of {}
js = OrderedDict()
type_url = message.type_url
js['@type'] = type_url
sub_message = _CreateMessageFromTypeUrl(type_url)
sub_message.ParseFromString(message.value)
message_descriptor = sub_message.DESCRIPTOR
full_name = message_descriptor.full_name
if _IsWrapperMessage(message_descriptor):
js['value'] = self._WrapperMessageToJsonObject(sub_message)
return js
if full_name in _WKTJSONMETHODS:
js['value'] = methodcaller(_WKTJSONMETHODS[full_name][0],
sub_message)(self)
return js
return self._RegularMessageToJsonObject(sub_message, js)
def _GenericMessageToJsonObject(self, message):
"""Converts message according to Proto3 JSON Specification."""
    # Duration, Timestamp and FieldMask have a ToJsonString method to do the
    # conversion. Users can also call the method directly.
return message.ToJsonString()
def _ValueMessageToJsonObject(self, message):
"""Converts Value message according to Proto3 JSON Specification."""
which = message.WhichOneof('kind')
    # If the Value message is not set, treat it as null_value when serializing
    # to JSON. The parsed-back result will differ from the original message.
if which is None or which == 'null_value':
return None
if which == 'list_value':
return self._ListValueMessageToJsonObject(message.list_value)
if which == 'struct_value':
value = message.struct_value
else:
value = getattr(message, which)
oneof_descriptor = message.DESCRIPTOR.fields_by_name[which]
return self._FieldToJsonObject(oneof_descriptor, value)
def _ListValueMessageToJsonObject(self, message):
"""Converts ListValue message according to Proto3 JSON Specification."""
return [self._ValueMessageToJsonObject(value)
for value in message.values]
def _StructMessageToJsonObject(self, message):
"""Converts Struct message according to Proto3 JSON Specification."""
fields = message.fields
ret = {}
for key in fields:
ret[key] = self._ValueMessageToJsonObject(fields[key])
return ret
def _WrapperMessageToJsonObject(self, message):
return self._FieldToJsonObject(
message.DESCRIPTOR.fields_by_name['value'], message.value)
def _IsWrapperMessage(message_descriptor):
return message_descriptor.file.name == 'google/protobuf/wrappers.proto'
def _DuplicateChecker(js):
result = {}
for name, value in js:
if name in result:
raise ParseError('Failed to load JSON: duplicate key {0}.'.format(name))
result[name] = value
return result
def _CreateMessageFromTypeUrl(type_url):
# TODO(jieluo): Should add a way that users can register the type resolver
# instead of the default one.
db = symbol_database.Default()
type_name = type_url.split('/')[-1]
try:
message_descriptor = db.pool.FindMessageTypeByName(type_name)
except KeyError:
raise TypeError(
'Can not find message descriptor by type_url: {0}.'.format(type_url))
message_class = db.GetPrototype(message_descriptor)
return message_class()
def Parse(text, message, ignore_unknown_fields=False):
"""Parses a JSON representation of a protocol message into a message.
Args:
text: Message JSON representation.
message: A protocol buffer message to merge into.
ignore_unknown_fields: If True, do not raise errors for unknown fields.
Returns:
The same message passed as argument.
  Raises:
ParseError: On JSON parsing problems.
"""
if not isinstance(text, six.text_type): text = text.decode('utf-8')
try:
if sys.version_info < (2, 7):
# object_pair_hook is not supported before python2.7
js = json.loads(text)
else:
js = json.loads(text, object_pairs_hook=_DuplicateChecker)
except ValueError as e:
raise ParseError('Failed to load JSON: {0}.'.format(str(e)))
parser = _Parser(ignore_unknown_fields)
parser.ConvertMessage(js, message)
return message
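# Hedged, illustrative sketch only (not part of the upstream module): a JSON ->
# message -> JSON round trip using the well-known Value type, so it runs without
# a compiled .proto. Note that ignore_unknown_fields only affects regular
# (non well-known) message fields.
def _example_parse_roundtrip():  # pragma: no cover - illustration only
  from google.protobuf import struct_pb2
  msg = struct_pb2.Value()
  # Parse fills `msg` in place and also returns it.
  Parse('{"answer": 42}', msg)
  assert msg.struct_value.fields['answer'].number_value == 42
  return MessageToJson(msg)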
_INT_OR_FLOAT = six.integer_types + (float,)
class _Parser(object):
"""JSON format parser for protocol message."""
def __init__(self,
ignore_unknown_fields):
self.ignore_unknown_fields = ignore_unknown_fields
def ConvertMessage(self, value, message):
"""Convert a JSON object into a message.
Args:
value: A JSON object.
message: A WKT or regular protocol message to record the data.
Raises:
ParseError: In case of convert problems.
"""
message_descriptor = message.DESCRIPTOR
full_name = message_descriptor.full_name
if _IsWrapperMessage(message_descriptor):
self._ConvertWrapperMessage(value, message)
elif full_name in _WKTJSONMETHODS:
methodcaller(_WKTJSONMETHODS[full_name][1], value, message)(self)
else:
self._ConvertFieldValuePair(value, message)
def _ConvertFieldValuePair(self, js, message):
"""Convert field value pairs into regular message.
Args:
js: A JSON object to convert the field value pairs.
message: A regular protocol message to record the data.
Raises:
ParseError: In case of problems converting.
"""
names = []
message_descriptor = message.DESCRIPTOR
for name in js:
try:
field = message_descriptor.fields_by_camelcase_name.get(name, None)
if not field:
if self.ignore_unknown_fields:
continue
raise ParseError(
'Message type "{0}" has no field named "{1}".'.format(
message_descriptor.full_name, name))
if name in names:
raise ParseError('Message type "{0}" should not have multiple '
'"{1}" fields.'.format(
message.DESCRIPTOR.full_name, name))
names.append(name)
# Check no other oneof field is parsed.
if field.containing_oneof is not None:
oneof_name = field.containing_oneof.name
if oneof_name in names:
raise ParseError('Message type "{0}" should not have multiple '
'"{1}" oneof fields.'.format(
message.DESCRIPTOR.full_name, oneof_name))
names.append(oneof_name)
value = js[name]
if value is None:
message.ClearField(field.name)
continue
# Parse field value.
if _IsMapEntry(field):
message.ClearField(field.name)
self._ConvertMapFieldValue(value, message, field)
elif field.label == descriptor.FieldDescriptor.LABEL_REPEATED:
message.ClearField(field.name)
if not isinstance(value, list):
raise ParseError('repeated field {0} must be in [] which is '
'{1}.'.format(name, value))
if field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_MESSAGE:
# Repeated message field.
for item in value:
sub_message = getattr(message, field.name).add()
# None is a null_value in Value.
if (item is None and
sub_message.DESCRIPTOR.full_name != 'google.protobuf.Value'):
raise ParseError('null is not allowed to be used as an element'
' in a repeated field.')
self.ConvertMessage(item, sub_message)
else:
# Repeated scalar field.
for item in value:
if item is None:
raise ParseError('null is not allowed to be used as an element'
' in a repeated field.')
getattr(message, field.name).append(
_ConvertScalarFieldValue(item, field))
elif field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_MESSAGE:
sub_message = getattr(message, field.name)
self.ConvertMessage(value, sub_message)
else:
setattr(message, field.name, _ConvertScalarFieldValue(value, field))
except ParseError as e:
if field and field.containing_oneof is None:
raise ParseError('Failed to parse {0} field: {1}'.format(name, e))
else:
raise ParseError(str(e))
except ValueError as e:
raise ParseError('Failed to parse {0} field: {1}.'.format(name, e))
except TypeError as e:
raise ParseError('Failed to parse {0} field: {1}.'.format(name, e))
def _ConvertAnyMessage(self, value, message):
"""Convert a JSON representation into Any message."""
if isinstance(value, dict) and not value:
return
try:
type_url = value['@type']
except KeyError:
raise ParseError('@type is missing when parsing any message.')
sub_message = _CreateMessageFromTypeUrl(type_url)
message_descriptor = sub_message.DESCRIPTOR
full_name = message_descriptor.full_name
if _IsWrapperMessage(message_descriptor):
self._ConvertWrapperMessage(value['value'], sub_message)
elif full_name in _WKTJSONMETHODS:
methodcaller(
_WKTJSONMETHODS[full_name][1], value['value'], sub_message)(self)
else:
del value['@type']
self._ConvertFieldValuePair(value, sub_message)
# Sets Any message
message.value = sub_message.SerializeToString()
message.type_url = type_url
def _ConvertGenericMessage(self, value, message):
"""Convert a JSON representation into message with FromJsonString."""
    # Duration, Timestamp and FieldMask have a FromJsonString method to do the
    # conversion. Users can also call the method directly.
message.FromJsonString(value)
def _ConvertValueMessage(self, value, message):
"""Convert a JSON representation into Value message."""
if isinstance(value, dict):
self._ConvertStructMessage(value, message.struct_value)
elif isinstance(value, list):
      self._ConvertListValueMessage(value, message.list_value)
elif value is None:
message.null_value = 0
elif isinstance(value, bool):
message.bool_value = value
elif isinstance(value, six.string_types):
message.string_value = value
elif isinstance(value, _INT_OR_FLOAT):
message.number_value = value
else:
raise ParseError('Unexpected type for Value message.')
def _ConvertListValueMessage(self, value, message):
"""Convert a JSON representation into ListValue message."""
if not isinstance(value, list):
raise ParseError(
'ListValue must be in [] which is {0}.'.format(value))
message.ClearField('values')
for item in value:
self._ConvertValueMessage(item, message.values.add())
def _ConvertStructMessage(self, value, message):
"""Convert a JSON representation into Struct message."""
if not isinstance(value, dict):
raise ParseError(
'Struct must be in a dict which is {0}.'.format(value))
for key in value:
self._ConvertValueMessage(value[key], message.fields[key])
return
def _ConvertWrapperMessage(self, value, message):
"""Convert a JSON representation into Wrapper message."""
field = message.DESCRIPTOR.fields_by_name['value']
setattr(message, 'value', _ConvertScalarFieldValue(value, field))
def _ConvertMapFieldValue(self, value, message, field):
"""Convert map field value for a message map field.
Args:
value: A JSON object to convert the map field value.
message: A protocol message to record the converted data.
field: The descriptor of the map field to be converted.
Raises:
ParseError: In case of convert problems.
"""
if not isinstance(value, dict):
raise ParseError(
'Map field {0} must be in a dict which is {1}.'.format(
field.name, value))
key_field = field.message_type.fields_by_name['key']
value_field = field.message_type.fields_by_name['value']
for key in value:
key_value = _ConvertScalarFieldValue(key, key_field, True)
if value_field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_MESSAGE:
self.ConvertMessage(value[key], getattr(
message, field.name)[key_value])
else:
getattr(message, field.name)[key_value] = _ConvertScalarFieldValue(
value[key], value_field)
def _ConvertScalarFieldValue(value, field, require_str=False):
"""Convert a single scalar field value.
Args:
value: A scalar value to convert the scalar field value.
field: The descriptor of the field to convert.
require_str: If True, the field value must be a str.
Returns:
The converted scalar field value
Raises:
ParseError: In case of convert problems.
"""
if field.cpp_type in _INT_TYPES:
return _ConvertInteger(value)
elif field.cpp_type in _FLOAT_TYPES:
return _ConvertFloat(value)
elif field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_BOOL:
return _ConvertBool(value, require_str)
elif field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_STRING:
if field.type == descriptor.FieldDescriptor.TYPE_BYTES:
return base64.b64decode(value)
else:
# Checking for unpaired surrogates appears to be unreliable,
# depending on the specific Python version, so we check manually.
if _UNPAIRED_SURROGATE_PATTERN.search(value):
raise ParseError('Unpaired surrogate')
return value
elif field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_ENUM:
# Convert an enum value.
enum_value = field.enum_type.values_by_name.get(value, None)
if enum_value is None:
raise ParseError(
'Enum value must be a string literal with double quotes. '
'Type "{0}" has no value named {1}.'.format(
field.enum_type.full_name, value))
return enum_value.number
def _ConvertInteger(value):
"""Convert an integer.
Args:
value: A scalar value to convert.
Returns:
The integer value.
Raises:
ParseError: If an integer couldn't be consumed.
"""
if isinstance(value, float):
raise ParseError('Couldn\'t parse integer: {0}.'.format(value))
if isinstance(value, six.text_type) and value.find(' ') != -1:
raise ParseError('Couldn\'t parse integer: "{0}".'.format(value))
return int(value)
def _ConvertFloat(value):
"""Convert an floating point number."""
if value == 'nan':
raise ParseError('Couldn\'t parse float "nan", use "NaN" instead.')
try:
# Assume Python compatible syntax.
return float(value)
except ValueError:
# Check alternative spellings.
if value == _NEG_INFINITY:
return float('-inf')
elif value == _INFINITY:
return float('inf')
elif value == _NAN:
return float('nan')
else:
raise ParseError('Couldn\'t parse float: {0}.'.format(value))
def _ConvertBool(value, require_str):
"""Convert a boolean value.
Args:
value: A scalar value to convert.
require_str: If True, value must be a str.
Returns:
The bool parsed.
Raises:
ParseError: If a boolean value couldn't be consumed.
"""
if require_str:
if value == 'true':
return True
elif value == 'false':
return False
else:
raise ParseError('Expected "true" or "false", not {0}.'.format(value))
if not isinstance(value, bool):
raise ParseError('Expected true or false without quotes.')
return value
_WKTJSONMETHODS = {
'google.protobuf.Any': ['_AnyMessageToJsonObject',
'_ConvertAnyMessage'],
'google.protobuf.Duration': ['_GenericMessageToJsonObject',
'_ConvertGenericMessage'],
'google.protobuf.FieldMask': ['_GenericMessageToJsonObject',
'_ConvertGenericMessage'],
'google.protobuf.ListValue': ['_ListValueMessageToJsonObject',
'_ConvertListValueMessage'],
'google.protobuf.Struct': ['_StructMessageToJsonObject',
'_ConvertStructMessage'],
'google.protobuf.Timestamp': ['_GenericMessageToJsonObject',
'_ConvertGenericMessage'],
'google.protobuf.Value': ['_ValueMessageToJsonObject',
'_ConvertValueMessage']
}
|
shhui/nova
|
refs/heads/master
|
nova/tests/api/openstack/compute/plugins/v3/test_extended_status.py
|
16
|
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import webob
from nova import compute
from nova import db
from nova import exception
from nova.objects import instance as instance_obj
from nova.openstack.common import jsonutils
from nova import test
from nova.tests.api.openstack import fakes
from nova.tests import fake_instance
UUID1 = '00000000-0000-0000-0000-000000000001'
UUID2 = '00000000-0000-0000-0000-000000000002'
UUID3 = '00000000-0000-0000-0000-000000000003'
def fake_compute_get(*args, **kwargs):
inst = fakes.stub_instance(1, uuid=UUID3, task_state="kayaking",
vm_state="slightly crunchy", power_state=1, locked_by='owner')
return fake_instance.fake_instance_obj(args[1], **inst)
def fake_compute_get_all(*args, **kwargs):
db_list = [
fakes.stub_instance(1, uuid=UUID1, task_state="task-1",
vm_state="vm-1", power_state=1, locked_by=None),
fakes.stub_instance(2, uuid=UUID2, task_state="task-2",
vm_state="vm-2", power_state=2, locked_by='admin'),
]
fields = instance_obj.INSTANCE_DEFAULT_FIELDS
return instance_obj._make_instance_list(args[1],
instance_obj.InstanceList(),
db_list, fields)
class ExtendedStatusTest(test.TestCase):
content_type = 'application/json'
prefix = 'os-extended-status:'
def setUp(self):
super(ExtendedStatusTest, self).setUp()
fakes.stub_out_nw_api(self.stubs)
self.stubs.Set(compute.api.API, 'get', fake_compute_get)
self.stubs.Set(compute.api.API, 'get_all', fake_compute_get_all)
return_server = fakes.fake_instance_get()
self.stubs.Set(db, 'instance_get_by_uuid', return_server)
def _make_request(self, url):
req = webob.Request.blank(url)
req.headers['Accept'] = self.content_type
res = req.get_response(fakes.wsgi_app_v3(
init_only=('servers',
'os-extended-status')))
return res
def _get_server(self, body):
return jsonutils.loads(body).get('server')
def _get_servers(self, body):
return jsonutils.loads(body).get('servers')
def assertServerStates(self, server, vm_state, power_state, task_state,
locked_by):
self.assertEqual(server.get('%svm_state' % self.prefix), vm_state)
self.assertEqual(int(server.get('%spower_state' % self.prefix)),
power_state)
self.assertEqual(server.get('%stask_state' % self.prefix), task_state)
self.assertEqual(str(server.get('%slocked_by' % self.prefix)),
locked_by)
def test_show(self):
url = '/v3/servers/%s' % UUID3
res = self._make_request(url)
self.assertEqual(res.status_int, 200)
self.assertServerStates(self._get_server(res.body),
vm_state='slightly crunchy',
power_state=1,
task_state='kayaking',
locked_by='owner')
def test_detail(self):
url = '/v3/servers/detail'
res = self._make_request(url)
self.assertEqual(res.status_int, 200)
for i, server in enumerate(self._get_servers(res.body)):
self.assertServerStates(server,
vm_state='vm-%s' % (i + 1),
power_state=(i + 1),
task_state='task-%s' % (i + 1),
locked_by=['None', 'admin'][i])
def test_no_instance_passthrough_404(self):
def fake_compute_get(*args, **kwargs):
raise exception.InstanceNotFound(instance_id='fake')
self.stubs.Set(compute.api.API, 'get', fake_compute_get)
url = '/v3/servers/70f6db34-de8d-4fbd-aafb-4065bdfa6115'
res = self._make_request(url)
self.assertEqual(res.status_int, 404)
|
jegger/kivy
|
refs/heads/master
|
examples/shader/plasma.py
|
12
|
'''
Plasma Shader
=============
This shader example has been taken from
http://www.iquilezles.org/apps/shadertoy/ with some adaptations.
It might become a Kivy widget once experimentation is done.
'''
from kivy.clock import Clock
from kivy.app import App
from kivy.uix.floatlayout import FloatLayout
from kivy.core.window import Window
from kivy.graphics import RenderContext
from kivy.properties import StringProperty
# This header must not be changed; it contains the minimum information
# required by Kivy.
header = '''
#ifdef GL_ES
precision highp float;
#endif
/* Outputs from the vertex shader */
varying vec4 frag_color;
varying vec2 tex_coord0;
/* uniform texture samplers */
uniform sampler2D texture0;
'''
# Plasma shader
plasma_shader = header + '''
uniform vec2 resolution;
uniform float time;
void main(void)
{
float x = gl_FragCoord.x;
float y = gl_FragCoord.y;
float mov0 = x+y+cos(sin(time)*2.)*100.+sin(x/100.)*1000.;
float mov1 = y / resolution.y / 0.2 + time;
float mov2 = x / resolution.x / 0.2;
float c1 = abs(sin(mov1+time)/2.+mov2/2.-mov1-mov2+time);
float c2 = abs(sin(c1+sin(mov0/1000.+time)
+sin(y/40.+time)+sin((x+y)/100.)*3.));
float c3 = abs(sin(c2+cos(mov1+mov2+c2)+cos(mov2)+sin(x/1000.)));
gl_FragColor = vec4( c1,c2,c3,1.0);
}
'''
class ShaderWidget(FloatLayout):
# property to set the source code for fragment shader
fs = StringProperty(None)
def __init__(self, **kwargs):
# Instead of using Canvas, we will use a RenderContext,
# and change the default shader used.
self.canvas = RenderContext()
        # call the parent constructor
        # if there are any graphics objects, they will be added to our new canvas
super(ShaderWidget, self).__init__(**kwargs)
# We'll update our glsl variables in a clock
Clock.schedule_interval(self.update_glsl, 1 / 60.)
def on_fs(self, instance, value):
# set the fragment shader to our source code
shader = self.canvas.shader
old_value = shader.fs
shader.fs = value
if not shader.success:
shader.fs = old_value
            raise Exception('Shader compilation failed')
def update_glsl(self, *largs):
self.canvas['time'] = Clock.get_boottime()
self.canvas['resolution'] = list(map(float, self.size))
# This is needed for the default vertex shader.
self.canvas['projection_mat'] = Window.render_context['projection_mat']
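# Hedged, illustrative helper (not part of the original example): because `fs` is
# a StringProperty, the fragment shader can be swapped at runtime; on_fs() above
# recompiles it and falls back to the previous source if compilation fails. The
# solid-red shader below is a hypothetical stand-in for any other fragment source.
def swap_shader(widget, new_source=None):
    widget.fs = new_source or header + '''
void main(void) { gl_FragColor = vec4(1.0, 0.0, 0.0, 1.0); }
'''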
class PlasmaApp(App):
def build(self):
return ShaderWidget(fs=plasma_shader)
if __name__ == '__main__':
PlasmaApp().run()
|
benchisell/photostream-bc
|
refs/heads/master
|
flask/lib/python2.7/site-packages/flup/__init__.py
|
9480
|
#
|
archyufa/CloudFerry
|
refs/heads/master
|
cloudferrylib/utils/rbd_util.py
|
3
|
# Copyright (c) 2014 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the License);
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an AS IS BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and#
# limitations under the License.
from cloudferrylib.utils import cmd_cfg
from cloudferrylib.utils import ssh_util
class RbdUtil(ssh_util.SshUtil):
rbd_rm_cmd = cmd_cfg.rbd_cmd("rm -p %s %s")
rbd_import_cmd = cmd_cfg.rbd_cmd("import --image-format=%s %s %s")
rbd_import_diff_cmd = cmd_cfg.rbd_cmd("import-diff %s %s")
rbd_export_cmd = cmd_cfg.rbd_cmd("export %s %s")
rbd_export_diff_cmd = cmd_cfg.rbd_cmd("export-diff %s %s")
rbd_export_diff_snap_cmd = cmd_cfg.rbd_cmd("export-diff --snap %s %s %s")
rbd_export_diff_from_snap_cmd = \
cmd_cfg.rbd_cmd("export-diff --from-snap %s --snap %s %s %s")
rbd_export_diff_from_cmd = \
cmd_cfg.rbd_cmd("export-diff --from-snap %s %s %s")
rbd_info_cmd = cmd_cfg.rbd_cmd("-p %s info %s --format %s")
rbd_snap_rm = cmd_cfg.rbd_cmd("snap rm %s@%s")
    # example pool=compute filename = %s_disk.local % instance_id
def rm(self, pool, filename, int_host=None):
cmd = self.rbd_rm_cmd(pool, filename)
return self.execute(cmd, int_host)
def snap_rm(self, volume_path, snapshot_name, int_host=None):
cmd = self.rbd_snap_rm(volume_path, snapshot_name)
return self.execute(cmd, int_host)
# example image-format=2 output="-" filename=%s_disk.local
def rbd_import(self, image_format, output, filename, int_host=None):
cmd = self.rbd_import_cmd(image_format, output, filename)
return self.execute(cmd, int_host)
# example output="-" ceph_path=%s_disk.local
def rbd_import_diff(self, output, ceph_path, int_host=None):
        cmd = self.rbd_import_diff_cmd(output, ceph_path)
return self.execute(cmd, int_host)
# example filename=volume-id1 output=-
def rbd_export(self, filename, output, int_host=None):
cmd = self.rbd_export_cmd(filename, output)
return self.execute(cmd, int_host)
# example ceph_path=volume-id1 output=-
def rbd_export_diff(self, ceph_path, output, int_host=None):
        cmd = self.rbd_export_diff_cmd(ceph_path, output)
return self.execute(cmd, int_host)
# pool=images filename=image_id format=json
def rbd_get_info(self, pool, filename, format_output='json',
int_host=None):
cmd = self.rbd_info_cmd(pool, filename, format_output)
return self.execute(cmd, int_host)
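# Illustrative sketch only (not part of CloudFerry): how the helpers above are
# typically combined. `util` is assumed to be an already-constructed RbdUtil;
# its constructor arguments come from ssh_util.SshUtil and are not shown here.
def _cleanup_disk_example(util, instance_id, int_host=None):
    """Fetch a compute disk's metadata as JSON, then remove it from the pool."""
    filename = '%s_disk.local' % instance_id  # naming follows the comments above
    info = util.rbd_get_info('compute', filename, int_host=int_host)
    util.rm('compute', filename, int_host=int_host)
    return info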
|
mrh1997/cymu
|
refs/heads/master
|
sample/main.py
|
1
|
"""
This module demonstrates the usage of cymu.
"""
import os
import sys
from cymu.compiler import compile_file
module1Path = os.path.join(os.path.dirname(sys.argv[0]), 'module1.c')
module1_cls = compile_file(module1Path)
module1 = module1_cls()
module1.demo_func()
print "a =",module1.a
print "b =",module1.b
print "c =",module1.c
|
allmende/synnefo
|
refs/heads/develop
|
snf-astakos-app/astakos/im/migrations/0061_auto__chg_field_astakosuser_uuid.py
|
10
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Changing field 'AstakosUser.uuid'
db.alter_column('im_astakosuser', 'uuid', self.gf('django.db.models.fields.CharField')(default=None, unique=True, max_length=255))
def backwards(self, orm):
# Changing field 'AstakosUser.uuid'
db.alter_column('im_astakosuser', 'uuid', self.gf('django.db.models.fields.CharField')(unique=True, max_length=255, null=True))
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'im.additionalmail': {
'Meta': {'object_name': 'AdditionalMail'},
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['im.AstakosUser']"})
},
'im.approvalterms': {
'Meta': {'object_name': 'ApprovalTerms'},
'date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'db_index': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'location': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'im.astakosuser': {
'Meta': {'object_name': 'AstakosUser', '_ormbases': ['auth.User']},
'accepted_email': ('django.db.models.fields.EmailField', [], {'default': 'None', 'max_length': '75', 'null': 'True', 'blank': 'True'}),
'accepted_policy': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '255', 'null': 'True', 'blank': 'True'}),
'activation_sent': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'affiliation': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'auth_token': ('django.db.models.fields.CharField', [], {'max_length': '64', 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'auth_token_created': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'auth_token_expires': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'date_signed_terms': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'deactivated_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'deactivated_reason': ('django.db.models.fields.TextField', [], {'default': 'None', 'null': 'True'}),
'disturbed_quota': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'email_verified': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'has_credits': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'has_signed_terms': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'invitations': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'is_rejected': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_verified': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'level': ('django.db.models.fields.IntegerField', [], {'default': '4'}),
'moderated': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'moderated_at': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'moderated_data': ('django.db.models.fields.TextField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'policy': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['im.Resource']", 'null': 'True', 'through': "orm['im.AstakosUserQuota']", 'symmetrical': 'False'}),
'rejected_reason': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'updated': ('django.db.models.fields.DateTimeField', [], {}),
'user_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True', 'primary_key': 'True'}),
'uuid': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'verification_code': ('django.db.models.fields.CharField', [], {'max_length': '255', 'unique': 'True', 'null': 'True'}),
'verified_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'})
},
'im.astakosuserauthprovider': {
'Meta': {'ordering': "('module', 'created')", 'unique_together': "(('identifier', 'module', 'user'),)", 'object_name': 'AstakosUserAuthProvider'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'affiliation': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '255', 'null': 'True', 'blank': 'True'}),
'auth_backend': ('django.db.models.fields.CharField', [], {'default': "'astakos'", 'max_length': '255'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'identifier': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'info_data': ('django.db.models.fields.TextField', [], {'default': "''", 'null': 'True', 'blank': 'True'}),
'module': ('django.db.models.fields.CharField', [], {'default': "'local'", 'max_length': '255'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'auth_providers'", 'to': "orm['im.AstakosUser']"})
},
'im.astakosuserquota': {
'Meta': {'unique_together': "(('resource', 'user'),)", 'object_name': 'AstakosUserQuota'},
'capacity': ('django.db.models.fields.BigIntegerField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'resource': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['im.Resource']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['im.AstakosUser']"})
},
'im.authproviderpolicyprofile': {
'Meta': {'ordering': "['priority']", 'object_name': 'AuthProviderPolicyProfile'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'authpolicy_profiles'", 'symmetrical': 'False', 'to': "orm['auth.Group']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_exclusive': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'policy_add': ('django.db.models.fields.NullBooleanField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'policy_automoderate': ('django.db.models.fields.NullBooleanField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'policy_create': ('django.db.models.fields.NullBooleanField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'policy_limit': ('django.db.models.fields.IntegerField', [], {'default': 'None', 'null': 'True'}),
'policy_login': ('django.db.models.fields.NullBooleanField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'policy_remove': ('django.db.models.fields.NullBooleanField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'policy_required': ('django.db.models.fields.NullBooleanField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'policy_switch': ('django.db.models.fields.NullBooleanField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'priority': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'provider': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'users': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'authpolicy_profiles'", 'symmetrical': 'False', 'to': "orm['im.AstakosUser']"})
},
'im.chain': {
'Meta': {'object_name': 'Chain'},
'chain': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'im.component': {
'Meta': {'object_name': 'Component'},
'auth_token': ('django.db.models.fields.CharField', [], {'max_length': '64', 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'auth_token_created': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'auth_token_expires': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'base_url': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255', 'db_index': 'True'}),
'url': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True'})
},
'im.emailchange': {
'Meta': {'object_name': 'EmailChange'},
'activation_key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '40', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'new_email_address': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'requested_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'emailchanges'", 'unique': 'True', 'to': "orm['im.AstakosUser']"})
},
'im.endpoint': {
'Meta': {'object_name': 'Endpoint'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'service': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'endpoints'", 'to': "orm['im.Service']"})
},
'im.endpointdata': {
'Meta': {'unique_together': "(('endpoint', 'key'),)", 'object_name': 'EndpointData'},
'endpoint': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'data'", 'to': "orm['im.Endpoint']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '1024'})
},
'im.invitation': {
'Meta': {'object_name': 'Invitation'},
'code': ('django.db.models.fields.BigIntegerField', [], {'db_index': 'True'}),
'consumed': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'inviter': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'invitations_sent'", 'null': 'True', 'to': "orm['im.AstakosUser']"}),
'is_consumed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'realname': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'})
},
'im.pendingthirdpartyuser': {
'Meta': {'unique_together': "(('provider', 'third_party_identifier'),)", 'object_name': 'PendingThirdPartyUser'},
'affiliation': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'info': ('django.db.models.fields.TextField', [], {'default': "''", 'null': 'True', 'blank': 'True'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'null': 'True', 'blank': 'True'}),
'provider': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'third_party_identifier': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'token': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'im.project': {
'Meta': {'object_name': 'Project'},
'application': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'project'", 'unique': 'True', 'to': "orm['im.ProjectApplication']"}),
'creation_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.BigIntegerField', [], {'primary_key': 'True', 'db_column': "'id'"}),
'members': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['im.AstakosUser']", 'through': "orm['im.ProjectMembership']", 'symmetrical': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '80', 'unique': 'True', 'null': 'True', 'db_index': 'True'}),
'state': ('django.db.models.fields.IntegerField', [], {'default': '1', 'db_index': 'True'})
},
'im.projectapplication': {
'Meta': {'unique_together': "(('chain', 'id'),)", 'object_name': 'ProjectApplication'},
'applicant': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'projects_applied'", 'to': "orm['im.AstakosUser']"}),
'chain': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'chained_apps'", 'db_column': "'chain'", 'to': "orm['im.Project']"}),
'comments': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'end_date': ('django.db.models.fields.DateTimeField', [], {}),
'homepage': ('django.db.models.fields.URLField', [], {'max_length': '255', 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'issue_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'limit_on_members_number': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}),
'member_join_policy': ('django.db.models.fields.IntegerField', [], {}),
'member_leave_policy': ('django.db.models.fields.IntegerField', [], {}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '80'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'projects_owned'", 'to': "orm['im.AstakosUser']"}),
'resource_grants': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['im.Resource']", 'null': 'True', 'through': "orm['im.ProjectResourceGrant']", 'blank': 'True'}),
'response': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'response_actor': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'responded_apps'", 'null': 'True', 'to': "orm['im.AstakosUser']"}),
'response_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'start_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'state': ('django.db.models.fields.IntegerField', [], {'default': '0', 'db_index': 'True'}),
'waive_actor': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'waived_apps'", 'null': 'True', 'to': "orm['im.AstakosUser']"}),
'waive_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'waive_reason': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'})
},
'im.projectlock': {
'Meta': {'object_name': 'ProjectLock'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'im.projectlog': {
'Meta': {'object_name': 'ProjectLog'},
'actor': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['im.AstakosUser']", 'null': 'True'}),
'comments': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'date': ('django.db.models.fields.DateTimeField', [], {}),
'from_state': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'log'", 'to': "orm['im.Project']"}),
'reason': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'to_state': ('django.db.models.fields.IntegerField', [], {})
},
'im.projectmembership': {
'Meta': {'unique_together': "(('person', 'project'),)", 'object_name': 'ProjectMembership'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'person': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['im.AstakosUser']"}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['im.Project']"}),
'state': ('django.db.models.fields.IntegerField', [], {'default': '0', 'db_index': 'True'})
},
'im.projectmembershiplog': {
'Meta': {'object_name': 'ProjectMembershipLog'},
'actor': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['im.AstakosUser']", 'null': 'True'}),
'comments': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'date': ('django.db.models.fields.DateTimeField', [], {}),
'from_state': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'membership': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'log'", 'to': "orm['im.ProjectMembership']"}),
'reason': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'to_state': ('django.db.models.fields.IntegerField', [], {})
},
'im.projectresourcegrant': {
'Meta': {'unique_together': "(('resource', 'project_application'),)", 'object_name': 'ProjectResourceGrant'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'member_capacity': ('django.db.models.fields.BigIntegerField', [], {'default': '0'}),
'project_application': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['im.ProjectApplication']", 'null': 'True'}),
'project_capacity': ('django.db.models.fields.BigIntegerField', [], {'null': 'True'}),
'resource': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['im.Resource']"})
},
'im.resource': {
'Meta': {'object_name': 'Resource'},
'api_visible': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'desc': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'service_origin': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'service_type': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'ui_visible': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'unit': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}),
'uplimit': ('django.db.models.fields.BigIntegerField', [], {'default': '0'})
},
'im.service': {
'Meta': {'object_name': 'Service'},
'component': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['im.Component']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'im.sessioncatalog': {
'Meta': {'object_name': 'SessionCatalog'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'session_key': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'sessions'", 'null': 'True', 'to': "orm['im.AstakosUser']"})
},
'im.usersetting': {
'Meta': {'unique_together': "(('user', 'setting'),)", 'object_name': 'UserSetting'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'setting': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['im.AstakosUser']"}),
'value': ('django.db.models.fields.IntegerField', [], {})
}
}
complete_apps = ['im']
|
centwave/jg82ksgvqkuan
|
refs/heads/master
|
django/contrib/gis/tests/geogapp/tests.py
|
222
|
"""
Tests for geography support in PostGIS 1.5+
"""
import os
from django.contrib.gis import gdal
from django.contrib.gis.measure import D
from django.test import TestCase
from models import City, County, Zipcode
class GeographyTest(TestCase):
def test01_fixture_load(self):
"Ensure geography features loaded properly."
self.assertEqual(8, City.objects.count())
def test02_distance_lookup(self):
"Testing GeoQuerySet distance lookup support on non-point geography fields."
z = Zipcode.objects.get(code='77002')
cities1 = list(City.objects
.filter(point__distance_lte=(z.poly, D(mi=500)))
.order_by('name')
.values_list('name', flat=True))
cities2 = list(City.objects
.filter(point__dwithin=(z.poly, D(mi=500)))
.order_by('name')
.values_list('name', flat=True))
for cities in [cities1, cities2]:
self.assertEqual(['Dallas', 'Houston', 'Oklahoma City'], cities)
def test03_distance_method(self):
"Testing GeoQuerySet.distance() support on non-point geography fields."
        # `GeoQuerySet.distance` is not allowed on geometry fields.
htown = City.objects.get(name='Houston')
qs = Zipcode.objects.distance(htown.point)
def test04_invalid_operators_functions(self):
"Ensuring exceptions are raised for operators & functions invalid on geography fields."
        # Only a subset of the geometry functions & operators are available
# to PostGIS geography types. For more information, visit:
# http://postgis.refractions.net/documentation/manual-1.5/ch08.html#PostGIS_GeographyFunctions
z = Zipcode.objects.get(code='77002')
# ST_Within not available.
self.assertRaises(ValueError, City.objects.filter(point__within=z.poly).count)
# `@` operator not available.
self.assertRaises(ValueError, City.objects.filter(point__contained=z.poly).count)
# Regression test for #14060, `~=` was never really implemented for PostGIS.
htown = City.objects.get(name='Houston')
self.assertRaises(ValueError, City.objects.get, point__exact=htown.point)
def test05_geography_layermapping(self):
"Testing LayerMapping support on models with geography fields."
# There is a similar test in `layermap` that uses the same data set,
# but the County model here is a bit different.
if not gdal.HAS_GDAL: return
from django.contrib.gis.utils import LayerMapping
# Getting the shapefile and mapping dictionary.
shp_path = os.path.realpath(os.path.join(os.path.dirname(__file__), '..', 'data'))
co_shp = os.path.join(shp_path, 'counties', 'counties.shp')
co_mapping = {'name' : 'Name',
'state' : 'State',
'mpoly' : 'MULTIPOLYGON',
}
# Reference county names, number of polygons, and state names.
names = ['Bexar', 'Galveston', 'Harris', 'Honolulu', 'Pueblo']
num_polys = [1, 2, 1, 19, 1] # Number of polygons for each.
st_names = ['Texas', 'Texas', 'Texas', 'Hawaii', 'Colorado']
lm = LayerMapping(County, co_shp, co_mapping, source_srs=4269, unique='name')
lm.save(silent=True, strict=True)
for c, name, num_poly, state in zip(County.objects.order_by('name'), names, num_polys, st_names):
self.assertEqual(4326, c.mpoly.srid)
self.assertEqual(num_poly, len(c.mpoly))
self.assertEqual(name, c.name)
self.assertEqual(state, c.state)
def test06_geography_area(self):
"Testing that Area calculations work on geography columns."
from django.contrib.gis.measure import A
# SELECT ST_Area(poly) FROM geogapp_zipcode WHERE code='77002';
ref_area = 5439084.70637573
tol = 5
z = Zipcode.objects.area().get(code='77002')
self.assertAlmostEqual(z.area.sq_m, ref_area, tol)
|
mcldev/DjangoCMS_Charts
|
refs/heads/master
|
djangocms_charts/models.py
|
1
|
from cms.models import CMSPlugin
from .chartjs.models import *
|
zdary/intellij-community
|
refs/heads/master
|
python/testData/completion/notImportedQualifiedName/NoImportForSubpackages/mypackage/__init__.py
|
12133432
| |
huongttlan/statsmodels
|
refs/heads/master
|
statsmodels/duration/tests/__init__.py
|
12133432
| |
nitzmahone/ansible
|
refs/heads/devel
|
lib/ansible/modules/cloud/vmware/vmware_deploy_ovf.py
|
18
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2017, Matt Martz <matt@sivel.net>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = '''
author: 'Matt Martz (@sivel)'
short_description: 'Deploys a VMware virtual machine from an OVF or OVA file'
description:
- 'This module can be used to deploy a VMware VM from an OVF or OVA file'
module: vmware_deploy_ovf
notes: []
options:
allow_duplicates:
default: "yes"
description:
- Whether or not to allow duplicate VM names. ESXi allows duplicates, vCenter may not.
type: bool
datacenter:
default: ha-datacenter
description:
- Datacenter to deploy to.
cluster:
description:
- Cluster to deploy to.
datastore:
default: datastore1
description:
- Datastore to deploy to.
deployment_option:
description:
- The key of the chosen deployment option.
disk_provisioning:
choices:
- flat
- eagerZeroedThick
- monolithicSparse
- twoGbMaxExtentSparse
- twoGbMaxExtentFlat
- thin
- sparse
- thick
- seSparse
- monolithicFlat
default: thin
description:
- Disk provisioning type.
fail_on_spec_warnings:
description:
- Cause the module to treat OVF Import Spec warnings as errors.
default: "no"
type: bool
folder:
description:
- Absolute path of folder to place the virtual machine.
- If not specified, defaults to the value of C(datacenter.vmFolder).
name:
description:
- Name of the VM to work with.
- Virtual machine names in vCenter are not necessarily unique, which may be problematic.
networks:
default:
VM Network: VM Network
description:
            - 'C(key: value) mapping of OVF network name to the vCenter network name.'
ovf:
description:
- 'Path to OVF or OVA file to deploy.'
aliases:
- ova
power_on:
default: true
description:
- 'Whether or not to power on the virtual machine after creation.'
type: bool
properties:
description:
- The assignment of values to the properties found in the OVF as key value pairs.
resource_pool:
default: Resources
description:
- 'Resource Pool to deploy to.'
wait:
default: true
description:
            - 'Wait for the virtual machine to power on.'
type: bool
wait_for_ip_address:
default: false
description:
- Wait until vCenter detects an IP address for the VM.
- This requires vmware-tools (vmtoolsd) to properly work after creation.
type: bool
requirements:
- pyvmomi
version_added: "2.7"
extends_documentation_fragment: vmware.documentation
'''
EXAMPLES = r'''
- vmware_deploy_ovf:
hostname: '{{ vcenter_hostname }}'
username: '{{ vcenter_username }}'
password: '{{ vcenter_password }}'
ovf: /path/to/ubuntu-16.04-amd64.ovf
wait_for_ip_address: true
delegate_to: localhost
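# Additional illustrative example; the datacenter, cluster, datastore, folder,
# and OVF property names below are placeholder assumptions, not values taken
# from this module. It deploys an OVA into a cluster, maps an OVF network,
# sets an OVF property, and leaves the VM powered off.
- vmware_deploy_ovf:
    hostname: '{{ vcenter_hostname }}'
    username: '{{ vcenter_username }}'
    password: '{{ vcenter_password }}'
    datacenter: my_datacenter
    cluster: my_cluster
    datastore: my_datastore
    folder: /my_datacenter/vm/deployed
    name: my_appliance
    ovf: /path/to/appliance.ova
    disk_provisioning: thin
    networks:
      'OVF Network': VM Network
    properties:
      guest_hostname: my-appliance
    power_on: false
  delegate_to: localhost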
'''
RETURN = r'''
instance:
description: metadata about the new virtual machine
returned: always
type: dict
sample: None
'''
import io
import os
import sys
import tarfile
import time
import traceback
from threading import Thread
from ansible.module_utils._text import to_native
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.six import string_types
from ansible.module_utils.urls import generic_urlparse, open_url, urlparse, urlunparse
from ansible.module_utils.vmware import (HAS_PYVMOMI, connect_to_api, find_datacenter_by_name, find_datastore_by_name,
find_network_by_name, find_resource_pool_by_name, find_vm_by_name, find_cluster_by_name,
gather_vm_facts, vmware_argument_spec, wait_for_task, wait_for_vm_ip)
try:
from ansible.module_utils.vmware import vim
from pyVmomi import vmodl
except ImportError:
pass
def path_exists(value):
if not isinstance(value, string_types):
value = str(value)
value = os.path.expanduser(os.path.expandvars(value))
if not os.path.exists(value):
raise ValueError('%s is not a valid path' % value)
return value
class ProgressReader(io.FileIO):
def __init__(self, name, mode='r', closefd=True):
self.bytes_read = 0
io.FileIO.__init__(self, name, mode=mode, closefd=closefd)
def read(self, size=10240):
chunk = io.FileIO.read(self, size)
self.bytes_read += len(chunk)
return chunk
class TarFileProgressReader(tarfile.ExFileObject):
def __init__(self, *args):
self.bytes_read = 0
tarfile.ExFileObject.__init__(self, *args)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
try:
self.close()
except:
pass
def read(self, size=10240):
chunk = tarfile.ExFileObject.read(self, size)
self.bytes_read += len(chunk)
return chunk
class VMDKUploader(Thread):
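    '''Worker thread that streams one OVF file item (typically a VMDK) to the
    device upload URL obtained from the NFC lease, reading either from a file
    on disk or from a member of an OVA tarball, and tracking bytes read so the
    caller can report overall progress.'''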
def __init__(self, vmdk, url, validate_certs=True, tarinfo=None, create=False):
Thread.__init__(self)
self.vmdk = vmdk
if tarinfo:
self.size = tarinfo.size
else:
self.size = os.stat(vmdk).st_size
self.url = url
self.validate_certs = validate_certs
self.tarinfo = tarinfo
self.f = None
self.e = None
self._create = create
@property
def bytes_read(self):
try:
return self.f.bytes_read
except AttributeError:
return 0
def _request_opts(self):
'''
        Requests for vmdk files differ from other file types. Build the request options here to handle that.
'''
headers = {
'Content-Length': self.size,
'Content-Type': 'application/octet-stream',
}
if self._create:
# Non-VMDK
method = 'PUT'
headers['Overwrite'] = 't'
else:
# VMDK
method = 'POST'
headers['Content-Type'] = 'application/x-vnd.vmware-streamVmdk'
return {
'method': method,
'headers': headers,
}
def _open_url(self):
open_url(self.url, data=self.f, validate_certs=self.validate_certs, **self._request_opts())
def run(self):
if self.tarinfo:
try:
with TarFileProgressReader(self.vmdk, self.tarinfo) as self.f:
self._open_url()
except Exception:
self.e = sys.exc_info()
else:
try:
with ProgressReader(self.vmdk, 'rb') as self.f:
self._open_url()
except Exception:
self.e = sys.exc_info()
class VMwareDeployOvf:
def __init__(self, module):
self.si = connect_to_api(module)
self.module = module
self.params = module.params
self.datastore = None
self.datacenter = None
self.resource_pool = None
self.network_mappings = []
self.ovf_descriptor = None
self.tar = None
self.lease = None
self.import_spec = None
self.entity = None
def get_objects(self):
self.datastore = find_datastore_by_name(self.si, self.params['datastore'])
if not self.datastore:
self.module.fail_json(msg='%(datastore)s could not be located' % self.params)
self.datacenter = find_datacenter_by_name(self.si, self.params['datacenter'])
if not self.datacenter:
self.module.fail_json(msg='%(datacenter)s could not be located' % self.params)
if self.params['cluster']:
cluster = find_cluster_by_name(self.si, self.params['cluster'])
if cluster is None:
self.module.fail_json(msg="Unable to find cluster '%(cluster)s'" % self.params)
else:
self.resource_pool = cluster.resourcePool
else:
self.resource_pool = find_resource_pool_by_name(self.si, self.params['resource_pool'])
if not self.resource_pool:
self.module.fail_json(msg='%(resource_pool)s could not be located' % self.params)
for key, value in self.params['networks'].items():
network = find_network_by_name(self.si, value)
if not network:
                self.module.fail_json(msg='%s could not be located' % value)
network_mapping = vim.OvfManager.NetworkMapping()
network_mapping.name = key
network_mapping.network = network
self.network_mappings.append(network_mapping)
return self.datastore, self.datacenter, self.resource_pool, self.network_mappings
def get_ovf_descriptor(self):
if tarfile.is_tarfile(self.params['ovf']):
self.tar = tarfile.open(self.params['ovf'])
ovf = None
for candidate in self.tar.getmembers():
dummy, ext = os.path.splitext(candidate.name)
if ext.lower() == '.ovf':
ovf = candidate
break
if not ovf:
self.module.fail_json(msg='Could not locate OVF file in %(ovf)s' % self.params)
self.ovf_descriptor = to_native(self.tar.extractfile(ovf).read())
else:
with open(self.params['ovf']) as f:
self.ovf_descriptor = f.read()
return self.ovf_descriptor
def get_lease(self):
datastore, datacenter, resource_pool, network_mappings = self.get_objects()
params = {
'diskProvisioning': self.params['disk_provisioning'],
}
if self.params['name']:
params['entityName'] = self.params['name']
if network_mappings:
params['networkMapping'] = network_mappings
if self.params['deployment_option']:
params['deploymentOption'] = self.params['deployment_option']
if self.params['properties']:
params['propertyMapping'] = []
for key, value in self.params['properties'].items():
property_mapping = vim.KeyValue()
property_mapping.key = key
property_mapping.value = str(value) if isinstance(value, bool) else value
params['propertyMapping'].append(property_mapping)
if self.params['folder']:
folder = self.si.searchIndex.FindByInventoryPath(self.params['folder'])
else:
folder = datacenter.vmFolder
spec_params = vim.OvfManager.CreateImportSpecParams(**params)
ovf_descriptor = self.get_ovf_descriptor()
self.import_spec = self.si.ovfManager.CreateImportSpec(
ovf_descriptor,
resource_pool,
datastore,
spec_params
)
errors = [to_native(e.msg) for e in getattr(self.import_spec, 'error', [])]
if self.params['fail_on_spec_warnings']:
errors.extend(
(to_native(w.msg) for w in getattr(self.import_spec, 'warning', []))
)
if errors:
self.module.fail_json(
msg='Failure validating OVF import spec: %s' % '. '.join(errors)
)
for warning in getattr(self.import_spec, 'warning', []):
self.module.warn('Problem validating OVF import spec: %s' % to_native(warning.msg))
if not self.params['allow_duplicates']:
name = self.import_spec.importSpec.configSpec.name
match = find_vm_by_name(self.si, name, folder=folder)
if match:
self.module.exit_json(instance=gather_vm_facts(self.si, match), changed=False)
if self.module.check_mode:
self.module.exit_json(changed=True, instance={'hw_name': name})
try:
self.lease = resource_pool.ImportVApp(
self.import_spec.importSpec,
folder
)
except vmodl.fault.SystemError as e:
self.module.fail_json(
msg='Failed to start import: %s' % to_native(e.msg)
)
while self.lease.state != vim.HttpNfcLease.State.ready:
time.sleep(0.1)
self.entity = self.lease.info.entity
return self.lease, self.import_spec
def _normalize_url(self, url):
'''
        The hostname in URLs from vmware may be ``*``; update it accordingly.
'''
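        # For example (illustrative URL), 'https://*/nfc/ticket/disk-0.vmdk'
        # becomes 'https://<vcenter_hostname>/nfc/ticket/disk-0.vmdk'.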
url_parts = generic_urlparse(urlparse(url))
if url_parts.hostname == '*':
if url_parts.port:
url_parts.netloc = '%s:%d' % (self.params['hostname'], url_parts.port)
else:
url_parts.netloc = self.params['hostname']
return urlunparse(url_parts.as_list())
def upload(self):
if self.params['ovf'] is None:
self.module.fail_json(msg="OVF path is required for upload operation.")
ovf_dir = os.path.dirname(self.params['ovf'])
lease, import_spec = self.get_lease()
uploaders = []
for file_item in import_spec.fileItem:
device_upload_url = None
for device_url in lease.info.deviceUrl:
if file_item.deviceId == device_url.importKey:
device_upload_url = self._normalize_url(device_url.url)
break
if not device_upload_url:
lease.HttpNfcLeaseAbort(
vmodl.fault.SystemError(reason='Failed to find deviceUrl for file %s' % file_item.path)
)
self.module.fail_json(
msg='Failed to find deviceUrl for file %s' % file_item.path
)
vmdk_tarinfo = None
if self.tar:
vmdk = self.tar
try:
vmdk_tarinfo = self.tar.getmember(file_item.path)
except KeyError:
lease.HttpNfcLeaseAbort(
vmodl.fault.SystemError(reason='Failed to find VMDK file %s in OVA' % file_item.path)
)
self.module.fail_json(
msg='Failed to find VMDK file %s in OVA' % file_item.path
)
else:
vmdk = os.path.join(ovf_dir, file_item.path)
try:
path_exists(vmdk)
except ValueError:
lease.HttpNfcLeaseAbort(
vmodl.fault.SystemError(reason='Failed to find VMDK file at %s' % vmdk)
)
self.module.fail_json(
msg='Failed to find VMDK file at %s' % vmdk
)
uploaders.append(
VMDKUploader(
vmdk,
device_upload_url,
self.params['validate_certs'],
tarinfo=vmdk_tarinfo,
create=file_item.create
)
)
total_size = sum(u.size for u in uploaders)
total_bytes_read = [0] * len(uploaders)
for i, uploader in enumerate(uploaders):
uploader.start()
while uploader.is_alive():
time.sleep(0.1)
total_bytes_read[i] = uploader.bytes_read
lease.HttpNfcLeaseProgress(int(100.0 * sum(total_bytes_read) / total_size))
if uploader.e:
lease.HttpNfcLeaseAbort(
vmodl.fault.SystemError(reason='%s' % to_native(uploader.e[1]))
)
self.module.fail_json(
msg='%s' % to_native(uploader.e[1]),
exception=''.join(traceback.format_tb(uploader.e[2]))
)
def complete(self):
self.lease.HttpNfcLeaseComplete()
def power_on(self):
facts = {}
if self.params['power_on']:
task = self.entity.PowerOn()
if self.params['wait']:
wait_for_task(task)
if self.params['wait_for_ip_address']:
_facts = wait_for_vm_ip(self.si, self.entity)
if not _facts:
self.module.fail_json(msg='Waiting for IP address timed out')
facts.update(_facts)
if not facts:
facts.update(gather_vm_facts(self.si, self.entity))
return facts
def main():
argument_spec = vmware_argument_spec()
argument_spec.update({
'name': {},
'datastore': {
'default': 'datastore1',
},
'datacenter': {
'default': 'ha-datacenter',
},
'cluster': {
'default': None,
},
'deployment_option': {
'default': None,
},
'folder': {
'default': None,
},
'resource_pool': {
'default': 'Resources',
},
'networks': {
'default': {
'VM Network': 'VM Network',
},
'type': 'dict',
},
'ovf': {
'type': path_exists,
'aliases': ['ova'],
},
'disk_provisioning': {
'choices': [
'flat',
'eagerZeroedThick',
'monolithicSparse',
'twoGbMaxExtentSparse',
'twoGbMaxExtentFlat',
'thin',
'sparse',
'thick',
'seSparse',
'monolithicFlat'
],
'default': 'thin',
},
'power_on': {
'type': 'bool',
'default': True,
},
'properties': {
'type': 'dict',
},
'wait': {
'type': 'bool',
'default': True,
},
'wait_for_ip_address': {
'type': 'bool',
'default': False,
},
'allow_duplicates': {
'type': 'bool',
'default': True,
},
'fail_on_spec_warnings': {
'type': 'bool',
'default': False,
},
})
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
)
if not HAS_PYVMOMI:
module.fail_json(msg='pyvmomi python library not found')
deploy_ovf = VMwareDeployOvf(module)
deploy_ovf.upload()
deploy_ovf.complete()
facts = deploy_ovf.power_on()
module.exit_json(instance=facts, changed=True)
if __name__ == '__main__':
main()
|
shanestafford/moose
|
refs/heads/devel
|
gui/vtk/MeshRenderWidget.py
|
34
|
import os, sys, getopt
try:
from PyQt4 import QtCore, QtGui
QtCore.Signal = QtCore.pyqtSignal
QtCore.Slot = QtCore.pyqtSlot
except ImportError:
try:
from PySide import QtCore, QtGui
QtCore.QString = str
except ImportError:
raise ImportError("Cannot load either PyQt or PySide")
import vtk
from vtk.util.colors import peacock, tomato, red, white, black
from vtk.qt4.QVTKRenderWindowInteractor import QVTKRenderWindowInteractor
from PeacockActor import PeacockActor
from ClippedActor import ClippedActor
import RendererFactory
class MeshRenderWidget(QtGui.QWidget):
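    """Widget that renders a mesh with VTK and provides controls for toggling
    block visibility, highlighting blocks/sidesets/nodesets, and clipping the
    mesh along a plane."""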
def __init__(self, tree_widget):
QtGui.QWidget.__init__(self)
self.tree_widget = tree_widget
self.tree_widget.mesh_item_changed.connect(self.meshItemChanged)
self.mesh_file_name = ''
self.mesh_renderer = None
self.current_block_actors = {}
self.current_sideset_actors = {}
self.current_nodeset_actors = {}
self.this_layout = QtGui.QVBoxLayout()
self.setLayout(self.this_layout)
self.vtkwidget = QVTKRenderWindowInteractor(self)
self.renderer = vtk.vtkRenderer()
self.renderer.SetBackground(0.2,0.2,0.2)
self.renderer.SetBackground2(1,1,1)
self.renderer.SetGradientBackground(1)
self.renderer.ResetCamera()
self.this_layout.addWidget(self.vtkwidget)
self.this_layout.setStretchFactor(self.vtkwidget, 10)
self.vtkwidget.setMinimumHeight(300)
self.vtkwidget.GetRenderWindow().AddRenderer(self.renderer)
self.interactor = self.vtkwidget.GetRenderWindow().GetInteractor()
self.interactor.SetInteractorStyle(vtk.vtkInteractorStyleTrackballCamera())
self.show()
self.interactor.Initialize()
self.controls_layout = QtGui.QHBoxLayout()
self.left_controls_layout = QtGui.QVBoxLayout()
self.block_view_group_box = QtGui.QGroupBox('Show Blocks')
self.block_view_group_box.setMaximumWidth(150)
# self.block_view_group_box.setMaximumHeight(200)
self.block_view_layout = QtGui.QVBoxLayout()
self.block_view_list = QtGui.QListView()
self.block_view_model = QtGui.QStandardItemModel()
self.block_view_model.itemChanged.connect(self._blockViewItemChanged)
self.block_view_list.setModel(self.block_view_model)
self.block_view_layout.addWidget(self.block_view_list)
self.block_view_group_box.setLayout(self.block_view_layout)
self.left_controls_layout.addWidget(self.block_view_group_box)
self.controls_layout.addLayout(self.left_controls_layout)
self.right_controls_layout = QtGui.QVBoxLayout()
self.controls_layout.addLayout(self.right_controls_layout)
self.view_mesh_checkbox = QtGui.QCheckBox('View Mesh')
self.view_mesh_checkbox.setToolTip('Toggle viewing of mesh elements')
self.view_mesh_checkbox.setCheckState(QtCore.Qt.Checked)
self.view_mesh_checkbox.stateChanged.connect(self.viewMeshCheckboxChanged)
self.right_controls_layout.addWidget(self.view_mesh_checkbox)
self.highlight_group_box = QtGui.QGroupBox('Highlight')
self.highlight_group_box.setMaximumHeight(70)
self.highlight_group_box.setSizePolicy(QtGui.QSizePolicy.MinimumExpanding, QtGui.QSizePolicy.Fixed)
# self.highlight_group_box.setMaximumWidth(200)
self.highlight_layout = QtGui.QHBoxLayout()
self.highlight_group_box.setLayout(self.highlight_layout)
self.right_controls_layout.addWidget(self.highlight_group_box)
self.highlight_block_label = QtGui.QLabel('Block:')
self.highlight_block_label.setAlignment(QtCore.Qt.AlignRight | QtCore.Qt.AlignVCenter)
self.highlight_block_combo = QtGui.QComboBox()
# self.highlight_block_combo.setMaximumWidth(50)
self.highlight_block_combo.setSizeAdjustPolicy(QtGui.QComboBox.AdjustToMinimumContentsLength)
self.highlight_block_combo.setSizePolicy(QtGui.QSizePolicy.MinimumExpanding, QtGui.QSizePolicy.Fixed)
self.highlight_block_combo.setToolTip('Highlight a block in the mesh')
self.highlight_block_combo.currentIndexChanged[str].connect(self.showBlockSelected)
self.highlight_layout.addWidget(self.highlight_block_label)
self.highlight_layout.addWidget(self.highlight_block_combo)
self.highlight_sideset_label = QtGui.QLabel('Sideset:')
self.highlight_sideset_label.setAlignment(QtCore.Qt.AlignRight | QtCore.Qt.AlignVCenter)
self.highlight_sideset_combo = QtGui.QComboBox()
# self.highlight_sideset_combo.setMaximumWidth(50)
self.highlight_sideset_combo.setSizeAdjustPolicy(QtGui.QComboBox.AdjustToMinimumContentsLength)
self.highlight_sideset_combo.setSizePolicy(QtGui.QSizePolicy.MinimumExpanding, QtGui.QSizePolicy.Fixed)
self.highlight_sideset_combo.setToolTip('Highlight a sideset in the mesh')
self.highlight_sideset_combo.currentIndexChanged[str].connect(self.showSidesetSelected)
self.highlight_layout.addWidget(self.highlight_sideset_label)
self.highlight_layout.addWidget(self.highlight_sideset_combo)
self.highlight_nodeset_label = QtGui.QLabel('Nodeset:')
self.highlight_nodeset_label.setAlignment(QtCore.Qt.AlignRight | QtCore.Qt.AlignVCenter)
self.highlight_nodeset_combo = QtGui.QComboBox()
# self.highlight_nodeset_combo.setMaximumWidth(50)
self.highlight_nodeset_combo.setSizeAdjustPolicy(QtGui.QComboBox.AdjustToMinimumContentsLength)
self.highlight_nodeset_combo.setSizePolicy(QtGui.QSizePolicy.MinimumExpanding, QtGui.QSizePolicy.Fixed)
self.highlight_nodeset_combo.setToolTip('Highlight a nodeset in the mesh')
self.highlight_nodeset_combo.currentIndexChanged[str].connect(self.showNodesetSelected)
self.highlight_layout.addWidget(self.highlight_nodeset_label)
self.highlight_layout.addWidget(self.highlight_nodeset_combo)
self.highlight_clear = QtGui.QPushButton('Clear')
self.highlight_clear.setToolTip('Clear highlighting')
self.highlight_clear.setDisabled(True)
self.highlight_clear.clicked.connect(self.clearHighlight)
self.highlight_layout.addWidget(self.highlight_clear)
self.plane = vtk.vtkPlane()
self.plane.SetOrigin(0, 0, 0)
self.plane.SetNormal(1, 0, 0)
self.clip_groupbox = QtGui.QGroupBox("Clip")
self.clip_groupbox.setToolTip('Toggle clip mode to slice the mesh open along a plane')
self.clip_groupbox.setCheckable(True)
self.clip_groupbox.setChecked(False)
self.clip_groupbox.setMaximumHeight(70)
self.clip_groupbox.toggled[bool].connect(self._clippingToggled)
clip_layout = QtGui.QHBoxLayout()
self.clip_plane_combobox = QtGui.QComboBox()
self.clip_plane_combobox.setToolTip('Direction of the normal for the clip plane')
self.clip_plane_combobox.addItem('x')
self.clip_plane_combobox.addItem('y')
self.clip_plane_combobox.addItem('z')
self.clip_plane_combobox.currentIndexChanged[str].connect(self._clipNormalChanged)
clip_layout.addWidget(self.clip_plane_combobox)
self.clip_plane_slider = QtGui.QSlider(QtCore.Qt.Horizontal)
self.clip_plane_slider.setToolTip('Slide to change plane position')
self.clip_plane_slider.setRange(0, 100)
self.clip_plane_slider.setSliderPosition(50)
self.clip_plane_slider.sliderMoved[int].connect(self._clipSliderMoved)
clip_layout.addWidget(self.clip_plane_slider)
# vbox->addStretch(1);
self.clip_groupbox.setLayout(clip_layout)
self.right_controls_layout.addWidget(self.clip_groupbox)
self.this_layout.addLayout(self.controls_layout)
self.this_layout.setStretchFactor(self.controls_layout, 1)
self.bounds = {}
self.bounds['x'] = [0.0, 0.0]
self.bounds['y'] = [0.0, 0.0]
self.bounds['z'] = [0.0, 0.0]
# self.draw_edges_checkbox = QtGui.QCheckBox("View Mesh")
# self.left_controls_layout.addWidget(self.draw_edges_checkbox)
def clear(self):
self.highlight_block_combo.clear()
self.highlight_sideset_combo.clear()
self.highlight_nodeset_combo.clear()
for block_actor_name, block_actor in self.current_block_actors.items():
block_actor.hide()
for sideset_actor_name, sideset_actor in self.current_sideset_actors.items():
sideset_actor.hide()
for nodeset_actor_name, nodeset_actor in self.current_nodeset_actors.items():
nodeset_actor.hide()
self.current_block_actors = {}
self.current_sideset_actors = {}
self.current_nodeset_actors = {}
def meshItemChanged(self, item):
# Disconnect some actions while we fill stuff in
if self.mesh_renderer:
self.highlight_block_combo.currentIndexChanged[str].disconnect(self.showBlockSelected)
self.highlight_sideset_combo.currentIndexChanged[str].disconnect(self.showSidesetSelected)
self.highlight_nodeset_combo.currentIndexChanged[str].disconnect(self.showNodesetSelected)
self.clear()
self.mesh_renderer = RendererFactory.getRenderer(self, item.table_data)
if self.mesh_renderer:
self.show()
else:
self.hide()
return
self.current_block_actors = self.mesh_renderer.block_actors
self.current_sideset_actors = self.mesh_renderer.sideset_actors
self.current_nodeset_actors = self.mesh_renderer.nodeset_actors
self.block_view_model.clear()
for block in self.mesh_renderer.blocks:
block_display_name = str(block)
if block in self.mesh_renderer.block_id_to_name:
block_display_name += ' : ' + self.mesh_renderer.block_id_to_name[block]
item = QtGui.QStandardItem(str(block_display_name))
item.exodus_block = block
item.setFlags(QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemIsUserCheckable)
item.setCheckState(QtCore.Qt.Checked)
self.block_view_model.appendRow(item)
for block_actor_name, block_actor in self.current_block_actors.items():
block_actor.show()
block_actor.showEdges()
block_names = []
for block_actor_id, block_actor in self.current_block_actors.items():
name = block_actor_id.strip(' ')
if int(name) in self.mesh_renderer.block_id_to_name:
name += ' : ' + self.mesh_renderer.block_id_to_name[int(name)]
block_names.append(name)
self.highlight_block_combo.addItem('')
for block_actor_name in sorted(block_names, key=lambda name: int(name.split(' ')[0])):
self.highlight_block_combo.addItem(str(block_actor_name))
sideset_names = []
for sideset_actor_id, sideset_actor in self.current_sideset_actors.items():
sideset_actor.setColor(red)
name = sideset_actor_id.strip(' ')
if int(name) in self.mesh_renderer.sideset_id_to_name:
name += ' : ' + self.mesh_renderer.sideset_id_to_name[int(name)]
sideset_names.append(name)
self.highlight_sideset_combo.addItem('')
for sideset_actor_name in sorted(sideset_names, key=lambda name: int(name.split(' ')[0])):
self.highlight_sideset_combo.addItem(sideset_actor_name)
nodeset_names = []
for nodeset_actor_id, nodeset_actor in self.current_nodeset_actors.items():
nodeset_actor.setColor(red)
name = nodeset_actor_id.strip(' ')
if int(name) in self.mesh_renderer.nodeset_id_to_name:
name += ' : ' + self.mesh_renderer.nodeset_id_to_name[int(name)]
nodeset_names.append(name)
self.highlight_nodeset_combo.addItem('')
for nodeset_actor_name in sorted(nodeset_names, key=lambda name: int(name.split(' ')[0])):
self.highlight_nodeset_combo.addItem(nodeset_actor_name)
self.highlight_block_combo.currentIndexChanged[str].connect(self.showBlockSelected)
self.highlight_sideset_combo.currentIndexChanged[str].connect(self.showSidesetSelected)
self.highlight_nodeset_combo.currentIndexChanged[str].connect(self.showNodesetSelected)
self.setBounds()
# Avoid z-buffer fighting
vtk.vtkPolyDataMapper().SetResolveCoincidentTopologyToPolygonOffset()
self.renderer.ResetCamera()
self.vtkwidget.repaint()
def setBounds(self):
for actor_name, actor in self.current_block_actors.items():
current_bounds = actor.getBounds()
self.bounds['x'][0] = min(self.bounds['x'][0], current_bounds[0])
self.bounds['x'][1] = max(self.bounds['x'][1], current_bounds[1])
self.bounds['y'][0] = min(self.bounds['y'][0], current_bounds[2])
self.bounds['y'][1] = max(self.bounds['y'][1], current_bounds[3])
self.bounds['z'][0] = min(self.bounds['z'][0], current_bounds[4])
self.bounds['z'][1] = max(self.bounds['z'][1], current_bounds[5])
def swapActors(self, current, new):
for old_name, old_actor in current.items():
new[old_name].sync(old_actor)
old_actor.hide()
def _blockViewItemChanged(self, item):
if item.checkState() == QtCore.Qt.Checked:
self.current_block_actors[str(item.exodus_block)].show()
else:
self.current_block_actors[str(item.exodus_block)].hide()
self.vtkwidget.repaint()
def _clippingToggled(self, value):
if value:
self.swapActors(self.current_block_actors, self.mesh_renderer.clipped_block_actors)
self.current_block_actors = self.mesh_renderer.clipped_block_actors
self.swapActors(self.current_sideset_actors, self.mesh_renderer.clipped_sideset_actors)
self.current_sideset_actors = self.mesh_renderer.clipped_sideset_actors
self.swapActors(self.current_nodeset_actors, self.mesh_renderer.clipped_nodeset_actors)
self.current_nodeset_actors = self.mesh_renderer.clipped_nodeset_actors
self._clipNormalChanged(self.clip_plane_combobox.currentText())
else:
self.swapActors(self.current_block_actors, self.mesh_renderer.block_actors)
self.current_block_actors = self.mesh_renderer.block_actors
self.swapActors(self.current_sideset_actors, self.mesh_renderer.sideset_actors)
self.current_sideset_actors = self.mesh_renderer.sideset_actors
self.swapActors(self.current_nodeset_actors, self.mesh_renderer.nodeset_actors)
self.current_nodeset_actors = self.mesh_renderer.nodeset_actors
self.vtkwidget.repaint()
def _clipNormalChanged(self, value):
self.plane.SetOrigin(self.bounds['x'][0],
self.bounds['y'][0],
self.bounds['z'][0])
if value == 'x':
self.plane.SetNormal(1, 0, 0)
elif value == 'y':
self.plane.SetNormal(0, 1, 0)
else:
self.plane.SetNormal(0, 0, 1)
self.clip_plane_slider.setSliderPosition(50)
self._clipSliderMoved(50)
def _clipSliderMoved(self, value):
direction = str(self.clip_plane_combobox.currentText())
step_size = (self.bounds[direction][1] - self.bounds[direction][0])/100.0
steps = value
distance = float(steps)*step_size
position = self.bounds[direction][0] + distance
old = self.plane.GetOrigin()
self.plane.SetOrigin(position if direction == 'x' else old[0],
position if direction == 'y' else old[1],
position if direction == 'z' else old[2])
for actor_name, actor in self.current_sideset_actors.items():
actor.movePlane()
for actor_name, actor in self.current_nodeset_actors.items():
actor.movePlane()
for actor_name, actor in self.current_block_actors.items():
actor.movePlane()
self.vtkwidget.repaint()
def viewMeshCheckboxChanged(self, value):
if value == QtCore.Qt.Checked:
for actor_name, actor in self.current_sideset_actors.items():
actor.showEdges()
for actor_name, actor in self.current_nodeset_actors.items():
actor.showEdges()
for actor_name, actor in self.current_block_actors.items():
actor.showEdges()
else:
for actor_name, actor in self.current_sideset_actors.items():
actor.hideEdges()
for actor_name, actor in self.current_nodeset_actors.items():
actor.hideEdges()
for actor_name, actor in self.current_block_actors.items():
actor.hideEdges()
self.vtkwidget.repaint()
def clearBlockComboBox(self):
self.highlight_block_combo.currentIndexChanged[str].disconnect(self.showBlockSelected)
self.highlight_block_combo.setCurrentIndex(0)
self.highlight_block_combo.currentIndexChanged[str].connect(self.showBlockSelected)
def clearSidesetComboBox(self):
self.highlight_sideset_combo.currentIndexChanged[str].disconnect(self.showSidesetSelected)
self.highlight_sideset_combo.setCurrentIndex(0)
self.highlight_sideset_combo.currentIndexChanged[str].connect(self.showSidesetSelected)
def clearNodesetComboBox(self):
self.highlight_nodeset_combo.currentIndexChanged[str].disconnect(self.showNodesetSelected)
self.highlight_nodeset_combo.setCurrentIndex(0)
self.highlight_nodeset_combo.currentIndexChanged[str].connect(self.showNodesetSelected)
def showBlockSelected(self, block_name):
if block_name != '':
self.clearSidesetComboBox()
self.clearNodesetComboBox()
self.highlightBlock(str(block_name).split(' ')[0])
else:
self.clearActors()
def showSidesetSelected(self, sideset_name):
if sideset_name != '':
self.clearBlockComboBox()
self.clearNodesetComboBox()
self.highlightBoundary(str(sideset_name).split(' ')[0])
else:
self.clearActors()
def showNodesetSelected(self, nodeset_name):
if nodeset_name != '':
self.clearBlockComboBox()
self.clearSidesetComboBox()
self.highlightNodeset(str(nodeset_name).split(' ')[0])
else:
self.clearActors()
def highlightBoundary(self, boundary):
self.highlight_clear.setDisabled(False)
# Turn off all sidesets
for actor_name, actor in self.current_sideset_actors.items():
actor.hide()
# Turn off all nodesets
for actor_name, actor in self.current_nodeset_actors.items():
actor.hide()
# Turn solids to only edges... but only if they are visible
for actor_name, actor in self.current_block_actors.items():
actor.setColor(black)
actor.goWireframe()
boundaries = boundary.strip("'").split(' ')
for the_boundary in boundaries:
if the_boundary in self.current_sideset_actors:
self.current_sideset_actors[the_boundary].show()
elif the_boundary in self.current_nodeset_actors:
self.current_nodeset_actors[the_boundary].show()
elif the_boundary in self.mesh_renderer.name_to_sideset_id:
self.current_sideset_actors[str(self.mesh_renderer.name_to_sideset_id[the_boundary])].show()
elif the_boundary in self.mesh_renderer.name_to_nodeset_id:
self.current_nodeset_actors[str(self.mesh_renderer.name_to_nodeset_id[the_boundary])].show()
self.vtkwidget.repaint()
def highlightNodeset(self, boundary):
self.highlight_clear.setDisabled(False)
# Turn off all sidesets
for actor_name, actor in self.current_sideset_actors.items():
actor.hide()
# Turn off all nodesets
for actor_name, actor in self.current_nodeset_actors.items():
actor.hide()
# Turn solids to only edges... but only if they are visible
for actor_name, actor in self.current_block_actors.items():
actor.setColor(black)
actor.goWireframe()
boundaries = boundary.strip("'").split(' ')
for the_boundary in boundaries:
if the_boundary in self.current_nodeset_actors:
self.current_nodeset_actors[the_boundary].show()
elif the_boundary in self.mesh_renderer.name_to_nodeset_id:
self.current_nodeset_actors[str(self.mesh_renderer.name_to_nodeset_id[the_boundary])].show()
self.vtkwidget.repaint()
def highlightBlock(self, block):
self.highlight_clear.setDisabled(False)
# Turn off all sidesets
for actor_name, actor in self.current_sideset_actors.items():
actor.hide()
# Turn off all nodesets
for actor_name, actor in self.current_nodeset_actors.items():
actor.hide()
# Turn solids to only edges...
for actor_name, actor in self.current_block_actors.items():
actor.setColor(black)
actor.goWireframe()
blocks = block.strip("'").split(' ')
for the_block in blocks:
if the_block in self.current_block_actors:
self.current_block_actors[the_block].setColor(red)
self.current_block_actors[the_block].goSolid()
elif the_block in self.mesh_renderer.name_to_block_id:
self.current_block_actors[str(self.mesh_renderer.name_to_block_id[the_block])].setColor(red)
self.current_block_actors[str(self.mesh_renderer.name_to_block_id[the_block])].goSolid()
self.vtkwidget.repaint()
def clearActors(self):
# Turn off all sidesets
for actor_name, actor in self.current_sideset_actors.items():
actor.hide()
# Turn off all nodesets
for actor_name, actor in self.current_nodeset_actors.items():
actor.hide()
# Show solids and edges - but only if something is visible
for actor_name, actor in self.current_block_actors.items():
actor.setColor(white)
actor.goSolid()
self.vtkwidget.repaint()
def clearHighlight(self):
self.highlight_block_combo.setCurrentIndex(0)
self.highlight_sideset_combo.setCurrentIndex(0)
self.highlight_nodeset_combo.setCurrentIndex(0)
self.highlight_clear.setDisabled(True)
self.clearActors()
|
LazyCodingCat/gyp
|
refs/heads/master
|
pylib/gyp/generator/analyzer.py
|
88
|
# Copyright (c) 2014 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
This script is intended for use as a GYP_GENERATOR. It takes as input (by way of
the generator flag config_path) the path of a json file that dictates the files
and targets to search for. The following keys are supported:
files: list of paths (relative) of the files to search for.
targets: list of targets to search for. The target names are unqualified.
The following is output:
error: only supplied if there is an error.
targets: the set of targets passed in via targets that either directly or
indirectly depend upon the set of paths supplied in files.
build_targets: minimal set of targets that directly depend on the changed
files and need to be built. The expectation is this set of targets is passed
into a build step.
status: outputs one of three values: none of the supplied files were found,
one of the include files changed so that it should be assumed everything
changed (in this case targets and build_targets are not output) or at
least one file was found.
invalid_targets: list of supplied targets that were not found.
If the generator flag analyzer_output_path is specified, output is written
there. Otherwise output is written to stdout.
"""
import gyp.common
import gyp.ninja_syntax as ninja_syntax
import json
import os
import posixpath
import sys
debug = False
found_dependency_string = 'Found dependency'
no_dependency_string = 'No dependencies'
# Status when it should be assumed that everything has changed.
all_changed_string = 'Found dependency (all)'
# MatchStatus is used to indicate if and how a target depends upon the supplied
# sources.
# The target's sources contain one of the supplied paths.
MATCH_STATUS_MATCHES = 1
# The target has a dependency on another target that contains one of the
# supplied paths.
MATCH_STATUS_MATCHES_BY_DEPENDENCY = 2
# The target's sources weren't in the supplied paths and none of the target's
# dependencies depend upon a target that matched.
MATCH_STATUS_DOESNT_MATCH = 3
# The target doesn't contain the source, but the dependent targets have not yet
# been visited to determine a more specific status.
MATCH_STATUS_TBD = 4
generator_supports_multiple_toolsets = gyp.common.CrossCompileRequested()
generator_wants_static_library_dependencies_adjusted = False
generator_default_variables = {
}
for dirname in ['INTERMEDIATE_DIR', 'SHARED_INTERMEDIATE_DIR', 'PRODUCT_DIR',
'LIB_DIR', 'SHARED_LIB_DIR']:
generator_default_variables[dirname] = '!!!'
for unused in ['RULE_INPUT_PATH', 'RULE_INPUT_ROOT', 'RULE_INPUT_NAME',
'RULE_INPUT_DIRNAME', 'RULE_INPUT_EXT',
'EXECUTABLE_PREFIX', 'EXECUTABLE_SUFFIX',
'STATIC_LIB_PREFIX', 'STATIC_LIB_SUFFIX',
'SHARED_LIB_PREFIX', 'SHARED_LIB_SUFFIX',
'CONFIGURATION_NAME']:
generator_default_variables[unused] = ''
def _ToGypPath(path):
"""Converts a path to the format used by gyp."""
if os.sep == '\\' and os.altsep == '/':
return path.replace('\\', '/')
return path
def _ResolveParent(path, base_path_components):
"""Resolves |path|, which starts with at least one '../'. Returns an empty
string if the path shouldn't be considered. See _AddSources() for a
description of |base_path_components|."""
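  # Illustrative example: _ResolveParent('../../b/c', ['d1', 'd2', 'd3'])
  # strips the two '../' components and returns 'd1/b/c'.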
depth = 0
while path.startswith('../'):
depth += 1
path = path[3:]
# Relative includes may go outside the source tree. For example, an action may
# have inputs in /usr/include, which are not in the source tree.
if depth > len(base_path_components):
return ''
if depth == len(base_path_components):
return path
return '/'.join(base_path_components[0:len(base_path_components) - depth]) + \
'/' + path
def _AddSources(sources, base_path, base_path_components, result):
"""Extracts valid sources from |sources| and adds them to |result|. Each
source file is relative to |base_path|, but may contain '..'. To make
resolving '..' easier |base_path_components| contains each of the
directories in |base_path|. Additionally each source may contain variables.
Such sources are ignored as it is assumed dependencies on them are expressed
  and tracked by some other means."""
# NOTE: gyp paths are always posix style.
for source in sources:
if not len(source) or source.startswith('!!!') or source.startswith('$'):
continue
# variable expansion may lead to //.
org_source = source
source = source[0] + source[1:].replace('//', '/')
if source.startswith('../'):
source = _ResolveParent(source, base_path_components)
if len(source):
result.append(source)
continue
result.append(base_path + source)
if debug:
print 'AddSource', org_source, result[len(result) - 1]
def _ExtractSourcesFromAction(action, base_path, base_path_components,
results):
if 'inputs' in action:
_AddSources(action['inputs'], base_path, base_path_components, results)
def _ToLocalPath(toplevel_dir, path):
"""Converts |path| to a path relative to |toplevel_dir|."""
if path == toplevel_dir:
return ''
if path.startswith(toplevel_dir + '/'):
return path[len(toplevel_dir) + len('/'):]
return path
def _ExtractSources(target, target_dict, toplevel_dir):
# |target| is either absolute or relative and in the format of the OS. Gyp
# source paths are always posix. Convert |target| to a posix path relative to
# |toplevel_dir_|. This is done to make it easy to build source paths.
base_path = posixpath.dirname(_ToLocalPath(toplevel_dir, _ToGypPath(target)))
base_path_components = base_path.split('/')
# Add a trailing '/' so that _AddSources() can easily build paths.
if len(base_path):
base_path += '/'
if debug:
print 'ExtractSources', target, base_path
results = []
if 'sources' in target_dict:
_AddSources(target_dict['sources'], base_path, base_path_components,
results)
# Include the inputs from any actions. Any changes to these affect the
# resulting output.
if 'actions' in target_dict:
for action in target_dict['actions']:
_ExtractSourcesFromAction(action, base_path, base_path_components,
results)
if 'rules' in target_dict:
for rule in target_dict['rules']:
_ExtractSourcesFromAction(rule, base_path, base_path_components, results)
return results
class Target(object):
"""Holds information about a particular target:
deps: set of Targets this Target depends upon. This is not recursive, only the
direct dependent Targets.
match_status: one of the MatchStatus values.
back_deps: set of Targets that have a dependency on this Target.
visited: used during iteration to indicate whether we've visited this target.
This is used for two iterations, once in building the set of Targets and
again in _GetBuildTargets().
name: fully qualified name of the target.
requires_build: True if the target type is such that it needs to be built.
See _DoesTargetTypeRequireBuild for details.
added_to_compile_targets: used when determining if the target was added to the
set of targets that needs to be built.
in_roots: true if this target is a descendant of one of the root nodes.
is_executable: true if the type of target is executable.
is_static_library: true if the type of target is static_library.
is_or_has_linked_ancestor: true if the target does a link (eg executable), or
if there is a target in back_deps that does a link."""
def __init__(self, name):
self.deps = set()
self.match_status = MATCH_STATUS_TBD
self.back_deps = set()
self.name = name
# TODO(sky): I don't like hanging this off Target. This state is specific
# to certain functions and should be isolated there.
self.visited = False
self.requires_build = False
self.added_to_compile_targets = False
self.in_roots = False
self.is_executable = False
self.is_static_library = False
self.is_or_has_linked_ancestor = False
class Config(object):
"""Details what we're looking for
files: set of files to search for
targets: see file description for details."""
def __init__(self):
self.files = []
self.targets = set()
def Init(self, params):
"""Initializes Config. This is a separate method as it raises an exception
if there is a parse error."""
generator_flags = params.get('generator_flags', {})
config_path = generator_flags.get('config_path', None)
if not config_path:
return
try:
f = open(config_path, 'r')
config = json.load(f)
f.close()
except IOError:
raise Exception('Unable to open file ' + config_path)
except ValueError as e:
      raise Exception('Unable to parse config file ' + config_path + ': ' + str(e))
if not isinstance(config, dict):
raise Exception('config_path must be a JSON file containing a dictionary')
self.files = config.get('files', [])
self.targets = set(config.get('targets', []))
def _WasBuildFileModified(build_file, data, files, toplevel_dir):
"""Returns true if the build file |build_file| is either in |files| or
one of the files included by |build_file| is in |files|. |toplevel_dir| is
the root of the source tree."""
if _ToLocalPath(toplevel_dir, _ToGypPath(build_file)) in files:
if debug:
print 'gyp file modified', build_file
return True
# First element of included_files is the file itself.
if len(data[build_file]['included_files']) <= 1:
return False
for include_file in data[build_file]['included_files'][1:]:
# |included_files| are relative to the directory of the |build_file|.
rel_include_file = \
_ToGypPath(gyp.common.UnrelativePath(include_file, build_file))
if _ToLocalPath(toplevel_dir, rel_include_file) in files:
if debug:
print 'included gyp file modified, gyp_file=', build_file, \
'included file=', rel_include_file
return True
return False
def _GetOrCreateTargetByName(targets, target_name):
"""Creates or returns the Target at targets[target_name]. If there is no
Target for |target_name| one is created. Returns a tuple of whether a new
Target was created and the Target."""
if target_name in targets:
return False, targets[target_name]
target = Target(target_name)
targets[target_name] = target
return True, target
def _DoesTargetTypeRequireBuild(target_dict):
"""Returns true if the target type is such that it needs to be built."""
# If a 'none' target has rules or actions we assume it requires a build.
return bool(target_dict['type'] != 'none' or
target_dict.get('actions') or target_dict.get('rules'))
def _GenerateTargets(data, target_list, target_dicts, toplevel_dir, files,
build_files):
"""Returns a tuple of the following:
. A dictionary mapping from fully qualified name to Target.
. A list of the targets that have a source file in |files|.
  . Set of root Targets reachable from the files |build_files|.
This sets the |match_status| of the targets that contain any of the source
files in |files| to MATCH_STATUS_MATCHES.
|toplevel_dir| is the root of the source tree."""
# Maps from target name to Target.
targets = {}
# Targets that matched.
matching_targets = []
# Queue of targets to visit.
targets_to_visit = target_list[:]
# Maps from build file to a boolean indicating whether the build file is in
# |files|.
build_file_in_files = {}
# Root targets across all files.
roots = set()
# Set of Targets in |build_files|.
build_file_targets = set()
while len(targets_to_visit) > 0:
target_name = targets_to_visit.pop()
created_target, target = _GetOrCreateTargetByName(targets, target_name)
if created_target:
roots.add(target)
elif target.visited:
continue
target.visited = True
target.requires_build = _DoesTargetTypeRequireBuild(
target_dicts[target_name])
target_type = target_dicts[target_name]['type']
target.is_executable = target_type == 'executable'
target.is_static_library = target_type == 'static_library'
target.is_or_has_linked_ancestor = (target_type == 'executable' or
target_type == 'shared_library')
build_file = gyp.common.ParseQualifiedTarget(target_name)[0]
if not build_file in build_file_in_files:
build_file_in_files[build_file] = \
_WasBuildFileModified(build_file, data, files, toplevel_dir)
if build_file in build_files:
build_file_targets.add(target)
# If a build file (or any of its included files) is modified we assume all
# targets in the file are modified.
if build_file_in_files[build_file]:
print 'matching target from modified build file', target_name
target.match_status = MATCH_STATUS_MATCHES
matching_targets.append(target)
else:
sources = _ExtractSources(target_name, target_dicts[target_name],
toplevel_dir)
for source in sources:
if source in files:
print 'target', target_name, 'matches', source
target.match_status = MATCH_STATUS_MATCHES
matching_targets.append(target)
break
# Add dependencies to visit as well as updating back pointers for deps.
for dep in target_dicts[target_name].get('dependencies', []):
targets_to_visit.append(dep)
created_dep_target, dep_target = _GetOrCreateTargetByName(targets, dep)
if not created_dep_target:
roots.discard(dep_target)
target.deps.add(dep_target)
dep_target.back_deps.add(target)
return targets, matching_targets, roots & build_file_targets
def _GetUnqualifiedToTargetMapping(all_targets, to_find):
"""Returns a mapping (dictionary) from unqualified name to Target for all the
Targets in |to_find|."""
result = {}
if not to_find:
return result
to_find = set(to_find)
for target_name in all_targets.keys():
extracted = gyp.common.ParseQualifiedTarget(target_name)
if len(extracted) > 1 and extracted[1] in to_find:
to_find.remove(extracted[1])
result[extracted[1]] = all_targets[target_name]
if not to_find:
return result
return result
def _DoesTargetDependOn(target):
"""Returns true if |target| or any of its dependencies matches the supplied
  set of paths. This updates |match_status| of the Targets as it recurses.
target: the Target to look for."""
if target.match_status == MATCH_STATUS_DOESNT_MATCH:
return False
if target.match_status == MATCH_STATUS_MATCHES or \
target.match_status == MATCH_STATUS_MATCHES_BY_DEPENDENCY:
return True
for dep in target.deps:
if _DoesTargetDependOn(dep):
target.match_status = MATCH_STATUS_MATCHES_BY_DEPENDENCY
print '\t', target.name, 'matches by dep', dep.name
return True
target.match_status = MATCH_STATUS_DOESNT_MATCH
return False
def _GetTargetsDependingOn(possible_targets):
"""Returns the list of Targets in |possible_targets| that depend (either
  directly or indirectly) on the matched targets.
possible_targets: targets to search from."""
found = []
print 'Targets that matched by dependency:'
for target in possible_targets:
if _DoesTargetDependOn(target):
found.append(target)
return found
def _AddBuildTargets(target, roots, add_if_no_ancestor, result):
"""Recurses through all targets that depend on |target|, adding all targets
that need to be built (and are in |roots|) to |result|.
roots: set of root targets.
add_if_no_ancestor: If true and there are no ancestors of |target| then add
|target| to |result|. |target| must still be in |roots|.
result: targets that need to be built are added here."""
if target.visited:
return
target.visited = True
target.in_roots = not target.back_deps and target in roots
for back_dep_target in target.back_deps:
_AddBuildTargets(back_dep_target, roots, False, result)
target.added_to_compile_targets |= back_dep_target.added_to_compile_targets
target.in_roots |= back_dep_target.in_roots
target.is_or_has_linked_ancestor |= (
back_dep_target.is_or_has_linked_ancestor)
# Always add 'executable' targets. Even though they may be built by other
# targets that depend upon them it makes detection of what is going to be
# built easier.
# And always add static_libraries that have no dependencies on them from
# linkables. This is necessary as the other dependencies on them may be
# static libraries themselves, which are not compile time dependencies.
if target.in_roots and \
(target.is_executable or
(not target.added_to_compile_targets and
(add_if_no_ancestor or target.requires_build)) or
(target.is_static_library and add_if_no_ancestor and
not target.is_or_has_linked_ancestor)):
print '\t\tadding to build targets', target.name, 'executable', \
target.is_executable, 'added_to_compile_targets', \
target.added_to_compile_targets, 'add_if_no_ancestor', \
add_if_no_ancestor, 'requires_build', target.requires_build, \
'is_static_library', target.is_static_library, \
'is_or_has_linked_ancestor', target.is_or_has_linked_ancestor
result.add(target)
target.added_to_compile_targets = True
def _GetBuildTargets(matching_targets, roots):
"""Returns the set of Targets that require a build.
matching_targets: targets that changed and need to be built.
roots: set of root targets in the build files to search from."""
result = set()
for target in matching_targets:
print '\tfinding build targets for match', target.name
_AddBuildTargets(target, roots, True, result)
return result
def _WriteOutput(params, **values):
"""Writes the output, either to stdout or a file is specified."""
if 'error' in values:
print 'Error:', values['error']
if 'status' in values:
print values['status']
if 'targets' in values:
values['targets'].sort()
print 'Supplied targets that depend on changed files:'
for target in values['targets']:
print '\t', target
if 'invalid_targets' in values:
values['invalid_targets'].sort()
print 'The following targets were not found:'
for target in values['invalid_targets']:
print '\t', target
if 'build_targets' in values:
values['build_targets'].sort()
print 'Targets that require a build:'
for target in values['build_targets']:
print '\t', target
output_path = params.get('generator_flags', {}).get(
'analyzer_output_path', None)
if not output_path:
print json.dumps(values)
return
try:
f = open(output_path, 'w')
f.write(json.dumps(values) + '\n')
f.close()
except IOError as e:
print 'Error writing to output file', output_path, str(e)
def _WasGypIncludeFileModified(params, files):
"""Returns true if one of the files in |files| is in the set of included
files."""
if params['options'].includes:
for include in params['options'].includes:
if _ToGypPath(include) in files:
print 'Include file modified, assuming all changed', include
return True
return False
def _NamesNotIn(names, mapping):
"""Returns a list of the values in |names| that are not in |mapping|."""
return [name for name in names if name not in mapping]
def _LookupTargets(names, mapping):
"""Returns a list of the mapping[name] for each value in |names| that is in
|mapping|."""
return [mapping[name] for name in names if name in mapping]
def CalculateVariables(default_variables, params):
"""Calculate additional variables for use in the build (called by gyp)."""
flavor = gyp.common.GetFlavor(params)
if flavor == 'mac':
default_variables.setdefault('OS', 'mac')
elif flavor == 'win':
default_variables.setdefault('OS', 'win')
# Copy additional generator configuration data from VS, which is shared
# by the Windows Ninja generator.
import gyp.generator.msvs as msvs_generator
generator_additional_non_configuration_keys = getattr(msvs_generator,
'generator_additional_non_configuration_keys', [])
generator_additional_path_sections = getattr(msvs_generator,
'generator_additional_path_sections', [])
gyp.msvs_emulation.CalculateCommonVariables(default_variables, params)
else:
operating_system = flavor
if flavor == 'android':
operating_system = 'linux' # Keep this legacy behavior for now.
default_variables.setdefault('OS', operating_system)
def GenerateOutput(target_list, target_dicts, data, params):
"""Called by gyp as the final stage. Outputs results."""
config = Config()
try:
config.Init(params)
if not config.files:
raise Exception('Must specify files to analyze via config_path generator '
'flag')
toplevel_dir = _ToGypPath(os.path.abspath(params['options'].toplevel_dir))
if debug:
print 'toplevel_dir', toplevel_dir
if _WasGypIncludeFileModified(params, config.files):
result_dict = { 'status': all_changed_string,
'targets': list(config.targets) }
_WriteOutput(params, **result_dict)
return
all_targets, matching_targets, roots = _GenerateTargets(
data, target_list, target_dicts, toplevel_dir, frozenset(config.files),
params['build_files'])
print 'roots:'
for root in roots:
print '\t', root.name
unqualified_mapping = _GetUnqualifiedToTargetMapping(all_targets,
config.targets)
invalid_targets = None
if len(unqualified_mapping) != len(config.targets):
invalid_targets = _NamesNotIn(config.targets, unqualified_mapping)
if matching_targets:
search_targets = _LookupTargets(config.targets, unqualified_mapping)
print 'supplied targets'
for target in config.targets:
print '\t', target
print 'expanded supplied targets'
for target in search_targets:
print '\t', target.name
matched_search_targets = _GetTargetsDependingOn(search_targets)
print 'raw matched search targets:'
for target in matched_search_targets:
print '\t', target.name
# Reset the visited status for _GetBuildTargets.
for target in all_targets.itervalues():
target.visited = False
print 'Finding build targets'
build_targets = _GetBuildTargets(matching_targets, roots)
matched_search_targets = [gyp.common.ParseQualifiedTarget(target.name)[1]
for target in matched_search_targets]
build_targets = [gyp.common.ParseQualifiedTarget(target.name)[1]
for target in build_targets]
else:
matched_search_targets = []
build_targets = []
result_dict = { 'targets': matched_search_targets,
'status': found_dependency_string if matching_targets else
no_dependency_string,
'build_targets': build_targets}
if invalid_targets:
result_dict['invalid_targets'] = invalid_targets
_WriteOutput(params, **result_dict)
except Exception as e:
_WriteOutput(params, error=str(e))
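# A minimal invocation sketch (assumed command line, not part of this module): the
# analyzer generator is normally driven through gyp itself, with the files and
# targets to analyze supplied as JSON via the config_path generator flag that
# Config.Init reads above, e.g.
#   gyp --format=analyzer -G config_path=analyzer_config.json foo.gyp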
|
major684/ud858-master
|
refs/heads/master
|
Lesson_4/00_Conference_Central/models.py
|
86
|
#!/usr/bin/env python
"""models.py
Udacity conference server-side Python App Engine data & ProtoRPC models
$Id: models.py,v 1.1 2014/05/24 22:01:10 wesc Exp $
created/forked from conferences.py by wesc on 2014 may 24
"""
__author__ = 'wesc+api@google.com (Wesley Chun)'
import httplib
import endpoints
from protorpc import messages
from google.appengine.ext import ndb
class Profile(ndb.Model):
"""Profile -- User profile object"""
displayName = ndb.StringProperty()
mainEmail = ndb.StringProperty()
teeShirtSize = ndb.StringProperty(default='NOT_SPECIFIED')
class ProfileMiniForm(messages.Message):
"""ProfileMiniForm -- update Profile form message"""
displayName = messages.StringField(1)
teeShirtSize = messages.EnumField('TeeShirtSize', 2)
class ProfileForm(messages.Message):
"""ProfileForm -- Profile outbound form message"""
displayName = messages.StringField(1)
mainEmail = messages.StringField(2)
teeShirtSize = messages.EnumField('TeeShirtSize', 3)
class TeeShirtSize(messages.Enum):
"""TeeShirtSize -- t-shirt size enumeration value"""
NOT_SPECIFIED = 1
XS_M = 2
XS_W = 3
S_M = 4
S_W = 5
M_M = 6
M_W = 7
L_M = 8
L_W = 9
XL_M = 10
XL_W = 11
XXL_M = 12
XXL_W = 13
XXXL_M = 14
XXXL_W = 15
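# A minimal usage sketch (hypothetical values; requires an App Engine / ndb context):
#   profile = Profile(displayName='Ada', mainEmail='ada@example.com',
#                     teeShirtSize=str(TeeShirtSize.M_W))
#   key = profile.put()
#   form = ProfileForm(displayName=profile.displayName,
#                      mainEmail=profile.mainEmail,
#                      teeShirtSize=TeeShirtSize.M_W)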
|
devs1991/test_edx_docmode
|
refs/heads/master
|
lms/tests.py
|
72
|
"""Tests for the lms module itself."""
import mimetypes
from mock import patch
from django.test import TestCase
from django.core.urlresolvers import reverse
from edxmako import add_lookup, LOOKUP
from lms import startup
from xmodule.modulestore.tests.factories import CourseFactory
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
class LmsModuleTests(TestCase):
"""
Tests for lms module itself.
"""
def test_new_mimetypes(self):
extensions = ['eot', 'otf', 'ttf', 'woff']
for extension in extensions:
mimetype, _ = mimetypes.guess_type('test.' + extension)
self.assertIsNotNone(mimetype)
class TemplateLookupTests(TestCase):
"""
Tests for TemplateLookup.
"""
def test_add_lookup_to_main(self):
"""Test that any template directories added are not cleared when microsites are enabled."""
add_lookup('main', 'external_module', __name__)
directories = LOOKUP['main'].directories
self.assertEqual(len([directory for directory in directories if 'external_module' in directory]), 1)
# This should not clear the directories list
startup.enable_microsites()
directories = LOOKUP['main'].directories
self.assertEqual(len([directory for directory in directories if 'external_module' in directory]), 1)
@patch.dict('django.conf.settings.FEATURES', {'ENABLE_FEEDBACK_SUBMISSION': True})
class HelpModalTests(ModuleStoreTestCase):
"""Tests for the help modal"""
def setUp(self):
super(HelpModalTests, self).setUp()
self.course = CourseFactory.create()
def test_simple_test(self):
"""
Simple test to make sure that you don't get a 500 error when the modal
is enabled.
"""
url = reverse('info', args=[self.course.id.to_deprecated_string()])
resp = self.client.get(url)
self.assertEqual(resp.status_code, 200)
|
ojengwa/odoo
|
refs/heads/8.0
|
addons/website_crm_partner_assign/controllers/__init__.py
|
7372
|
import main
|
audaciouscode/Books-Mac-OS-X
|
refs/heads/master
|
Versions/Books_3.0b3/Library of Congress.plugin/Contents/Resources/PyZ3950_parsetab.py
|
30
|
# PyZ3950_parsetab.py
# This file is automatically generated. Do not edit.
_lr_method = 'SLR'
_lr_signature = '\xfc\xb2\xa8\xb7\xd9\xe7\xad\xba"\xb2Ss\'\xcd\x08\x16'
_lr_action_items = {'QUOTEDVALUE':([18,12,14,0,26,],[1,1,1,1,1,]),'LOGOP':([3,5,20,4,6,27,19,24,25,13,22,1,],[-5,-8,-4,-14,14,14,14,-9,-6,-13,-7,-12,]),'SET':([12,14,0,26,],[10,10,10,10,]),'WORD':([12,14,0,5,18,13,24,4,16,15,1,26,],[4,4,4,13,4,-13,13,-14,22,21,-12,4,]),'$':([2,5,3,7,28,25,13,1,4,6,22,20,24,],[-1,-8,-5,0,-3,-6,-13,-12,-14,-2,-7,-4,-9,]),'SLASH':([21,],[26,]),'ATTRSET':([0,],[8,]),'QUAL':([26,17,14,0,12,],[9,23,9,9,9,]),'COMMA':([23,9,11,],[-11,-10,17,]),'LPAREN':([26,14,0,8,12,],[12,12,12,15,12,]),'RPAREN':([19,3,22,1,25,27,5,13,20,4,24,],[25,-5,-7,-12,-6,28,-8,-13,-4,-14,-9,]),'RELOP':([9,11,23,10,],[-10,18,-11,16,]),}
_lr_action = { }
for _k, _v in _lr_action_items.items():
for _x,_y in zip(_v[0],_v[1]):
_lr_action[(_x,_k)] = _y
del _lr_action_items
_lr_goto_items = {'cclfind_or_attrset':([0,],[2,]),'elements':([12,14,26,0,],[3,20,3,3,]),'val':([12,18,14,26,0,],[5,24,5,5,5,]),'top':([0,],[7,]),'cclfind':([12,0,26,],[19,6,27,]),'quallist':([26,12,14,0,],[11,11,11,11,]),}
_lr_goto = { }
for _k, _v in _lr_goto_items.items():
for _x,_y in zip(_v[0],_v[1]):
_lr_goto[(_x,_k)] = _y
del _lr_goto_items
_lr_productions = [
("S'",1,None,None,None),
('top',1,'p_top','/Users/cjkarr/Desktop/python-z3950-importer/PyZ3950/ccl.py',170),
('cclfind_or_attrset',1,'p_cclfind_or_attrset_1','/Users/cjkarr/Desktop/python-z3950-importer/PyZ3950/ccl.py',174),
('cclfind_or_attrset',6,'p_cclfind_or_attrset_2','/Users/cjkarr/Desktop/python-z3950-importer/PyZ3950/ccl.py',178),
('cclfind',3,'p_ccl_find_1','/Users/cjkarr/Desktop/python-z3950-importer/PyZ3950/ccl.py',182),
('cclfind',1,'p_ccl_find_2','/Users/cjkarr/Desktop/python-z3950-importer/PyZ3950/ccl.py',186),
('elements',3,'p_elements_1','/Users/cjkarr/Desktop/python-z3950-importer/PyZ3950/ccl.py',190),
('elements',3,'p_elements_2','/Users/cjkarr/Desktop/python-z3950-importer/PyZ3950/ccl.py',212),
('elements',1,'p_elements_3','/Users/cjkarr/Desktop/python-z3950-importer/PyZ3950/ccl.py',218),
('elements',3,'p_elements_4','/Users/cjkarr/Desktop/python-z3950-importer/PyZ3950/ccl.py',222),
('quallist',1,'p_quallist_1','/Users/cjkarr/Desktop/python-z3950-importer/PyZ3950/ccl.py',229),
('quallist',3,'p_quallist_2','/Users/cjkarr/Desktop/python-z3950-importer/PyZ3950/ccl.py',233),
('val',1,'p_val_1','/Users/cjkarr/Desktop/python-z3950-importer/PyZ3950/ccl.py',237),
('val',2,'p_val_2','/Users/cjkarr/Desktop/python-z3950-importer/PyZ3950/ccl.py',241),
('val',1,'p_val_3','/Users/cjkarr/Desktop/python-z3950-importer/PyZ3950/ccl.py',245),
]
|
dfdx2/django
|
refs/heads/master
|
tests/migrations/test_migrations_squashed_ref_squashed/app1/4_auto.py
|
133
|
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [("app1", "2_squashed_3")]
|
openhatch/oh-mainline
|
refs/heads/master
|
vendor/packages/webtest/webtest/debugapp.py
|
26
|
import os
import six
import webob
__all__ = ['DebugApp', 'make_debug_app']
class DebugApp(object):
"""The WSGI application used for testing"""
def __init__(self, form=None, show_form=False):
if form and os.path.isfile(form):
fd = open(form, 'rb')
self.form = fd.read()
fd.close()
else:
self.form = form
self.show_form = show_form
def __call__(self, environ, start_response):
req = webob.Request(environ)
if req.path_info == '/form.html' and req.method == 'GET':
resp = webob.Response(content_type='text/html')
resp.body = self.form
return resp(environ, start_response)
if 'error' in req.GET:
raise Exception('Exception requested')
if 'errorlog' in req.GET:
log = req.GET['errorlog']
if not six.PY3 and not isinstance(log, six.binary_type):
log = log.encode('utf8')
req.environ['wsgi.errors'].write(log)
status = str(req.GET.get('status', '200 OK'))
parts = []
if not self.show_form:
for name, value in sorted(environ.items()):
if name.upper() != name:
value = repr(value)
parts.append(str('%s: %s\n') % (name, value))
body = ''.join(parts)
if not isinstance(body, six.binary_type):
body = body.encode('ascii')
if req.content_length:
body += six.b('-- Body ----------\n')
body += req.body
else:
body = ''
for name, value in req.POST.items():
body += '%s=%s\n' % (name, value)
if status[:3] in ('204', '304') and not req.content_length:
body = ''
headers = [
('Content-Type', str('text/plain')),
('Content-Length', str(len(body)))]
if not self.show_form:
for name, value in req.GET.items():
if name.startswith('header-'):
header_name = name[len('header-'):]
if isinstance(header_name, six.text_type):
header_name = str(header_name)
header_name = header_name.title()
headers.append((header_name, str(value)))
resp = webob.Response()
resp.status = status
resp.headers.update(headers)
if req.method != 'HEAD':
if isinstance(body, six.text_type):
resp.body = body.encode('utf8')
else:
resp.body = body
return resp(environ, start_response)
debug_app = DebugApp(form=six.b('''<html><body>
<form action="/form-submit" method="POST">
<input type="text" name="name">
<input type="submit" name="submit" value="Submit!">
</form></body></html>'''))
def make_debug_app(global_conf, **local_conf):
"""An application that displays the request environment, and does
nothing else (useful for debugging and test purposes).
"""
return DebugApp(**local_conf)
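# A minimal usage sketch (assumes the webtest package providing TestApp is installed):
#   from webtest import TestApp
#   app = TestApp(debug_app)
#   resp = app.get('/?status=200%20OK')
#   # resp.body echoes the WSGI environ, one "NAME: value" line per key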
|
vnsofthe/odoo
|
refs/heads/8.0
|
openerp/pooler.py
|
374
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
""" Functions kept for backward compatibility.
They are simple wrappers around the global RegistryManager's methods.
"""
import logging
import openerp.conf.deprecation
from openerp.modules.registry import RegistryManager
_logger = logging.getLogger(__name__)
def get_db_and_pool(db_name, force_demo=False, status=None, update_module=False):
"""Create and return a database connection and a newly initialized registry."""
assert openerp.conf.deprecation.openerp_pooler
_logger.warning('openerp.pooler.get_db_and_pool() is deprecated.')
registry = RegistryManager.get(db_name, force_demo, status, update_module)
return registry._db, registry
def restart_pool(db_name, force_demo=False, status=None, update_module=False):
"""Delete an existing registry and return a database connection and a newly initialized registry."""
_logger.warning('openerp.pooler.restart_pool() is deprecated.')
assert openerp.conf.deprecation.openerp_pooler
registry = RegistryManager.new(db_name, force_demo, status, update_module)
return registry._db, registry
def get_db(db_name):
"""Return a database connection. The corresponding registry is initialized."""
assert openerp.conf.deprecation.openerp_pooler
return get_db_and_pool(db_name)[0]
def get_pool(db_name, force_demo=False, status=None, update_module=False):
"""Return a model registry."""
assert openerp.conf.deprecation.openerp_pooler
return get_db_and_pool(db_name, force_demo, status, update_module)[1]
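# A minimal usage sketch of these deprecated wrappers (hypothetical database name):
#   db = get_db('my_database') # database connection only
#   registry = get_pool('my_database') # model registry only
#   db, registry = get_db_and_pool('my_database') # both at once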
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
SunguckLee/MariaDB-PageCompression
|
refs/heads/master
|
storage/tokudb/mysql-test/tokudb/locks-blocking-row-locks-testgen.py
|
47
|
# 9/23/2011 Generate blocking row lock tests
import datetime
# generate sql write queries
def mysqlgen_select_for_update(k, kv, c, cv):
print "select * from t where %s=%s for update;" % (k, kv)
def mysqlgen_select_for_update_range(k, c, where):
print "select * from t where %s%s for update;" % (k, where)
def mysqlgen_update(k, kv, c, cv):
print "update t set %s=%s where %s=%s;" % (c, c, k, kv);
def mysqlgen_update_range(k, c, where):
print "update t set %s=%s where %s%s;" % (c, c, k, where);
def mysqlgen_insert_ignore(k, kv, c, cv):
print "insert ignore t values(%s, %s);" % (kv, cv)
def mysqlgen_insert_on_dup_update(k, kv, c, cv):
print "insert t values(%s, %s) on duplicate key update %s=%s;" % (kv, cv, c, c)
def mysqlgen_replace(k, kv, c, cv):
print "replace t values(%s, %s);" % (kv, cv)
# generate sql read queries
def mysqlgen_select_star():
print "select * from t;"
def mysqlgen_select_where(k, where):
print "select * from t where %s%s;" % (k, where)
# mysql test code generation
def mysqlgen_prepare():
print "# prepare with some common parameters"
print "connect(conn1, localhost, root);"
print "set session transaction isolation level serializable;"
print "connect(conn2, localhost, root);"
print "set session transaction isolation level serializable;"
print "connection conn1;"
print ""
def mysqlgen_reload_table():
print "# drop old table, generate new one. 4 rows"
print "--disable_warnings"
print "drop table if exists t;"
print "--enable_warnings"
print "create table t (a int primary key, b int) engine=tokudb;"
for i in range(1, 7):
mysqlgen_insert_ignore("a", i, "b", i*i)
print ""
def mysqlgen_cleanup():
print "# clean it all up"
print "drop table t;"
print ""
write_point_queries = [
("select for update", mysqlgen_select_for_update),
("update", mysqlgen_update),
("insert", mysqlgen_insert_ignore),
("replace", mysqlgen_replace) ]
write_range_queries = [
("select for update", mysqlgen_select_for_update_range),
("update", mysqlgen_update_range) ]
timeouts = [0, 500]
# Here's where all the magic happens
print "# Tokutek"
print "# Blocking row lock tests;"
print "# Generated by %s on %s;" % (__file__, datetime.date.today())
print ""
mysqlgen_prepare()
mysqlgen_reload_table()
for timeout in timeouts:
print "# testing with timeout %s" % timeout
print "connection conn1;"
print "set session tokudb_lock_timeout=%s;" % timeout
print "connection conn2;"
print "set session tokudb_lock_timeout=%s;" % timeout
print ""
print "# testing each point query vs each point query"
for ta, qa in write_point_queries:
# point vs point contention
for tb, qb in write_point_queries:
print "# testing conflict \"%s\" vs. \"%s\"" % (ta, tb)
print "connection conn1;"
print "begin;"
print "# about to do qa.."
qa("a", "1", "b", "100")
print "connection conn2;"
for k in range(1, 5):
if k == 1:
print "--error ER_LOCK_WAIT_TIMEOUT"
qb("a", k, "b", "100")
# point write lock vs read query
print "# make sure we can't read that row, but can read others."
print "begin;"
print "--error ER_LOCK_WAIT_TIMEOUT"
mysqlgen_select_star()
print "--error ER_LOCK_WAIT_TIMEOUT"
mysqlgen_select_where("a", "=1")
mysqlgen_select_where("a", ">=2")
print "commit;"
# Always check in the end that a commit
# allows the other transaction full access
print "connection conn1;"
print "commit;"
print "connection conn2;"
qb("a", "1", "b", "100")
print "begin;"
mysqlgen_select_star()
print "commit;"
print "connection conn1;"
print ""
# test early commit
if timeout > 0:
print "# check that an early commit allows a blocked"
print "# transaction to complete"
print "connection conn1;"
print "begin;"
qa("a", "1", "b", "150")
print "connection conn2;"
# this makes the query asynchronous, so we can jump back
# to the conn1 connection and commit it.
print "send ",
qb("a", "1", "b", "175")
print "connection conn1;"
print "commit;"
print "connection conn2;"
print "reap;"
# point vs range contention
for rt, rq in write_range_queries:
print "# testing range query \"%s\" vs \"%s\"" % (rt, ta)
print "connection conn1;"
print "begin;"
print ""
qa("a", "1", "b", "100")
print "connection conn2;"
print "--error ER_LOCK_WAIT_TIMEOUT"
rq("a", "b", "<=2")
print "--error ER_LOCK_WAIT_TIMEOUT"
rq("a", "b", ">=0")
rq("a", "b", ">2")
# write range lock vs read query
print "# make sure we can't read that row, but can read others."
print "begin;"
print "--error ER_LOCK_WAIT_TIMEOUT"
mysqlgen_select_star()
print "--error ER_LOCK_WAIT_TIMEOUT"
mysqlgen_select_where("a", "=1")
mysqlgen_select_where("a", ">=2")
print "commit;"
# Always check in the end that a commit
# allows the other transaction full access
print "connection conn1;"
print "commit;"
print "connection conn2;"
rq("a", "b", "<=2")
rq("a", "b", ">=0")
print "begin;"
mysqlgen_select_star()
print "commit;"
print "connection conn1;"
print ""
# test early commit
if timeout > 0:
print "# check that an early commit allows a blocked"
print "# transaction to complete"
print "connection conn1;"
print "begin;"
qa("a", "1", "b", "150")
print "connection conn2;"
# this makes the query asynchronous, so we can jump back
# to the conn1 connection and commit it.
print "send ",
rq("a", "b", "<=2")
print "connection conn1;"
print "commit;"
print "connection conn2;"
print "reap;"
for rt, rq in write_range_queries:
for rtb, rqb in write_range_queries:
print "# testing range query \"%s\" vs range query \"%s\"" % (rt, rtb)
print "connection conn1;"
print "begin;"
print ""
rq("a", "b", ">=2 and a<=4")
print "connection conn2;"
print "--error ER_LOCK_WAIT_TIMEOUT"
rqb("a", "b", ">=0 and a<=3")
print "--error ER_LOCK_WAIT_TIMEOUT"
rqb("a", "b", ">=3 and a<=6")
print "--error ER_LOCK_WAIT_TIMEOUT"
rqb("a", "b", "<=2")
rqb("a", "b", ">=5")
# range write lock vs read query
print "# make sure we can't read that row, but can read others."
print "begin;"
print "--error ER_LOCK_WAIT_TIMEOUT"
mysqlgen_select_star()
print "--error ER_LOCK_WAIT_TIMEOUT"
mysqlgen_select_where("a", "=2")
print "--error ER_LOCK_WAIT_TIMEOUT"
mysqlgen_select_where("a", ">=3 and a<=5")
mysqlgen_select_where("a", ">=5")
print "commit;"
# Always check in the end that a commit
# allows the other transaction full access
print "connection conn1;"
print "commit;"
print "connection conn2;"
rqb("a", "b", ">=0 and a<=3")
rqb("a", "b", ">=3 and a<=6")
rqb("a", "b", "<=2")
print "begin;"
mysqlgen_select_star()
print "commit;"
print "connection conn1;"
print ""
# test early commit
if timeout > 0:
print "# check that an early commit allows a blocked"
print "# transaction to complete"
print "connection conn1;"
print "begin;"
rq("a", "b", ">=2 and a<=4")
print "connection conn2;"
# this makes the query asynchronous, so we can jump back
# to the conn1 connection and commit it.
print "send ",
rqb("a", "b", ">=0 and a<=3")
print "connection conn1;"
print "commit;"
print "connection conn2;"
print "reap;"
mysqlgen_cleanup()
|
virgree/odoo
|
refs/heads/8.0
|
openerp/addons/test_convert/tests/test_convert.py
|
382
|
import collections
import unittest2
from lxml import etree as ET
from lxml.builder import E
from openerp.tests import common
from openerp.tools.convert import _eval_xml
Field = E.field
Value = E.value
class TestEvalXML(common.TransactionCase):
def eval_xml(self, node, obj=None, idref=None):
return _eval_xml(obj, node, pool=None, cr=self.cr, uid=self.uid,
idref=idref, context=None)
def test_char(self):
self.assertEqual(
self.eval_xml(Field("foo")),
"foo")
self.assertEqual(
self.eval_xml(Field("None")),
"None")
def test_int(self):
self.assertIsNone(
self.eval_xml(Field("None", type='int')),
"what the fuck?")
self.assertEqual(
self.eval_xml(Field(" 42 ", type="int")),
42)
with self.assertRaises(ValueError):
self.eval_xml(Field("4.82", type="int"))
with self.assertRaises(ValueError):
self.eval_xml(Field("Whelp", type="int"))
def test_float(self):
self.assertEqual(
self.eval_xml(Field("4.78", type="float")),
4.78)
with self.assertRaises(ValueError):
self.eval_xml(Field("None", type="float"))
with self.assertRaises(ValueError):
self.eval_xml(Field("Foo", type="float"))
def test_list(self):
self.assertEqual(
self.eval_xml(Field(type="list")),
[])
self.assertEqual(
self.eval_xml(Field(
Value("foo"),
Value("5", type="int"),
Value("4.76", type="float"),
Value("None", type="int"),
type="list"
)),
["foo", 5, 4.76, None])
def test_file(self):
Obj = collections.namedtuple('Obj', 'module')
obj = Obj('test_convert')
self.assertEqual(
self.eval_xml(Field('test_file.txt', type='file'), obj),
'test_convert,test_file.txt')
with self.assertRaises(IOError):
self.eval_xml(Field('test_nofile.txt', type='file'), obj)
@unittest2.skip("not tested")
def test_xml(self):
pass
@unittest2.skip("not tested")
def test_html(self):
pass
|
camilonova/sentry
|
refs/heads/master
|
tests/sentry/tasks/test_options.py
|
24
|
from mock import patch
from datetime import timedelta
from sentry.models import Option
from sentry.options import default_manager
from sentry.tasks.options import sync_options
from sentry.testutils import TestCase
class SyncOptionsTest(TestCase):
def test_task_persistent_name(self):
assert sync_options.name == 'sentry.tasks.options.sync_options'
@patch.object(default_manager, 'update_cached_value')
def test_simple(self, mock_update_cached_value):
option = Option.objects.create(
key='foo',
value='bar',
)
sync_options(cutoff=60)
mock_update_cached_value.assert_called_once_with(key='foo', value='bar')
mock_update_cached_value.reset_mock()
option.update(last_updated=option.last_updated - timedelta(days=1))
sync_options(cutoff=60)
assert not mock_update_cached_value.called
|
SteveMcGrath/pySecurityCenter
|
refs/heads/master
|
examples/sc4/average_time_to_mitigate/avg_ttm.py
|
1
|
import securitycenter
HOST = 'HOSTNAME or IP_ADDRESS'
USER = 'USERNAME'
PASS = 'PASSWORD'
ASSET_ID = 81
def get_ttm(**filters):
sc = securitycenter.SecurityCenter4(HOST)
sc.login(USER, PASS)
data = sc.query('vulndetails', source='patched', **filters)
agg = 0
for item in data:
agg += int(item['lastSeen']) - int(item['firstSeen'])
avg = float(agg) / len(data)
print 'Average Hours to Mitigate : %d' % int(avg / 3600)
print 'Average Days to Mitigate : %s' % int(avg / 3600 / 24)
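# Sanity check of the arithmetic above (assumed numbers): a single finding whose
# lastSeen is 172800 seconds (2 days) after its firstSeen gives avg = 172800.0,
# which prints as 48 hours and 2 days to mitigate.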
if __name__ == '__main__':
get_ttm(assetID=ASSET_ID, severity='4,3,2,1')
|
iain-peddie/well-behaved-python
|
refs/heads/master
|
tests/WellBehavedPythonTests/Expectations/ContainerExpectationsTests.py
|
1
|
#!/usr/bin/env python3
# Copyright 2013 Iain Peddie inr314159@hotmail.com
#
# This file is part of WellBehavedPython
#
# WellBehavedPython is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# WellBehavedPython is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with WellBehavedPython. If not, see <http://www.gnu.org/licenses/>.
from WellBehavedPython.api import *
from WellBehavedPython.Engine.TestCase import *
class ContainerExpectationsTests(TestCase):
def test_expect_x_to_be_in_y_passes_when_x_is_in_y(self):
x = 602
y = [601, x, 603]
expect(x).toBeIn(y)
def test_expect_x_to_be_in_y_passes_when_item_equal_to_x_in_y(self):
# use numbers > 256 because of python internal behavior:
# all numbers < 255 are declared in the machine runtime and are always
# the same as each other. So x = 1; y = 1; leads to x is y being true.
# We don't want that in this test (otherwise we'd be duplicating tests),
# so we pick larger integers to do this with.
x = 602
y = [601, 602, 603]
expect(x).toBeIn(y)
def test_expect_x_to_be_in_y_raises_AssertionError_when_x_not_in_y(self):
x = 602
y = [601, 603, 605]
expect(lambda: expect(x).toBeIn(y)).toRaise(
AssertionError,
expectedMessage = "Expected 602 to be in [601, 603, 605]")
def test_expect_x_to_be_in_y_prepends_usermessage_when_condition_fails(self):
x = 602
y = [601, 603, 605]
expect(lambda: withUserMessage("userMessage").expect(x).toBeIn(y)).toRaise(
AssertionError,
expectedMessageMatches = "^userMessage")
def expect_y_to_contain_x_passes_when_x_in_y(self):
x = 602
y = [601, x, 603]
expect(y).toContain(x)
def expect_y_to_contain_x_passes_when_item_equal_to_x_in_y(self):
x = 602
y = [601, 602, 603]
expect(y).toContain(x)
def test_expect_y_to_contain_x_fails_when_x_not_in_y(self):
x = 602
y = [601, 603, 605]
expect(lambda: expect(y).toContain(x)).toRaise(
AssertionError,
expectedMessage = "Expected [601, 603, 605] to contain 602")
def test_expect_y_to_contain_x_prepends_usermessage_to_message(self):
x = 602
y = [601, 603, 605]
expect(lambda: withUserMessage("userMessage").expect(y).toContain(x)).toRaise(
AssertionError,
expectedMessageMatches = "^userMessage")
def test_expect_0_to_be_superset_of_empty_passes(self):
expect([1]).toBeASupersetOf(())
def test_expect_01_to_be_superset_of_0_and_superset_of_1(self):
expect([0, 1]).toBeASupersetOf([0])
expect([0, 1]).toBeASupersetOf([1])
def test_expect_0_to_be_a_superset_of_1_fails(self):
expect(lambda: expect([0]).toBeASupersetOf(1)).toRaise(
AssertionError,
expectedMessage = "Expected [0] to be a superset of 1")
def test_expect_00_to_be_a_superset_of_empty_passes(self):
expect([0, 0]).toBeASupersetOf(())
def test_expect_0_to_be_a_superset_of_00_passes(self):
expect([0, 0]).toBeASupersetOf([0])
def test_toBeASuperset_prepends_userMessage(self):
expect(lambda: withUserMessage("userMessage").expect([0]).toBeASupersetOf(1)).toRaise(
AssertionError,
expectedMessageMatches = "^userMessage: ")
def test_expect_empty_list_to_be_a_subset_of_1_passes(self):
expect([]).toBeASubsetOf([1])
def test_expect_0_and_1_to_be_subsets_of_01_pass(self):
expect([0]).toBeASubsetOf([0, 1])
expect([1]).toBeASubsetOf([0, 1])
def test_expect_0_to_be_a_subset_of_1_fails(self):
expect(lambda: expect([0]).toBeASubsetOf([1])).toRaise(
AssertionError,
expectedMessage = "Expected [0] to be a subset of [1]")
def test_toBeASubset_prepends_userMessage(self):
expect(lambda: withUserMessage("userMessage").expect([0]).toBeASubsetOf([1])).toRaise(
AssertionError,
expectedMessageMatches = "^userMessage: ")
def test_expect_two_empty_lists_to_be_equal_passes(self):
expect([]).toEqual([])
def test_expect_two_empty_tuplet_to_be_equal_passes(self):
expect(tuple()).toEqual(tuple())
def test_expect_two_nonempty_identical_lists_to_be_equal_passes(self):
expect([1]).toEqual([1])
def test_expect_two_nonempty_nonidentical_lists_of_the_same_length_to_be_equal_fails(self):
expect(lambda:
expect([0]).toEqual([1])).toRaise(
AssertionError,
expectedMessage = """Expected [0] to equal [1]
First difference at index 0: 0 != 1""")
def test_containers_of_unequal_length_get_length_mismatch_message(self):
expect(lambda: expect([0]).toEqual([])).toRaise(
AssertionError,
expectedMessage = "Expected [0] to be a container of length 0")
def test_expect_container_equals_prepends_user_message_when_containers_equal_length(self):
expect(lambda:
withUserMessage("userMessage").expect([0]).toEqual([1])).toRaise(
AssertionError,
expectedMessageMatches = "^userMessage")
def test_expect_container_equals_prepends_user_message_when_containers_unequal_length(self):
expect(lambda:
withUserMessage("userMessage").expect([0]).toEqual([])).toRaise(
AssertionError,
expectedMessageMatches = "^userMessage")
def test_tuple_compares_to_equivalent_list(self):
expect((1, 2)).toEqual([1, 2])
class ContainerNotExpectationsTests(TestCase):
def test_expect_not_x_to_be_in_y_passes_when_x_is_not_in_y(self):
x = 602
y = [601, 603, 605]
expect(x).Not.toBeIn(y)
def test_expect_not_x_to_be_in_y_raises_AssertionError_when_x_in_y(self):
x = 602
y = [601, x, 603]
expect(lambda: expect(x).Not.toBeIn(y)).toRaise(
AssertionError,
expectedMessage = "Expected 602 not to be in [601, 602, 603]")
def test_expect_not_x_to_be_in_y_raises_AssertionError_when_item_equal_to_x_in_y(self):
x = 602
y = [601, 602, 603]
expect(lambda: expect(x).Not.toBeIn(y)).toRaise(
AssertionError,
expectedMessage = "Expected 602 not to be in [601, 602, 603]")
def test_expect_not_x_to_be_in_y_prepends_usermessage_on_failure(self):
x = 602
y = [601, 602, 603]
expect(lambda: withUserMessage("userMessage").expect(x).Not.toBeIn(y)).toRaise(
AssertionError,
expectedMessageMatches = "^userMessage")
def test_expect_not_y_to_contain_x_passes_when_x_not_in_y(self):
x = 602
y = [601, 603, 605]
expect(y).Not.toContain(x)
def test_expect_not_y_to_contain_x_fails_when_x_in_y(self):
x = 602
y = [601, 602, 603]
expect(lambda: expect(y).Not.toContain(x)).toRaise(
AssertionError,
expectedMessage = "Expected [601, 602, 603] not to contain 602")
def test_expect_not_y_to_contain_x_prepends_usermessage(self):
x = 602
y = [601, 602, 603]
expect(lambda: expect(y).withUserMessage("user message").Not.toContain(x)).toRaise(
AssertionError,
expectedMessageMatches = "^user message")
def test_expect_0_not_to_be_a_superset_of_empty_fails(self):
expect(lambda: expect([1]).Not.toBeASupersetOf(())).toRaise(
AssertionError,
expectedMessage = "Expected [1] not to be a superset of ()")
def test_expect_01_to_be_superset_of_0_and_superset_of_1(self):
expect(lambda: expect([0, 1]).Not.toBeASupersetOf([0])).toRaise(
AssertionError,
expectedMessage = "Expected [0, 1] not to be a superset of [0]")
expect(lambda: expect([0, 1]).Not.toBeASupersetOf([1])).toRaise(
AssertionError,
expectedMessage = "Expected [0, 1] not to be a superset of [1]")
def test_expect_0_to_be_a_superset_of_1_fails(self):
expect([0]).Not.toBeASupersetOf(1)
def test_toBeASuperset_prepends_userMessage(self):
expect(lambda: withUserMessage("userMessage").expect([0, 1]).Not.toBeASupersetOf([0])).toRaise(
AssertionError,
expectedMessageMatches = "^userMessage: ")
def test_0_not_toBeASubset_of_empty_passes(self):
expect([0]).Not.toBeASubsetOf([])
def test_0_not_to_beASubset_of_01_fails(self):
expect(lambda: expect([0]).Not.toBeASubsetOf([0, 1])).toRaise(
AssertionError,
expectedMessage = "Expected [0] not to be a subset of [0, 1]")
def test_not_to_beASubset_prepends_userMessage(self):
expect(lambda: withUserMessage("userMessage").expect([0]).Not.toBeASubsetOf([0, 1])).toRaise(
AssertionError,
expectedMessageMatches = "^userMessage: ")
|
Dahlgren/HTPC-Manager
|
refs/heads/master2
|
libs/sqlobject/manager/__init__.py
|
9480
|
#
|
nerith/servo
|
refs/heads/master
|
tests/wpt/css-tests/tools/html5lib/html5lib/trie/__init__.py
|
1735
|
from __future__ import absolute_import, division, unicode_literals
from .py import Trie as PyTrie
Trie = PyTrie
try:
from .datrie import Trie as DATrie
except ImportError:
pass
else:
Trie = DATrie
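# A minimal usage sketch (assumed keys/values; the Trie exposes a mapping-style API):
#   t = Trie({u"abc": 1, u"abcd": 2}) # construct from a dict of text keys
#   u"abc" in t # -> True
#   t.longest_prefix(u"abcdef") # assumed helper; returns the longest matching key, u"abcd"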
|
matthaywardwebdesign/rethinkdb
|
refs/heads/next
|
test/rdb_workloads/stress_workloads/x_between.py
|
50
|
#!/usr/bin/env python
import sys, os, datetime, x_stress_util
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir, 'common')))
import utils
r = utils.import_python_driver()
class Workload:
def __init__(self, options):
self.db = options["db"]
self.table = options["table"]
self.time_dist = x_stress_util.TimeDistribution(os.getenv("X_END_DATE"), os.getenv("X_DATE_INTERVAL"))
def run(self, conn):
(start_date, end_date) = self.time_dist.get()
time_1 = r.time(start_date.year, start_date.month, start_date.day, 'Z')
time_2 = r.time(end_date.year, end_date.month, end_date.day, 'Z')
cursor = r.db(self.db).table(self.table).between(time_1, time_2, index="datetime").count().run(conn)
return {}
|
rodrigob/visiongrader
|
refs/heads/master
|
src/measures.py
|
1
|
#!/usr/bin/python
# Copyright (C) 2011 Pierre Sermanet <pierre.sermanet@gmail.com>
#
# This file is part of visiongrader.
#
# visiongrader is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# visiongrader is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with visiongrader. If not, see <http://www.gnu.org/licenses/>.
#
# Authors :
# Pierre Sermanet <pierre.sermanet@gmail.com>
# Compute maximum possible area under curve (AUC),
# assuming a rectangle area of height y0 and width x1 - x0
def auc_max(x0, y0, x1):
return (x1 - x0) * y0
# Return the percentage of AUC over its maximum possible area, times 100.
def auc_percent(points, x0, y0, x1):
return auc(points, x0, y0, x1) / auc_max(x0, y0, x1) * 100
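# A minimal worked example (assumed inputs, not from the original code): for a flat
# curve of height 1.0 over [0, 1] the area equals the maximum rectangle, so
# auc_percent returns ~100. Note that y values are stored negated, as auc() expects.
#   points = [(0.0, -1.0), (1.0, -1.0)]
#   auc(points, 0.0, 1.0, 1.0) # ~1.0 == auc_max(0.0, 1.0, 1.0)
#   auc_percent(points, 0.0, 1.0, 1.0) # ~100.0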
# Return the area under curve (AUC) of curve defined by 'points'
# in the [x0, x1] x-range and [0, y0] y-range.
def auc(points, x0, y0, x1):
# we assume that points are ordered in increasing x order
minx = points[0][0]
minxy = -points[0][1]
maxx = points[len(points) - 1][0]
maxxy = -points[len(points) - 1][1]
# print label + ": min x: " + str(minx) + " (y: " + str(minxy) \
# + ") max x: " + str(maxx) + " (y: " + str(maxxy) + ")"
auc = 0
# build integral of curve
# special cases: integral's limits
# TODO: we assume x0 is always 0 and points don't go below 0 for now,
# handle the case where points fall below x0's value
if minx > x0: # fill integral gap between x0 and minx with y0
auc += y0 * (minx - x0)
# loop on all points
p0x = minx
p0y = minxy
for p in points:
p1x = p[0]
p1y = -p[1]
# stop if p1x is beyond x1
if p1x >= x1:
# interpolate point to the x limit
y1 = p0y + (p1y - p0y) * (x1 - p0x)
auc += ((x1 - p0x) / 2) * (y1 + p0y)
# stop loop
break
# update auc with trapezoid
auc += ((p1x - p0x) / 2) * (p1y + p0y)
# shift p1 to p0
p0x = p1x
p0y = p1y
# special case: end limit
if p1x < x1: # fill integral gap between maxx and x1 with maxxy
auc += p1y * (x1 - p1x)
return auc
|
Saurabh7/shogun
|
refs/heads/master
|
examples/undocumented/python_modular/graphical/svr_sinc.py
|
16
|
from pylab import figure,pcolor,scatter,contour,colorbar,show,subplot,plot,legend, connect
from modshogun import *
import util
util.set_title('SVR on Sinus')
X, Y=util.get_sinedata()
C=10
width=0.5
epsilon=0.01
feat = RealFeatures(X)
lab = RegressionLabels(Y.flatten())
gk=GaussianKernel(feat,feat, width)
#svr = SVRLight(C, epsilon, gk, lab)
svr = LibSVR(C, epsilon, gk, lab)
svr.train()
plot(X, Y, '.', label='train data')
plot(X[0], svr.apply().get_labels(), hold=True, label='train output')
XE, YE=util.compute_output_plot_isolines_sine(svr, gk, feat, regression=True)
plot(XE[0], YE, hold=True, label='test output')
connect('key_press_event', util.quit)
show()
|
vidyacraghav/cplusdratchio
|
refs/heads/master
|
deps/boost_1_55_0/tools/build/v2/util/utility.py
|
49
|
# (C) Copyright David Abrahams 2001. Permission to copy, use, modify, sell and
# distribute this software is granted provided this copyright notice appears in
# all copies. This software is provided "as is" without express or implied
# warranty, and with no claim as to its suitability for any purpose.
""" Utility functions to add/remove/get grists.
Grists are strings enclosed in angle brackets (<>) that are used as prefixes. See Jam for more information.
"""
import re
import os
import bjam
from b2.exceptions import *
__re_grist_and_value = re.compile (r'(<[^>]*>)(.*)')
__re_grist_content = re.compile ('^<(.*)>$')
__re_backslash = re.compile (r'\\')
def to_seq (value):
""" If value is a sequence, returns it.
If it is a string, returns a sequence with value as its sole element.
"""
if not value:
return []
if isinstance (value, str):
return [value]
else:
return value
def replace_references_by_objects (manager, refs):
objs = []
for r in refs:
objs.append (manager.get_object (r))
return objs
def add_grist (features):
""" Transform a string by bracketing it with "<>". If already bracketed, does nothing.
features: one string or a sequence of strings
return: the gristed string, if features is a string, or a sequence of gristed strings, if features is a sequence
"""
def grist_one (feature):
if feature [0] != '<' and feature [len (feature) - 1] != '>':
return '<' + feature + '>'
else:
return feature
if isinstance (features, str):
return grist_one (features)
else:
return [ grist_one (feature) for feature in features ]
def replace_grist (features, new_grist):
""" Replaces the grist of a string by a new one.
Returns the string with the new grist.
"""
def replace_grist_one (name, new_grist):
split = __re_grist_and_value.match (name)
if not split:
return new_grist + name
else:
return new_grist + split.group (2)
if isinstance (features, str):
return replace_grist_one (features, new_grist)
else:
return [ replace_grist_one (feature, new_grist) for feature in features ]
def get_value (property):
""" Gets the value of a property, that is, the part following the grist, if any.
"""
return replace_grist (property, '')
def get_grist (value):
""" Returns the grist of a string.
If value is a sequence, does it for every value and returns the result as a sequence.
"""
def get_grist_one (name):
split = __re_grist_and_value.match (name)
if not split:
return ''
else:
return split.group (1)
if isinstance (value, str):
return get_grist_one (value)
else:
return [ get_grist_one (v) for v in value ]
def ungrist (value):
""" Returns the value without grist.
If value is a sequence, does it for every value and returns the result as a sequence.
"""
def ungrist_one (value):
stripped = __re_grist_content.match (value)
if not stripped:
raise BaseException ("in ungrist: '%s' is not of the form <.*>" % value)
return stripped.group (1)
if isinstance (value, str):
return ungrist_one (value)
else:
return [ ungrist_one (v) for v in value ]
def replace_suffix (name, new_suffix):
""" Replaces the suffix of name by new_suffix.
If no suffix exists, the new one is added.
"""
split = os.path.splitext (name)
return split [0] + new_suffix
def forward_slashes (s):
""" Converts all backslashes to forward slashes.
"""
return __re_backslash.sub ('/', s)
def split_action_id (id):
""" Splits an id in the toolset and specific rule parts. E.g.
'gcc.compile.c++' returns ('gcc', 'compile.c++')
"""
split = id.split ('.', 1)
toolset = split [0]
name = ''
if len (split) > 1:
name = split [1]
return (toolset, name)
def os_name ():
result = bjam.variable("OS")
assert(len(result) == 1)
return result[0]
def platform ():
return bjam.variable("OSPLAT")
def os_version ():
return bjam.variable("OSVER")
def on_windows ():
""" Returns true if running on windows, whether in cygwin or not.
"""
if bjam.variable("NT"):
return True
elif bjam.variable("UNIX"):
uname = bjam.variable("JAMUNAME")
if uname and uname[0].startswith("CYGWIN"):
return True
return False
|
slozier/ironpython2
|
refs/heads/master
|
Src/StdLib/Lib/test/test_complex.py
|
4
|
import unittest
from test import test_support
from random import random
from math import atan2, isnan, copysign
INF = float("inf")
NAN = float("nan")
# These tests ensure that complex math does the right thing
# decorator for skipping tests on non-IEEE 754 platforms
have_getformat = hasattr(float, "__getformat__")
requires_IEEE_754 = unittest.skipUnless(have_getformat and
float.__getformat__("double").startswith("IEEE"),
"test requires IEEE 754 doubles")
class ComplexTest(unittest.TestCase):
def assertAlmostEqual(self, a, b):
if isinstance(a, complex):
if isinstance(b, complex):
unittest.TestCase.assertAlmostEqual(self, a.real, b.real)
unittest.TestCase.assertAlmostEqual(self, a.imag, b.imag)
else:
unittest.TestCase.assertAlmostEqual(self, a.real, b)
unittest.TestCase.assertAlmostEqual(self, a.imag, 0.)
else:
if isinstance(b, complex):
unittest.TestCase.assertAlmostEqual(self, a, b.real)
unittest.TestCase.assertAlmostEqual(self, 0., b.imag)
else:
unittest.TestCase.assertAlmostEqual(self, a, b)
def assertCloseAbs(self, x, y, eps=1e-9):
"""Return true iff floats x and y "are close"."""
# put the one with larger magnitude second
if abs(x) > abs(y):
x, y = y, x
if y == 0:
return abs(x) < eps
if x == 0:
return abs(y) < eps
# check that relative difference < eps
self.assertTrue(abs((x-y)/y) < eps)
def assertFloatsAreIdentical(self, x, y):
"""assert that floats x and y are identical, in the sense that:
(1) both x and y are nans, or
(2) both x and y are infinities, with the same sign, or
(3) both x and y are zeros, with the same sign, or
(4) x and y are both finite and nonzero, and x == y
"""
msg = 'floats {!r} and {!r} are not identical'
if isnan(x) or isnan(y):
if isnan(x) and isnan(y):
return
elif x == y:
if x != 0.0:
return
# both zero; check that signs match
elif copysign(1.0, x) == copysign(1.0, y):
return
else:
msg += ': zeros have different signs'
self.fail(msg.format(x, y))
def assertClose(self, x, y, eps=1e-9):
"""Return true iff complexes x and y "are close"."""
self.assertCloseAbs(x.real, y.real, eps)
self.assertCloseAbs(x.imag, y.imag, eps)
def check_div(self, x, y):
"""Compute complex z=x*y, and check that z/x==y and z/y==x."""
z = x * y
if x != 0:
q = z / x
self.assertClose(q, y)
q = z.__div__(x)
self.assertClose(q, y)
q = z.__truediv__(x)
self.assertClose(q, y)
if y != 0:
q = z / y
self.assertClose(q, x)
q = z.__div__(y)
self.assertClose(q, x)
q = z.__truediv__(y)
self.assertClose(q, x)
def test_div(self):
simple_real = [float(i) for i in xrange(-5, 6)]
simple_complex = [complex(x, y) for x in simple_real for y in simple_real]
for x in simple_complex:
for y in simple_complex:
self.check_div(x, y)
# A naive complex division algorithm (such as in 2.0) is very prone to
# nonsense errors for these (overflows and underflows).
self.check_div(complex(1e200, 1e200), 1+0j)
self.check_div(complex(1e-200, 1e-200), 1+0j)
# Just for fun.
for i in xrange(100):
self.check_div(complex(random(), random()),
complex(random(), random()))
self.assertRaises(ZeroDivisionError, complex.__div__, 1+1j, 0+0j)
# FIXME: The following currently crashes on Alpha
# self.assertRaises(OverflowError, pow, 1e200+1j, 1e200+1j)
def test_truediv(self):
self.assertAlmostEqual(complex.__truediv__(2+0j, 1+1j), 1-1j)
self.assertRaises(ZeroDivisionError, complex.__truediv__, 1+1j, 0+0j)
for denom_real, denom_imag in [(0, NAN), (NAN, 0), (NAN, NAN)]:
z = complex(0, 0) / complex(denom_real, denom_imag)
self.assertTrue(isnan(z.real))
self.assertTrue(isnan(z.imag))
def test_floordiv(self):
self.assertAlmostEqual(complex.__floordiv__(3+0j, 1.5+0j), 2)
self.assertRaises(ZeroDivisionError, complex.__floordiv__, 3+0j, 0+0j)
def test_coerce(self):
self.assertRaises(OverflowError, complex.__coerce__, 1+1j, 1L<<10000)
def test_no_implicit_coerce(self):
# Python 2.7 removed implicit coercion from the complex type
class A(object):
def __coerce__(self, other):
raise RuntimeError
__hash__ = None
def __cmp__(self, other):
return -1
a = A()
self.assertRaises(TypeError, lambda: a + 2.0j)
self.assertTrue(a < 2.0j)
def test_richcompare(self):
self.assertEqual(complex.__eq__(1+1j, 1L<<10000), False)
self.assertEqual(complex.__lt__(1+1j, None), NotImplemented)
self.assertIs(complex.__eq__(1+1j, 1+1j), True)
self.assertIs(complex.__eq__(1+1j, 2+2j), False)
self.assertIs(complex.__ne__(1+1j, 1+1j), False)
self.assertIs(complex.__ne__(1+1j, 2+2j), True)
self.assertRaises(TypeError, complex.__lt__, 1+1j, 2+2j)
self.assertRaises(TypeError, complex.__le__, 1+1j, 2+2j)
self.assertRaises(TypeError, complex.__gt__, 1+1j, 2+2j)
self.assertRaises(TypeError, complex.__ge__, 1+1j, 2+2j)
def test_richcompare_boundaries(self):
def check(n, deltas, is_equal, imag = 0.0):
for delta in deltas:
i = n + delta
z = complex(i, imag)
self.assertIs(complex.__eq__(z, i), is_equal(delta))
self.assertIs(complex.__ne__(z, i), not is_equal(delta))
# For IEEE-754 doubles the following should hold:
# x in [2 ** (52 + i), 2 ** (53 + i + 1)] -> x mod 2 ** i == 0
# where the interval is representable, of course.
for i in range(1, 10):
pow = 52 + i
mult = 2 ** i
check(2 ** pow, range(1, 101), lambda delta: delta % mult == 0)
check(2 ** pow, range(1, 101), lambda delta: False, float(i))
check(2 ** 53, range(-100, 0), lambda delta: True)
def test_mod(self):
self.assertRaises(ZeroDivisionError, (1+1j).__mod__, 0+0j)
a = 3.33+4.43j
try:
a % 0
except ZeroDivisionError:
pass
else:
self.fail("modulo parama can't be 0")
def test_divmod(self):
self.assertRaises(ZeroDivisionError, divmod, 1+1j, 0+0j)
def test_pow(self):
self.assertAlmostEqual(pow(1+1j, 0+0j), 1.0)
self.assertAlmostEqual(pow(0+0j, 2+0j), 0.0)
self.assertRaises(ZeroDivisionError, pow, 0+0j, 1j)
self.assertAlmostEqual(pow(1j, -1), 1/1j)
self.assertAlmostEqual(pow(1j, 200), 1)
self.assertRaises(ValueError, pow, 1+1j, 1+1j, 1+1j)
a = 3.33+4.43j
self.assertEqual(a ** 0j, 1)
self.assertEqual(a ** 0.+0.j, 1)
self.assertEqual(3j ** 0j, 1)
self.assertEqual(3j ** 0, 1)
try:
0j ** a
except ZeroDivisionError:
pass
else:
self.fail("should fail 0.0 to negative or complex power")
try:
0j ** (3-2j)
except ZeroDivisionError:
pass
else:
self.fail("should fail 0.0 to negative or complex power")
# The following is used to exercise certain code paths
self.assertEqual(a ** 105, a ** 105)
self.assertEqual(a ** -105, a ** -105)
self.assertEqual(a ** -30, a ** -30)
self.assertEqual(0.0j ** 0, 1)
b = 5.1+2.3j
self.assertRaises(ValueError, pow, a, b, 0)
def test_boolcontext(self):
for i in xrange(100):
self.assertTrue(complex(random() + 1e-6, random() + 1e-6))
self.assertTrue(not complex(0.0, 0.0))
def test_conjugate(self):
self.assertClose(complex(5.3, 9.8).conjugate(), 5.3-9.8j)
def test_constructor(self):
class OS:
def __init__(self, value): self.value = value
def __complex__(self): return self.value
class NS(object):
def __init__(self, value): self.value = value
def __complex__(self): return self.value
self.assertEqual(complex(OS(1+10j)), 1+10j)
self.assertEqual(complex(NS(1+10j)), 1+10j)
self.assertRaises(TypeError, complex, OS(None))
self.assertRaises(TypeError, complex, NS(None))
self.assertAlmostEqual(complex("1+10j"), 1+10j)
self.assertAlmostEqual(complex(10), 10+0j)
self.assertAlmostEqual(complex(10.0), 10+0j)
self.assertAlmostEqual(complex(10L), 10+0j)
self.assertAlmostEqual(complex(10+0j), 10+0j)
self.assertAlmostEqual(complex(1,10), 1+10j)
self.assertAlmostEqual(complex(1,10L), 1+10j)
self.assertAlmostEqual(complex(1,10.0), 1+10j)
self.assertAlmostEqual(complex(1L,10), 1+10j)
self.assertAlmostEqual(complex(1L,10L), 1+10j)
self.assertAlmostEqual(complex(1L,10.0), 1+10j)
self.assertAlmostEqual(complex(1.0,10), 1+10j)
self.assertAlmostEqual(complex(1.0,10L), 1+10j)
self.assertAlmostEqual(complex(1.0,10.0), 1+10j)
self.assertAlmostEqual(complex(3.14+0j), 3.14+0j)
self.assertAlmostEqual(complex(3.14), 3.14+0j)
self.assertAlmostEqual(complex(314), 314.0+0j)
self.assertAlmostEqual(complex(314L), 314.0+0j)
self.assertAlmostEqual(complex(3.14+0j, 0j), 3.14+0j)
self.assertAlmostEqual(complex(3.14, 0.0), 3.14+0j)
self.assertAlmostEqual(complex(314, 0), 314.0+0j)
self.assertAlmostEqual(complex(314L, 0L), 314.0+0j)
self.assertAlmostEqual(complex(0j, 3.14j), -3.14+0j)
self.assertAlmostEqual(complex(0.0, 3.14j), -3.14+0j)
self.assertAlmostEqual(complex(0j, 3.14), 3.14j)
self.assertAlmostEqual(complex(0.0, 3.14), 3.14j)
self.assertAlmostEqual(complex("1"), 1+0j)
self.assertAlmostEqual(complex("1j"), 1j)
self.assertAlmostEqual(complex(), 0)
self.assertAlmostEqual(complex("-1"), -1)
self.assertAlmostEqual(complex("+1"), +1)
self.assertAlmostEqual(complex("(1+2j)"), 1+2j)
self.assertAlmostEqual(complex("(1.3+2.2j)"), 1.3+2.2j)
self.assertAlmostEqual(complex("3.14+1J"), 3.14+1j)
self.assertAlmostEqual(complex(" ( +3.14-6J )"), 3.14-6j)
self.assertAlmostEqual(complex(" ( +3.14-J )"), 3.14-1j)
self.assertAlmostEqual(complex(" ( +3.14+j )"), 3.14+1j)
self.assertAlmostEqual(complex("J"), 1j)
self.assertAlmostEqual(complex("( j )"), 1j)
self.assertAlmostEqual(complex("+J"), 1j)
self.assertAlmostEqual(complex("( -j)"), -1j)
self.assertAlmostEqual(complex('1e-500'), 0.0 + 0.0j)
self.assertAlmostEqual(complex('-1e-500j'), 0.0 - 0.0j)
self.assertAlmostEqual(complex('-1e-500+1e-500j'), -0.0 + 0.0j)
class complex2(complex): pass
self.assertAlmostEqual(complex(complex2(1+1j)), 1+1j)
self.assertAlmostEqual(complex(real=17, imag=23), 17+23j)
self.assertAlmostEqual(complex(real=17+23j), 17+23j)
self.assertAlmostEqual(complex(real=17+23j, imag=23), 17+46j)
self.assertAlmostEqual(complex(real=1+2j, imag=3+4j), -3+5j)
# check that the sign of a zero in the real or imaginary part
# is preserved when constructing from two floats. (These checks
# are harmless on systems without support for signed zeros.)
def split_zeros(x):
"""Function that produces different results for 0. and -0."""
return atan2(x, -1.)
self.assertEqual(split_zeros(complex(1., 0.).imag), split_zeros(0.))
self.assertEqual(split_zeros(complex(1., -0.).imag), split_zeros(-0.))
self.assertEqual(split_zeros(complex(0., 1.).real), split_zeros(0.))
self.assertEqual(split_zeros(complex(-0., 1.).real), split_zeros(-0.))
c = 3.14 + 1j
self.assertTrue(complex(c) is c)
del c
self.assertRaises(TypeError, complex, "1", "1")
self.assertRaises(TypeError, complex, 1, "1")
if test_support.have_unicode:
self.assertEqual(complex(unicode(" 3.14+J ")), 3.14+1j)
# SF bug 543840: complex(string) accepts strings with \0
# Fixed in 2.3.
self.assertRaises(ValueError, complex, '1+1j\0j')
self.assertRaises(TypeError, int, 5+3j)
self.assertRaises(TypeError, long, 5+3j)
self.assertRaises(TypeError, float, 5+3j)
self.assertRaises(ValueError, complex, "")
self.assertRaises(TypeError, complex, None)
self.assertRaises(ValueError, complex, "\0")
self.assertRaises(ValueError, complex, "3\09")
self.assertRaises(TypeError, complex, "1", "2")
self.assertRaises(TypeError, complex, "1", 42)
self.assertRaises(TypeError, complex, 1, "2")
self.assertRaises(ValueError, complex, "1+")
self.assertRaises(ValueError, complex, "1+1j+1j")
self.assertRaises(ValueError, complex, "--")
self.assertRaises(ValueError, complex, "(1+2j")
self.assertRaises(ValueError, complex, "1+2j)")
self.assertRaises(ValueError, complex, "1+(2j)")
self.assertRaises(ValueError, complex, "(1+2j)123")
if test_support.have_unicode:
self.assertRaises(ValueError, complex, unicode("x"))
self.assertRaises(ValueError, complex, "1j+2")
self.assertRaises(ValueError, complex, "1e1ej")
self.assertRaises(ValueError, complex, "1e++1ej")
self.assertRaises(ValueError, complex, ")1+2j(")
# the following three are accepted by Python 2.6
self.assertRaises(ValueError, complex, "1..1j")
self.assertRaises(ValueError, complex, "1.11.1j")
self.assertRaises(ValueError, complex, "1e1.1j")
if test_support.have_unicode:
# check that complex accepts long unicode strings
self.assertEqual(type(complex(unicode("1"*500))), complex)
class EvilExc(Exception):
pass
class evilcomplex:
def __complex__(self):
raise EvilExc
self.assertRaises(EvilExc, complex, evilcomplex())
class float2:
def __init__(self, value):
self.value = value
def __float__(self):
return self.value
self.assertAlmostEqual(complex(float2(42.)), 42)
self.assertAlmostEqual(complex(real=float2(17.), imag=float2(23.)), 17+23j)
self.assertRaises(TypeError, complex, float2(None))
class complex0(complex):
"""Test usage of __complex__() when inheriting from 'complex'"""
def __complex__(self):
return 42j
class complex1(complex):
"""Test usage of __complex__() with a __new__() method"""
def __new__(self, value=0j):
return complex.__new__(self, 2*value)
def __complex__(self):
return self
class complex2(complex):
"""Make sure that __complex__() calls fail if anything other than a
complex is returned"""
def __complex__(self):
return None
self.assertAlmostEqual(complex(complex0(1j)), 42j)
self.assertAlmostEqual(complex(complex1(1j)), 2j)
self.assertRaises(TypeError, complex, complex2(1j))
def test_subclass(self):
class xcomplex(complex):
def __add__(self,other):
return xcomplex(complex(self) + other)
__radd__ = __add__
def __sub__(self,other):
return xcomplex(complex(self) + other)
__rsub__ = __sub__
def __mul__(self,other):
return xcomplex(complex(self) * other)
__rmul__ = __mul__
def __div__(self,other):
return xcomplex(complex(self) / other)
def __rdiv__(self,other):
return xcomplex(other / complex(self))
__truediv__ = __div__
__rtruediv__ = __rdiv__
def __floordiv__(self,other):
return xcomplex(complex(self) // other)
def __rfloordiv__(self,other):
return xcomplex(other // complex(self))
def __pow__(self,other):
return xcomplex(complex(self) ** other)
def __rpow__(self,other):
return xcomplex(other ** complex(self) )
def __mod__(self,other):
return xcomplex(complex(self) % other)
def __rmod__(self,other):
return xcomplex(other % complex(self))
infix_binops = ('+', '-', '*', '**', '%', '//', '/')
xcomplex_values = (xcomplex(1), xcomplex(123.0),
xcomplex(-10+2j), xcomplex(3+187j),
xcomplex(3-78j))
test_values = (1, 123.0, 10-19j, xcomplex(1+2j),
xcomplex(1+87j), xcomplex(10+90j))
for op in infix_binops:
for x in xcomplex_values:
for y in test_values:
a = 'x %s y' % op
b = 'y %s x' % op
self.assertTrue(type(eval(a)) is type(eval(b)) is xcomplex)
@requires_IEEE_754
def test_constructor_special_numbers(self):
class complex2(complex):
pass
for x in 0.0, -0.0, INF, -INF, NAN:
for y in 0.0, -0.0, INF, -INF, NAN:
z = complex(x, y)
self.assertFloatsAreIdentical(z.real, x)
self.assertFloatsAreIdentical(z.imag, y)
z = complex2(x, y)
self.assertIs(type(z), complex2)
self.assertFloatsAreIdentical(z.real, x)
self.assertFloatsAreIdentical(z.imag, y)
z = complex(complex2(x, y))
self.assertIs(type(z), complex)
self.assertFloatsAreIdentical(z.real, x)
self.assertFloatsAreIdentical(z.imag, y)
z = complex2(complex(x, y))
self.assertIs(type(z), complex2)
self.assertFloatsAreIdentical(z.real, x)
self.assertFloatsAreIdentical(z.imag, y)
def test_hash(self):
for x in xrange(-30, 30):
self.assertEqual(hash(x), hash(complex(x, 0)))
x /= 3.0 # now check against floating point
self.assertEqual(hash(x), hash(complex(x, 0.)))
def test_abs(self):
nums = [complex(x/3., y/7.) for x in xrange(-9,9) for y in xrange(-9,9)]
for num in nums:
self.assertAlmostEqual((num.real**2 + num.imag**2) ** 0.5, abs(num))
def test_repr(self):
self.assertEqual(repr(1+6j), '(1+6j)')
self.assertEqual(repr(1-6j), '(1-6j)')
self.assertNotEqual(repr(-(1+0j)), '(-1+-0j)')
self.assertEqual(1-6j,complex(repr(1-6j)))
self.assertEqual(1+6j,complex(repr(1+6j)))
self.assertEqual(-6j,complex(repr(-6j)))
self.assertEqual(6j,complex(repr(6j)))
self.assertEqual(repr(complex(1., INF)), "(1+infj)")
self.assertEqual(repr(complex(1., -INF)), "(1-infj)")
self.assertEqual(repr(complex(INF, 1)), "(inf+1j)")
self.assertEqual(repr(complex(-INF, INF)), "(-inf+infj)")
self.assertEqual(repr(complex(NAN, 1)), "(nan+1j)")
self.assertEqual(repr(complex(1, NAN)), "(1+nanj)")
self.assertEqual(repr(complex(NAN, NAN)), "(nan+nanj)")
self.assertEqual(repr(complex(0, INF)), "infj")
self.assertEqual(repr(complex(0, -INF)), "-infj")
self.assertEqual(repr(complex(0, NAN)), "nanj")
def test_neg(self):
self.assertEqual(-(1+6j), -1-6j)
def test_file(self):
a = 3.33+4.43j
b = 5.1+2.3j
fo = None
try:
fo = open(test_support.TESTFN, "wb")
print >>fo, a, b
fo.close()
fo = open(test_support.TESTFN, "rb")
self.assertEqual(fo.read(), "%s %s\n" % (a, b))
finally:
if (fo is not None) and (not fo.closed):
fo.close()
test_support.unlink(test_support.TESTFN)
def test_getnewargs(self):
self.assertEqual((1+2j).__getnewargs__(), (1.0, 2.0))
self.assertEqual((1-2j).__getnewargs__(), (1.0, -2.0))
self.assertEqual((2j).__getnewargs__(), (0.0, 2.0))
self.assertEqual((-0j).__getnewargs__(), (0.0, -0.0))
self.assertEqual(complex(0, INF).__getnewargs__(), (0.0, INF))
self.assertEqual(complex(INF, 0).__getnewargs__(), (INF, 0.0))
if float.__getformat__("double").startswith("IEEE"):
def test_plus_minus_0j(self):
# test that -0j and 0j literals are not identified
z1, z2 = 0j, -0j
self.assertEqual(atan2(z1.imag, -1.), atan2(0., -1.))
self.assertEqual(atan2(z2.imag, -1.), atan2(-0., -1.))
@unittest.skipUnless(float.__getformat__("double").startswith("IEEE"),
"test requires IEEE 754 doubles")
def test_overflow(self):
self.assertEqual(complex("1e500"), complex(INF, 0.0))
self.assertEqual(complex("-1e500j"), complex(0.0, -INF))
self.assertEqual(complex("-1e500+1.8e308j"), complex(-INF, INF))
@unittest.skipUnless(float.__getformat__("double").startswith("IEEE"),
"test requires IEEE 754 doubles")
def test_repr_roundtrip(self):
vals = [0.0, 1e-500, 1e-315, 1e-200, 0.0123, 3.1415, 1e50, INF, NAN]
vals += [-v for v in vals]
# complex(repr(z)) should recover z exactly, even for complex
# numbers involving an infinity, nan, or negative zero
for x in vals:
for y in vals:
z = complex(x, y)
roundtrip = complex(repr(z))
self.assertFloatsAreIdentical(z.real, roundtrip.real)
self.assertFloatsAreIdentical(z.imag, roundtrip.imag)
# if we predefine some constants, then eval(repr(z)) should
# also work, except that it might change the sign of zeros
inf, nan = float('inf'), float('nan')
infj, nanj = complex(0.0, inf), complex(0.0, nan)
for x in vals:
for y in vals:
z = complex(x, y)
roundtrip = eval(repr(z))
# adding 0.0 has no effect besides changing -0.0 to 0.0
self.assertFloatsAreIdentical(0.0 + z.real,
0.0 + roundtrip.real)
self.assertFloatsAreIdentical(0.0 + z.imag,
0.0 + roundtrip.imag)
def test_format(self):
# empty format string is same as str()
self.assertEqual(format(1+3j, ''), str(1+3j))
self.assertEqual(format(1.5+3.5j, ''), str(1.5+3.5j))
self.assertEqual(format(3j, ''), str(3j))
self.assertEqual(format(3.2j, ''), str(3.2j))
self.assertEqual(format(3+0j, ''), str(3+0j))
self.assertEqual(format(3.2+0j, ''), str(3.2+0j))
# empty presentation type should still be analogous to str,
# even when format string is nonempty (issue #5920).
self.assertEqual(format(3.2+0j, '-'), str(3.2+0j))
self.assertEqual(format(3.2+0j, '<'), str(3.2+0j))
z = 4/7. - 100j/7.
self.assertEqual(format(z, ''), str(z))
self.assertEqual(format(z, '-'), str(z))
self.assertEqual(format(z, '<'), str(z))
self.assertEqual(format(z, '10'), str(z))
z = complex(0.0, 3.0)
self.assertEqual(format(z, ''), str(z))
self.assertEqual(format(z, '-'), str(z))
self.assertEqual(format(z, '<'), str(z))
self.assertEqual(format(z, '2'), str(z))
z = complex(-0.0, 2.0)
self.assertEqual(format(z, ''), str(z))
self.assertEqual(format(z, '-'), str(z))
self.assertEqual(format(z, '<'), str(z))
self.assertEqual(format(z, '3'), str(z))
self.assertEqual(format(1+3j, 'g'), '1+3j')
self.assertEqual(format(3j, 'g'), '0+3j')
self.assertEqual(format(1.5+3.5j, 'g'), '1.5+3.5j')
self.assertEqual(format(1.5+3.5j, '+g'), '+1.5+3.5j')
self.assertEqual(format(1.5-3.5j, '+g'), '+1.5-3.5j')
self.assertEqual(format(1.5-3.5j, '-g'), '1.5-3.5j')
self.assertEqual(format(1.5+3.5j, ' g'), ' 1.5+3.5j')
self.assertEqual(format(1.5-3.5j, ' g'), ' 1.5-3.5j')
self.assertEqual(format(-1.5+3.5j, ' g'), '-1.5+3.5j')
self.assertEqual(format(-1.5-3.5j, ' g'), '-1.5-3.5j')
self.assertEqual(format(-1.5-3.5e-20j, 'g'), '-1.5-3.5e-20j')
self.assertEqual(format(-1.5-3.5j, 'f'), '-1.500000-3.500000j')
self.assertEqual(format(-1.5-3.5j, 'F'), '-1.500000-3.500000j')
self.assertEqual(format(-1.5-3.5j, 'e'), '-1.500000e+00-3.500000e+00j')
self.assertEqual(format(-1.5-3.5j, '.2e'), '-1.50e+00-3.50e+00j')
self.assertEqual(format(-1.5-3.5j, '.2E'), '-1.50E+00-3.50E+00j')
self.assertEqual(format(-1.5e10-3.5e5j, '.2G'), '-1.5E+10-3.5E+05j')
self.assertEqual(format(1.5+3j, '<20g'), '1.5+3j ')
self.assertEqual(format(1.5+3j, '*<20g'), '1.5+3j**************')
self.assertEqual(format(1.5+3j, '>20g'), ' 1.5+3j')
self.assertEqual(format(1.5+3j, '^20g'), ' 1.5+3j ')
self.assertEqual(format(1.5+3j, '<20'), '(1.5+3j) ')
self.assertEqual(format(1.5+3j, '>20'), ' (1.5+3j)')
self.assertEqual(format(1.5+3j, '^20'), ' (1.5+3j) ')
self.assertEqual(format(1.123-3.123j, '^20.2'), ' (1.1-3.1j) ')
self.assertEqual(format(1.5+3j, '20.2f'), ' 1.50+3.00j')
self.assertEqual(format(1.5+3j, '>20.2f'), ' 1.50+3.00j')
self.assertEqual(format(1.5+3j, '<20.2f'), '1.50+3.00j ')
self.assertEqual(format(1.5e20+3j, '<20.2f'), '150000000000000000000.00+3.00j')
self.assertEqual(format(1.5e20+3j, '>40.2f'), ' 150000000000000000000.00+3.00j')
self.assertEqual(format(1.5e20+3j, '^40,.2f'), ' 150,000,000,000,000,000,000.00+3.00j ')
self.assertEqual(format(1.5e21+3j, '^40,.2f'), ' 1,500,000,000,000,000,000,000.00+3.00j ')
self.assertEqual(format(1.5e21+3000j, ',.2f'), '1,500,000,000,000,000,000,000.00+3,000.00j')
# alternate is invalid
self.assertRaises(ValueError, (1.5+0.5j).__format__, '#f')
# zero padding is invalid
self.assertRaises(ValueError, (1.5+0.5j).__format__, '010f')
# '=' alignment is invalid
self.assertRaises(ValueError, (1.5+3j).__format__, '=20')
# integer presentation types are an error
for t in 'bcdoxX':
self.assertRaises(ValueError, (1.5+0.5j).__format__, t)
# make sure everything works in ''.format()
self.assertEqual('*{0:.3f}*'.format(3.14159+2.71828j), '*3.142+2.718j*')
# issue 3382: 'f' and 'F' with inf's and nan's
self.assertEqual('{0:f}'.format(INF+0j), 'inf+0.000000j')
self.assertEqual('{0:F}'.format(INF+0j), 'INF+0.000000j')
self.assertEqual('{0:f}'.format(-INF+0j), '-inf+0.000000j')
self.assertEqual('{0:F}'.format(-INF+0j), '-INF+0.000000j')
self.assertEqual('{0:f}'.format(complex(INF, INF)), 'inf+infj')
self.assertEqual('{0:F}'.format(complex(INF, INF)), 'INF+INFj')
self.assertEqual('{0:f}'.format(complex(INF, -INF)), 'inf-infj')
self.assertEqual('{0:F}'.format(complex(INF, -INF)), 'INF-INFj')
self.assertEqual('{0:f}'.format(complex(-INF, INF)), '-inf+infj')
self.assertEqual('{0:F}'.format(complex(-INF, INF)), '-INF+INFj')
self.assertEqual('{0:f}'.format(complex(-INF, -INF)), '-inf-infj')
self.assertEqual('{0:F}'.format(complex(-INF, -INF)), '-INF-INFj')
self.assertEqual('{0:f}'.format(complex(NAN, 0)), 'nan+0.000000j')
self.assertEqual('{0:F}'.format(complex(NAN, 0)), 'NAN+0.000000j')
self.assertEqual('{0:f}'.format(complex(NAN, NAN)), 'nan+nanj')
self.assertEqual('{0:F}'.format(complex(NAN, NAN)), 'NAN+NANj')
def test_main():
with test_support.check_warnings(("complex divmod.., // and % are "
"deprecated", DeprecationWarning)):
test_support.run_unittest(ComplexTest)
if __name__ == "__main__":
test_main()
|
endolith/scipy
|
refs/heads/master
|
scipy/interpolate/setup.py
|
7
|
import os
from os.path import join
def configuration(parent_package='',top_path=None):
from numpy.distutils.misc_util import Configuration
from scipy._build_utils import (get_f2py_int64_options,
ilp64_pre_build_hook,
uses_blas64)
if uses_blas64():
# TODO: Note that fitpack does not use BLAS/LAPACK.
# The reason why we use 64-bit ints only in this case
# is because scipy._build_utils knows the 64-bit int
# flags for too few Fortran compilers, so we cannot turn
# this on by default.
pre_build_hook = ilp64_pre_build_hook
f2py_options = get_f2py_int64_options()
define_macros = [("HAVE_ILP64", None)]
else:
pre_build_hook = None
f2py_options = None
define_macros = []
config = Configuration('interpolate', parent_package, top_path)
fitpack_src = [join('fitpack', '*.f')]
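    # Build the Fortran FITPACK routines as a helper library that the
    # _fitpack and dfitpack extension modules link against.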
config.add_library('fitpack', sources=fitpack_src,
_pre_build_hook=pre_build_hook)
config.add_extension('interpnd',
sources=['interpnd.c'])
config.add_extension('_ppoly',
sources=['_ppoly.c'])
config.add_extension('_bspl',
sources=['_bspl.c'],
depends=['src/__fitpack.h'])
config.add_extension('_fitpack',
sources=['src/_fitpackmodule.c'],
libraries=['fitpack'],
define_macros=define_macros,
depends=(['src/__fitpack.h']
+ fitpack_src)
)
config.add_extension('dfitpack',
sources=['src/fitpack.pyf'],
libraries=['fitpack'],
define_macros=define_macros,
depends=fitpack_src,
f2py_options=f2py_options
)
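    # Optionally build the Pythran-compiled RBF interpolation kernels;
    # controlled by the SCIPY_USE_PYTHRAN environment variable (on by default).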
if int(os.environ.get('SCIPY_USE_PYTHRAN', 1)):
from pythran.dist import PythranExtension
ext = PythranExtension(
'scipy.interpolate._rbfinterp_pythran',
sources=['scipy/interpolate/_rbfinterp_pythran.py'],
config=['compiler.blas=none']
)
config.ext_modules.append(ext)
config.add_data_dir('tests')
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration(top_path='').todict())
|
freerangerouting/frr
|
refs/heads/master
|
tests/topotests/ospf-topo1/__init__.py
|
12133432
|