repo_name stringlengths 5 100 | path stringlengths 4 294 | copies stringclasses 990 values | size stringlengths 4 7 | content stringlengths 666 1M | license stringclasses 15 values |
|---|---|---|---|---|---|
da2ce7/cjdns | node_build/dependencies/libuv/build/gyp/test/win/gyptest-cl-buffer-security-check.py | 344 | 1612 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Make sure buffer security check setting is extracted properly.
"""
import TestGyp
import sys
if sys.platform == 'win32':
    test = TestGyp.TestGyp(formats=['msvs', 'ninja'])

    CHDIR = 'compiler-flags'
    test.run_gyp('buffer-security-check.gyp', chdir=CHDIR)
    test.build('buffer-security-check.gyp', chdir=CHDIR)

    def GetDisassemblyOfMain(exe):
        """Return the disassembly of _main() from the built executable.

        The standard library uses buffer security checks independent of our
        buffer security settings, so we extract just our code (i.e. main()) to
        check against.
        """
        full_path = test.built_file_path(exe, chdir=CHDIR)
        dump = test.run_dumpbin('/disasm', full_path)
        collected = []
        inside_main = False
        for text_line in dump.splitlines():
            if text_line == '_main:':
                inside_main = True
            elif inside_main:
                if text_line.startswith('_'):
                    # Disassembly of the next function starts here.
                    break
                collected.append(text_line)
        return '\n'.join(collected)

    # Each binary paired with whether main() should reference security_cookie.
    expectations = (
        ('test_bsc_unset.exe', True),   # checks are on by default
        ('test_bsc_on.exe', True),      # explicitly enabled
        ('test_bsc_off.exe', False),    # explicitly disabled: no cookie reference
    )
    for exe, expect_cookie in expectations:
        if ('security_cookie' in GetDisassemblyOfMain(exe)) != expect_cookie:
            test.fail_test()

    test.pass_test()
| gpl-3.0 |
gusDuarte/sugar-toolkit-gtk3 | tests/lib/test_mime.py | 4 | 3376 | #!/usr/bin/env python
# Copyright (C) 2006, Red Hat, Inc.
# Copyright (C) 2007, One Laptop Per Child
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
import sys
import unittest
from sugar import mime
class TestMime(unittest.TestCase):
    """Exercise the sugar.mime helpers for type detection and selection."""

    def test_from_file_name(self):
        """A '.pdf' file name is detected as application/pdf."""
        detected = mime.get_from_file_name('test.pdf')
        self.assertEqual(detected, 'application/pdf')

    def test_choose_most_significant(self):
        """choose_most_significant picks the richest offered type.

        Each case is (offered mime types, expected winner), covering the
        offers made by Mozilla and Abiword in drag-and-drop and copy&paste.
        """
        cases = (
            # Mozilla's text in dnd
            (['text/plain', 'text/_moz_htmlcontext', 'text/unicode',
              'text/html', 'text/_moz_htmlinfo'],
             'text/html'),
            # Mozilla's text in c&v
            (['text/_moz_htmlcontext', 'STRING', 'text/html',
              'text/_moz_htmlinfo', 'text/x-moz-url-priv', 'UTF8_STRING',
              'COMPOUND_TEXT'],
             'text/html'),
            # Mozilla gif in dnd
            (['application/x-moz-file-promise-url',
              'application/x-moz-file-promise-dest-filename',
              'text/_moz_htmlinfo', 'text/x-moz-url-desc',
              'text/_moz_htmlcontext', 'text/x-moz-url-data',
              'text/uri-list'],
             'text/uri-list'),
            # Mozilla url in dnd
            (['text/_moz_htmlcontext', 'text/html', 'text/_moz_htmlinfo',
              '_NETSCAPE_URL', 'text/x-moz-url', 'text/x-moz-url-desc',
              'text/x-moz-url-data', 'text/plain', 'text/unicode'],
             'text/x-moz-url'),
            # Abiword text in dnd
            (['text/rtf', 'text/uri-list'],
             'text/uri-list'),
            # Abiword text in c&v
            (['UTF8_STRING', 'STRING', 'text/html', 'TEXT', 'text/rtf',
              'COMPOUND_TEXT', 'application/rtf', 'text/plain',
              'application/xhtml+xml'],
             'application/rtf'),
            # Abiword text in c&v
            (['GTK_TEXT_BUFFER_CONTENTS',
              'application/x-gtk-text-buffer-rich-text',
              'UTF8_STRING', 'COMPOUND_TEXT', 'TEXT', 'STRING',
              'text/plain;charset=utf-8', 'text/plain;charset=UTF-8',
              'text/plain'],
             'text/plain'),
        )
        for offered, expected in cases:
            self.assertEqual(mime.choose_most_significant(offered), expected)
# Allow running this test module directly with `python test_mime.py`.
if __name__ == '__main__':
    unittest.main()
| lgpl-2.1 |
rwl/PyCIM | CIM14/CDPSM/Balanced/IEC61968/Common/PositionPoint.py | 1 | 3116 | # Copyright (C) 2010-2011 Richard Lincoln
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from CIM14.CDPSM.Balanced.Element import Element
class PositionPoint(Element):
    """Set of spatial coordinates that determine a point. A sequence of PositionPoints can be used to describe:
    - physical location of non-point oriented objects like cables or lines, or
    - area of an object like a substation, a geographical zone or a diagram object.
    """

    def __init__(self, xPosition='', sequenceNumber=0, yPosition='', Location=None, *args, **kw_args):
        """Initialises a new 'PositionPoint' instance.

        @param xPosition: X axis position.
        @param sequenceNumber: Zero-relative sequence number of this point within a series of points.
        @param yPosition: Y axis position.
        @param Location: Location that this position point describes.
        """
        #: X axis position.
        self.xPosition = xPosition

        #: Zero-relative sequence number of this point within a series of points.
        self.sequenceNumber = sequenceNumber

        #: Y axis position.
        self.yPosition = yPosition

        # Backing field for the 'Location' property; assigned through the
        # property setter so the inverse association stays in sync.
        self._Location = None
        self.Location = Location

        super(PositionPoint, self).__init__(*args, **kw_args)

    # Attribute/reference metadata for this class (names, Python types,
    # default values, and which references are single- vs. many-valued).
    _attrs = ["xPosition", "sequenceNumber", "yPosition"]
    _attr_types = {"xPosition": str, "sequenceNumber": int, "yPosition": str}
    _defaults = {"xPosition": '', "sequenceNumber": 0, "yPosition": ''}
    _enums = {}
    _refs = ["Location"]
    _many_refs = []

    def getLocation(self):
        """Location that this position point describes.
        """
        return self._Location

    def setLocation(self, value):
        # Maintain both ends of the association: first detach this point from
        # the old Location's PositionPoints list, then attach it to the new one.
        if self._Location is not None:
            filtered = [x for x in self.Location.PositionPoints if x != self]
            self._Location._PositionPoints = filtered

        self._Location = value
        if self._Location is not None:
            if self not in self._Location._PositionPoints:
                self._Location._PositionPoints.append(self)

    Location = property(getLocation, setLocation)
| mit |
longmen21/edx-platform | openedx/core/djangoapps/credit/signature.py | 60 | 1975 | """
Calculate digital signatures for messages sent to/from credit providers,
using a shared secret key.
The signature is calculated as follows:
1) Encode all parameters of the request (except the signature) in a string.
2) Encode each key/value pair as a string of the form "{key}:{value}".
3) Concatenate key/value pairs in ascending alphabetical order by key.
4) Calculate the HMAC-SHA256 digest of the encoded request parameters, using a 32-character shared secret key.
5) Encode the digest in hexadecimal.
It is the responsibility of the credit provider to check the signature of messages
we send them, and it is our responsibility to check the signature of messages
we receive from the credit provider.
"""
import logging
import hashlib
import hmac
from django.conf import settings
# Module-level logger; used below to report bad shared-secret configuration.
log = logging.getLogger(__name__)
def get_shared_secret_key(provider_id):
    """
    Retrieve the shared secret key for a particular credit provider.

    Looks the provider up in the CREDIT_PROVIDER_SECRET_KEYS setting.
    Returns None when no key is configured, or when the configured key
    cannot be represented as ASCII (an error is logged in that case).
    """
    key_map = getattr(settings, "CREDIT_PROVIDER_SECRET_KEYS", {})
    secret = key_map.get(provider_id)

    # Normalise unicode keys to byte strings; reject non-ASCII keys.
    if isinstance(secret, unicode):
        try:
            secret = str(secret)
        except UnicodeEncodeError:
            secret = None
            log.error(u'Shared secret key for credit provider "%s" contains non-ASCII unicode.', provider_id)

    return secret
def signature(params, shared_secret):
    """
    Calculate the digital signature for parameters using a shared secret.

    Arguments:
        params (dict): Parameters to sign.  The "signature" key, if present,
            is excluded from the signed payload.
        shared_secret (str): The shared secret string.

    Returns:
        str: The signature, a 64-character hexadecimal HMAC-SHA256 digest.
            (Note: a SHA-256 digest is 32 bytes, hence 64 hex characters.)
    """
    # Encode each pair as "{key}:{value}" and concatenate in ascending key
    # order, skipping any pre-existing "signature" parameter.
    encoded_params = u"".join([
        u"{key}:{value}".format(key=key, value=params[key])
        for key in sorted(params.keys())
        if key != u"signature"
    ])

    hasher = hmac.new(shared_secret.encode('utf-8'),
                      encoded_params.encode('utf-8'), hashlib.sha256)
    return hasher.hexdigest()
| agpl-3.0 |
madformuse/client | src/notificatation_system/ns_hook.py | 3 | 1590 | from PyQt4 import QtGui
import util
"""
Setting Model class.
All Event Types (Notifications) are customizable.
Required are "popup, sound, enabled" settings.
You can add custom settings over the "settings" button.
connect on clicked event some actions, e.g.
self.button.clicked.connect(self.dialog.show)
"""
class NsHook():
    """Per-event-type notification settings.

    Holds the "popup" and "sound" flags for one event type, persisted under
    the "notification_system/<eventType>" settings group, plus a 'More'
    button through which subclasses can expose additional options.
    """

    def __init__(self, eventType):
        self.eventType = eventType
        self.loadSettings()
        self.button = QtGui.QPushButton('More')
        self.button.setEnabled(False)

    def loadSettings(self):
        """Read the popup/sound flags from persistent settings."""
        store = util.settings
        store.beginGroup("notification_system")
        store.beginGroup(self.eventType)
        # Values are persisted as the strings 'true'/'false'; default is on.
        self.popup = store.value('popup', 'true') == 'true'
        self.sound = store.value('sound', 'true') == 'true'
        store.endGroup()
        store.endGroup()

    def saveSettings(self):
        """Write the popup/sound flags back to persistent settings."""
        store = util.settings
        store.beginGroup("notification_system")
        store.beginGroup(self.eventType)
        store.setValue('popup', self.popup)
        store.setValue('sound', self.sound)
        store.endGroup()
        store.endGroup()
        store.sync()

    def getEventDisplayName(self):
        """Name shown for this event type; defaults to the raw event type."""
        return self.eventType

    def popupEnabled(self):
        return self.popup

    def switchPopup(self):
        """Toggle the popup flag and persist the change."""
        self.popup = not self.popup
        self.saveSettings()

    def soundEnabled(self):
        return self.sound

    def switchSound(self):
        """Toggle the sound flag and persist the change."""
        self.sound = not self.sound
        self.saveSettings()

    def settings(self):
        """Widget giving access to any extra, subclass-defined settings."""
        return self.button
llhe/tensorflow | tensorflow/contrib/learn/python/learn/learn_io/pandas_io.py | 92 | 4535 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Methods to allow pandas.DataFrame."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.estimator.inputs.pandas_io import pandas_input_fn as core_pandas_input_fn
try:
    # pylint: disable=g-import-not-at-top
    import pandas as pd
    HAS_PANDAS = True
except IOError:
    # Pandas writes a temporary file during import. If it fails, don't use pandas.
    HAS_PANDAS = False
except ImportError:
    HAS_PANDAS = False

# Pandas dtype names considered numeric by the extraction helpers below.
# Only the keys are consulted (membership tests); the values are a coarse
# category label and are not read by the visible code.
PANDAS_DTYPES = {
    'int8': 'int',
    'int16': 'int',
    'int32': 'int',
    'int64': 'int',
    'uint8': 'int',
    'uint16': 'int',
    'uint32': 'int',
    'uint64': 'int',
    'float16': 'float',
    'float32': 'float',
    'float64': 'float',
    'bool': 'i'
}
def pandas_input_fn(x,
                    y=None,
                    batch_size=128,
                    num_epochs=1,
                    shuffle=True,
                    queue_capacity=1000,
                    num_threads=1,
                    target_column='target'):
    """Thin wrapper over the core pandas input_fn.

    Identical to the core version except that `shuffle` defaults to True here.
    """
    return core_pandas_input_fn(
        x=x,
        y=y,
        batch_size=batch_size,
        shuffle=shuffle,
        num_epochs=num_epochs,
        queue_capacity=queue_capacity,
        num_threads=num_threads,
        target_column=target_column)
def extract_pandas_data(data):
    """Extract predictor values from a pandas.DataFrame, cast to float.

    Non-DataFrame input is returned untouched.  For a DataFrame, every
    column must have an int, float or bool dtype.

    Args:
      data: `pandas.DataFrame` containing the data to be extracted.

    Returns:
      A numpy `ndarray` of the DataFrame's values as floats.

    Raises:
      ValueError: if data contains types other than int, float or bool.
    """
    if not isinstance(data, pd.DataFrame):
        return data

    unsupported = [column for column in data
                   if data[column].dtype.name not in PANDAS_DTYPES]
    if unsupported:
        error_report = [("'" + str(column) + "' type='" +
                         data[column].dtype.name + "'")
                        for column in unsupported]
        raise ValueError('Data types for extracting pandas data must be int, '
                         'float, or bool. Found: ' + ', '.join(error_report))
    return data.values.astype('float')
def extract_pandas_matrix(data):
    """Extracts numpy matrix from pandas DataFrame.

    Args:
      data: `pandas.DataFrame` containing the data to be extracted.

    Returns:
      A numpy `ndarray` of the DataFrame's values (non-DataFrame input is
      returned unchanged).
    """
    if not isinstance(data, pd.DataFrame):
        return data
    # DataFrame.as_matrix() was deprecated in pandas 0.23 and removed in 1.0;
    # .values yields the same ndarray of the frame's values.
    return data.values
def extract_pandas_labels(labels):
    """Extract label values from a single-column pandas.DataFrame.

    Args:
      labels: `pandas.DataFrame` or `pandas.Series` containing one column of
        labels to be extracted.

    Returns:
      A numpy `ndarray` of labels from the DataFrame (non-DataFrame input is
      returned unchanged).

    Raises:
      ValueError: if more than one column is found or type is not int, float or
        bool.
    """
    if not isinstance(labels, pd.DataFrame):
        return labels

    if len(labels.columns) > 1:
        raise ValueError('Only one column for labels is allowed.')

    unsupported = [column for column in labels
                   if labels[column].dtype.name not in PANDAS_DTYPES]
    if unsupported:
        error_report = ["'" + str(column) + "' type="
                        + str(labels[column].dtype.name)
                        for column in unsupported]
        raise ValueError('Data types for extracting labels must be int, '
                         'float, or bool. Found: ' + ', '.join(error_report))
    return labels.values
| apache-2.0 |
sysalexis/kbengine | kbe/res/scripts/common/Lib/site-packages/pip/_vendor/requests/packages/urllib3/response.py | 328 | 10347 | # urllib3/response.py
# Copyright 2008-2013 Andrey Petrov and contributors (see CONTRIBUTORS.txt)
#
# This module is part of urllib3 and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
import logging
import zlib
import io
from ._collections import HTTPHeaderDict
from .exceptions import DecodeError
from .packages.six import string_types as basestring, binary_type
from .util import is_fp_closed
# Module-level logger (not referenced by the code visible in this module).
log = logging.getLogger(__name__)
class DeflateDecoder(object):
    """Decompressor that accepts both zlib-wrapped (RFC 1950) and raw
    (RFC 1951) deflate streams.

    The first decompress call assumes a zlib wrapper; if that raises, the
    buffered input is replayed through a raw-deflate decompressor.
    """

    def __init__(self):
        self._first_attempt = True
        self._buffer = binary_type()
        self._decompressor = zlib.decompressobj()

    def __getattr__(self, name):
        # Delegate everything else (flush, unused_data, ...) to the
        # underlying zlib decompressor object.
        return getattr(self._decompressor, name)

    def decompress(self, data):
        if not self._first_attempt:
            return self._decompressor.decompress(data)

        # Keep a copy of all input so it can be replayed if the
        # zlib-wrapped assumption turns out to be wrong.
        self._buffer += data
        try:
            return self._decompressor.decompress(data)
        except zlib.error:
            # Retry the whole buffered stream as raw deflate
            # (negative wbits suppresses the zlib header/trailer).
            self._first_attempt = False
            self._decompressor = zlib.decompressobj(-zlib.MAX_WBITS)
            try:
                return self.decompress(self._buffer)
            finally:
                self._buffer = None
def _get_decoder(mode):
if mode == 'gzip':
return zlib.decompressobj(16 + zlib.MAX_WBITS)
return DeflateDecoder()
class HTTPResponse(io.IOBase):
    """
    HTTP Response container.

    Backwards-compatible to httplib's HTTPResponse but the response ``body`` is
    loaded and decoded on-demand when the ``data`` property is accessed.

    Extra parameters for behaviour not present in httplib.HTTPResponse:

    :param preload_content:
        If True, the response's body will be preloaded during construction.

    :param decode_content:
        If True, attempts to decode specific content-encoding's based on headers
        (like 'gzip' and 'deflate') will be skipped and raw data will be used
        instead.

    :param original_response:
        When this HTTPResponse wrapper is generated from an httplib.HTTPResponse
        object, it's convenient to include the original for debug purposes. It's
        otherwise unused.
    """

    # Content-encodings this class knows how to decode.
    CONTENT_DECODERS = ['gzip', 'deflate']
    REDIRECT_STATUSES = [301, 302, 303, 307, 308]

    def __init__(self, body='', headers=None, status=0, version=0, reason=None,
                 strict=0, preload_content=True, decode_content=True,
                 original_response=None, pool=None, connection=None):
        self.headers = HTTPHeaderDict()
        if headers:
            self.headers.update(headers)
        self.status = status
        self.version = version
        self.reason = reason
        self.strict = strict
        self.decode_content = decode_content

        # Decoder is created lazily in read(), from the content-encoding header.
        self._decoder = None
        # The body is cached here only when it was passed in as a raw string;
        # file-like bodies go through self._fp instead.
        self._body = body if body and isinstance(body, basestring) else None
        self._fp = None
        self._original_response = original_response
        self._fp_bytes_read = 0

        # Kept so release_conn() can return the connection to its pool.
        self._pool = pool
        self._connection = connection

        if hasattr(body, 'read'):
            self._fp = body

        if preload_content and not self._body:
            self._body = self.read(decode_content=decode_content)

    def get_redirect_location(self):
        """
        Should we redirect and where to?

        :returns: Truthy redirect location string if we got a redirect status
            code and valid location. ``None`` if redirect status and no
            location. ``False`` if not a redirect status code.
        """
        if self.status in self.REDIRECT_STATUSES:
            return self.headers.get('location')

        return False

    def release_conn(self):
        # Return the underlying connection to its pool, when both are known.
        if not self._pool or not self._connection:
            return

        self._pool._put_conn(self._connection)
        self._connection = None

    @property
    def data(self):
        # For backwards-compat with earlier urllib3 0.4 and earlier.
        if self._body:
            return self._body

        if self._fp:
            return self.read(cache_content=True)

    def tell(self):
        """
        Obtain the number of bytes pulled over the wire so far. May differ from
        the amount of content returned by :meth:``HTTPResponse.read`` if bytes
        are encoded on the wire (e.g, compressed).
        """
        return self._fp_bytes_read

    def read(self, amt=None, decode_content=None, cache_content=False):
        """
        Similar to :meth:`httplib.HTTPResponse.read`, but with two additional
        parameters: ``decode_content`` and ``cache_content``.

        :param amt:
            How much of the content to read. If specified, caching is skipped
            because it doesn't make sense to cache partial content as the full
            response.

        :param decode_content:
            If True, will attempt to decode the body based on the
            'content-encoding' header.

        :param cache_content:
            If True, will save the returned data such that the same result is
            returned despite of the state of the underlying file object. This
            is useful if you want the ``.data`` property to continue working
            after having ``.read()`` the file object. (Overridden if ``amt`` is
            set.)
        """
        # Note: content-encoding value should be case-insensitive, per RFC 2616
        # Section 3.5
        content_encoding = self.headers.get('content-encoding', '').lower()
        if self._decoder is None:
            if content_encoding in self.CONTENT_DECODERS:
                self._decoder = _get_decoder(content_encoding)
        if decode_content is None:
            decode_content = self.decode_content

        if self._fp is None:
            return

        flush_decoder = False

        try:
            if amt is None:
                # cStringIO doesn't like amt=None
                data = self._fp.read()
                flush_decoder = True
            else:
                cache_content = False
                data = self._fp.read(amt)
                if amt != 0 and not data:  # Platform-specific: Buggy versions of Python.
                    # Close the connection when no data is returned
                    #
                    # This is redundant to what httplib/http.client _should_
                    # already do. However, versions of python released before
                    # December 15, 2012 (http://bugs.python.org/issue16298) do not
                    # properly close the connection in all cases. There is no harm
                    # in redundantly calling close.
                    self._fp.close()
                    flush_decoder = True

            self._fp_bytes_read += len(data)

            try:
                if decode_content and self._decoder:
                    data = self._decoder.decompress(data)
            except (IOError, zlib.error) as e:
                raise DecodeError(
                    "Received response with content-encoding: %s, but "
                    "failed to decode it." % content_encoding,
                    e)

            if flush_decoder and decode_content and self._decoder:
                # At end-of-stream, drain anything still buffered in the decoder.
                buf = self._decoder.decompress(binary_type())
                data += buf + self._decoder.flush()

            if cache_content:
                self._body = data

            return data

        finally:
            # If the server-side response is fully consumed, the connection
            # can go back to the pool.
            if self._original_response and self._original_response.isclosed():
                self.release_conn()

    def stream(self, amt=2**16, decode_content=None):
        """
        A generator wrapper for the read() method. A call will block until
        ``amt`` bytes have been read from the connection or until the
        connection is closed.

        :param amt:
            How much of the content to read. The generator will return up to
            much data per iteration, but may return less. This is particularly
            likely when using compressed data. However, the empty string will
            never be returned.

        :param decode_content:
            If True, will attempt to decode the body based on the
            'content-encoding' header.
        """
        while not is_fp_closed(self._fp):
            data = self.read(amt=amt, decode_content=decode_content)

            if data:
                yield data

    @classmethod
    def from_httplib(ResponseCls, r, **response_kw):
        """
        Given an :class:`httplib.HTTPResponse` instance ``r``, return a
        corresponding :class:`urllib3.response.HTTPResponse` object.

        Remaining parameters are passed to the HTTPResponse constructor, along
        with ``original_response=r``.
        """
        headers = HTTPHeaderDict()
        for k, v in r.getheaders():
            headers.add(k, v)

        # HTTPResponse objects in Python 3 don't have a .strict attribute
        strict = getattr(r, 'strict', 0)
        return ResponseCls(body=r,
                           headers=headers,
                           status=r.status,
                           version=r.version,
                           reason=r.reason,
                           strict=strict,
                           original_response=r,
                           **response_kw)

    # Backwards-compatibility methods for httplib.HTTPResponse
    def getheaders(self):
        return self.headers

    def getheader(self, name, default=None):
        return self.headers.get(name, default)

    # Overrides from io.IOBase
    def close(self):
        if not self.closed:
            self._fp.close()

    @property
    def closed(self):
        # With no underlying file object there is nothing left open.
        if self._fp is None:
            return True
        elif hasattr(self._fp, 'closed'):
            return self._fp.closed
        elif hasattr(self._fp, 'isclosed'):  # Python 2
            return self._fp.isclosed()
        else:
            return True

    def fileno(self):
        if self._fp is None:
            raise IOError("HTTPResponse has no file to get a fileno from")
        elif hasattr(self._fp, "fileno"):
            return self._fp.fileno()
        else:
            raise IOError("The file-like object this HTTPResponse is wrapped "
                          "around has no file descriptor")

    def flush(self):
        if self._fp is not None and hasattr(self._fp, 'flush'):
            return self._fp.flush()

    def readable(self):
        return True
| lgpl-3.0 |
bjornlevi/5thpower | afmaeli/env/lib/python3.6/site-packages/chardet/jpcntx.py | 289 | 19643 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
# This is hiragana 2-char sequence table, the number in each cell represents its frequency category
jp2CharContext = (
(0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1),
(2,4,0,4,0,3,0,4,0,3,4,4,4,2,4,3,3,4,3,2,3,3,4,2,3,3,3,2,4,1,4,3,3,1,5,4,3,4,3,4,3,5,3,0,3,5,4,2,0,3,1,0,3,3,0,3,3,0,1,1,0,4,3,0,3,3,0,4,0,2,0,3,5,5,5,5,4,0,4,1,0,3,4),
(0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2),
(0,4,0,5,0,5,0,4,0,4,5,4,4,3,5,3,5,1,5,3,4,3,4,4,3,4,3,3,4,3,5,4,4,3,5,5,3,5,5,5,3,5,5,3,4,5,5,3,1,3,2,0,3,4,0,4,2,0,4,2,1,5,3,2,3,5,0,4,0,2,0,5,4,4,5,4,5,0,4,0,0,4,4),
(0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0),
(0,3,0,4,0,3,0,3,0,4,5,4,3,3,3,3,4,3,5,4,4,3,5,4,4,3,4,3,4,4,4,4,5,3,4,4,3,4,5,5,4,5,5,1,4,5,4,3,0,3,3,1,3,3,0,4,4,0,3,3,1,5,3,3,3,5,0,4,0,3,0,4,4,3,4,3,3,0,4,1,1,3,4),
(0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0),
(0,4,0,3,0,3,0,4,0,3,4,4,3,2,2,1,2,1,3,1,3,3,3,3,3,4,3,1,3,3,5,3,3,0,4,3,0,5,4,3,3,5,4,4,3,4,4,5,0,1,2,0,1,2,0,2,2,0,1,0,0,5,2,2,1,4,0,3,0,1,0,4,4,3,5,4,3,0,2,1,0,4,3),
(0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0),
(0,3,0,5,0,4,0,2,1,4,4,2,4,1,4,2,4,2,4,3,3,3,4,3,3,3,3,1,4,2,3,3,3,1,4,4,1,1,1,4,3,3,2,0,2,4,3,2,0,3,3,0,3,1,1,0,0,0,3,3,0,4,2,2,3,4,0,4,0,3,0,4,4,5,3,4,4,0,3,0,0,1,4),
(1,4,0,4,0,4,0,4,0,3,5,4,4,3,4,3,5,4,3,3,4,3,5,4,4,4,4,3,4,2,4,3,3,1,5,4,3,2,4,5,4,5,5,4,4,5,4,4,0,3,2,2,3,3,0,4,3,1,3,2,1,4,3,3,4,5,0,3,0,2,0,4,5,5,4,5,4,0,4,0,0,5,4),
(0,5,0,5,0,4,0,3,0,4,4,3,4,3,3,3,4,0,4,4,4,3,4,3,4,3,3,1,4,2,4,3,4,0,5,4,1,4,5,4,4,5,3,2,4,3,4,3,2,4,1,3,3,3,2,3,2,0,4,3,3,4,3,3,3,4,0,4,0,3,0,4,5,4,4,4,3,0,4,1,0,1,3),
(0,3,1,4,0,3,0,2,0,3,4,4,3,1,4,2,3,3,4,3,4,3,4,3,4,4,3,2,3,1,5,4,4,1,4,4,3,5,4,4,3,5,5,4,3,4,4,3,1,2,3,1,2,2,0,3,2,0,3,1,0,5,3,3,3,4,3,3,3,3,4,4,4,4,5,4,2,0,3,3,2,4,3),
(0,2,0,3,0,1,0,1,0,0,3,2,0,0,2,0,1,0,2,1,3,3,3,1,2,3,1,0,1,0,4,2,1,1,3,3,0,4,3,3,1,4,3,3,0,3,3,2,0,0,0,0,1,0,0,2,0,0,0,0,0,4,1,0,2,3,2,2,2,1,3,3,3,4,4,3,2,0,3,1,0,3,3),
(0,4,0,4,0,3,0,3,0,4,4,4,3,3,3,3,3,3,4,3,4,2,4,3,4,3,3,2,4,3,4,5,4,1,4,5,3,5,4,5,3,5,4,0,3,5,5,3,1,3,3,2,2,3,0,3,4,1,3,3,2,4,3,3,3,4,0,4,0,3,0,4,5,4,4,5,3,0,4,1,0,3,4),
(0,2,0,3,0,3,0,0,0,2,2,2,1,0,1,0,0,0,3,0,3,0,3,0,1,3,1,0,3,1,3,3,3,1,3,3,3,0,1,3,1,3,4,0,0,3,1,1,0,3,2,0,0,0,0,1,3,0,1,0,0,3,3,2,0,3,0,0,0,0,0,3,4,3,4,3,3,0,3,0,0,2,3),
(2,3,0,3,0,2,0,1,0,3,3,4,3,1,3,1,1,1,3,1,4,3,4,3,3,3,0,0,3,1,5,4,3,1,4,3,2,5,5,4,4,4,4,3,3,4,4,4,0,2,1,1,3,2,0,1,2,0,0,1,0,4,1,3,3,3,0,3,0,1,0,4,4,4,5,5,3,0,2,0,0,4,4),
(0,2,0,1,0,3,1,3,0,2,3,3,3,0,3,1,0,0,3,0,3,2,3,1,3,2,1,1,0,0,4,2,1,0,2,3,1,4,3,2,0,4,4,3,1,3,1,3,0,1,0,0,1,0,0,0,1,0,0,0,0,4,1,1,1,2,0,3,0,0,0,3,4,2,4,3,2,0,1,0,0,3,3),
(0,1,0,4,0,5,0,4,0,2,4,4,2,3,3,2,3,3,5,3,3,3,4,3,4,2,3,0,4,3,3,3,4,1,4,3,2,1,5,5,3,4,5,1,3,5,4,2,0,3,3,0,1,3,0,4,2,0,1,3,1,4,3,3,3,3,0,3,0,1,0,3,4,4,4,5,5,0,3,0,1,4,5),
(0,2,0,3,0,3,0,0,0,2,3,1,3,0,4,0,1,1,3,0,3,4,3,2,3,1,0,3,3,2,3,1,3,0,2,3,0,2,1,4,1,2,2,0,0,3,3,0,0,2,0,0,0,1,0,0,0,0,2,2,0,3,2,1,3,3,0,2,0,2,0,0,3,3,1,2,4,0,3,0,2,2,3),
(2,4,0,5,0,4,0,4,0,2,4,4,4,3,4,3,3,3,1,2,4,3,4,3,4,4,5,0,3,3,3,3,2,0,4,3,1,4,3,4,1,4,4,3,3,4,4,3,1,2,3,0,4,2,0,4,1,0,3,3,0,4,3,3,3,4,0,4,0,2,0,3,5,3,4,5,2,0,3,0,0,4,5),
(0,3,0,4,0,1,0,1,0,1,3,2,2,1,3,0,3,0,2,0,2,0,3,0,2,0,0,0,1,0,1,1,0,0,3,1,0,0,0,4,0,3,1,0,2,1,3,0,0,0,0,0,0,3,0,0,0,0,0,0,0,4,2,2,3,1,0,3,0,0,0,1,4,4,4,3,0,0,4,0,0,1,4),
(1,4,1,5,0,3,0,3,0,4,5,4,4,3,5,3,3,4,4,3,4,1,3,3,3,3,2,1,4,1,5,4,3,1,4,4,3,5,4,4,3,5,4,3,3,4,4,4,0,3,3,1,2,3,0,3,1,0,3,3,0,5,4,4,4,4,4,4,3,3,5,4,4,3,3,5,4,0,3,2,0,4,4),
(0,2,0,3,0,1,0,0,0,1,3,3,3,2,4,1,3,0,3,1,3,0,2,2,1,1,0,0,2,0,4,3,1,0,4,3,0,4,4,4,1,4,3,1,1,3,3,1,0,2,0,0,1,3,0,0,0,0,2,0,0,4,3,2,4,3,5,4,3,3,3,4,3,3,4,3,3,0,2,1,0,3,3),
(0,2,0,4,0,3,0,2,0,2,5,5,3,4,4,4,4,1,4,3,3,0,4,3,4,3,1,3,3,2,4,3,0,3,4,3,0,3,4,4,2,4,4,0,4,5,3,3,2,2,1,1,1,2,0,1,5,0,3,3,2,4,3,3,3,4,0,3,0,2,0,4,4,3,5,5,0,0,3,0,2,3,3),
(0,3,0,4,0,3,0,1,0,3,4,3,3,1,3,3,3,0,3,1,3,0,4,3,3,1,1,0,3,0,3,3,0,0,4,4,0,1,5,4,3,3,5,0,3,3,4,3,0,2,0,1,1,1,0,1,3,0,1,2,1,3,3,2,3,3,0,3,0,1,0,1,3,3,4,4,1,0,1,2,2,1,3),
(0,1,0,4,0,4,0,3,0,1,3,3,3,2,3,1,1,0,3,0,3,3,4,3,2,4,2,0,1,0,4,3,2,0,4,3,0,5,3,3,2,4,4,4,3,3,3,4,0,1,3,0,0,1,0,0,1,0,0,0,0,4,2,3,3,3,0,3,0,0,0,4,4,4,5,3,2,0,3,3,0,3,5),
(0,2,0,3,0,0,0,3,0,1,3,0,2,0,0,0,1,0,3,1,1,3,3,0,0,3,0,0,3,0,2,3,1,0,3,1,0,3,3,2,0,4,2,2,0,2,0,0,0,4,0,0,0,0,0,0,0,0,0,0,0,2,1,2,0,1,0,1,0,0,0,1,3,1,2,0,0,0,1,0,0,1,4),
(0,3,0,3,0,5,0,1,0,2,4,3,1,3,3,2,1,1,5,2,1,0,5,1,2,0,0,0,3,3,2,2,3,2,4,3,0,0,3,3,1,3,3,0,2,5,3,4,0,3,3,0,1,2,0,2,2,0,3,2,0,2,2,3,3,3,0,2,0,1,0,3,4,4,2,5,4,0,3,0,0,3,5),
(0,3,0,3,0,3,0,1,0,3,3,3,3,0,3,0,2,0,2,1,1,0,2,0,1,0,0,0,2,1,0,0,1,0,3,2,0,0,3,3,1,2,3,1,0,3,3,0,0,1,0,0,0,0,0,2,0,0,0,0,0,2,3,1,2,3,0,3,0,1,0,3,2,1,0,4,3,0,1,1,0,3,3),
(0,4,0,5,0,3,0,3,0,4,5,5,4,3,5,3,4,3,5,3,3,2,5,3,4,4,4,3,4,3,4,5,5,3,4,4,3,4,4,5,4,4,4,3,4,5,5,4,2,3,4,2,3,4,0,3,3,1,4,3,2,4,3,3,5,5,0,3,0,3,0,5,5,5,5,4,4,0,4,0,1,4,4),
(0,4,0,4,0,3,0,3,0,3,5,4,4,2,3,2,5,1,3,2,5,1,4,2,3,2,3,3,4,3,3,3,3,2,5,4,1,3,3,5,3,4,4,0,4,4,3,1,1,3,1,0,2,3,0,2,3,0,3,0,0,4,3,1,3,4,0,3,0,2,0,4,4,4,3,4,5,0,4,0,0,3,4),
(0,3,0,3,0,3,1,2,0,3,4,4,3,3,3,0,2,2,4,3,3,1,3,3,3,1,1,0,3,1,4,3,2,3,4,4,2,4,4,4,3,4,4,3,2,4,4,3,1,3,3,1,3,3,0,4,1,0,2,2,1,4,3,2,3,3,5,4,3,3,5,4,4,3,3,0,4,0,3,2,2,4,4),
(0,2,0,1,0,0,0,0,0,1,2,1,3,0,0,0,0,0,2,0,1,2,1,0,0,1,0,0,0,0,3,0,0,1,0,1,1,3,1,0,0,0,1,1,0,1,1,0,0,0,0,0,2,0,0,0,0,0,0,0,0,1,1,2,2,0,3,4,0,0,0,1,1,0,0,1,0,0,0,0,0,1,1),
(0,1,0,0,0,1,0,0,0,0,4,0,4,1,4,0,3,0,4,0,3,0,4,0,3,0,3,0,4,1,5,1,4,0,0,3,0,5,0,5,2,0,1,0,0,0,2,1,4,0,1,3,0,0,3,0,0,3,1,1,4,1,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0),
(1,4,0,5,0,3,0,2,0,3,5,4,4,3,4,3,5,3,4,3,3,0,4,3,3,3,3,3,3,2,4,4,3,1,3,4,4,5,4,4,3,4,4,1,3,5,4,3,3,3,1,2,2,3,3,1,3,1,3,3,3,5,3,3,4,5,0,3,0,3,0,3,4,3,4,4,3,0,3,0,2,4,3),
(0,1,0,4,0,0,0,0,0,1,4,0,4,1,4,2,4,0,3,0,1,0,1,0,0,0,0,0,2,0,3,1,1,1,0,3,0,0,0,1,2,1,0,0,1,1,1,1,0,1,0,0,0,1,0,0,3,0,0,0,0,3,2,0,2,2,0,1,0,0,0,2,3,2,3,3,0,0,0,0,2,1,0),
(0,5,1,5,0,3,0,3,0,5,4,4,5,1,5,3,3,0,4,3,4,3,5,3,4,3,3,2,4,3,4,3,3,0,3,3,1,4,4,3,4,4,4,3,4,5,5,3,2,3,1,1,3,3,1,3,1,1,3,3,2,4,5,3,3,5,0,4,0,3,0,4,4,3,5,3,3,0,3,4,0,4,3),
(0,5,0,5,0,3,0,2,0,4,4,3,5,2,4,3,3,3,4,4,4,3,5,3,5,3,3,1,4,0,4,3,3,0,3,3,0,4,4,4,4,5,4,3,3,5,5,3,2,3,1,2,3,2,0,1,0,0,3,2,2,4,4,3,1,5,0,4,0,3,0,4,3,1,3,2,1,0,3,3,0,3,3),
(0,4,0,5,0,5,0,4,0,4,5,5,5,3,4,3,3,2,5,4,4,3,5,3,5,3,4,0,4,3,4,4,3,2,4,4,3,4,5,4,4,5,5,0,3,5,5,4,1,3,3,2,3,3,1,3,1,0,4,3,1,4,4,3,4,5,0,4,0,2,0,4,3,4,4,3,3,0,4,0,0,5,5),
(0,4,0,4,0,5,0,1,1,3,3,4,4,3,4,1,3,0,5,1,3,0,3,1,3,1,1,0,3,0,3,3,4,0,4,3,0,4,4,4,3,4,4,0,3,5,4,1,0,3,0,0,2,3,0,3,1,0,3,1,0,3,2,1,3,5,0,3,0,1,0,3,2,3,3,4,4,0,2,2,0,4,4),
(2,4,0,5,0,4,0,3,0,4,5,5,4,3,5,3,5,3,5,3,5,2,5,3,4,3,3,4,3,4,5,3,2,1,5,4,3,2,3,4,5,3,4,1,2,5,4,3,0,3,3,0,3,2,0,2,3,0,4,1,0,3,4,3,3,5,0,3,0,1,0,4,5,5,5,4,3,0,4,2,0,3,5),
(0,5,0,4,0,4,0,2,0,5,4,3,4,3,4,3,3,3,4,3,4,2,5,3,5,3,4,1,4,3,4,4,4,0,3,5,0,4,4,4,4,5,3,1,3,4,5,3,3,3,3,3,3,3,0,2,2,0,3,3,2,4,3,3,3,5,3,4,1,3,3,5,3,2,0,0,0,0,4,3,1,3,3),
(0,1,0,3,0,3,0,1,0,1,3,3,3,2,3,3,3,0,3,0,0,0,3,1,3,0,0,0,2,2,2,3,0,0,3,2,0,1,2,4,1,3,3,0,0,3,3,3,0,1,0,0,2,1,0,0,3,0,3,1,0,3,0,0,1,3,0,2,0,1,0,3,3,1,3,3,0,0,1,1,0,3,3),
(0,2,0,3,0,2,1,4,0,2,2,3,1,1,3,1,1,0,2,0,3,1,2,3,1,3,0,0,1,0,4,3,2,3,3,3,1,4,2,3,3,3,3,1,0,3,1,4,0,1,1,0,1,2,0,1,1,0,1,1,0,3,1,3,2,2,0,1,0,0,0,2,3,3,3,1,0,0,0,0,0,2,3),
(0,5,0,4,0,5,0,2,0,4,5,5,3,3,4,3,3,1,5,4,4,2,4,4,4,3,4,2,4,3,5,5,4,3,3,4,3,3,5,5,4,5,5,1,3,4,5,3,1,4,3,1,3,3,0,3,3,1,4,3,1,4,5,3,3,5,0,4,0,3,0,5,3,3,1,4,3,0,4,0,1,5,3),
(0,5,0,5,0,4,0,2,0,4,4,3,4,3,3,3,3,3,5,4,4,4,4,4,4,5,3,3,5,2,4,4,4,3,4,4,3,3,4,4,5,5,3,3,4,3,4,3,3,4,3,3,3,3,1,2,2,1,4,3,3,5,4,4,3,4,0,4,0,3,0,4,4,4,4,4,1,0,4,2,0,2,4),
(0,4,0,4,0,3,0,1,0,3,5,2,3,0,3,0,2,1,4,2,3,3,4,1,4,3,3,2,4,1,3,3,3,0,3,3,0,0,3,3,3,5,3,3,3,3,3,2,0,2,0,0,2,0,0,2,0,0,1,0,0,3,1,2,2,3,0,3,0,2,0,4,4,3,3,4,1,0,3,0,0,2,4),
(0,0,0,4,0,0,0,0,0,0,1,0,1,0,2,0,0,0,0,0,1,0,2,0,1,0,0,0,0,0,3,1,3,0,3,2,0,0,0,1,0,3,2,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,3,4,0,2,0,0,0,0,0,0,2),
(0,2,1,3,0,2,0,2,0,3,3,3,3,1,3,1,3,3,3,3,3,3,4,2,2,1,2,1,4,0,4,3,1,3,3,3,2,4,3,5,4,3,3,3,3,3,3,3,0,1,3,0,2,0,0,1,0,0,1,0,0,4,2,0,2,3,0,3,3,0,3,3,4,2,3,1,4,0,1,2,0,2,3),
(0,3,0,3,0,1,0,3,0,2,3,3,3,0,3,1,2,0,3,3,2,3,3,2,3,2,3,1,3,0,4,3,2,0,3,3,1,4,3,3,2,3,4,3,1,3,3,1,1,0,1,1,0,1,0,1,0,1,0,0,0,4,1,1,0,3,0,3,1,0,2,3,3,3,3,3,1,0,0,2,0,3,3),
(0,0,0,0,0,0,0,0,0,0,3,0,2,0,3,0,0,0,0,0,0,0,3,0,0,0,0,0,0,0,3,0,3,0,3,1,0,1,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,3,0,2,0,2,3,0,0,0,0,0,0,0,0,3),
(0,2,0,3,1,3,0,3,0,2,3,3,3,1,3,1,3,1,3,1,3,3,3,1,3,0,2,3,1,1,4,3,3,2,3,3,1,2,2,4,1,3,3,0,1,4,2,3,0,1,3,0,3,0,0,1,3,0,2,0,0,3,3,2,1,3,0,3,0,2,0,3,4,4,4,3,1,0,3,0,0,3,3),
(0,2,0,1,0,2,0,0,0,1,3,2,2,1,3,0,1,1,3,0,3,2,3,1,2,0,2,0,1,1,3,3,3,0,3,3,1,1,2,3,2,3,3,1,2,3,2,0,0,1,0,0,0,0,0,0,3,0,1,0,0,2,1,2,1,3,0,3,0,0,0,3,4,4,4,3,2,0,2,0,0,2,4),
(0,0,0,1,0,1,0,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,1,1,1,0,0,0,0,0,0,0,0,0,2,2,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,1,3,1,0,0,0,0,0,0,0,3),
(0,3,0,3,0,2,0,3,0,3,3,3,2,3,2,2,2,0,3,1,3,3,3,2,3,3,0,0,3,0,3,2,2,0,2,3,1,4,3,4,3,3,2,3,1,5,4,4,0,3,1,2,1,3,0,3,1,1,2,0,2,3,1,3,1,3,0,3,0,1,0,3,3,4,4,2,1,0,2,1,0,2,4),
(0,1,0,3,0,1,0,2,0,1,4,2,5,1,4,0,2,0,2,1,3,1,4,0,2,1,0,0,2,1,4,1,1,0,3,3,0,5,1,3,2,3,3,1,0,3,2,3,0,1,0,0,0,0,0,0,1,0,0,0,0,4,0,1,0,3,0,2,0,1,0,3,3,3,4,3,3,0,0,0,0,2,3),
(0,0,0,1,0,0,0,0,0,0,2,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,3,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,1,0,0,1,0,0,0,0,0,3),
(0,1,0,3,0,4,0,3,0,2,4,3,1,0,3,2,2,1,3,1,2,2,3,1,1,1,2,1,3,0,1,2,0,1,3,2,1,3,0,5,5,1,0,0,1,3,2,1,0,3,0,0,1,0,0,0,0,0,3,4,0,1,1,1,3,2,0,2,0,1,0,2,3,3,1,2,3,0,1,0,1,0,4),
(0,0,0,1,0,3,0,3,0,2,2,1,0,0,4,0,3,0,3,1,3,0,3,0,3,0,1,0,3,0,3,1,3,0,3,3,0,0,1,2,1,1,1,0,1,2,0,0,0,1,0,0,1,0,0,0,0,0,0,0,0,2,2,1,2,0,0,2,0,0,0,0,2,3,3,3,3,0,0,0,0,1,4),
(0,0,0,3,0,3,0,0,0,0,3,1,1,0,3,0,1,0,2,0,1,0,0,0,0,0,0,0,1,0,3,0,2,0,2,3,0,0,2,2,3,1,2,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,3,0,0,2,0,0,0,0,2,3),
(2,4,0,5,0,5,0,4,0,3,4,3,3,3,4,3,3,3,4,3,4,4,5,4,5,5,5,2,3,0,5,5,4,1,5,4,3,1,5,4,3,4,4,3,3,4,3,3,0,3,2,0,2,3,0,3,0,0,3,3,0,5,3,2,3,3,0,3,0,3,0,3,4,5,4,5,3,0,4,3,0,3,4),
(0,3,0,3,0,3,0,3,0,3,3,4,3,2,3,2,3,0,4,3,3,3,3,3,3,3,3,0,3,2,4,3,3,1,3,4,3,4,4,4,3,4,4,3,2,4,4,1,0,2,0,0,1,1,0,2,0,0,3,1,0,5,3,2,1,3,0,3,0,1,2,4,3,2,4,3,3,0,3,2,0,4,4),
(0,3,0,3,0,1,0,0,0,1,4,3,3,2,3,1,3,1,4,2,3,2,4,2,3,4,3,0,2,2,3,3,3,0,3,3,3,0,3,4,1,3,3,0,3,4,3,3,0,1,1,0,1,0,0,0,4,0,3,0,0,3,1,2,1,3,0,4,0,1,0,4,3,3,4,3,3,0,2,0,0,3,3),
(0,3,0,4,0,1,0,3,0,3,4,3,3,0,3,3,3,1,3,1,3,3,4,3,3,3,0,0,3,1,5,3,3,1,3,3,2,5,4,3,3,4,5,3,2,5,3,4,0,1,0,0,0,0,0,2,0,0,1,1,0,4,2,2,1,3,0,3,0,2,0,4,4,3,5,3,2,0,1,1,0,3,4),
(0,5,0,4,0,5,0,2,0,4,4,3,3,2,3,3,3,1,4,3,4,1,5,3,4,3,4,0,4,2,4,3,4,1,5,4,0,4,4,4,4,5,4,1,3,5,4,2,1,4,1,1,3,2,0,3,1,0,3,2,1,4,3,3,3,4,0,4,0,3,0,4,4,4,3,3,3,0,4,2,0,3,4),
(1,4,0,4,0,3,0,1,0,3,3,3,1,1,3,3,2,2,3,3,1,0,3,2,2,1,2,0,3,1,2,1,2,0,3,2,0,2,2,3,3,4,3,0,3,3,1,2,0,1,1,3,1,2,0,0,3,0,1,1,0,3,2,2,3,3,0,3,0,0,0,2,3,3,4,3,3,0,1,0,0,1,4),
(0,4,0,4,0,4,0,0,0,3,4,4,3,1,4,2,3,2,3,3,3,1,4,3,4,0,3,0,4,2,3,3,2,2,5,4,2,1,3,4,3,4,3,1,3,3,4,2,0,2,1,0,3,3,0,0,2,0,3,1,0,4,4,3,4,3,0,4,0,1,0,2,4,4,4,4,4,0,3,2,0,3,3),
(0,0,0,1,0,4,0,0,0,0,0,0,1,1,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,1,0,3,2,0,0,1,0,0,0,1,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,2),
(0,2,0,3,0,4,0,4,0,1,3,3,3,0,4,0,2,1,2,1,1,1,2,0,3,1,1,0,1,0,3,1,0,0,3,3,2,0,1,1,0,0,0,0,0,1,0,2,0,2,2,0,3,1,0,0,1,0,1,1,0,1,2,0,3,0,0,0,0,1,0,0,3,3,4,3,1,0,1,0,3,0,2),
(0,0,0,3,0,5,0,0,0,0,1,0,2,0,3,1,0,1,3,0,0,0,2,0,0,0,1,0,0,0,1,1,0,0,4,0,0,0,2,3,0,1,4,1,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,3,0,0,0,0,0,1,0,0,0,0,0,0,0,2,0,0,3,0,0,0,0,0,3),
(0,2,0,5,0,5,0,1,0,2,4,3,3,2,5,1,3,2,3,3,3,0,4,1,2,0,3,0,4,0,2,2,1,1,5,3,0,0,1,4,2,3,2,0,3,3,3,2,0,2,4,1,1,2,0,1,1,0,3,1,0,1,3,1,2,3,0,2,0,0,0,1,3,5,4,4,4,0,3,0,0,1,3),
(0,4,0,5,0,4,0,4,0,4,5,4,3,3,4,3,3,3,4,3,4,4,5,3,4,5,4,2,4,2,3,4,3,1,4,4,1,3,5,4,4,5,5,4,4,5,5,5,2,3,3,1,4,3,1,3,3,0,3,3,1,4,3,4,4,4,0,3,0,4,0,3,3,4,4,5,0,0,4,3,0,4,5),
(0,4,0,4,0,3,0,3,0,3,4,4,4,3,3,2,4,3,4,3,4,3,5,3,4,3,2,1,4,2,4,4,3,1,3,4,2,4,5,5,3,4,5,4,1,5,4,3,0,3,2,2,3,2,1,3,1,0,3,3,3,5,3,3,3,5,4,4,2,3,3,4,3,3,3,2,1,0,3,2,1,4,3),
(0,4,0,5,0,4,0,3,0,3,5,5,3,2,4,3,4,0,5,4,4,1,4,4,4,3,3,3,4,3,5,5,2,3,3,4,1,2,5,5,3,5,5,2,3,5,5,4,0,3,2,0,3,3,1,1,5,1,4,1,0,4,3,2,3,5,0,4,0,3,0,5,4,3,4,3,0,0,4,1,0,4,4),
(1,3,0,4,0,2,0,2,0,2,5,5,3,3,3,3,3,0,4,2,3,4,4,4,3,4,0,0,3,4,5,4,3,3,3,3,2,5,5,4,5,5,5,4,3,5,5,5,1,3,1,0,1,0,0,3,2,0,4,2,0,5,2,3,2,4,1,3,0,3,0,4,5,4,5,4,3,0,4,2,0,5,4),
(0,3,0,4,0,5,0,3,0,3,4,4,3,2,3,2,3,3,3,3,3,2,4,3,3,2,2,0,3,3,3,3,3,1,3,3,3,0,4,4,3,4,4,1,1,4,4,2,0,3,1,0,1,1,0,4,1,0,2,3,1,3,3,1,3,4,0,3,0,1,0,3,1,3,0,0,1,0,2,0,0,4,4),
(0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0),
(0,3,0,3,0,2,0,3,0,1,5,4,3,3,3,1,4,2,1,2,3,4,4,2,4,4,5,0,3,1,4,3,4,0,4,3,3,3,2,3,2,5,3,4,3,2,2,3,0,0,3,0,2,1,0,1,2,0,0,0,0,2,1,1,3,1,0,2,0,4,0,3,4,4,4,5,2,0,2,0,0,1,3),
(0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,1,1,1,0,0,1,1,0,0,0,4,2,1,1,0,1,0,3,2,0,0,3,1,1,1,2,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,3,0,1,0,0,0,2,0,0,0,1,4,0,4,2,1,0,0,0,0,0,1),
(0,0,0,0,0,0,0,0,0,1,0,1,0,0,0,0,1,0,0,0,0,0,0,1,0,1,0,0,0,0,3,1,0,0,0,2,0,2,1,0,0,1,2,1,0,1,1,0,0,3,0,0,0,0,0,0,0,0,0,0,0,1,3,1,0,0,0,0,0,1,0,0,2,1,0,0,0,0,0,0,0,0,2),
(0,4,0,4,0,4,0,3,0,4,4,3,4,2,4,3,2,0,4,4,4,3,5,3,5,3,3,2,4,2,4,3,4,3,1,4,0,2,3,4,4,4,3,3,3,4,4,4,3,4,1,3,4,3,2,1,2,1,3,3,3,4,4,3,3,5,0,4,0,3,0,4,3,3,3,2,1,0,3,0,0,3,3),
(0,4,0,3,0,3,0,3,0,3,5,5,3,3,3,3,4,3,4,3,3,3,4,4,4,3,3,3,3,4,3,5,3,3,1,3,2,4,5,5,5,5,4,3,4,5,5,3,2,2,3,3,3,3,2,3,3,1,2,3,2,4,3,3,3,4,0,4,0,2,0,4,3,2,2,1,2,0,3,0,0,4,1),
)
class JapaneseContextAnalysis(object):
    """Score Japanese text by analysing consecutive hiragana pairs.

    Subclasses implement :meth:`get_order` for a concrete encoding
    (Shift_JIS, EUC-JP).  For every pair of adjacent hiragana characters,
    the frequency category of the transition is looked up in the
    module-level ``jp2CharContext`` table and tallied; the tally is later
    turned into a confidence value.
    """
    NUM_OF_CATEGORY = 6          # number of frequency categories in jp2CharContext
    DONT_KNOW = -1               # sentinel confidence when we lack enough data
    ENOUGH_REL_THRESHOLD = 100   # sample size at which the result is considered solid
    MAX_REL_THRESHOLD = 1000     # stop analysing once this many pairs were seen
    MINIMUM_DATA_THRESHOLD = 4   # below this, get_confidence returns DONT_KNOW
    def __init__(self):
        # All state lives in reset(); initialise the slots first so the
        # attribute set is fixed regardless of code path.
        self._total_rel = None
        self._rel_sample = None
        self._need_to_skip_char_num = None
        self._last_char_order = None
        self._done = None
        self.reset()
    def reset(self):
        """Return the analyser to its pristine state so it can be reused."""
        self._total_rel = 0  # total sequence received
        # category counters, each integer counts sequence in its category
        self._rel_sample = [0] * self.NUM_OF_CATEGORY
        # if last byte in current buffer is not the last byte of a character,
        # we need to know how many bytes to skip in next buffer
        self._need_to_skip_char_num = 0
        self._last_char_order = -1  # The order of previous char
        # If this flag is set to True, detection is done and conclusion has
        # been made
        self._done = False
    def feed(self, byte_str, num_bytes):
        """Consume ``num_bytes`` bytes of ``byte_str``, updating the tallies.

        May be called repeatedly with successive buffers; partial multi-byte
        characters at a buffer boundary are skipped via
        ``_need_to_skip_char_num``.
        """
        if self._done:
            return
        # The buffer we got is byte oriented, and a character may span in more than one
        # buffers. In case the last one or two byte in last buffer is not
        # complete, we record how many byte needed to complete that character
        # and skip these bytes here.  We can choose to record those bytes as
        # well and analyse the character once it is complete, but since a
        # character will not make much difference, simply skipping
        # this character will simplify our logic and improve performance.
        i = self._need_to_skip_char_num
        while i < num_bytes:
            # get_order needs at most two bytes to classify a character.
            order, char_len = self.get_order(byte_str[i:i + 2])
            i += char_len
            if i > num_bytes:
                # Character straddles the buffer end; remember how much of it
                # to skip next time and forget the dangling previous char.
                self._need_to_skip_char_num = i - num_bytes
                self._last_char_order = -1
            else:
                if (order != -1) and (self._last_char_order != -1):
                    # Two hiragana in a row: record the transition category.
                    self._total_rel += 1
                    if self._total_rel > self.MAX_REL_THRESHOLD:
                        self._done = True
                        break
                    self._rel_sample[jp2CharContext[self._last_char_order][order]] += 1
                self._last_char_order = order
    def got_enough_data(self):
        """True once enough pairs were sampled to trust get_confidence()."""
        return self._total_rel > self.ENOUGH_REL_THRESHOLD
    def get_confidence(self):
        """Return the fraction of sampled pairs outside category 0.

        Category 0 presumably marks pairs with no recorded affinity —
        TODO confirm against the jp2CharContext table's construction.
        """
        # This is just one way to calculate confidence. It works well for me.
        if self._total_rel > self.MINIMUM_DATA_THRESHOLD:
            return (self._total_rel - self._rel_sample[0]) / self._total_rel
        else:
            return self.DONT_KNOW
    def get_order(self, byte_str):
        """Base implementation: no hiragana order, one byte consumed.

        Overridden by encoding-specific subclasses.
        """
        return -1, 1
class SJISContextAnalysis(JapaneseContextAnalysis):
    """Japanese context analysis specialized for Shift_JIS / CP932 input."""

    def __init__(self):
        super(SJISContextAnalysis, self).__init__()
        self._charset_name = "SHIFT_JIS"

    @property
    def charset_name(self):
        """Detected charset name: "SHIFT_JIS", or "CP932" once Windows
        extension lead bytes have been seen."""
        return self._charset_name

    def get_order(self, byte_str):
        """Return ``(hiragana_order, char_byte_length)`` for the character
        starting at ``byte_str[0]``; order is -1 for non-hiragana."""
        if not byte_str:
            return -1, 1
        # find out current char's byte length
        first_char = byte_str[0]
        if (0x81 <= first_char <= 0x9F) or (0xE0 <= first_char <= 0xFC):
            char_len = 2
            if (first_char == 0x87) or (0xFA <= first_char <= 0xFC):
                # Lead bytes 0x87 and 0xFA-0xFC are Windows (CP932) extensions.
                self._charset_name = "CP932"
        else:
            char_len = 1
        # return its order if it is hiragana
        if len(byte_str) > 1:
            second_char = byte_str[1]
            # BUGFIX: the hiragana lead byte in Shift_JIS is 0x82.  The C and
            # Python 2 sources wrote it as the *octal* literal '\202'; the
            # Python 3 port mistakenly compared against decimal 202 (0xCA),
            # so hiragana transitions were never recorded.
            if (first_char == 0x82) and (0x9F <= second_char <= 0xF1):
                return second_char - 0x9F, char_len
        return -1, char_len
class EUCJPContextAnalysis(JapaneseContextAnalysis):
    """Japanese context analysis specialized for EUC-JP byte streams."""

    def get_order(self, byte_str):
        """Return ``(hiragana_order, char_byte_length)`` for the character
        starting at ``byte_str[0]``; order is -1 for non-hiragana."""
        if not byte_str:
            return -1, 1
        lead = byte_str[0]
        # Work out how many bytes the character occupies: 0x8F introduces
        # a three-byte (JIS X 0212) sequence, 0x8E and the 0xA1-0xFE range
        # introduce two-byte sequences, everything else is a single byte.
        if lead == 0x8F:
            length = 3
        elif lead == 0x8E or 0xA1 <= lead <= 0xFE:
            length = 2
        else:
            length = 1
        # Hiragana occupies row 0xA4; map the trail byte onto its order.
        if len(byte_str) < 2:
            return -1, length
        trail = byte_str[1]
        if lead == 0xA4 and 0xA1 <= trail <= 0xF3:
            return trail - 0xA1, length
        return -1, length
| mit |
polojacky/ehfpi | ehf/filebrowser/templatetags/fb_pagination.py | 15 | 1837 | # coding: utf-8
# DJANGO IMPORTS
from django.utils.html import escape
from django.utils.safestring import mark_safe
from django.template import Library
register = Library()
DOT = '.'
@register.inclusion_tag('filebrowser/include/paginator.html', takes_context=True)
def pagination(context):
    """Build the template context for the filebrowser paginator.

    Produces a "smart" page range: every page when there are ten or
    fewer, otherwise a window of pages around the current one plus the
    first and last few pages, with DOT entries marking elided stretches.
    """
    current = context['page'].number - 1  # zero-based index of current page
    paginator = context['p']
    total = paginator.num_pages
    if not total or total == 1:
        pages = []
    elif total <= 10:
        # Few enough pages to link every one of them.
        pages = range(total)
    else:
        ON_EACH_SIDE = 3  # pages kept on each side of the current page
        ON_ENDS = 2       # pages always kept at the very start and end
        pages = []
        # Leading section: either all pages up to the current one, or the
        # first pages, an ellipsis, then a window before the current page.
        if current > (ON_EACH_SIDE + ON_ENDS):
            pages.extend(range(0, ON_EACH_SIDE - 1))
            pages.append(DOT)
            pages.extend(range(current - ON_EACH_SIDE, current + 1))
        else:
            pages.extend(range(0, current + 1))
        # Trailing section: mirror image of the leading section.
        if current < (total - ON_EACH_SIDE - ON_ENDS - 1):
            pages.extend(range(current + 1, current + ON_EACH_SIDE + 1))
            pages.append(DOT)
            pages.extend(range(total - ON_ENDS, total))
        else:
            pages.extend(range(current + 1, total))
    return {
        'page_range': pages,
        'page_num': current,
        'filelisting': context['filelisting'],
        'query': context['query'],
    }
| apache-2.0 |
sauloal/cnidaria | scripts/venv/lib/python2.7/site-packages/matplotlib/backends/backend_gtk3cairo.py | 21 | 2321 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
from . import backend_gtk3
from . import backend_cairo
from .backend_cairo import cairo, HAS_CAIRO_CFFI
from matplotlib.figure import Figure
class RendererGTK3Cairo(backend_cairo.RendererCairo):
    """Cairo renderer that accepts the context handed in by GTK3."""
    def set_context(self, ctx):
        """Install *ctx* as the cairo context the renderer draws into.

        When cairocffi is in use, the pycairo context delivered by
        PyGObject must be converted into a cairocffi ``Context``.  The
        cast reads the underlying ``cairo_t*`` directly out of the
        PyObject's memory — assumes the pointer is the first field after
        the object header (``object.__basicsize__``); TODO confirm this
        holds for the pycairo build in use.
        """
        if HAS_CAIRO_CFFI:
            ctx = cairo.Context._from_pointer(
                cairo.ffi.cast(
                    'cairo_t **',
                    id(ctx) + object.__basicsize__)[0],
                incref=True)
        self.gc.ctx = ctx
class FigureCanvasGTK3Cairo(backend_gtk3.FigureCanvasGTK3,
                            backend_cairo.FigureCanvasCairo):
    """GTK3 canvas that renders the figure through cairo."""
    def __init__(self, figure):
        backend_gtk3.FigureCanvasGTK3.__init__(self, figure)
    def _renderer_init(self):
        """use cairo renderer"""
        self._renderer = RendererGTK3Cairo(self.figure.dpi)
    def _render_figure(self, width, height):
        # Resize the renderer's drawing surface, then draw the figure into it.
        self._renderer.set_width_height (width, height)
        self.figure.draw (self._renderer)
    def on_draw_event(self, widget, ctx):
        """ GtkDrawable draw event, like expose_event in GTK 2.X
        """
        # the _need_redraw flag doesn't work: it sometimes suppresses the
        # rendering and leaves the canvas blank, so we redraw unconditionally.
        #if self._need_redraw:
        self._renderer.set_context(ctx)
        allocation = self.get_allocation()
        x, y, w, h = allocation.x, allocation.y, allocation.width, allocation.height
        self._render_figure(w, h)
        #self._need_redraw = False
        return False  # finish event propagation?
class FigureManagerGTK3Cairo(backend_gtk3.FigureManagerGTK3):
    """Figure manager for the GTK3/cairo backend; inherits all behavior."""
    pass
def new_figure_manager(num, *args, **kwargs):
    """Create a figure manager for a freshly constructed figure.

    Honors an optional ``FigureClass`` keyword argument (defaulting to
    ``Figure``); the remaining arguments are forwarded to its constructor.
    """
    figure_class = kwargs.pop('FigureClass', Figure)
    figure = figure_class(*args, **kwargs)
    return new_figure_manager_given_figure(num, figure)
def new_figure_manager_given_figure(num, figure):
    """Create a figure manager wrapping an already-built *figure*.

    The figure is wrapped in a GTK3/cairo canvas which is handed to the
    manager together with the figure number.
    """
    return FigureManagerGTK3Cairo(FigureCanvasGTK3Cairo(figure), num)
# Names matplotlib's backend loader looks up by convention.
FigureCanvas = FigureCanvasGTK3Cairo
FigureManager = FigureManagerGTK3Cairo
show = backend_gtk3.show
| mit |
mixturemodel-flow/tensorflow | tensorflow/contrib/stateless/python/kernel_tests/stateless_random_ops_test.py | 54 | 3287 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for stateless random ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib import stateless
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import random_seed
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.platform import test
# Pairs of (stateless op, matching stateful op) exercised by the tests below.
CASES = [(stateless.stateless_random_uniform, random_ops.random_uniform),
         (stateless.stateless_random_normal, random_ops.random_normal),
         (stateless.stateless_truncated_normal, random_ops.truncated_normal)]
def invert_philox(key, value):
  """Run the ten Philox-4x32 rounds in reverse on *value* under *key*.

  Returns the counter block that the forward Philox bijection would map
  to *value*, as a length-4 array of 32-bit words.
  """
  key = np.array(key, dtype=np.uint32)
  value = np.array(value, dtype=np.uint32)
  # Per-round key increments ("Weyl" constants of Philox).
  weyl = np.array([0x9E3779B9, 0xBB67AE85], dtype=np.uint32)
  for rnd in reversed(range(10)):
    round_key0, round_key1 = key + rnd * weyl
    # Undo the multiplicative mixing: 0x991a7cdb and 0x6d7cae67 are the
    # modular inverses (mod 2**32) of the Philox multipliers below.
    a = value[3] * 0x991a7cdb & 0xffffffff
    c = value[1] * 0x6d7cae67 & 0xffffffff
    # Undo the xor with the high product halves and the round keys.
    b = (c * 0xCD9E8D57 >> 32) ^ value[0] ^ round_key0
    d = (a * 0xD2511F53 >> 32) ^ value[2] ^ round_key1
    value = a, b, c, d
  return np.array(value)
class StatelessOpsTest(test.TestCase):
  """Checks that stateless random ops match and are deterministic."""

  def testMatchStateful(self):
    """Stateless ops must reproduce the stateful ops' first draw."""
    # Stateless ops should be the same as stateful ops on the first call
    # after seed scrambling.
    key = 0x3ec8f720, 0x02461e29
    for seed in (7, 17), (11, 5), (2, 3):
      # Invert the Philox scrambling so the stateless op receives the
      # pre-image of the seed the stateful op will use internally, then
      # pack the four 32-bit words into two 64-bit seed values.
      preseed = invert_philox(key, (seed[0], 0, seed[1], 0)).astype(np.uint64)
      preseed = preseed[::2] | preseed[1::2] << 32
      random_seed.set_random_seed(seed[0])
      with self.test_session(use_gpu=True):
        for stateless_op, stateful_op in CASES:
          for shape in (), (3,), (2, 5):
            stateful = stateful_op(shape, seed=seed[1])
            pure = stateless_op(shape, seed=preseed)
            self.assertAllEqual(stateful.eval(), pure.eval())

  def testDeterminism(self):
    """Equal seeds give equal outputs; different seeds give different ones."""
    # Stateless values should be equal iff the seeds are equal (roughly)
    with self.test_session(use_gpu=True):
      seed_t = array_ops.placeholder(dtypes.int64, shape=[2])
      # Repeat each seed three times to exercise repeated evaluation.
      seeds = [(x, y) for x in range(5) for y in range(5)] * 3
      for stateless_op, _ in CASES:
        for shape in (), (3,), (2, 5):
          pure = stateless_op(shape, seed=seed_t)
          values = [(seed, pure.eval(feed_dict={seed_t: seed}))
                    for seed in seeds]
          for s0, v0 in values:
            for s1, v1 in values:
              self.assertEqual(s0 == s1, np.all(v0 == v1))
# Run the stateless random op tests when this file is executed directly.
if __name__ == '__main__':
  test.main()
| apache-2.0 |
thaumos/ansible | lib/ansible/modules/cloud/amazon/ec2_scaling_policy.py | 46 | 6822 | #!/usr/bin/python
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'community'}
DOCUMENTATION = """
module: ec2_scaling_policy
short_description: Create or delete AWS scaling policies for Autoscaling groups
description:
- Can create or delete scaling policies for autoscaling groups
- Referenced autoscaling groups must already exist
version_added: "1.6"
author: "Zacharie Eakin (@Zeekin)"
options:
state:
description:
- register or deregister the policy
required: true
choices: ['present', 'absent']
name:
description:
- Unique name for the scaling policy
required: true
asg_name:
description:
- Name of the associated autoscaling group
required: true
adjustment_type:
description:
- The type of change in capacity of the autoscaling group
required: false
choices: ['ChangeInCapacity','ExactCapacity','PercentChangeInCapacity']
scaling_adjustment:
description:
- The amount by which the autoscaling group is adjusted by the policy
required: false
min_adjustment_step:
description:
- Minimum amount of adjustment when policy is triggered
required: false
cooldown:
description:
- The minimum period of time between which autoscaling actions can take place
required: false
extends_documentation_fragment:
- aws
- ec2
"""
EXAMPLES = '''
- ec2_scaling_policy:
state: present
region: US-XXX
name: "scaledown-policy"
adjustment_type: "ChangeInCapacity"
asg_name: "slave-pool"
scaling_adjustment: -1
min_adjustment_step: 1
cooldown: 300
'''
try:
import boto.ec2.autoscale
import boto.exception
from boto.ec2.autoscale import ScalingPolicy
from boto.exception import BotoServerError
except ImportError:
pass # Taken care of by ec2.HAS_BOTO
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ec2 import (AnsibleAWSError, HAS_BOTO, connect_to_aws, ec2_argument_spec,
get_aws_connection_info)
def create_scaling_policy(connection, module):
    """Create or update the scaling policy described by the module params.

    Looks the policy up by name within the target autoscaling group.  If it
    does not exist it is created from the module parameters; otherwise each
    mutable attribute is compared against the parameters and the policy is
    re-pushed only when something changed.  Always terminates the module
    via exit_json/fail_json.
    """
    sp_name = module.params.get('name')
    adjustment_type = module.params.get('adjustment_type')
    asg_name = module.params.get('asg_name')
    scaling_adjustment = module.params.get('scaling_adjustment')
    min_adjustment_step = module.params.get('min_adjustment_step')
    cooldown = module.params.get('cooldown')
    scalingPolicies = connection.get_all_policies(as_group=asg_name, policy_names=[sp_name])
    if not scalingPolicies:
        # Policy is absent: build it from scratch and push it to AWS.
        sp = ScalingPolicy(
            name=sp_name,
            adjustment_type=adjustment_type,
            as_name=asg_name,
            scaling_adjustment=scaling_adjustment,
            min_adjustment_step=min_adjustment_step,
            cooldown=cooldown)
        try:
            connection.create_scaling_policy(sp)
            # Re-fetch so the reported values (e.g. policy_arn) come from AWS.
            policy = connection.get_all_policies(as_group=asg_name, policy_names=[sp_name])[0]
            module.exit_json(changed=True, name=policy.name, arn=policy.policy_arn, as_name=policy.as_name, scaling_adjustment=policy.scaling_adjustment,
                             cooldown=policy.cooldown, adjustment_type=policy.adjustment_type, min_adjustment_step=policy.min_adjustment_step)
        except BotoServerError as e:
            module.fail_json(msg=str(e))
    else:
        # Policy exists: detect drift between AWS state and module params.
        policy = scalingPolicies[0]
        changed = False
        # min_adjustment_step attribute is only relevant if the adjustment_type
        # is set to percentage change in capacity, so it is a special case
        if getattr(policy, 'adjustment_type') == 'PercentChangeInCapacity':
            if getattr(policy, 'min_adjustment_step') != module.params.get('min_adjustment_step'):
                changed = True
        # set the min adjustment step in case the user decided to change their
        # adjustment type to percentage
        setattr(policy, 'min_adjustment_step', module.params.get('min_adjustment_step'))
        # check the remaining attributes
        for attr in ('adjustment_type', 'scaling_adjustment', 'cooldown'):
            if getattr(policy, attr) != module.params.get(attr):
                changed = True
                setattr(policy, attr, module.params.get(attr))
        try:
            if changed:
                # create_scaling_policy also updates an existing policy in place.
                connection.create_scaling_policy(policy)
                policy = connection.get_all_policies(as_group=asg_name, policy_names=[sp_name])[0]
            module.exit_json(changed=changed, name=policy.name, arn=policy.policy_arn, as_name=policy.as_name, scaling_adjustment=policy.scaling_adjustment,
                             cooldown=policy.cooldown, adjustment_type=policy.adjustment_type, min_adjustment_step=policy.min_adjustment_step)
        except BotoServerError as e:
            module.fail_json(msg=str(e))
def delete_scaling_policy(connection, module):
    """Delete the named scaling policy from the autoscaling group, if present.

    Exits with changed=True when a policy was deleted, changed=False when no
    matching policy existed.
    """
    sp_name = module.params.get('name')
    asg_name = module.params.get('asg_name')
    scalingPolicies = connection.get_all_policies(as_group=asg_name, policy_names=[sp_name])
    if scalingPolicies:
        try:
            connection.delete_policy(sp_name, asg_name)
            module.exit_json(changed=True)
        except BotoServerError as e:
            # NOTE(review): unlike create_scaling_policy, an API error here is
            # reported as success (changed=False with a msg) rather than via
            # fail_json -- confirm whether this best-effort behavior is
            # intentional before changing it.
            module.exit_json(changed=False, msg=str(e))
    else:
        module.exit_json(changed=False)
def main():
    """Entry point: parse module arguments, connect to AWS, and dispatch
    to create or delete based on the requested state."""
    argument_spec = ec2_argument_spec()
    argument_spec.update(
        dict(
            name=dict(required=True, type='str'),
            adjustment_type=dict(type='str', choices=['ChangeInCapacity', 'ExactCapacity', 'PercentChangeInCapacity']),
            asg_name=dict(required=True, type='str'),
            scaling_adjustment=dict(type='int'),
            min_adjustment_step=dict(type='int'),
            cooldown=dict(type='int'),
            state=dict(default='present', choices=['present', 'absent']),
        )
    )
    module = AnsibleModule(argument_spec=argument_spec)
    # boto is an optional import at module load; fail cleanly if missing.
    if not HAS_BOTO:
        module.fail_json(msg='boto required for this module')
    region, ec2_url, aws_connect_params = get_aws_connection_info(module)
    state = module.params.get('state')
    try:
        connection = connect_to_aws(boto.ec2.autoscale, region, **aws_connect_params)
    except (boto.exception.NoAuthHandlerFound, AnsibleAWSError) as e:
        module.fail_json(msg=str(e))
    # Dispatch; both helpers terminate the module via exit_json/fail_json.
    if state == 'present':
        create_scaling_policy(connection, module)
    elif state == 'absent':
        delete_scaling_policy(connection, module)


if __name__ == '__main__':
    main()
| gpl-3.0 |
nimia/public_drown_scanner | pyx509/pkcs7/debug.py | 2 | 1176 |
#* pyx509 - Python library for parsing X.509
#* Copyright (C) 2009-2010 CZ.NIC, z.s.p.o. (http://www.nic.cz)
#*
#* This library is free software; you can redistribute it and/or
#* modify it under the terms of the GNU Library General Public
#* License as published by the Free Software Foundation; either
#* version 2 of the License, or (at your option) any later version.
#*
#* This library is distributed in the hope that it will be useful,
#* but WITHOUT ANY WARRANTY; without even the implied warranty of
#* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
#* Library General Public License for more details.
#*
#* You should have received a copy of the GNU Library General Public
#* License along with this library; if not, write to the Free
#* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#*
'''
Created on Dec 9, 2009
'''
def show_bytes(string):
print '--------------'
for byte in string:
print hex(ord(byte)),
print '\n--------------'
def write_to_file(what, where):
    """Serialize *what* with str() and write it to the file at path *where*.

    Uses a context manager so the file is closed even if the write raises;
    the previous open/write/close sequence leaked the handle on error.
    """
    with open(where, "w") as ff:
        ff.write(str(what))
| gpl-2.0 |
ericawright/bedrock | tests/pages/firefox/whatsnew/whatsnew_beta_74.py | 1 | 1190 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from selenium.webdriver.common.by import By
from pages.base import BasePage
class FirefoxWhatsNew74betaPage(BasePage):
    """Page object for the Firefox 74.0beta "What's New" page."""

    URL_TEMPLATE = '/{locale}/firefox/74.0beta/whatsnew/all/{params}'

    # CSS locators for the Monitor buttons shown to signed-out vs.
    # signed-in Firefox Account users.
    _signed_out_monitor_button_locator = (By.CSS_SELECTOR, '.show-fxa-supported-signed-out .js-fxa-product-button')
    _signed_in_monitor_button_locator = (By.CSS_SELECTOR, '.show-fxa-supported-signed-in .js-fxa-product-button')

    def wait_for_page_to_load(self):
        """Wait for the page URL and for FxA state detection to finish.

        Loading is considered complete once the body's class list no
        longer contains 'state-fxa-default'.
        """
        self.wait.until(lambda s: self.seed_url in s.current_url)
        el = self.find_element(By.TAG_NAME, 'body')
        self.wait.until(lambda s: 'state-fxa-default' not in el.get_attribute('class'))
        return self

    @property
    def is_signed_out_monitor_button_displayed(self):
        # True when the signed-out variant of the Monitor button is visible.
        return self.is_element_displayed(*self._signed_out_monitor_button_locator)

    @property
    def is_signed_in_monitor_button_displayed(self):
        # True when the signed-in variant of the Monitor button is visible.
        return self.is_element_displayed(*self._signed_in_monitor_button_locator)
| mpl-2.0 |
v-legoff/accertin | lyntin/engine.py | 1 | 32032 | #######################################################################
# This file is part of Lyntin.
# copyright (c) Free Software Foundation 2001, 2002
#
# Lyntin is distributed under the GNU General Public License license. See the
# file LICENSE for distribution details.
# $Id: engine.py,v 1.32 2004/05/10 13:59:02 glasssnake Exp $
#######################################################################
"""
This holds the X{engine} which both contains most of the other objects
that do work in Lyntin as well as encapsulates the event queue, event
handling methods, and some of the other singleton managers such as
the HelpManager and the CommandManager.
Engine also holds hooks to the various event types. Events will call
all appropriate hooks allowing you to add functionality via the modules
interface without changing the Lyntin internals.
The engine also holds a list of registered threads. This helps in
diagnostics. Use the methods in exported to handle spinning off
threads.
The Engine class is a singleton and the reference to it is stored in
"engine.Engine.instance". However, you should use the exported module
to access the engine using the "get_engine()" function.
X{startup_hook}::
The startup hook is called after Lyntin has bootstrapped itself
enough to allow everything to initialize itself.
Arg mapping: {}
X{shutdown_hook}::
When Lyntin is shutting down, this hook is called. It's possible
Lyntin might be in a state of disarray at this point, so it's not
clear what is and what is not available.
Arg mapping: {}
X{timer_hook}::
The timer hook spams all registered functions every second. This
is how the scheduler works.
Arg mapping: { "tick": int }
tick - the current tick (starts at 0)
X{from_user_hook}::
All input typed in by the user (as well as other things that
eventually go through the handleUserData method) get passed
through this hook (unless it's specified as internal). All
registered functions get to see the raw user data this way.
Arg mapping: {"data": string}
data - the user data passed into handleUserData
X{to_user_hook}::
This hook is for data to be displayed to the user. The UI listens
on this hook as do logger functions.
NOTE: Functions registered with this hook should NEVER call
exported.write* functions. That will result in an infinite loop and
then Lyntin will either hang or die. If you want to spit out output
use an OutputEvent.
Arg mapping: { "message": string }
message - The message to be displayed. This can be either a string
of a ui.base.Message object.
X{error_occurred_hook}::
Every time an event kicks up an unhandled error, we add one to
our error count and also spam this hook with the current
number of errors.
Arg mapping: { "count": int }
count - the current number of errors
X{too_many_errors_hook}::
When we hit the maximum number of errors, this hook gets spammed and
then Lyntin shuts down.
Arg mapping: {}
"""
import Queue, thread, sys, traceback, os.path
from threading import Thread
from lyntin import config, session, utils, event, exported, helpmanager, history, commandmanager, constants
class Engine:
"""
This is the engine class. There should be only one engine.
"""
instance = None
def __init__(self):
""" Initializes the engine."""
# this is the event queue that holds all the events in
# the system.
self._event_queue = Queue.Queue()
# this is a lock for writing stuff to the ui--makes sure
# we're not hosing things by having multiple things write
# to the ui simultaneously.... ick.
self._ui_lock = thread.allocate_lock()
# this is the master shutdown flag for the event queue
# handling.
self._shutdownflag = 0
# Lyntin counts the total number of errors it's encountered.
# This enables us to shut ourselves down if we encounter too
# many which indicates a "bigger problem".
self._errorcount = 0
# listeners exist at an engine level. if you sign up for
# an input hook, you get the input hook for ALL sessions.
# this might change at some point.... we'll see.
self._listeners = {}
self._managers = {}
# the help manager manages all the help content in a hierarchical
# structure.
self._managers["help"] = helpmanager.HelpManager(self)
# our config manager
self._managers["config"] = config.ConfigManager(self)
# our history manager
self._managers["history"] = history.HistoryManager(self)
# our command manager
self._managers["command"] = commandmanager.CommandManager(self)
# there is only one ui in the system.
self._ui = None
# current tick count
self._current_tick = 0
# list of registered threads
self._threads = []
# counts the total number of events processed--for diagnostics
self._num_events_processed = 0
# holds all the sessions
self._sessions = {}
# the current session. points to a Session object.
self._current_session = None
# map of hook name -> utils.PriorityQueue objects
self._hooks = {}
# we register ourselves with the shutdown hook
self.hookRegister("shutdown_hook", self.shutdown)
commonsession = session.Session(self)
commonsession.setName("common")
# this creates a "common" entry in all the managers that manage
# session scoped data--the session base is None
# for mem in self._managers.values():
# mem.addSession(commonsession, None)
self._sessions["common"] = commonsession
self._current_session = commonsession
self.hookRegister("user_filter_hook", self._managers["command"].filter, 100)
def _setupConfiguration(self):
"""
Goes through and sets up all the engine-specific configuration
pieces.
"""
c = self._managers["config"]
# this one doesn't seem to do anything
# c.add("variablechar", config.CharConfig("variablechar", "$", 0, "denotes variables"))
cops = config.options
c.add("repeathistory", config.BoolConfig("repeathistory",
utils.convert_boolean(cops.get("repeathistory", 1)), 0,
"Whether (yes) or not (no) we record repeated user input in the " +
"history buffer. For example, if you type \"north\" and then " +
"\"north\" again, if repeathistory is on, we record both. " +
"Otherwise we would only record the first one."))
c.add("commandchar", config.CharConfig("commandchar",
config.options.get("commandchar", "#"), 0,
"The character used to denote a command."))
c.add("debugmode", config.BoolConfig("debugmode",
utils.convert_boolean(cops.get("debugmode", 0)), 0,
"Debug mode helps you to figure out how your commands are being " +
"evaluated."))
c.add("promptdetection", config.BoolConfig("promptdetection",
utils.convert_boolean(cops.get("promptdetection", 0)), 0,
"Prompt detection is done in net.py when mud data comes in. " +
"This toggles whether we detect prompts or not. This won't help " +
"you unless you have a plugin which requires it."))
c.add("ansicolor", config.BoolConfig("ansicolor",
utils.convert_boolean(cops.get("ansicolor", 1)), 1,
"Allows you to enable or disable ansi color handling."))
c.add("mudecho", config.BoolConfig("mudecho",
utils.convert_boolean(cops.get("mudecho", 1)), 0,
"Whether (1) or not (0) we're echoing user input to the ui."))
c.add("datadir", config.StringConfig("datadir",
config.options["datadir"], 0,
"Default directory to find config files etc."))
self._sessions["common"].setupCommonSession()
### ------------------------------------------
### hook stuff
### ------------------------------------------
def checkHooks(self):
    """
    Summarizes every registered hook.

    @returns: one line per hook showing how many functions are
        registered with it
    @rtype: list of strings
    """
    return ["   %s - %d registered functions" % (name, hook.count())
            for (name, hook) in self._hooks.items()]
def getHook(self, hookname, newhook=1):
    """
    Retrieves the hook in question. If the hook doesn't
    exist and newhook==1, then we'll create a new hook.
    Otherwise, we'll return None.

    @param hookname: the name of the hook to retrieve
    @type hookname: string

    @param newhook: whether (1) or not (0) to create the hook if it
        does not already exist
    @type newhook: boolean

    @returns: the hook by name
    @rtype: utils.PriorityQueue
    """
    # Membership test replaces dict.has_key, which no longer exists on
    # Python 3; "in" behaves identically on Python 2.
    if hookname in self._hooks:
        return self._hooks[hookname]

    if newhook == 1:
        self._hooks[hookname] = utils.PriorityQueue()
        return self._hooks[hookname]

    return None
def hookRegister(self, hookname, func, place=constants.LAST):
    """
    Registers a function with a hook, creating the hook if it does
    not exist yet.

    @param hookname: the name of the hook
    @type hookname: string

    @param func: the function to register with the hook
    @type func: function

    @param place: the function will get this place in the call
        order. functions with the same place specified will get
        arbitrary ordering. defaults to constants.LAST.
    @type place: int
    """
    hook = self.getHook(hookname)
    # Identity comparison: None is a singleton, and "==" can be
    # intercepted by operator overloading ("place == None" was a bug
    # waiting to happen).
    if place is None:
        # let the PriorityQueue pick its own default placement
        hook.add(func)
    else:
        hook.add(func, place)
### ------------------------------------------
### thread stuff
### ------------------------------------------
def startthread(self, name, func):
    """
    Starts a named daemon thread and tracks it in the engine's
    thread list.

    @param name: the name of the thread to start
    @type name: string

    @param func: the function to run in the thread
    @type func: function
    """
    # clean up the list of threads that we maintain first
    self._threadCleanup()

    # create and initialize the new thread and stick it in our list.
    # The .daemon/.name properties (available since Python 2.6) replace
    # the deprecated camelCase setDaemon()/setName() accessors.
    t = Thread(None, func)
    t.daemon = True
    t.name = name
    t.start()
    self._threads.append(t)
def checkthreads(self):
    """
    Reports the liveness of every thread the engine is tracking.

    @return: one string for each thread indicating its status
    @rtype: list of strings
    """
    status = {0: "not alive", 1: "alive"}
    return ["   %s - %s" % (t.getName(), status[t.isAlive()])
            for t in self._threads]
def _threadCleanup(self):
"""
Removes threads which have ended.
"""
removeme = []
for i in range(len(self._threads)):
if self._threads[i].isAlive() == 0:
removeme.append(self._threads[i])
for mem in removeme:
self._threads.remove(mem)
### ------------------------------------------
### timer thread
### ------------------------------------------
def runtimer(self):
    """
    This timer thread sleeps for a second, then calls everything
    in the queue with the current tick.

    Note: This will almost always be slightly behind and will
    get worse as there are more things that get executed each
    tick.
    """
    # local imports keep module load order flexible at startup
    import time, event
    self._current_tick = 0
    while not self._shutdownflag:
        try:
            time.sleep(1)
            # fire the timer_hook with the tick number via the event queue
            event.SpamEvent(hookname="timer_hook",
                            argmap={"tick": self._current_tick}
                            ).enqueue()
            self._current_tick += 1
        except KeyboardInterrupt:
            return
        except SystemExit:
            return
        except:
            # any other error must not kill the timer thread
            exported.write_traceback("ticker: ticker hiccupped.")
### ------------------------------------------
### input/output stuff
### ------------------------------------------
def handleUserData(self, input, internal=0, session=None ):
    """
    This handles input lines from the user in a session-less context.
    The engine.handleUserData deals with global stuff and then
    passes the modified input to the session for session-oriented
    handling.  The session can call this method again with
    expanded input--this method is considered recursive.

    internal tells whether to spam the input hook and
    things of that nature.

    @param input: the data from the user
    @type input: string

    @param internal: whether this should be executed internally or not.
        0 if we should spam the input hook and record
        the input to the historymanager; 1 if we shouldn't
    @type internal: boolean

    @param session: the session scoping to execute this user input in
    @type session: session.Session instance

    @return: the commands that were actually executed (may not be
        exactly what the user typed--this is for the history manager)
    @rtype: string
    """
    if self._managers["config"].get("debugmode") == 1:
        exported.write_message("evaluating: %s" % input)

    # one user line may hold several ;-separated commands
    inputlist = utils.split_commands(input)
    if session == None:
        session = self._current_session

    # accumulates what actually got executed, for the history manager
    historyitems = []

    commandchar = self._managers["config"].get("commandchar")

    for mem in inputlist:
        # mem = mem.strip()

        # an empty line becomes the "#cr" command (send a carriage return)
        if len(mem) == 0:
            mem = commandchar + "cr"

        # if it's not internal we spam the hook with the raw input
        if internal == 0:
            exported.hook_spam("from_user_hook", {"data": mem})

        # !-prefixed input re-executes an item from the history buffer
        if mem.startswith("!"):
            memhistory = self.getManager("history").getHistoryItem(mem)
            if memhistory != None:
                self.handleUserData(memhistory, 1, session)
                historyitems.append(memhistory)
            continue

        # if it starts with a # it's a loop, session or command
        if len(mem) > 0 and mem.startswith(commandchar):

            # pull off the first token without the commandchar
            ses = mem.split(" ", 1)[0][1:]

            # is it a loop (aka repeating command)?
            if ses.isdigit():
                num = int(ses)
                if mem.find(" ") != -1:
                    command = mem.split(" ", 1)[1]
                    command = utils.strip_braces(command)
                    if num > 0:
                        for i in range(num):
                            loopcommand = self.handleUserData(command, 1, session)
                        # record the loop in canonical "#N {cmd}" form
                        historyitems.append(commandchar + ses + " {" + loopcommand + "}")
                continue

            # is it a session?
            if self._sessions.has_key(ses):
                input = mem.split(" ", 1)
                # "#sesname" alone switches sessions; "#sesname cmd"
                # executes cmd in that session without switching
                if len(input) < 2:
                    self._current_session = self._sessions[ses]
                    exported.write_message("%s now current session." % ses)
                else:
                    self.handleUserData(input[1], internal=1, session=self._sessions[ses])
                historyitems.append(mem)
                continue

            # is it "all" sessions?
            if ses == "all":
                newinput = mem.split(" ", 1)

                if len(newinput) > 1:
                    newinput = newinput[1]
                else:
                    newinput = commandchar + "cr"

                # broadcast to every real session (never "common")
                for sessionname in self._sessions.keys():
                    if sessionname != "common":
                        self._sessions[sessionname].handleUserData(newinput, internal)
                historyitems.append(mem)
                continue

        # if we get here then it is not a valid !-expression. and it's going
        # to the default session
        historyitems.append(mem)

        # no command char, so we pass it on to the session.handleUserData
        # to do session oriented things
        session.handleUserData(mem, internal)

    # we don't record internal stuff or input that isn't supposed
    # to be echo'd
    executed = ";".join(historyitems)
    if internal == 0 and self.getConfigManager().get("mudecho") == 1:
        self.getManager("history").recordHistory(executed)

    return executed
def handleMudData(self, session, text):
    """
    Routes text coming in from the mud to the session it belongs to.

    @param session: the session this mud data applies to
    @type session: session.Session instance

    @param text: the text coming from the mud
    @type text: string
    """
    if not session:
        # no owning session: surface the data to the user instead
        exported.write_message("Unhandled data:\n%s" % text)
        return
    session.handleMudData(text)
### ------------------------------------------
### session stuff
### ------------------------------------------
def createSession(self, name):
    """
    Creates a new session by copying the common session
    and registers the new session with the engine.

    @param name: the name of the session
    @type name: string

    @return: the new session
    @rtype: session.Session instance
    """
    ses = session.Session(self)
    ses.setName(name)
    # registerSession hands the "common" session to every manager so
    # the new session starts out with the shared defaults
    self.registerSession(ses, name)
    return ses
def registerSession(self, session, name):
    """
    Registers a session with the engine and with every manager.

    @param session: the session to register
    @type session: session.Session instance

    @param name: the name of the session
    @type name: string

    @raises ValueError: if the session has a non-unique name
    """
    # NOTE: the 'session' parameter shadows the module-level 'session'
    # import used elsewhere; the name is kept for caller compatibility.
    # "in" replaces dict.has_key, which was removed in Python 3.
    if name in self._sessions:
        raise ValueError("Session of that name already exists.")

    # every manager seeds the new session from the "common" template
    commonsession = self.getSession("common")
    for mem in self._managers.values():
        mem.addSession(session, commonsession)

    self._sessions[name] = session
def unregisterSession(self, ses):
    """
    Unregisters a session from the engine and from every manager.

    @param ses: the session to unregister
    @type ses: session.Session instance

    @raises ValueError: if no session of that name is registered
    """
    # "not in" replaces dict.has_key, which was removed in Python 3
    if ses.getName() not in self._sessions:
        raise ValueError("No session of that name.")

    # if we're dropping the active session, switch away from it first
    if ses == self._current_session:
        self.changeSession()

    for mem in self._managers.values():
        try:
            mem.removeSession(ses)
        # "as e" replaces the Python-2-only "except Exception, e" syntax
        except Exception as e:
            exported.write_error("Exception with removing session %s." % e)

    del self._sessions[ses.getName()]
def getSessions(self):
    """
    Returns a list of session names.

    @return: all the session names
    @rtype: list of strings
    """
    # explicit list() so callers get the documented list (dict.keys()
    # returns a view object on Python 3, a list on Python 2)
    return list(self._sessions.keys())
def getSession(self, name):
    """
    Returns a named session.

    @param name: the name of the session to retrieve
    @type name: string

    @return: the session of that name or None
    @rtype: session.Session or None
    """
    # dict.get supplies the None default directly and avoids
    # dict.has_key, which was removed in Python 3
    return self._sessions.get(name)
def changeSession(self, name=''):
    """
    Changes the current session to another named session.

    If they don't pass in a name, we get the next available
    non-common session if possible.

    @param name: the name of the session to switch to
    @type name: string
    """
    if name == '':
        # list() so we can mutate the copy (dict.keys() is a view on
        # Python 3); the original relied on Python 2 returning a list
        keys = list(self._sessions.keys())

        # it's a little bit of finagling here to make sure
        # that the common session is the last one we would
        # switch to
        name = self._current_session.getName()
        keys.remove(name)
        if name != "common":
            keys.remove("common")
            keys.append("common")
        self._current_session = self._sessions[keys[0]]

    # if they pass in a name, we switch to that session.
    # ("in" replaces dict.has_key, removed in Python 3)
    elif name in self._sessions:
        self._current_session = self._sessions[name]

    else:
        exported.write_error("No session of that name.")
        return

    exported.write_message("Switching to session '%s'." % self._current_session.getName())
def writeSession(self, message):
    """
    Sends *message* out over the current session's connection to
    the mud.  The message should be a string; anything else is
    unhealthy.

    @param message: the text to write to the mud
    @type message: string
    """
    self._current_session.write(message)
def closeSession(self, ses=None):
    """
    Closes down a session.

    @param ses: the session to close; defaults to the current session
    @type ses: session.Session instance

    @return: 1 if successful; 0 if not
    @rtype: boolean
    """
    if ses == None:
        ses = self._current_session

    # the "common" session is the shared template and must never close
    if ses.getName() == "common":
        exported.write_error("Can't close the common session.")
        return 0

    # NOTE(review): the meaning of the (1,) argument is defined by
    # session.shutdown, outside this chunk -- confirm there.
    ses.shutdown((1,))
    self.unregisterSession(ses)
    # stop the session's shutdown handler from firing at engine exit
    exported.hook_unregister("shutdown_hook", ses.shutdown)
    return 1
### ------------------------------------------
### event-handling/engine stuff
### ------------------------------------------
def _enqueue(self, event):
"""
Adds an event to the queue.
@param event: the new event to enqueue
@type event: event.Event
"""
self._event_queue.put(event)
def runengine(self):
    """
    This gets kicked off in a thread and just keep going through
    events until it detects a shutdown.
    """
    while not self._shutdownflag:
        try:
            # blocks on the event queue
            e = self._event_queue.get()
            e.execute()
        except KeyboardInterrupt:
            return
        except SystemExit:
            return
        except:
            # any other failure is tallied (tallyError shuts us down
            # after 20) rather than killing the event loop
            self.tallyError()
            exported.write_traceback("engine: unhandled error in engine.")
        self._num_events_processed += 1
    sys.exit(0)
def tallyError(self):
    """
    Adds one to the error count. If we see more than 20 errors, we shutdown.
    """
    self._errorcount = self._errorcount + 1
    exported.write_error("WARNING: Unhandled error encountered (%d out of %d)." 
              % (self._errorcount, 20))
    # let plugins observe every error...
    exported.hook_spam("error_occurred_hook", {"count": self._errorcount})
    if self._errorcount > 20:
        # ...and give them one last chance to react before we bail out
        exported.hook_spam("too_many_errors_hook", {})
        exported.write_error("Error count exceeded--shutting down.")
        sys.exit(0)
def shutdown(self, args):
    """
    Raises the engine's shutdown flag; the worker loops poll it and
    exit once it is set.

    @param args: unused; present for hook calling conventions
    """
    self._shutdownflag = 1
def getDiagnostics(self):
    """
    Returns some basic diagnostic information in the form of a string.
    This allows a user to monitor how Lyntin is doing in terms
    of events and other such erata.

    @return: the complete diagnostic data for our little happy
        mud client
    @rtype: string
    """
    data = []
    data.append("   events processed: %d" % self._num_events_processed)
    data.append("   queue size: %d" % self._event_queue.qsize())
    data.append("   ui: %s" % repr(self._ui))
    data.append("   ansicolor: %d" % self.getConfigManager().get("ansicolor"))
    data.append("   ticks: %d" % self._current_tick)
    data.append("   errors: %d" % self._errorcount)

    # print info from each session
    data.append("Sessions:")
    data.append("   total sessions: %d" % len(self._sessions))
    data.append("   current session: %s" % self._current_session.getName())

    for mem in self._sessions.values():
        # we do some fancy footwork here to make it print nicely
        # (re-indent the per-session status lines under the session)
        info = "\n   ".join(self.getStatus(mem))
        data.append('   %s\n' % info)

    return "\n".join(data)
### ------------------------------------------
### user interface stuff
### ------------------------------------------
def setUI(self, newui):
    """
    Installs *newui* as the engine's user interface.

    @param newui: the new ui to set
    @type newui: ui.base.BaseUI subclass
    """
    self._ui = newui
def getUI(self):
    """
    Hands back the currently installed user interface.

    @return: the ui
    @rtype: ui.base.BaseUI subclass
    """
    return self._ui
def writeUI(self, text):
    """
    Writes a message to the ui.

    This method uses a lock so that multiple threads can write
    to the ui without intersecting and crashing the python process.

    Theoretically you should use the exported module to write
    things to the ui--it calls this method.

    @param text: the message to write to the ui
    @type text: string or ui.base.Message
    """
    # blocking acquire: wait until no other thread is mid-write
    self._ui_lock.acquire(1)
    try:
        # the ui (and anything else listening) receives the message
        # via the to_user_hook
        exported.hook_spam("to_user_hook", {"message": text})
    finally:
        # always release, even if a hooked function raises
        self._ui_lock.release()
def writePrompt(self):
    """Asks the ui to display a prompt, if a ui has been installed."""
    if not self._ui:
        return
    self._ui.prompt()
def flushUI(self):
    """Asks the ui to flush any buffered output."""
    self._ui.flush()
### ------------------------------------------------
### config functions
### ------------------------------------------------
def getConfigManager(self):
    """
    Convenience accessor for the engine's config manager.

    @return: the config manager instance
    """
    return self._managers["config"]
### ------------------------------------------------
### Manager functions
### ------------------------------------------------
def addManager(self, name, manager):
    """
    Registers *manager* under *name*, replacing any manager already
    registered under that name.

    @param name: the name of the manager to add
    @type name: string

    @param manager: the manager instance to add
    @type manager: manager.Manager subclass
    """
    self._managers[name] = manager
def removeManager(self, name):
    """
    Removes a manager from our list.

    @param name: the name of the manager to remove
    @type name: string

    @return: 0 if nothing happened, 1 if the manager was removed
    @rtype: boolean
    """
    # "in" replaces dict.has_key, which was removed in Python 3
    if name in self._managers:
        del self._managers[name]
        return 1
    return 0
def getManager(self, name):
    """
    Retrieves a manager by name.

    @param name: the name of the manager to retrieve
    @type name: string

    @return: the manager instance or None
    @rtype: manager.Manager subclass or None
    """
    # dict.get supplies the None default directly and avoids
    # dict.has_key, which was removed in Python 3
    return self._managers.get(name)
### ------------------------------------------------
### Status stuff
### ------------------------------------------------
def getStatus(self, ses):
    """
    Gets the status for a specific session: the session's own status
    lines followed by one line per manager that has something to say.

    @param ses: the session to get status for
    @type ses: session.Session

    @return: the status of the session
    @rtype: list of strings
    """
    # call session.getStatus() and get status from it too
    data = ses.getStatus()

    # sorted() works with both Python 2 lists and Python 3 dict views;
    # the old keys()/sort() pair breaks on views
    for mem in sorted(self._managers.keys()):
        temp = self.getManager(mem).getStatus(ses)
        if temp:
            data.append("   %s: %s" % (mem, temp))

    # return the list of elements
    return data
def shutdown():
    """
    This gets called by the Python interpreter atexit.  The reason
    we do shutdown stuff here is we're more likely to catch things
    here than we are to let everything cycle through the
    ShutdownEvent.  This should probably get fixed up at some point
    in the future.

    Do not call this elsewhere.
    """
    import exported
    try:
        exported.write_message("shutting down... goodbye.")
    # narrow the old bare "except:" so SystemExit/KeyboardInterrupt
    # are not swallowed
    except Exception:
        # the ui may already be torn down at interpreter exit; fall
        # back to plain stdout (print as a function works on 2 and 3)
        print("shutting down... goodbye.")
    exported.hook_spam("shutdown_hook", {})
def main(defaultoptions={}):
    """
    This parses the command line arguments and makes sure they're all valid,
    instantiates a ui, does some setup, spins off an engine thread, and
    goes into the ui's mainloop.

    @param defaultoptions: the boot options to use.  we update the
        config.options dict with these options--this is the easiest
        way to override the ui, moduledir, datadir, et al from a
        Lyntin run script.
    @type defaultoptions: dict
    """
    # NOTE(review): the mutable default argument is shared across calls;
    # harmless here because main() runs once, but worth knowing.
    startuperrors = []
    try:
        import sys, os, traceback, ConfigParser
        from lyntin import config, event, utils, exported
        from lyntin.ui import base
        import locale
        locale.setlocale(locale.LC_ALL, '')

        # boot options from the run script seed config.options
        config.options.update(defaultoptions)

        # read through options and arguments
        optlist = utils.parse_args(sys.argv[1:])

        for mem in optlist:
            if mem[0] == '--help':
                print constants.HELPTEXT
                sys.exit(0)

            elif mem[0] == '--version':
                print constants.VERSION
                sys.exit(0)

            elif mem[0] in ["--configuration", "-c"]:
                # ini files OVERRIDE the default options
                # they can provide multiple ini files, but each new
                # ini file will OVERRIDE the contents of the previous ini file
                # where the two files intersect.
                parser = ConfigParser.ConfigParser()
                parser.read([mem[1]])

                newoptions = {}
                for s in parser.sections():
                    for o in parser.options(s):
                        c = parser.get(s, o).split(",")
                        if newoptions.has_key(o):
                            newoptions[o] += c
                        else:
                            newoptions[o] = c

                config.options.update(newoptions)

            else:
                # strip any number of leading dashes off the option name
                opt = mem[0]
                while opt.startswith("-"):
                    opt = opt[1:]

                if len(opt) > 0:
                    # repeated options accumulate into a list
                    if config.options.has_key(opt):
                        if type(config.options[opt]) is list:
                            config.options[opt].append(mem[1])
                        else:
                            config.options[opt] = mem[1]
                    else:
                        config.options[opt] = [mem[1]]

        # these three are single-valued: collapse accumulated lists
        for mem in ["datadir", "ui", "commandchar"]:
            if config.options.has_key(mem) and type(config.options[mem]) is list:
                config.options[mem] = config.options[mem][0]

        # if they haven't set the datadir via the command line, then
        # we go see if they have a HOME in their environment variables....
        if not config.options["datadir"]:
            if os.environ.has_key("HOME"):
                config.options["datadir"] = os.environ["HOME"]
        config.options["datadir"] = utils.fixdir(config.options["datadir"])

        # make sure shutdown() runs even on abnormal interpreter exit
        import atexit
        atexit.register(shutdown)

        # instantiate the engine
        Engine.instance = Engine()
        exported.myengine = Engine.instance
        Engine.instance._setupConfiguration()

        # instantiate the ui
        uiinstance = None
        try:
            uiname = str(config.options['ui'])
            modulename = uiname + "ui"
            uiinstance = base.get_ui(modulename)
            if not uiinstance:
                raise ValueError("No ui instance.")
        except Exception, e:
            print "Cannot start '%s': %s" % (uiname, e)
            traceback.print_exc()
            sys.exit(0)

        Engine.instance.setUI(uiinstance)
        exported.write_message("UI started.")

        # errors collected before the ui existed get reported now
        for mem in startuperrors:
            exported.write_error(mem)

        # do some more silly initialization stuff
        # adds the .lyntinrc file to the readfile list if it exists.
        if config.options["datadir"]:
            lyntinrcfile = config.options["datadir"] + ".lyntinrc"
            if os.path.exists(lyntinrcfile):
                # we want the .lyntinrc file read in first, so then other
                # files can overwrite the contents therein
                config.options['readfile'].insert(0, lyntinrcfile)

        # import modules listed in modulesinit
        exported.write_message("Loading Lyntin modules.")

        try:
            import modules.__init__
            modules.__init__.load_modules()
        except:
            exported.write_traceback("Modules did not load correctly.")
            sys.exit(1)

        # spam the startup hook
        exported.hook_spam("startup_hook", {})

        commandchar = Engine.instance._managers["config"].get("commandchar")

        # handle command files
        for mem in config.options['readfile']:
            exported.write_message("Reading in file " + mem)
            # we have to escape windows os separators because \ has a specific
            # meaning in the argparser
            mem = mem.replace("\\", "\\\\")
            exported.lyntin_command("%sread %s" % (commandchar, mem), internal=1)

        # we're done initialization!
        exported.write_message(constants.STARTUPTEXT)
        Engine.instance.writePrompt()

        # start the timer thread
        Engine.instance.startthread("timer", Engine.instance.runtimer)

        # we ask the ui if they want the main thread of execution and
        # handle accordingly
        if Engine.instance._ui.wantMainThread() == 0:
            Engine.instance.startthread("ui", Engine.instance._ui.runui)
            Engine.instance.runengine()
        else:
            Engine.instance.startthread("engine", Engine.instance.runengine)
            Engine.instance._ui.runui()

    except SystemExit:
        # we do this because the engine is blocking on events....
        if Engine.instance:
            event.ShutdownEvent().enqueue()

    except:
        import traceback
        traceback.print_exc()
        if Engine.instance:
            try:
                event.ShutdownEvent().enqueue()
            except:
                pass
        sys.exit(1)
# Local variables:
# mode:python
# py-indent-offset:2
# tab-width:2
# End:
| gpl-3.0 |
Jyrno42/EstLan-Web | EstLan/accounts/migrations/0001_initial.py | 1 | 7021 | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
    # Auto-generated South schema migration: creates the EstLanUser
    # table plus the two M2M join tables (groups, user_permissions).

    # Adding model 'EstLanUser'
    db.create_table(u'accounts_estlanuser', (
        (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
        ('password', self.gf('django.db.models.fields.CharField')(max_length=128)),
        ('last_login', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now)),
        ('is_superuser', self.gf('django.db.models.fields.BooleanField')(default=False)),
        ('username', self.gf('django.db.models.fields.CharField')(unique=True, max_length=30)),
        ('first_name', self.gf('django.db.models.fields.CharField')(max_length=30, blank=True)),
        ('last_name', self.gf('django.db.models.fields.CharField')(max_length=30, blank=True)),
        ('email', self.gf('django.db.models.fields.EmailField')(max_length=75, blank=True)),
        ('is_staff', self.gf('django.db.models.fields.BooleanField')(default=False)),
        ('is_active', self.gf('django.db.models.fields.BooleanField')(default=True)),
        ('date_joined', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now)),
        ('gender', self.gf('django.db.models.fields.CharField')(default=u'm', max_length=1)),
        ('date_of_birth', self.gf('django.db.models.fields.DateField')(null=True, blank=True)),
        ('slug', self.gf('django.db.models.fields.SlugField')(unique=True, max_length=16, blank=True)),
        ('selected_avatar', self.gf('django.db.models.fields.CharField')(default=u'fb', max_length=2)),
    ))
    db.send_create_signal(u'accounts', ['EstLanUser'])

    # Adding M2M table for field groups on 'EstLanUser'
    m2m_table_name = db.shorten_name(u'accounts_estlanuser_groups')
    db.create_table(m2m_table_name, (
        ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
        ('estlanuser', models.ForeignKey(orm[u'accounts.estlanuser'], null=False)),
        ('group', models.ForeignKey(orm[u'auth.group'], null=False))
    ))
    db.create_unique(m2m_table_name, ['estlanuser_id', 'group_id'])

    # Adding M2M table for field user_permissions on 'EstLanUser'
    m2m_table_name = db.shorten_name(u'accounts_estlanuser_user_permissions')
    db.create_table(m2m_table_name, (
        ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
        ('estlanuser', models.ForeignKey(orm[u'accounts.estlanuser'], null=False)),
        ('permission', models.ForeignKey(orm[u'auth.permission'], null=False))
    ))
    db.create_unique(m2m_table_name, ['estlanuser_id', 'permission_id'])
def backwards(self, orm):
    # Reverse migration: drops everything forwards() created.

    # Deleting model 'EstLanUser'
    db.delete_table(u'accounts_estlanuser')

    # Removing M2M table for field groups on 'EstLanUser'
    db.delete_table(db.shorten_name(u'accounts_estlanuser_groups'))

    # Removing M2M table for field user_permissions on 'EstLanUser'
    db.delete_table(db.shorten_name(u'accounts_estlanuser_user_permissions'))
models = {
u'accounts.estlanuser': {
'Meta': {'object_name': 'EstLanUser'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_of_birth': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'gender': ('django.db.models.fields.CharField', [], {'default': "u'm'", 'max_length': '1'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'selected_avatar': ('django.db.models.fields.CharField', [], {'default': "u'fb'", 'max_length': '2'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '16', 'blank': 'True'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['accounts'] | gpl-3.0 |
tragiclifestories/django | tests/model_options/models/tablespaces.py | 342 | 1853 | from django.db import models
# Since the test database doesn't have tablespaces, it's impossible for Django
# to create the tables for models where db_tablespace is set. To avoid this
# problem, we mark the models as unmanaged, and temporarily revert them to
# managed during each test. We also set them to use the same tables as the
# "reference" models to avoid errors when other tests run 'migrate'
# (proxy_models_inheritance does).
class ScientistRef(models.Model):
    # Reference model: owns the real table that the unmanaged
    # 'Scientist' model below maps onto via db_table.
    name = models.CharField(max_length=50)
class ArticleRef(models.Model):
    # Reference model: owns the real tables (including the two M2M join
    # tables) that the unmanaged 'Article' model below maps onto.
    title = models.CharField(max_length=50, unique=True)
    code = models.CharField(max_length=50, unique=True)
    authors = models.ManyToManyField(ScientistRef, related_name='articles_written_set')
    reviewers = models.ManyToManyField(ScientistRef, related_name='articles_reviewed_set')
class Scientist(models.Model):
    name = models.CharField(max_length=50)

    class Meta:
        # reuse the ScientistRef table so 'migrate' never tries to
        # create one in the (nonexistent) tablespace
        db_table = 'model_options_scientistref'
        db_tablespace = 'tbl_tbsp'
        # unmanaged by default; tests temporarily flip this to managed
        managed = False
class Article(models.Model):
    title = models.CharField(max_length=50, unique=True)
    # per-field index tablespace, distinct from the model's tablespace
    code = models.CharField(max_length=50, unique=True, db_tablespace='idx_tbsp')
    authors = models.ManyToManyField(Scientist, related_name='articles_written_set')
    reviewers = models.ManyToManyField(Scientist, related_name='articles_reviewed_set', db_tablespace='idx_tbsp')

    class Meta:
        # reuse the ArticleRef table so 'migrate' never tries to
        # create one in the (nonexistent) tablespace
        db_table = 'model_options_articleref'
        db_tablespace = 'tbl_tbsp'
        # unmanaged by default; tests temporarily flip this to managed
        managed = False
# Also set the tables for automatically created models
# (the implicit M2M "through" models must point at ArticleRef's join
# tables for the same reason the models above reuse the *Ref tables)
Authors = Article._meta.get_field('authors').remote_field.through
Authors._meta.db_table = 'model_options_articleref_authors'
Reviewers = Article._meta.get_field('reviewers').remote_field.through
Reviewers._meta.db_table = 'model_options_articleref_reviewers'
| bsd-3-clause |
PLyczkowski/Sticky-Keymap | 2.74/scripts/addons_contrib/object_physics_meadow/__init__.py | 1 | 1301 | ### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# <pep8 compliant>
bl_info = {
"name": "Meadow",
"author": "Lukas Toenne",
"version": (0, 1, 0),
"blender": (2, 7, 2),
"location": "Scene Properties",
"description": "Efficient large-scale grass simulation",
"warning": "",
"category": "Development"}
import bpy
from object_physics_meadow import settings, ui
def register():
    """Blender add-on entry point: registers the settings and ui submodules."""
    settings.register()
    ui.register()
def unregister():
    """Blender add-on exit point: unregisters the settings and ui submodules."""
    settings.unregister()
    ui.unregister()
if __name__ == "__main__":
register()
| gpl-2.0 |
bretttegart/treadmill | tests/vipfile_test.py | 1 | 2068 | """Unit test for vipfile.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import io
import os
import shutil
import tempfile
import threading
import unittest
# Disable W0611: Unused import
import tests.treadmill_test_skip_windows # pylint: disable=W0611
import six
from treadmill import vipfile
class VipFileTest(unittest.TestCase):
    """Tests for treadmill.vipfile."""

    def setUp(self):
        # vip allocations live under self.vips_dir; 15 owner files
        # (named "0".."14") act as the allocating principals
        self.root = tempfile.mkdtemp()
        self.vips_dir = os.path.join(self.root, 'vips')
        owner_dirs = os.path.join(self.root, 'owners')
        os.mkdir(owner_dirs)
        for owner in six.moves.range(0, 15):
            with io.open(os.path.join(owner_dirs, str(owner)), 'w'):
                pass
        self.vips = vipfile.VipMgr(self.vips_dir, owner_dirs)

    def tearDown(self):
        if self.root and os.path.isdir(self.root):
            shutil.rmtree(self.root)

    def test_alloc(self):
        """Verifies that vips are allocated atomically with no duplicates."""
        vips = set()

        def alloc_thread(idx):
            """Allocate container ip."""
            ip0 = self.vips.alloc(str(idx))
            vips.add(ip0)

        # one concurrent allocation per owner
        threads = []
        for i in range(0, 15):
            threads.append(threading.Thread(target=alloc_thread, args=(i,)))
        for thread in threads:
            thread.start()
        for thread in threads:
            thread.join()

        # a duplicate allocation would collapse the set below 15
        self.assertEqual(len(threads), len(vips))

    def test_free(self):
        """Tests freeing the resource."""
        owner = '3'
        ip0 = self.vips.alloc(owner)
        self.assertTrue(os.path.exists(os.path.join(self.vips_dir, ip0)))
        self.vips.free(owner, ip0)
        self.assertFalse(os.path.exists(os.path.join(self.vips_dir, ip0)))

        # Calling free twice is noop.
        self.vips.free(owner, ip0)
        self.assertFalse(os.path.exists(os.path.join(self.vips_dir, ip0)))
if __name__ == '__main__':
unittest.main()
| apache-2.0 |
Sorsly/subtle | google-cloud-sdk/platform/gsutil/gslib/ls_helper.py | 5 | 11067 | # -*- coding: utf-8 -*-
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility functions and class for listing commands such as ls and du."""
from __future__ import absolute_import
import fnmatch
from gslib.cloud_api import EncryptionException
from gslib.exception import CommandException
from gslib.plurality_checkable_iterator import PluralityCheckableIterator
from gslib.util import IS_WINDOWS
from gslib.util import UTF8
from gslib.wildcard_iterator import StorageUrlFromString
# Object fields whose values are only retrievable when the request can be
# served with a matching decryption key.
ENCRYPTED_FIELDS = ['md5Hash', 'crc32c']
# Fields requested for a detailed listing when no matching decryption key is
# available: everything except the hash/CRC fields above.
UNENCRYPTED_FULL_LISTING_FIELDS = [
    'acl', 'cacheControl', 'componentCount', 'contentDisposition',
    'contentEncoding', 'contentLanguage', 'contentType',
    'customerEncryption', 'etag', 'generation', 'metadata',
    'metageneration', 'size', 'storageClass', 'timeCreated', 'timeDeleted',
    'updated']
def PrintNewLine():
  """Default function for printing new lines between directories."""
  # Python 2 print statement: emits a single bare newline.
  print
def PrintDirHeader(bucket_listing_ref):
  """Default function for printing headers for prefixes.

  Header is printed prior to listing the contents of the prefix.

  Args:
    bucket_listing_ref: BucketListingRef of type PREFIX.
  """
  # Encode explicitly so non-ASCII object names print cleanly on Python 2.
  print '%s:' % bucket_listing_ref.url_string.encode(UTF8)
def PrintBucketHeader(bucket_listing_ref):  # pylint: disable=unused-argument
  """Default no-op header printer for buckets.

  Called prior to listing the contents of a bucket; the default prints
  nothing.

  Args:
    bucket_listing_ref: BucketListingRef of type BUCKET.
  """
  return None
def PrintDir(bucket_listing_ref):
  """Default function for printing buckets or prefixes.

  Args:
    bucket_listing_ref: BucketListingRef of type BUCKET or PREFIX.
  """
  # Encode explicitly so non-ASCII names print cleanly on Python 2.
  print bucket_listing_ref.url_string.encode(UTF8)
# pylint: disable=unused-argument
def PrintDirSummary(num_bytes, bucket_listing_ref):
  """Off-by-default size-summary printer for buckets or prefixes.

  The default implementation intentionally prints nothing; callers install
  a real printer when summaries are wanted.

  Args:
    num_bytes: Number of bytes contained in the directory.
    bucket_listing_ref: BucketListingRef of type BUCKET or PREFIX.
  """
  return None
def PrintObject(bucket_listing_ref):
  """Default printing function for objects.

  Args:
    bucket_listing_ref: BucketListingRef of type OBJECT.

  Returns:
    (num_objects, num_bytes): one object listed; this short-listing printer
    always reports zero bytes.
  """
  try:
    print bucket_listing_ref.url_string.encode(UTF8)
  except IOError as e:
    # Windows throws an IOError 0 here for object names containing Unicode
    # chars. Ignore it.
    if not (IS_WINDOWS and e.errno == 0):
      raise
  return (1, 0)
class LsHelper(object):
  """Helper class for ls and du."""

  def __init__(self, iterator_func, logger,
               print_object_func=PrintObject,
               print_dir_func=PrintDir,
               print_dir_header_func=PrintDirHeader,
               print_bucket_header_func=PrintBucketHeader,
               print_dir_summary_func=PrintDirSummary,
               print_newline_func=PrintNewLine,
               all_versions=False, should_recurse=False,
               exclude_patterns=None, fields=('name',),
               list_subdir_contents=True):
    """Initializes the helper class to prepare for listing.

    Args:
      iterator_func: Function for instantiating iterator.
                     Inputs-
                       url_string- Url string to iterate on. May include
                                   wildcards.
                       all_versions=False- If true, iterate over all object
                                           versions.
      logger: Logger for outputting warnings / errors.
      print_object_func: Function for printing objects.
      print_dir_func:    Function for printing buckets/prefixes.
      print_dir_header_func: Function for printing header line for buckets
                             or prefixes.
      print_bucket_header_func: Function for printing header line for buckets
                                or prefixes.
      print_dir_summary_func: Function for printing size summaries about
                              buckets/prefixes.
      print_newline_func: Function for printing new lines between dirs.
      all_versions:      If true, list all object versions.
      should_recurse:    If true, recursively listing buckets/prefixes.
      exclude_patterns:  Patterns to exclude when listing.
      fields:            Fields to request from bucket listings; this should
                         include all fields that need to be populated in
                         objects so they can be listed. Can be set to None
                         to retrieve all object fields. Defaults to short
                         listing fields.
      list_subdir_contents: If true, return the directory and any contents,
                            otherwise return only the directory itself.
    """
    self._iterator_func = iterator_func
    self.logger = logger
    self._print_object_func = print_object_func
    self._print_dir_func = print_dir_func
    self._print_dir_header_func = print_dir_header_func
    self._print_bucket_header_func = print_bucket_header_func
    self._print_dir_summary_func = print_dir_summary_func
    self._print_newline_func = print_newline_func
    self.all_versions = all_versions
    self.should_recurse = should_recurse
    self.exclude_patterns = exclude_patterns
    self.bucket_listing_fields = fields
    self.list_subdir_contents = list_subdir_contents

  def ExpandUrlAndPrint(self, url):
    """Iterates over the given URL and calls print functions.

    Args:
      url: StorageUrl to iterate over.

    Returns:
      (num_dirs, num_objects, num_bytes) totals for everything iterated.
    """
    num_objects = 0
    num_dirs = 0
    num_bytes = 0
    print_newline = False

    if url.IsBucket() or self.should_recurse:
      # IsBucket() implies a top-level listing.
      if url.IsBucket():
        self._print_bucket_header_func(url)
      return self._RecurseExpandUrlAndPrint(url.url_string,
                                            print_initial_newline=False)
    else:
      # User provided a prefix or object URL, but it's impossible to tell
      # which until we do a listing and see what matches.
      top_level_iterator = PluralityCheckableIterator(self._iterator_func(
          url.CreatePrefixUrl(wildcard_suffix=None),
          all_versions=self.all_versions).IterAll(
              expand_top_level_buckets=True,
              bucket_listing_fields=self.bucket_listing_fields))
      plurality = top_level_iterator.HasPlurality()

      try:
        top_level_iterator.PeekException()
      except EncryptionException:
        # Detailed listing on a single object can perform a GetObjectMetadata
        # call, which raises if a matching encryption key isn't found.
        # Re-iterate without requesting encrypted fields.
        top_level_iterator = PluralityCheckableIterator(self._iterator_func(
            url.CreatePrefixUrl(wildcard_suffix=None),
            all_versions=self.all_versions).IterAll(
                expand_top_level_buckets=True,
                bucket_listing_fields=UNENCRYPTED_FULL_LISTING_FIELDS))
        plurality = top_level_iterator.HasPlurality()

      for blr in top_level_iterator:
        if self._MatchesExcludedPattern(blr):
          continue

        if blr.IsObject():
          nd = 0
          no, nb = self._print_object_func(blr)
          print_newline = True
        elif blr.IsPrefix():
          # Separate consecutive subdirectories with a blank line, but not
          # before the very first entry printed.
          if print_newline:
            self._print_newline_func()
          else:
            print_newline = True
          if plurality and self.list_subdir_contents:
            self._print_dir_header_func(blr)
          elif plurality and not self.list_subdir_contents:
            print_newline = False
          expansion_url_str = StorageUrlFromString(
              blr.url_string).CreatePrefixUrl(
                  wildcard_suffix='*' if self.list_subdir_contents else None)
          nd, no, nb = self._RecurseExpandUrlAndPrint(expansion_url_str)
          self._print_dir_summary_func(nb, blr)
        else:
          # We handle all buckets at the top level, so this should never
          # happen.
          raise CommandException(
              'Sub-level iterator returned a CsBucketListingRef of type Bucket')
        num_objects += no
        num_dirs += nd
        num_bytes += nb
      return num_dirs, num_objects, num_bytes

  def _RecurseExpandUrlAndPrint(self, url_str, print_initial_newline=True):
    """Iterates over the given URL string and calls print functions.

    Args:
      url_str: String describing StorageUrl to iterate over.
               Must be of depth one or higher.
      print_initial_newline: If true, print a newline before recursively
                             expanded prefixes.

    Returns:
      (num_dirs, num_objects, num_bytes) totals for everything iterated.
    """
    num_objects = 0
    num_dirs = 0
    num_bytes = 0
    for blr in self._iterator_func(
        '%s' % url_str, all_versions=self.all_versions).IterAll(
            expand_top_level_buckets=True,
            bucket_listing_fields=self.bucket_listing_fields):
      if self._MatchesExcludedPattern(blr):
        continue

      if blr.IsObject():
        nd = 0
        no, nb = self._print_object_func(blr)
      elif blr.IsPrefix():
        if self.should_recurse:
          if print_initial_newline:
            self._print_newline_func()
          else:
            print_initial_newline = True
          self._print_dir_header_func(blr)
          expansion_url_str = StorageUrlFromString(
              blr.url_string).CreatePrefixUrl(wildcard_suffix='*')

          nd, no, nb = self._RecurseExpandUrlAndPrint(expansion_url_str)
          self._print_dir_summary_func(nb, blr)
        else:
          # Non-recursive listing: count the prefix itself, don't descend.
          nd, no, nb = 1, 0, 0
          self._print_dir_func(blr)
      else:
        # We handle all buckets at the top level, so this should never happen.
        raise CommandException(
            'Sub-level iterator returned a bucketListingRef of type Bucket')
      num_dirs += nd
      num_objects += no
      num_bytes += nb

    return num_dirs, num_objects, num_bytes

  def _MatchesExcludedPattern(self, blr):
    """Checks bucket listing reference against patterns to exclude.

    Args:
      blr: BucketListingRef to check.

    Returns:
      True if reference matches a pattern and should be excluded.
    """
    if self.exclude_patterns:
      tomatch = blr.url_string
      for pattern in self.exclude_patterns:
        if fnmatch.fnmatch(tomatch, pattern):
          return True
    return False
| mit |
ic-hep/DIRAC | ResourceStatusSystem/scripts/dirac-rss-sync.py | 1 | 8568 | #!/usr/bin/env python
"""
dirac-rss-sync
Script that synchronizes the resources described on the CS with the RSS.
By default, it sets their Status to `Unknown`, StatusType to `all` and
reason to `Synchronized`. However, it can copy over the status on the CS to
the RSS. Important: If the StatusType is not defined on the CS, it will set
it to Banned !
Usage:
dirac-rss-sync
--init Initialize the element to the status in the CS ( applicable for StorageElements )
--element= Element family to be Synchronized ( Site, Resource or Node ) or `all`
Verbosity:
-o LogLevel=LEVEL NOTICE by default, levels available: INFO, DEBUG, VERBOSE..
"""
from datetime import datetime, timedelta
from DIRAC import version, gLogger, exit as DIRACExit, S_OK
from DIRAC.Core.Base import Script
__RCSID__ = '$Id$'

# Populated during the module-level "Script initialization" step below.
subLogger = None
switchDict = {}

# Status written for statusTypes declared in the RSS configuration but
# missing from a storage element's CS section (see initSEs backtracking).
DEFAULT_STATUS = 'Banned'

#Add 24 hours to the datetime (it is going to be inserted in the "TokenExpiration" Column of "SiteStatus")
Datetime = datetime.utcnow() + timedelta(hours=24)
def registerSwitches():
  '''
  Registers all switches that can be used while calling the script from the
  command line interface.
  '''
  # Each switch: no short form, long form, help text.
  Script.registerSwitch( '', 'init',
                         'Initialize the element to the status in the CS ( applicable for StorageElements )' )
  Script.registerSwitch( '', 'element=',
                         'Element family to be Synchronized ( Site, Resource or Node ) or `all`' )
def registerUsageMessage():
  '''
  Takes the script __doc__ and adds the DIRAC version to it
  '''
  hLine = ' ' + '=' * 78 + '\n'
  # Same text as successive concatenation, assembled in a single join.
  usageMessage = ''.join( [ hLine,
                            ' DIRAC %s\n' % version,
                            __doc__,
                            '\n',
                            hLine ] )
  Script.setUsageMessage( usageMessage )
def parseSwitches():
  '''
  Parses the arguments passed by the user
  '''
  Script.parseCommandLine( ignoreErrors = True )

  positionalArgs = Script.getPositionalArgs()
  if positionalArgs:
    subLogger.error( "Found the following positional args '%s', but we only accept switches" % positionalArgs )
    subLogger.error( "Please, check documentation below" )
    Script.showHelp()
    DIRACExit( 1 )

  switches = dict( Script.getUnprocessedSwitches() )
  # Default values
  switches.setdefault( 'element', None )

  if switches[ 'element' ] not in ( 'all', 'Site', 'Resource', 'Node', None ):
    subLogger.error( "Found %s as element switch" % switches[ 'element' ] )
    subLogger.error( "Please, check documentation below" )
    Script.showHelp()
    DIRACExit( 1 )

  subLogger.debug( "The switches used are:" )
  for switchPair in switches.iteritems():
    subLogger.debug( switchPair )

  return switches
#Script initialization
# Module-level bootstrap: register the switches and parse the command line
# at import time, before the DIRAC client imports below.
subLogger = gLogger.getSubLogger( __file__ )
registerSwitches()
registerUsageMessage()
switchDict = parseSwitches()
#############################################################################
# We can define the script body now
from DIRAC.WorkloadManagementSystem.Client.ServerUtils import jobDB
from DIRAC import gConfig
from DIRAC.ResourceStatusSystem.Utilities import Synchronizer, CSHelpers, RssConfiguration
from DIRAC.ResourceStatusSystem.Client import ResourceStatusClient
from DIRAC.ResourceStatusSystem.PolicySystem import StateMachine
def synchronize():
  '''
  Given the element switch, adds rows to the <element>Status tables with Status
  `Unknown` and Reason `Synchronized`.
  '''
  synchronizer = Synchronizer.Synchronizer()

  # ( element switch value, log label, sync method ) triples, run in order.
  syncTasks = ( ( 'Site', 'Sites', synchronizer._syncSites ),
                ( 'Resource', 'Resource', synchronizer._syncResources ),
                ( 'Node', 'Nodes', synchronizer._syncNodes ) )

  for element, label, syncFunc in syncTasks:
    if switchDict[ 'element' ] in ( element, 'all' ):
      subLogger.info( 'Synchronizing %s' % label )
      res = syncFunc()
      if not res[ 'OK' ]:
        return res

  return S_OK()
def initSites():
  '''
  Initializes Sites statuses taking their values from the "SiteMask" table of "JobDB" database.

  Inserts one `SiteStatus` row per site (only if not already present), with a
  24h token expiration (module-level `Datetime`). Exits the script on any
  database error.
  '''
  rssClient = ResourceStatusClient.ResourceStatusClient()
  sites = jobDB.getAllSiteMaskStatus()
  if not sites[ 'OK' ]:
    subLogger.error( sites[ 'Message' ] )
    DIRACExit( 1 )

  # `elements` is unpacked positionally below: [0] status, [1] effective
  # date, [2] token owner.
  for site, elements in sites['Value'].iteritems():
    table = { 'table': 'SiteStatus' }
    parameters = { 'status': elements[0],
                   'reason': 'Synchronized',
                   'name': site,
                   'dateEffective': elements[1],
                   'tokenExpiration': Datetime,
                   'elementType': 'Site',
                   'statusType': 'all',
                   'lastCheckTime': None,
                   'tokenOwner': elements[2],
                   'meta': table }
    result = rssClient.addIfNotThereStatusElement( "Site", "Status", **parameters )
    if not result[ 'OK' ]:
      subLogger.error( result[ 'Message' ] )
      DIRACExit( 1 )

  return S_OK()
def initSEs():
  '''
  Initializes SEs statuses taking their values from the CS.

  For each storage element, every statusType present in the CS section is
  written to RSS (after translating CS terms to RSS ones); statusTypes that
  are declared in the RSS configuration but absent from the CS are
  "backtracked" to DEFAULT_STATUS (Banned).
  '''
  #WarmUp local copy
  CSHelpers.warmUp()

  subLogger.info( 'Initializing SEs' )

  rssClient = ResourceStatusClient.ResourceStatusClient()

  ses = CSHelpers.getStorageElements()
  if not ses[ 'OK' ]:
    return ses
  ses = ses[ 'Value' ]

  statuses = StateMachine.RSSMachine( None ).getStates()
  statusTypes = RssConfiguration.RssConfiguration().getConfigStatusType( 'StorageElement' )
  reason = 'dirac-rss-sync'

  subLogger.debug( statuses )
  subLogger.debug( statusTypes )

  for se in ses:
    subLogger.debug( se )

    opts = gConfig.getOptionsDict( '/Resources/StorageElements/%s' % se )
    if not opts[ 'OK' ]:
      subLogger.warn( opts[ 'Message' ] )
      continue
    opts = opts[ 'Value' ]

    subLogger.debug( opts )

    # We copy the list into a new object to remove items INSIDE the loop !
    statusTypesList = statusTypes[:]

    for statusType, status in opts.iteritems():
      #Sanity check...
      if statusType not in statusTypesList:
        continue

      #Transforms statuses to RSS terms
      if status in ( 'NotAllowed', 'InActive' ):
        status = 'Banned'

      if status not in statuses:
        subLogger.error( '%s not a valid status for %s - %s' % ( status, se, statusType ) )
        continue

      # We remove from the backtracking
      statusTypesList.remove( statusType )

      subLogger.debug( [ se, statusType, status, reason ] )
      result = rssClient.addOrModifyStatusElement( 'Resource', 'Status', name = se,
                                                   statusType = statusType, status = status,
                                                   elementType = 'StorageElement',
                                                   reason = reason )
      if not result[ 'OK' ]:
        subLogger.error( 'Failed to modify' )
        subLogger.error( result[ 'Message' ] )
        continue

    #Backtracking: statusTypes not present on CS get the default status
    for statusType in statusTypesList:
      result = rssClient.addOrModifyStatusElement( 'Resource', 'Status', name = se,
                                                   statusType = statusType, status = DEFAULT_STATUS,
                                                   elementType = 'StorageElement',
                                                   reason = reason )
      if not result[ 'OK' ]:
        # Bug fix: this message used to interpolate the leftover `status`
        # variable from the CS loop above (stale value, and unbound if the
        # CS section was empty). Report the status actually written.
        subLogger.error( 'Error in backtracking for %s,%s,%s' % ( se, statusType, DEFAULT_STATUS ) )
        subLogger.error( result[ 'Message' ] )

  return S_OK()
#...............................................................................
def run():
  '''
  Main function of the script
  '''

  def _abortOnError( result ):
    # Log the failure and terminate the script on any failed step.
    if not result[ 'OK' ]:
      subLogger.error( result[ 'Message' ] )
      DIRACExit( 1 )

  _abortOnError( synchronize() )

  if 'init' in switchDict:
    _abortOnError( initSites() )
    _abortOnError( initSEs() )
#...............................................................................
if __name__ == "__main__":
  #Run script
  run()
  #Bye - exit 0: every step either succeeded or already exited non-zero
  DIRACExit( 0 )
################################################################################
#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF
| gpl-3.0 |
favll/pogom | pogom/pgoapi/protos/POGOProtos/Networking/Responses/ReleasePokemonResponse_pb2.py | 6 | 4314 | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: POGOProtos/Networking/Responses/ReleasePokemonResponse.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='POGOProtos/Networking/Responses/ReleasePokemonResponse.proto',
package='POGOProtos.Networking.Responses',
syntax='proto3',
serialized_pb=_b('\n<POGOProtos/Networking/Responses/ReleasePokemonResponse.proto\x12\x1fPOGOProtos.Networking.Responses\"\xf9\x01\n\x16ReleasePokemonResponse\x12N\n\x06result\x18\x01 \x01(\x0e\x32>.POGOProtos.Networking.Responses.ReleasePokemonResponse.Result\x12\x15\n\rcandy_awarded\x18\x02 \x01(\x05\"x\n\x06Result\x12\t\n\x05UNSET\x10\x00\x12\x0b\n\x07SUCCESS\x10\x01\x12\x14\n\x10POKEMON_DEPLOYED\x10\x02\x12\n\n\x06\x46\x41ILED\x10\x03\x12\x18\n\x14\x45RROR_POKEMON_IS_EGG\x10\x04\x12\x1a\n\x16\x45RROR_POKEMON_IS_BUDDY\x10\x05\x62\x06proto3')
)
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
# NOTE: generated by the protocol buffer compiler from
# ReleasePokemonResponse.proto — regenerate with protoc instead of editing
# by hand; the serialized offsets below must stay in sync with the
# serialized_pb blob in DESCRIPTOR.

# Enum descriptor for ReleasePokemonResponse.Result.
_RELEASEPOKEMONRESPONSE_RESULT = _descriptor.EnumDescriptor(
  name='Result',
  full_name='POGOProtos.Networking.Responses.ReleasePokemonResponse.Result',
  filename=None,
  file=DESCRIPTOR,
  values=[
    _descriptor.EnumValueDescriptor(
      name='UNSET', index=0, number=0,
      options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='SUCCESS', index=1, number=1,
      options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='POKEMON_DEPLOYED', index=2, number=2,
      options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='FAILED', index=3, number=3,
      options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='ERROR_POKEMON_IS_EGG', index=4, number=4,
      options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='ERROR_POKEMON_IS_BUDDY', index=5, number=5,
      options=None,
      type=None),
  ],
  containing_type=None,
  options=None,
  serialized_start=227,
  serialized_end=347,
)
_sym_db.RegisterEnumDescriptor(_RELEASEPOKEMONRESPONSE_RESULT)

# Message descriptor: two fields, `result` (enum) and `candy_awarded` (int32).
_RELEASEPOKEMONRESPONSE = _descriptor.Descriptor(
  name='ReleasePokemonResponse',
  full_name='POGOProtos.Networking.Responses.ReleasePokemonResponse',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='result', full_name='POGOProtos.Networking.Responses.ReleasePokemonResponse.result', index=0,
      number=1, type=14, cpp_type=8, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='candy_awarded', full_name='POGOProtos.Networking.Responses.ReleasePokemonResponse.candy_awarded', index=1,
      number=2, type=5, cpp_type=1, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
    _RELEASEPOKEMONRESPONSE_RESULT,
  ],
  options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=98,
  serialized_end=347,
)

# Link the nested enum to its containing message and expose the message type
# on the file descriptor.
_RELEASEPOKEMONRESPONSE.fields_by_name['result'].enum_type = _RELEASEPOKEMONRESPONSE_RESULT
_RELEASEPOKEMONRESPONSE_RESULT.containing_type = _RELEASEPOKEMONRESPONSE
DESCRIPTOR.message_types_by_name['ReleasePokemonResponse'] = _RELEASEPOKEMONRESPONSE

# Build and register the concrete message class from the descriptor.
ReleasePokemonResponse = _reflection.GeneratedProtocolMessageType('ReleasePokemonResponse', (_message.Message,), dict(
  DESCRIPTOR = _RELEASEPOKEMONRESPONSE,
  __module__ = 'POGOProtos.Networking.Responses.ReleasePokemonResponse_pb2'
  # @@protoc_insertion_point(class_scope:POGOProtos.Networking.Responses.ReleasePokemonResponse)
  ))
_sym_db.RegisterMessage(ReleasePokemonResponse)

# @@protoc_insertion_point(module_scope)
| mit |
sjlehtin/django | django/contrib/auth/migrations/0001_initial.py | 74 | 4960 | import django.contrib.auth.models
from django.contrib.auth import validators
from django.db import migrations, models
from django.utils import timezone
class Migration(migrations.Migration):
    """Initial schema for ``django.contrib.auth``.

    Creates the ``Permission``, ``Group`` and ``User`` models. Depends on
    contenttypes because ``Permission`` holds a foreign key to ContentType.
    Historical migration: do not edit field definitions retroactively.
    """

    dependencies = [
        ('contenttypes', '__first__'),
    ]

    operations = [
        migrations.CreateModel(
            name='Permission',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('name', models.CharField(max_length=50, verbose_name='name')),
                ('content_type', models.ForeignKey(
                    to='contenttypes.ContentType',
                    on_delete=models.CASCADE,
                    to_field='id',
                    verbose_name='content type',
                )),
                ('codename', models.CharField(max_length=100, verbose_name='codename')),
            ],
            options={
                'ordering': ('content_type__app_label', 'content_type__model', 'codename'),
                'unique_together': {('content_type', 'codename')},
                'verbose_name': 'permission',
                'verbose_name_plural': 'permissions',
            },
            managers=[
                ('objects', django.contrib.auth.models.PermissionManager()),
            ],
        ),
        migrations.CreateModel(
            name='Group',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('name', models.CharField(unique=True, max_length=80, verbose_name='name')),
                ('permissions', models.ManyToManyField(to='auth.Permission', verbose_name='permissions', blank=True)),
            ],
            options={
                'verbose_name': 'group',
                'verbose_name_plural': 'groups',
            },
            managers=[
                ('objects', django.contrib.auth.models.GroupManager()),
            ],
        ),
        migrations.CreateModel(
            name='User',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('password', models.CharField(max_length=128, verbose_name='password')),
                ('last_login', models.DateTimeField(default=timezone.now, verbose_name='last login')),
                ('is_superuser', models.BooleanField(
                    default=False,
                    help_text='Designates that this user has all permissions without explicitly assigning them.',
                    verbose_name='superuser status'
                )),
                ('username', models.CharField(
                    help_text='Required. 30 characters or fewer. Letters, digits and @/./+/-/_ only.', unique=True,
                    max_length=30, verbose_name='username',
                    validators=[validators.UnicodeUsernameValidator()],
                )),
                ('first_name', models.CharField(max_length=30, verbose_name='first name', blank=True)),
                ('last_name', models.CharField(max_length=30, verbose_name='last name', blank=True)),
                ('email', models.EmailField(max_length=75, verbose_name='email address', blank=True)),
                ('is_staff', models.BooleanField(
                    default=False, help_text='Designates whether the user can log into this admin site.',
                    verbose_name='staff status'
                )),
                ('is_active', models.BooleanField(
                    default=True, verbose_name='active', help_text=(
                        'Designates whether this user should be treated as active. Unselect this instead of deleting '
                        'accounts.'
                    )
                )),
                ('date_joined', models.DateTimeField(default=timezone.now, verbose_name='date joined')),
                ('groups', models.ManyToManyField(
                    to='auth.Group', verbose_name='groups', blank=True, related_name='user_set',
                    related_query_name='user', help_text=(
                        'The groups this user belongs to. A user will get all permissions granted to each of their '
                        'groups.'
                    )
                )),
                ('user_permissions', models.ManyToManyField(
                    to='auth.Permission', verbose_name='user permissions', blank=True,
                    help_text='Specific permissions for this user.', related_name='user_set',
                    related_query_name='user')
                ),
            ],
            options={
                # Allows projects to substitute a custom user model.
                'swappable': 'AUTH_USER_MODEL',
                'verbose_name': 'user',
                'verbose_name_plural': 'users',
            },
            managers=[
                ('objects', django.contrib.auth.models.UserManager()),
            ],
        ),
    ]
| bsd-3-clause |
michaelaye/vispy | vispy/scene/widgets/widget.py | 7 | 13305 | # -*- coding: utf-8 -*-
# Copyright (c) 2015, Vispy Development Team.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
from __future__ import division
import numpy as np
from ..visuals import Compound
from ...visuals.mesh import MeshVisual
from ...visuals.transforms import STTransform
from ...visuals.filters import Clipper
from ...util.event import Event
from ...geometry import Rect
from ...color import Color
class Widget(Compound):
""" A widget takes up a rectangular space, intended for use in
a 2D pixel coordinate frame.
The widget is positioned using the transform attribute (as any
node), and its extent (size) is kept as a separate property.
Parameters
----------
pos : (x, y)
A 2-element tuple to specify the top left corner of the widget.
size : (w, h)
A 2-element tuple to spicify the size of the widget.
border_color : color
The color of the border.
border_width : float
The width of the border line in pixels.
bgcolor : color
The background color.
padding : int
The amount of padding in the widget (i.e. the space reserved between
the contents and the border).
margin : int
The margin to keep outside the widget's border.
"""
def __init__(self, pos=(0, 0), size=(10, 10), border_color=None,
border_width=1, bgcolor=None, padding=0, margin=0, **kwargs):
# For drawing border.
# A mesh is required because GL lines cannot be drawn with predictable
# shape across all platforms.
self._mesh = MeshVisual(color=border_color, mode='triangles')
self._mesh.set_gl_state('translucent', depth_test=False,
cull_face=False)
self._picking_mesh = MeshVisual(mode='triangle_fan')
self._picking_mesh.set_gl_state(cull_face=False)
self._picking_mesh.visible = False
# reserved space inside border
self._padding = padding
self._border_width = border_width
# reserved space outside border
self._margin = margin
self._size = 100, 100
# layout interaction
self._width_limits = [0, None]
self._height_limits = [0, None]
self._stretch = [None, None]
# used by the constraint solver
# in Grid - these are Cassowary variables
self._var_w = self._var_h = None
self._var_x = self._var_y = None
self._widgets = []
self._border_color = Color(border_color)
self._bgcolor = Color(bgcolor)
self._face_colors = None
Compound.__init__(self, [self._mesh, self._picking_mesh], **kwargs)
self.transform = STTransform()
self.events.add(resize=Event)
self.pos = pos
self._update_colors()
self.size = size
@property
def pos(self):
return tuple(self.transform.translate[:2])
@pos.setter
def pos(self, p):
assert isinstance(p, tuple)
assert len(p) == 2
if p == self.pos:
return
self.transform.translate = p[0], p[1], 0, 0
self._update_line()
@property
def size(self):
"""The size (w, h) of this widget.
If the widget is a child of another widget, then its size is assigned
automatically by its parent.
"""
return self._size
@size.setter
def size(self, s):
assert isinstance(s, tuple)
assert len(s) == 2
if self._size == s:
return
self._size = s
self._update_line()
self._update_child_widgets()
self._update_clipper()
self.events.resize()
@property
def width(self):
"""The actual width of this widget"""
return self._size[0]
@property
def width_min(self):
"""The minimum width the widget can have"""
return self._width_limits[0]
@width_min.setter
def width_min(self, width_min):
"""Set the minimum height of the widget
Parameters
----------
height_min: float
the minimum height of the widget
"""
if width_min is None:
self._width_limits[0] = 0
return
width_min = float(width_min)
assert(0 <= width_min)
self._width_limits[0] = width_min
self._update_layout()
@property
def width_max(self):
"""The maximum width the widget can have"""
return self._width_limits[1]
@width_max.setter
def width_max(self, width_max):
"""Set the maximum width of the widget.
Parameters
----------
width_max: None | float
the maximum width of the widget. if None, maximum width
is unbounded
"""
if width_max is None:
self._width_limits[1] = None
return
width_max = float(width_max)
assert(self.width_min <= width_max)
self._width_limits[1] = width_max
self._update_layout()
@property
def height(self):
"""The actual height of the widget"""
return self._size[1]
@property
def height_min(self):
"""The minimum height of the widget"""
return self._height_limits[0]
@height_min.setter
def height_min(self, height_min):
"""Set the minimum height of the widget
Parameters
----------
height_min: float
the minimum height of the widget
"""
if height_min is None:
self._height_limits[0] = 0
return
height_min = float(height_min)
assert(height_min >= 0)
self._height_limits[0] = height_min
self._update_layout()
@property
def height_max(self):
"""The maximum height of the widget"""
return self._height_limits[1]
@height_max.setter
def height_max(self, height_max):
"""Set the maximum height of the widget.
Parameters
----------
height_max: None | float
the maximum height of the widget. if None, maximum height
is unbounded
"""
if height_max is None:
self._height_limits[1] = None
return
height_max = float(height_max)
assert(0 <= self.height_min <= height_max)
self._height_limits[1] = height_max
self._update_layout()
@property
def rect(self):
return Rect((0, 0), self.size)
@rect.setter
def rect(self, r):
with self.events.resize.blocker():
self.pos = r.pos
self.size = r.size
self.update()
self.events.resize()
@property
def inner_rect(self):
"""The rectangular area inside the margin, border, and padding.
Generally widgets should avoid drawing or placing sub-widgets outside
this rectangle.
"""
m = self.margin + self._border_width + self.padding
if not self.border_color.is_blank:
m += 1
return Rect((m, m), (self.size[0]-2*m, self.size[1]-2*m))
@property
def stretch(self):
"""Stretch factors (w, h) used when determining how much space to
allocate to this widget in a layout.
If either stretch factor is None, then it will be assigned when the
widget is added to a layout based on the number of columns or rows it
occupies.
"""
return self._stretch
@stretch.setter
def stretch(self, s):
self._stretch = [float(s[0]), float(s[1])]
if self._stretch[0] == 0:
raise RuntimeError("received 0 as stretch parameter: %s", s)
if self._stretch[1] == 0:
raise RuntimeError("received 0 as stretch parameter: %s", s)
self._update_layout()
def _update_layout(self):
if isinstance(self.parent, Widget):
self.parent._update_child_widgets()
def _update_clipper(self):
"""Called whenever the clipper for this widget may need to be updated.
"""
if self.clip_children and self._clipper is None:
self._clipper = Clipper()
elif not self.clip_children:
self._clipper = None
if self._clipper is None:
return
self._clipper.rect = self.inner_rect
self._clipper.transform = self.get_transform('framebuffer', 'visual')
@property
def border_color(self):
""" The color of the border.
"""
return self._border_color
@border_color.setter
def border_color(self, b):
self._border_color = Color(b)
self._update_colors()
self._update_line()
self.update()
@property
def bgcolor(self):
""" The background color of the Widget.
"""
return self._bgcolor
@bgcolor.setter
def bgcolor(self, value):
self._bgcolor = Color(value)
self._update_colors()
self._update_line()
self.update()
@property
def margin(self):
return self._margin
@margin.setter
def margin(self, m):
self._margin = m
self._update_child_widgets()
self._update_line()
self.update()
self.events.resize()
@property
def padding(self):
return self._padding
@padding.setter
def padding(self, p):
self._padding = p
self._update_child_widgets()
self.update()
def _update_line(self):
""" Update border line to match new shape """
w = self._border_width
m = self.margin
# border is drawn within the boundaries of the widget:
#
# size = (8, 7) margin=2
# internal rect = (3, 3, 2, 1)
# ........
# ........
# ..BBBB..
# ..B B..
# ..BBBB..
# ........
# ........
#
l = b = m
r = self.size[0] - m
t = self.size[1] - m
pos = np.array([
[l, b], [l+w, b+w],
[r, b], [r-w, b+w],
[r, t], [r-w, t-w],
[l, t], [l+w, t-w],
], dtype=np.float32)
faces = np.array([
[0, 2, 1],
[1, 2, 3],
[2, 4, 3],
[3, 5, 4],
[4, 5, 6],
[5, 7, 6],
[6, 0, 7],
[7, 0, 1],
[5, 3, 1],
[1, 5, 7],
], dtype=np.int32)
start = 8 if self._border_color.is_blank else 0
stop = 8 if self._bgcolor.is_blank else 10
face_colors = None
if self._face_colors is not None:
face_colors = self._face_colors[start:stop]
self._mesh.set_data(vertices=pos, faces=faces[start:stop],
face_colors=face_colors)
# picking mesh covers the entire area
self._picking_mesh.set_data(vertices=pos[::2])
def _update_colors(self):
self._face_colors = np.concatenate(
(np.tile(self.border_color.rgba, (8, 1)),
np.tile(self.bgcolor.rgba, (2, 1)))).astype(np.float32)
self._update_visibility()
@property
def picking(self):
    # NOTE(review): presumably toggles pick-color rendering, inherited
    # from Compound -- confirm against the Compound base class.
    return self._picking

@picking.setter
def picking(self, p):
    # Delegate to the base-class setter, then swap which mesh is shown
    # (picking mesh vs. visual mesh).
    Compound.picking.fset(self, p)
    self._update_visibility()
def _update_visibility(self):
blank = self.border_color.is_blank and self.bgcolor.is_blank
picking = self.picking
self._picking_mesh.visible = picking and self.interactive
self._mesh.visible = not picking and not blank
def _update_child_widgets(self):
# Set the position and size of child boxes (only those added
# using add_widget)
for ch in self._widgets:
ch.rect = self.rect.padded(self.padding + self.margin)
def add_widget(self, widget):
    """Attach *widget* as a managed child of this Widget.

    Managed children are automatically re-positioned and re-sized to
    occupy this widget's entire interior whenever the layout changes
    (unless ``_update_child_widgets`` is redefined).

    Parameters
    ----------
    widget : instance of Widget
        The widget to add.

    Returns
    -------
    widget : instance of Widget
        The widget that was added.
    """
    self._widgets.append(widget)
    widget.parent = self
    self._update_child_widgets()
    return widget
def add_grid(self, *args, **kwargs):
    """Create a new Grid, add it as a managed child, and return it.

    All positional and keyword arguments are forwarded to ``Grid``.
    """
    from .grid import Grid
    return self.add_widget(Grid(*args, **kwargs))
def add_view(self, *args, **kwargs):
    """Create a new ViewBox, add it as a managed child, and return it.

    All positional and keyword arguments are forwarded to ``ViewBox``.
    """
    from .viewbox import ViewBox
    return self.add_widget(ViewBox(*args, **kwargs))
def remove_widget(self, widget):
    """Detach *widget* from this Widget's managed children.

    Parameters
    ----------
    widget : instance of Widget
        The widget to remove. A ValueError is raised (by ``list.remove``)
        if it is not currently a managed child.
    """
    self._widgets.remove(widget)
    widget.parent = None
    self._update_child_widgets()
| bsd-3-clause |
cchurch/ansible | lib/ansible/modules/cloud/vultr/vultr_server.py | 4 | 31945 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# (c) 2017, René Moser <mail@renemoser.net>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: vultr_server
short_description: Manages virtual servers on Vultr.
description:
- Deploy, start, stop, update, restart, reinstall servers.
version_added: "2.5"
author: "René Moser (@resmo)"
options:
name:
description:
- Name of the server.
required: true
aliases: [ label ]
type: str
hostname:
description:
- The hostname to assign to this server.
type: str
os:
description:
- The operating system name or ID.
- Required if the server does not yet exist and is not restoring from a snapshot.
type: str
snapshot:
version_added: "2.8"
description:
- Name or ID of the snapshot to restore the server from.
type: str
firewall_group:
description:
- The firewall group description or ID to assign this server to.
type: str
plan:
description:
- Plan name or ID to use for the server.
- Required if the server does not yet exist.
type: str
force:
description:
- Force stop/start the server if required to apply changes
- Otherwise a running server will not be changed.
type: bool
default: no
notify_activate:
description:
- Whether to send an activation email when the server is ready or not.
- Only considered on creation.
type: bool
private_network_enabled:
description:
- Whether to enable private networking or not.
type: bool
auto_backup_enabled:
description:
- Whether to enable automatic backups or not.
type: bool
ipv6_enabled:
description:
- Whether to enable IPv6 or not.
type: bool
tag:
description:
- Tag for the server.
type: str
user_data:
description:
- User data to be passed to the server.
type: str
startup_script:
description:
- Name or ID of the startup script to execute on boot.
- Only considered while creating the server.
type: str
ssh_keys:
description:
- List of SSH key names or IDs passed to the server on creation.
aliases: [ ssh_key ]
type: list
reserved_ip_v4:
description:
- IP address of the floating IP to use as the main IP of this server.
- Only considered on creation.
type: str
region:
description:
- Region name or ID the server is deployed into.
- Required if the server does not yet exist.
type: str
state:
description:
- State of the server.
default: present
choices: [ present, absent, restarted, reinstalled, started, stopped ]
type: str
extends_documentation_fragment: vultr
'''
EXAMPLES = '''
- name: create server
delegate_to: localhost
vultr_server:
name: "{{ vultr_server_name }}"
os: CentOS 7 x64
plan: 1024 MB RAM,25 GB SSD,1.00 TB BW
ssh_keys:
- my_key
- your_key
region: Amsterdam
state: present
- name: ensure a server is present and started
delegate_to: localhost
vultr_server:
name: "{{ vultr_server_name }}"
os: CentOS 7 x64
plan: 1024 MB RAM,25 GB SSD,1.00 TB BW
firewall_group: my_group
ssh_key: my_key
region: Amsterdam
state: started
- name: ensure a server is present and stopped provisioned using IDs
delegate_to: localhost
vultr_server:
name: "{{ vultr_server_name }}"
os: "167"
plan: "201"
region: "7"
state: stopped
- name: ensure an existing server is stopped
delegate_to: localhost
vultr_server:
name: "{{ vultr_server_name }}"
state: stopped
- name: ensure an existing server is started
delegate_to: localhost
vultr_server:
name: "{{ vultr_server_name }}"
state: started
- name: ensure a server is absent
delegate_to: localhost
vultr_server:
name: "{{ vultr_server_name }}"
state: absent
'''
RETURN = '''
---
vultr_api:
description: Response from Vultr API with a few additions/modification
returned: success
type: complex
contains:
api_account:
description: Account used in the ini file to select the key
returned: success
type: str
sample: default
api_timeout:
description: Timeout used for the API requests
returned: success
type: int
sample: 60
api_retries:
description: Amount of max retries for the API requests
returned: success
type: int
sample: 5
api_endpoint:
description: Endpoint used for the API requests
returned: success
type: str
sample: "https://api.vultr.com"
vultr_server:
description: Response from Vultr API with a few additions/modification
returned: success
type: complex
contains:
id:
description: ID of the server
returned: success
type: str
sample: 10194376
name:
description: Name (label) of the server
returned: success
type: str
sample: "ansible-test-vm"
plan:
description: Plan used for the server
returned: success
type: str
sample: "1024 MB RAM,25 GB SSD,1.00 TB BW"
allowed_bandwidth_gb:
description: Allowed bandwidth to use in GB
returned: success
type: int
sample: 1000
auto_backup_enabled:
description: Whether automatic backups are enabled
returned: success
type: bool
sample: false
cost_per_month:
description: Cost per month for the server
returned: success
type: float
sample: 5.00
current_bandwidth_gb:
description: Current bandwidth used for the server
returned: success
type: int
sample: 0
date_created:
description: Date when the server was created
returned: success
type: str
sample: "2017-08-26 12:47:48"
default_password:
description: Password to login as root into the server
returned: success
type: str
sample: "!p3EWYJm$qDWYaFr"
disk:
description: Information about the disk
returned: success
type: str
sample: "Virtual 25 GB"
v4_gateway:
description: IPv4 gateway
returned: success
type: str
sample: "45.32.232.1"
internal_ip:
description: Internal IP
returned: success
type: str
sample: ""
kvm_url:
description: URL to the VNC
returned: success
type: str
sample: "https://my.vultr.com/subs/vps/novnc/api.php?data=xyz"
region:
description: Region the server was deployed into
returned: success
type: str
sample: "Amsterdam"
v4_main_ip:
description: Main IPv4
returned: success
type: str
sample: "45.32.233.154"
v4_netmask:
description: Netmask IPv4
returned: success
type: str
sample: "255.255.254.0"
os:
description: Operating system used for the server
returned: success
type: str
sample: "CentOS 6 x64"
firewall_group:
description: Firewall group the server is assigned to
returned: success and available
type: str
sample: "CentOS 6 x64"
pending_charges:
description: Pending charges
returned: success
type: float
sample: 0.01
power_status:
description: Power status of the server
returned: success
type: str
sample: "running"
ram:
description: Information about the RAM size
returned: success
type: str
sample: "1024 MB"
server_state:
description: State about the server
returned: success
type: str
sample: "ok"
status:
description: Status about the deployment of the server
returned: success
type: str
sample: "active"
tag:
description: TBD
returned: success
type: str
sample: ""
v6_main_ip:
description: Main IPv6
returned: success
type: str
sample: ""
v6_network:
description: Network IPv6
returned: success
type: str
sample: ""
v6_network_size:
description: Network size IPv6
returned: success
type: str
sample: ""
v6_networks:
description: Networks IPv6
returned: success
type: list
sample: []
vcpu_count:
description: Virtual CPU count
returned: success
type: int
sample: 1
'''
import time
import base64
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_text, to_bytes
from ansible.module_utils.vultr import (
Vultr,
vultr_argument_spec,
)
class AnsibleVultrServer(Vultr):
def __init__(self, module):
    """Set up Vultr API access for the 'vultr_server' namespace.

    ``self.returns`` maps raw Vultr API field names to the key name and
    optional type conversion used when building this module's result
    (consumed by the Vultr base class).
    """
    super(AnsibleVultrServer, self).__init__(module, "vultr_server")

    # Cached server dict from the API; populated lazily by get_server().
    self.server = None
    self.returns = {
        'SUBID': dict(key='id'),
        'label': dict(key='name'),
        'date_created': dict(),
        'allowed_bandwidth_gb': dict(convert_to='int'),
        'auto_backups': dict(key='auto_backup_enabled', convert_to='bool'),
        'current_bandwidth_gb': dict(),
        'kvm_url': dict(),
        'default_password': dict(),
        'internal_ip': dict(),
        'disk': dict(),
        'cost_per_month': dict(convert_to='float'),
        'location': dict(key='region'),
        'main_ip': dict(key='v4_main_ip'),
        'network_v4': dict(key='v4_network'),
        'gateway_v4': dict(key='v4_gateway'),
        'os': dict(),
        'pending_charges': dict(convert_to='float'),
        'power_status': dict(),
        'ram': dict(),
        'plan': dict(),
        'server_state': dict(),
        'status': dict(),
        'firewall_group': dict(),
        'tag': dict(),
        'v6_main_ip': dict(),
        'v6_network': dict(),
        'v6_network_size': dict(),
        'v6_networks': dict(),
        'vcpu_count': dict(convert_to='int'),
    }
    # Power state as it was before any stop/start handling; used to
    # restore the original state after updates (see
    # _handle_power_status_for_update and _update_server).
    self.server_power_state = None
def get_startup_script(self):
    """Resolve the configured startup script name/ID to an API resource."""
    script = self.module.params.get('startup_script')
    return self.query_resource_by_key(
        key='name',
        value=script,
        resource='startupscript',
    )
def get_os(self):
    """Resolve the OS to deploy.

    Snapshot restores always deploy the special 'Snapshot' OS instead of
    the user-supplied ``os`` parameter.
    """
    params = self.module.params
    os_name = 'Snapshot' if params.get('snapshot') else params.get('os')
    return self.query_resource_by_key(
        key='name',
        value=os_name,
        resource='os',
        use_cache=True,
        id_key='OSID',
    )
def get_snapshot(self):
    """Resolve the configured snapshot description/ID to an API resource."""
    snapshot = self.module.params.get('snapshot')
    return self.query_resource_by_key(
        key='description',
        value=snapshot,
        resource='snapshot',
        id_key='SNAPSHOTID',
    )
def get_ssh_keys(self):
    """Resolve the configured SSH key names to API resources.

    Returns an empty list when no keys are configured; lookups that
    return a falsy result are silently skipped.
    """
    names = self.module.params.get('ssh_keys')
    if not names:
        return []

    resolved = []
    for key_name in names:
        match = self.query_resource_by_key(
            key='name',
            value=key_name,
            resource='sshkey',
            use_cache=True,
            id_key='SSHKEYID',
        )
        if match:
            resolved.append(match)
    return resolved
def get_region(self):
    """Resolve the configured region name/ID to an API resource."""
    region = self.module.params.get('region')
    return self.query_resource_by_key(
        key='name',
        value=region,
        resource='regions',
        use_cache=True,
        id_key='DCID',
    )
def get_plan(self):
    """Resolve the configured plan name/ID to an API resource."""
    plan = self.module.params.get('plan')
    return self.query_resource_by_key(
        key='name',
        value=plan,
        resource='plans',
        use_cache=True,
        id_key='VPSPLANID',
    )
def get_firewall_group(self):
    """Resolve the configured firewall group description/ID to a resource."""
    group = self.module.params.get('firewall_group')
    return self.query_resource_by_key(
        key='description',
        value=group,
        resource='firewall',
        query_by='group_list',
        id_key='FIREWALLGROUPID'
    )
def get_user_data(self):
    """Return the user_data parameter base64-encoded, or None if unset."""
    raw = self.module.params.get('user_data')
    if raw is None:
        return None
    return to_text(base64.b64encode(to_bytes(raw)))
def get_server_user_data(self, server):
    """Fetch the base64 user data for *server*, or None without a SUBID."""
    if not server or not server.get('SUBID'):
        return None
    response = self.api_query(
        path="/v1/server/get_user_data?SUBID=%s" % server.get('SUBID'))
    return response.get('userdata')
def get_server(self, refresh=False):
    """Return the server matching the 'name' param, or None.

    The result is cached on ``self.server``; pass ``refresh=True`` to
    re-query the API. The raw API record is enriched in place: plan, OS
    and firewall-group IDs are resolved to human-readable names.
    """
    if self.server is None or refresh:
        self.server = None
        server_list = self.api_query(path="/v1/server/list")
        if server_list:
            for server_id, server_data in server_list.items():
                # Match on the Vultr label, which this module calls 'name'.
                if server_data.get('label') == self.module.params.get('name'):
                    self.server = server_data

                    plan = self.query_resource_by_key(
                        key='VPSPLANID',
                        value=server_data['VPSPLANID'],
                        resource='plans',
                        use_cache=True
                    )
                    self.server['plan'] = plan.get('name')

                    os = self.query_resource_by_key(
                        key='OSID',
                        value=int(server_data['OSID']),
                        resource='os',
                        use_cache=True
                    )
                    self.server['os'] = os.get('name')

                    # A FIREWALLGROUPID of "0" means "no firewall group";
                    # look up None in that case so the description is empty.
                    fwg_id = server_data.get('FIREWALLGROUPID')
                    fw = self.query_resource_by_key(
                        key='FIREWALLGROUPID',
                        value=server_data.get('FIREWALLGROUPID') if fwg_id and fwg_id != "0" else None,
                        resource='firewall',
                        query_by='group_list',
                        use_cache=True
                    )
                    self.server['firewall_group'] = fw.get('description')
    return self.server
def present_server(self, start_server=True):
    """Ensure the server exists: create it if absent, update it otherwise.

    ``start_server=False`` tells the update path not to start a server
    that had to be stopped for the changes.
    """
    existing = self.get_server()
    if existing:
        return self._update_server(server=existing, start_server=start_server)
    return self._create_server(server=existing)
def _create_server(self, server=None):
    """Deploy a new server and wait until it is active and running.

    'os' is not required when restoring from a snapshot, since snapshot
    restores always use the special Snapshot OS (see ``get_os``).
    """
    required_params = [
        'os',
        'plan',
        'region',
    ]
    snapshot_restore = self.module.params.get('snapshot') is not None
    if snapshot_restore:
        required_params.remove('os')

    self.module.fail_on_missing_params(required_params=required_params)

    self.result['changed'] = True
    if not self.module.check_mode:
        # Resolve every name-like parameter to its Vultr ID for the API call.
        data = {
            'DCID': self.get_region().get('DCID'),
            'VPSPLANID': self.get_plan().get('VPSPLANID'),
            'FIREWALLGROUPID': self.get_firewall_group().get('FIREWALLGROUPID'),
            'OSID': self.get_os().get('OSID'),
            'SNAPSHOTID': self.get_snapshot().get('SNAPSHOTID'),
            'label': self.module.params.get('name'),
            'hostname': self.module.params.get('hostname'),
            'SSHKEYID': ','.join([ssh_key['SSHKEYID'] for ssh_key in self.get_ssh_keys()]),
            'enable_ipv6': self.get_yes_or_no('ipv6_enabled'),
            'enable_private_network': self.get_yes_or_no('private_network_enabled'),
            'auto_backups': self.get_yes_or_no('auto_backup_enabled'),
            'notify_activate': self.get_yes_or_no('notify_activate'),
            'tag': self.module.params.get('tag'),
            'reserved_ip_v4': self.module.params.get('reserved_ip_v4'),
            'user_data': self.get_user_data(),
            'SCRIPTID': self.get_startup_script().get('SCRIPTID'),
        }
        self.api_query(
            path="/v1/server/create",
            method="POST",
            data=data
        )
        server = self._wait_for_state(key='status', state='active')
        # Snapshot restores can take much longer than a plain deploy.
        server = self._wait_for_state(state='running', timeout=3600 if snapshot_restore else 60)
    return server
def _update_auto_backups_setting(self, server, start_server):
    """Enable/disable automatic backups if the parameter differs.

    Re-enabling is impossible through the API once backups were disabled,
    so that case only emits a warning. May stop a running server (see
    ``_handle_power_status_for_update``).
    """
    auto_backup_enabled_changed = self.switch_enable_disable(server, 'auto_backup_enabled', 'auto_backups')

    if auto_backup_enabled_changed:
        if auto_backup_enabled_changed == "enable" and server['auto_backups'] == 'disable':
            self.module.warn("Backups are disabled. Once disabled, backups can only be enabled again by customer support")
        else:
            server, warned = self._handle_power_status_for_update(server, start_server)
            if not warned:
                self.result['changed'] = True
                self.result['diff']['before']['auto_backup_enabled'] = server.get('auto_backups')
                self.result['diff']['after']['auto_backup_enabled'] = self.get_yes_or_no('auto_backup_enabled')

                if not self.module.check_mode:
                    data = {
                        'SUBID': server['SUBID']
                    }
                    self.api_query(
                        path="/v1/server/backup_%s" % auto_backup_enabled_changed,
                        method="POST",
                        data=data
                    )
    return server
def _update_ipv6_setting(self, server, start_server):
    """Enable IPv6 if requested; disabling is not supported by the API.

    May stop a running server (see ``_handle_power_status_for_update``)
    and waits for a v6 address to appear after enabling.
    """
    ipv6_enabled_changed = self.switch_enable_disable(server, 'ipv6_enabled', 'v6_main_ip')

    if ipv6_enabled_changed:
        if ipv6_enabled_changed == "disable":
            self.module.warn("The Vultr API does not allow to disable IPv6")
        else:
            server, warned = self._handle_power_status_for_update(server, start_server)
            if not warned:
                self.result['changed'] = True
                self.result['diff']['before']['ipv6_enabled'] = False
                self.result['diff']['after']['ipv6_enabled'] = True

                if not self.module.check_mode:
                    data = {
                        'SUBID': server['SUBID']
                    }
                    self.api_query(
                        path="/v1/server/ipv6_%s" % ipv6_enabled_changed,
                        method="POST",
                        data=data
                    )
                    # Block until the API reports a main IPv6 address.
                    server = self._wait_for_state(key='v6_main_ip')
    return server
def _update_private_network_setting(self, server, start_server):
    """Enable private networking if requested; disabling is unsupported.

    May stop a running server (see ``_handle_power_status_for_update``).
    """
    private_network_enabled_changed = self.switch_enable_disable(server, 'private_network_enabled', 'internal_ip')
    if private_network_enabled_changed:
        if private_network_enabled_changed == "disable":
            self.module.warn("The Vultr API does not allow to disable private network")
        else:
            server, warned = self._handle_power_status_for_update(server, start_server)
            if not warned:
                self.result['changed'] = True
                self.result['diff']['before']['private_network_enabled'] = False
                self.result['diff']['after']['private_network_enabled'] = True

                if not self.module.check_mode:
                    data = {
                        'SUBID': server['SUBID']
                    }
                    self.api_query(
                        path="/v1/server/private_network_%s" % private_network_enabled_changed,
                        method="POST",
                        data=data
                    )
    return server
def _update_plan_setting(self, server, start_server):
plan = self.get_plan()
plan_changed = True if plan and plan['VPSPLANID'] != server.get('VPSPLANID') else False
if plan_changed:
server, warned = self._handle_power_status_for_update(server, start_server)
if not warned:
self.result['changed'] = True
self.result['diff']['before']['plan'] = server.get('plan')
self.result['diff']['after']['plan'] = plan['name']
if not self.module.check_mode:
data = {
'SUBID': server['SUBID'],
'VPSPLANID': plan['VPSPLANID'],
}
self.api_query(
path="/v1/server/upgrade_plan",
method="POST",
data=data
)
return server
def _handle_power_status_for_update(self, server, start_server):
    """Prepare the server's power state for an update.

    Returns a ``(server, warned)`` tuple: *warned* is True when the
    server is running and may not be stopped, meaning the caller must
    skip its change.
    """
    # Remember the power state before we handle any action
    if self.server_power_state is None:
        self.server_power_state = server['power_status']

    # A stopped server can be updated
    if self.server_power_state == "stopped":
        return server, False

    # A running server must be forced to update unless the wanted state is stopped
    elif self.module.params.get('force') or not start_server:
        warned = False
        if not self.module.check_mode:
            # Some update APIs would restart the VM, we handle the restart manually
            # by stopping the server and start it at the end of the changes
            server = self.stop_server(skip_results=True)

    # Warn the user that a running server won't get changed
    else:
        warned = True
        self.module.warn("Some changes won't be applied to running instances. " +
                         "Use force=true to allow the instance %s to be stopped/started." % server['label'])

    return server, warned
def _update_server(self, server=None, start_server=True):
    """Apply all configured changes to an existing server.

    Settings that require a powered-off server are applied first; the
    server is restarted at the end if it was running beforehand and
    *start_server* is True.
    """
    # Wait for server to unlock if restoring
    if server.get('os').strip() == 'Snapshot':
        server = self._wait_for_state(key='server_status', state='ok', timeout=3600)

    # Update auto backups settings, stops server
    server = self._update_auto_backups_setting(server=server, start_server=start_server)

    # Update IPv6 settings, stops server
    server = self._update_ipv6_setting(server=server, start_server=start_server)

    # Update private network settings, stops server
    server = self._update_private_network_setting(server=server, start_server=start_server)

    # Update plan settings, stops server
    server = self._update_plan_setting(server=server, start_server=start_server)

    # User data (compared in its base64-encoded form)
    user_data = self.get_user_data()
    server_user_data = self.get_server_user_data(server=server)
    if user_data is not None and user_data != server_user_data:
        self.result['changed'] = True
        self.result['diff']['before']['user_data'] = server_user_data
        self.result['diff']['after']['user_data'] = user_data

        if not self.module.check_mode:
            data = {
                'SUBID': server['SUBID'],
                'userdata': user_data,
            }
            self.api_query(
                path="/v1/server/set_user_data",
                method="POST",
                data=data
            )

    # Tags
    tag = self.module.params.get('tag')
    if tag is not None and tag != server.get('tag'):
        self.result['changed'] = True
        self.result['diff']['before']['tag'] = server.get('tag')
        self.result['diff']['after']['tag'] = tag

        if not self.module.check_mode:
            data = {
                'SUBID': server['SUBID'],
                'tag': tag,
            }
            self.api_query(
                path="/v1/server/tag_set",
                method="POST",
                data=data
            )

    # Firewall group (matched by description, the human-readable name)
    firewall_group = self.get_firewall_group()
    if firewall_group and firewall_group.get('description') != server.get('firewall_group'):
        self.result['changed'] = True
        self.result['diff']['before']['firewall_group'] = server.get('firewall_group')
        self.result['diff']['after']['firewall_group'] = firewall_group.get('description')

        if not self.module.check_mode:
            data = {
                'SUBID': server['SUBID'],
                'FIREWALLGROUPID': firewall_group.get('FIREWALLGROUPID'),
            }
            self.api_query(
                path="/v1/server/firewall_group_set",
                method="POST",
                data=data
            )

    # Start server again if it was running before the changes
    if not self.module.check_mode:
        if self.server_power_state in ['starting', 'running'] and start_server:
            server = self.start_server(skip_results=True)

    server = self._wait_for_state(key='status', state='active')
    return server
def absent_server(self):
    """Destroy the server if present and wait until the API reports it gone.

    Returns the last known server dict, or None once deletion is
    confirmed; fails the module if the server does not disappear in time.
    """
    server = self.get_server()
    if server:
        self.result['changed'] = True
        self.result['diff']['before']['id'] = server['SUBID']
        self.result['diff']['after']['id'] = ""
        if not self.module.check_mode:
            data = {
                'SUBID': server['SUBID']
            }
            self.api_query(
                path="/v1/server/destroy",
                method="POST",
                data=data
            )
            for s in range(0, 60):
                # Bug fix: wait until the server is actually GONE. The
                # previous condition ("if server is not None: break")
                # exited immediately, so deletion was never awaited.
                if server is None:
                    break
                time.sleep(2)
                server = self.get_server(refresh=True)
            else:
                self.fail_json(msg="Wait for server '%s' to get deleted timed out" % server['label'])
    return server
def restart_server(self):
    """Reboot the server and wait until it reports 'running' again."""
    self.result['changed'] = True
    server = self.get_server()
    if not server:
        return server
    if not self.module.check_mode:
        self.api_query(
            path="/v1/server/reboot",
            method="POST",
            data={'SUBID': server['SUBID']}
        )
        server = self._wait_for_state(state='running')
    return server
def reinstall_server(self):
    """Reinstall the server's OS and wait until it is running again."""
    self.result['changed'] = True
    server = self.get_server()
    if not server:
        return server
    if not self.module.check_mode:
        self.api_query(
            path="/v1/server/reinstall",
            method="POST",
            data={'SUBID': server['SUBID']}
        )
        server = self._wait_for_state(state='running')
    return server
def _wait_for_state(self, key='power_status', state=None, timeout=60):
    """Poll the server until ``server[key]`` equals *state*.

    If *state* is None, wait until ``server[key]`` is truthy instead.
    NOTE(review): *timeout* counts poll iterations of ~2 seconds each,
    not seconds -- confirm the intended units with callers.
    """
    time.sleep(1)
    server = self.get_server(refresh=True)
    for s in range(0, timeout):
        # Wait for any truthy value when no explicit state was requested.
        if state is None and server.get(key):
            break
        elif server.get(key) == state:
            break
        time.sleep(2)
        server = self.get_server(refresh=True)

    # Timed out
    else:
        if state is None:
            msg = "Wait for '%s' timed out" % key
        else:
            msg = "Wait for '%s' to get into state '%s' timed out" % (key, state)
        self.fail_json(msg=msg)
    return server
def start_server(self, skip_results=False):
    """Ensure the server is running.

    With ``skip_results=True`` the changed flag and diff are not
    recorded -- used when the start is an implementation detail of a
    larger update rather than the requested state change.
    """
    server = self.get_server()
    if server:
        # Already booting: just wait for it to finish.
        if server['power_status'] == 'starting':
            server = self._wait_for_state(state='running')

        elif server['power_status'] != 'running':
            if not skip_results:
                self.result['changed'] = True
                self.result['diff']['before']['power_status'] = server['power_status']
                self.result['diff']['after']['power_status'] = "running"

            if not self.module.check_mode:
                data = {
                    'SUBID': server['SUBID']
                }
                self.api_query(
                    path="/v1/server/start",
                    method="POST",
                    data=data
                )
                server = self._wait_for_state(state='running')
    return server
def stop_server(self, skip_results=False):
    """Ensure the server is stopped.

    With ``skip_results=True`` the changed flag and diff are not
    recorded -- used when the stop is only a prerequisite of an update
    (see ``_handle_power_status_for_update``).
    """
    server = self.get_server()
    if server and server['power_status'] != "stopped":
        if not skip_results:
            self.result['changed'] = True
            self.result['diff']['before']['power_status'] = server['power_status']
            self.result['diff']['after']['power_status'] = "stopped"

        if not self.module.check_mode:
            data = {
                'SUBID': server['SUBID'],
            }
            self.api_query(
                path="/v1/server/halt",
                method="POST",
                data=data
            )
            server = self._wait_for_state(state='stopped')
    return server
def main():
    """Module entry point: build the argument spec and dispatch on 'state'."""
    argument_spec = vultr_argument_spec()
    argument_spec.update(dict(
        name=dict(required=True, aliases=['label']),
        hostname=dict(type='str'),
        os=dict(type='str'),
        snapshot=dict(type='str'),
        plan=dict(type='str'),
        force=dict(type='bool', default=False),
        notify_activate=dict(type='bool', default=False),
        private_network_enabled=dict(type='bool'),
        auto_backup_enabled=dict(type='bool'),
        ipv6_enabled=dict(type='bool'),
        tag=dict(type='str'),
        reserved_ip_v4=dict(type='str'),
        firewall_group=dict(type='str'),
        startup_script=dict(type='str'),
        user_data=dict(type='str'),
        ssh_keys=dict(type='list', aliases=['ssh_key']),
        region=dict(type='str'),
        state=dict(choices=['present', 'absent', 'restarted', 'reinstalled', 'started', 'stopped'], default='present'),
    ))

    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
    )

    vultr_server = AnsibleVultrServer(module)
    if module.params.get('state') == "absent":
        server = vultr_server.absent_server()
    else:
        # All non-absent states first ensure the server exists, then
        # apply the requested power action.
        if module.params.get('state') == "started":
            server = vultr_server.present_server()
            server = vultr_server.start_server()
        elif module.params.get('state') == "stopped":
            server = vultr_server.present_server(start_server=False)
            server = vultr_server.stop_server()
        elif module.params.get('state') == "restarted":
            server = vultr_server.present_server()
            server = vultr_server.restart_server()
        elif module.params.get('state') == "reinstalled":
            server = vultr_server.reinstall_server()
        else:
            server = vultr_server.present_server()

    result = vultr_server.get_result(server)
    module.exit_json(**result)
| gpl-3.0 |
winpython/winpython_afterdoc | docs/WASM_almar_klein_demo/rocket.py | 2 | 2777 | """ Run rocket.wasm in Python!
* Load the wasm module, compile to PPCI IR and then to native.
* Load the native object in memory, feed it the API it needs (wasm imports).
* Implement a Python app that uses the exposed API (wasm exports).
At the moment, the game is text based. But we can use Qt or SDL2 (or tk?)
to run it visually, and also feed in user interaction.
"""
import math
import logging
import time
import io
import os
from ppci import wasm
from ppci.utils.reporting import HtmlReportGenerator
# Verbose debug logging slows the game down considerably; keep it at WARN.
#logging.basicConfig(level=logging.DEBUG) # this produces so much output that it slows things down a lot
logging.basicConfig(level=logging.WARN)

# Load the wasm module once at import time so the compiled module can be
# shared by every game instance. Bug fix: close the file handle instead
# of leaking it via a bare open().read().
filename = os.path.join(os.path.dirname(__file__), 'wasm', 'rocket.wasm')
with open(filename, 'rb') as f:
    game_data = f.read()
game_module = wasm.Module(game_data)
class BaseRocketGame:
    """Text-based rocket game driven by the wasm module, without user input."""

    def __init__(self, game_module=game_module):
        # Every wasm_* method (minus the prefix) becomes a wasm import in
        # the module's "env" namespace.
        env = {name[5:]: getattr(self, name)
               for name in dir(self) if name.startswith('wasm_')}
        self.imports = dict(env=env)

        # Instantiate the game module with our callbacks wired in.
        self.game = wasm.instantiate(game_module, self.imports, target='python')

    def run(self):
        """ Enter the game's main loop.
        """
        self.game.exports.resize(100, 100)
        while True:
            time.sleep(0.5)
            self.game.exports.update(0.1)
            self.game.exports.draw()
            # We never call these ...
            # self.game.exports.toggle_shoot(b)
            # self.game.exports.toggle_turn_left(b)
            # self.game.exports.toggle_turn_right(b)
            # self.game.exports.toggle_boost(b)

    # Callbacks invoked from inside the wasm code. The type annotations
    # define the signatures exposed to the wasm module -- keep them.

    def wasm_sin(self, a:float) -> float:
        return math.sin(a)

    def wasm_cos(self, a:float) -> float:
        return math.cos(a)

    def wasm_Math_atan(self, a:float) -> float:
        return math.atan(a)

    def wasm_clear_screen(self) -> None:
        print('clearing screen')

    def wasm_draw_bullet(self, x:float, y:float) -> None:
        print(f'There is a bullet at {x}, {y}')

    def wasm_draw_enemy(self, x:float, y:float) -> None:
        print(f'There is an enemy at {x}, {y}')

    def wasm_draw_particle(self, x:float, y:float, a:float) -> None:
        print(f'There is a particle at {x}, {y} angle {a}')

    def wasm_draw_player(self, x:float, y:float, a:float) -> None:
        print(f'The player is at {x}, {y} angle {a}')

    def wasm_draw_score(self, score:float) -> None:
        print(f'The score is {score}!')
if __name__ == '__main__':
    # Run the text-based game loop until interrupted (Ctrl-C).
    game = BaseRocketGame()
    game.run()
| mit |
Sodki/ansible | lib/ansible/plugins/cache/jsonfile.py | 36 | 1681 | # (c) 2014, Brian Coca, Josh Drake, et al
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
'''
DOCUMENTATION:
cache: jsonfile
short_description: File backed, JSON formated.
description:
- File backed cache that uses JSON as a format, the files are per host.
version_added: "1.9"
author: Brian Coca (@bcoca)
'''
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import codecs
try:
import simplejson as json
except ImportError:
import json
from ansible.parsing.utils.jsonify import jsonify
from ansible.plugins.cache import BaseFileCacheModule
class CacheModule(BaseFileCacheModule):
    """
    A caching module backed by json files.
    """

    def _load(self, filepath):
        """Read one per-host cache file; valid JSON is always UTF-8."""
        with codecs.open(filepath, 'r', encoding='utf-8') as cache_file:
            return json.load(cache_file)

    def _dump(self, value, filepath):
        """Write *value* to the per-host cache file as formatted JSON."""
        with codecs.open(filepath, 'w', encoding='utf-8') as cache_file:
            cache_file.write(jsonify(value, format=True))
| gpl-3.0 |
endlessm/chromium-browser | third_party/catapult/third_party/gsutil/third_party/oauth2client/setup.py | 9 | 2555 | # Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Setup script for oauth2client.
Also installs included versions of third party libraries, if those libraries
are not already installed.
"""
from __future__ import print_function
import sys
from setuptools import find_packages
from setuptools import setup
import oauth2client
# Refuse to install on unsupported interpreters. Supported versions are
# Python 2.7 on the 2.x line and Python >= 3.4 on the 3.x line (note the
# guard below only rejects 3.1-3.3).
if sys.version_info < (2, 7):
    print('oauth2client requires python2 version >= 2.7.', file=sys.stderr)
    sys.exit(1)
if (3, 1) <= sys.version_info < (3, 4):
    print('oauth2client requires python3 version >= 3.4.', file=sys.stderr)
    sys.exit(1)

# Runtime dependencies installed automatically alongside the package.
install_requires = [
    'httplib2>=0.9.1',
    'pyasn1>=0.1.7',
    'pyasn1-modules>=0.0.5',
    'rsa>=3.1.4',
    'six>=1.6.1',
]

# reStructuredText long description shown on PyPI; it carries the
# deprecation notice for the whole project.
long_desc = """
oauth2client is a client library for OAuth 2.0.

Note: oauth2client is now deprecated. No more features will be added to the
libraries and the core team is turning down support. We recommend you use
`google-auth <https://google-auth.readthedocs.io>`__ and
`oauthlib <http://oauthlib.readthedocs.io/>`__.
"""

# Single-source the version from the package itself.
version = oauth2client.__version__

setup(
    name='oauth2client',
    version=version,
    description='OAuth 2.0 client library',
    long_description=long_desc,
    author='Google Inc.',
    author_email='jonwayne+oauth2client@google.com',
    url='http://github.com/google/oauth2client/',
    install_requires=install_requires,
    packages=find_packages(exclude=('tests*',)),
    license='Apache 2.0',
    keywords='google oauth 2.0 http client',
    classifiers=[
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Development Status :: 7 - Inactive',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: Apache Software License',
        'Operating System :: POSIX',
        'Topic :: Internet :: WWW/HTTP',
    ],
)
| bsd-3-clause |
mustafat/odoo-1 | addons/google_calendar/__openerp__.py | 299 | 1671 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2012 OpenERP SA (<http://www.openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
# Addon manifest for the Google Calendar synchronization module.
{
    'name': 'Google Calendar',
    'version': '1.0',
    'category': 'Tools',
    'description': """
The module adds the possibility to synchronize Google Calendar with OpenERP
===========================================================================
""",
    'author': 'OpenERP SA',
    'website': 'https://www.odoo.com/page/crm',
    # Requires the Google account bridge and the base calendar addon.
    'depends': ['google_account', 'calendar'],
    # Client-side QWeb templates bundled with the addon.
    'qweb': ['static/src/xml/*.xml'],
    # Data files loaded on install/update: views and access rules.
    'data': [
        'res_config_view.xml',
        'security/ir.model.access.csv',
        'views/google_calendar.xml',
        'views/res_users.xml',
        'google_calendar.xml',
    ],
    'demo': [],
    'installable': True,
    'auto_install': False,
}
| agpl-3.0 |
mrquim/repository.mrquim | script.module.pycryptodome/lib/Crypto/SelfTest/Hash/__init__.py | 12 | 3352 | # -*- coding: utf-8 -*-
#
# SelfTest/Hash/__init__.py: Self-test for hash modules
#
# Written in 2008 by Dwayne C. Litzenberger <dlitz@dlitz.net>
#
# ===================================================================
# The contents of this file are dedicated to the public domain. To
# the extent that dedication to the public domain is not available,
# everyone is granted a worldwide, perpetual, royalty-free,
# non-exclusive license to exercise all rights associated with the
# contents of this file for any purpose whatsoever.
# No rights are reserved.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ===================================================================
"""Self-test for hash modules"""
__revision__ = "$Id$"
def get_tests(config={}):
    """Collects and returns the self-test suites for all hash modules."""

    def _suite(module_name):
        # Import Crypto.SelfTest.Hash.<module_name> and return its tests;
        # passing |fromlist| makes __import__ return the submodule itself.
        module = __import__('Crypto.SelfTest.Hash.' + module_name,
                            fromlist=[module_name])
        return module.get_tests(config=config)

    tests = []
    for name in ('test_HMAC', 'test_CMAC', 'test_MD2', 'test_MD4',
                 'test_MD5', 'test_RIPEMD160', 'test_SHA1', 'test_SHA256',
                 'test_SHA3_224', 'test_SHA3_256', 'test_SHA3_384',
                 'test_SHA3_512', 'test_keccak', 'test_SHAKE'):
        tests += _suite(name)
    # SHA-224/384/512 support may be absent; skip them with a warning.
    try:
        for name in ('test_SHA224', 'test_SHA384', 'test_SHA512'):
            tests += _suite(name)
    except ImportError:
        import sys
        sys.stderr.write("SelfTest: warning: not testing SHA224/SHA384/SHA512 modules (not available)\n")
    tests += _suite('test_BLAKE2')
    return tests
if __name__ == '__main__':
    import unittest
    # Entry point for running this module directly: wrap get_tests() in a
    # suite factory that unittest.main() resolves by name.
    suite = lambda: unittest.TestSuite(get_tests())
    unittest.main(defaultTest='suite')

# vim:set ts=4 sw=4 sts=4 expandtab:
| gpl-2.0 |
KaranToor/MA450 | google-cloud-sdk/platform/gsutil/third_party/boto/tests/integration/route53/domains/__init__.py | 113 | 1122 | # Copyright (c) 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
| apache-2.0 |
travs/PyOpenWorm | examples/add_reference.py | 3 | 1042 | """
How to reference supporting evidence for some object in the database.
See: "Metadata in PyOpenWorm" for discussion on semantics of what giving
evidence for an object means.
"""
import sys
import PyOpenWorm as P

# Create a dummy (empty) database configuration.
d = P.Data({})

# Connect to the database with the dummy configuration.
P.connect(conf=d)

# Create a new Neuron object to work with.
n = P.Neuron(name='AVAL')

# Create a new Evidence object with `doi` and `pmid` fields populated.
# See `PyOpenWorm/evidence.py` for other available fields.
e = P.Evidence(doi='125.41.3/ploscompbiol', pmid='57182010')

# Evidence object asserts something about the enclosed dataObject.
# Here we add a receptor to the Neuron we made earlier, and "assert it".
# As the discussion (see top) reads, this might be asserting the existence of
# receptor UNC-8 on neuron AVAL.
e.asserts(n.receptor('UNC-8'))

# Save the Neuron and Evidence objects to the database.
n.save()
e.save()

# What does my evidence object contain? (Python 2 print statement.)
print e

# Disconnect from the database.
P.disconnect()
| mit |
kbs5280/game-time | node_modules/node-sass/node_modules/node-gyp/gyp/pylib/gyp/generator/analyzer.py | 1382 | 30567 | # Copyright (c) 2014 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
This script is intended for use as a GYP_GENERATOR. It takes as input (by way of
the generator flag config_path) the path of a json file that dictates the files
and targets to search for. The following keys are supported:
files: list of paths (relative) of the files to search for.
test_targets: unqualified target names to search for. Any target in this list
that depends upon a file in |files| is output regardless of the type of target
or chain of dependencies.
additional_compile_targets: Unqualified targets to search for in addition to
test_targets. Targets in the combined list that depend upon a file in |files|
are not necessarily output. For example, if the target is of type none then the
target is not output (but one of the descendants of the target will be).
The following is output:
error: only supplied if there is an error.
compile_targets: minimal set of targets that directly or indirectly (for
targets of type none) depend on the files in |files| and is one of the
supplied targets or a target that one of the supplied targets depends on.
The expectation is this set of targets is passed into a build step. This list
always contains the output of test_targets as well.
test_targets: set of targets from the supplied |test_targets| that either
  directly or indirectly depend upon a file in |files|. This list is useful
if additional processing needs to be done for certain targets after the
build, such as running tests.
status: outputs one of three values: none of the supplied files were found,
one of the include files changed so that it should be assumed everything
changed (in this case test_targets and compile_targets are not output) or at
least one file was found.
invalid_targets: list of supplied targets that were not found.
Example:
Consider a graph like the following:
A D
/ \
B C
A depends upon both B and C, A is of type none and B and C are executables.
D is an executable, has no dependencies and nothing depends on it.
If |additional_compile_targets| = ["A"], |test_targets| = ["B", "C"] and
files = ["b.cc", "d.cc"] (B depends upon b.cc and D depends upon d.cc), then
the following is output:
|compile_targets| = ["B"] B must built as it depends upon the changed file b.cc
and the supplied target A depends upon it. A is not output as a build_target
as it is of type none with no rules and actions.
|test_targets| = ["B"] B directly depends upon the change file b.cc.
Even though the file d.cc, which D depends upon, has changed D is not output
as it was not supplied by way of |additional_compile_targets| or |test_targets|.
If the generator flag analyzer_output_path is specified, output is written
there. Otherwise output is written to stdout.
In Gyp the "all" target is shorthand for the root targets in the files passed
to gyp. For example, if file "a.gyp" contains targets "a1" and
"a2", and file "b.gyp" contains targets "b1" and "b2" and "a2" has a dependency
on "b2" and gyp is supplied "a.gyp" then "all" consists of "a1" and "a2".
Notice that "b1" and "b2" are not in the "all" target as "b.gyp" was not
directly supplied to gyp. OTOH if both "a.gyp" and "b.gyp" are supplied to gyp
then the "all" target includes "b1" and "b2".
"""
import gyp.common
import gyp.ninja_syntax as ninja_syntax
import json
import os
import posixpath
import sys
# When True, the helpers below print extra tracing of path/source handling.
debug = False

found_dependency_string = 'Found dependency'
no_dependency_string = 'No dependencies'
# Status when it should be assumed that everything has changed.
all_changed_string = 'Found dependency (all)'

# MatchStatus is used to indicate if and how a target depends upon the
# supplied sources.
# The target's sources contain one of the supplied paths.
MATCH_STATUS_MATCHES = 1
# The target has a dependency on another target that contains one of the
# supplied paths.
MATCH_STATUS_MATCHES_BY_DEPENDENCY = 2
# The target's sources weren't in the supplied paths and none of the target's
# dependencies depend upon a target that matched.
MATCH_STATUS_DOESNT_MATCH = 3
# The target doesn't contain the source, but the dependent targets have not
# yet been visited to determine a more specific status.
MATCH_STATUS_TBD = 4

generator_supports_multiple_toolsets = gyp.common.CrossCompileRequested()

generator_wants_static_library_dependencies_adjusted = False

generator_default_variables = {
}
# Directory variables are filled with the '!!!' sentinel so that any source
# path built from them is recognized (and skipped) by _AddSources() below.
for dirname in ['INTERMEDIATE_DIR', 'SHARED_INTERMEDIATE_DIR', 'PRODUCT_DIR',
                'LIB_DIR', 'SHARED_LIB_DIR']:
  generator_default_variables[dirname] = '!!!'

# The remaining generator variables are irrelevant to the analyzer and are
# expanded to the empty string.
for unused in ['RULE_INPUT_PATH', 'RULE_INPUT_ROOT', 'RULE_INPUT_NAME',
               'RULE_INPUT_DIRNAME', 'RULE_INPUT_EXT',
               'EXECUTABLE_PREFIX', 'EXECUTABLE_SUFFIX',
               'STATIC_LIB_PREFIX', 'STATIC_LIB_SUFFIX',
               'SHARED_LIB_PREFIX', 'SHARED_LIB_SUFFIX',
               'CONFIGURATION_NAME']:
  generator_default_variables[unused] = ''
def _ToGypPath(path):
"""Converts a path to the format used by gyp."""
if os.sep == '\\' and os.altsep == '/':
return path.replace('\\', '/')
return path
def _ResolveParent(path, base_path_components):
"""Resolves |path|, which starts with at least one '../'. Returns an empty
string if the path shouldn't be considered. See _AddSources() for a
description of |base_path_components|."""
depth = 0
while path.startswith('../'):
depth += 1
path = path[3:]
# Relative includes may go outside the source tree. For example, an action may
# have inputs in /usr/include, which are not in the source tree.
if depth > len(base_path_components):
return ''
if depth == len(base_path_components):
return path
return '/'.join(base_path_components[0:len(base_path_components) - depth]) + \
'/' + path
def _AddSources(sources, base_path, base_path_components, result):
  """Extracts valid sources from |sources| and adds them to |result|. Each
  source file is relative to |base_path|, but may contain '..'. To make
  resolving '..' easier |base_path_components| contains each of the
  directories in |base_path|. Additionally each source may contain variables.
  Such sources are ignored as it is assumed dependencies on them are expressed
  and tracked in some other means."""
  # NOTE: gyp paths are always posix style.
  for source in sources:
    # '!!!' is the sentinel the analyzer substitutes for generated-directory
    # variables (see generator_default_variables above); '$' marks an
    # unexpanded gyp variable. Both are skipped per the docstring.
    if not len(source) or source.startswith('!!!') or source.startswith('$'):
      continue
    # variable expansion may lead to //.
    org_source = source
    source = source[0] + source[1:].replace('//', '/')
    if source.startswith('../'):
      # Paths that escape |base_path| are resolved against its components;
      # _ResolveParent() returns '' for paths outside the tree.
      source = _ResolveParent(source, base_path_components)
      if len(source):
        result.append(source)
      continue
    result.append(base_path + source)
    if debug:
      print 'AddSource', org_source, result[len(result) - 1]
def _ExtractSourcesFromAction(action, base_path, base_path_components,
results):
if 'inputs' in action:
_AddSources(action['inputs'], base_path, base_path_components, results)
def _ToLocalPath(toplevel_dir, path):
"""Converts |path| to a path relative to |toplevel_dir|."""
if path == toplevel_dir:
return ''
if path.startswith(toplevel_dir + '/'):
return path[len(toplevel_dir) + len('/'):]
return path
def _ExtractSources(target, target_dict, toplevel_dir):
  """Returns the posix-style source paths of |target|, including the inputs
  of any actions and rules, resolved relative to |toplevel_dir|."""
  # |target| is either absolute or relative and in the format of the OS. Gyp
  # source paths are always posix. Convert |target| to a posix path relative to
  # |toplevel_dir_|. This is done to make it easy to build source paths.
  base_path = posixpath.dirname(_ToLocalPath(toplevel_dir, _ToGypPath(target)))
  base_path_components = base_path.split('/')

  # Add a trailing '/' so that _AddSources() can easily build paths.
  if len(base_path):
    base_path += '/'

  if debug:
    print 'ExtractSources', target, base_path

  results = []
  if 'sources' in target_dict:
    _AddSources(target_dict['sources'], base_path, base_path_components,
                results)
  # Include the inputs from any actions. Any changes to these affect the
  # resulting output.
  if 'actions' in target_dict:
    for action in target_dict['actions']:
      _ExtractSourcesFromAction(action, base_path, base_path_components,
                                results)
  # Rule inputs are treated the same way as action inputs.
  if 'rules' in target_dict:
    for rule in target_dict['rules']:
      _ExtractSourcesFromAction(rule, base_path, base_path_components, results)

  return results
class Target(object):
  """A single gyp target and its position in the dependency graph.

  Attributes:
    deps: the Targets this Target directly depends upon (not recursive).
    match_status: one of the MATCH_STATUS_* values.
    back_deps: Targets that directly depend on this Target.
    visited: scratch flag used during iteration, both while building the set
      of Targets and again in _GetBuildTargets().
    name: fully qualified name of the target.
    requires_build: True if the target type is such that it needs to be
      built. See _DoesTargetTypeRequireBuild for details.
    added_to_compile_targets: scratch flag used while deciding whether the
      target was added to the set of targets that needs to be built.
    in_roots: True if this target is a descendant of one of the root nodes.
    is_executable: True if the type of target is executable.
    is_static_library: True if the type of target is static_library.
    is_or_has_linked_ancestor: True if the target does a link (eg
      executable), or if there is a target in back_deps that does a link."""

  def __init__(self, name):
    self.name = name
    self.match_status = MATCH_STATUS_TBD
    self.deps = set()
    self.back_deps = set()
    # TODO(sky): the flags below are traversal-specific scratch state and
    # would be better isolated with the functions that use them.
    self.visited = False
    self.requires_build = False
    self.added_to_compile_targets = False
    self.in_roots = False
    self.is_executable = False
    self.is_static_library = False
    self.is_or_has_linked_ancestor = False
class Config(object):
  """Details what we're looking for
  files: set of files to search for
  targets: see file description for details."""

  def __init__(self):
    # Empty defaults so the analyzer still runs when no config is supplied.
    self.files = []
    self.targets = set()
    self.additional_compile_target_names = set()
    self.test_target_names = set()

  def Init(self, params):
    """Initializes Config from the 'config_path' generator flag in |params|.

    This is a separate method from __init__ as it raises an Exception if the
    config file cannot be opened, is not valid JSON, or is not a JSON
    dictionary. Does nothing when no 'config_path' flag is supplied."""
    generator_flags = params.get('generator_flags', {})
    config_path = generator_flags.get('config_path', None)
    if not config_path:
      return
    try:
      # 'with' guarantees the handle is closed even when json.load() raises;
      # the previous code leaked the handle on a parse error.
      with open(config_path, 'r') as f:
        config = json.load(f)
    except IOError:
      raise Exception('Unable to open file ' + config_path)
    except ValueError as e:
      raise Exception('Unable to parse config file ' + config_path + str(e))
    if not isinstance(config, dict):
      raise Exception('config_path must be a JSON file containing a dictionary')
    self.files = config.get('files', [])
    self.additional_compile_target_names = set(
        config.get('additional_compile_targets', []))
    self.test_target_names = set(config.get('test_targets', []))
def _WasBuildFileModified(build_file, data, files, toplevel_dir):
  """Returns true if the build file |build_file| is either in |files| or
  one of the files included by |build_file| is in |files|. |toplevel_dir| is
  the root of the source tree."""
  if _ToLocalPath(toplevel_dir, _ToGypPath(build_file)) in files:
    if debug:
      print 'gyp file modified', build_file
    return True

  # First element of included_files is the file itself.
  if len(data[build_file]['included_files']) <= 1:
    return False

  for include_file in data[build_file]['included_files'][1:]:
    # |included_files| are relative to the directory of the |build_file|.
    rel_include_file = \
        _ToGypPath(gyp.common.UnrelativePath(include_file, build_file))
    if _ToLocalPath(toplevel_dir, rel_include_file) in files:
      if debug:
        print 'included gyp file modified, gyp_file=', build_file, \
              'included file=', rel_include_file
      return True
  return False
def _GetOrCreateTargetByName(targets, target_name):
  """Fetches the Target stored under |target_name|, creating one on a miss.

  Returns a (created, target) tuple where |created| indicates whether a new
  Target was inserted into |targets|."""
  try:
    return False, targets[target_name]
  except KeyError:
    new_target = Target(target_name)
    targets[target_name] = new_target
    return True, new_target
def _DoesTargetTypeRequireBuild(target_dict):
"""Returns true if the target type is such that it needs to be built."""
# If a 'none' target has rules or actions we assume it requires a build.
return bool(target_dict['type'] != 'none' or
target_dict.get('actions') or target_dict.get('rules'))
def _GenerateTargets(data, target_list, target_dicts, toplevel_dir, files,
                     build_files):
  """Returns a tuple of the following:
  . A dictionary mapping from fully qualified name to Target.
  . A list of the targets that have a source file in |files|.
  . Targets that constitute the 'all' target. See description at top of file
    for details on the 'all' target.
  This sets the |match_status| of the targets that contain any of the source
  files in |files| to MATCH_STATUS_MATCHES.
  |toplevel_dir| is the root of the source tree."""
  # Maps from target name to Target.
  name_to_target = {}

  # Targets that matched.
  matching_targets = []

  # Queue of targets to visit.
  targets_to_visit = target_list[:]

  # Maps from build file to a boolean indicating whether the build file is in
  # |files|. Memoized because several targets typically share one build file.
  build_file_in_files = {}

  # Root targets across all files. A target is a root until something is
  # discovered to depend on it (see the roots.discard() below).
  roots = set()

  # Set of Targets in |build_files|.
  build_file_targets = set()

  while len(targets_to_visit) > 0:
    target_name = targets_to_visit.pop()
    created_target, target = _GetOrCreateTargetByName(name_to_target,
                                                      target_name)
    if created_target:
      roots.add(target)
    elif target.visited:
      continue

    target.visited = True
    target.requires_build = _DoesTargetTypeRequireBuild(
        target_dicts[target_name])
    target_type = target_dicts[target_name]['type']
    target.is_executable = target_type == 'executable'
    target.is_static_library = target_type == 'static_library'
    target.is_or_has_linked_ancestor = (target_type == 'executable' or
                                        target_type == 'shared_library')

    build_file = gyp.common.ParseQualifiedTarget(target_name)[0]
    if not build_file in build_file_in_files:
      build_file_in_files[build_file] = \
          _WasBuildFileModified(build_file, data, files, toplevel_dir)

    if build_file in build_files:
      build_file_targets.add(target)

    # If a build file (or any of its included files) is modified we assume all
    # targets in the file are modified.
    if build_file_in_files[build_file]:
      print 'matching target from modified build file', target_name
      target.match_status = MATCH_STATUS_MATCHES
      matching_targets.append(target)
    else:
      # Otherwise a target matches only when one of its (normalized) sources
      # is among the supplied files.
      sources = _ExtractSources(target_name, target_dicts[target_name],
                                toplevel_dir)
      for source in sources:
        if _ToGypPath(os.path.normpath(source)) in files:
          print 'target', target_name, 'matches', source
          target.match_status = MATCH_STATUS_MATCHES
          matching_targets.append(target)
          break

    # Add dependencies to visit as well as updating back pointers for deps.
    for dep in target_dicts[target_name].get('dependencies', []):
      targets_to_visit.append(dep)

      created_dep_target, dep_target = _GetOrCreateTargetByName(name_to_target,
                                                                dep)
      if not created_dep_target:
        roots.discard(dep_target)

      target.deps.add(dep_target)
      dep_target.back_deps.add(target)

  # Only root targets declared in the supplied |build_files| form 'all'.
  return name_to_target, matching_targets, roots & build_file_targets
def _GetUnqualifiedToTargetMapping(all_targets, to_find):
"""Returns a tuple of the following:
. mapping (dictionary) from unqualified name to Target for all the
Targets in |to_find|.
. any target names not found. If this is empty all targets were found."""
result = {}
if not to_find:
return {}, []
to_find = set(to_find)
for target_name in all_targets.keys():
extracted = gyp.common.ParseQualifiedTarget(target_name)
if len(extracted) > 1 and extracted[1] in to_find:
to_find.remove(extracted[1])
result[extracted[1]] = all_targets[target_name]
if not to_find:
return result, []
return result, [x for x in to_find]
def _DoesTargetDependOnMatchingTargets(target):
  """Returns true if |target| or any of its dependencies is one of the
  targets containing the files supplied as input to analyzer. This updates
  |match_status| of the Targets as it recurses (memoizing the result so each
  target is only resolved once).
  target: the Target to look for."""
  if target.match_status == MATCH_STATUS_DOESNT_MATCH:
    return False
  if target.match_status == MATCH_STATUS_MATCHES or \
      target.match_status == MATCH_STATUS_MATCHES_BY_DEPENDENCY:
    return True
  for dep in target.deps:
    if _DoesTargetDependOnMatchingTargets(dep):
      target.match_status = MATCH_STATUS_MATCHES_BY_DEPENDENCY
      print '\t', target.name, 'matches by dep', dep.name
      return True
  target.match_status = MATCH_STATUS_DOESNT_MATCH
  return False
def _GetTargetsDependingOnMatchingTargets(possible_targets):
  """Returns the list of Targets in |possible_targets| that depend (either
  directly or indirectly) on at least one of the targets containing the files
  supplied as input to analyzer.
  possible_targets: targets to search from."""
  found = []
  print 'Targets that matched by dependency:'
  for target in possible_targets:
    if _DoesTargetDependOnMatchingTargets(target):
      found.append(target)
  return found
def _AddCompileTargets(target, roots, add_if_no_ancestor, result):
  """Recurses through all targets that depend on |target|, adding all targets
  that need to be built (and are in |roots|) to |result|.
  roots: set of root targets.
  add_if_no_ancestor: If true and there are no ancestors of |target| then add
  |target| to |result|. |target| must still be in |roots|.
  result: targets that need to be built are added here."""
  if target.visited:
    return

  target.visited = True
  target.in_roots = target in roots

  # Visit every reverse dependency first, then fold its flags into this
  # target's so the decision below sees the whole ancestor chain.
  for back_dep_target in target.back_deps:
    _AddCompileTargets(back_dep_target, roots, False, result)
    target.added_to_compile_targets |= back_dep_target.added_to_compile_targets
    target.in_roots |= back_dep_target.in_roots
    target.is_or_has_linked_ancestor |= (
        back_dep_target.is_or_has_linked_ancestor)

  # Always add 'executable' targets. Even though they may be built by other
  # targets that depend upon them it makes detection of what is going to be
  # built easier.
  # And always add static_libraries that have no dependencies on them from
  # linkables. This is necessary as the other dependencies on them may be
  # static libraries themselves, which are not compile time dependencies.
  if target.in_roots and \
        (target.is_executable or
         (not target.added_to_compile_targets and
          (add_if_no_ancestor or target.requires_build)) or
         (target.is_static_library and add_if_no_ancestor and
          not target.is_or_has_linked_ancestor)):
    print '\t\tadding to compile targets', target.name, 'executable', \
          target.is_executable, 'added_to_compile_targets', \
          target.added_to_compile_targets, 'add_if_no_ancestor', \
          add_if_no_ancestor, 'requires_build', target.requires_build, \
          'is_static_library', target.is_static_library, \
          'is_or_has_linked_ancestor', target.is_or_has_linked_ancestor
    result.add(target)
    target.added_to_compile_targets = True
def _GetCompileTargets(matching_targets, supplied_targets):
  """Returns the set of Targets that require a build.
  matching_targets: targets that changed and need to be built.
  supplied_targets: set of targets supplied to analyzer to search from."""
  result = set()
  for target in matching_targets:
    print 'finding compile targets for match', target.name
    # Walk upward through reverse dependencies from each changed target.
    _AddCompileTargets(target, supplied_targets, True, result)
  return result
def _WriteOutput(params, **values):
  """Writes the output, both human-readable to stdout and as JSON to either
  stdout or the file named by the 'analyzer_output_path' generator flag."""
  if 'error' in values:
    print 'Error:', values['error']
  if 'status' in values:
    print values['status']
  if 'targets' in values:
    values['targets'].sort()
    print 'Supplied targets that depend on changed files:'
    for target in values['targets']:
      print '\t', target
  if 'invalid_targets' in values:
    values['invalid_targets'].sort()
    print 'The following targets were not found:'
    for target in values['invalid_targets']:
      print '\t', target
  if 'build_targets' in values:
    values['build_targets'].sort()
    print 'Targets that require a build:'
    for target in values['build_targets']:
      print '\t', target
  if 'compile_targets' in values:
    values['compile_targets'].sort()
    print 'Targets that need to be built:'
    for target in values['compile_targets']:
      print '\t', target
  if 'test_targets' in values:
    values['test_targets'].sort()
    print 'Test targets:'
    for target in values['test_targets']:
      print '\t', target

  output_path = params.get('generator_flags', {}).get(
      'analyzer_output_path', None)
  if not output_path:
    print json.dumps(values)
    return
  try:
    f = open(output_path, 'w')
    f.write(json.dumps(values) + '\n')
    f.close()
  except IOError as e:
    # Failure to write the file is reported but not fatal.
    print 'Error writing to output file', output_path, str(e)
def _WasGypIncludeFileModified(params, files):
  """Returns true if one of the files in |files| is in the set of included
  files (the -I/--include files passed to gyp). A modified include file means
  everything must be assumed changed."""
  if params['options'].includes:
    for include in params['options'].includes:
      if _ToGypPath(os.path.normpath(include)) in files:
        print 'Include file modified, assuming all changed', include
        return True
  return False
def _NamesNotIn(names, mapping):
"""Returns a list of the values in |names| that are not in |mapping|."""
return [name for name in names if name not in mapping]
def _LookupTargets(names, mapping):
"""Returns a list of the mapping[name] for each value in |names| that is in
|mapping|."""
return [mapping[name] for name in names if name in mapping]
def CalculateVariables(default_variables, params):
  """Calculate additional variables for use in the build (called by gyp)."""
  flavor = gyp.common.GetFlavor(params)
  if flavor == 'mac':
    default_variables.setdefault('OS', 'mac')
  elif flavor == 'win':
    default_variables.setdefault('OS', 'win')
    # Copy additional generator configuration data from VS, which is shared
    # by the Windows Ninja generator.
    import gyp.generator.msvs as msvs_generator
    # NOTE(review): these two locals are assigned but never used or exported
    # from this function scope — presumably intended as module globals like
    # in the ninja generator; confirm before relying on them.
    generator_additional_non_configuration_keys = getattr(msvs_generator,
        'generator_additional_non_configuration_keys', [])
    generator_additional_path_sections = getattr(msvs_generator,
        'generator_additional_path_sections', [])

    # NOTE(review): gyp.msvs_emulation is not imported in this module; this
    # appears to rely on the msvs generator import above pulling it in —
    # TODO confirm.
    gyp.msvs_emulation.CalculateCommonVariables(default_variables, params)
  else:
    operating_system = flavor
    if flavor == 'android':
      operating_system = 'linux'  # Keep this legacy behavior for now.
    default_variables.setdefault('OS', operating_system)
class TargetCalculator(object):
"""Calculates the matching test_targets and matching compile_targets."""
  def __init__(self, files, additional_compile_target_names, test_target_names,
               data, target_list, target_dicts, toplevel_dir, build_files):
    self._additional_compile_target_names = set(additional_compile_target_names)
    self._test_target_names = set(test_target_names)
    # Walk the full target graph once, recording every Target, the targets
    # whose sources/build files changed, and the root targets of the
    # supplied build files.
    self._name_to_target, self._changed_targets, self._root_targets = (
        _GenerateTargets(data, target_list, target_dicts, toplevel_dir,
                         frozenset(files), build_files))
    # Resolve the supplied unqualified names (excluding 'all') to Targets;
    # unresolved names are reported via |invalid_targets|.
    self._unqualified_mapping, self.invalid_targets = (
        _GetUnqualifiedToTargetMapping(self._name_to_target,
                                       self._supplied_target_names_no_all()))
def _supplied_target_names(self):
return self._additional_compile_target_names | self._test_target_names
def _supplied_target_names_no_all(self):
"""Returns the supplied test targets without 'all'."""
result = self._supplied_target_names();
result.discard('all')
return result
  def is_build_impacted(self):
    """Returns true if the supplied files impact the build at all."""
    # Returns the (possibly empty) list of changed targets itself, which is
    # truthy exactly when at least one target matched.
    return self._changed_targets
def find_matching_test_target_names(self):
"""Returns the set of output test targets."""
assert self.is_build_impacted()
# Find the test targets first. 'all' is special cased to mean all the
# root targets. To deal with all the supplied |test_targets| are expanded
# to include the root targets during lookup. If any of the root targets
# match, we remove it and replace it with 'all'.
test_target_names_no_all = set(self._test_target_names)
test_target_names_no_all.discard('all')
test_targets_no_all = _LookupTargets(test_target_names_no_all,
self._unqualified_mapping)
test_target_names_contains_all = 'all' in self._test_target_names
if test_target_names_contains_all:
test_targets = [x for x in (set(test_targets_no_all) |
set(self._root_targets))]
else:
test_targets = [x for x in test_targets_no_all]
print 'supplied test_targets'
for target_name in self._test_target_names:
print '\t', target_name
print 'found test_targets'
for target in test_targets:
print '\t', target.name
print 'searching for matching test targets'
matching_test_targets = _GetTargetsDependingOnMatchingTargets(test_targets)
matching_test_targets_contains_all = (test_target_names_contains_all and
set(matching_test_targets) &
set(self._root_targets))
if matching_test_targets_contains_all:
# Remove any of the targets for all that were not explicitly supplied,
# 'all' is subsequentely added to the matching names below.
matching_test_targets = [x for x in (set(matching_test_targets) &
set(test_targets_no_all))]
print 'matched test_targets'
for target in matching_test_targets:
print '\t', target.name
matching_target_names = [gyp.common.ParseQualifiedTarget(target.name)[1]
for target in matching_test_targets]
if matching_test_targets_contains_all:
matching_target_names.append('all')
print '\tall'
return matching_target_names
def find_matching_compile_target_names(self):
"""Returns the set of output compile targets."""
assert self.is_build_impacted();
# Compile targets are found by searching up from changed targets.
# Reset the visited status for _GetBuildTargets.
for target in self._name_to_target.itervalues():
target.visited = False
supplied_targets = _LookupTargets(self._supplied_target_names_no_all(),
self._unqualified_mapping)
if 'all' in self._supplied_target_names():
supplied_targets = [x for x in (set(supplied_targets) |
set(self._root_targets))]
print 'Supplied test_targets & compile_targets'
for target in supplied_targets:
print '\t', target.name
print 'Finding compile targets'
compile_targets = _GetCompileTargets(self._changed_targets,
supplied_targets)
return [gyp.common.ParseQualifiedTarget(target.name)[1]
for target in compile_targets]
def GenerateOutput(target_list, target_dicts, data, params):
  """Called by gyp as the final stage. Outputs results."""
  config = Config()
  try:
    config.Init(params)

    if not config.files:
      raise Exception('Must specify files to analyze via config_path generator '
                      'flag')

    toplevel_dir = _ToGypPath(os.path.abspath(params['options'].toplevel_dir))
    if debug:
      print 'toplevel_dir', toplevel_dir

    # If a gyp/gypi include file itself changed, everything is considered
    # dirty: echo back all supplied targets.
    if _WasGypIncludeFileModified(params, config.files):
      result_dict = { 'status': all_changed_string,
                      'test_targets': list(config.test_target_names),
                      'compile_targets': list(
                          config.additional_compile_target_names |
                          config.test_target_names) }
      _WriteOutput(params, **result_dict)
      return

    calculator = TargetCalculator(config.files,
                                  config.additional_compile_target_names,
                                  config.test_target_names, data,
                                  target_list, target_dicts, toplevel_dir,
                                  params['build_files'])
    if not calculator.is_build_impacted():
      # None of the changed files are used by any target.
      result_dict = { 'status': no_dependency_string,
                      'test_targets': [],
                      'compile_targets': [] }
      if calculator.invalid_targets:
        result_dict['invalid_targets'] = calculator.invalid_targets
      _WriteOutput(params, **result_dict)
      return

    test_target_names = calculator.find_matching_test_target_names()
    compile_target_names = calculator.find_matching_compile_target_names()
    found_at_least_one_target = compile_target_names or test_target_names
    result_dict = { 'test_targets': test_target_names,
                    'status': found_dependency_string if
                        found_at_least_one_target else no_dependency_string,
                    'compile_targets': list(
                        set(compile_target_names) |
                        set(test_target_names)) }
    if calculator.invalid_targets:
      result_dict['invalid_targets'] = calculator.invalid_targets
    _WriteOutput(params, **result_dict)
  except Exception as e:
    # Any failure is reported back through the 'error' key rather than
    # letting the exception escape into gyp.
    _WriteOutput(params, error=str(e))
| mit |
neharejanjeva/techstitution | venv/lib/python2.7/site-packages/pip/_vendor/requests/packages/urllib3/util/url.py | 713 | 5879 | from __future__ import absolute_import
from collections import namedtuple
from ..exceptions import LocationParseError
url_attrs = ['scheme', 'auth', 'host', 'port', 'path', 'query', 'fragment']


class Url(namedtuple('Url', url_attrs)):
    """
    Datastructure for representing an HTTP URL. Used as a return value for
    :func:`parse_url`.
    """
    # Bug fix: this previously read ``slots = ()``, a typo with no effect.
    # ``__slots__ = ()`` is what actually suppresses the per-instance
    # ``__dict__`` on this namedtuple subclass, keeping Url lightweight.
    __slots__ = ()

    def __new__(cls, scheme=None, auth=None, host=None, port=None, path=None,
                query=None, fragment=None):
        # A present-but-relative path is normalized to an absolute one.
        if path and not path.startswith('/'):
            path = '/' + path
        return super(Url, cls).__new__(cls, scheme, auth, host, port, path,
                                       query, fragment)

    @property
    def hostname(self):
        """For backwards-compatibility with urlparse. We're nice like that."""
        return self.host

    @property
    def request_uri(self):
        """Absolute path including the query string."""
        uri = self.path or '/'

        if self.query is not None:
            uri += '?' + self.query

        return uri

    @property
    def netloc(self):
        """Network location including host and port"""
        if self.port:
            return '%s:%d' % (self.host, self.port)
        return self.host

    @property
    def url(self):
        """
        Convert self into a url

        This function should more or less round-trip with :func:`.parse_url`. The
        returned url may not be exactly the same as the url inputted to
        :func:`.parse_url`, but it should be equivalent by the RFC (e.g., urls
        with a blank port will have : removed).

        Example: ::

            >>> U = parse_url('http://google.com/mail/')
            >>> U.url
            'http://google.com/mail/'
            >>> Url('http', 'username:password', 'host.com', 80,
            ... '/path', 'query', 'fragment').url
            'http://username:password@host.com:80/path?query#fragment'
        """
        scheme, auth, host, port, path, query, fragment = self
        url = ''

        # We use "is not None" because we want things to happen with empty
        # strings (or 0 port).
        if scheme is not None:
            url += scheme + '://'
        if auth is not None:
            url += auth + '@'
        if host is not None:
            url += host
        if port is not None:
            url += ':' + str(port)
        if path is not None:
            url += path
        if query is not None:
            url += '?' + query
        if fragment is not None:
            url += '#' + fragment

        return url

    def __str__(self):
        return self.url
def split_first(s, delims):
    """
    Split ``s`` on whichever delimiter in ``delims`` occurs earliest.

    Returns ``(head, tail, delimiter)``; when no delimiter is present the
    head is the whole input and the delimiter is None.

    Example::

        >>> split_first('foo/bar?baz', '?/=')
        ('foo', 'bar?baz', '/')
        >>> split_first('foo/bar?baz', '123')
        ('foo/bar?baz', '', None)

    Scales linearly with number of delims. Not ideal for large number of delims.
    """
    best = None  # (index, delimiter) of the earliest match found so far

    for delim in delims:
        pos = s.find(delim)
        if pos >= 0 and (best is None or pos < best[0]):
            best = (pos, delim)

    if best is None:
        return s, '', None

    pos, delim = best
    return s[:pos], s[pos + 1:], delim
def parse_url(url):
    """
    Given a url, return a parsed :class:`.Url` namedtuple. Best-effort is
    performed to parse incomplete urls. Fields not provided will be None.

    Partly backwards-compatible with :mod:`urlparse`.

    Example::

        >>> parse_url('http://google.com/mail/')
        Url(scheme='http', host='google.com', port=None, path='/mail/', ...)
        >>> parse_url('google.com:80')
        Url(scheme=None, host='google.com', port=80, path=None, ...)
        >>> parse_url('/foo?bar')
        Url(scheme=None, host=None, port=None, path='/foo', query='bar', ...)
    """

    # While this code has overlap with stdlib's urlparse, it is much
    # simplified for our needs and less annoying.
    # Additionally, this implementation does silly things to be optimal
    # on CPython.

    if not url:
        # Empty
        return Url()

    scheme = None
    auth = None
    host = None
    port = None
    path = None
    fragment = None
    query = None

    # Scheme
    if '://' in url:
        scheme, url = url.split('://', 1)

    # Find the earliest Authority Terminator
    # (http://tools.ietf.org/html/rfc3986#section-3.2)
    url, path_, delim = split_first(url, ['/', '?', '#'])

    if delim:
        # Reassemble the path
        path = delim + path_

    # Auth
    if '@' in url:
        # Last '@' denotes end of auth part
        auth, url = url.rsplit('@', 1)

    # IPv6
    if url and url[0] == '[':
        # Bracketed literal, e.g. '[::1]'; the ']' dropped by split is
        # restored so the host keeps its brackets.
        host, url = url.split(']', 1)
        host += ']'

    # Port
    if ':' in url:
        _host, port = url.split(':', 1)

        if not host:
            # Non-IPv6 case: everything before ':' is the host.
            host = _host

        if port:
            # If given, ports must be integers.
            if not port.isdigit():
                raise LocationParseError(url)
            port = int(port)
        else:
            # Blank ports are cool, too. (rfc3986#section-3.2.3)
            port = None

    elif not host and url:
        host = url

    if not path:
        # Nothing followed the authority, so no query/fragment exists either.
        return Url(scheme, auth, host, port, path, query, fragment)

    # Fragment
    if '#' in path:
        path, fragment = path.split('#', 1)

    # Query
    if '?' in path:
        path, query = path.split('?', 1)

    return Url(scheme, auth, host, port, path, query, fragment)
def get_host(url):
    """
    Deprecated. Use :func:`.parse_url` instead.
    """
    parsed = parse_url(url)
    # Default the scheme to plain http when the url did not carry one.
    scheme = parsed.scheme or 'http'
    return scheme, parsed.hostname, parsed.port
| cc0-1.0 |
cmelange/ansible | lib/ansible/modules/cloud/digital_ocean/digital_ocean_sshkey.py | 32 | 5074 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: digital_ocean_sshkey
short_description: Create/delete an SSH key in DigitalOcean
description:
- Create/delete an SSH key.
version_added: "1.6"
author: "Michael Gregson (@mgregson)"
options:
state:
description:
- Indicate desired state of the target.
default: present
choices: ['present', 'absent']
client_id:
description:
- DigitalOcean manager id.
api_key:
description:
- DigitalOcean api key.
id:
description:
- Numeric, the SSH key id you want to operate on.
name:
description:
- String, this is the name of an SSH key to create or destroy.
ssh_pub_key:
description:
- The public SSH key you want to add to your account.
notes:
- Two environment variables can be used, DO_CLIENT_ID and DO_API_KEY.
- Version 1 of DigitalOcean API is used.
requirements:
- "python >= 2.6"
- dopy
'''
EXAMPLES = '''
# Ensure a SSH key is present
# If a key matches this name, will return the ssh key id and changed = False
# If no existing key matches this name, a new key is created, the ssh key id is returned and changed = False
- digital_ocean_sshkey:
state: present
name: my_ssh_key
ssh_pub_key: 'ssh-rsa AAAA...'
client_id: XXX
api_key: XXX
'''
import os
import traceback
try:
from dopy.manager import DoError, DoManager
HAS_DOPY = True
except ImportError:
HAS_DOPY = False
from ansible.module_utils.basic import AnsibleModule
class JsonfyMixIn(object):
    """Mix-in exposing the instance attribute dict as a JSON-ready mapping."""

    def to_json(self):
        # The attribute dictionary already maps field names to values, which
        # is exactly the shape the module reports back to Ansible.
        return vars(self)
class SSH(JsonfyMixIn):
    # Shared dopy DoManager; initialized once via SSH.setup().
    manager = None

    def __init__(self, ssh_key_json):
        # Adopt every field of the raw API response dict (id, name, ...) as
        # an instance attribute.
        self.__dict__.update(ssh_key_json)
    # Alias: re-running __init__ refreshes attributes from a new API dict.
    update_attr = __init__

    def destroy(self):
        """Delete this key via the API; always returns True."""
        self.manager.destroy_ssh_key(self.id)
        return True

    @classmethod
    def setup(cls, client_id, api_key):
        """Bind the class to a DoManager built from the given credentials."""
        cls.manager = DoManager(client_id, api_key)

    @classmethod
    def find(cls, name):
        """Return the first key whose name matches, or False if none does."""
        if not name:
            return False
        keys = cls.list_all()
        for key in keys:
            if key.name == name:
                return key
        return False

    @classmethod
    def list_all(cls):
        """Return all account SSH keys wrapped as SSH instances."""
        json = cls.manager.all_ssh_keys()
        # NOTE: map() yields an iterator on Python 3; find() above iterates
        # it only once, which works on both Python 2 and 3.
        return map(cls, json)

    @classmethod
    def add(cls, name, key_pub):
        """Create a new SSH key and wrap the API response."""
        json = cls.manager.new_ssh_key(name, key_pub)
        return cls(json)
def core(module):
    """Implement the 'present'/'absent' logic for a DigitalOcean SSH key.

    Reads credentials from module params or the DO_CLIENT_ID / DO_API_KEY
    environment variables, then creates, reports, or destroys the named key,
    exiting through module.exit_json / fail_json.
    """
    def getkeyordie(k):
        # Fetch a required parameter or abort the module run.
        v = module.params[k]
        if v is None:
            module.fail_json(msg='Unable to load %s' % k)
        return v

    try:
        # params['client_id'] will be None even if client_id is not passed in
        client_id = module.params['client_id'] or os.environ['DO_CLIENT_ID']
        api_key = module.params['api_key'] or os.environ['DO_API_KEY']
    except KeyError as e:
        # Fix: KeyError has no .message attribute on Python 3; formatting the
        # exception itself reports the missing environment variable.
        module.fail_json(msg='Unable to load %s' % e)

    state = module.params['state']

    SSH.setup(client_id, api_key)
    name = getkeyordie('name')
    if state == 'present':
        # Fix: was `state in ('present')` -- a substring test on a plain
        # string, not tuple membership. Equality states the intent directly.
        key = SSH.find(name)
        if key:
            module.exit_json(changed=False, ssh_key=key.to_json())
        key = SSH.add(name, getkeyordie('ssh_pub_key'))
        module.exit_json(changed=True, ssh_key=key.to_json())

    elif state == 'absent':
        key = SSH.find(name)
        if not key:
            module.exit_json(changed=False, msg='SSH key with the name of %s is not found.' % name)
        key.destroy()
        module.exit_json(changed=True)
def main():
    """Entry point: build the AnsibleModule spec and dispatch to core()."""
    module = AnsibleModule(
        argument_spec = dict(
            state = dict(choices=['present', 'absent'], default='present'),
            client_id = dict(aliases=['CLIENT_ID'], no_log=True),
            api_key = dict(aliases=['API_KEY'], no_log=True),
            name = dict(type='str'),
            id = dict(aliases=['droplet_id'], type='int'),
            ssh_pub_key = dict(type='str'),
        ),
        required_one_of = (
            ['id', 'name'],
        ),
    )
    if not HAS_DOPY:
        module.fail_json(msg='dopy required for this module')

    try:
        core(module)
    except (DoError, Exception) as e:
        # NOTE(review): (DoError, Exception) is redundant -- Exception alone
        # already matches DoError; kept as-is to preserve behavior.
        module.fail_json(msg=str(e), exception=traceback.format_exc())

if __name__ == '__main__':
    main()
| gpl-3.0 |
bhupennewalkar1337/erpnext | erpnext/buying/doctype/request_for_quotation/test_request_for_quotation.py | 17 | 2749 | # -*- coding: utf-8 -*-
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# See license.txt
from __future__ import unicode_literals
import frappe
import unittest
from frappe.utils import nowdate
class TestRequestforQuotation(unittest.TestCase):
    """Integration tests for turning an RFQ into Supplier Quotations."""

    def test_make_supplier_quotation(self):
        # A quotation generated per supplier must carry over the RFQ link,
        # item code, and quantity for each of the two fixture suppliers.
        from erpnext.buying.doctype.request_for_quotation.request_for_quotation import make_supplier_quotation
        rfq = make_request_for_quotation()

        sq = make_supplier_quotation(rfq.name, rfq.get('suppliers')[0].supplier)
        sq.submit()

        sq1 = make_supplier_quotation(rfq.name, rfq.get('suppliers')[1].supplier)
        sq1.submit()

        self.assertEquals(sq.supplier, rfq.get('suppliers')[0].supplier)
        self.assertEquals(sq.get('items')[0].request_for_quotation, rfq.name)
        self.assertEquals(sq.get('items')[0].item_code, "_Test Item")
        self.assertEquals(sq.get('items')[0].qty, 5)

        self.assertEquals(sq1.supplier, rfq.get('suppliers')[1].supplier)
        self.assertEquals(sq1.get('items')[0].request_for_quotation, rfq.name)
        self.assertEquals(sq1.get('items')[0].item_code, "_Test Item")
        self.assertEquals(sq1.get('items')[0].qty, 5)

    def test_make_supplier_quotation_from_portal(self):
        # The portal path (supplier fills in a rate) must also propagate the
        # computed amount (qty 5 * rate 100 = 500).
        from erpnext.buying.doctype.request_for_quotation.request_for_quotation import create_supplier_quotation
        rfq = make_request_for_quotation()
        rfq.get('items')[0].rate = 100
        rfq.supplier = rfq.suppliers[0].supplier
        supplier_quotation_name = create_supplier_quotation(rfq)

        supplier_quotation_doc = frappe.get_doc('Supplier Quotation', supplier_quotation_name)

        self.assertEquals(supplier_quotation_doc.supplier, rfq.get('suppliers')[0].supplier)
        self.assertEquals(supplier_quotation_doc.get('items')[0].request_for_quotation, rfq.name)
        self.assertEquals(supplier_quotation_doc.get('items')[0].item_code, "_Test Item")
        self.assertEquals(supplier_quotation_doc.get('items')[0].qty, 5)
        self.assertEquals(supplier_quotation_doc.get('items')[0].amount, 500)
def make_request_for_quotation():
    """Create and submit an RFQ fixture with two suppliers and one item."""
    supplier_data = get_supplier_data()
    rfq = frappe.new_doc('Request for Quotation')
    rfq.transaction_date = nowdate()
    rfq.status = 'Draft'
    rfq.company = '_Test Company'
    rfq.message_for_supplier = 'Please supply the specified items at the best possible rates.'

    for data in supplier_data:
        rfq.append('suppliers', data)

    # Single fixture line item; schedule_date is required on submit.
    rfq.append("items", {
        "item_code": "_Test Item",
        "description": "_Test Item",
        "uom": "_Test UOM",
        "qty": 5,
        "warehouse": "_Test Warehouse - _TC",
        "schedule_date": nowdate()
    })

    rfq.submit()
    return rfq
def get_supplier_data():
    """Return the fixture supplier rows appended to the test RFQ."""
    supplier_names = ("_Test Supplier", "_Test Supplier 1")
    return [{"supplier": name, "supplier_name": name}
            for name in supplier_names]
| gpl-3.0 |
Piasy/proxy-searcher | site-packages/django/core/paginator.py | 94 | 5058 | from math import ceil
class InvalidPage(Exception):
    """Base class for errors raised while requesting a page."""
    pass

class PageNotAnInteger(InvalidPage):
    """The requested page number could not be converted to an integer."""
    pass

class EmptyPage(InvalidPage):
    """The requested page number is out of range / holds no results."""
    pass
class Paginator(object):
    """Split ``object_list`` into pages of ``per_page`` items.

    ``orphans`` folds a short trailing page into the previous one;
    ``allow_empty_first_page`` controls whether an empty list yields one
    empty page or zero pages. ``count`` and ``num_pages`` are computed
    lazily and cached.
    """

    def __init__(self, object_list, per_page, orphans=0, allow_empty_first_page=True):
        self.object_list = object_list
        self.per_page = int(per_page)
        self.orphans = int(orphans)
        self.allow_empty_first_page = allow_empty_first_page
        # Lazy caches; filled on first access of num_pages / count.
        self._num_pages = self._count = None

    def validate_number(self, number):
        "Validates the given 1-based page number."
        try:
            number = int(number)
        except (TypeError, ValueError):
            raise PageNotAnInteger('That page number is not an integer')
        if number < 1:
            raise EmptyPage('That page number is less than 1')
        if number > self.num_pages:
            # An empty first page is tolerated when explicitly allowed.
            if not (number == 1 and self.allow_empty_first_page):
                raise EmptyPage('That page contains no results')
        return number

    def page(self, number):
        "Returns a Page object for the given 1-based page number."
        number = self.validate_number(number)
        start = (number - 1) * self.per_page
        stop = start + self.per_page
        # Fold trailing orphans into the final page.
        if stop + self.orphans >= self.count:
            stop = self.count
        return Page(self.object_list[start:stop], number, self)

    def _get_count(self):
        "Returns the total number of objects, across all pages."
        if self._count is None:
            try:
                self._count = self.object_list.count()
            except (AttributeError, TypeError):
                # AttributeError: no count() method at all.
                # TypeError: count() demands arguments (e.g. list.count).
                self._count = len(self.object_list)
        return self._count
    count = property(_get_count)

    def _get_num_pages(self):
        "Returns the total number of pages."
        if self._num_pages is None:
            if self.count == 0 and not self.allow_empty_first_page:
                self._num_pages = 0
            else:
                hits = max(1, self.count - self.orphans)
                # Ceiling division without going through floats.
                self._num_pages = -(-hits // self.per_page)
        return self._num_pages
    num_pages = property(_get_num_pages)

    def _get_page_range(self):
        """
        Returns a 1-based range of pages for iterating through within
        a template for loop.
        """
        return range(1, self.num_pages + 1)
    page_range = property(_get_page_range)

QuerySetPaginator = Paginator   # For backwards-compatibility.
class Page(object):
    """One page of results plus navigation helpers.

    Holds the slice of objects for page ``number`` and a reference to the
    owning Paginator for page-count arithmetic.
    """

    def __init__(self, object_list, number, paginator):
        self.object_list = object_list
        self.number = number
        self.paginator = paginator

    def __repr__(self):
        return '<Page %s of %s>' % (self.number, self.paginator.num_pages)

    def __len__(self):
        return len(self.object_list)

    def __getitem__(self, index):
        # Materialize object_list so a QuerySet is evaluated in one go
        # rather than hitting the database once per __getitem__.
        return list(self.object_list)[index]

    # The following four methods are only necessary for Python <2.6
    # compatibility (this class could just extend 2.6's collections.Sequence).

    def __iter__(self):
        position = 0
        while True:
            try:
                item = self[position]
            except IndexError:
                return
            yield item
            position += 1

    def __contains__(self, value):
        return any(item == value for item in self)

    def index(self, value):
        for position, item in enumerate(self):
            if item == value:
                return position
        raise ValueError

    def count(self, value):
        return sum(1 for item in self if item == value)

    # End of compatibility methods.

    def has_next(self):
        return self.number < self.paginator.num_pages

    def has_previous(self):
        return self.number > 1

    def has_other_pages(self):
        return self.has_previous() or self.has_next()

    def next_page_number(self):
        return self.number + 1

    def previous_page_number(self):
        return self.number - 1

    def start_index(self):
        """
        Returns the 1-based index of the first object on this page,
        relative to total objects in the paginator.
        """
        if self.paginator.count == 0:
            # Special case, return zero if no items.
            return 0
        return self.paginator.per_page * (self.number - 1) + 1

    def end_index(self):
        """
        Returns the 1-based index of the last object on this page,
        relative to total objects found (hits).
        """
        if self.number == self.paginator.num_pages:
            # The final page absorbs any orphans.
            return self.paginator.count
        return self.number * self.paginator.per_page
| mit |
yqm/sl4a | python-build/python-libs/gdata/src/atom/auth.py | 297 | 1199 | #!/usr/bin/env python
#
# Copyright (C) 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This module is used for version 2 of the Google Data APIs.
__author__ = 'j.s@google.com (Jeff Scudder)'
import base64
class BasicAuth(object):
  """Sets the Authorization header as defined in RFC1945"""

  def __init__(self, user_id, password):
    credentials = '%s:%s' % (user_id, password)
    if not isinstance(credentials, bytes):
      credentials = credentials.encode('utf-8')
    # Fix: base64.b64encode replaces the deprecated encodestring. It never
    # embeds newlines (encodestring wraps output at 76 chars, which .strip()
    # did not fully remove for long credentials, corrupting the header) and
    # it exists on both Python 2 and Python 3.
    self.basic_cookie = base64.b64encode(credentials).decode('ascii')

  def modify_request(self, http_request):
    """Attach the Basic credentials to the request's Authorization header."""
    http_request.headers['Authorization'] = 'Basic %s' % self.basic_cookie

  ModifyRequest = modify_request
class NoAuth(object):
  """No-op auth strategy: leaves the request completely untouched."""

  def modify_request(self, http_request):
    # Intentionally does nothing; used when no credentials are configured.
    pass
| apache-2.0 |
defionscode/ansible | lib/ansible/modules/cloud/vultr/vultr_firewall_group.py | 27 | 5529 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# (c) 2017, René Moser <mail@renemoser.net>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: vultr_firewall_group
short_description: Manages firewall groups on Vultr.
description:
- Create and remove firewall groups.
version_added: "2.5"
author: "René Moser (@resmo)"
options:
name:
description:
- Name of the firewall group.
required: true
aliases: [ description ]
state:
description:
- State of the firewall group.
default: present
choices: [ present, absent ]
extends_documentation_fragment: vultr
'''
EXAMPLES = '''
- name: ensure a firewall group is present
local_action:
module: vultr_firewall_group
name: my http firewall
- name: ensure a firewall group is absent
local_action:
module: vultr_firewall_group
name: my http firewall
state: absent
'''
RETURN = '''
---
vultr_api:
description: Response from Vultr API with a few additions/modification
returned: success
type: complex
contains:
api_account:
description: Account used in the ini file to select the key
returned: success
type: string
sample: default
api_timeout:
description: Timeout used for the API requests
returned: success
type: int
sample: 60
api_retries:
description: Amount of max retries for the API requests
returned: success
type: int
sample: 5
api_endpoint:
description: Endpoint used for the API requests
returned: success
type: string
sample: "https://api.vultr.com"
vultr_firewall_group:
description: Response from Vultr API
returned: success
type: complex
contains:
id:
description: ID of the firewall group
returned: success
type: string
sample: 1234abcd
name:
description: Name of the firewall group
returned: success
type: string
sample: my firewall group
date_created:
description: Date the firewall group was created
returned: success
type: string
sample: "2017-08-26 12:47:48"
date_modified:
description: Date the firewall group was modified
returned: success
type: string
sample: "2017-08-26 12:47:48"
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.vultr import (
Vultr,
vultr_argument_spec,
)
class AnsibleVultrFirewallGroup(Vultr):
    """Thin wrapper around the Vultr v1 firewall-group API endpoints."""

    def __init__(self, module):
        super(AnsibleVultrFirewallGroup, self).__init__(module, "vultr_firewall_group")
        # Maps raw Vultr API fields to the keys returned to the user.
        self.returns = {
            'FIREWALLGROUPID': dict(key='id'),
            'description': dict(key='name'),
            'date_created': dict(),
            'date_modified': dict(),
        }

    def get_firewall_group(self):
        """Return the group whose description matches 'name', or {}."""
        # The API offers no server-side filter, so scan the full listing.
        firewall_groups = self.api_query(path="/v1/firewall/group_list")
        if firewall_groups:
            for firewall_group_id, firewall_group_data in firewall_groups.items():
                if firewall_group_data.get('description') == self.module.params.get('name'):
                    return firewall_group_data
        return {}

    def present_firewall_group(self):
        """Ensure the firewall group exists; create it when missing."""
        firewall_group = self.get_firewall_group()
        if not firewall_group:
            firewall_group = self._create_firewall_group(firewall_group)
        return firewall_group

    def _create_firewall_group(self, firewall_group):
        # Marks the task changed and records a diff even in check mode.
        self.result['changed'] = True
        data = {
            'description': self.module.params.get('name'),
        }
        self.result['diff']['before'] = {}
        self.result['diff']['after'] = data

        # Only hit the API when actually applying (check mode is a dry run).
        if not self.module.check_mode:
            self.api_query(
                path="/v1/firewall/group_create",
                method="POST",
                data=data
            )
            firewall_group = self.get_firewall_group()
        return firewall_group

    def absent_firewall_group(self):
        """Ensure the firewall group is deleted; return its old data."""
        firewall_group = self.get_firewall_group()
        if firewall_group:
            self.result['changed'] = True

            data = {
                'FIREWALLGROUPID': firewall_group['FIREWALLGROUPID'],
            }

            self.result['diff']['before'] = firewall_group
            self.result['diff']['after'] = {}

            if not self.module.check_mode:
                self.api_query(
                    path="/v1/firewall/group_delete",
                    method="POST",
                    data=data
                )
        return firewall_group
def main():
    """Entry point: parse arguments, apply the desired state, emit results."""
    argument_spec = vultr_argument_spec()
    argument_spec.update(dict(
        name=dict(required=True, aliases=['description']),
        state=dict(choices=['present', 'absent'], default='present'),
    ))

    module = AnsibleModule(
        argument_spec=argument_spec,
        # Mutating calls above are guarded by module.check_mode checks.
        supports_check_mode=True,
    )

    vultr_firewall_group = AnsibleVultrFirewallGroup(module)
    if module.params.get('state') == "absent":
        firewall_group = vultr_firewall_group.absent_firewall_group()
    else:
        firewall_group = vultr_firewall_group.present_firewall_group()

    result = vultr_firewall_group.get_result(firewall_group)
    module.exit_json(**result)

if __name__ == '__main__':
    main()
| gpl-3.0 |
SimplyKnownAsG/yamlize | setup.py | 1 | 2647 | from setuptools import setup, find_packages
# Distribution configuration for the yamlize package; consumed by
# setuptools when building/installing.
setup(
    name='yamlize',

    # Versions should comply with PEP440. For a discussion on single-sourcing
    # the version across setup.py and the project code, see
    # https://packaging.python.org/en/latest/single_source_version.html
    version='0.7.0',

    description='yamlize, a package for Python object serialization and data validation.',
    long_description='see https://github.com/SimplyKnownAsG/yamlize/blob/master/README.rst',

    # The project's main homepage.
    url='https://github.com/SimplyKnownAsG/yamlize',

    author='g',
    author_email='gtmalmgren@gmail.com',

    # Choose your license
    license='Apache',

    # See https://pypi.python.org/pypi?%3Aaction=list_classifiers
    classifiers=[
        # How mature is this project?
        'Development Status :: 4 - Beta',

        # Indicate who your project is intended for
        'Intended Audience :: Developers',
        'Intended Audience :: End Users/Desktop',
        'Topic :: Software Development :: Build Tools',

        # Pick your license as you wish (should match "license" above)
        'License :: OSI Approved :: Apache Software License',

        # Specify the Python versions you support here. In particular, ensure
        # that you indicate whether you support Python 2, Python 3 or both.
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
    ],

    # What does your project relate to?
    keywords='yaml serialization type checking validation',

    # You can just specify the packages manually here if your project is
    # simple. Or you can use find_packages().
    packages=find_packages(exclude=['contrib', 'docs', 'tests']),

    # Alternatively, if you want to distribute just a my_module.py, uncomment
    # this:
    #   py_modules=["my_module"],

    # List run-time dependencies here.
    install_requires=['ruamel.yaml', 'six'],

    # List additional groups of dependencies here (e.g. development
    # dependencies); install with `pip install yamlize[dev]`.
    extras_require={
        'dev': ['yaml', 'pytest', 'pycodestyle', 'sphinx', 'aenum', 'numpy'],
    },

    # # Data files
    # package_data={},
    # # Data files outside of your packages.
    # data_files=[],

    # To provide executable scripts, use entry points in preference to the
    # "scripts" keyword. Entry points provide cross-platform support and allow
    # pip to create the appropriate form of executable for the target platform.
    entry_points={},
)
| apache-2.0 |
liangjiaxing/sympy | sympy/plotting/pygletplot/plot_axes.py | 95 | 8666 | from __future__ import print_function, division
from pyglet.gl import *
from pyglet import font
from plot_object import PlotObject
from util import strided_range, billboard_matrix
from util import get_direction_vectors
from util import dot_product, vec_sub, vec_mag
from sympy.core import S
from sympy.core.compatibility import is_sequence, range
class PlotAxes(PlotObject):
    """Renderable axes for a pyglet plot window.

    Keyword arguments select the rendering style ('ordinate', 'frame'/'box',
    'none'), the tick stride, colors, labels, and the label font.
    """

    def __init__(self, *args, **kwargs):
        # initialize style parameter
        style = kwargs.pop('style', '').lower()

        # allow alias kwargs to override style kwarg
        if kwargs.pop('none', None) is not None:
            style = 'none'
        if kwargs.pop('frame', None) is not None:
            style = 'frame'
        if kwargs.pop('box', None) is not None:
            style = 'box'
        if kwargs.pop('ordinate', None) is not None:
            style = 'ordinate'

        if style in ['', 'ordinate']:
            self._render_object = PlotAxesOrdinate(self)
        elif style in ['frame', 'box']:
            self._render_object = PlotAxesFrame(self)
        elif style in ['none']:
            self._render_object = None
        else:
            raise ValueError(("Unrecognized axes style %s.") % (style))

        # initialize stride parameter; may be a single number or a 3-sequence
        stride = kwargs.pop('stride', 0.25)
        try:
            # NOTE(review): eval() of a caller-supplied string; only safe for
            # trusted input. Left as-is to preserve behavior (TypeError means
            # stride was already numeric/sequence).
            stride = eval(stride)
        except TypeError:
            pass
        if is_sequence(stride):
            if len(stride) != 3:
                raise ValueError("length should be equal to 3")
            self._stride = stride
        else:
            # Same stride along all three axes.
            self._stride = [stride, stride, stride]
        self._tick_length = float(kwargs.pop('tick_length', 0.1))

        # setup bounding box and ticks
        self._origin = [0, 0, 0]
        self.reset_bounding_box()

        def flexible_boolean(input, default):
            # Accepts real booleans or their common string spellings;
            # anything else falls back to the given default.
            if input in [True, False]:
                return input
            if input in ['f', 'F', 'false', 'False']:
                return False
            if input in ['t', 'T', 'true', 'True']:
                return True
            return default

        # initialize remaining parameters
        self.visible = flexible_boolean(kwargs.pop('visible', ''), True)
        self._overlay = flexible_boolean(kwargs.pop('overlay', ''), True)
        self._colored = flexible_boolean(kwargs.pop('colored', ''), False)
        self._label_axes = flexible_boolean(
            kwargs.pop('label_axes', ''), False)
        self._label_ticks = flexible_boolean(
            kwargs.pop('label_ticks', ''), True)

        # setup label font
        self.font_face = kwargs.pop('font_face', 'Arial')
        self.font_size = kwargs.pop('font_size', 28)

        # this is also used to reinit the
        # font on window close/reopen
        self.reset_resources()

    def reset_resources(self):
        # Font is lazily (re)loaded by the render object on next draw.
        self.label_font = None

    def reset_bounding_box(self):
        # Per-axis [min, max]; None means "not yet constrained".
        self._bounding_box = [[None, None], [None, None], [None, None]]
        self._axis_ticks = [[], [], []]

    def draw(self):
        if self._render_object:
            glPushAttrib(GL_ENABLE_BIT | GL_POLYGON_BIT | GL_DEPTH_BUFFER_BIT)
            if self._overlay:
                # Draw on top of the plot regardless of depth.
                glDisable(GL_DEPTH_TEST)
            self._render_object.draw()
            glPopAttrib()

    def adjust_bounds(self, child_bounds):
        """Grow the bounding box to enclose child_bounds; refresh ticks."""
        b = self._bounding_box
        c = child_bounds
        for i in [0, 1, 2]:
            # Unbounded children (infinite extent) are ignored entirely.
            if abs(c[i][0]) is S.Infinity or abs(c[i][1]) is S.Infinity:
                continue
            b[i][0] = [min([b[i][0], c[i][0]]), c[i][0]][b[i][0] is None]
            b[i][1] = [max([b[i][1], c[i][1]]), c[i][1]][b[i][1] is None]
            self._recalculate_axis_ticks(i)

    def _recalculate_axis_ticks(self, axis):
        b = self._bounding_box
        if b[axis][0] is None or b[axis][1] is None:
            self._axis_ticks[axis] = []
        else:
            self._axis_ticks[axis] = strided_range(b[axis][0], b[axis][1],
                                                   self._stride[axis])

    def toggle_visible(self):
        self.visible = not self.visible

    def toggle_colors(self):
        self._colored = not self._colored
class PlotAxesBase(PlotObject):
    """Abstract base for axis renderers attached to a parent PlotAxes.

    Subclasses must implement draw_axis() and may override
    draw_background(); shared text/line drawing helpers live here.
    """

    def __init__(self, parent_axes):
        # The parent PlotAxes supplies colors, origin, fonts and tick data.
        self._p = parent_axes

    def draw(self):
        # Two palettes selected by the parent's _colored flag:
        # index 0 = monochrome, index 1 = distinct x/y/z colors.
        color = [([0.2, 0.1, 0.3], [0.2, 0.1, 0.3], [0.2, 0.1, 0.3]),
                 ([0.9, 0.3, 0.5], [0.5, 1.0, 0.5], [0.3, 0.3, 0.9])][self._p._colored]
        self.draw_background(color)
        # Axes are drawn in z, y, x order.
        self.draw_axis(2, color[2])
        self.draw_axis(1, color[1])
        self.draw_axis(0, color[0])

    def draw_background(self, color):
        pass  # optional

    def draw_axis(self, axis, color):
        raise NotImplementedError()

    def draw_text(self, text, position, color, scale=1.0):
        """Draw a camera-facing text label at a world-space position."""
        if len(color) == 3:
            # Promote RGB to opaque RGBA for pyglet's font.Text.
            color = (color[0], color[1], color[2], 1.0)
        # Lazily (re)load the font; reset_resources() clears it on reopen.
        if self._p.label_font is None:
            self._p.label_font = font.load(self._p.font_face,
                                           self._p.font_size,
                                           bold=True, italic=False)
        label = font.Text(self._p.label_font, text,
                          color=color,
                          valign=font.Text.BASELINE,
                          halign=font.Text.CENTER)
        glPushMatrix()
        glTranslatef(*position)
        # billboard_matrix: presumably orients the label toward the camera
        # (defined elsewhere in the plotting package) -- confirm.
        billboard_matrix()
        scale_factor = 0.005 * scale
        glScalef(scale_factor, scale_factor, scale_factor)
        # NOTE(review): color is already passed to font.Text above; this
        # transparent glColor4f looks vestigial -- confirm before removing.
        glColor4f(0, 0, 0, 0)
        label.draw()
        glPopMatrix()

    def draw_line(self, v, color):
        """Draw one line segment `v` (pair of points), offset by the origin."""
        o = self._p._origin
        glBegin(GL_LINES)
        glColor3f(*color)
        glVertex3f(v[0][0] + o[0], v[0][1] + o[1], v[0][2] + o[2])
        glVertex3f(v[1][0] + o[0], v[1][1] + o[1], v[1][2] + o[2])
        glEnd()
class PlotAxesOrdinate(PlotAxesBase):
    """Axis renderer drawing x/y/z axis lines with tick marks and labels."""

    def __init__(self, parent_axes):
        super(PlotAxesOrdinate, self).__init__(parent_axes)

    def draw_axis(self, axis, color):
        """Draw one axis: its line, its ticks, and (maybe) its labels."""
        ticks = self._p._axis_ticks[axis]
        radius = self._p._tick_length / 2.0
        if len(ticks) < 2:
            return
        # calculate the vector for this axis
        axis_lines = [[0, 0, 0], [0, 0, 0]]
        axis_lines[0][axis], axis_lines[1][axis] = ticks[0], ticks[-1]
        axis_vector = vec_sub(axis_lines[1], axis_lines[0])
        # calculate angle to the z direction vector
        pos_z = get_direction_vectors()[2]
        d = abs(dot_product(axis_vector, pos_z))
        d = d / vec_mag(axis_vector)
        # don't draw labels if we're looking down the axis
        # (cosine close to 1 means the axis is nearly parallel to the view).
        labels_visible = abs(d - 1.0) > 0.02
        # draw the ticks and labels
        for tick in ticks:
            self.draw_tick_line(axis, color, radius, tick, labels_visible)
        # draw the axis line and labels
        self.draw_axis_line(axis, color, ticks[0], ticks[-1], labels_visible)

    def draw_axis_line(self, axis, color, a_min, a_max, labels_visible):
        """Draw the axis line from a_min to a_max along `axis`."""
        axis_line = [[0, 0, 0], [0, 0, 0]]
        axis_line[0][axis], axis_line[1][axis] = a_min, a_max
        self.draw_line(axis_line, color)
        if labels_visible:
            self.draw_axis_line_labels(axis, color, axis_line)

    def draw_axis_line_labels(self, axis, color, axis_line):
        """Draw the -X/+X style endpoint labels, offset past the line ends."""
        if not self._p._label_axes:
            return
        axis_labels = [axis_line[0][::], axis_line[1][::]]
        axis_labels[0][axis] -= 0.3
        axis_labels[1][axis] += 0.3
        a_str = ['X', 'Y', 'Z'][axis]
        self.draw_text("-" + a_str, axis_labels[0], color)
        self.draw_text("+" + a_str, axis_labels[1], color)

    def draw_tick_line(self, axis, color, radius, tick, labels_visible):
        """Draw a small perpendicular tick mark at `tick` along `axis`."""
        # Axis along which the tick mark itself extends (perpendicular).
        tick_axis = {0: 1, 1: 0, 2: 1}[axis]
        tick_line = [[0, 0, 0], [0, 0, 0]]
        tick_line[0][axis] = tick_line[1][axis] = tick
        tick_line[0][tick_axis], tick_line[1][tick_axis] = -radius, radius
        self.draw_line(tick_line, color)
        if labels_visible:
            self.draw_tick_line_label(axis, color, radius, tick)

    def draw_tick_line_label(self, axis, color, radius, tick):
        """Draw the numeric value of a tick, offset to the side of the axis."""
        if not self._p._label_axes:
            return
        tick_label_vector = [0, 0, 0]
        tick_label_vector[axis] = tick
        # Offset sideways; the sign flips for the x axis so labels clear it.
        tick_label_vector[{0: 1, 1: 0, 2: 1}[axis]] = [-1, 1, 1][
            axis] * radius * 3.5
        self.draw_text(str(tick), tick_label_vector, color, scale=0.5)
class PlotAxesFrame(PlotAxesBase):
    """Stub for a frame-style axes renderer; drawing is not implemented."""

    def __init__(self, parent_axes):
        super(PlotAxesFrame, self).__init__(parent_axes)

    def draw_background(self, color):
        pass

    def draw_axis(self, axis, color):
        raise NotImplementedError()
| bsd-3-clause |
shinsterneck/pdns | regression-tests.api/runtests.py | 6 | 7928 | #!/usr/bin/env python
#
# Shell-script style.
from __future__ import print_function
import os
import requests
import shutil
import subprocess
import sys
import tempfile
import time
# Python 3 removed the raw_input builtin; alias it to input so the
# interactive "--wait" prompt below works on both major versions.
try:
    raw_input
except NameError:
    raw_input = input
MYSQL_DB='pdnsapi'
MYSQL_USER='root'
MYSQL_HOST=os.environ.get('MYSQL_HOST', 'localhost')
MYSQL_PASSWD=''
PGSQL_DB='pdnsapi'
SQLITE_DB = 'pdns.sqlite3'
LMDB_DB = 'pdns.lmdb'
WEBPORT = 5556
DNSPORT = 5300
APIKEY = '1234567890abcdefghijklmnopq-key'
WEBPASSWORD = 'something'
PDNSUTIL_CMD = [os.environ.get("PDNSUTIL", "../pdns/pdnsutil"), "--config-dir=."]
ZONES = ["example.com", "powerdnssec.org", "cryptokeys.org"]
ZONE_DIR = "../regression-tests/zones/"
AUTH_MYSQL_TPL = """
# Generated by runtests.py
launch=gmysql
gmysql-dnssec=on
gmysql-dbname="""+MYSQL_DB+"""
gmysql-user="""+MYSQL_USER+"""
gmysql-host="""+MYSQL_HOST+"""
gmysql-password="""+MYSQL_PASSWD+"""
"""
AUTH_PGSQL_TPL = """
# Generated by runtests.py
launch=gpgsql
gpgsql-dnssec=on
gpgsql-dbname="""+PGSQL_DB+"""
# on conflict is available in pg 9.5 and up
gpgsql-set-tsig-key-query=insert into tsigkeys (name,algorithm,secret) values($1,$2,$3) on conflict(name, algorithm) do update set secret=Excluded.secret
"""
AUTH_SQLITE_TPL = """
# Generated by runtests.py
launch=gsqlite3
gsqlite3-dnssec=on
gsqlite3-database="""+SQLITE_DB+"""
"""
AUTH_LMDB_TPL = """
# Generated by runtests.py
launch=lmdb
lmdb-filename="""+LMDB_DB+"""
"""
AUTH_COMMON_TPL = """
module-dir=../regression-tests/modules
default-soa-edit=INCEPTION-INCREMENT
launch+=bind
bind-config=bindbackend.conf
loglevel=5
"""
BINDBACKEND_CONF_TPL = """
# Generated by runtests.py
"""
ACL_LIST_TPL = """
# Generated by runtests.py
# local host
127.0.0.1
::1
"""
REC_EXAMPLE_COM_CONF_TPL = """
# Generated by runtests.py
auth-zones+=example.com=../regression-tests/zones/example.com
"""
REC_CONF_TPL = """
# Generated by runtests.py
auth-zones=
forward-zones=
forward-zones-recurse=
allow-from-file=acl.list
api-config-dir=%(conf_dir)s
include-dir=%(conf_dir)s
"""
def ensure_empty_dir(name):
    """Create (or recreate) *name* as an empty directory.

    Any existing directory tree at *name* is removed first so each test
    run starts from a clean slate.  A stale regular file (or symlink) at
    the path is also removed, since it would make os.mkdir fail.
    """
    if os.path.isdir(name):
        shutil.rmtree(name)
    elif os.path.exists(name):
        # A plain file left behind by a broken run; rmtree would reject it.
        os.remove(name)
    os.mkdir(name)
def format_call_args(cmd):
    """Render an argv list as a shell-style, single-quoted display string."""
    joined = "' '".join(cmd)
    return "$ '{0}'".format(joined)
def run_check_call(cmd, *args, **kwargs):
    """Echo *cmd* for the log, then run it.

    Raises subprocess.CalledProcessError on a non-zero exit status.
    Extra args/kwargs are forwarded to subprocess.check_call.
    """
    print(format_call_args(cmd))
    subprocess.check_call(cmd, *args, **kwargs)
# --wait: pause after the run so the daemon can be inspected manually.
wait = ('--wait' in sys.argv)
if wait:
    sys.argv.remove('--wait')
# --tests=NAME: restrict the nosetests invocation to the given tests.
tests = [opt for opt in sys.argv if opt.startswith('--tests=')]
if tests:
    for opt in tests:
        sys.argv.remove(opt)
    tests = [opt.split('=', 1)[1] for opt in tests]
# Positional arguments: daemon flavor, then an optional backend
# (defaulting to gsqlite3).  Uses the old `and/or` ternary idiom.
daemon = (len(sys.argv) >= 2) and sys.argv[1] or None
backend = (len(sys.argv) == 3) and sys.argv[2] or 'gsqlite3'
if daemon not in ('authoritative', 'recursor') or backend not in ('gmysql', 'gpgsql', 'gsqlite3', 'lmdb'):
    print("Usage: ./runtests (authoritative|recursor) [gmysql|gpgsql|gsqlite3|lmdb]")
    sys.exit(2)
daemon = sys.argv[1]  # NOTE(review): redundant -- daemon was assigned above.
pdns_server = os.environ.get("PDNSSERVER", "../pdns/pdns_server")
pdns_recursor = os.environ.get("PDNSRECURSOR", "../pdns/recursordist/pdns_recursor")
common_args = [
"--daemon=no", "--socket-dir=.", "--config-dir=.",
"--local-address=127.0.0.1", "--local-port="+str(DNSPORT),
"--webserver=yes", "--webserver-port="+str(WEBPORT), "--webserver-address=127.0.0.1",
"--webserver-password="+WEBPASSWORD,
"--api-key="+APIKEY
]
# Take sdig if it exists (recursor in travis), otherwise build it from Authoritative source.
sdig = os.environ.get("SDIG", "")
if sdig:
sdig = os.path.abspath(sdig)
if not sdig or not os.path.exists(sdig):
run_check_call(["make", "-C", "../pdns", "sdig"])
sdig = "../pdns/sdig"
if daemon == 'authoritative':
zone2sql = os.environ.get("ZONE2SQL", "../pdns/zone2sql")
# Prepare mysql DB with some zones.
if backend == 'gmysql':
subprocess.call(["mysqladmin", "--user=" + MYSQL_USER, "--password=" + MYSQL_PASSWD, "--host=" + MYSQL_HOST, "--force", "drop", MYSQL_DB])
run_check_call(["mysqladmin", "--user=" + MYSQL_USER, "--password=" + MYSQL_PASSWD, "--host=" + MYSQL_HOST, "create", MYSQL_DB])
with open('../modules/gmysqlbackend/schema.mysql.sql', 'r') as schema_file:
run_check_call(["mysql", "--user=" + MYSQL_USER, "--password=" + MYSQL_PASSWD, "--host=" + MYSQL_HOST, MYSQL_DB], stdin=schema_file)
with open('pdns.conf', 'w') as pdns_conf:
pdns_conf.write(AUTH_MYSQL_TPL + AUTH_COMMON_TPL)
# Prepare pgsql DB with some zones.
elif backend == 'gpgsql':
subprocess.call(["dropdb", PGSQL_DB])
subprocess.check_call(["createdb", PGSQL_DB])
with open('../modules/gpgsqlbackend/schema.pgsql.sql', 'r') as schema_file:
subprocess.check_call(["psql", PGSQL_DB], stdin=schema_file)
with open('pdns.conf', 'w') as pdns_conf:
pdns_conf.write(AUTH_PGSQL_TPL + AUTH_COMMON_TPL)
# Prepare sqlite DB with some zones.
elif backend == 'gsqlite3':
subprocess.call("rm -f " + SQLITE_DB + "*", shell=True)
with open('../modules/gsqlite3backend/schema.sqlite3.sql', 'r') as schema_file:
run_check_call(["sqlite3", SQLITE_DB], stdin=schema_file)
with open('pdns.conf', 'w') as pdns_conf:
pdns_conf.write(AUTH_SQLITE_TPL + AUTH_COMMON_TPL)
# Prepare lmdb DB with some zones.
elif backend == 'lmdb':
subprocess.call("rm -f " + LMDB_DB + "*", shell=True)
with open('pdns.conf', 'w') as pdns_conf:
pdns_conf.write(AUTH_LMDB_TPL + AUTH_COMMON_TPL)
with open('bindbackend.conf', 'w') as bindbackend_conf:
bindbackend_conf.write(BINDBACKEND_CONF_TPL)
for zone in ZONES:
run_check_call(PDNSUTIL_CMD + ["load-zone", zone, ZONE_DIR+zone])
run_check_call(PDNSUTIL_CMD + ["secure-zone", "powerdnssec.org"])
servercmd = [pdns_server] + common_args + ["--no-shuffle", "--dnsupdate=yes", "--cache-ttl=0", "--api=yes"]
else:
conf_dir = 'rec-conf.d'
ensure_empty_dir(conf_dir)
with open('acl.list', 'w') as acl_list:
acl_list.write(ACL_LIST_TPL)
with open('recursor.conf', 'w') as recursor_conf:
recursor_conf.write(REC_CONF_TPL % locals())
with open(conf_dir+'/example.com..conf', 'w') as conf_file:
conf_file.write(REC_EXAMPLE_COM_CONF_TPL)
servercmd = [pdns_recursor] + common_args
# Now run pdns and the tests.
print("Launching server...")
print(format_call_args(servercmd))
serverproc = subprocess.Popen(servercmd, close_fds=True)
print("Waiting for webserver port to become available...")
available = False
for try_number in range(0, 10):
try:
res = requests.get('http://127.0.0.1:%s/' % WEBPORT)
available = True
break
except:
time.sleep(0.5)
if not available:
print("Webserver port not reachable after 10 tries, giving up.")
serverproc.terminate()
serverproc.wait()
sys.exit(2)
print("Query for example.com/A to create statistic data...")
run_check_call([sdig, "127.0.0.1", str(DNSPORT), "example.com", "A"])
print("Running tests...")
returncode = 0
test_env = {}
test_env.update(os.environ)
test_env.update({
'WEBPASSWORD': WEBPASSWORD,
'WEBPORT': str(WEBPORT),
'APIKEY': APIKEY,
'DAEMON': daemon,
'BACKEND': backend,
'MYSQL_DB': MYSQL_DB,
'MYSQL_USER': MYSQL_USER,
'MYSQL_HOST': MYSQL_HOST,
'MYSQL_PASSWD': MYSQL_PASSWD,
'PGSQL_DB': PGSQL_DB,
'SQLITE_DB': SQLITE_DB,
'LMDB_DB': LMDB_DB,
'PDNSUTIL_CMD': ' '.join(PDNSUTIL_CMD),
'SDIG': sdig,
'DNSPORT': str(DNSPORT)
})
try:
print("")
run_check_call(["nosetests", "--with-xunit", "-v"] + tests, env=test_env)
except subprocess.CalledProcessError as ex:
returncode = ex.returncode
finally:
if wait:
print("Waiting as requested, press ENTER to stop.")
raw_input()
serverproc.terminate()
serverproc.wait()
sys.exit(returncode)
| gpl-2.0 |
Nitaco/ansible | lib/ansible/modules/windows/win_region.py | 52 | 3103 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2016, Ansible, inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Standard Ansible module metadata consumed by ansible-doc tooling.
ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['preview'],
                    'supported_by': 'community'}
DOCUMENTATION = r'''
module: win_region
version_added: "2.3"
short_description: Set the region and format settings
description:
- Set the location settings of a Windows Server.
- Set the format settings of a Windows Server.
- Set the unicode language settings of a Windows Server.
- Copy across these settings to the default profile.
options:
location:
description:
- The location to set for the current user, see
U(https://msdn.microsoft.com/en-us/library/dd374073.aspx)
for a list of GeoIDs you can use and what location it relates to.
This needs to be set if C(format) or C(unicode_language) is not
set.
format:
description:
- The language format to set for the current user, see
U(https://msdn.microsoft.com/en-us/library/system.globalization.cultureinfo.aspx)
for a list of culture names to use. This needs to be set if
C(location) or C(unicode_language) is not set.
unicode_language:
description:
- The unicode language format to set for all users, see
U(https://msdn.microsoft.com/en-us/library/system.globalization.cultureinfo.aspx)
for a list of culture names to use. This needs to be set if
C(location) or C(format) is not set. After setting this
value a reboot is required for it to take effect.
copy_settings:
description:
- This will copy the current format and location values to new user
profiles and the welcome screen. This will only run if
C(location), C(format) or C(unicode_language) has resulted in a
change. If this process runs then it will always result in a
change.
type: bool
default: 'no'
author:
- Jordan Borean (@jborean93)
'''
EXAMPLES = r'''
# Set the region format to English United States
- win_region:
format: en-US
# Set the region format to English Australia and copy settings to new profiles
- win_region:
format: en-AU
copy_settings: yes
# Set the unicode language to English Great Britain, reboot if required
- win_region:
unicode_language: en-GB
register: result
- win_reboot:
when: result.restart_required
# Set the location to United States
- win_region:
location: 244
# Set format, location and unicode to English Australia and copy settings, reboot if required
- win_region:
location: 12
format: en-AU
unicode_language: en-AU
register: result
- win_reboot:
when: result.restart_required
'''
RETURN = r'''
restart_required:
description: Whether a reboot is required for the change to take effect
returned: success
type: boolean
sample: True
'''
| gpl-3.0 |
nkatre/php-buildpack | extensions/sessions/extension.py | 9 | 4668 | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Session Config Extension
Configures redis or memcached for session sharing
"""
from extension_helpers import PHPExtensionHelper
class BaseSetup(object):
    """Common behavior for service-backed PHP session-store integrations.

    Subclasses provide DEFAULT_SESSION_STORE_TRIGGER,
    CUSTOM_SESSION_STORE_KEY_NAME and EXTENSION_NAME class attributes.
    """

    def __init__(self, ctx, info):
        self._ctx = ctx
        self._info = info
        self.creds = info.get('credentials', {})

    def session_store_key(self):
        """Return the service-name fragment that marks a session store.

        A custom fragment may be supplied via the build context under
        CUSTOM_SESSION_STORE_KEY_NAME; otherwise the subclass default
        trigger is used.
        """
        return self._ctx.get(self.CUSTOM_SESSION_STORE_KEY_NAME,
                             self.DEFAULT_SESSION_STORE_TRIGGER)

    def custom_config_php_ini(self, php_ini):
        """Hook for subclasses to append extra php.ini settings."""
        pass
class RedisSetup(BaseSetup):
    """Session storage backed by a bound Redis service."""

    DEFAULT_SESSION_STORE_TRIGGER = 'redis-sessions'
    CUSTOM_SESSION_STORE_KEY_NAME = 'REDIS_SESSION_STORE_SERVICE_NAME'
    EXTENSION_NAME = 'redis'

    def __init__(self, ctx, info):
        BaseSetup.__init__(self, ctx, info)

    def session_save_path(self):
        """Build the phpredis session.save_path connection string."""
        host = self.creds.get('hostname',
                              self.creds.get('host', 'not-found'))
        port = self.creds.get('port', 'not-found')
        password = self.creds.get('password', '')
        return "tcp://%s:%s?auth=%s" % (host, port, password)
class MemcachedSetup(BaseSetup):
    """Session storage backed by a bound memcached service."""

    DEFAULT_SESSION_STORE_TRIGGER = 'memcached-sessions'
    CUSTOM_SESSION_STORE_KEY_NAME = 'MEMCACHED_SESSION_STORE_SERVICE_NAME'
    EXTENSION_NAME = 'memcached'

    def __init__(self, ctx, info):
        BaseSetup.__init__(self, ctx, info)

    def session_save_path(self):
        """Build the memcached session.save_path value."""
        servers = self.creds.get('servers', 'not-found')
        return 'PERSISTENT=app_sessions %s' % servers

    def custom_config_php_ini(self, php_ini):
        """Append the binary-protocol + SASL settings memcached needs."""
        username = self.creds.get('username', '')
        password = self.creds.get('password', '')
        php_ini.append_lines([
            'memcached.sess_binary=On\n',
            'memcached.use_sasl=On\n',
            'memcached.sess_sasl_username=%s\n' % username,
            'memcached.sess_sasl_password=%s\n' % password
        ])
class SessionStoreConfig(PHPExtensionHelper):
    """Build-pack extension that wires PHP sessions to a bound service.

    Detects a Redis or memcached service whose name contains the session
    trigger, loads the matching PHP extension, and rewrites php.ini's
    session settings during compile.
    """

    def __init__(self, ctx):
        PHPExtensionHelper.__init__(self, ctx)
        # The matched RedisSetup/MemcachedSetup instance, found lazily.
        self.service = None

    def _should_compile(self):
        # Only participate in the build when a matching service is bound.
        if self.service is None:
            self.service = self._load_session()
        return self.service is not None

    def _load_session(self):
        """Return the setup object for the first matching bound service.

        Returns None (implicitly) when no service name contains a
        session-store trigger.
        """
        # load search keys
        session_types = [
            RedisSetup,
            MemcachedSetup
        ]
        # search for an appropriately name session store
        vcap_services = self._ctx.get('VCAP_SERVICES', {})
        # NOTE: iteritems() makes this Python 2 only.
        for provider, services in vcap_services.iteritems():
            for service in services:
                service_name = service.get('name', '')
                for session_type in session_types:
                    session = session_type(self._ctx, service)
                    if service_name.find(session.session_store_key()) != -1:
                        return session

    def _configure(self):
        # load the PHP extension that provides session save handler
        # NOTE(review): if 'PHP_EXTENSIONS' is missing from the context this
        # appends to a throwaway default list -- confirm the key always exists.
        if self.service is not None:
            self._ctx.get('PHP_EXTENSIONS',
                          []).append(self.service.EXTENSION_NAME)

    def _compile(self, install):
        # modify php.ini to contain the right session config
        self.load_config()
        self._php_ini.update_lines(
            '^session\.name = JSESSIONID$',
            'session.name = PHPSESSIONID')
        self._php_ini.update_lines(
            '^session\.save_handler = files$',
            'session.save_handler = %s' % self.service.EXTENSION_NAME)
        self._php_ini.update_lines(
            '^session\.save_path = "@{TMPDIR}"$',
            'session.save_path = "%s"' % self.service.session_save_path())
        self.service.custom_config_php_ini(self._php_ini)
        self._php_ini.save(self._php_ini_path)
SessionStoreConfig.register(__name__)
| apache-2.0 |
arifsetiawan/edx-platform | lms/djangoapps/staticbook/views.py | 91 | 6351 | """
Views for serving static textbooks.
"""
from django.contrib.auth.decorators import login_required
from django.http import Http404
from edxmako.shortcuts import render_to_response
from opaque_keys.edx.locations import SlashSeparatedCourseKey
from xmodule.annotator_token import retrieve_token
from courseware.access import has_access
from courseware.courses import get_course_with_access
from notes.utils import notes_enabled_for_course
from static_replace import replace_static_urls
@login_required
def index(request, course_id, book_index, page=None):
    """
    Serve static image-based textbooks.

    Arguments:
        request: Django request object.
        course_id: deprecated (slash-separated) course id string.
        book_index: zero-based index into course.textbooks.
        page: optional page number; defaults to the textbook's start page.

    Raises Http404 when book_index is out of range.
    """
    course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id)
    course = get_course_with_access(request.user, 'load', course_key)
    staff_access = bool(has_access(request.user, 'staff', course))
    book_index = int(book_index)
    if book_index < 0 or book_index >= len(course.textbooks):
        raise Http404("Invalid book index value: {0}".format(book_index))
    textbook = course.textbooks[book_index]
    table_of_contents = textbook.table_of_contents
    if page is None:
        page = textbook.start_page
    return render_to_response(
        'staticbook.html',
        {
            'book_index': book_index, 'page': int(page),
            'course': course,
            'book_url': textbook.book_url,
            'table_of_contents': table_of_contents,
            'start_page': textbook.start_page,
            'end_page': textbook.end_page,
            'staff_access': staff_access,
        },
    )
def remap_static_url(original_url, course):
    """Remap a URL in the ways the course requires."""
    # replace_static_urls operates on quoted URLs, so wrap the input in
    # single quotes, rewrite it, then strip the quotes back off.
    quoted_input = "'" + original_url + "'"
    quoted_output = replace_static_urls(
        quoted_input,
        getattr(course, 'data_dir', None),
        course_id=course.id,
        static_asset_path=course.static_asset_path
    )
    return quoted_output[1:-1]
@login_required
def pdf_index(request, course_id, book_index, chapter=None, page=None):
    """
    Display a PDF textbook.

    course_id: course for which to display text. The course should have
      "pdf_textbooks" property defined.
    book index: zero-based index of which PDF textbook to display.
    chapter: (optional) one-based index into the chapter array of textbook
      PDFs to display. Defaults to first chapter. Specifying this assumes
      that there are separate PDFs for each chapter in a textbook.
    page: (optional) one-based page number to display within the PDF.
      Defaults to first page.

    Raises Http404 when book_index is out of range.
    """
    course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id)
    course = get_course_with_access(request.user, 'load', course_key)
    staff_access = bool(has_access(request.user, 'staff', course))
    book_index = int(book_index)
    if book_index < 0 or book_index >= len(course.pdf_textbooks):
        raise Http404("Invalid book index value: {0}".format(book_index))
    textbook = course.pdf_textbooks[book_index]
    # viewer_params is the query fragment consumed by the PDF viewer template.
    viewer_params = '&file='
    current_url = ''
    if 'url' in textbook:
        textbook['url'] = remap_static_url(textbook['url'], course)
        viewer_params += textbook['url']
        current_url = textbook['url']
    # then remap all the chapter URLs as well, if they are provided.
    current_chapter = None
    if 'chapters' in textbook:
        for entry in textbook['chapters']:
            entry['url'] = remap_static_url(entry['url'], course)
        if chapter is not None:
            # chapter is one-based in the URL, zero-based in the list.
            current_chapter = textbook['chapters'][int(chapter) - 1]
        else:
            current_chapter = textbook['chapters'][0]
        viewer_params += current_chapter['url']
        current_url = current_chapter['url']
    viewer_params += '#zoom=page-fit&disableRange=true'
    if page is not None:
        viewer_params += '&page={}'.format(page)
    # ?viewer=true selects the bare viewer template instead of the full page.
    if request.GET.get('viewer', '') == 'true':
        template = 'pdf_viewer.html'
    else:
        template = 'static_pdfbook.html'
    return render_to_response(
        template,
        {
            'book_index': book_index,
            'course': course,
            'textbook': textbook,
            'chapter': chapter,
            'page': page,
            'viewer_params': viewer_params,
            'current_chapter': current_chapter,
            'staff_access': staff_access,
            'current_url': current_url,
        },
    )
@login_required
def html_index(request, course_id, book_index, chapter=None):
    """
    Display an HTML textbook.

    course_id: course for which to display text. The course should have
      "html_textbooks" property defined.
    book index: zero-based index of which HTML textbook to display.
    chapter: (optional) one-based index into the chapter array of textbook
      HTML files to display. Defaults to first chapter. Specifying this
      assumes that there are separate HTML files for each chapter in a
      textbook.

    Raises Http404 when book_index is out of range.
    """
    course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id)
    course = get_course_with_access(request.user, 'load', course_key)
    staff_access = bool(has_access(request.user, 'staff', course))
    notes_enabled = notes_enabled_for_course(course)
    book_index = int(book_index)
    if book_index < 0 or book_index >= len(course.html_textbooks):
        raise Http404("Invalid book index value: {0}".format(book_index))
    textbook = course.html_textbooks[book_index]
    if 'url' in textbook:
        textbook['url'] = remap_static_url(textbook['url'], course)
    # then remap all the chapter URLs as well, if they are provided.
    if 'chapters' in textbook:
        for entry in textbook['chapters']:
            entry['url'] = remap_static_url(entry['url'], course)
    student = request.user
    return render_to_response(
        'static_htmlbook.html',
        {
            'book_index': book_index,
            'course': course,
            'textbook': textbook,
            'chapter': chapter,
            'student': student,
            'staff_access': staff_access,
            'notes_enabled': notes_enabled,
            'storage': course.annotation_storage_url,
            'token': retrieve_token(student.email, course.annotation_token_secret),
        },
    )
| agpl-3.0 |
seshin/namebench | libnamebench/reporter.py | 173 | 16737 | # Copyright 2009 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Report generation class."""
import csv
import datetime
import operator
import os.path
import platform
# external dependencies (from third_party)
import jinja2
import simplejson
import addr_util
import charts
import nameserver
import nameserver_list
import url_map
import util
# Only bother showing a percentage if we have this many tests.
MIN_RELEVANT_COUNT = 50
class ReportGenerator(object):
"""Generate reports - ASCII, HTML, etc."""
  def __init__(self, config, nameservers, results, index=None, geodata=None,
               status_callback=None):
    """Constructor.

    Args:
      config: A dictionary of configuration information.
      nameservers: A list of nameserver objects to include in the report.
      results: A dictionary of results from Benchmark.Run()
      index: A dictionary of results for index hosts.
      geodata: A dictionary of geographic information.
      status_callback: where to send msg() calls.

    NOTE(review): config is documented as a dictionary, but FilteredConfig
    and CreateReport read it via attribute access (self.config.timeout,
    dir(self.config)) -- it appears to be a config object; confirm.
    """
    self.nameservers = nameservers
    self.results = results
    self.index = index
    self.config = config
    self.geodata = geodata
    self.status_callback = status_callback
    # Memoization: ComputeAverages results keyed by len(self.results), and
    # the one-shot summary built by _GenerateNameServerSummary.
    self.cached_averages = {}
    self.cached_summary = None
def msg(self, msg, **kwargs):
if self.status_callback:
self.status_callback(msg, **kwargs)
  def ComputeAverages(self):
    """Process all runs for all hosts, yielding an average for each host.

    Returns a list of tuples:
      (ns, overall_average, run_averages, fastest, slowest,
       failure_count, nx_count, total_count)
    Disabled/hidden nameservers are skipped.  Results are memoized keyed
    on the number of entries in self.results.
    """
    if len(self.results) in self.cached_averages:
      return self.cached_averages[len(self.results)]
    records = []
    for ns in self.results:
      if ns.is_disabled or ns.is_hidden:
        continue
      failure_count = 0
      nx_count = 0
      run_averages = []
      for test_run in self.results[ns]:
        # x: record, req_type, duration, response
        # NOTE(review): total_count is overwritten each iteration, so the
        # value appended below reflects only the LAST test run -- confirm
        # whether this should accumulate across runs.
        total_count = len(test_run)
        failure_count += len([x for x in test_run if not x[3]])
        nx_count += len([x for x in test_run if x[3] and not x[3].answer])
        duration = sum([x[2] for x in test_run])
        run_averages.append(duration / len(test_run))
      # This appears to be a safe use of averaging averages
      overall_average = util.CalculateListAverage(run_averages)
      (fastest, slowest) = self.FastestAndSlowestDurationForNameServer(ns)
      records.append((ns, overall_average, run_averages, fastest, slowest,
                      failure_count, nx_count, total_count))
    self.cached_averages[len(self.results)] = records
    return self.cached_averages[len(self.results)]
  def FastestAndSlowestDurationForNameServer(self, ns):
    """For a given nameserver, find the fastest/slowest non-error durations.

    Only queries with an actual answer update the extremes; if none
    qualify, the overall min/max of all durations are used instead.

    NOTE(review): records are unpacked as 5-tuples here, while the comment
    in ComputeAverages describes 4-tuples (record, req_type, duration,
    response) -- confirm the actual record shape.
    """
    fastest_duration = 2**32  # sentinel: "no qualifying duration seen yet"
    slowest_duration = -1     # sentinel, same idea
    durations = []
    for test_run_results in self.results[ns]:
      for (unused_host, unused_type, duration, response, unused_error) in test_run_results:
        durations.append(duration)
        if response and response.answer:
          if duration < fastest_duration:
            fastest_duration = duration
          if duration > slowest_duration:
            slowest_duration = duration
    # If we have no error-free durations, settle for anything.
    if fastest_duration == 2**32:
      fastest_duration = min(durations)
    if slowest_duration == -1:
      slowest_duration = max(durations)
    return (fastest_duration, slowest_duration)
  def FastestNameServerResult(self):
    """Return [(nameserver, fastest_duration), ...] sorted fastest-first."""
    # TODO(tstromberg): This should not count queries which failed.
    fastest = [(ns, self.FastestAndSlowestDurationForNameServer(ns)[0]) for ns in self.results]
    return sorted(fastest, key=operator.itemgetter(1))
def BestOverallNameServer(self):
"""Return the best nameserver we found."""
sorted_averages = sorted(self.ComputeAverages(), key=operator.itemgetter(1))
hosts = [x[0] for x in sorted_averages]
for host in hosts:
if not host.is_failure_prone and not host.is_disabled:
return host
# return something if none of them are good.
return hosts[0]
def NearestNameServers(self, count=2):
"""Return the nameservers with the least latency."""
min_responses = sorted(self.FastestNameServerResult(),
key=operator.itemgetter(1))
return [x[0] for x in min_responses if not x.is_disabled][0:count]
  def _LowestLatencyAsciiChart(self):
    """Return a simple set of tuples to generate an ASCII chart from.

    Each tuple is (name, text_bar, duration); bars are scaled relative to
    the slowest (last) entry in the fastest-first list.
    """
    fastest = self.FastestNameServerResult()
    slowest_result = fastest[-1][1]
    chart = []
    for (ns, duration) in fastest:
      textbar = util.DrawTextBar(duration, slowest_result)
      chart.append((ns.name, textbar, duration))
    return chart
  def _MeanRequestAsciiChart(self):
    """Creates an ASCII Chart of Mean Response Time.

    Each tuple is (name, text_bar, overall_mean); bars are scaled relative
    to the slowest mean.
    """
    sorted_averages = sorted(self.ComputeAverages(), key=operator.itemgetter(1))
    max_result = sorted_averages[-1][1]
    chart = []
    for result in sorted_averages:
      (ns, overall_mean) = result[0:2]
      textbar = util.DrawTextBar(overall_mean, max_result)
      chart.append((ns.name, textbar, overall_mean))
    return chart
  def CreateReport(self, format='ascii', output_fp=None, csv_path=None,
                   sharing_url=None, sharing_state=None):
    """Create a Report in a given format.

    Args:
      format: string (ascii, html, etc.) which defines what template to load.
      output_fp: A File object to send the output to (optional)
      csv_path: A string pathname to the CSV output to link to (optional)
      sharing_url: A string URL where the results have been shared to. (optional)
      sharing_state: A string showing what the shared result state is (optional)

    Returns:
      A rendered template (string), or None when output_fp was given
      (the rendered output is written and the file is closed instead).
    """
    # First generate all of the charts necessary.
    if format == 'ascii':
      lowest_latency = self._LowestLatencyAsciiChart()
      mean_duration = self._MeanRequestAsciiChart()
    else:
      lowest_latency = None
      mean_duration = None
    sorted_averages = sorted(self.ComputeAverages(), key=operator.itemgetter(1))
    runs_data = [(x[0].name, x[2]) for x in sorted_averages]
    mean_duration_url = charts.PerRunDurationBarGraph(runs_data)
    min_duration_url = charts.MinimumDurationBarGraph(self.FastestNameServerResult())
    distribution_url_200 = charts.DistributionLineGraph(self.DigestedResults(),
                                                        scale=200)
    distribution_url = charts.DistributionLineGraph(self.DigestedResults(),
                                                    scale=self.config.timeout * 1000)
    # Now generate all of the required textual information.
    ns_summary = self._GenerateNameServerSummary()
    best_ns = self.BestOverallNameServer()
    # Recommend the overall winner plus the next two lowest-latency servers.
    recommended = [ns_summary[0]]
    for row in sorted(ns_summary, key=operator.itemgetter('duration_min')):
      if row['ip'] != ns_summary[0]['ip']:
        recommended.append(row)
      if len(recommended) == 3:
        break
    # Compare the winner against the reference (system) nameserver; the
    # loop stops at the first record flagged as the reference.
    compare_title = 'Undecided'
    compare_subtitle = 'Not enough servers to compare.'
    compare_reference = None
    for ns_record in ns_summary:
      if ns_record.get('is_reference'):
        if ns_record == ns_summary[0]:
          compare_reference = ns_record
          compare_title = 'N/A'
          compare_subtitle = ''
        elif len(ns_record['durations'][0]) >= MIN_RELEVANT_COUNT:
          compare_reference = ns_record
          compare_title = '%0.1f%%' % ns_summary[0]['diff']
          compare_subtitle = 'Faster'
        else:
          compare_subtitle = 'Too few tests (needs %s)' % (MIN_RELEVANT_COUNT)
        break
    # Fragile, makes assumption about the CSV being in the same path as the HTML file
    if csv_path:
      csv_link = os.path.basename(csv_path)
    else:
      csv_link = None
    template_name = '%s.tmpl' % format
    template_path = util.FindDataFile(os.path.join('templates', template_name))
    filtered_config = self.FilteredConfig()
    template_dir = os.path.dirname(template_path)
    env = jinja2.Environment(loader=jinja2.FileSystemLoader(template_dir))
    template = env.get_template(template_name)
    rendered = template.render(
        best_ns=best_ns,
        timestamp=datetime.datetime.now(),
        lowest_latency=lowest_latency,
        version=self.config.version,
        compare_subtitle=compare_subtitle,
        compare_title=compare_title,
        compare_reference=compare_reference,
        sharing_url=sharing_url,
        sharing_state=sharing_state,
        config=filtered_config,
        mean_duration=mean_duration,
        ns_summary=ns_summary,
        mean_duration_url=mean_duration_url,
        min_duration_url=min_duration_url,
        distribution_url=distribution_url,
        distribution_url_200=distribution_url_200,
        recommended=recommended,
        csv_link=csv_link
    )
    if output_fp:
      output_fp.write(rendered)
      output_fp.close()
    else:
      return rendered
def FilteredConfig(self):
"""Generate a watered down config listing for our report."""
keys = [x for x in dir(self.config) if not x.startswith('_') and x not in ('config', 'site_url')]
config_items = []
for key in keys:
value = getattr(self.config, key)
# > values are ConfigParser internals. None values are just noise.
if isinstance(value, int) or isinstance(value, float) or isinstance(value, str):
config_items.append((key, value))
return sorted(config_items)
def DigestedResults(self):
  """Return a tuple of nameserver and all associated durations.

  Returns:
    List of (nameserver, durations) pairs, where durations is a flat list
    of every measured duration across all test runs for that nameserver.
  """
  digested = []
  for ns in self.results:
    all_durations = []
    for run_results in self.results[ns]:
      # Column 2 of each result row is the measured duration.
      all_durations.extend(row[2] for row in run_results)
    digested.append((ns, all_durations))
  return digested
def _GenerateNameServerSummary(self):
  """Build (and cache) the per-nameserver summary dicts used by the reports.

  Returns:
    List of per-nameserver dictionaries, sorted by benchmark position.
  """
  if self.cached_summary:
    return self.cached_summary

  nsdata = {}
  # ComputeAverages() rows are sorted so the fastest overall average is first.
  sorted_averages = sorted(self.ComputeAverages(), key=operator.itemgetter(1))
  placed_at = -1
  fastest = {}
  fastest_normal = {}
  reference = {}

  # Fill in basic information for all nameservers, even those without scores.
  fake_position = 1000
  for ns in sorted(self.nameservers.visible_servers, key=operator.attrgetter('check_average')):
    if ns.is_hidden:
      continue
    fake_position += 1
    nsdata[ns] = {
        'ip': ns.ip,
        'name': ns.name,
        'hostname': ns.hostname,
        'version': ns.version,
        'node_ids': list(ns.node_ids),
        'sys_position': ns.system_position,
        'is_failure_prone': ns.is_failure_prone,
        'duration_min': float(ns.fastest_check_duration),
        'is_reference': False,
        'is_disabled': ns.is_disabled,
        'check_average': ns.check_average,
        'error_count': ns.error_count,
        'timeout_count': ns.timeout_count,
        'notes': url_map.CreateNoteUrlTuples(ns.notes),
        # Placeholder position; overwritten below for benchmarked servers.
        'position': fake_position
    }

  # Fill the scores in.
  for (ns, unused_avg, run_averages, fastest, slowest, unused_failures, nx_count, unused_total) in sorted_averages:
    placed_at += 1

    durations = []
    # NOTE(review): every iteration appends durations from run 0
    # (self.results[ns][0]) rather than the current run -- looks like a
    # copy-paste bug, confirm against upstream before relying on it.
    for _ in self.results[ns]:
      durations.append([x[2] for x in self.results[ns][0]])

    nsdata[ns].update({
        'position': placed_at,
        'overall_average': util.CalculateListAverage(run_averages),
        'averages': run_averages,
        'duration_min': float(fastest),
        'duration_max': slowest,
        'nx_count': nx_count,
        'durations': durations,
        'index': self._GenerateIndexSummary(ns),
    })

    # Determine which nameserver to refer to for improvement scoring
    if not ns.is_disabled:
      if ns.system_position == 0:
        reference = ns
      elif not fastest_normal and not ns.HasTag('preferred'):
        fastest_normal = ns

  # If no reference was found, use the fastest non-global nameserver record.
  if not reference:
    if fastest_normal:
      reference = fastest_normal
    else:
      # The second ns.
      if len(sorted_averages) > 1:
        reference = sorted_averages[1][0]

  # Update the improvement scores for each nameserver.
  if reference:
    for ns in nsdata:
      if nsdata[ns]['ip'] != nsdata[reference]['ip']:
        if 'overall_average' in nsdata[ns]:
          # Percentage speedup relative to the reference server.
          nsdata[ns]['diff'] = ((nsdata[reference]['overall_average'] /
                                 nsdata[ns]['overall_average']) - 1) * 100
      else:
        nsdata[ns]['is_reference'] = True

  self.cached_summary = sorted(nsdata.values(), key=operator.itemgetter('position'))
  return self.cached_summary
def _GenerateIndexSummary(self, ns):
  """Summarize the index-test results for a single nameserver.

  Args:
    ns: nameserver object to look up in self.index.

  Returns:
    List of (host, req_type, duration, answer_count, ttl, response_text)
    tuples; empty when no index data was collected for this nameserver.
  """
  # Get the meat out of the index data.
  index = []
  if ns in self.index:
    for host, req_type, duration, response, unused_x in self.index[ns]:
      # Only the count and TTL are needed here; the text is regenerated.
      answer_count, ttl = self._ResponseToCountTtlText(response)[0:2]
      index.append((host, req_type, duration, answer_count, ttl,
                    nameserver.ResponseToAscii(response)))
  return index
def _GetPlatform(self):
  """Return a human-friendly platform name for this host.

  Returns:
    String such as 'Mac OS X', 'Linux (Ubuntu)' or the raw platform name.
  """
  my_platform = platform.system()
  if my_platform == 'Darwin':
    # Only report 'Mac OS X' when the usual OS X admin tools are present.
    if os.path.exists('/usr/sbin/sw_vers') or os.path.exists('/usr/sbin/system_profiler'):
      my_platform = 'Mac OS X'
  if my_platform == 'Linux':
    # NOTE(review): platform.dist() was deprecated and removed in
    # Python 3.8; this assumes a Python 2 era runtime -- confirm before
    # porting this code forward.
    distro = platform.dist()[0]
    if distro:
      my_platform = 'Linux (%s)' % distro
  return my_platform
def _CreateSharingData(self):
  """Build the anonymized payload used when sharing results publicly.

  Returns:
    Dict with 'config' (scalar config + platform), 'nameservers'
    (privacy-scrubbed summary rows) and 'geodata'.
  """
  config = dict(self.FilteredConfig())
  config['platform'] = self._GetPlatform()

  # Purge sensitive information (be aggressive!)
  purged_rows = []
  for row in self._GenerateNameServerSummary():
    # This will be our censored record.
    p = dict(row)
    p['notes'] = []
    for note in row['notes']:
      # Mask any IP addresses embedded in free-form note text.
      p['notes'].append({'text': addr_util.MaskStringWithIPs(note['text']), 'url': note['url']})
    p['ip'], p['hostname'], p['name'] = addr_util.MaskPrivateHost(row['ip'], row['hostname'], row['name'])
    if (addr_util.IsPrivateIP(row['ip']) or addr_util.IsLoopbackIP(row['ip'])
        or addr_util.IsPrivateHostname(row['hostname'])):
      # Private/loopback hosts: drop identifying node ids and version info.
      p['node_ids'] = []
      p['version'] = None
    purged_rows.append(p)
  return {'config': config, 'nameservers': purged_rows, 'geodata': self.geodata}
def CreateJsonData(self):
  """Serialize the anonymized sharing payload as a JSON string."""
  return simplejson.dumps(self._CreateSharingData())
def _ResponseToCountTtlText(self, response):
"""For a given DNS response, parse the most important details out.
Args:
response: DNS response
Returns:
tuple of (answer_count, ttl, answer_text)
"""
answer_text = ''
answer_count = -1
ttl = -1
if response:
if response.answer:
answer_count = len(response.answer)
ttl = response.answer[0].ttl
answer_text = nameserver.ResponseToAscii(response)
return (answer_count, ttl, answer_text)
def SaveResultsToCsv(self, filename):
  """Write out a CSV file with detailed results on each request.

  Args:
    filename: full path on where to save results (string)

  Sample output:
    nameserver, test_number, test, type, duration, answer_count, ttl
  """
  self.msg('Opening %s for write' % filename, debug=True)
  csv_file = open(filename, 'w')
  output = csv.writer(csv_file)
  # Header must stay in sync with the writerow() call below; the data rows
  # write 10 columns (ending with the error message), so the header needs a
  # trailing 'Error' column that was previously missing.
  output.writerow(['IP', 'Name', 'Test_Num', 'Record',
                   'Record_Type', 'Duration', 'TTL', 'Answer_Count',
                   'Response', 'Error'])
  for ns in self.results:
    self.msg('Saving detailed data for %s' % ns, debug=True)
    for (test_run, test_results) in enumerate(self.results[ns]):
      for (record, req_type, duration, response, error_msg) in test_results:
        (answer_count, ttl, answer_text) = self._ResponseToCountTtlText(response)
        output.writerow([ns.ip, ns.name, test_run, record, req_type, duration,
                         ttl, answer_count, answer_text, error_msg])
  csv_file.close()
  self.msg('%s saved.' % filename, debug=True)
| apache-2.0 |
karanisverma/flasktest | lib/werkzeug/http.py | 317 | 33404 | # -*- coding: utf-8 -*-
"""
werkzeug.http
~~~~~~~~~~~~~
Werkzeug comes with a bunch of utilities that help Werkzeug to deal with
HTTP data. Most of the classes and functions provided by this module are
used by the wrappers, but they are useful on their own, too, especially if
the response and request objects are not used.
This covers some of the more HTTP centric features of WSGI, some other
utilities such as cookie handling are documented in the `werkzeug.utils`
module.
:copyright: (c) 2013 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import re
from time import time, gmtime
try:
from email.utils import parsedate_tz
except ImportError: # pragma: no cover
from email.Utils import parsedate_tz
try:
from urllib2 import parse_http_list as _parse_list_header
except ImportError: # pragma: no cover
from urllib.request import parse_http_list as _parse_list_header
from datetime import datetime, timedelta
from hashlib import md5
import base64
from werkzeug._internal import _cookie_quote, _make_cookie_domain, \
_cookie_parse_impl
from werkzeug._compat import to_unicode, iteritems, text_type, \
string_types, try_coerce_native, to_bytes, PY2, \
integer_types
# NOTE: latin1 is technically incorrect for cookie values (browsers send
# them in the page encoding), but it matches how PEP 3333 transports all
# header bytes.
_cookie_charset = 'latin1'
_accept_re = re.compile(r'([^\s;,]+)(?:[^,]*?;\s*q=(\d*(?:\.\d+)?))?')
_token_chars = frozenset("!#$%&'*+-.0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
'^_`abcdefghijklmnopqrstuvwxyz|~')
_etag_re = re.compile(r'([Ww]/)?(?:"(.*?)"|(.*?))(?:\s*,\s*|$)')
_unsafe_header_chars = set('()<>@,;:\"/[]?={} \t')
_quoted_string_re = r'"[^"\\]*(?:\\.[^"\\]*)*"'
_option_header_piece_re = re.compile(r';\s*(%s|[^\s;=]+)\s*(?:=\s*(%s|[^;]+))?\s*' %
(_quoted_string_re, _quoted_string_re))
_entity_headers = frozenset([
'allow', 'content-encoding', 'content-language', 'content-length',
'content-location', 'content-md5', 'content-range', 'content-type',
'expires', 'last-modified'
])
_hop_by_hop_headers = frozenset([
'connection', 'keep-alive', 'proxy-authenticate',
'proxy-authorization', 'te', 'trailer', 'transfer-encoding',
'upgrade'
])
HTTP_STATUS_CODES = {
100: 'Continue',
101: 'Switching Protocols',
102: 'Processing',
200: 'OK',
201: 'Created',
202: 'Accepted',
203: 'Non Authoritative Information',
204: 'No Content',
205: 'Reset Content',
206: 'Partial Content',
207: 'Multi Status',
226: 'IM Used', # see RFC 3229
300: 'Multiple Choices',
301: 'Moved Permanently',
302: 'Found',
303: 'See Other',
304: 'Not Modified',
305: 'Use Proxy',
307: 'Temporary Redirect',
400: 'Bad Request',
401: 'Unauthorized',
402: 'Payment Required', # unused
403: 'Forbidden',
404: 'Not Found',
405: 'Method Not Allowed',
406: 'Not Acceptable',
407: 'Proxy Authentication Required',
408: 'Request Timeout',
409: 'Conflict',
410: 'Gone',
411: 'Length Required',
412: 'Precondition Failed',
413: 'Request Entity Too Large',
414: 'Request URI Too Long',
415: 'Unsupported Media Type',
416: 'Requested Range Not Satisfiable',
417: 'Expectation Failed',
418: 'I\'m a teapot', # see RFC 2324
422: 'Unprocessable Entity',
423: 'Locked',
424: 'Failed Dependency',
426: 'Upgrade Required',
428: 'Precondition Required', # see RFC 6585
429: 'Too Many Requests',
431: 'Request Header Fields Too Large',
449: 'Retry With', # proprietary MS extension
500: 'Internal Server Error',
501: 'Not Implemented',
502: 'Bad Gateway',
503: 'Service Unavailable',
504: 'Gateway Timeout',
505: 'HTTP Version Not Supported',
507: 'Insufficient Storage',
510: 'Not Extended'
}
def wsgi_to_bytes(data):
    """Coerce WSGI unicode-represented bytes to real ones.

    PEP 3333 transports header bytes as latin1-decoded text, so text input
    is round-tripped back through latin1.
    """
    if isinstance(data, bytes):
        return data
    return data.encode('latin1')  # XXX: utf8 fallback?
def bytes_to_wsgi(data):
assert isinstance(data, bytes), 'data must be bytes'
if isinstance(data, str):
return data
else:
return data.decode('latin1')
def quote_header_value(value, extra_chars='', allow_token=True):
    """Quote a header value if necessary.

    .. versionadded:: 0.5

    :param value: the value to quote.
    :param extra_chars: a list of extra characters to skip quoting.
    :param allow_token: if this is enabled token values are returned
                        unchanged.
    """
    if isinstance(value, bytes):
        value = bytes_to_wsgi(value)
    value = str(value)
    # A value made up entirely of token characters needs no quoting.
    if allow_token and set(value).issubset(_token_chars | set(extra_chars)):
        return value
    escaped = value.replace('\\', '\\\\').replace('"', '\\"')
    return '"%s"' % escaped
def unquote_header_value(value, is_filename=False):
    r"""Unquotes a header value.  (Reversal of :func:`quote_header_value`).
    This does not use the real unquoting but what browsers are actually
    using for quoting.

    .. versionadded:: 0.5

    :param value: the header value to unquote.
    """
    # Only values wrapped in double quotes get unquoted.  This is not the
    # real RFC unquoting, but fixing that would break Internet Explorer
    # (and likely other browsers), which uploads files with
    # "C:\foo\bar.txt" as the filename.
    if not (value and value[0] == value[-1] == '"'):
        return value
    inner = value[1:-1]
    # Leave UNC filenames (\\server\share) untouched: unescaping would
    # collapse the leading double backslash and break
    # _fix_ie_filename().  See #458.
    if is_filename and inner[:2] == '\\\\':
        return inner
    return inner.replace('\\\\', '\\').replace('\\"', '"')
def dump_options_header(header, options):
    """The reverse function to :func:`parse_options_header`.

    :param header: the header to dump
    :param options: a dict of options to append.
    """
    segments = [] if header is None else [header]
    for key, value in iteritems(options):
        # Options without a value are emitted as bare keys.
        if value is None:
            segments.append(key)
        else:
            segments.append('%s=%s' % (key, quote_header_value(value)))
    return '; '.join(segments)
def dump_header(iterable, allow_token=True):
    """Dump an HTTP header again.  This is the reversal of
    :func:`parse_list_header`, :func:`parse_set_header` and
    :func:`parse_dict_header`.  This also quotes strings that include an
    equals sign unless you pass it as dict of key, value pairs.

    >>> dump_header({'foo': 'bar baz'})
    'foo="bar baz"'
    >>> dump_header(('foo', 'bar baz'))
    'foo, "bar baz"'

    :param iterable: the iterable or dict of values to quote.
    :param allow_token: if set to `False` tokens as values are disallowed.
                        See :func:`quote_header_value` for more details.
    """
    if isinstance(iterable, dict):
        items = []
        for key, value in iteritems(iterable):
            # None values serialize to a bare key (no '=').
            if value is None:
                items.append(key)
                continue
            items.append('%s=%s' % (
                key, quote_header_value(value, allow_token=allow_token)))
    else:
        items = [quote_header_value(item, allow_token=allow_token)
                 for item in iterable]
    return ', '.join(items)
def parse_list_header(value):
    """Parse lists as described by RFC 2068 Section 2.

    In particular, parse comma-separated lists where the elements of
    the list may include quoted-strings.  A quoted-string could
    contain a comma.  A non-quoted string could have quotes in the
    middle.  Quotes are removed automatically after parsing.

    It basically works like :func:`parse_set_header` just that items
    may appear multiple times and case sensitivity is preserved.

    The return value is a standard :class:`list`:

    >>> parse_list_header('token, "quoted value"')
    ['token', 'quoted value']

    To create a header from the :class:`list` again, use the
    :func:`dump_header` function.

    :param value: a string with a list header.
    :return: :class:`list`
    """
    # Quoted items are stripped of their surrounding quotes and unescaped;
    # bare tokens pass through untouched.
    return [unquote_header_value(item[1:-1]) if item[:1] == item[-1:] == '"'
            else item
            for item in _parse_list_header(value)]
def parse_dict_header(value, cls=dict):
    """Parse lists of key, value pairs as described by RFC 2068 Section 2 and
    convert them into a python dict (or any other mapping object created from
    the type with a dict like interface provided by the `cls` argument):

    >>> d = parse_dict_header('foo="is a fish", bar="as well"')
    >>> type(d) is dict
    True
    >>> sorted(d.items())
    [('bar', 'as well'), ('foo', 'is a fish')]

    If there is no value for a key it will be `None`:

    >>> parse_dict_header('key_without_value')
    {'key_without_value': None}

    To create a header from the :class:`dict` again, use the
    :func:`dump_header` function.

    .. versionchanged:: 0.9
       Added support for `cls` argument.

    :param value: a string with a dict header.
    :param cls: callable to use for storage of parsed results.
    :return: an instance of `cls`
    """
    result = cls()
    if not isinstance(value, text_type):
        # Bytes input is tunneled through latin1 first.  XXX: validate
        value = bytes_to_wsgi(value)
    for item in _parse_list_header(value):
        # Items without '=' are bare keys mapped to None.
        if '=' not in item:
            result[item] = None
            continue
        name, value = item.split('=', 1)
        # Strip surrounding quotes and unescape quoted-string values.
        if value[:1] == value[-1:] == '"':
            value = unquote_header_value(value[1:-1])
        result[name] = value
    return result
def parse_options_header(value):
    """Parse a ``Content-Type`` like header into a tuple with the content
    type and the options:

    >>> parse_options_header('text/html; charset=utf8')
    ('text/html', {'charset': 'utf8'})

    This should not be used to parse ``Cache-Control`` like headers that use
    a slightly different format.  For these headers use the
    :func:`parse_dict_header` function.

    .. versionadded:: 0.5

    :param value: the header to parse.
    :return: (str, options)
    """
    def _tokenize(string):
        # Yield one (key, value) pair per ';'-separated piece; value is
        # None for pieces that have no '='.
        for match in _option_header_piece_re.finditer(string):
            key, value = match.groups()
            key = unquote_header_value(key)
            if value is not None:
                # Filenames get the browser-compatible unquoting treatment.
                value = unquote_header_value(value, key == 'filename')
            yield key, value

    if not value:
        return '', {}

    # Prefixing with ';' lets the main value be parsed like any other piece:
    # the first pair becomes the content type, the rest become the options.
    parts = _tokenize(';' + value)
    name = next(parts)[0]
    extra = dict(parts)
    return name, extra
def parse_accept_header(value, cls=None):
    """Parses an HTTP Accept-* header.  This does not implement a complete
    valid algorithm but one that supports at least value and quality
    extraction.

    Returns a new :class:`Accept` object (basically a list of
    ``(value, quality)`` tuples sorted by the quality with some additional
    accessor methods).

    The second parameter can be a subclass of :class:`Accept` that is created
    with the parsed values and returned.

    :param value: the accept header string to be parsed.
    :param cls: the wrapper class for the return value (can be
                :class:`Accept` or a subclass thereof)
    :return: an instance of `cls`.
    """
    if cls is None:
        cls = Accept
    if not value:
        return cls(None)

    parsed = []
    for match in _accept_re.finditer(value):
        quality_match = match.group(2)
        if quality_match:
            # Clamp the quality into the valid 0..1 range.
            quality = max(min(float(quality_match), 1), 0)
        else:
            # Missing q parameter means full quality.
            quality = 1
        parsed.append((match.group(1), quality))
    return cls(parsed)
def parse_cache_control_header(value, on_update=None, cls=None):
    """Parse a cache control header.  The RFC differs between response and
    request cache control, this method does not.  It's your responsibility
    to not use the wrong control statements.

    .. versionadded:: 0.5
       The `cls` was added.  If not specified an immutable
       :class:`~werkzeug.datastructures.RequestCacheControl` is returned.

    :param value: a cache control header to be parsed.
    :param on_update: an optional callable that is called every time a value
                      on the :class:`~werkzeug.datastructures.CacheControl`
                      object is changed.
    :param cls: the class for the returned object.  By default
                :class:`~werkzeug.datastructures.RequestCacheControl` is used.
    :return: a `cls` object.
    """
    if cls is None:
        cls = RequestCacheControl
    # A missing header still yields a (empty) cache control object so that
    # callers can mutate it and have on_update fire.
    if not value:
        return cls(None, on_update)
    return cls(parse_dict_header(value), on_update)
def parse_set_header(value, on_update=None):
    """Parse a set-like header and return a
    :class:`~werkzeug.datastructures.HeaderSet` object:

    >>> hs = parse_set_header('token, "quoted value"')

    The return value is an object that treats the items case-insensitively
    and keeps the order of the items:

    >>> 'TOKEN' in hs
    True
    >>> hs.index('quoted value')
    1
    >>> hs
    HeaderSet(['token', 'quoted value'])

    To create a header from the :class:`HeaderSet` again, use the
    :func:`dump_header` function.

    :param value: a set header to be parsed.
    :param on_update: an optional callable that is called every time a
                      value on the :class:`~werkzeug.datastructures.HeaderSet`
                      object is changed.
    :return: a :class:`~werkzeug.datastructures.HeaderSet`
    """
    # A missing header still yields an empty, mutable HeaderSet so callers
    # can add to it and have on_update fire.
    if not value:
        return HeaderSet(None, on_update)
    return HeaderSet(parse_list_header(value), on_update)
def parse_authorization_header(value):
    """Parse an HTTP basic/digest authorization header transmitted by the web
    browser.  The return value is either `None` if the header was invalid or
    not given, otherwise an :class:`~werkzeug.datastructures.Authorization`
    object.

    :param value: the authorization header to parse.
    :return: a :class:`~werkzeug.datastructures.Authorization` object or `None`.
    """
    if not value:
        return
    value = wsgi_to_bytes(value)
    try:
        auth_type, auth_info = value.split(None, 1)
        auth_type = auth_type.lower()
    except ValueError:
        # Header did not contain both a scheme and credentials.
        return
    if auth_type == b'basic':
        try:
            username, password = base64.b64decode(auth_info).split(b':', 1)
        except Exception:
            # Malformed base64 or missing ':' separator -- treat as invalid.
            return
        return Authorization('basic', {'username': bytes_to_wsgi(username),
                                       'password': bytes_to_wsgi(password)})
    elif auth_type == b'digest':
        auth_map = parse_dict_header(auth_info)
        # These parameters are mandatory for a digest challenge response.
        for key in 'username', 'realm', 'nonce', 'uri', 'response':
            if key not in auth_map:
                return
        if 'qop' in auth_map:
            # When qop is sent, nc and cnonce are required as well.
            if not auth_map.get('nc') or not auth_map.get('cnonce'):
                return
        return Authorization('digest', auth_map)
def parse_www_authenticate_header(value, on_update=None):
    """Parse an HTTP WWW-Authenticate header into a
    :class:`~werkzeug.datastructures.WWWAuthenticate` object.

    :param value: a WWW-Authenticate header to parse.
    :param on_update: an optional callable that is called every time a value
                      on the :class:`~werkzeug.datastructures.WWWAuthenticate`
                      object is changed.
    :return: a :class:`~werkzeug.datastructures.WWWAuthenticate` object.
    """
    if not value:
        return WWWAuthenticate(on_update=on_update)
    try:
        auth_type, auth_info = value.split(None, 1)
        auth_type = auth_type.lower()
    except (ValueError, AttributeError):
        # A scheme without parameters (e.g. just "Negotiate").
        return WWWAuthenticate(value.strip().lower(), on_update=on_update)
    return WWWAuthenticate(auth_type, parse_dict_header(auth_info),
                           on_update)
def parse_if_range_header(value):
    """Parses an if-range header which can be an etag or a date.  Returns
    a :class:`~werkzeug.datastructures.IfRange` object.

    .. versionadded:: 0.7
    """
    if not value:
        return IfRange()
    # Try to interpret the value as an HTTP date first; if that fails the
    # value must be an entity tag.
    date = parse_date(value)
    if date is not None:
        return IfRange(date=date)
    # drop weakness information
    return IfRange(unquote_etag(value)[0])
def parse_range_header(value, make_inclusive=True):
    """Parses a range header into a :class:`~werkzeug.datastructures.Range`
    object.  If the header is missing or malformed `None` is returned.
    `ranges` is a list of ``(start, stop)`` tuples where the ranges are
    non-inclusive.

    .. versionadded:: 0.7
    """
    if not value or '=' not in value:
        return None

    ranges = []
    last_end = 0
    units, rng = value.split('=', 1)
    units = units.strip().lower()

    for item in rng.split(','):
        item = item.strip()
        if '-' not in item:
            return None
        if item.startswith('-'):
            # Suffix range (e.g. "-500"); only valid when no open-ended
            # range was seen before.
            if last_end < 0:
                return None
            try:
                begin = int(item)
            except ValueError:
                # Docstring promises None for malformed headers instead of
                # letting int() raise.
                return None
            end = None
            last_end = -1
        else:
            begin, end = item.split('-', 1)
            try:
                begin = int(begin)
            except ValueError:
                return None
            # Ranges must be ascending and must not follow an open range.
            if begin < last_end or last_end < 0:
                return None
            if end:
                try:
                    # Header stop is inclusive; internally it's exclusive.
                    end = int(end) + 1
                except ValueError:
                    return None
                if begin >= end:
                    return None
            else:
                end = None
            last_end = end
        ranges.append((begin, end))

    return Range(units, ranges)
def parse_content_range_header(value, on_update=None):
    """Parses a range header into a
    :class:`~werkzeug.datastructures.ContentRange` object or `None` if
    parsing is not possible.

    .. versionadded:: 0.7

    :param value: a content range header to be parsed.
    :param on_update: an optional callable that is called every time a value
                      on the :class:`~werkzeug.datastructures.ContentRange`
                      object is changed.
    """
    if value is None:
        return None
    try:
        units, rangedef = (value or '').strip().split(None, 1)
    except ValueError:
        return None

    if '/' not in rangedef:
        return None
    rng, length = rangedef.split('/', 1)
    # '*' after the slash means the total length is unknown.
    if length == '*':
        length = None
    elif length.isdigit():
        length = int(length)
    else:
        return None

    if rng == '*':
        # Unsatisfied range: no start/stop, only the total length.
        return ContentRange(units, None, None, length, on_update=on_update)
    elif '-' not in rng:
        return None

    start, stop = rng.split('-', 1)
    try:
        start = int(start)
        # Header stop is inclusive; the internal representation is exclusive.
        stop = int(stop) + 1
    except ValueError:
        return None

    if is_byte_range_valid(start, stop, length):
        return ContentRange(units, start, stop, length, on_update=on_update)
def quote_etag(etag, weak=False):
    """Quote an etag.

    :param etag: the etag to quote.
    :param weak: set to `True` to tag it "weak".
    """
    if '"' in etag:
        raise ValueError('invalid etag')
    etag = '"%s"' % etag
    if weak:
        # RFC 7232 defines the weak indicator as uppercase ``W/``; the
        # previous lowercase ``w/`` was not valid per the grammar.
        etag = 'W/' + etag
    return etag
def unquote_etag(etag):
    """Unquote a single etag:

    >>> unquote_etag('w/"bar"')
    ('bar', True)
    >>> unquote_etag('"bar"')
    ('bar', False)

    :param etag: the etag identifier to unquote.
    :return: a ``(etag, weak)`` tuple.
    """
    if not etag:
        return None, None
    etag = etag.strip()
    # Both lowercase and uppercase weak prefixes are accepted.
    weak = etag[:2] in ('w/', 'W/')
    if weak:
        etag = etag[2:]
    if etag[:1] == etag[-1:] == '"':
        etag = etag[1:-1]
    return etag, weak
def parse_etags(value):
    """Parse an etag header.

    :param value: the tag header to parse
    :return: an :class:`~werkzeug.datastructures.ETags` object.
    """
    if not value:
        return ETags()
    strong = []
    weak = []
    end = len(value)
    pos = 0
    while pos < end:
        match = _etag_re.match(value, pos)
        if match is None:
            # Stop at the first unparseable piece instead of failing.
            break
        is_weak, quoted, raw = match.groups()
        if raw == '*':
            # '*' matches anything; represented by the star_tag flag.
            return ETags(star_tag=True)
        elif quoted:
            raw = quoted
        if is_weak:
            weak.append(raw)
        else:
            strong.append(raw)
        pos = match.end()
    return ETags(strong, weak)
def generate_etag(data):
    """Generate an etag for some data.

    :param data: binary payload (bytes) to hash.
    :return: hex encoded MD5 digest of *data*.
    """
    # MD5 is acceptable here: the digest serves as a cache validator, not
    # as a security measure.
    return md5(data).hexdigest()
def parse_date(value):
    """Parse one of the following date formats into a datetime object:

    .. sourcecode:: text

        Sun, 06 Nov 1994 08:49:37 GMT  ; RFC 822, updated by RFC 1123
        Sunday, 06-Nov-94 08:49:37 GMT ; RFC 850, obsoleted by RFC 1036
        Sun Nov  6 08:49:37 1994       ; ANSI C's asctime() format

    If parsing fails the return value is `None`.

    :param value: a string with a supported date format.
    :return: a :class:`datetime.datetime` object.
    """
    if not value:
        return None
    parsed = parsedate_tz(value.strip())
    if parsed is None:
        return None
    try:
        year = parsed[0]
        # parsedate_tz does not tell us whether two digit years were part
        # of the string, or if they were prefixed with two zeroes.  So what
        # we do is to assume that 69-99 refer to 1900, and everything
        # below to 2000.
        if 0 <= year <= 68:
            year += 2000
        elif 69 <= year <= 99:
            year += 1900
        # Normalize to UTC by subtracting the parsed timezone offset.
        return (datetime(*((year,) + parsed[1:7])) -
                timedelta(seconds=parsed[-1] or 0))
    except (ValueError, OverflowError):
        return None
def _dump_date(d, delim):
    """Used for `http_date` and `cookie_date`.

    :param d: `None` (meaning "now"), a `datetime`, a numeric timestamp or
              a struct_time; normalized to a UTC struct_time below.
    :param delim: separator between day, month and year (' ' or '-').
    """
    if d is None:
        d = gmtime()
    elif isinstance(d, datetime):
        d = d.utctimetuple()
    elif isinstance(d, (integer_types, float)):
        d = gmtime(d)
    # Weekday/month names are emitted manually so the result is locale
    # independent, as required by the HTTP date format.
    return '%s, %02d%s%s%s%s %02d:%02d:%02d GMT' % (
        ('Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun')[d.tm_wday],
        d.tm_mday, delim,
        ('Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep',
         'Oct', 'Nov', 'Dec')[d.tm_mon - 1],
        delim, str(d.tm_year), d.tm_hour, d.tm_min, d.tm_sec
    )
def cookie_date(expires=None):
    """Formats the time to ensure compatibility with Netscape's cookie
    standard.

    Accepts a floating point number expressed in seconds since the epoch in, a
    datetime object or a timetuple.  All times in UTC.  The :func:`parse_date`
    function can be used to parse such a date.

    Outputs a string in the format ``Wdy, DD-Mon-YYYY HH:MM:SS GMT``.

    :param expires: If provided that date is used, otherwise the current.
    """
    # Cookie dates use dashes between day, month and year.
    return _dump_date(expires, '-')
def http_date(timestamp=None):
    """Formats the time to match the RFC1123 date format.

    Accepts a floating point number expressed in seconds since the epoch in, a
    datetime object or a timetuple.  All times in UTC.  The :func:`parse_date`
    function can be used to parse such a date.

    Outputs a string in the format ``Wdy, DD Mon YYYY HH:MM:SS GMT``.

    :param timestamp: If provided that date is used, otherwise the current.
    """
    # HTTP dates use spaces between day, month and year.
    return _dump_date(timestamp, ' ')
def is_resource_modified(environ, etag=None, data=None, last_modified=None):
    """Convenience method for conditional requests.

    :param environ: the WSGI environment of the request to be checked.
    :param etag: the etag for the response for comparison.
    :param data: or alternatively the data of the response to automatically
                 generate an etag using :func:`generate_etag`.
    :param last_modified: an optional date of the last modification.
    :return: `True` if the resource was modified, otherwise `False`.
    """
    if etag is None and data is not None:
        etag = generate_etag(data)
    elif data is not None:
        raise TypeError('both data and etag given')
    # Conditional requests only make sense for safe methods.
    if environ['REQUEST_METHOD'] not in ('GET', 'HEAD'):
        return False

    unmodified = False
    if isinstance(last_modified, string_types):
        last_modified = parse_date(last_modified)

    # ensure that microsecond is zero because the HTTP spec does not transmit
    # that either and we might have some false positives.  See issue #39
    if last_modified is not None:
        last_modified = last_modified.replace(microsecond=0)

    modified_since = parse_date(environ.get('HTTP_IF_MODIFIED_SINCE'))

    if modified_since and last_modified and last_modified <= modified_since:
        unmodified = True
    if etag:
        # If-None-Match takes precedence over If-Modified-Since.
        if_none_match = parse_etags(environ.get('HTTP_IF_NONE_MATCH'))
        if if_none_match:
            unmodified = if_none_match.contains_raw(etag)

    return not unmodified
def remove_entity_headers(headers, allowed=('expires', 'content-location')):
    """Remove all entity headers from a list or :class:`Headers` object.  This
    operation works in-place.  `Expires` and `Content-Location` headers are
    by default not removed.  The reason for this is :rfc:`2616` section
    10.3.5 which specifies some entity headers that should be sent.

    .. versionchanged:: 0.5
       added `allowed` parameter.

    :param headers: a list or :class:`Headers` object.
    :param allowed: a list of headers that should still be allowed even though
                    they are entity headers.
    """
    # Matching is case-insensitive, so normalize the allow-list up front.
    keep = set(name.lower() for name in allowed)
    headers[:] = [(key, value) for key, value in headers
                  if not is_entity_header(key) or key.lower() in keep]
def remove_hop_by_hop_headers(headers):
    """Remove all HTTP/1.1 "Hop-by-Hop" headers from a list or
    :class:`Headers` object.  This operation works in-place.

    .. versionadded:: 0.5

    :param headers: a list or :class:`Headers` object.
    """
    retained = [(key, value) for key, value in headers
                if not is_hop_by_hop_header(key)]
    # Slice assignment mutates the caller's object in place.
    headers[:] = retained
def is_entity_header(header):
    """Check if a header is an entity header.

    .. versionadded:: 0.5

    :param header: the header to test.
    :return: `True` if it's an entity header, `False` otherwise.
    """
    # Comparison is case-insensitive; _entity_headers stores lowercase names.
    return header.lower() in _entity_headers
def is_hop_by_hop_header(header):
    """Check if a header is an HTTP/1.1 "Hop-by-Hop" header.

    .. versionadded:: 0.5

    :param header: the header to test.
    :return: `True` if it's a hop-by-hop header, `False` otherwise.
    """
    # Comparison is case-insensitive; the set stores lowercase names.
    return header.lower() in _hop_by_hop_headers
def parse_cookie(header, charset='utf-8', errors='replace', cls=None):
    """Parse a cookie.  Either from a string or WSGI environ.

    Per default encoding errors are ignored.  If you want a different behavior
    you can set `errors` to ``'replace'`` or ``'strict'``.  In strict mode a
    :exc:`HTTPUnicodeError` is raised.

    .. versionchanged:: 0.5
       This function now returns a :class:`TypeConversionDict` instead of a
       regular dict.  The `cls` parameter was added.

    :param header: the header to be used to parse the cookie.  Alternatively
                   this can be a WSGI environment.
    :param charset: the charset for the cookie values.
    :param errors: the error behavior for the charset decoding.
    :param cls: an optional dict class to use.  If this is not specified
                or `None` the default :class:`TypeConversionDict` is
                used.
    """
    if isinstance(header, dict):
        header = header.get('HTTP_COOKIE', '')
    elif header is None:
        header = ''

    # If the value is an unicode string it's mangled through latin1.  This
    # is done because on PEP 3333 on Python 3 all headers are assumed latin1
    # which however is incorrect for cookies, which are sent in page encoding.
    # As a result we re-encode through latin1 and decode each pair with the
    # requested page charset below.
    if isinstance(header, text_type):
        header = header.encode('latin1', 'replace')

    if cls is None:
        cls = TypeConversionDict

    def _parse_pairs():
        # Decode each raw key/value pair with the requested charset.
        for key, val in _cookie_parse_impl(header):
            key = to_unicode(key, charset, errors, allow_none_charset=True)
            val = to_unicode(val, charset, errors, allow_none_charset=True)
            yield try_coerce_native(key), val

    return cls(_parse_pairs())
def dump_cookie(key, value='', max_age=None, expires=None, path='/',
                domain=None, secure=False, httponly=False,
                charset='utf-8', sync_expires=True):
    """Creates a new Set-Cookie header without the ``Set-Cookie`` prefix
    The parameters are the same as in the cookie Morsel object in the
    Python standard library but it accepts unicode data, too.

    On Python 3 the return value of this function will be a unicode
    string, on Python 2 it will be a native string.  In both cases the
    return value is usually restricted to ascii as the vast majority of
    values are properly escaped, but that is no guarantee.  If a unicode
    string is returned it's tunneled through latin1 as required by
    PEP 3333.

    The return value is not ASCII safe if the key contains unicode
    characters.  This is technically against the specification but
    happens in the wild.  It's strongly recommended to not use
    non-ASCII values for the keys.

    :param max_age: should be a number of seconds, or `None` (default) if
                    the cookie should last only as long as the client's
                    browser session.  Additionally `timedelta` objects
                    are accepted, too.
    :param expires: should be a `datetime` object or unix timestamp.
    :param path: limits the cookie to a given path, per default it will
                 span the whole domain.
    :param domain: Use this if you want to set a cross-domain cookie. For
                   example, ``domain=".example.com"`` will set a cookie
                   that is readable by the domain ``www.example.com``,
                   ``foo.example.com`` etc. Otherwise, a cookie will only
                   be readable by the domain that set it.
    :param secure: The cookie will only be available via HTTPS
    :param httponly: disallow JavaScript to access the cookie.  This is an
                     extension to the cookie standard and probably not
                     supported by all browsers.
    :param charset: the encoding for unicode values.
    :param sync_expires: automatically set expires if max_age is defined
                         but expires not.
    """
    key = to_bytes(key, charset)
    value = to_bytes(value, charset)

    if path is not None:
        path = iri_to_uri(path, charset)
    domain = _make_cookie_domain(domain)
    if isinstance(max_age, timedelta):
        # Normalize a timedelta into a plain number of seconds.
        max_age = (max_age.days * 60 * 60 * 24) + max_age.seconds
    if expires is not None:
        if not isinstance(expires, string_types):
            expires = cookie_date(expires)
    elif max_age is not None and sync_expires:
        # Derive Expires from Max-Age for clients that ignore Max-Age.
        expires = to_bytes(cookie_date(time() + max_age))

    buf = [key + b'=' + _cookie_quote(value)]

    # XXX: In theory all of these parameters that are not marked with `None`
    # should be quoted.  Because stdlib did not quote it before I did not
    # want to introduce quoting there now.
    for k, v, q in ((b'Domain', domain, True),
                    (b'Expires', expires, False,),
                    (b'Max-Age', max_age, False),
                    (b'Secure', secure, None),
                    (b'HttpOnly', httponly, None),
                    (b'Path', path, False)):
        if q is None:
            # Boolean attribute: emitted as a bare key when truthy.
            if v:
                buf.append(k)
            continue

        if v is None:
            continue

        tmp = bytearray(k)
        if not isinstance(v, (bytes, bytearray)):
            v = to_bytes(text_type(v), charset)
        if q:
            v = _cookie_quote(v)
        tmp += b'=' + v
        buf.append(bytes(tmp))

    # The return value will be an incorrectly encoded latin1 header on
    # Python 3 for consistency with the headers object and a bytestring
    # on Python 2 because that's how the API makes more sense.
    rv = b'; '.join(buf)
    if not PY2:
        rv = rv.decode('latin1')
    return rv
def is_byte_range_valid(start, stop, length):
    """Return ``True`` if the byte content range is valid for *length*.

    *start*/*stop* are the half-open range endpoints (either may be
    ``None`` only together); *length* is the total resource size, or
    ``None`` when unknown.

    .. versionadded:: 0.7
    """
    # A range must define both endpoints or neither of them.
    if (start is None) != (stop is None):
        return False
    if start is None:
        # No explicit range: any non-negative (or unknown) length is fine.
        return length is None or length >= 0
    if length is None:
        # Unknown total length: only the endpoints themselves can be checked.
        return 0 <= start < stop
    # Known length: start must precede stop and lie inside the resource.
    return start < stop and 0 <= start < length
# circular dependency fun
from werkzeug.datastructures import Accept, HeaderSet, ETags, Authorization, \
WWWAuthenticate, TypeConversionDict, IfRange, Range, ContentRange, \
RequestCacheControl
# DEPRECATED
# backwards compatible imports
from werkzeug.datastructures import MIMEAccept, CharsetAccept, \
LanguageAccept, Headers
from werkzeug.urls import iri_to_uri
| apache-2.0 |
dongjoon-hyun/spark | examples/src/main/python/mllib/gradient_boosting_regression_example.py | 27 | 2404 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Gradient Boosted Trees Regression Example.
"""
from pyspark import SparkContext
# $example on$
from pyspark.mllib.tree import GradientBoostedTrees, GradientBoostedTreesModel
from pyspark.mllib.util import MLUtils
# $example off$
if __name__ == "__main__":
    sc = SparkContext(appName="PythonGradientBoostedTreesRegressionExample")
    # $example on$
    # Load and parse the data file.
    data = MLUtils.loadLibSVMFile(sc, "data/mllib/sample_libsvm_data.txt")
    # Split the data into training and test sets (30% held out for testing)
    (trainingData, testData) = data.randomSplit([0.7, 0.3])
    # Train a GradientBoostedTrees model.
    # Notes: (a) Empty categoricalFeaturesInfo indicates all features are continuous.
    # (b) Use more iterations in practice.
    model = GradientBoostedTrees.trainRegressor(trainingData,
                                                categoricalFeaturesInfo={}, numIterations=3)
    # Evaluate model on test instances and compute test error
    predictions = model.predict(testData.map(lambda x: x.features))
    # Pair each true label with the model's prediction for the same row.
    labelsAndPredictions = testData.map(lambda lp: lp.label).zip(predictions)
    # Mean squared error: average of squared (label - prediction) differences.
    testMSE = labelsAndPredictions.map(lambda lp: (lp[0] - lp[1]) * (lp[0] - lp[1])).sum() /\
        float(testData.count())
    print('Test Mean Squared Error = ' + str(testMSE))
    print('Learned regression GBT model:')
    print(model.toDebugString())
    # Save and load model
    model.save(sc, "target/tmp/myGradientBoostingRegressionModel")
    sameModel = GradientBoostedTreesModel.load(sc, "target/tmp/myGradientBoostingRegressionModel")
    # $example off$
| apache-2.0 |
shl198/Projects | Modules/f07_picard.py | 2 | 5095 | import subprocess,os
def chunk(l, n):
    """Split sequence *l* into consecutive pieces of at most *n* items.

    A size below 1 is treated as 1, so the step is never zero or
    negative. The final piece may be shorter than *n*.
    """
    size = max(n, 1)
    return [l[start:start + size] for start in range(0, len(l), size)]
def read_group(ID, sample, platform, library, platunit):
    """Build a read-group string with literal ``\\t`` separators.

    Example result:
    @RG\\tID:chosgroup1\\tSM:sample1\\tPL:illumina\\tLB:lib1\\tPU:unit1
    """
    fields = ('@RG', 'ID:' + ID, 'SM:' + sample, 'PL:' + platform,
              'LB:' + library, 'PU:' + platunit)
    return '\\t'.join(fields)
def sam2sortbam(picard, samfiles):
    """Coordinate-sort each SAM file with Picard SortSam.

    One Picard invocation runs (synchronously) per input file. Returns
    the list of produced file names, each input's trailing ``sam``
    replaced by ``sort.bam``.
    """
    sorted_files = []
    for sam in samfiles:
        sort_bam = sam[:-3] + 'sort.bam'
        sorted_files.append(sort_bam)
        command = ('java -jar {picard} SortSam INPUT={input} OUTPUT={output} '
                   'SORT_ORDER=coordinate').format(picard=picard, input=sam,
                                                   output=sort_bam)
        # check_call raises on a non-zero exit, aborting the batch.
        subprocess.check_call(command, shell=True)
    return sorted_files
def markduplicates(picard, sortBams):
    """Mark duplicate reads in each coordinate-sorted BAM with Picard.

    All MarkDuplicates invocations are chained with ``&&`` into a single
    shell command, so processing stops at the first failure. A ``tmp``
    directory is created in the working directory for Picard's temp
    files; metrics go to ``metrics.txt`` in the working directory.

    * picard: path to picard.jar
    * sortBams: list of ``*.sort.bam`` file names

    Returns the list of ``*.dedup.bam`` output file names.
    """
    if not os.path.exists('tmp'):
        os.makedirs('tmp')
    dedup_files = []
    commands = []
    for bam in sortBams:
        # 'name.sort.bam' -> 'name.dedup.bam'
        dedup = bam[:-8] + 'dedup.bam'
        dedup_files.append(dedup)
        commands.append(
            ('java -Djava.io.tmpdir=tmp -jar {picard} MarkDuplicates I={input} O={output} CREATE_INDEX=true '
             'METRICS_FILE=metrics.txt MAX_RECORDS_IN_RAM=8000000 '
             'MAX_FILE_HANDLES_FOR_READ_ENDS_MAP=1000 '
             'VALIDATION_STRINGENCY=LENIENT').format(picard=picard, input=bam,
                                                     output=dedup))
    # Guard: the original code ran an empty shell command when sortBams
    # was empty; skip the subprocess call entirely in that case.
    if commands:
        subprocess.check_call(' && '.join(commands), shell=True)
    return dedup_files
def addReadGroup(picard,sortBamFiles,readgroups,batch=1):
    """
    This function adds readgroup to a list of samfiles

    Runs Picard AddOrReplaceReadGroups for every BAM in sortBamFiles,
    ``batch`` files at a time: each batch is launched as background
    shell jobs ('&') followed by 'wait', so the batch runs concurrently.

    * picard: path to picard.jar
    * sortBamFiles: sorted BAM file names (suffix '.bam' replaced by 'adrg.bam')
    * readgroups: one read-group string per BAM, as built by read_group()
    * batch: number of Picard processes to run concurrently

    Returns the list of '*.adrg.bam' output file names.
    """
    batch = min(batch,len(sortBamFiles))
    subBams = chunk(sortBamFiles,batch)
    subGroups = chunk(readgroups,batch)
    sortBams = []
    for Bams,Groups in zip(subBams,subGroups):
        cmd = ''
        for sam,rg in zip(Bams,Groups):
            sortbam = sam[:-3] + 'adrg.bam'
            sortBams.append(sortbam)
            readgroup = rg.split('\\t')
            # NOTE(review): the slices assume the exact field layout produced
            # by read_group(); ID's [3:-1] drops the field's last character —
            # confirm that is intended.
            ID = readgroup[1][3:-1]
            SM = readgroup[2][3:]
            # PL/LB/PU are hard-coded; the parsed values are currently ignored.
            PL = 'illumina' #readgroup[3][3:-1]
            LB = 'lib20000' #readgroup[4][3:-1]
            PU = 'unit1' #readgroup[5][3:]
            cmd = cmd + ('java -jar {picard} AddOrReplaceReadGroups I={input} O={sortbam} SO=coordinate '
                     'RGID={ID} RGSM={SM} RGPL={PL} RGLB={LB} RGPU={PU} & ').format(
                    picard=picard,input=sam,sortbam=sortbam,ID=ID,SM=SM,PL=PL,LB=LB,
                    PU=PU)
        print cmd
        # Append shell 'wait' so the call blocks until the whole batch ends.
        subprocess.call(cmd + 'wait',shell=True)
#     # the file name in sortBams is filename.sort.sort.bam, need to change to filename.sort.bam
#     final_sort_bams = []
#     for bam in sortBams:
#         finalSortBam = bam[:-13] + 'sort.bam'
#         final_sort_bams.append(finalSortBam)
#         os.remove(finalSortBam)
#         renameCmd = ('mv {sortBam} {finalSortBam}').format(sortBam=bam,finalSortBam=finalSortBam)
#         subprocess.check_call(renameCmd,shell=True)
#
    return sortBams
def sam2fastq(picard, samFiles, endType):
    """Convert SAM/BAM files to gzipped fastq with Picard SamToFastq.

    Every conversion is launched as a background shell job ('&') and the
    call blocks on a final 'wait', so all files convert concurrently.

    * samFiles: list of sam/bam file names
    * endType: 'pair' for paired-end data, anything else for single end

    Returns [['f_1.fq.gz', 'f_2.fq.gz'], ...] for paired-end input and
    [['f.fq.gz'], ...] for single-end input.
    """
    fqs = []
    pipeline = ''
    paired = endType == 'pair'
    for sam in samFiles:
        stem = sam[:-4]
        if paired:
            fq1 = stem + '_1.fq.gz'
            fq2 = stem + '_2.fq.gz'
            fqs.append([fq1, fq2])
            job = ('java -jar {picard} SamToFastq I={input} F={fq1} F2={fq2} '
                   'VALIDATION_STRINGENCY=LENIENT').format(
                       picard=picard, input=sam, fq1=fq1, fq2=fq2)
        else:
            fq = stem + '.fq.gz'
            fqs.append([fq])
            job = ('java -jar {picard} SamToFastq I={input} F={fq} '
                   'VALIDATION_STRINGENCY=LENIENT').format(
                       picard=picard, input=sam, fq=fq)
        pipeline += job + ' & '
    subprocess.call(pipeline + 'wait', shell=True)
    return fqs
def sortVCF(picard,vcfFiles,fa_dict,batch=1):
    """This function reorders chromosome in vcf, making it the same as reference

    Runs Picard SortVcf on ``batch`` files at a time (background shell
    jobs plus 'wait'); each input file is then replaced in place by its
    sorted version, so the original file names are preserved.

    * picard: path to picard.jar
    * vcfFiles: list of .vcf file names
    * fa_dict: reference.dict that GATK used
    * batch: number of Picard processes to run concurrently
    """
    VCFs = chunk(vcfFiles,batch)
    # NOTE(review): this outer assignment is dead code — outFiles is
    # re-initialized at the top of every loop iteration below.
    outFiles = []
    for vcfs in VCFs:
        cmd = ''
        outFiles = []
        for vcf in vcfs:
            # 'name.vcf' -> 'name.sort.vcf'
            outVCF = vcf[:-3] + 'sort.vcf'
            outFiles.append(outVCF)
            cmd = cmd + ('java -jar {picard} SortVcf I={input} O={output} SEQUENCE_DICTIONARY={fa_dict} & ').format(
                picard=picard,input=vcf,output=outVCF,fa_dict=fa_dict)
        print cmd
        # Block until all background jobs of this batch finish.
        subprocess.call(cmd + 'wait',shell=True)
        # remove the original files
        for f_in,f_out in zip(vcfs,outFiles):
            os.remove(f_in)
            os.rename(f_out,f_in)
vmanoria/bluemix-hue-filebrowser | hue-3.8.1-bluemix/desktop/core/ext-py/cx_Oracle-5.1.2/test/LongVar.py | 34 | 4064 | """Module for testing long and long raw variables."""
import sys
class TestLongVar(BaseTestCase):
    """Tests binding and fetching LONG and LONG RAW columns.

    Relies on a live Oracle connection and the TestLongs / TestLongRaws
    tables provided by the suite's setup; each run truncates and
    repopulates the table it uses.
    """
    def __PerformTest(self, a_Type, a_InputType):
        # a_Type is "Long" or "LongRaw"; a_InputType is the matching
        # cx_Oracle bind type (LONG_STRING / LONG_BINARY).
        self.cursor.execute("truncate table Test%ss" % a_Type)
        longString = ""
        for i in range(1, 11):
            char = chr(ord('A') + i - 1)
            # Row i carries the first i letters of the alphabet,
            # 25000 characters each, so lengths grow per row.
            longString += char * 25000
            self.cursor.setinputsizes(p_LongString = a_InputType)
            # LONG RAW must be bound as bytes on Python 3.
            if a_Type == "LongRaw" and sys.version_info[0] >= 3:
                bindValue = longString.encode("ascii")
            else:
                bindValue = longString
            self.cursor.execute("""
                    insert into Test%ss (
                      IntCol,
                      %sCol
                    ) values (
                      :p_IntegerValue,
                      :p_LongString
                    )""" % (a_Type, a_Type),
                    p_IntegerValue = i,
                    p_LongString = bindValue)
        self.connection.commit()
        self.cursor.setoutputsize(250000, 2)
        self.cursor.execute("""
                select *
                from Test%ss
                order by IntCol""" % a_Type)
        # Rebuild the expected value incrementally and compare row by row.
        longString = ""
        while 1:
            row = self.cursor.fetchone()
            if row is None:
                break
            integerValue, fetchedValue = row
            char = chr(ord('A') + integerValue - 1)
            longString += char * 25000
            if a_Type == "LongRaw" and sys.version_info[0] >= 3:
                actualValue = longString.encode("ascii")
            else:
                actualValue = longString
            self.failUnlessEqual(len(fetchedValue), integerValue * 25000)
            self.failUnlessEqual(fetchedValue, actualValue)
    def testLongs(self):
        "test binding and fetching long data"
        self.__PerformTest("Long", cx_Oracle.LONG_STRING)
    def testLongRaws(self):
        "test binding and fetching long raw data"
        self.__PerformTest("LongRaw", cx_Oracle.LONG_BINARY)
    def testLongCursorDescription(self):
        "test cursor description is accurate for longs"
        self.cursor.execute("select * from TestLongs")
        self.failUnlessEqual(self.cursor.description,
                [ ('INTCOL', cx_Oracle.NUMBER, 10, 22, 9, 0, 0),
                  ('LONGCOL', cx_Oracle.LONG_STRING, -1, 0, 0, 0, 0) ])
    def testLongRawCursorDescription(self):
        "test cursor description is accurate for long raws"
        self.cursor.execute("select * from TestLongRaws")
        self.failUnlessEqual(self.cursor.description,
                [ ('INTCOL', cx_Oracle.NUMBER, 10, 22, 9, 0, 0),
                  ('LONGRAWCOL', cx_Oracle.LONG_BINARY, -1, 0, 0, 0, 0) ])
    def testSetOutputSizesAll(self):
        "test setoutputsizes is valid (all)"
        self.cursor.setoutputsize(25000)
        self.cursor.execute("select * from TestLongRaws")
        longVar = self.cursor.fetchvars[1]
        self.failUnlessEqual(longVar.size, 25000)
        self.failUnlessEqual(longVar.bufferSize, 25004)
    def testSetOutputSizesWrongColumn(self):
        "test setoutputsizes is valid (wrong column)"
        # Sizing the wrong column leaves column 2 at its default size.
        self.cursor.setoutputsize(25000, 1)
        self.cursor.execute("select * from TestLongs")
        longVar = self.cursor.fetchvars[1]
        self.failUnlessEqual(longVar.size, 131072)
        self.failUnlessEqual(longVar.bufferSize,
                131072 * self.connection.maxBytesPerCharacter + 4)
    def testSetOutputSizesRightColumn(self):
        "test setoutputsizes is valid (right column)"
        self.cursor.setoutputsize(35000, 2)
        self.cursor.execute("select * from TestLongRaws")
        longVar = self.cursor.fetchvars[1]
        self.failUnlessEqual(longVar.size, 35000)
        self.failUnlessEqual(longVar.bufferSize, 35004)
    def testArraySizeTooLarge(self):
        "test array size too large generates an exception"
        self.cursor.arraysize = 65536
        self.failUnlessRaises(ValueError, self.cursor.execute,
                "select * from TestLongRaws")
| gpl-2.0 |
chennan47/osf.io | api_tests/nodes/views/test_node_links_list.py | 10 | 49231 | import pytest
from api.base.settings.defaults import API_BASE
from framework.auth.core import Auth
from osf.models import NodeLog
from osf_tests.factories import (
ProjectFactory,
RegistrationFactory,
AuthUserFactory
)
from rest_framework import exceptions
from tests.utils import assert_latest_log
def node_url_for(n_id):
    """Return the v2 API URL for the node with guid *n_id*."""
    return '/{base}nodes/{guid}/'.format(base=API_BASE, guid=n_id)
@pytest.fixture()
def user():
    # Shared authenticated user; injected into the per-class fixtures below.
    return AuthUserFactory()
@pytest.mark.django_db
class TestNodeLinksList:
    """Tests listing a node's node_links (pointers) via the v2 API."""
    @pytest.fixture()
    def public_non_contrib(self):
        return AuthUserFactory()
    @pytest.fixture()
    def private_pointer_project(self, user):
        return ProjectFactory(is_public=False, creator=user)
    @pytest.fixture()
    def private_project(self, user, private_pointer_project):
        private_project = ProjectFactory(is_public=False, creator=user)
        private_project.add_pointer(private_pointer_project, auth=Auth(user))
        return private_project
    @pytest.fixture()
    def private_url(self, private_project):
        return '/{}nodes/{}/node_links/'.format(API_BASE, private_project._id)
    @pytest.fixture()
    def public_pointer_project(self, user):
        return ProjectFactory(is_public=True, creator=user)
    @pytest.fixture()
    def public_project(self, user, public_pointer_project):
        public_project = ProjectFactory(is_public=True, creator=user)
        public_project.add_pointer(public_pointer_project, auth=Auth(user))
        return public_project
    @pytest.fixture()
    def public_url(self, public_project):
        return '/{}nodes/{}/node_links/'.format(API_BASE, public_project._id)
    def test_non_mutational_node_links_list_tests(
            self, app, user, public_non_contrib, public_pointer_project,
            private_pointer_project, public_url, private_url):
        # test_return_embedded_public_node_pointers_logged_out
        res = app.get(public_url)
        res_json = res.json['data']
        assert len(res_json) == 1
        assert res.status_code == 200
        assert res.content_type == 'application/vnd.api+json'
        embedded = res_json[0]['embeds']['target_node']['data']['id']
        assert embedded == public_pointer_project._id
        # test_return_embedded_public_node_pointers_logged_in
        res = app.get(public_url, auth=public_non_contrib.auth)
        res_json = res.json['data']
        assert len(res_json) == 1
        assert res.status_code == 200
        assert res.content_type == 'application/vnd.api+json'
        embedded = res_json[0]['embeds']['target_node']['data']['id']
        assert embedded == public_pointer_project._id
        # test_return_private_node_pointers_logged_out
        res = app.get(private_url, expect_errors=True)
        assert res.status_code == 401
        assert 'detail' in res.json['errors'][0]
        # test_return_private_node_pointers_logged_in_contributor
        res = app.get(private_url, auth=user.auth)
        res_json = res.json['data']
        assert res.status_code == 200
        assert res.content_type == 'application/vnd.api+json'
        assert len(res_json) == 1
        embedded = res_json[0]['embeds']['target_node']['data']['id']
        assert embedded == private_pointer_project._id
        # test_return_private_node_pointers_logged_in_non_contributor
        res = app.get(
            private_url,
            auth=public_non_contrib.auth,
            expect_errors=True)
        assert res.status_code == 403
        assert 'detail' in res.json['errors'][0]
        # test_node_links_bad_version
        url = '{}?version=2.1'.format(public_url)
        res = app.get(url, auth=user.auth, expect_errors=True)
        assert res.status_code == 404
        assert res.json['errors'][0]['detail'] == 'This feature is deprecated as of version 2.1'
    def test_deleted_links_not_returned(
            self, app, public_url, public_pointer_project):
        # Links whose target has been soft-deleted must drop out of the list.
        res = app.get(public_url, expect_errors=True)
        res_json = res.json['data']
        original_length = len(res_json)
        public_pointer_project.is_deleted = True
        public_pointer_project.save()
        res = app.get(public_url)
        res_json = res.json['data']
        assert len(res_json) == original_length - 1
@pytest.mark.django_db
class TestNodeLinkCreate:
    """Tests POSTing a single node_link (pointer) to a node.

    Covers malformed payload shapes, permission combinations, fake/
    nonexistent targets, self-links, duplicate links, and registrations.
    """
    @pytest.fixture()
    def private_project(self, user):
        return ProjectFactory(is_public=False, creator=user)
    @pytest.fixture()
    def private_pointer_project(self, user):
        return ProjectFactory(is_public=False, creator=user)
    @pytest.fixture()
    def private_url(self, user, private_project):
        return '/{}nodes/{}/node_links/'.format(API_BASE, private_project._id)
    @pytest.fixture()
    def public_project(self, user):
        return ProjectFactory(is_public=True, creator=user)
    @pytest.fixture()
    def public_pointer_project(self, user):
        return ProjectFactory(is_public=True, creator=user)
    @pytest.fixture()
    def public_url(self, public_project):
        return '/{}nodes/{}/node_links/'.format(API_BASE, public_project._id)
    @pytest.fixture()
    def fake_url(self):
        return '/{}nodes/{}/node_links/'.format(API_BASE, 'rheis')
    @pytest.fixture()
    def user_two(self):
        return AuthUserFactory()
    @pytest.fixture()
    def user_two_project(self, user_two):
        return ProjectFactory(is_public=True, creator=user_two)
    @pytest.fixture()
    def user_two_url(self, user_two_project):
        return '/{}nodes/{}/node_links/'.format(API_BASE, user_two_project._id)
    @pytest.fixture()
    def make_payload(self):
        # creates a fake payload by default
        def payload(id='rheis'):
            return {
                'data': {
                    'type': 'node_links',
                    'relationships': {
                        'nodes': {
                            'data': {
                                'id': id,
                                'type': 'nodes'
                            }
                        }
                    }
                }
            }
        return payload
    def test_add_node_link(
            self, app, user, public_pointer_project, public_url):
        # test_add_node_link_relationships_is_a_list
        data = {
            'data': {
                'type': 'node_links',
                'relationships': [{
                    'target_node_id': public_pointer_project._id
                }]
            }
        }
        res = app.post_json_api(
            public_url, data, auth=user.auth,
            expect_errors=True)
        assert res.status_code == 400
        assert res.json['errors'][0]['detail'] == exceptions.ParseError.default_detail
        # test_add_node_link_no_relationships
        data = {
            'data': {
                'type': 'node_links',
                'attributes': {
                    'id': public_pointer_project._id
                }
            }
        }
        res = app.post_json_api(
            public_url, data, auth=user.auth,
            expect_errors=True)
        assert res.status_code == 400
        assert res.json['errors'][0]['source']['pointer'] == '/data/relationships'
        # test_add_node_links_empty_relationships
        data = {
            'data': {
                'type': 'node_links',
                'relationships': {}
            }
        }
        res = app.post_json_api(
            public_url, data, auth=user.auth,
            expect_errors=True)
        assert res.json['errors'][0]['source']['pointer'] == '/data/relationships'
        # test_add_node_links_no_nodes_key_in_relationships
        data = {
            'data': {
                'type': 'node_links',
                'relationships': {
                    'data': {
                        'id': public_pointer_project._id,
                        'type': 'nodes'
                    }
                }
            }
        }
        res = app.post_json_api(
            public_url, data, auth=user.auth,
            expect_errors=True)
        assert res.status_code == 400
        assert res.json['errors'][0]['detail'] == exceptions.ParseError.default_detail
        # test_add_node_links_no_data_in_relationships
        data = {
            'data': {
                'type': 'node_links',
                'relationships': {
                    'nodes': {
                        'id': public_pointer_project._id,
                        'type': 'nodes'
                    }
                }
            }
        }
        res = app.post_json_api(
            public_url, data, auth=user.auth,
            expect_errors=True)
        assert res.status_code == 400
        assert res.json['errors'][0]['detail'] == 'Request must include /data.'
        # test_add_node_links_no_target_type_in_relationships
        data = {
            'data': {
                'type': 'node_links',
                'relationships': {
                    'nodes': {
                        'data': {
                            'id': public_pointer_project._id
                        }
                    }
                }
            }
        }
        res = app.post_json_api(
            public_url, data, auth=user.auth,
            expect_errors=True)
        assert res.status_code == 400
        assert res.json['errors'][0]['detail'] == 'Request must include /type.'
        # test_add_node_links_no_target_id_in_relationships
        data = {
            'data': {
                'type': 'node_links',
                'relationships': {
                    'nodes': {
                        'data': {
                            'type': 'nodes'
                        }
                    }
                }
            }
        }
        res = app.post_json_api(
            public_url, data, auth=user.auth,
            expect_errors=True)
        assert res.status_code == 400
        assert res.json['errors'][0]['source']['pointer'] == '/data/id'
        # test_add_node_links_incorrect_target_id_in_relationships
        data = {
            'data': {
                'type': 'node_links',
                'relationships': {
                    'nodes': {
                        'data': {
                            'type': 'nodes',
                            'id': '12345'
                        }
                    }
                }
            }
        }
        res = app.post_json_api(
            public_url, data, auth=user.auth,
            expect_errors=True)
        assert res.status_code == 400
        # test_add_node_links_incorrect_target_type_in_relationships
        data = {
            'data': {
                'type': 'nodes',
                'relationships': {
                    'nodes': {
                        'data': {
                            'type': 'Incorrect!',
                            'id': public_pointer_project._id
                        }
                    }
                }
            }
        }
        res = app.post_json_api(
            public_url, data, auth=user.auth,
            expect_errors=True)
        assert res.status_code == 409
    def test_create_node_link_invalid_data(self, app, user, public_url):
        res = app.post_json_api(
            public_url, 'Incorrect data',
            auth=user.auth, expect_errors=True)
        assert res.status_code == 400
        assert res.json['errors'][0]['detail'] == exceptions.ParseError.default_detail
    def test_creates_node_link_target_not_nested(
            self, app, user_two, private_pointer_project, public_url):
        payload = {
            'data': {
                'type': 'node_links',
                'id': private_pointer_project._id
            }
        }
        res = app.post_json_api(
            public_url, payload,
            auth=user_two.auth,
            expect_errors=True)
        assert res.status_code == 400
        assert res.json['errors'][0]['source']['pointer'] == '/data/relationships'
        assert res.json['errors'][0]['detail'] == 'Request must include /data/relationships.'
    def test_creates_public_node_pointer_logged_out(
            self, app, public_url, public_pointer_project, make_payload):
        public_payload = make_payload(id=public_pointer_project._id)
        res = app.post_json_api(public_url, public_payload, expect_errors=True)
        assert res.status_code == 401
        assert 'detail' in res.json['errors'][0]
    def test_creates_public_node_pointer_logged_in(
            self, app, user, user_two, public_project,
            public_pointer_project, public_url, make_payload):
        public_payload = make_payload(id=public_pointer_project._id)
        with assert_latest_log(NodeLog.POINTER_CREATED, public_project):
            # Non-contributor is rejected; contributor then succeeds.
            res = app.post_json_api(
                public_url, public_payload,
                auth=user_two.auth, expect_errors=True)
            assert res.status_code == 403
            assert 'detail' in res.json['errors'][0]
            res = app.post_json_api(public_url, public_payload, auth=user.auth)
            assert res.status_code == 201
            assert res.content_type == 'application/vnd.api+json'
            res_json = res.json['data']
            embedded = res_json['embeds']['target_node']['data']['id']
            assert embedded == public_pointer_project._id
    def test_creates_private_node_pointer_logged_out(
            self, app, private_pointer_project, private_url, make_payload):
        private_payload = make_payload(id=private_pointer_project._id)
        res = app.post_json_api(
            private_url, private_payload,
            expect_errors=True)
        assert res.status_code == 401
        assert 'detail' in res.json['errors'][0]
    def test_creates_private_node_pointer_logged_in_contributor(
            self, app, user, private_pointer_project, private_url, make_payload):
        private_payload = make_payload(id=private_pointer_project._id)
        res = app.post_json_api(private_url, private_payload, auth=user.auth)
        assert res.status_code == 201
        res_json = res.json['data']
        embedded = res_json['embeds']['target_node']['data']['id']
        assert embedded == private_pointer_project._id
        assert res.content_type == 'application/vnd.api+json'
    def test_creates_private_node_pointer_logged_in_non_contributor(
            self, app, user_two, private_pointer_project, private_url, make_payload):
        private_payload = make_payload(id=private_pointer_project._id)
        res = app.post_json_api(
            private_url, private_payload,
            auth=user_two.auth, expect_errors=True)
        assert res.status_code == 403
        assert 'detail' in res.json['errors'][0]
    def test_create_node_pointer_non_contributing_node_to_contributing_node(
            self, app, user_two, user_two_project, private_url, make_payload):
        user_two_payload = make_payload(id=user_two_project._id)
        res = app.post_json_api(
            private_url, user_two_payload,
            auth=user_two.auth,
            expect_errors=True)
        assert res.status_code == 403
        assert 'detail' in res.json['errors'][0]
    def test_create_node_pointer_contributing_node_to_non_contributing_node(
            self, app, user, user_two_project, private_project,
            private_url, make_payload):
        with assert_latest_log(NodeLog.POINTER_CREATED, private_project):
            user_two_payload = make_payload(id=user_two_project._id)
            res = app.post_json_api(
                private_url, user_two_payload, auth=user.auth)
            assert res.status_code == 201
            assert res.content_type == 'application/vnd.api+json'
            res_json = res.json['data']
            embedded = res_json['embeds']['target_node']['data']['id']
            assert embedded == user_two_project._id
    def test_create_pointer_non_contributing_node_to_fake_node(
            self, app, user_two, private_url, make_payload):
        fake_payload = make_payload()
        res = app.post_json_api(
            private_url, fake_payload,
            auth=user_two.auth,
            expect_errors=True)
        assert res.status_code == 403
        assert 'detail' in res.json['errors'][0]
    def test_create_pointer_contributing_node_to_fake_node(
            self, app, user, private_url, make_payload):
        fake_payload = make_payload()
        res = app.post_json_api(
            private_url, fake_payload,
            auth=user.auth, expect_errors=True)
        assert res.status_code == 400
        assert 'detail' in res.json['errors'][0]
    def test_create_fake_node_pointing_to_contributing_node(
            self, app, user, user_two, private_pointer_project, fake_url, make_payload):
        private_payload = make_payload(id=private_pointer_project._id)
        res = app.post_json_api(
            fake_url, private_payload,
            auth=user.auth, expect_errors=True)
        assert res.status_code == 404
        assert 'detail' in res.json['errors'][0]
        res = app.post_json_api(
            fake_url, private_payload,
            auth=user_two.auth, expect_errors=True)
        assert res.status_code == 404
        assert 'detail' in res.json['errors'][0]
    def test_create_node_pointer_to_itself(
            self, app, user, public_project,
            public_url, make_payload):
        with assert_latest_log(NodeLog.POINTER_CREATED, public_project):
            point_to_itself_payload = make_payload(id=public_project._id)
            res = app.post_json_api(
                public_url,
                point_to_itself_payload,
                auth=user.auth)
            res_json = res.json['data']
            assert res.status_code == 201
            assert res.content_type == 'application/vnd.api+json'
            embedded = res_json['embeds']['target_node']['data']['id']
            assert embedded == public_project._id
    def test_create_node_pointer_errors(
            self, app, user, user_two, public_project,
            user_two_project, public_pointer_project,
            public_url, private_url, make_payload):
        # test_create_node_pointer_to_itself_unauthorized
        point_to_itself_payload = make_payload(id=public_project._id)
        res = app.post_json_api(
            public_url, point_to_itself_payload,
            auth=user_two.auth,
            expect_errors=True)
        assert res.status_code == 403
        assert 'detail' in res.json['errors'][0]
        # test_create_node_pointer_already_connected
        with assert_latest_log(NodeLog.POINTER_CREATED, public_project):
            public_payload = make_payload(id=public_pointer_project._id)
            res = app.post_json_api(public_url, public_payload, auth=user.auth)
            assert res.status_code == 201
            assert res.content_type == 'application/vnd.api+json'
            res_json = res.json['data']
            embedded = res_json['embeds']['target_node']['data']['id']
            assert embedded == public_pointer_project._id
            res = app.post_json_api(
                public_url, public_payload,
                auth=user.auth, expect_errors=True)
            assert res.status_code == 400
            assert 'detail' in res.json['errors'][0]
        # test_create_node_pointer_no_type
        payload = {
            'data': {
                'relationships': {
                    'nodes': {
                        'data': {
                            'id': user_two_project._id,
                            'type': 'nodes'
                        }
                    }
                }
            }
        }
        res = app.post_json_api(
            private_url, payload,
            auth=user.auth, expect_errors=True)
        assert res.status_code == 400
        assert res.json['errors'][0]['detail'] == 'This field may not be null.'
        assert res.json['errors'][0]['source']['pointer'] == '/data/type'
        # test_create_node_pointer_incorrect_type
        payload = {
            'data': {
                'type': 'Wrong type.',
                'relationships': {
                    'nodes': {
                        'data': {
                            'id': user_two_project._id,
                            'type': 'nodes'
                        }
                    }
                }
            }
        }
        res = app.post_json_api(
            private_url, payload,
            auth=user.auth,
            expect_errors=True)
        assert res.status_code == 409
        assert res.json['errors'][0]['detail'] == 'This resource has a type of "node_links", but you set the json body\'s type field to "Wrong type.". You probably need to change the type field to match the resource\'s type.'
    def test_cannot_add_link_to_registration(
            self, app, user, public_pointer_project, make_payload):
        registration = RegistrationFactory(creator=user)
        url = '/{}nodes/{}/node_links/'.format(API_BASE, registration._id)
        payload = make_payload(id=public_pointer_project._id)
        res = app.post_json_api(
            url, payload, auth=user.auth,
            expect_errors=True)
        assert res.status_code == 404
@pytest.mark.django_db
class TestNodeLinksBulkCreate:
    # Users, projects, and node_links URLs shared by the bulk-create tests.
    @pytest.fixture()
    def user_two(self):
        return AuthUserFactory()
    @pytest.fixture()
    def private_project(self, user):
        return ProjectFactory(is_public=False, creator=user)
    @pytest.fixture()
    def private_pointer_project_one(self, user):
        return ProjectFactory(is_public=False, creator=user)
    @pytest.fixture()
    def private_pointer_project_two(self, user):
        return ProjectFactory(is_public=False, creator=user)
    @pytest.fixture()
    def private_url(self, private_project):
        return '/{}nodes/{}/node_links/'.format(API_BASE, private_project._id)
    @pytest.fixture()
    def public_project(self, user):
        return ProjectFactory(is_public=True, creator=user)
    @pytest.fixture()
    def public_pointer_project_one(self, user):
        return ProjectFactory(is_public=True, creator=user)
    @pytest.fixture()
    def public_pointer_project_two(self, user):
        return ProjectFactory(is_public=True, creator=user)
    @pytest.fixture()
    def public_url(self, public_project):
        return '/{}nodes/{}/node_links/'.format(API_BASE, public_project._id)
    @pytest.fixture()
    def user_two_project(self, user_two):
        return ProjectFactory(is_public=True, creator=user_two)
    @pytest.fixture()
    def user_two_url(self, user_two_project):
        return '/{}nodes/{}/node_links/'.format(API_BASE, user_two_project._id)
    # Pre-built JSON-API bulk payloads used by the tests below.
    @pytest.fixture()
    def private_payload(
            self, private_pointer_project_one, private_pointer_project_two):
        return {
            'data': [{
                'type': 'node_links',
                'relationships': {
                    'nodes': {
                        'data': {
                            'id': private_pointer_project_one._id,
                            'type': 'nodes'
                        }
                    }
                }
            },
                {
                'type': 'node_links',
                'relationships': {
                    'nodes': {
                        'data': {
                            'id': private_pointer_project_two._id,
                            'type': 'nodes'
                        }
                    }
                }
            }]
        }
    @pytest.fixture()
    def public_payload(
            self, public_pointer_project_one, public_pointer_project_two):
        return {
            'data': [{
                'type': 'node_links',
                'relationships': {
                    'nodes': {
                        'data': {
                            'id': public_pointer_project_one._id,
                            'type': 'nodes'
                        }
                    }
                }
            },
                {
                'type': 'node_links',
                'relationships': {
                    'nodes': {
                        'data': {
                            'id': public_pointer_project_two._id,
                            'type': 'nodes'
                        }
                    }
                }
            }]
        }
    @pytest.fixture()
    def user_two_payload(self, user_two_project):
        return {
            'data': [{
                'type': 'node_links',
                'relationships': {
                    'nodes': {
                        'data': {
                            'id': user_two_project._id,
                            'type': 'nodes'
                        }
                    }
                }
            }]
        }
    def test_bulk_create_errors(
            self, app, user, user_two, public_project, user_two_project,
            private_pointer_project_one, public_url, private_url,
            public_payload, private_payload, user_two_payload):
        """Failure cases for bulk node_link creation: payload shape,
        bulk limits, permissions, and fake/nonexistent nodes."""
        # test_bulk_create_node_links_blank_request
        res = app.post_json_api(
            public_url, auth=user.auth,
            expect_errors=True, bulk=True)
        assert res.status_code == 400
        # test_bulk_creates_pointers_limits
        payload = {'data': [public_payload['data'][0]] * 101}
        res = app.post_json_api(
            public_url, payload,
            auth=user.auth,
            expect_errors=True, bulk=True)
        assert res.status_code == 400
        assert res.json['errors'][0]['detail'] == 'Bulk operation limit is 100, got 101.'
        assert res.json['errors'][0]['source']['pointer'] == '/data'
        res = app.get(public_url)
        assert res.json['data'] == []
        # test_bulk_creates_project_target_not_nested
        payload = {'data': [{'type': 'node_links',
                   'target_node_id': private_pointer_project_one._id}]}
        res = app.post_json_api(
            public_url, payload,
            auth=user_two.auth,
            expect_errors=True,
            bulk=True)
        assert res.status_code == 400
        assert res.json['errors'][0]['source']['pointer'] == '/data/relationships'
        assert res.json['errors'][0]['detail'] == 'Request must include /data/relationships.'
        # test_bulk_creates_public_node_pointers_logged_out
        res = app.post_json_api(
            public_url, public_payload,
            expect_errors=True, bulk=True)
        assert res.status_code == 401
        assert 'detail' in res.json['errors'][0]
        res = app.get(public_url)
        assert res.json['data'] == []
        # test_bulk_creates_public_node_pointer_logged_in_non_contrib
        res = app.post_json_api(
            public_url, public_payload,
            auth=user_two.auth,
            expect_errors=True, bulk=True)
        assert res.status_code == 403
        # test_bulk_creates_private_node_pointers_logged_out
        res = app.post_json_api(
            private_url, private_payload,
            expect_errors=True, bulk=True)
        assert res.status_code == 401
        assert 'detail' in res.json['errors'][0]
        res = app.get(private_url, auth=user.auth)
        assert res.json['data'] == []
        # test_bulk_creates_private_node_pointers_logged_in_non_contributor
        res = app.post_json_api(
            private_url, private_payload,
            auth=user_two.auth,
            expect_errors=True, bulk=True)
        assert res.status_code == 403
        assert 'detail' in res.json['errors'][0]
        res = app.get(private_url, auth=user.auth)
        assert res.json['data'] == []
        # test_bulk_creates_node_pointers_non_contributing_node_to_contributing_node
        res = app.post_json_api(
            private_url, user_two_payload,
            auth=user_two.auth,
            expect_errors=True, bulk=True)
        assert res.status_code == 403
        assert 'detail' in res.json['errors'][0]
        # test_bulk_creates_pointers_non_contributing_node_to_fake_node
        fake_payload = {
            'data': [{
                'type': 'node_links',
                'relationships': {
                    'nodes': {
                        'data': {
                            'id': 'rheis',
                            'type': 'nodes'
                        }
                    }
                }
            }]
        }
        res = app.post_json_api(
            private_url, fake_payload,
            auth=user_two.auth,
            expect_errors=True,
            bulk=True)
        assert res.status_code == 403
        assert 'detail' in res.json['errors'][0]
        # test_bulk_creates_pointers_contributing_node_to_fake_node
        fake_payload = {
            'data': [{
                'type': 'node_links',
                'relationships': {
                    'nodes': {
                        'data': {
                            'id': 'rheis',
                            'type': 'nodes'
                        }
                    }
                }
            }]
        }
        res = app.post_json_api(
            private_url, fake_payload,
            auth=user.auth,
            expect_errors=True, bulk=True)
        assert res.status_code == 400
        assert 'detail' in res.json['errors'][0]
        # test_bulk_creates_fake_nodes_pointing_to_contributing_node
        fake_url = '/{}nodes/{}/node_links/'.format(API_BASE, 'rheis')
        res = app.post_json_api(
            fake_url, private_payload,
            auth=user.auth,
            expect_errors=True, bulk=True)
        assert res.status_code == 404
        assert 'detail' in res.json['errors'][0]
        res = app.post_json_api(
            fake_url, private_payload,
            auth=user_two.auth,
            expect_errors=True, bulk=True)
        assert res.status_code == 404
        assert 'detail' in res.json['errors'][0]
        # test_bulk_creates_node_pointer_to_itself_unauthorized
        point_to_itself_payload = {
            'data': [{
                'type': 'node_links',
                'relationships': {
                    'nodes': {
                        'data': {
                            'type': 'nodes',
                            'id': public_project._id
                        }
                    }
                }
            }]
        }
        res = app.post_json_api(
            public_url, point_to_itself_payload,
            bulk=True, auth=user_two.auth,
            expect_errors=True)
        assert res.status_code == 403
        assert 'detail' in res.json['errors'][0]
        # test_bulk_creates_node_pointer_no_type
        payload = {
            'data': [{
                'relationships': {
                    'nodes': {
                        'data': {
                            'type': 'nodes',
                            'id': user_two_project._id
                        }
                    }
                }
            }]
        }
        res = app.post_json_api(
            private_url, payload, auth=user.auth,
            expect_errors=True, bulk=True)
        assert res.status_code == 400
        assert res.json['errors'][0]['detail'] == 'This field may not be null.'
        assert res.json['errors'][0]['source']['pointer'] == '/data/0/type'
        # test_bulk_creates_node_pointer_incorrect_type
        payload = {
            'data': [{
                'type': 'Wrong type.',
                'relationships': {
                    'nodes': {
                        'data': {
                            'type': 'nodes',
                            'id': user_two_project._id
                        }
                    }
                }
            }]
        }
        res = app.post_json_api(
            private_url, payload, auth=user.auth,
            expect_errors=True, bulk=True)
        assert res.status_code == 409
        assert res.json['errors'][0]['detail'] == 'This resource has a type of "node_links", but you set the json body\'s type field to "Wrong type.". You probably need to change the type field to match the resource\'s type.'
def test_bulk_creates_public_node_pointer_logged_in_contrib(
self, app, user, public_project,
public_pointer_project_one,
public_pointer_project_two,
public_url, public_payload):
with assert_latest_log(NodeLog.POINTER_CREATED, public_project):
res = app.post_json_api(
public_url, public_payload,
auth=user.auth, bulk=True)
assert res.status_code == 201
assert res.content_type == 'application/vnd.api+json'
res_json = res.json['data']
embedded = res_json[0]['embeds']['target_node']['data']['id']
assert embedded == public_pointer_project_one._id
embedded = res_json[1]['embeds']['target_node']['data']['id']
assert embedded == public_pointer_project_two._id
def test_bulk_creates_private_node_pointer_logged_in_contributor(
self, app, user, private_project, private_payload,
private_pointer_project_one, private_pointer_project_two,
private_url):
with assert_latest_log(NodeLog.POINTER_CREATED, private_project):
res = app.post_json_api(
private_url, private_payload,
auth=user.auth, bulk=True)
assert res.status_code == 201
res_json = res.json['data']
embedded = res_json[0]['embeds']['target_node']['data']['id']
assert embedded == private_pointer_project_one._id
embedded = res_json[1]['embeds']['target_node']['data']['id']
assert embedded == private_pointer_project_two._id
assert res.content_type == 'application/vnd.api+json'
def test_bulk_creates_node_pointers_contributing_node_to_non_contributing_node(
self, app, user, private_project, user_two_project,
user_two_payload, private_url):
with assert_latest_log(NodeLog.POINTER_CREATED, private_project):
res = app.post_json_api(
private_url, user_two_payload,
auth=user.auth, bulk=True)
assert res.status_code == 201
assert res.content_type == 'application/vnd.api+json'
res_json = res.json['data']
embedded = res_json[0]['embeds']['target_node']['data']['id']
assert embedded == user_two_project._id
res = app.get(private_url, auth=user.auth)
res_json = res.json['data']
embedded = res_json[0]['embeds']['target_node']['data']['id']
assert embedded == user_two_project._id
def test_bulk_creates_node_pointer_to_itself(
self, app, user, public_project, public_url):
with assert_latest_log(NodeLog.POINTER_CREATED, public_project):
point_to_itself_payload = {
'data': [{
'type': 'node_links',
'relationships': {
'nodes': {
'data': {
'type': 'nodes',
'id': public_project._id
}
}
}
}]
}
res = app.post_json_api(
public_url, point_to_itself_payload,
auth=user.auth, bulk=True)
assert res.status_code == 201
assert res.content_type == 'application/vnd.api+json'
res_json = res.json['data']
embedded = res_json[0]['embeds']['target_node']['data']['id']
assert embedded == public_project._id
def test_bulk_creates_node_pointer_already_connected(
self, app, user, public_project,
public_pointer_project_one,
public_pointer_project_two,
public_url, public_payload):
with assert_latest_log(NodeLog.POINTER_CREATED, public_project):
res = app.post_json_api(
public_url, public_payload,
auth=user.auth, bulk=True)
assert res.status_code == 201
assert res.content_type == 'application/vnd.api+json'
res_json = res.json['data']
embedded = res_json[0]['embeds']['target_node']['data']['id']
assert embedded == public_pointer_project_one._id
embedded_two = res_json[1]['embeds']['target_node']['data']['id']
assert embedded_two == public_pointer_project_two._id
res = app.post_json_api(
public_url, public_payload,
auth=user.auth,
expect_errors=True, bulk=True)
assert res.status_code == 400
assert 'Target Node \'{}\' already pointed to by \'{}\'.'.format(
public_pointer_project_one._id,
public_project._id
) in res.json['errors'][0]['detail']
def test_bulk_cannot_add_link_to_registration(
self, app, user, public_pointer_project_one):
registration = RegistrationFactory(creator=user)
url = '/{}nodes/{}/node_links/'.format(API_BASE, registration._id)
payload = {
'data': [{
'type': 'node_links',
'relationships': {
'nodes': {
'data': {
'type': 'nodes',
'id': public_pointer_project_one._id
}
}
}
}]
}
res = app.post_json_api(
url, payload, auth=user.auth,
expect_errors=True, bulk=True)
assert res.status_code == 404
@pytest.mark.django_db
class TestBulkDeleteNodeLinks:
    """Bulk DELETE of node links (pointers) on public and private projects.

    Fixtures build one private and one public project, each holding two
    pointers to other projects, together with payloads that name both
    pointer ids so a single bulk request can remove them together.
    """

    @pytest.fixture()
    def non_contrib(self):
        # Authenticated user with no contributor role on any project here.
        return AuthUserFactory()

    @pytest.fixture()
    def private_project(self, user):
        return ProjectFactory(creator=user, is_public=False)

    @pytest.fixture()
    def private_project_pointer_project_one(self, user):
        return ProjectFactory(creator=user, is_public=True)

    @pytest.fixture()
    def private_project_pointer_project_two(self, user):
        return ProjectFactory(creator=user, is_public=True)

    @pytest.fixture()
    def private_pointer_one(
            self, user, private_project,
            private_project_pointer_project_one):
        return private_project.add_pointer(
            private_project_pointer_project_one, auth=Auth(user), save=True)

    @pytest.fixture()
    def private_pointer_two(
            self, user, private_project,
            private_project_pointer_project_two):
        return private_project.add_pointer(
            private_project_pointer_project_two, auth=Auth(user), save=True)

    @pytest.fixture()
    def private_payload(self, private_pointer_one, private_pointer_two):
        # Bulk body naming both private pointers for deletion.
        return {
            'data': [
                {'type': 'node_links', 'id': private_pointer_one._id},
                {'type': 'node_links', 'id': private_pointer_two._id}
            ]
        }

    @pytest.fixture()
    def private_url(self, private_project):
        return '/{}nodes/{}/node_links/'.format(API_BASE, private_project._id)

    @pytest.fixture()
    def public_project(self, user):
        return ProjectFactory(is_public=True, creator=user)

    @pytest.fixture()
    def public_project_pointer_project_one(self, user):
        return ProjectFactory(is_public=True, creator=user)

    @pytest.fixture()
    def public_project_pointer_project_two(self, user):
        return ProjectFactory(is_public=True, creator=user)

    @pytest.fixture()
    def public_pointer_one(
            self, user, public_project,
            public_project_pointer_project_one):
        return public_project.add_pointer(
            public_project_pointer_project_one, auth=Auth(user), save=True)

    @pytest.fixture()
    def public_pointer_two(
            self, user, public_project,
            public_project_pointer_project_two):
        return public_project.add_pointer(
            public_project_pointer_project_two, auth=Auth(user), save=True)

    @pytest.fixture()
    def public_payload(self, public_pointer_one, public_pointer_two):
        # Bulk body naming both public pointers for deletion.
        return {
            'data': [
                {'type': 'node_links', 'id': public_pointer_one._id},
                {'type': 'node_links', 'id': public_pointer_two._id}
            ]
        }

    @pytest.fixture()
    def public_url(self, public_project):
        return '/{}nodes/{}/node_links/'.format(API_BASE, public_project._id)

    def test_bulk_delete_errors(
            self, app, user, non_contrib, public_project,
            public_pointer_one, public_pointer_two,
            public_project_pointer_project_one,
            public_project_pointer_project_two,
            public_url, private_url, public_payload,
            private_payload):
        # Grab-bag of malformed-request and permission failures; each
        # sub-case is labelled with the single-purpose test it replaces.
        # test_bulk_delete_node_links_blank_request
        res = app.delete_json_api(
            public_url, auth=user.auth,
            expect_errors=True, bulk=True)
        assert res.status_code == 400
        # test_bulk_delete_pointer_limits
        res = app.delete_json_api(
            public_url,
            {'data': [public_payload['data'][0]] * 101},
            auth=user.auth,
            expect_errors=True, bulk=True)
        assert res.status_code == 400
        assert res.json['errors'][0]['detail'] == 'Bulk operation limit is 100, got 101.'
        assert res.json['errors'][0]['source']['pointer'] == '/data'
        # test_bulk_delete_dict_inside_data
        res = app.delete_json_api(
            public_url,
            {'data': {
                'id': public_project._id,
                'type': 'node_links'
            }},
            auth=user.auth,
            expect_errors=True, bulk=True)
        assert res.status_code == 400
        assert res.json['errors'][0]['detail'] == 'Expected a list of items but got type "dict".'
        # test_bulk_delete_pointers_no_type
        payload = {'data': [
            {'id': public_project_pointer_project_one._id},
            {'id': public_project_pointer_project_two._id}
        ]}
        res = app.delete_json_api(
            public_url, payload, auth=user.auth,
            expect_errors=True, bulk=True)
        assert res.status_code == 400
        assert res.json['errors'][0]['source']['pointer'] == '/data/type'
        # test_bulk_delete_pointers_incorrect_type
        payload = {'data': [
            {'id': public_pointer_one._id, 'type': 'Incorrect type.'},
            {'id': public_pointer_two._id, 'type': 'Incorrect type.'}
        ]}
        res = app.delete_json_api(
            public_url, payload, auth=user.auth,
            expect_errors=True, bulk=True)
        assert res.status_code == 409
        # test_bulk_delete_pointers_no_id
        payload = {'data': [
            {'type': 'node_links'},
            {'type': 'node_links'}
        ]}
        res = app.delete_json_api(
            public_url, payload, auth=user.auth,
            expect_errors=True, bulk=True)
        assert res.status_code == 400
        assert res.json['errors'][0]['source']['pointer'] == '/data/id'
        # test_bulk_delete_pointers_no_data
        res = app.delete_json_api(
            public_url, auth=user.auth,
            expect_errors=True, bulk=True)
        assert res.status_code == 400
        assert res.json['errors'][0]['detail'] == 'Request must contain array of resource identifier objects.'
        # test_bulk_delete_pointers_payload_is_empty_dict
        res = app.delete_json_api(
            public_url, {}, auth=user.auth,
            expect_errors=True, bulk=True)
        assert res.status_code == 400
        assert res.json['errors'][0]['detail'] == 'Request must include /data.'
        # test_bulk_deletes_public_node_pointers_logged_out
        res = app.delete_json_api(
            public_url, public_payload,
            expect_errors=True, bulk=True)
        assert res.status_code == 401
        assert 'detail' in res.json['errors'][0]
        # test_bulk_deletes_public_node_pointers_fails_if_bad_auth
        node_count_before = len(public_project.nodes_pointer)
        res = app.delete_json_api(
            public_url, public_payload,
            auth=non_contrib.auth,
            expect_errors=True, bulk=True)
        # This could arguably be a 405, but we don't need to go crazy with
        # status codes
        assert res.status_code == 403
        assert 'detail' in res.json['errors'][0]
        public_project.reload()
        # Failed delete must leave the pointer count untouched.
        assert node_count_before == len(public_project.nodes_pointer)
        # test_bulk_deletes_private_node_pointers_logged_in_non_contributor
        res = app.delete_json_api(
            private_url, private_payload,
            auth=non_contrib.auth,
            expect_errors=True, bulk=True)
        assert res.status_code == 403
        assert 'detail' in res.json['errors'][0]
        # test_bulk_deletes_private_node_pointers_logged_out
        res = app.delete_json_api(
            private_url, private_payload,
            expect_errors=True, bulk=True)
        assert res.status_code == 401
        assert 'detail' in res.json['errors'][0]

    def test_cannot_delete_if_registration(
            self, app, user, public_project, public_payload):
        # Registrations are read-only; bulk delete is not an allowed method.
        registration = RegistrationFactory(project=public_project)
        url = '/{}registrations/{}/node_links/'.format(
            API_BASE, registration._id)
        res = app.delete_json_api(
            url, public_payload, auth=user.auth,
            expect_errors=True, bulk=True)
        assert res.status_code == 405

    def test_bulk_deletes_public_node_pointers_succeeds_as_owner(
            self, app, user, public_project, public_url, public_payload):
        with assert_latest_log(NodeLog.POINTER_REMOVED, public_project):
            node_count_before = len(public_project.nodes_pointer)
            res = app.delete_json_api(
                public_url, public_payload, auth=user.auth, bulk=True)
            public_project.reload()
            assert res.status_code == 204
            # Both pointers named in the payload are gone.
            assert node_count_before - 2 == len(public_project.nodes_pointer)
            public_project.reload()

    def test_bulk_deletes_private_node_pointers_logged_in_contributor(
            self, app, user, private_project, private_url, private_payload):
        with assert_latest_log(NodeLog.POINTER_REMOVED, private_project):
            res = app.delete_json_api(
                private_url, private_payload,
                auth=user.auth, bulk=True)
            private_project.reload()  # Update the model to reflect changes made by post request
            assert res.status_code == 204
            assert len(private_project.nodes_pointer) == 0

    def test_return_bulk_deleted_public_node_pointer(
            self, app, user, public_project,
            public_pointer_one, public_url, public_payload):
        with assert_latest_log(NodeLog.POINTER_REMOVED, public_project):
            res = app.delete_json_api(
                public_url, public_payload, auth=user.auth, bulk=True)
            public_project.reload()  # Update the model to reflect changes made by post request
            assert res.status_code == 204
            pointer_url = '/{}nodes/{}/node_links/{}/'.format(
                API_BASE, public_project._id, public_pointer_one._id)
            # check that deleted pointer can not be returned
            res = app.get(pointer_url, auth=user.auth, expect_errors=True)
            assert res.status_code == 404

    def test_return_bulk_deleted_private_node_pointer(
            self, app, user, private_project, private_pointer_one,
            private_url, private_payload):
        with assert_latest_log(NodeLog.POINTER_REMOVED, private_project):
            res = app.delete_json_api(
                private_url, private_payload,
                auth=user.auth, bulk=True)
            private_project.reload()  # Update the model to reflect changes made by post request
            assert res.status_code == 204
            pointer_url = '/{}nodes/{}/node_links/{}/'.format(
                API_BASE, private_project._id, private_pointer_one._id)
            # check that deleted pointer can not be returned
            res = app.get(pointer_url, auth=user.auth, expect_errors=True)
            assert res.status_code == 404

    # Regression test for https://openscience.atlassian.net/browse/OSF-4322
    def test_bulk_delete_link_that_is_not_linked_to_correct_node(
            self, app, user, private_url, public_payload):
        ProjectFactory(creator=user)
        # The node link belongs to a different project
        res = app.delete_json_api(
            private_url, public_payload, auth=user.auth,
            expect_errors=True, bulk=True
        )
        assert res.status_code == 400
        errors = res.json['errors']
        assert len(errors) == 1
        assert errors[0]['detail'] == 'Node link does not belong to the requested node.'
| apache-2.0 |
jwlawson/tensorflow | tensorflow/python/kernel_tests/split_op_test.py | 19 | 12874 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for Split Op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
_TEST_DTYPES = (dtypes.float32, dtypes.float64, dtypes.complex64,
dtypes.complex128)
class SplitOpTest(test.TestCase):
  """Functional tests for array_ops.split (even and size-based splits)."""

  def _makeData(self, shape, dtype):
    """Return random data of `shape`; complex dtypes get an imaginary part."""
    data = np.random.rand(*shape).astype(dtype.as_numpy_dtype)
    if dtype.is_complex:
      data -= 1j * data
    return data

  def testShapeInference(self):
    """Static/runtime shape errors, scalar num_splits, partial shapes."""
    model_input = array_ops.placeholder(dtypes.float32, shape=(1, 10))

    # check that we fail during static shape inference if sizes are known
    with self.assertRaises(ValueError):
      # pylint: disable=expression-not-assigned
      array_ops.split(model_input, [4], axis=1)[0]
      # pylint: enable=expression-not-assigned

    model_input = array_ops.placeholder(dtypes.float32)
    inp = np.zeros((1, 10))
    # check that we still fail at runtime if the shapes were unknown
    with self.test_session(use_gpu=True) as sess:
      with self.assertRaises(errors_impl.InvalidArgumentError):
        sess.run(array_ops.split(model_input, [4]), {model_input: inp})

    # test that we can pass a scalar Tensor as num_splits
    for axis in [0, -2]:
      with self.test_session(use_gpu=True) as sess:
        result = sess.run(
            array_ops.split(
                array_ops.ones([4, 4]),
                num_or_size_splits=array_ops.ones([2, 2]).get_shape()[1],
                axis=axis))
        self.assertEqual(result[0].shape, (2, 4))
        self.assertEqual(result[1].shape, (2, 4))

    # test that none split dimensions remain, even if we don't know how
    # the split_dim will be split, but we do know the axis
    result = array_ops.split(
        array_ops.ones([5, 2]), array_ops.constant([2, 1, 2]) * 1, axis=0)
    self.assertEqual(result[0].shape[1], 2)
    self.assertEqual(result[1].shape[1], 2)
    self.assertEqual(result[2].shape[1], 2)

    model_input2 = array_ops.placeholder(dtypes.float32, shape=[None, 2])
    result = array_ops.split(model_input2, [2, 2], axis=0)[0]
    with self.test_session(use_gpu=True) as sess:
      sess.run(result, feed_dict={model_input2: np.ones([4, 2])})

  def testFailWithoutExplicitNum(self):
    """split() needs `num` when len(size_splits) is not statically known."""
    size_splits = array_ops.placeholder(dtype=dtypes.int32, shape=[None])
    value = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
    with self.test_session(use_gpu=True) as sess:
      with self.assertRaises(ValueError) as context:
        sess.run(array_ops.split(value, size_splits), {size_splits: [2, 2, 6]})
      self.assertTrue("Cannot infer num from shape" in str(context.exception))

  @test_util.run_in_graph_and_eager_modes()
  def testExplicitNum(self):
    """An explicit `num` must match len(size_splits)."""
    size_splits = array_ops.constant([2, 2, 6], dtype=dtypes.int32)
    value = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]

    # Eager and Graph modes raise different exceptions
    with self.assertRaises((errors_impl.InvalidArgumentError, ValueError)):
      array_ops.split(value, size_splits, num=4)

    r = self.evaluate(array_ops.split(value, size_splits, num=3))
    self.assertAllEqual(r[0], value[0:2])
    self.assertAllEqual(r[1], value[2:4])
    self.assertAllEqual(r[2], value[4:])

  @test_util.run_in_graph_and_eager_modes()
  def testListOfScalarTensors(self):
    """size_splits may be a list of scalar Tensors."""
    a = math_ops.to_int32(5)
    b = math_ops.to_int32(6)
    value = np.random.rand(11, 11)
    with test_util.device(use_gpu=True):
      result = self.evaluate(array_ops.split(value, [a, b]))
    self.assertAllEqual(result[0], value[0:5, :])
    self.assertAllEqual(result[1], value[5:, :])

  def _RunAndVerifyVariable(self, dtype, large_num_splits=False):
    """Random rank-5 input with random size_splits; verify each slice."""
    # Random dims of rank 5
    shape = np.random.randint(1, 5, size=5)
    split_dim = np.random.randint(-5, 5)
    if large_num_splits:
      num_split = np.random.randint(16, 25)
    else:
      num_split = np.random.randint(2, 8)
    size_splits = np.random.randint(2, 8, num_split, dtype=np.int32)
    shape[split_dim] = np.sum(size_splits)
    inp = self._makeData(shape, dtype)
    with test_util.device(use_gpu=True):
      result = self.evaluate(array_ops.split(inp, size_splits, split_dim))
    slices = [slice(0, x) for x in shape]
    offset = 0
    for i in range(num_split):
      slices[split_dim] = slice(offset, offset + size_splits[i])
      offset += size_splits[i]
      self.assertAllEqual(result[i], inp[slices])

  def _testSpecialCasesVariable(self):
    """Single-piece split and a -1 (inferred) split size."""
    inp = np.random.rand(4, 4).astype("f")
    with test_util.device(use_gpu=True):
      result = self.evaluate(array_ops.split(inp, [4], 0))
      self.assertAllEqual(result[0], inp)
      result = self.evaluate(array_ops.split(inp, [-1, 3], 0))
      self.assertAllEqual(result[0], inp[0:1, :])
      self.assertAllEqual(result[1], inp[1:4, :])

  def _testHugeNumberOfTensorsVariable(self, dtype):
    """Stress test: 1000 output tensors from one split."""
    num_split = 1000
    size_splits = np.random.randint(1, 3, num_split, dtype=np.int32)
    shape = [3, np.sum(size_splits)]
    split_dim = 1
    inp = self._makeData(shape, dtype)
    with test_util.device(use_gpu=True):
      result = self.evaluate(array_ops.split(inp, size_splits, split_dim))
    slices = [slice(0, x) for x in shape]
    offset = 0
    for i in range(num_split):
      slices[split_dim] = slice(offset, offset + size_splits[i])
      offset += size_splits[i]
      self.assertAllEqual(result[i], inp[slices])

  @test_util.run_in_graph_and_eager_modes()
  def testSpecialCasesVariable(self):
    self._testSpecialCasesVariable()
    for dtype in _TEST_DTYPES:
      self._testHugeNumberOfTensorsVariable(dtype)

  def _testGradientsSimpleVariable(self, dtype):
    """Gradients of a [1, 3] size-based split route back to the right columns."""
    inp = self._makeData((4, 4), dtype)
    with test_util.device(use_gpu=True):
      inp_tensor = ops.convert_to_tensor(inp)
      s = array_ops.split(inp_tensor, [1, 3], 1)
      inp_grads = [
          self._makeData((4, 1), dtype), self._makeData((4, 3), dtype)
      ]
      grad_tensors = [constant_op.constant(x) for x in inp_grads]
      grad = gradients_impl.gradients(s, [inp_tensor], grad_tensors)[-1]
      result = self.evaluate(grad)
    self.assertAllEqual(result[:, 0:1], inp_grads[0])
    self.assertAllEqual(result[:, 1:4], inp_grads[1])

  def testOutputShape(self):
    """Static output shapes preserve the unsplit (None) dimension."""
    for axis in [1, -1]:
      with self.test_session(use_gpu=True):
        tensor = array_ops.placeholder(dtypes.float32, shape=[None, 12])
        size_splits = [3, 7, 2]
        outputs = array_ops.split(tensor, size_splits, axis)
        for i, output in enumerate(outputs):
          self.assertEqual(output.get_shape().as_list(), [None, size_splits[i]])

  def _compare(self, x, dim, num):
    """Check an even split of `x` into `num` pieces against np.split."""
    np_ans = np.split(x, num, dim)
    with test_util.device(use_gpu=True):
      tf_ans = array_ops.split(value=x, num_or_size_splits=num, axis=dim)
      out = self.evaluate(tf_ans)
    # NOTE: the original asserted len(np_ans) twice; deduplicated here.
    self.assertEqual(num, len(np_ans))
    self.assertEqual(num, len(out))
    for i in range(num):
      self.assertAllEqual(np_ans[i], out[i])
      self.assertShapeEqual(np_ans[i], tf_ans[i])

  @test_util.run_in_graph_and_eager_modes()
  def testSplitRows(self):
    for dtype in _TEST_DTYPES:
      inp = self._makeData((4, 4), dtype)
      self._compare(inp, 0, 4)

  @test_util.run_in_graph_and_eager_modes()
  def testSplitCols(self):
    for dtype in _TEST_DTYPES:
      inp = self._makeData((4, 4), dtype)
      self._compare(inp, 1, 4)

  def _testEmpty(self, x, dim, num, expected_shape):
    """Splitting an empty tensor yields `num` empty pieces of expected_shape."""
    with test_util.device(use_gpu=True):
      tf_ans = array_ops.split(value=x, num_or_size_splits=num, axis=dim)
      out = self.evaluate(tf_ans)
    self.assertEqual(x.size, 0)
    self.assertEqual(len(out), num)
    for i in range(num):
      self.assertEqual(out[i].shape, expected_shape)
      self.assertEqual(expected_shape, tf_ans[i].get_shape())

  @test_util.run_in_graph_and_eager_modes()
  def testEmpty(self):
    # Note: np.split returns a rank-0 empty ndarray
    # if the input ndarray is empty.
    for dtype in _TEST_DTYPES:
      inp = self._makeData((8, 0, 21), dtype)
      self._testEmpty(inp, 0, 2, (4, 0, 21))
      self._testEmpty(inp, 0, 4, (2, 0, 21))
      self._testEmpty(inp, 1, 4, (8, 0, 21))
      self._testEmpty(inp, 2, 3, (8, 0, 7))
      self._testEmpty(inp, 2, 7, (8, 0, 3))

  @test_util.run_in_graph_and_eager_modes()
  def testIdentity(self):
    for dtype in _TEST_DTYPES:
      inp = self._makeData((2, 2, 2), dtype)
      self._compare(inp, 0, 1)
      self._compare(inp, 1, 1)
      self._compare(inp, 2, 1)

  @test_util.run_in_graph_and_eager_modes()
  def testSplitDim0(self):
    for dtype in _TEST_DTYPES:
      self._compare(self._makeData((6, 10, 18), dtype), 0, 3)
      self._compare(self._makeData((6, 7, 18), dtype), 0, 3)
      self._compare(self._makeData((6, 7, 9), dtype), 0, 3)

  def _RunAndVerify(self, dtype, large_num_splits=False):
    """Random rank-5 input split evenly; verify each slice matches."""
    # Random dims of rank 5
    shape = np.random.randint(0, 5, size=5)
    split_dim = np.random.randint(-5, 5)
    if large_num_splits:
      num_split = np.random.randint(9, 15)
    else:
      num_split = np.random.randint(2, 8)
    shape[split_dim] = np.random.randint(2, 5) * num_split
    inp = self._makeData(shape, dtype)
    with test_util.device(use_gpu=True):
      result = self.evaluate(
          array_ops.split(
              value=inp, num_or_size_splits=num_split, axis=split_dim))
    slices = [slice(0, x) for x in shape]
    offset = 0
    length = shape[split_dim] // num_split
    for i in range(num_split):
      slices[split_dim] = slice(offset, offset + length)
      offset += length
      self.assertAllEqual(result[i], inp[slices])

  @test_util.run_in_graph_and_eager_modes()
  def testRandom(self):
    for dtype in _TEST_DTYPES:
      for _ in range(5):
        self._RunAndVerify(dtype)
        self._RunAndVerify(dtype, large_num_splits=True)
        self._RunAndVerifyVariable(dtype)
        self._RunAndVerifyVariable(dtype, large_num_splits=True)

  def _testGradientsSimple(self, dtype):
    """Gradients of an even 4-way column split route back to each column."""
    inp = self._makeData((4, 4), dtype)
    with self.test_session(use_gpu=True):
      inp_tensor = ops.convert_to_tensor(inp)
      s = array_ops.split(value=inp_tensor, num_or_size_splits=4, axis=1)
      inp_grads = [self._makeData((4, 1), dtype) for _ in range(4)]
      grad_tensors = [constant_op.constant(x) for x in inp_grads]
      grad = gradients_impl.gradients(s, [inp_tensor], grad_tensors)[0]
      result = grad.eval()
    for i in range(4):
      self.assertAllEqual(result[:, i:i + 1], inp_grads[i])

  def testGradientsAll(self):
    for dtype in _TEST_DTYPES:
      self._testGradientsSimple(dtype)
      self._testGradientsSimpleVariable(dtype)

  def testShapeFunctionEdgeCases(self):
    """Error and partial-shape behavior of the split shape function."""
    # split_dim greater than rank of input.
    with self.assertRaises(ValueError):
      array_ops.split(value=[[0, 1], [2, 3]], num_or_size_splits=4, axis=2)

    # split dim less than -(rank of input)
    with self.assertRaises(ValueError):
      array_ops.split(value=[[0, 1], [2, 3]], num_or_size_splits=4, axis=-3)

    # num_split does not evenly divide the size in split_dim.
    with self.assertRaisesRegexp(ValueError, "should evenly divide"):
      array_ops.split(value=[0, 1, 2, 3], num_or_size_splits=3, axis=0)

    # Unknown split_dim.
    splits = array_ops.split(
        value=[[0, 1, 2, 3]],
        num_or_size_splits=4,
        axis=array_ops.placeholder(dtypes.int32))
    for s in splits:
      self.assertEqual([None, None], s.get_shape().as_list())

    # Unknown split_dim and input shape.
    splits = array_ops.split(
        value=array_ops.placeholder(dtypes.float32),
        num_or_size_splits=4,
        axis=array_ops.placeholder(dtypes.int32))
    for s in splits:
      self.assertEqual(None, s.get_shape().ndims)
# Standard TensorFlow test entry point.
if __name__ == "__main__":
  test.main()
| apache-2.0 |
k3nnyfr/s2a_fr-nsis | s2a/Python/Lib/lib-tk/FileDialog.py | 133 | 8831 | """File selection dialog classes.
Classes:
- FileDialog
- LoadFileDialog
- SaveFileDialog
"""
from Tkinter import *
from Dialog import Dialog
import os
import fnmatch
dialogstates = {}
class FileDialog:

    """Standard file selection dialog -- no checks on selected file.

    Usage:

        d = FileDialog(master)
        fname = d.go(dir_or_file, pattern, default, key)
        if fname is None: ...canceled...
        else: ...open file...

    All arguments to go() are optional.

    The 'key' argument specifies a key in the global dictionary
    'dialogstates', which keeps track of the values for the directory
    and pattern arguments, overriding the values passed in (it does
    not keep track of the default argument!). If no key is specified,
    the dialog keeps no memory of previous state. Note that memory is
    kept even when the dialog is canceled. (All this emulates the
    behavior of the Macintosh file selection dialogs.)
    """

    title = "File Selection Dialog"

    def __init__(self, master, title=None):
        # Build the widget tree: filter entry on top, directory and file
        # listboxes in the middle, selection entry and buttons at the bottom.
        if title is None: title = self.title
        self.master = master
        self.directory = None

        self.top = Toplevel(master)
        self.top.title(title)
        self.top.iconname(title)

        self.botframe = Frame(self.top)
        self.botframe.pack(side=BOTTOM, fill=X)

        self.selection = Entry(self.top)
        self.selection.pack(side=BOTTOM, fill=X)
        self.selection.bind('<Return>', self.ok_event)

        self.filter = Entry(self.top)
        self.filter.pack(side=TOP, fill=X)
        self.filter.bind('<Return>', self.filter_command)

        self.midframe = Frame(self.top)
        self.midframe.pack(expand=YES, fill=BOTH)

        self.filesbar = Scrollbar(self.midframe)
        self.filesbar.pack(side=RIGHT, fill=Y)
        self.files = Listbox(self.midframe, exportselection=0,
                             yscrollcommand=(self.filesbar, 'set'))
        self.files.pack(side=RIGHT, expand=YES, fill=BOTH)
        # Rotate bindtags so widget-class bindings run before ours.
        btags = self.files.bindtags()
        self.files.bindtags(btags[1:] + btags[:1])
        self.files.bind('<ButtonRelease-1>', self.files_select_event)
        self.files.bind('<Double-ButtonRelease-1>', self.files_double_event)
        self.filesbar.config(command=(self.files, 'yview'))

        self.dirsbar = Scrollbar(self.midframe)
        self.dirsbar.pack(side=LEFT, fill=Y)
        self.dirs = Listbox(self.midframe, exportselection=0,
                            yscrollcommand=(self.dirsbar, 'set'))
        self.dirs.pack(side=LEFT, expand=YES, fill=BOTH)
        self.dirsbar.config(command=(self.dirs, 'yview'))
        btags = self.dirs.bindtags()
        self.dirs.bindtags(btags[1:] + btags[:1])
        self.dirs.bind('<ButtonRelease-1>', self.dirs_select_event)
        self.dirs.bind('<Double-ButtonRelease-1>', self.dirs_double_event)

        self.ok_button = Button(self.botframe,
                                text="OK",
                                command=self.ok_command)
        self.ok_button.pack(side=LEFT)
        self.filter_button = Button(self.botframe,
                                    text="Filter",
                                    command=self.filter_command)
        self.filter_button.pack(side=LEFT, expand=YES)
        self.cancel_button = Button(self.botframe,
                                    text="Cancel",
                                    command=self.cancel_command)
        self.cancel_button.pack(side=RIGHT)

        self.top.protocol('WM_DELETE_WINDOW', self.cancel_command)
        # XXX Are the following okay for a general audience?
        self.top.bind('<Alt-w>', self.cancel_command)
        self.top.bind('<Alt-W>', self.cancel_command)

    def go(self, dir_or_file=os.curdir, pattern="*", default="", key=None):
        """Run the dialog modally; return the chosen path, or None on cancel."""
        if key and key in dialogstates:
            self.directory, pattern = dialogstates[key]
        else:
            dir_or_file = os.path.expanduser(dir_or_file)
            if os.path.isdir(dir_or_file):
                self.directory = dir_or_file
            else:
                self.directory, default = os.path.split(dir_or_file)
        self.set_filter(self.directory, pattern)
        self.set_selection(default)
        self.filter_command()
        self.selection.focus_set()
        self.top.wait_visibility()  # window needs to be visible for the grab
        self.top.grab_set()
        self.how = None
        self.master.mainloop()  # Exited by self.quit(how)
        if key:
            # Remember directory and pattern for the next dialog with this key.
            directory, pattern = self.get_filter()
            if self.how:
                directory = os.path.dirname(self.how)
            dialogstates[key] = directory, pattern
        self.top.destroy()
        return self.how

    def quit(self, how=None):
        # Record the result and leave the nested mainloop started in go().
        self.how = how
        self.master.quit()  # Exit mainloop()

    def dirs_double_event(self, event):
        # Double-click on a directory: descend into it by re-filtering.
        self.filter_command()

    def dirs_select_event(self, event):
        # Single click on a directory: point the filter entry at it.
        dir, pat = self.get_filter()
        subdir = self.dirs.get('active')
        dir = os.path.normpath(os.path.join(self.directory, subdir))
        self.set_filter(dir, pat)

    def files_double_event(self, event):
        # Double-click on a file acts like pressing OK.
        self.ok_command()

    def files_select_event(self, event):
        # Single click on a file copies its name into the selection entry.
        file = self.files.get('active')
        self.set_selection(file)

    def ok_event(self, event):
        self.ok_command()

    def ok_command(self):
        # Base class accepts any selection; subclasses override to validate.
        self.quit(self.get_selection())

    def filter_command(self, event=None):
        """Re-read the filter entry and repopulate both listboxes."""
        dir, pat = self.get_filter()
        try:
            names = os.listdir(dir)
        except os.error:
            # Unreadable or nonexistent directory: beep and keep old contents.
            self.master.bell()
            return
        self.directory = dir
        self.set_filter(dir, pat)
        names.sort()
        subdirs = [os.pardir]
        matchingfiles = []
        for name in names:
            fullname = os.path.join(dir, name)
            if os.path.isdir(fullname):
                subdirs.append(name)
            elif fnmatch.fnmatch(name, pat):
                matchingfiles.append(name)
        self.dirs.delete(0, END)
        for name in subdirs:
            self.dirs.insert(END, name)
        self.files.delete(0, END)
        for name in matchingfiles:
            self.files.insert(END, name)
        head, tail = os.path.split(self.get_selection())
        if tail == os.curdir: tail = ''
        self.set_selection(tail)

    def get_filter(self):
        """Return (directory, pattern) parsed from the filter entry."""
        filter = self.filter.get()
        filter = os.path.expanduser(filter)
        if filter[-1:] == os.sep or os.path.isdir(filter):
            filter = os.path.join(filter, "*")
        return os.path.split(filter)

    def get_selection(self):
        # The selection entry holds a full path; expand ~ before returning.
        file = self.selection.get()
        file = os.path.expanduser(file)
        return file

    def cancel_command(self, event=None):
        # Leave the dialog with no result (go() returns None).
        self.quit()

    def set_filter(self, dir, pat):
        """Display dir/pat in the filter entry, normalized to an absolute path."""
        if not os.path.isabs(dir):
            try:
                pwd = os.getcwd()
            except os.error:
                pwd = None
            if pwd:
                dir = os.path.join(pwd, dir)
                dir = os.path.normpath(dir)
        self.filter.delete(0, END)
        self.filter.insert(END, os.path.join(dir or os.curdir, pat or "*"))

    def set_selection(self, file):
        # Show the absolute path of `file` within the current directory.
        self.selection.delete(0, END)
        self.selection.insert(END, os.path.join(self.directory, file))
class LoadFileDialog(FileDialog):

    """File selection dialog which only accepts an existing regular file."""

    title = "Load File Selection Dialog"

    def ok_command(self):
        # Accept the selection only when it names an existing regular
        # file; otherwise signal the rejection with an audible bell.
        path = self.get_selection()
        if os.path.isfile(path):
            self.quit(path)
        else:
            self.master.bell()
class SaveFileDialog(FileDialog):

    """File selection dialog which checks that the file may be created."""

    title = "Save File Selection Dialog"

    def ok_command(self):
        """Accept the selection, asking before overwriting an existing file."""
        file = self.get_selection()
        if os.path.exists(file):
            if os.path.isdir(file):
                # A directory can never be overwritten as a file.
                self.master.bell()
                return
            # Existing file: ask for confirmation; d.num == 0 means "Yes".
            d = Dialog(self.top,
                       title="Overwrite Existing File Question",
                       text="Overwrite existing file %r?" % (file,),
                       bitmap='questhead',
                       default=1,
                       strings=("Yes", "Cancel"))
            if d.num != 0:
                return
        else:
            # New file: its parent directory must already exist.
            head, tail = os.path.split(file)
            if not os.path.isdir(head):
                self.master.bell()
                return
        self.quit(file)
def test():
    """Simple test program."""
    # Hide the root window; only the dialogs themselves are shown.
    root = Tk()
    root.withdraw()
    fd = LoadFileDialog(root)
    loadfile = fd.go(key="test")
    fd = SaveFileDialog(root)
    savefile = fd.go(key="test")
    print loadfile, savefile  # Python 2 print statement


if __name__ == '__main__':
    test()
| gpl-3.0 |
darkleons/odoo | addons/base_report_designer/plugin/openerp_report_designer/bin/script/LoginTest.py | 384 | 1320 | #########################################################################
#
# Copyright (c) 2003-2004 Danny Brewer d29583@groovegarden.com
# Copyright (C) 2004-2010 OpenERP SA (<http://openerp.com>).
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# See: http://www.gnu.org/licenses/lgpl.html
#
#############################################################################
# When loaded as a plain script/module (i.e. not bundled as "package"),
# pull in the server-parameter dialog and GUI globals used below.
# NOTE: "<>" is the Python 2 inequality operator; this file targets Python 2.
if __name__<>"package":
    from ServerParameter import *
    from lib.gui import *
class LoginTest:
def __init__(self):
if not loginstatus:
Change(None)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
zfergus2/Seam-Minimization | src/null_space_method.py | 2 | 2891 | # -*- coding: utf-8 -*-
"""
Use the affine null space to perform A Priori Lexicographical Multi-Objective
Optimization.
Written by Zachary Ferguson and Alec Jacobson.
"""
import pdb
import numpy
import scipy.sparse
from affine_null_space import affine_null_space
def NullSpaceMethod(H, f, method="qr", bounds=None):
    """
    LEXMIN Solve the multi-objective minimization problem:

        min  {E1(x), E2(x), ..., Ek(x)}
         x

    where

        Ei = 0.5 * x.T * H[i] * x + x.T * f[i]

    and Ei is deemed "more important" than Ei+1 (lexicographical ordering):
    https://en.wikipedia.org/wiki/Multi-objective_optimization#A_priori_methods

    Inputs:
        H - k-cell list of n by n sparse matrices, so that H[i] contains the
            quadratic coefficients of the ith energy.
        f - k-cell list of n by 1 vectors, so that f[i] contains the linear
            coefficients of the ith energy.
        method - factorization passed through to affine_null_space
            ("qr" by default).
        bounds - optional [lower, upper] pair of bound vectors.
            NOTE(review): this list is mutated in place as partial solutions
            are accumulated, so the caller's object changes - confirm this
            side effect is intended.

    Outputs:
        Z - n by 1 solution vector
    """
    # import scipy.io
    # scipy.io.savemat( "NullSpaceMethod.mat", { 'H': H, 'f': f } )
    # print( "Saved: NullSpaceMethod.mat" )

    k = len(H)
    assert k > 0
    assert k == len(f)
    n = H[0].shape[0]
    assert n == H[0].shape[1]
    assert n == f[0].shape[0]

    # Start with "full" search space and 0s as feasible solution
    # N = 1;% aka speye(n,n)
    N = scipy.sparse.identity(n)
    # Z = zeros(n,1);
    Z = numpy.zeros(f[0].shape)

    # Process the energies in priority order, shrinking the search space to
    # the null space of each energy before considering the next one.
    for Hi, fi in zip(H, f):
        # Original ith energy: 0.5 * x.T * Hi * x + x.T * fi
        # Restrict to running affine subspace, by abuse of notation:
        #     x = N*y + z
        # fi = N' * (Hi * Z + fi)
        fi = N.T.dot(Hi.dot(Z) + fi)
        # Hi = N'*Hi*N
        Hi = N.T.dot(Hi.dot(N))

        # Sparse factorization:
        # Ni is the null space of Hi
        # xi is a particular solution to Hi * x = -fi
        Ni, xi = affine_null_space(Hi, -fi, method=method, bounds=bounds)
        # Ensure xi is a column vector so the updates below broadcast.
        if(len(xi.shape) < 2):
            xi = xi.reshape(-1, 1)

        # Update feasible solution
        Z = N.dot(xi) + Z

        # If Z is fully determined (empty null space), exit loop early
        if(Ni.shape[1] == 0):
            break

        # Otherwise, N spans the null space of Hi
        N = N.dot(Ni)

        # Update the bounds
        # TODO: Solve for the bounds
        if not (bounds is None):
            # bounds = (0 - x0 - x1 - ... - xi, 1 - x0 - x1 - ... - xi)
            bounds[0] -= xi
            bounds[1] -= xi

    # (If i<k then) the feasible solution Z is now the unique solution.
    # E = numpy.zeros((k, f[0].shape[1]))
    # for i in range(k):
    #     Hi, fi = H[i], f[i]
    #     # E(i) = 0.5*(Z'*(H{i}*Z)) + Z'*f{i};
    #     E[i] = (0.5 * (Z.T.dot(Hi.dot(Z))) + Z.T.dot(fi)).diagonal()
    # return Z, E
    return Z
| mit |
matt-kwong/grpc | tools/run_tests/python_utils/report_utils.py | 11 | 5434 | # Copyright 2015 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Generate XML and HTML test reports."""
from __future__ import print_function
try:
from mako.runtime import Context
from mako.template import Template
from mako import exceptions
except (ImportError):
pass # Mako not installed but it is ok.
import datetime
import os
import string
import xml.etree.cElementTree as ET
import six
def _filter_msg(msg, output_format):
"""Filters out nonprintable and illegal characters from the message."""
if output_format in ['XML', 'HTML']:
# keep whitespaces but remove formfeed and vertical tab characters
# that make XML report unparseable.
filtered_msg = filter(
lambda x: x in string.printable and x != '\f' and x != '\v',
msg.decode('UTF-8', 'ignore'))
if output_format == 'HTML':
filtered_msg = filtered_msg.replace('"', '"')
return filtered_msg
else:
return msg
def new_junit_xml_tree():
    """Return a fresh JUnit-style XML tree with an empty <testsuites> root."""
    root = ET.Element('testsuites')
    return ET.ElementTree(root)
def render_junit_xml_report(resultset, report_file, suite_package='grpc',
                            suite_name='tests'):
    """Generate JUnit-like XML report.

    Builds a new <testsuites> tree, appends `resultset` as one suite with
    id '1', and writes the tree to `report_file` (creating directories
    as needed).
    """
    tree = new_junit_xml_tree()
    append_junit_xml_results(tree, resultset, suite_package, suite_name, '1')
    create_xml_report_file(tree, report_file)
def create_xml_report_file(tree, report_file):
    """Serialize `tree` to `report_file`, creating parent dirs on demand."""
    # Make sure the directory that will hold the report exists first.
    target_dir = os.path.dirname(os.path.abspath(report_file))
    if not os.path.exists(target_dir):
        os.makedirs(target_dir)
    tree.write(report_file, encoding='UTF-8')
def append_junit_xml_results(tree, resultset, suite_package, suite_name, id):
    """Append a JUnit-like XML report tree with test results as a new suite.

    Args:
      tree: ElementTree whose root is a <testsuites> element.
      resultset: dict mapping test shortname -> list of result objects; each
        result exposes `state`, `message` and (optionally) `elapsed_time`.
      suite_package: value for the suite's `package` attribute.
      suite_name: value for the suite's `name` attribute.
      id: string id for the new <testsuite> element.
    """
    testsuite = ET.SubElement(tree.getroot(), 'testsuite',
                              id=id, package=suite_package, name=suite_name,
                              timestamp=datetime.datetime.now().isoformat())
    failure_count = 0
    error_count = 0
    # dict.items() behaves correctly on both Python 2 and 3; six.iteritems
    # is unnecessary here.
    for shortname, results in resultset.items():
        for result in results:
            xml_test = ET.SubElement(testsuite, 'testcase', name=shortname)
            if result.elapsed_time:
                xml_test.set('time', str(result.elapsed_time))
            filtered_msg = _filter_msg(result.message, 'XML')
            if result.state == 'FAILED':
                ET.SubElement(xml_test, 'failure', message='Failure').text = filtered_msg
                failure_count += 1
            elif result.state == 'TIMEOUT':
                ET.SubElement(xml_test, 'error', message='Timeout').text = filtered_msg
                error_count += 1
            elif result.state == 'SKIPPED':
                ET.SubElement(xml_test, 'skipped', message='Skipped')
    # Aggregate counts go on the suite element itself, per JUnit convention.
    testsuite.set('failures', str(failure_count))
    testsuite.set('errors', str(error_count))
def render_interop_html_report(
        client_langs, server_langs, test_cases, auth_test_cases, http2_cases,
        http2_server_cases, resultset,
        num_failures, cloud_to_prod, prod_servers, http2_interop):
    """Generate HTML report for interop tests.

    Renders the Mako template at a repo-relative path into
    reports/index.html. Silently skips rendering when Mako is not
    installed or the template file cannot be found.
    """
    template_file = 'tools/run_tests/interop/interop_html_report.template'
    try:
        mytemplate = Template(filename=template_file, format_exceptions=True)
    except NameError:
        # `Template` is undefined when the optional Mako import at module
        # scope failed; HTML reporting is best-effort.
        print('Mako template is not installed. Skipping HTML report generation.')
        return
    except IOError as e:
        print('Failed to find the template %s: %s' % (template_file, e))
        return

    # Sort everything so the rendered tables are deterministic.
    sorted_test_cases = sorted(test_cases)
    sorted_auth_test_cases = sorted(auth_test_cases)
    sorted_http2_cases = sorted(http2_cases)
    sorted_http2_server_cases = sorted(http2_server_cases)
    sorted_client_langs = sorted(client_langs)
    sorted_server_langs = sorted(server_langs)
    sorted_prod_servers = sorted(prod_servers)

    args = {'client_langs': sorted_client_langs,
            'server_langs': sorted_server_langs,
            'test_cases': sorted_test_cases,
            'auth_test_cases': sorted_auth_test_cases,
            'http2_cases': sorted_http2_cases,
            'http2_server_cases': sorted_http2_server_cases,
            'resultset': resultset,
            'num_failures': num_failures,
            'cloud_to_prod': cloud_to_prod,
            'prod_servers': sorted_prod_servers,
            'http2_interop': http2_interop}

    html_report_out_dir = 'reports'
    if not os.path.exists(html_report_out_dir):
        os.mkdir(html_report_out_dir)
    html_file_path = os.path.join(html_report_out_dir, 'index.html')
    try:
        with open(html_file_path, 'w') as output_file:
            mytemplate.render_context(Context(output_file, **args))
    except:
        # Deliberately broad: print Mako's rich traceback, then re-raise.
        print(exceptions.text_error_template().render())
        raise
def render_perf_profiling_results(output_filepath, profile_names):
    """Write a minimal HTML bullet list linking to each profile by name."""
    pieces = ['<ul>\n']
    pieces.extend('<li><a href=%s>%s</a></li>\n' % (name, name)
                  for name in profile_names)
    pieces.append('</ul>\n')
    with open(output_filepath, 'w') as output_file:
        output_file.writelines(pieces)
| apache-2.0 |
jonyroda97/redbot-amigosprovaveis | lib/numpy/ma/tests/test_subclassing.py | 14 | 13959 | # pylint: disable-msg=W0611, W0612, W0511,R0201
"""Tests suite for MaskedArray & subclassing.
:author: Pierre Gerard-Marchant
:contact: pierregm_at_uga_dot_edu
:version: $Id: test_subclassing.py 3473 2007-10-29 15:18:13Z jarrod.millman $
"""
from __future__ import division, absolute_import, print_function
import numpy as np
from numpy.testing import TestCase, run_module_suite, assert_raises, dec
from numpy.ma.testutils import assert_equal
from numpy.ma.core import (
array, arange, masked, MaskedArray, masked_array, log, add, hypot,
divide, asarray, asanyarray, nomask
)
# from numpy.ma.core import (
class SubArray(np.ndarray):
    # Defines a generic np.ndarray subclass, that stores some metadata
    # in the dictionary `info`.

    def __new__(cls, arr, info={}):
        # The mutable default is safe only because it is copied immediately.
        x = np.asanyarray(arr).view(cls)
        x.info = info.copy()
        return x

    def __array_finalize__(self, obj):
        # Chain up only when a parent class actually defines the hook.
        if callable(getattr(super(SubArray, self),
                            '__array_finalize__', None)):
            super(SubArray, self).__array_finalize__(obj)
        # Propagate a copy of the source array's metadata (empty if absent).
        self.info = getattr(obj, 'info', {}).copy()
        return

    def __add__(self, other):
        # Track how many times addition produced this result, in its info.
        result = super(SubArray, self).__add__(other)
        result.info['added'] = result.info.get('added', 0) + 1
        return result

    def __iadd__(self, other):
        # Same bookkeeping for in-place addition, under a separate key.
        result = super(SubArray, self).__iadd__(other)
        result.info['iadded'] = result.info.get('iadded', 0) + 1
        return result


# Lowercase alias used throughout the tests below.
subarray = SubArray
class SubMaskedArray(MaskedArray):
    """Pure subclass of MaskedArray, keeping some info on subclass."""

    def __new__(cls, info=None, **kwargs):
        # Stash the extra metadata in MaskedArray's _optinfo dict so it
        # survives the operations MaskedArray propagates _optinfo through.
        instance = super(SubMaskedArray, cls).__new__(cls, **kwargs)
        instance._optinfo['info'] = info
        return instance
class MSubArray(SubArray, MaskedArray):
    # Masked version of SubArray: carries both a mask and the `info` dict.

    def __new__(cls, data, info={}, mask=nomask):
        subarr = SubArray(data, info)
        _data = MaskedArray.__new__(cls, data=subarr, mask=mask)
        # Mirror the SubArray metadata on the masked wrapper itself.
        _data.info = subarr.info
        return _data

    def _get_series(self):
        # Plain MaskedArray view with its own (unshared) mask.
        _view = self.view(MaskedArray)
        _view._sharedmask = False
        return _view

    _series = property(fget=_get_series)


# Lowercase alias used throughout the tests below.
msubarray = MSubArray
class MMatrix(MaskedArray, np.matrix,):
    # Masked np.matrix hybrid used to test subclass preservation.
    # NOTE(review): np.matrix is deprecated in modern NumPy - this class
    # exists to exercise legacy multiple-inheritance behavior.

    def __new__(cls, data, mask=nomask):
        mat = np.matrix(data)
        _data = MaskedArray.__new__(cls, data=mat, mask=mask)
        return _data

    def __array_finalize__(self, obj):
        # Run both parents' finalizers; matrix first, then MaskedArray.
        np.matrix.__array_finalize__(self, obj)
        MaskedArray.__array_finalize__(self, obj)
        return

    def _get_series(self):
        # Plain MaskedArray view with its own (unshared) mask.
        _view = self.view(MaskedArray)
        _view._sharedmask = False
        return _view

    _series = property(fget=_get_series)


# Lowercase alias used throughout the tests below.
mmatrix = MMatrix
# Also a subclass that overrides __str__, __repr__ and __setitem__, disallowing
# setting to non-class values (and thus np.ma.core.masked_print_option)
# and overrides __array_wrap__, updating the info dict, to check that this
# doesn't get destroyed by MaskedArray._update_from. But this one also needs
# its own iterator...
class CSAIterator(object):
    """
    Flat iterator object that uses its own setter/getter
    (works around ndarray.flat not propagating subclass setters/getters
    see https://github.com/numpy/numpy/issues/4564)

    roughly following MaskedIterator
    """
    def __init__(self, a):
        # Keep the owning array so results can be re-wrapped in its type.
        self._original = a
        self._dataiter = a.view(np.ndarray).flat

    def __iter__(self):
        return self

    def __getitem__(self, indx):
        out = self._dataiter.__getitem__(indx)
        if not isinstance(out, np.ndarray):
            # Scalars come back as numpy scalars; promote to 0-d array first.
            out = out.__array__()
        out = out.view(type(self._original))
        return out

    def __setitem__(self, index, value):
        # Route writes through the owner's validation hook.
        self._dataiter[index] = self._original._validate_input(value)

    def __next__(self):
        return next(self._dataiter).__array__().view(type(self._original))

    # Python 2 spelling of the iterator protocol.
    next = __next__
class ComplicatedSubArray(SubArray):
    # SubArray variant that overrides __str__/__repr__/__setitem__ and
    # __array_wrap__, to verify MaskedArray does not clobber them.

    def __str__(self):
        return 'myprefix {0} mypostfix'.format(self.view(SubArray))

    def __repr__(self):
        # Return a repr that does not start with 'name('
        return '<{0} {1}>'.format(self.__class__.__name__, self)

    def _validate_input(self, value):
        # Only same-class values may be assigned (this is what makes
        # assignment of masked_print_option fail in the tests).
        if not isinstance(value, ComplicatedSubArray):
            raise ValueError("Can only set to MySubArray values")
        return value

    def __setitem__(self, item, value):
        # validation ensures direct assignment with ndarray or
        # masked_print_option will fail
        super(ComplicatedSubArray, self).__setitem__(
            item, self._validate_input(value))

    def __getitem__(self, item):
        # ensure getter returns our own class also for scalars
        value = super(ComplicatedSubArray, self).__getitem__(item)
        if not isinstance(value, np.ndarray):  # scalar
            value = value.__array__().view(ComplicatedSubArray)
        return value

    @property
    def flat(self):
        # Custom flat iterator that preserves this subclass (see CSAIterator).
        return CSAIterator(self)

    @flat.setter
    def flat(self, value):
        y = self.ravel()
        y[:] = value

    def __array_wrap__(self, obj, context=None):
        obj = super(ComplicatedSubArray, self).__array_wrap__(obj, context)
        # Record multiplications in the info dict to prove the wrap survives
        # MaskedArray._update_from.
        if context is not None and context[0] is np.multiply:
            obj.info['multiplied'] = obj.info.get('multiplied', 0) + 1
        return obj
class TestSubclassing(TestCase):
    # Test suite for masked subclasses of ndarray.
    # NOTE(review): uses the legacy numpy.testing.TestCase base, removed in
    # modern NumPy - confirm the pinned NumPy version before upgrading.

    def setUp(self):
        x = np.arange(5, dtype='float')
        mx = mmatrix(x, mask=[0, 1, 0, 0, 0])
        self.data = (x, mx)

    def test_data_subclassing(self):
        # Tests whether the subclass is kept.
        x = np.arange(5)
        m = [0, 0, 1, 0, 0]
        xsub = SubArray(x)
        xmsub = masked_array(xsub, mask=m)
        self.assertTrue(isinstance(xmsub, MaskedArray))
        assert_equal(xmsub._data, xsub)
        self.assertTrue(isinstance(xmsub._data, SubArray))

    def test_maskedarray_subclassing(self):
        # Tests subclassing MaskedArray
        (x, mx) = self.data
        self.assertTrue(isinstance(mx._data, np.matrix))

    def test_masked_unary_operations(self):
        # Tests masked_unary_operation
        (x, mx) = self.data
        with np.errstate(divide='ignore'):
            self.assertTrue(isinstance(log(mx), mmatrix))
            assert_equal(log(x), np.log(x))

    def test_masked_binary_operations(self):
        # Tests masked_binary_operation
        (x, mx) = self.data
        # Result should be a mmatrix
        self.assertTrue(isinstance(add(mx, mx), mmatrix))
        self.assertTrue(isinstance(add(mx, x), mmatrix))
        # Result should work
        assert_equal(add(mx, x), mx+x)
        self.assertTrue(isinstance(add(mx, mx)._data, np.matrix))
        self.assertTrue(isinstance(add.outer(mx, mx), mmatrix))
        self.assertTrue(isinstance(hypot(mx, mx), mmatrix))
        self.assertTrue(isinstance(hypot(mx, x), mmatrix))

    def test_masked_binary_operations2(self):
        # Tests domained_masked_binary_operation
        (x, mx) = self.data
        xmx = masked_array(mx.data.__array__(), mask=mx.mask)
        self.assertTrue(isinstance(divide(mx, mx), mmatrix))
        self.assertTrue(isinstance(divide(mx, x), mmatrix))
        assert_equal(divide(mx, mx), divide(xmx, xmx))

    def test_attributepropagation(self):
        # Verify that the `info` metadata follows (or not) through operations.
        x = array(arange(5), mask=[0]+[1]*4)
        my = masked_array(subarray(x))
        ym = msubarray(x)
        #
        z = (my+1)
        self.assertTrue(isinstance(z, MaskedArray))
        self.assertTrue(not isinstance(z, MSubArray))
        self.assertTrue(isinstance(z._data, SubArray))
        assert_equal(z._data.info, {})
        #
        z = (ym+1)
        self.assertTrue(isinstance(z, MaskedArray))
        self.assertTrue(isinstance(z, MSubArray))
        self.assertTrue(isinstance(z._data, SubArray))
        self.assertTrue(z._data.info['added'] > 0)
        # Test that inplace methods from data get used (gh-4617)
        ym += 1
        self.assertTrue(isinstance(ym, MaskedArray))
        self.assertTrue(isinstance(ym, MSubArray))
        self.assertTrue(isinstance(ym._data, SubArray))
        self.assertTrue(ym._data.info['iadded'] > 0)
        #
        ym._set_mask([1, 0, 0, 0, 1])
        assert_equal(ym._mask, [1, 0, 0, 0, 1])
        ym._series._set_mask([0, 0, 0, 0, 1])
        assert_equal(ym._mask, [0, 0, 0, 0, 1])
        #
        xsub = subarray(x, info={'name':'x'})
        mxsub = masked_array(xsub)
        self.assertTrue(hasattr(mxsub, 'info'))
        assert_equal(mxsub.info, xsub.info)

    def test_subclasspreservation(self):
        # Checks that masked_array(...,subok=True) preserves the class.
        x = np.arange(5)
        m = [0, 0, 1, 0, 0]
        xinfo = [(i, j) for (i, j) in zip(x, m)]
        xsub = MSubArray(x, mask=m, info={'xsub':xinfo})
        #
        mxsub = masked_array(xsub, subok=False)
        self.assertTrue(not isinstance(mxsub, MSubArray))
        self.assertTrue(isinstance(mxsub, MaskedArray))
        assert_equal(mxsub._mask, m)
        #
        mxsub = asarray(xsub)
        self.assertTrue(not isinstance(mxsub, MSubArray))
        self.assertTrue(isinstance(mxsub, MaskedArray))
        assert_equal(mxsub._mask, m)
        #
        mxsub = masked_array(xsub, subok=True)
        self.assertTrue(isinstance(mxsub, MSubArray))
        assert_equal(mxsub.info, xsub.info)
        assert_equal(mxsub._mask, xsub._mask)
        #
        mxsub = asanyarray(xsub)
        self.assertTrue(isinstance(mxsub, MSubArray))
        assert_equal(mxsub.info, xsub.info)
        assert_equal(mxsub._mask, m)

    def test_subclass_items(self):
        """test that getter and setter go via baseclass"""
        x = np.arange(5)
        xcsub = ComplicatedSubArray(x)
        mxcsub = masked_array(xcsub, mask=[True, False, True, False, False])
        # getter should return a ComplicatedSubArray, even for single item
        # first check we wrote ComplicatedSubArray correctly
        self.assertTrue(isinstance(xcsub[1], ComplicatedSubArray))
        self.assertTrue(isinstance(xcsub[1,...], ComplicatedSubArray))
        self.assertTrue(isinstance(xcsub[1:4], ComplicatedSubArray))
        # now that it propagates inside the MaskedArray
        self.assertTrue(isinstance(mxcsub[1], ComplicatedSubArray))
        self.assertTrue(isinstance(mxcsub[1,...].data, ComplicatedSubArray))
        self.assertTrue(mxcsub[0] is masked)
        self.assertTrue(isinstance(mxcsub[0,...].data, ComplicatedSubArray))
        self.assertTrue(isinstance(mxcsub[1:4].data, ComplicatedSubArray))
        # also for flattened version (which goes via MaskedIterator)
        self.assertTrue(isinstance(mxcsub.flat[1].data, ComplicatedSubArray))
        self.assertTrue(mxcsub.flat[0] is masked)
        self.assertTrue(isinstance(mxcsub.flat[1:4].base, ComplicatedSubArray))
        # setter should only work with ComplicatedSubArray input
        # first check we wrote ComplicatedSubArray correctly
        assert_raises(ValueError, xcsub.__setitem__, 1, x[4])
        # now that it propagates inside the MaskedArray
        assert_raises(ValueError, mxcsub.__setitem__, 1, x[4])
        assert_raises(ValueError, mxcsub.__setitem__, slice(1, 4), x[1:4])
        mxcsub[1] = xcsub[4]
        mxcsub[1:4] = xcsub[1:4]
        # also for flattened version (which goes via MaskedIterator)
        assert_raises(ValueError, mxcsub.flat.__setitem__, 1, x[4])
        assert_raises(ValueError, mxcsub.flat.__setitem__, slice(1, 4), x[1:4])
        mxcsub.flat[1] = xcsub[4]
        mxcsub.flat[1:4] = xcsub[1:4]

    def test_subclass_nomask_items(self):
        # Same getter checks as above, but with no mask set at all.
        x = np.arange(5)
        xcsub = ComplicatedSubArray(x)
        mxcsub_nomask = masked_array(xcsub)
        self.assertTrue(isinstance(mxcsub_nomask[1,...].data, ComplicatedSubArray))
        self.assertTrue(isinstance(mxcsub_nomask[0,...].data, ComplicatedSubArray))
        self.assertTrue(isinstance(mxcsub_nomask[1], ComplicatedSubArray))
        self.assertTrue(isinstance(mxcsub_nomask[0], ComplicatedSubArray))

    def test_subclass_repr(self):
        """test that repr uses the name of the subclass
        and 'array' for np.ndarray"""
        x = np.arange(5)
        mx = masked_array(x, mask=[True, False, True, False, False])
        self.assertTrue(repr(mx).startswith('masked_array'))
        xsub = SubArray(x)
        mxsub = masked_array(xsub, mask=[True, False, True, False, False])
        self.assertTrue(repr(mxsub).startswith(
            'masked_{0}(data = [-- 1 -- 3 4]'.format(SubArray.__name__)))

    def test_subclass_str(self):
        """test str with subclass that has overridden str, setitem"""
        # first without override
        x = np.arange(5)
        xsub = SubArray(x)
        mxsub = masked_array(xsub, mask=[True, False, True, False, False])
        self.assertTrue(str(mxsub) == '[-- 1 -- 3 4]')
        xcsub = ComplicatedSubArray(x)
        assert_raises(ValueError, xcsub.__setitem__, 0,
                      np.ma.core.masked_print_option)
        mxcsub = masked_array(xcsub, mask=[True, False, True, False, False])
        self.assertTrue(str(mxcsub) == 'myprefix [-- 1 -- 3 4] mypostfix')

    def test_pure_subclass_info_preservation(self):
        # Test that ufuncs and methods conserve extra information consistently;
        # see gh-7122.
        arr1 = SubMaskedArray('test', data=[1,2,3,4,5,6])
        arr2 = SubMaskedArray(data=[0,1,2,3,4,5])
        diff1 = np.subtract(arr1, arr2)
        self.assertTrue('info' in diff1._optinfo)
        self.assertTrue(diff1._optinfo['info'] == 'test')
        diff2 = arr1 - arr2
        self.assertTrue('info' in diff2._optinfo)
        self.assertTrue(diff2._optinfo['info'] == 'test')
self.assertTrue(diff2._optinfo['info'] == 'test')
###############################################################################
if __name__ == '__main__':
    # Allow running this test module standalone via numpy's legacy runner.
    run_module_suite()
| gpl-3.0 |
hynekcer/django | tests/template_tests/syntax_tests/test_width_ratio.py | 342 | 6095 | from django.template import TemplateSyntaxError
from django.test import SimpleTestCase
from django.utils import six
from ..utils import setup
class WidthRatioTagTests(SimpleTestCase):
    """Exercise Django's {% widthratio %} tag: rounding, zero/None handling,
    variable max_width, filters, and `as var` assignment syntax."""

    libraries = {'custom': 'template_tests.templatetags.custom'}

    @setup({'widthratio01': '{% widthratio a b 0 %}'})
    def test_widthratio01(self):
        output = self.engine.render_to_string('widthratio01', {'a': 50, 'b': 100})
        self.assertEqual(output, '0')

    @setup({'widthratio02': '{% widthratio a b 100 %}'})
    def test_widthratio02(self):
        # 0/0 is defined as 0 rather than an error.
        output = self.engine.render_to_string('widthratio02', {'a': 0, 'b': 0})
        self.assertEqual(output, '0')

    @setup({'widthratio03': '{% widthratio a b 100 %}'})
    def test_widthratio03(self):
        output = self.engine.render_to_string('widthratio03', {'a': 0, 'b': 100})
        self.assertEqual(output, '0')

    @setup({'widthratio04': '{% widthratio a b 100 %}'})
    def test_widthratio04(self):
        output = self.engine.render_to_string('widthratio04', {'a': 50, 'b': 100})
        self.assertEqual(output, '50')

    @setup({'widthratio05': '{% widthratio a b 100 %}'})
    def test_widthratio05(self):
        output = self.engine.render_to_string('widthratio05', {'a': 100, 'b': 100})
        self.assertEqual(output, '100')

    @setup({'widthratio06': '{% widthratio a b 100 %}'})
    def test_widthratio06(self):
        """
        62.5 should round to 63 on Python 2 and 62 on Python 3
        See http://docs.python.org/py3k/whatsnew/3.0.html
        """
        output = self.engine.render_to_string('widthratio06', {'a': 50, 'b': 80})
        self.assertEqual(output, '62' if six.PY3 else '63')

    @setup({'widthratio07': '{% widthratio a b 100 %}'})
    def test_widthratio07(self):
        """
        71.4 should round to 71
        """
        output = self.engine.render_to_string('widthratio07', {'a': 50, 'b': 70})
        self.assertEqual(output, '71')

    # Raise exception if we don't have 3 args, last one an integer
    @setup({'widthratio08': '{% widthratio %}'})
    def test_widthratio08(self):
        with self.assertRaises(TemplateSyntaxError):
            self.engine.get_template('widthratio08')

    @setup({'widthratio09': '{% widthratio a b %}'})
    def test_widthratio09(self):
        with self.assertRaises(TemplateSyntaxError):
            self.engine.render_to_string('widthratio09', {'a': 50, 'b': 100})

    @setup({'widthratio10': '{% widthratio a b 100.0 %}'})
    def test_widthratio10(self):
        output = self.engine.render_to_string('widthratio10', {'a': 50, 'b': 100})
        self.assertEqual(output, '50')

    @setup({'widthratio11': '{% widthratio a b c %}'})
    def test_widthratio11(self):
        """
        #10043: widthratio should allow max_width to be a variable
        """
        output = self.engine.render_to_string('widthratio11', {'a': 50, 'c': 100, 'b': 100})
        self.assertEqual(output, '50')

    # #18739: widthratio should handle None args consistently with
    # non-numerics
    @setup({'widthratio12a': '{% widthratio a b c %}'})
    def test_widthratio12a(self):
        output = self.engine.render_to_string('widthratio12a', {'a': 'a', 'c': 100, 'b': 100})
        self.assertEqual(output, '')

    @setup({'widthratio12b': '{% widthratio a b c %}'})
    def test_widthratio12b(self):
        output = self.engine.render_to_string('widthratio12b', {'a': None, 'c': 100, 'b': 100})
        self.assertEqual(output, '')

    @setup({'widthratio13a': '{% widthratio a b c %}'})
    def test_widthratio13a(self):
        output = self.engine.render_to_string('widthratio13a', {'a': 0, 'c': 100, 'b': 'b'})
        self.assertEqual(output, '')

    @setup({'widthratio13b': '{% widthratio a b c %}'})
    def test_widthratio13b(self):
        output = self.engine.render_to_string('widthratio13b', {'a': 0, 'c': 100, 'b': None})
        self.assertEqual(output, '')

    @setup({'widthratio14a': '{% widthratio a b c %}'})
    def test_widthratio14a(self):
        # A non-numeric max_width is an error (unlike non-numeric a/b above).
        with self.assertRaises(TemplateSyntaxError):
            self.engine.render_to_string('widthratio14a', {'a': 0, 'c': 'c', 'b': 100})

    @setup({'widthratio14b': '{% widthratio a b c %}'})
    def test_widthratio14b(self):
        with self.assertRaises(TemplateSyntaxError):
            self.engine.render_to_string('widthratio14b', {'a': 0, 'c': None, 'b': 100})

    @setup({'widthratio15': '{% load custom %}{% widthratio a|noop:"x y" b 0 %}'})
    def test_widthratio15(self):
        """
        Test whitespace in filter argument
        """
        output = self.engine.render_to_string('widthratio15', {'a': 50, 'b': 100})
        self.assertEqual(output, '0')

    # Widthratio with variable assignment
    @setup({'widthratio16': '{% widthratio a b 100 as variable %}-{{ variable }}-'})
    def test_widthratio16(self):
        output = self.engine.render_to_string('widthratio16', {'a': 50, 'b': 100})
        self.assertEqual(output, '-50-')

    @setup({'widthratio17': '{% widthratio a b 100 as variable %}-{{ variable }}-'})
    def test_widthratio17(self):
        output = self.engine.render_to_string('widthratio17', {'a': 100, 'b': 100})
        self.assertEqual(output, '-100-')

    @setup({'widthratio18': '{% widthratio a b 100 as %}'})
    def test_widthratio18(self):
        with self.assertRaises(TemplateSyntaxError):
            self.engine.get_template('widthratio18')

    @setup({'widthratio19': '{% widthratio a b 100 not_as variable %}'})
    def test_widthratio19(self):
        with self.assertRaises(TemplateSyntaxError):
            self.engine.get_template('widthratio19')

    @setup({'widthratio20': '{% widthratio a b 100 %}'})
    def test_widthratio20(self):
        # Infinite operands render as empty output rather than raising.
        output = self.engine.render_to_string('widthratio20', {'a': float('inf'), 'b': float('inf')})
        self.assertEqual(output, '')

    @setup({'widthratio21': '{% widthratio a b 100 %}'})
    def test_widthratio21(self):
        output = self.engine.render_to_string('widthratio21', {'a': float('inf'), 'b': 2})
        self.assertEqual(output, '')
| bsd-3-clause |
opentrials/processors | tests/processors/base/writers/test_source.py | 2 | 1581 | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import processors.base.writers as writers
class TestSourceWriter(object):
    """Tests for writers.write_source: records with malformed URLs are
    skipped (writer returns None); valid or absent URLs are written."""

    def test_skips_source_with_invalid_url(self, conn):
        # An unparseable source_url must cause the writer to skip the record.
        source = {
            'id': 'id',
            'name': 'name',
            'type': 'other',
            'source_url': 'invalid_url',
            'terms_and_conditions_url': 'http://example.org',
        }
        assert writers.write_source(conn, source) is None

    def test_skips_source_with_invalid_terms_and_conditions_url(self, conn):
        # Same skip behavior when only the T&C URL is malformed.
        source = {
            'id': 'id',
            'name': 'name',
            'type': 'other',
            'source_url': 'http://example.org',
            'terms_and_conditions_url': 'invalid_url',
        }
        assert writers.write_source(conn, source) is None

    def test_writes_source_with_valid_source_url_and_terms_and_conditions_url(self, conn):
        source = {
            'id': 'id',
            'name': 'name',
            'type': 'other',
            'source_url': 'https://clinicaltrials.gov',
            'terms_and_conditions_url': 'https://clinicaltrials.gov/ct2/about-site/terms-conditions',
        }
        assert writers.write_source(conn, source) is not None

    def test_writes_source_without_urls(self, conn):
        # Both URL fields are optional; their absence is not an error.
        source = {
            'id': 'id',
            'name': 'name',
            'type': 'other',
        }
        assert writers.write_source(conn, source) is not None
| mit |
kawamon/hue | desktop/core/ext-py/boto-2.46.1/boto/mturk/layoutparam.py | 170 | 2045 | # Copyright (c) 2008 Chris Moyer http://coredumped.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
class LayoutParameters(object):
    """Collection of HIT layout parameters, flattened into the
    `HITLayoutParameter.N.*` request fields expected by the MTurk API."""

    def __init__(self, layoutParameters=None):
        # Create a fresh list per instance to avoid a shared mutable default.
        self.layoutParameters = [] if layoutParameters is None else layoutParameters

    def add(self, req):
        """Append a single LayoutParameter to the collection."""
        self.layoutParameters.append(req)

    def get_as_params(self):
        """Return all parameters as a flat request-parameter dict."""
        # The MTurk API caps a HIT at 25 layout parameters.
        assert(len(self.layoutParameters) <= 25)
        params = {}
        # Parameter indices in the request are 1-based.
        for index, layout_parameter in enumerate(self.layoutParameters, 1):
            for key, value in layout_parameter.get_as_params().items():
                params['HITLayoutParameter.%s.%s' % (index, key)] = value
        return params
return params
class LayoutParameter(object):
    """
    Representation of a single HIT layout parameter
    """

    def __init__(self, name, value):
        self.name = name
        self.value = value

    def get_as_params(self):
        """Return this parameter as the Name/Value dict the API expects."""
        return {
            "Name": self.name,
            "Value": self.value,
        }
| apache-2.0 |
umuzungu/zipline | zipline/finance/cancel_policy.py | 3 | 2063 | #
# Copyright 2016 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import abc
from abc import abstractmethod
from six import with_metaclass
from zipline.gens.sim_engine import DAY_END
class CancelPolicy(with_metaclass(abc.ABCMeta)):
    """Abstract cancellation policy interface.

    Subclasses decide, for each simulation event, whether every open
    order should be cancelled.
    """

    @abstractmethod
    def should_cancel(self, event):
        """Should all open orders be cancelled?

        Parameters
        ----------
        event : enum-value
            An event type, one of:
              - :data:`zipline.gens.sim_engine.BAR`
              - :data:`zipline.gens.sim_engine.DAY_START`
              - :data:`zipline.gens.sim_engine.DAY_END`
              - :data:`zipline.gens.sim_engine.MINUTE_END`

        Returns
        -------
        should_cancel : bool
            Should all open orders be cancelled?
        """
        pass
class EODCancel(CancelPolicy):
    """Cancel any open orders at the end of each trading day.

    For now, Zipline will only apply this policy to minutely simulations.

    Parameters
    ----------
    warn_on_cancel : bool, optional
        Should a warning be raised if this causes an order to be cancelled?
    """

    def __init__(self, warn_on_cancel=True):
        self.warn_on_cancel = warn_on_cancel

    def should_cancel(self, event):
        # Open orders are flushed exactly once per session: at day end.
        return event == DAY_END
class NeverCancel(CancelPolicy):
    """Orders are never automatically canceled."""

    def __init__(self):
        # Nothing is ever cancelled, so there is nothing to warn about.
        self.warn_on_cancel = False

    def should_cancel(self, event):
        # Keep every open order, regardless of the event type.
        return False
| apache-2.0 |
wolfv/AutobahnPython | examples/twisted/wamp/work/pubsub/client.py | 14 | 4915 | ###############################################################################
##
## Copyright (C) 2013 Tavendo GmbH
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
##
###############################################################################
from autobahn.wamp2.websocket import WampWebSocketClientProtocol, \
WampWebSocketClientFactory
from autobahn.wamp2.serializer import WampJsonSerializer, WampMsgPackSerializer
class PubSubClientProtocol(WampWebSocketClientProtocol):
    """
    WAMP client that subscribes to a demo topic and, when the factory
    configures a period, auto-publishes counter events to that topic.
    NOTE: this module uses Python 2 print statements.
    """

    def onSessionOpen(self):
        print "WAMP session opened", self.websocket_protocol_in_use

        def onMyEvent1(topic, event):
            # Handler invoked for every event published on the topic.
            print "Received event:", event

        d = self.subscribe("http://example.com/myEvent1", onMyEvent1)

        def subscribeSuccess(subscriptionid):
            print "Subscribe Success", subscriptionid

        def subscribeError(error):
            print "Subscribe Error", error

        d.addCallbacks(subscribeSuccess, subscribeError)

        if self.factory.period:
            self.counter = 0

            def sendMyEvent1():
                # Publish an incrementing counter, then reschedule itself.
                self.counter += 1
                self.publish("http://example.com/myEvent1",
                             {
                                 "msg": "Hello from Python!",
                                 "counter": self.counter
                             }
                             )
                # `reactor` is the module-level global installed by
                # install_reactor() in __main__ - TODO confirm it is set
                # when this class is imported from elsewhere.
                reactor.callLater(self.factory.period, sendMyEvent1)

            sendMyEvent1()

    def onSessionClose(self):
        # Stop the event loop once the WAMP session ends.
        print "WAMP session closed"
        self.factory.reactor.stop()
class PubSubClientFactory(WampWebSocketClientFactory):
    """Factory producing PubSubClientProtocol instances.

    ``period`` is the auto-publication interval in seconds; 0 disables
    auto-publication entirely.
    """

    protocol = PubSubClientProtocol

    def __init__(self, url, serializers = None, period = 0, debug = False):
        WampWebSocketClientFactory.__init__(self, url, serializers = serializers, debug = debug)
        # Remembered here so every protocol instance can read its period.
        self.period = period
if __name__ == '__main__':

    import sys, argparse

    from twisted.python import log
    from twisted.internet.endpoints import clientFromString

    ## parse command line arguments
    ##
    parser = argparse.ArgumentParser()

    parser.add_argument("-d", "--debug", action = "store_true",
                        help = "Enable debug output.")

    parser.add_argument("--websocket", default = "tcp:localhost:9000",
                        help = 'WebSocket client Twisted endpoint descriptor, e.g. "tcp:localhost:9000" or "unix:/tmp/mywebsocket".')

    parser.add_argument("--wsurl", default = "ws://localhost:9000",
                        help = 'WebSocket URL (must suit the endpoint), e.g. "ws://localhost:9000".')

    parser.add_argument("--period", type = float, default = 2,
                        help = 'Auto-publication period in seconds.')

    parser.add_argument("--serializers", type = str, default = None,
                        help = 'If set, use this (comma separated) list of WAMP serializers, e.g. "json" or "msgpack,json"')

    # NOTE(review): duplicate of the --period option above, already
    # commented out upstream; kept for reference.
    #parser.add_argument("--period", type = float, default = 2,
    #                    help = 'Auto-publication period in seconds.')

    args = parser.parse_args()

    ## start Twisted logging to stdout
    ##
    log.startLogging(sys.stdout)

    ## we use an Autobahn utility to import the "best" available Twisted reactor
    ##
    from autobahn.choosereactor import install_reactor
    reactor = install_reactor()
    print "Running on reactor", reactor

    ## start a WebSocket client
    ##
    if args.serializers:
        # Build the serializer list from the comma-separated CLI option.
        serializers = []
        for id in args.serializers.split(','):
            if id.strip() == WampJsonSerializer.SERIALIZER_ID:
                serializers.append(WampJsonSerializer())
            elif id.strip() == WampMsgPackSerializer.SERIALIZER_ID:
                serializers.append(WampMsgPackSerializer())
            else:
                raise Exception("unknown WAMP serializer %s" % id)
        if len(serializers) == 0:
            raise Exception("no WAMP serializers selected")
    else:
        # Default: prefer MsgPack, fall back to JSON.
        serializers = [WampMsgPackSerializer(), WampJsonSerializer()]

    wsfactory = PubSubClientFactory(args.wsurl, serializers = serializers, period = args.period, debug = args.debug)

    wsclient = clientFromString(reactor, args.websocket)
    d = wsclient.connect(wsfactory)

    def connected(proto):
        print "Endpoint connected:", proto

    def disconnected(err):
        print "Endpoint disconnected:", err
        reactor.stop()

    d.addCallbacks(connected, disconnected)

    ## now enter the Twisted reactor loop
    ##
    reactor.run()
| apache-2.0 |
klen/python-mode | pymode/libs/logilab-common-1.4.1/logilab/common/textutils.py | 3 | 17345 | # copyright 2003-2011 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
#
# This file is part of logilab-common.
#
# logilab-common is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation, either version 2.1 of the License, or (at your option) any
# later version.
#
# logilab-common is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with logilab-common. If not, see <http://www.gnu.org/licenses/>.
"""Some text manipulation utility functions.
:group text formatting: normalize_text, normalize_paragraph, pretty_match,\
unquote, colorize_ansi
:group text manipulation: searchall, splitstrip
:sort: text formatting, text manipulation
:type ANSI_STYLES: dict(str)
:var ANSI_STYLES: dictionary mapping style identifier to ANSI terminal code
:type ANSI_COLORS: dict(str)
:var ANSI_COLORS: dictionary mapping color identifier to ANSI terminal code
:type ANSI_PREFIX: str
:var ANSI_PREFIX:
ANSI terminal code notifying the start of an ANSI escape sequence
:type ANSI_END: str
:var ANSI_END:
ANSI terminal code notifying the end of an ANSI escape sequence
:type ANSI_RESET: str
:var ANSI_RESET:
ANSI terminal code resetting format defined by a previous ANSI escape sequence
"""
__docformat__ = "restructuredtext en"
import sys
import re
import os.path as osp
from warnings import warn
from unicodedata import normalize as _uninormalize
try:
from os import linesep
except ImportError:
linesep = '\n' # gae
from logilab.common.deprecation import deprecated
# Explicit ASCII transliterations for characters whose NFKD decomposition
# does not yield a useful ASCII equivalent (ligatures, signs, quotes...).
# Used by unormalize() before falling back to unicodedata.normalize.
MANUAL_UNICODE_MAP = {
    u'\xa1': u'!',    # INVERTED EXCLAMATION MARK
    u'\u0142': u'l',  # LATIN SMALL LETTER L WITH STROKE
    u'\u2044': u'/',  # FRACTION SLASH
    u'\xc6': u'AE',   # LATIN CAPITAL LETTER AE
    u'\xa9': u'(c)',  # COPYRIGHT SIGN
    u'\xab': u'"',    # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
    u'\xe6': u'ae',   # LATIN SMALL LETTER AE
    u'\xae': u'(r)',  # REGISTERED SIGN
    u'\u0153': u'oe', # LATIN SMALL LIGATURE OE
    u'\u0152': u'OE', # LATIN CAPITAL LIGATURE OE
    u'\xd8': u'O',    # LATIN CAPITAL LETTER O WITH STROKE
    u'\xf8': u'o',    # LATIN SMALL LETTER O WITH STROKE
    u'\xbb': u'"',    # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
    u'\xdf': u'ss',   # LATIN SMALL LETTER SHARP S
    u'\u2013': u'-',  # HYPHEN
    u'\u2019': u"'",  # SIMPLE QUOTE
    }
def unormalize(ustring, ignorenonascii=None, substitute=None):
    """replace diacritical characters with their corresponding ascii characters

    Each character is first looked up in MANUAL_UNICODE_MAP; otherwise its
    NFKD decomposition is computed and only the first character kept.  A
    character that still is not ASCII is replaced by `substitute`, or a
    ValueError is raised when no substitute was given.

    :type substitute: str
    :param substitute: replacement character to use if decomposition fails

    :see: Another project about ASCII transliterations of Unicode text
          http://pypi.python.org/pypi/Unidecode
    """
    # `ignorenonascii` is the deprecated boolean predecessor of `substitute`.
    if ignorenonascii is not None:
        warn("ignorenonascii is deprecated, use substitute named parameter instead",
             DeprecationWarning, stacklevel=2)
        if ignorenonascii:
            substitute = ''
    pieces = []
    for char in ustring:
        if char in MANUAL_UNICODE_MAP:
            ascii_equiv = MANUAL_UNICODE_MAP[char]
        else:
            # Keep only the base character of the compatibility decomposition.
            ascii_equiv = _uninormalize('NFKD', char)[0]
            if ord(ascii_equiv) >= 2 ** 7:
                if substitute is None:
                    raise ValueError("can't deal with non-ascii based characters")
                ascii_equiv = substitute
        pieces.append(ascii_equiv)
    return u''.join(pieces)
def unquote(string):
    """remove optional quotes (simple or double) from the string

    :type string: str or unicode
    :param string: an optionally quoted string

    :rtype: str or unicode
    :return: the unquoted string (or the input string if it wasn't quoted)
    """
    if not string:
        return string
    if string[0] in '"\'':
        string = string[1:]
    # BUGFIX: the string may now be empty (input was a single quote
    # character); indexing string[-1] unconditionally raised IndexError.
    if string and string[-1] in '"\'':
        string = string[:-1]
    return string
_BLANKLINES_RGX = re.compile('\r?\n\r?\n')
_NORM_SPACES_RGX = re.compile('\s+')
def normalize_text(text, line_len=80, indent='', rest=False):
    """normalize a text to display it with a maximum line size and
    optionally arbitrary indentation. Line jumps are normalized but blank
    lines are kept. The indentation string may be used to insert a
    comment (#) or a quoting (>) mark for instance.

    :type text: str or unicode
    :param text: the input text to normalize

    :type line_len: int
    :param line_len: expected maximum line's length, default to 80

    :type indent: str or unicode
    :param indent: optional string to use as indentation

    :rtype: str or unicode
    :return:
      the input text normalized to fit on lines with a maximized size
      inferior to `line_len`, and optionally prefixed by an
      indentation string
    """
    # Pick the paragraph normalizer: ReST-aware or plain.
    formatter = normalize_rest_paragraph if rest else normalize_paragraph
    paragraphs = [formatter(paragraph, line_len, indent)
                  for paragraph in _BLANKLINES_RGX.split(text)]
    # Re-join paragraphs with an indented blank line between them.
    return ('%s%s%s' % (linesep, indent, linesep)).join(paragraphs)
def normalize_paragraph(text, line_len=80, indent=''):
    """normalize a text to display it with a maximum line size and
    optionally arbitrary indentation. Line jumps are normalized. The
    indentation string may be used top insert a comment mark for
    instance.

    :type text: str or unicode
    :param text: the input text to normalize

    :type line_len: int
    :param line_len: expected maximum line's length, default to 80

    :type indent: str or unicode
    :param indent: optional string to use as indentation

    :rtype: str or unicode
    :return:
      the input text normalized to fit on lines with a maximized size
      inferior to `line_len`, and optionally prefixed by an
      indentation string
    """
    # Collapse all whitespace runs, then wrap on the width left after
    # accounting for the indentation prefix.
    text = _NORM_SPACES_RGX.sub(' ', text)
    usable_width = line_len - len(indent)
    wrapped = []
    while text:
        chunk, text = splittext(text.strip(), usable_width)
        wrapped.append(indent + chunk)
    return linesep.join(wrapped)
def normalize_rest_paragraph(text, line_len=80, indent=''):
    """normalize a ReST text to display it with a maximum line size and
    optionally arbitrary indentation. Line jumps are normalized. The
    indentation string may be used top insert a comment mark for
    instance.

    :type text: str or unicode
    :param text: the input text to normalize

    :type line_len: int
    :param line_len: expected maximum line's length, default to 80

    :type indent: str or unicode
    :param indent: optional string to use as indentation

    :rtype: str or unicode
    :return:
      the input text normalized to fit on lines with a maximized size
      inferior to `line_len`, and optionally prefixed by an
      indentation string
    """
    # `toreport` carries the overflow of a wrapped line into the next
    # source line, so ReST line breaks are preserved where possible.
    toreport = ''
    lines = []
    line_len = line_len - len(indent)
    for line in text.splitlines():
        # Prepend leftover text from the previous line, normalize spaces.
        line = toreport + _NORM_SPACES_RGX.sub(' ', line.strip())
        toreport = ''
        while len(line) > line_len:
            # too long line, need split
            line, toreport = splittext(line, line_len)
            lines.append(indent + line)
            if toreport:
                # Overflow continues on the next iteration (note the
                # trailing space separating it from the following word).
                line = toreport + ' '
                toreport = ''
            else:
                line = ''
        if line:
            lines.append(indent + line.strip())
    return linesep.join(lines)
def splittext(text, line_len):
    """split the given text on space according to the given max line size

    return a 2-uple:
    * a line <= line_len if possible
    * the rest of the text which has to be reported on another line
    """
    if len(text) <= line_len:
        return text, ''
    # Search backwards from the limit for a space to break on.
    cut = min(len(text) - 1, line_len)
    while cut > 0 and text[cut] != ' ':
        cut -= 1
    if cut == 0:
        # No space before the limit: break at the first space after it
        # instead (the first chunk may then exceed line_len).
        cut = min(len(text), line_len)
        while len(text) > cut and text[cut] != ' ':
            cut += 1
    return text[:cut], text[cut + 1:].strip()
def splitstrip(string, sep=','):
    """return a list of stripped string by splitting the string given as
    argument on `sep` (',' by default). Empty string are discarded.

    >>> splitstrip('a, b, c , 4,,')
    ['a', 'b', 'c', '4']
    >>> splitstrip('a')
    ['a']
    >>>

    :type string: str or unicode
    :param string: a csv line

    :type sep: str or unicode
    :param sep: field separator, default to the comma (',')

    :rtype: str or unicode
    :return: the unquoted string (or the input string if it wasn't quoted)
    """
    stripped = (chunk.strip() for chunk in string.split(sep))
    return [chunk for chunk in stripped if chunk]
# Backward-compatibility alias: behaves like splitstrip() but emits a
# DeprecationWarning when called.
get_csv = deprecated('get_csv is deprecated, use splitstrip')(splitstrip)
def split_url_or_path(url_or_path):
    """return the latest component of a string containing either an url of the
    form <scheme>://<path> or a local file system path
    """
    if '://' in url_or_path:
        # URL: drop trailing slashes, then split off the last segment.
        # Note this returns a 2-item list, while the path branch below
        # returns a tuple (historical behavior, preserved).
        trimmed = url_or_path.rstrip('/')
        return trimmed.rsplit('/', 1)
    return osp.split(url_or_path.rstrip(osp.sep))
def text_to_dict(text):
    """parse multilines text containing simple 'key=value' lines and return a
    dict of {'key': 'value'}. When the same key is encountered multiple time,
    value is turned into a list containing all values.

    >>> d = text_to_dict('''multiple=1
    ... multiple= 2
    ... single =3
    ... ''')
    >>> d['single']
    '3'
    >>> d['multiple']
    ['1', '2']

    """
    res = {}
    if not text:
        return res
    for line in text.splitlines():
        line = line.strip()
        # Skip blank lines and '#' comments.
        if not line or line.startswith('#'):
            continue
        key, value = (part.strip() for part in line.split('=', 1))
        if key not in res:
            res[key] = value
        elif isinstance(res[key], list):
            # Third and later occurrences: extend the existing list.
            res[key].append(value)
        else:
            # Second occurrence: promote the single value to a list.
            res[key] = [res[key], value]
    return res
# Separators (whitespace or commas) allowed between value/unit groups.
_BLANK_URE = r'(\s|,)+'
_BLANK_RE = re.compile(_BLANK_URE)
# A numeric literal: float, decimal int, or (possibly 0x-prefixed) int.
__VALUE_URE = r'-?(([0-9]+\.[0-9]*)|((0x?)?[0-9]+))'
__UNITS_URE = r'[a-zA-Z]+'
# One value with an optional trailing unit name.
_VALUE_RE = re.compile(r'(?P<value>%s)(?P<unit>%s)?' % (__VALUE_URE, __UNITS_URE))
# A whole (blank-stripped) string: value/unit pairs, optionally ending
# with a bare unit-less value.
_VALIDATION_RE = re.compile(r'^((%s)(%s))*(%s)?$' % (__VALUE_URE, __UNITS_URE,
                                                     __VALUE_URE))

# Multipliers turning a byte-unit suffix into a number of bytes.
BYTE_UNITS = {
    "b": 1,
    "kb": 1024,
    "mb": 1024 ** 2,
    "gb": 1024 ** 3,
    "tb": 1024 ** 4,
}

# Multipliers turning a time-unit suffix into a number of seconds.
TIME_UNITS = {
    "ms": 0.001,  # BUGFIX: was 0.0001; one millisecond is 1e-3 second
    "s": 1,
    "min": 60,
    "h": 60 * 60,
    "d": 60 * 60 * 24,
}

def apply_units(string, units, inter=None, final=float, blank_reg=_BLANK_RE,
                value_reg=_VALUE_RE):
    """Parse the string applying the units defined in units
    (e.g.: "1.5min", {'min': 60} -> 90.0).

    :type string: str or unicode
    :param string: the string to parse

    :type units: dict (or any object with __getitem__ using basestring key)
    :param units: a dict mapping a unit string repr to its value

    :type inter: type
    :param inter: used to parse every intermediate value (need __sum__)

    :type final: type
    :param final: used to build the result from the sum of parsed values

    :type blank_reg: regexp
    :param blank_reg: should match every blank char to ignore.

    :type value_reg: regexp with "value" and optional "unit" group
    :param value_reg: match a value and it's unit into the

    :raise ValueError: if the string does not look like value/unit pairs
    :raise KeyError: if a unit is not present in `units`
    """
    if inter is None:
        inter = final
    # BUGFIX: honor the `blank_reg` parameter; the hardcoded module-level
    # _BLANK_RE was used previously, silently ignoring custom separators.
    fstring = blank_reg.sub('', string)
    # NOTE: overall validation still uses the default grammar even when a
    # custom value_reg is supplied (pre-existing limitation).
    if not (fstring and _VALIDATION_RE.match(fstring)):
        raise ValueError("Invalid unit string: %r." % string)
    values = []
    for match in value_reg.finditer(fstring):
        dic = match.groupdict()
        lit, unit = dic["value"], dic.get("unit")
        value = inter(lit)
        if unit is not None:
            try:
                value *= units[unit.lower()]
            except KeyError:
                raise KeyError('invalid unit %s. valid units are %s' %
                               (unit, units.keys()))
        values.append(value)
    return final(sum(values))
# Matches any flavour of line break (CRLF, CR runs, LF) for normalization.
_LINE_RGX = re.compile('\r\n|\r+|\n')

def pretty_match(match, string, underline_char='^'):
    """return a string with the match location underlined:

    >>> import re
    >>> print(pretty_match(re.search('mange', 'il mange du bacon'), 'il mange du bacon'))
    il mange du bacon
       ^^^^^
    >>>

    :type match: _sre.SRE_match
    :param match: object returned by re.match, re.search or re.finditer

    :type string: str or unicode
    :param string:
      the string on which the regular expression has been applied to
      obtain the `match` object

    :type underline_char: str or unicode
    :param underline_char:
      character to use to underline the matched section, default to the
      carret '^'

    :rtype: str or unicode
    :return:
      the original string with an inserted line to underline the match
      location
    """
    start = match.start()
    end = match.end()
    # Normalize line endings so offsets computed below are consistent.
    string = _LINE_RGX.sub(linesep, string)
    # Find the start of the line containing the match start.
    start_line_pos = string.rfind(linesep, 0, start)
    if start_line_pos == -1:
        start_line_pos = 0
        result = []
    else:
        # Keep everything before the matched line verbatim.
        result = [string[:start_line_pos]]
        start_line_pos += len(linesep)
    # Underline aligned under the match within its line.
    offset = start - start_line_pos
    underline = ' ' * offset + underline_char * (end - start)
    end_line_pos = string.find(linesep, end)
    if end_line_pos == -1:
        # Match ends on the last line: emit line then underline.
        string = string[start_line_pos:]
        result.append(string)
        result.append(underline)
    else:
        # `end` is reused here as the tail of the text after the matched
        # line (historical variable reuse, preserved).
        end = string[end_line_pos + len(linesep):]
        string = string[start_line_pos:end_line_pos]
        result.append(string)
        result.append(underline)
        result.append(end)
    return linesep.join(result).rstrip()
# Ansi colorization ###########################################################
# CSI introducer for ANSI escape sequences.
ANSI_PREFIX = '\033['
# Terminator of an SGR (Select Graphic Rendition) sequence.
ANSI_END = 'm'
# Full attribute reset.
ANSI_RESET = '\033[0m'

# SGR parameter for each supported text style.
ANSI_STYLES = {
    'reset': "0",
    'bold': "1",
    'italic': "3",
    'underline': "4",
    'blink': "5",
    'inverse': "7",
    'strike': "9",
}

# SGR foreground color parameter for each supported color name.
ANSI_COLORS = {
    'reset': "0",
    'black': "30",
    'red': "31",
    'green': "32",
    'yellow': "33",
    'blue': "34",
    'magenta': "35",
    'cyan': "36",
    'white': "37",
}
def _get_ansi_code(color=None, style=None):
    """return ansi escape code corresponding to color and style

    :type color: str or None
    :param color:
      the color name (see `ANSI_COLORS` for available values)
      or the color number when 256 colors are available

    :type style: str or None
    :param style:
      style string (see `ANSI_COLORS` for available values). To get
      several style effects at the same time, use a coma as separator.

    :raise KeyError: if an unexistent color or style identifier is given

    :rtype: str
    :return: the built escape code
    """
    codes = []
    if style:
        for effect in splitstrip(style):
            codes.append(ANSI_STYLES[effect])
    if color:
        if color.isdigit():
            # Numeric color: 256-color mode, "38;5;<n>" selects palette
            # entry <n>.
            codes.extend(['38', '5'])
            codes.append(color)
        else:
            codes.append(ANSI_COLORS[color])
    if not codes:
        return ''
    return ANSI_PREFIX + ';'.join(codes) + ANSI_END
def colorize_ansi(msg, color=None, style=None):
    """colorize message by wrapping it with ansi escape codes

    :type msg: str or unicode
    :param msg: the message string to colorize

    :type color: str or None
    :param color:
      the color identifier (see `ANSI_COLORS` for available values)

    :type style: str or None
    :param style:
      style string (see `ANSI_COLORS` for available values). To get
      several style effects at the same time, use a coma as separator.

    :raise KeyError: if an unexistent color or style identifier is given

    :rtype: str or unicode
    :return: the ansi escaped string
    """
    # Nothing requested: leave the text untouched.
    if color is None and style is None:
        return msg
    escape = _get_ansi_code(color, style)
    # An empty escape code means nothing usable was built; don't wrap.
    if not escape:
        return msg
    return '%s%s%s' % (escape, msg, ANSI_RESET)
# Default color mapping for unified-diff colorization.
DIFF_STYLE = {'separator': 'cyan', 'remove': 'red', 'add': 'green'}

def diff_colorize_ansi(lines, out=sys.stdout, style=DIFF_STYLE):
    """Write the unified-diff `lines` to `out`, colorized with ANSI codes.

    File headers ('--- ' / '+++ ') use style['separator'], removals
    style['remove'], additions style['add']; other lines pass through.

    BUGFIX: the trailing `elif line[:4] == '--- '` / `'+++ '` branches were
    unreachable dead code (already matched by the first condition) and have
    been removed; startswith() is used so empty lines no longer raise
    IndexError on `line[0]`.
    """
    for line in lines:
        if line[:4] in ('--- ', '+++ '):
            out.write(colorize_ansi(line, style['separator']))
        elif line.startswith('-'):
            out.write(colorize_ansi(line, style['remove']))
        elif line.startswith('+'):
            out.write(colorize_ansi(line, style['add']))
        else:
            out.write(line)
| lgpl-3.0 |
2014c2g5/cd0505 | static/Brython3.1.1-20150328-091302/Lib/calendar.py | 828 | 22940 | """Calendar printing functions
Note when comparing these calendars to the ones printed by cal(1): By
default, these calendars have Monday as the first day of the week, and
Sunday as the last (the European convention). Use setfirstweekday() to
set the first day of the week (0=Monday, 6=Sunday)."""
import sys
import datetime
import locale as _locale
__all__ = ["IllegalMonthError", "IllegalWeekdayError", "setfirstweekday",
"firstweekday", "isleap", "leapdays", "weekday", "monthrange",
"monthcalendar", "prmonth", "month", "prcal", "calendar",
"timegm", "month_name", "month_abbr", "day_name", "day_abbr"]
# Exception raised for bad input (with string parameter for details)
error = ValueError  # legacy public alias, kept for backward compatibility

# Exceptions raised for bad input
class IllegalMonthError(ValueError):
    """Raised when a month number falls outside the 1-12 range."""
    def __init__(self, month):
        self.month = month
    def __str__(self):
        return "bad month number %r; must be 1-12" % self.month
class IllegalWeekdayError(ValueError):
    """Raised when a weekday number falls outside the 0 (Monday) - 6 (Sunday) range."""
    def __init__(self, weekday):
        self.weekday = weekday
    def __str__(self):
        return "bad weekday number %r; must be 0 (Monday) to 6 (Sunday)" % self.weekday
# Constants for months referenced later
January = 1
February = 2

# Number of days per month (except for February in leap years)
# Index 0 is a sentinel so the list can be indexed 1-based by month number.
mdays = [0, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
# This module used to have hard-coded lists of day and month names, as
# English strings. The classes following emulate a read-only version of
# that, but supply localized names. Note that the values are computed
# fresh on each call, in case the user changes locale between calls.
class _localized_month:
_months = [datetime.date(2001, i+1, 1).strftime for i in range(12)]
_months.insert(0, lambda x: "")
def __init__(self, format):
self.format = format
def __getitem__(self, i):
funcs = self._months[i]
if isinstance(i, slice):
return [f(self.format) for f in funcs]
else:
return funcs(self.format)
def __len__(self):
return 13
class _localized_day:
# January 1, 2001, was a Monday.
_days = [datetime.date(2001, 1, i+1).strftime for i in range(7)]
def __init__(self, format):
self.format = format
def __getitem__(self, i):
funcs = self._days[i]
if isinstance(i, slice):
return [f(self.format) for f in funcs]
else:
return funcs(self.format)
def __len__(self):
return 7
# Full and abbreviated names of weekdays
day_name = _localized_day('%A')
day_abbr = _localized_day('%a')

# Full and abbreviated names of months (1-based arrays!!!)
month_name = _localized_month('%B')
month_abbr = _localized_month('%b')

# Constants for weekdays (0 = Monday ... 6 = Sunday)
(MONDAY, TUESDAY, WEDNESDAY, THURSDAY, FRIDAY, SATURDAY, SUNDAY) = range(7)
def isleap(year):
    """Return True for leap years, False for non-leap years."""
    # Gregorian rule: every 4th year, except centuries, except every
    # 400th year.
    if year % 400 == 0:
        return True
    if year % 100 == 0:
        return False
    return year % 4 == 0
def leapdays(y1, y2):
    """Return number of leap years in range [y1, y2).

    Assume y1 <= y2."""
    def leaps_up_to(year):
        # Number of leap years in [1, year].
        return year // 4 - year // 100 + year // 400
    # Half-open range: count through y2-1, subtract count through y1-1.
    return leaps_up_to(y2 - 1) - leaps_up_to(y1 - 1)
def weekday(year, month, day):
    """Return weekday (0-6 ~ Mon-Sun) for year (1970-...), month (1-12),
       day (1-31)."""
    # Delegate entirely to datetime, which implements the proleptic
    # Gregorian calendar.
    return datetime.date(year=year, month=month, day=day).weekday()
def monthrange(year, month):
    """Return weekday (0-6 ~ Mon-Sun) and number of days (28-31) for
       year, month."""
    if month < January or month > 12:
        raise IllegalMonthError(month)
    first_weekday = weekday(year, month, 1)
    ndays = mdays[month]
    # February gains a day in leap years.
    if month == February and isleap(year):
        ndays += 1
    return first_weekday, ndays
class Calendar(object):
    """
    Base calendar class. This class doesn't do any formatting. It simply
    provides data to subclasses.
    """

    def __init__(self, firstweekday=0):
        # Goes through the `firstweekday` property below.
        self.firstweekday = firstweekday # 0 = Monday, 6 = Sunday

    def getfirstweekday(self):
        # Normalize any stored value into the 0-6 range.
        return self._firstweekday % 7

    def setfirstweekday(self, firstweekday):
        self._firstweekday = firstweekday

    firstweekday = property(getfirstweekday, setfirstweekday)

    def iterweekdays(self):
        """
        Return a iterator for one week of weekday numbers starting with the
        configured first one.
        """
        for i in range(self.firstweekday, self.firstweekday + 7):
            yield i%7

    def itermonthdates(self, year, month):
        """
        Return an iterator for one month. The iterator will yield datetime.date
        values and will always iterate through complete weeks, so it will yield
        dates outside the specified month.
        """
        date = datetime.date(year, month, 1)
        # Go back to the beginning of the week
        days = (date.weekday() - self.firstweekday) % 7
        date -= datetime.timedelta(days=days)
        oneday = datetime.timedelta(days=1)
        while True:
            yield date
            try:
                date += oneday
            except OverflowError:
                # Adding one day could fail after datetime.MAXYEAR
                break
            if date.month != month and date.weekday() == self.firstweekday:
                # Left the month and completed the final week: stop.
                break

    def itermonthdays2(self, year, month):
        """
        Like itermonthdates(), but will yield (day number, weekday number)
        tuples. For days outside the specified month the day number is 0.
        """
        for date in self.itermonthdates(year, month):
            if date.month != month:
                yield (0, date.weekday())
            else:
                yield (date.day, date.weekday())

    def itermonthdays(self, year, month):
        """
        Like itermonthdates(), but will yield day numbers. For days outside
        the specified month the day number is 0.
        """
        for date in self.itermonthdates(year, month):
            if date.month != month:
                yield 0
            else:
                yield date.day

    def monthdatescalendar(self, year, month):
        """
        Return a matrix (list of lists) representing a month's calendar.
        Each row represents a week; week entries are datetime.date values.
        """
        dates = list(self.itermonthdates(year, month))
        return [ dates[i:i+7] for i in range(0, len(dates), 7) ]

    def monthdays2calendar(self, year, month):
        """
        Return a matrix representing a month's calendar.
        Each row represents a week; week entries are
        (day number, weekday number) tuples. Day numbers outside this month
        are zero.
        """
        days = list(self.itermonthdays2(year, month))
        return [ days[i:i+7] for i in range(0, len(days), 7) ]

    def monthdayscalendar(self, year, month):
        """
        Return a matrix representing a month's calendar.
        Each row represents a week; days outside this month are zero.
        """
        days = list(self.itermonthdays(year, month))
        return [ days[i:i+7] for i in range(0, len(days), 7) ]

    def yeardatescalendar(self, year, width=3):
        """
        Return the data for the specified year ready for formatting. The return
        value is a list of month rows. Each month row contains up to width months.
        Each month contains between 4 and 6 weeks and each week contains 1-7
        days. Days are datetime.date objects.
        """
        months = [
            self.monthdatescalendar(year, i)
            for i in range(January, January+12)
        ]
        return [months[i:i+width] for i in range(0, len(months), width) ]

    def yeardays2calendar(self, year, width=3):
        """
        Return the data for the specified year ready for formatting (similar to
        yeardatescalendar()). Entries in the week lists are
        (day number, weekday number) tuples. Day numbers outside this month are
        zero.
        """
        months = [
            self.monthdays2calendar(year, i)
            for i in range(January, January+12)
        ]
        return [months[i:i+width] for i in range(0, len(months), width) ]

    def yeardayscalendar(self, year, width=3):
        """
        Return the data for the specified year ready for formatting (similar to
        yeardatescalendar()). Entries in the week lists are day numbers.
        Day numbers outside this month are zero.
        """
        months = [
            self.monthdayscalendar(year, i)
            for i in range(January, January+12)
        ]
        return [months[i:i+width] for i in range(0, len(months), width) ]
class TextCalendar(Calendar):
    """
    Subclass of Calendar that outputs a calendar as a simple plain text
    similar to the UNIX program cal.
    """

    def prweek(self, theweek, width):
        """
        Print a single week (no newline).
        """
        print(self.formatweek(theweek, width), end=' ')

    def formatday(self, day, weekday, width):
        """
        Returns a formatted day.
        """
        if day == 0:
            # Day belongs to an adjacent month: render as blank.
            s = ''
        else:
            s = '%2i' % day             # right-align single-digit days
        return s.center(width)

    def formatweek(self, theweek, width):
        """
        Returns a single week in a string (no newline).
        """
        return ' '.join(self.formatday(d, wd, width) for (d, wd) in theweek)

    def formatweekday(self, day, width):
        """
        Returns a formatted week day name.
        """
        # Full names only fit when at least 9 columns are available.
        if width >= 9:
            names = day_name
        else:
            names = day_abbr
        return names[day][:width].center(width)

    def formatweekheader(self, width):
        """
        Return a header for a week.
        """
        return ' '.join(self.formatweekday(i, width) for i in self.iterweekdays())

    def formatmonthname(self, theyear, themonth, width, withyear=True):
        """
        Return a formatted month name.
        """
        s = month_name[themonth]
        if withyear:
            s = "%s %r" % (s, theyear)
        return s.center(width)

    def prmonth(self, theyear, themonth, w=0, l=0):
        """
        Print a month's calendar.
        """
        print(self.formatmonth(theyear, themonth, w, l), end=' ')

    def formatmonth(self, theyear, themonth, w=0, l=0):
        """
        Return a month's calendar string (multi-line).

        `w` is the date column width, `l` the number of lines per week.
        """
        w = max(2, w)
        l = max(1, l)
        s = self.formatmonthname(theyear, themonth, 7 * (w + 1) - 1)
        s = s.rstrip()
        s += '\n' * l
        s += self.formatweekheader(w).rstrip()
        s += '\n' * l
        for week in self.monthdays2calendar(theyear, themonth):
            s += self.formatweek(week, w).rstrip()
            s += '\n' * l
        return s

    def formatyear(self, theyear, w=2, l=1, c=6, m=3):
        """
        Returns a year's calendar as a multi-line string.

        `w`, `l` as in formatmonth(); `c` is the spacing between month
        columns and `m` the number of months per row.
        """
        w = max(2, w)
        l = max(1, l)
        c = max(2, c)
        colwidth = (w + 1) * 7 - 1
        v = []
        a = v.append
        a(repr(theyear).center(colwidth*m+c*(m-1)).rstrip())
        a('\n'*l)
        header = self.formatweekheader(w)
        for (i, row) in enumerate(self.yeardays2calendar(theyear, m)):
            # months in this row
            months = range(m*i+1, min(m*(i+1)+1, 13))
            a('\n'*l)
            names = (self.formatmonthname(theyear, k, colwidth, False)
                     for k in months)
            a(formatstring(names, colwidth, c).rstrip())
            a('\n'*l)
            headers = (header for k in months)
            a(formatstring(headers, colwidth, c).rstrip())
            a('\n'*l)
            # max number of weeks for this row
            height = max(len(cal) for cal in row)
            for j in range(height):
                weeks = []
                for cal in row:
                    if j >= len(cal):
                        # This month has fewer weeks: pad with blank cell.
                        weeks.append('')
                    else:
                        weeks.append(self.formatweek(cal[j], w))
                a(formatstring(weeks, colwidth, c).rstrip())
                a('\n' * l)
        return ''.join(v)

    def pryear(self, theyear, w=0, l=0, c=6, m=3):
        """Print a year's calendar."""
        print(self.formatyear(theyear, w, l, c, m))
class HTMLCalendar(Calendar):
    """
    This calendar returns complete HTML pages.
    """

    # CSS classes for the day <td>s
    cssclasses = ["mon", "tue", "wed", "thu", "fri", "sat", "sun"]

    def formatday(self, day, weekday):
        """
        Return a day as a table cell.
        """
        if day == 0:
            return '<td class="noday">&nbsp;</td>' # day outside month
        else:
            return '<td class="%s">%d</td>' % (self.cssclasses[weekday], day)

    def formatweek(self, theweek):
        """
        Return a complete week as a table row.
        """
        s = ''.join(self.formatday(d, wd) for (d, wd) in theweek)
        return '<tr>%s</tr>' % s

    def formatweekday(self, day):
        """
        Return a weekday name as a table header.
        """
        return '<th class="%s">%s</th>' % (self.cssclasses[day], day_abbr[day])

    def formatweekheader(self):
        """
        Return a header for a week as a table row.
        """
        s = ''.join(self.formatweekday(i) for i in self.iterweekdays())
        return '<tr>%s</tr>' % s

    def formatmonthname(self, theyear, themonth, withyear=True):
        """
        Return a month name as a table row.
        """
        if withyear:
            s = '%s %s' % (month_name[themonth], theyear)
        else:
            s = '%s' % month_name[themonth]
        return '<tr><th colspan="7" class="month">%s</th></tr>' % s

    def formatmonth(self, theyear, themonth, withyear=True):
        """
        Return a formatted month as a table.
        """
        v = []
        a = v.append
        a('<table border="0" cellpadding="0" cellspacing="0" class="month">')
        a('\n')
        a(self.formatmonthname(theyear, themonth, withyear=withyear))
        a('\n')
        a(self.formatweekheader())
        a('\n')
        for week in self.monthdays2calendar(theyear, themonth):
            a(self.formatweek(week))
            a('\n')
        a('</table>')
        a('\n')
        return ''.join(v)

    def formatyear(self, theyear, width=3):
        """
        Return a formatted year as a table of tables.

        `width` is the number of month columns per row.
        """
        v = []
        a = v.append
        width = max(width, 1)
        a('<table border="0" cellpadding="0" cellspacing="0" class="year">')
        a('\n')
        a('<tr><th colspan="%d" class="year">%s</th></tr>' % (width, theyear))
        for i in range(January, January+12, width):
            # months in this row
            months = range(i, min(i+width, 13))
            a('<tr>')
            for m in months:
                a('<td>')
                a(self.formatmonth(theyear, m, withyear=False))
                a('</td>')
            a('</tr>')
        a('</table>')
        return ''.join(v)

    def formatyearpage(self, theyear, width=3, css='calendar.css', encoding=None):
        """
        Return a formatted year as a complete HTML page.

        The result is a bytes object encoded with `encoding` (defaulting to
        the interpreter's default encoding).
        """
        if encoding is None:
            encoding = sys.getdefaultencoding()
        v = []
        a = v.append
        a('<?xml version="1.0" encoding="%s"?>\n' % encoding)
        a('<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">\n')
        a('<html>\n')
        a('<head>\n')
        a('<meta http-equiv="Content-Type" content="text/html; charset=%s" />\n' % encoding)
        if css is not None:
            a('<link rel="stylesheet" type="text/css" href="%s" />\n' % css)
        a('<title>Calendar for %d</title>\n' % theyear)
        a('</head>\n')
        a('<body>\n')
        a(self.formatyear(theyear, width))
        a('</body>\n')
        a('</html>\n')
        return ''.join(v).encode(encoding, "xmlcharrefreplace")
class different_locale:
    """Context manager temporarily switching LC_TIME to the given locale,
    restoring the previous setting on exit."""
    def __init__(self, locale):
        self.locale = locale

    def __enter__(self):
        # Remember the current LC_TIME setting so __exit__ can restore it.
        self.oldlocale = _locale.getlocale(_locale.LC_TIME)
        _locale.setlocale(_locale.LC_TIME, self.locale)

    def __exit__(self, *args):
        _locale.setlocale(_locale.LC_TIME, self.oldlocale)
class LocaleTextCalendar(TextCalendar):
    """
    This class can be passed a locale name in the constructor and will return
    month and weekday names in the specified locale. If this locale includes
    an encoding all strings containing month and weekday names will be returned
    as unicode.
    """

    def __init__(self, firstweekday=0, locale=None):
        TextCalendar.__init__(self, firstweekday)
        if locale is None:
            # Fall back to the system default locale.
            locale = _locale.getdefaultlocale()
        self.locale = locale

    def formatweekday(self, day, width):
        # Same as TextCalendar.formatweekday, but rendered under self.locale.
        with different_locale(self.locale):
            if width >= 9:
                names = day_name
            else:
                names = day_abbr
            name = names[day]
            return name[:width].center(width)

    def formatmonthname(self, theyear, themonth, width, withyear=True):
        # Same as TextCalendar.formatmonthname, but rendered under self.locale.
        with different_locale(self.locale):
            s = month_name[themonth]
            if withyear:
                s = "%s %r" % (s, theyear)
            return s.center(width)
class LocaleHTMLCalendar(HTMLCalendar):
    """
    This class can be passed a locale name in the constructor and will return
    month and weekday names in the specified locale. If this locale includes
    an encoding all strings containing month and weekday names will be returned
    as unicode.
    """
    def __init__(self, firstweekday=0, locale=None):
        HTMLCalendar.__init__(self, firstweekday)
        if locale is None:
            # Fall back to the system default locale.
            locale = _locale.getdefaultlocale()
        self.locale = locale

    def formatweekday(self, day):
        # Same as HTMLCalendar.formatweekday, but rendered under self.locale.
        with different_locale(self.locale):
            s = day_abbr[day]
            return '<th class="%s">%s</th>' % (self.cssclasses[day], s)

    def formatmonthname(self, theyear, themonth, withyear=True):
        # Same as HTMLCalendar.formatmonthname, but rendered under self.locale.
        with different_locale(self.locale):
            s = month_name[themonth]
            if withyear:
                s = '%s %s' % (s, theyear)
            return '<tr><th colspan="7" class="month">%s</th></tr>' % s
# Support for old module level interface: a single shared TextCalendar
# instance backs the classic function-style calendar API below.
c = TextCalendar()

firstweekday = c.getfirstweekday


def setfirstweekday(firstweekday):
    # Validate against the module's weekday constants before mutating the
    # shared calendar instance.
    if not MONDAY <= firstweekday <= SUNDAY:
        raise IllegalWeekdayError(firstweekday)
    c.firstweekday = firstweekday


# Bound-method aliases exposing the shared instance's API at module level.
monthcalendar = c.monthdayscalendar
prweek = c.prweek
week = c.formatweek
weekheader = c.formatweekheader
prmonth = c.prmonth
month = c.formatmonth
calendar = c.formatyear
prcal = c.pryear
# Spacing of month columns for multi-column year calendar
_colwidth = 7*3 - 1         # Amount printed by prweek()
_spacing = 6                # Number of spaces between columns


def format(cols, colwidth=_colwidth, spacing=_spacing):
    """Print multi-column formatting for year calendars."""
    print(formatstring(cols, colwidth, spacing))


def formatstring(cols, colwidth=_colwidth, spacing=_spacing):
    """Return a string built from n strings, each centered in its column."""
    gap = spacing * ' '
    centered = (col.center(colwidth) for col in cols)
    return gap.join(centered)
EPOCH = 1970
_EPOCH_ORD = datetime.date(EPOCH, 1, 1).toordinal()


def timegm(tuple):
    """Unrelated but handy function to calculate Unix timestamp from GMT."""
    year, month, day, hour, minute, second = tuple[:6]
    # Whole days elapsed since the Unix epoch (1970-01-01).
    days = datetime.date(year, month, 1).toordinal() - _EPOCH_ORD + day - 1
    # Fold days, hours and minutes into a single seconds count.
    return ((days * 24 + hour) * 60 + minute) * 60 + second
def main(args):
    # Command-line entry point: render a text or HTML calendar for a year
    # (or a single month, text only). *args* is sys.argv-style, program name
    # included, which is why positional values start at args[1].
    import optparse
    parser = optparse.OptionParser(usage="usage: %prog [options] [year [month]]")
    parser.add_option(
        "-w", "--width",
        dest="width", type="int", default=2,
        help="width of date column (default 2, text only)"
    )
    parser.add_option(
        "-l", "--lines",
        dest="lines", type="int", default=1,
        help="number of lines for each week (default 1, text only)"
    )
    parser.add_option(
        "-s", "--spacing",
        dest="spacing", type="int", default=6,
        help="spacing between months (default 6, text only)"
    )
    parser.add_option(
        "-m", "--months",
        dest="months", type="int", default=3,
        help="months per row (default 3, text only)"
    )
    parser.add_option(
        "-c", "--css",
        dest="css", default="calendar.css",
        help="CSS to use for page (html only)"
    )
    parser.add_option(
        "-L", "--locale",
        dest="locale", default=None,
        help="locale to be used from month and weekday names"
    )
    parser.add_option(
        "-e", "--encoding",
        dest="encoding", default=None,
        help="Encoding to use for output."
    )
    parser.add_option(
        "-t", "--type",
        dest="type", default="text",
        choices=("text", "html"),
        help="output type (text or html)"
    )

    (options, args) = parser.parse_args(args)
    if options.locale and not options.encoding:
        parser.error("if --locale is specified --encoding is required")
        # parser.error() already exits the process; this exit is defensive.
        sys.exit(1)

    # Locale is passed around as a (name, encoding) pair.
    locale = options.locale, options.encoding

    if options.type == "html":
        if options.locale:
            cal = LocaleHTMLCalendar(locale=locale)
        else:
            cal = HTMLCalendar()
        encoding = options.encoding
        if encoding is None:
            encoding = sys.getdefaultencoding()
        optdict = dict(encoding=encoding, css=options.css)
        # HTML output is encoded bytes, so write via the binary stdout buffer.
        write = sys.stdout.buffer.write
        if len(args) == 1:
            write(cal.formatyearpage(datetime.date.today().year, **optdict))
        elif len(args) == 2:
            write(cal.formatyearpage(int(args[1]), **optdict))
        else:
            parser.error("incorrect number of arguments")
            sys.exit(1)
    else:
        if options.locale:
            cal = LocaleTextCalendar(locale=locale)
        else:
            cal = TextCalendar()
        optdict = dict(w=options.width, l=options.lines)
        if len(args) != 3:
            # Year view only: column spacing and months-per-row are
            # formatyear() keywords, not formatmonth() ones.
            optdict["c"] = options.spacing
            optdict["m"] = options.months
        if len(args) == 1:
            result = cal.formatyear(datetime.date.today().year, **optdict)
        elif len(args) == 2:
            result = cal.formatyear(int(args[1]), **optdict)
        elif len(args) == 3:
            result = cal.formatmonth(int(args[1]), int(args[2]), **optdict)
        else:
            parser.error("incorrect number of arguments")
            sys.exit(1)
        write = sys.stdout.write
        if options.encoding:
            # Explicit encoding requested: emit bytes instead of text.
            result = result.encode(options.encoding)
            write = sys.stdout.buffer.write
        write(result)


if __name__ == "__main__":
    main(sys.argv)
| agpl-3.0 |
mollstam/UnrealPy | UnrealPyEmbed/Development/Python/2015.08.07-Python2710-x64-Source-vs2015/Python27/Source/dnspython-1.12.0/dns/rdtypes/nsbase.py | 100 | 2994 | # Copyright (C) 2003-2007, 2009-2011 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""NS-like base classes."""
import cStringIO
import dns.exception
import dns.rdata
import dns.name
class NSBase(dns.rdata.Rdata):

    """Base class for rdata that is like an NS record.

    @ivar target: the target name of the rdata
    @type target: dns.name.Name object"""

    __slots__ = ['target']

    def __init__(self, rdclass, rdtype, target):
        super(NSBase, self).__init__(rdclass, rdtype)
        self.target = target

    def to_text(self, origin=None, relativize=True, **kw):
        # Render the target name, relativized to *origin* when requested.
        target = self.target.choose_relativity(origin, relativize)
        return str(target)

    def from_text(cls, rdclass, rdtype, tok, origin = None, relativize = True):
        # Parse a single domain-name token, then require end of line.
        target = tok.get_name()
        target = target.choose_relativity(origin, relativize)
        tok.get_eol()
        return cls(rdclass, rdtype, target)

    from_text = classmethod(from_text)

    def to_wire(self, file, compress = None, origin = None):
        self.target.to_wire(file, compress, origin)

    def to_digestable(self, origin = None):
        return self.target.to_digestable(origin)

    def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin = None):
        (target, cused) = dns.name.from_wire(wire[: current + rdlen],
                                             current)
        # The name must consume exactly the rdata length; anything else means
        # the wire record is malformed.
        if cused != rdlen:
            raise dns.exception.FormError
        # PEP 8 idiom: "origin is not None" (was "not origin is None").
        if origin is not None:
            target = target.relativize(origin)
        return cls(rdclass, rdtype, target)

    from_wire = classmethod(from_wire)

    def choose_relativity(self, origin = None, relativize = True):
        self.target = self.target.choose_relativity(origin, relativize)

    def _cmp(self, other):
        # Python 2 comparison hook used by the dnspython 1.x Rdata base class.
        return cmp(self.target, other.target)
class UncompressedNS(NSBase):

    """Base class for rdata that is like an NS record, but whose name
    is not compressed when convert to DNS wire format, and whose
    digestable form is not downcased."""

    def to_wire(self, file, compress = None, origin = None):
        # Force compress=None so the target name is always written in full.
        super(UncompressedNS, self).to_wire(file, None, origin)

    def to_digestable(self, origin = None):
        # Digest form is simply the uncompressed wire encoding.
        buf = cStringIO.StringIO()
        self.to_wire(buf, None, origin)
        return buf.getvalue()
| mit |
Maikflow/django_test | lib/python2.7/site-packages/Django-1.7.1-py2.7.egg/django/contrib/gis/tests/gis_migrations/test_operations.py | 9 | 4776 | from __future__ import unicode_literals
from unittest import skipUnless
from django.contrib.gis.tests.utils import HAS_SPATIAL_DB, mysql
from django.db import connection, migrations, models
from django.db.migrations.migration import Migration
from django.db.migrations.state import ProjectState
from django.test import TransactionTestCase
if HAS_SPATIAL_DB:
from django.contrib.gis.db.models import fields
try:
from django.contrib.gis.models import GeometryColumns
HAS_GEOMETRY_COLUMNS = True
except ImportError:
HAS_GEOMETRY_COLUMNS = False
@skipUnless(HAS_SPATIAL_DB, "Spatial db is required.")
class OperationTests(TransactionTestCase):
    """Exercise schema-migration operations on GIS-enabled columns."""

    available_apps = ["django.contrib.gis.tests.gis_migrations"]

    def tearDown(self):
        # Delete table after testing
        self.apply_operations('gis', self.current_state, [migrations.DeleteModel("Neighborhood")])
        super(OperationTests, self).tearDown()

    def get_table_description(self, table):
        with connection.cursor() as cursor:
            return connection.introspection.get_table_description(cursor, table)

    def assertColumnExists(self, table, column):
        self.assertIn(column, [c.name for c in self.get_table_description(table)])

    def assertColumnNotExists(self, table, column):
        self.assertNotIn(column, [c.name for c in self.get_table_description(table)])

    def apply_operations(self, app_label, project_state, operations):
        # Wrap the operations in a throwaway Migration and apply it through
        # a real schema editor, returning the resulting project state.
        migration = Migration('name', app_label)
        migration.operations = operations
        with connection.schema_editor() as editor:
            return migration.apply(project_state, editor)

    def set_up_test_model(self):
        # Create the Neighborhood table (with a MultiPolygon column) that the
        # individual tests mutate.
        operations = [migrations.CreateModel(
            "Neighborhood",
            [
                ("id", models.AutoField(primary_key=True)),
                ('name', models.CharField(max_length=100, unique=True)),
                ('geom', fields.MultiPolygonField(srid=4326)),
            ],
        )]
        return self.apply_operations('gis', ProjectState(), operations)

    def assertGeometryColumnsCount(self, expected_count):
        table_name = "gis_neighborhood"
        if connection.features.uppercases_column_names:
            # Some backends (e.g. Oracle) report table names upper-cased.
            table_name = table_name.upper()
        self.assertEqual(
            GeometryColumns.objects.filter(**{
                GeometryColumns.table_name_col(): table_name,
            }).count(),
            expected_count
        )

    def test_add_gis_field(self):
        """
        Tests the AddField operation with a GIS-enabled column.
        """
        project_state = self.set_up_test_model()
        operation = migrations.AddField(
            "Neighborhood",
            "path",
            fields.LineStringField(srid=4326),
        )
        new_state = project_state.clone()
        operation.state_forwards("gis", new_state)
        with connection.schema_editor() as editor:
            operation.database_forwards("gis", editor, project_state, new_state)
        self.current_state = new_state
        self.assertColumnExists("gis_neighborhood", "path")

        # Test GeometryColumns when available
        if HAS_GEOMETRY_COLUMNS:
            self.assertGeometryColumnsCount(2)

        if self.has_spatial_indexes:
            with connection.cursor() as cursor:
                indexes = connection.introspection.get_indexes(cursor, "gis_neighborhood")
            self.assertIn('path', indexes)

    def test_remove_gis_field(self):
        """
        Tests the RemoveField operation with a GIS-enabled column.
        """
        project_state = self.set_up_test_model()
        operation = migrations.RemoveField("Neighborhood", "geom")
        new_state = project_state.clone()
        operation.state_forwards("gis", new_state)
        with connection.schema_editor() as editor:
            operation.database_forwards("gis", editor, project_state, new_state)
        self.current_state = new_state
        self.assertColumnNotExists("gis_neighborhood", "geom")

        # Test GeometryColumns when available
        if HAS_GEOMETRY_COLUMNS:
            self.assertGeometryColumnsCount(0)

    def test_create_model_spatial_index(self):
        self.current_state = self.set_up_test_model()

        if not self.has_spatial_indexes:
            self.skipTest("No support for Spatial indexes")

        with connection.cursor() as cursor:
            indexes = connection.introspection.get_indexes(cursor, "gis_neighborhood")
        self.assertIn('geom', indexes)

    @property
    def has_spatial_indexes(self):
        # MySQL only supports spatial indexes on some storage engines, so it
        # must be asked at runtime; other backends are assumed capable here.
        if mysql:
            with connection.cursor() as cursor:
                return connection.introspection.supports_spatial_index(cursor, "gis_neighborhood")
        return True
| gpl-2.0 |
zsoltdudas/lis-tempest | tempest/api/compute/servers/test_create_server.py | 3 | 15240 | # Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import netaddr
import testtools
from tempest.api.compute import base
from tempest.common.utils import data_utils
from tempest.common.utils.linux import remote_client
from tempest.common import waiters
from tempest import config
from tempest import test
# Module-wide handle on the tempest configuration singleton.
CONF = config.CONF
class ServersTestJSON(base.BaseV2ComputeTest):
    """Integration tests for server creation attributes and networking."""

    # Disk partitioning strategy requested at server creation; the
    # ServersTestManualDisk subclass re-runs everything with 'MANUAL'.
    disk_config = 'AUTO'

    @classmethod
    def setup_credentials(cls):
        cls.prepare_instance_network()
        super(ServersTestJSON, cls).setup_credentials()

    @classmethod
    def setup_clients(cls):
        super(ServersTestJSON, cls).setup_clients()
        cls.client = cls.servers_client
        cls.network_client = cls.os.network_client
        cls.networks_client = cls.os.networks_client
        cls.subnets_client = cls.os.subnets_client

    @classmethod
    def resource_setup(cls):
        # Boot one shared server with known metadata/addresses; most tests
        # below assert against this single instance.
        cls.set_validation_resources()
        super(ServersTestJSON, cls).resource_setup()
        cls.meta = {'hello': 'world'}
        cls.accessIPv4 = '1.1.1.1'
        cls.accessIPv6 = '0000:0000:0000:0000:0000:babe:220.12.22.2'
        cls.name = data_utils.rand_name('server')
        cls.password = data_utils.rand_password()
        disk_config = cls.disk_config
        cls.server_initial = cls.create_test_server(
            validatable=True,
            wait_until='ACTIVE',
            name=cls.name,
            metadata=cls.meta,
            accessIPv4=cls.accessIPv4,
            accessIPv6=cls.accessIPv6,
            disk_config=disk_config,
            adminPass=cls.password)
        cls.server = (cls.client.show_server(cls.server_initial['id'])
                      ['server'])

    def _create_net_subnet_ret_net_from_cidr(self, cidr):
        # Create a network plus an IPv4 subnet for *cidr*, registering
        # cleanups, and return the network body.
        name_net = data_utils.rand_name(self.__class__.__name__)
        net = self.networks_client.create_network(name=name_net)
        self.addCleanup(self.networks_client.delete_network,
                        net['network']['id'])

        subnet = self.subnets_client.create_subnet(
            network_id=net['network']['id'],
            cidr=cidr,
            ip_version=4)
        self.addCleanup(self.subnets_client.delete_subnet,
                        subnet['subnet']['id'])
        return net

    @test.attr(type='smoke')
    @test.idempotent_id('5de47127-9977-400a-936f-abcfbec1218f')
    def test_verify_server_details(self):
        # Verify the specified server attributes are set correctly
        self.assertEqual(self.accessIPv4, self.server['accessIPv4'])
        # NOTE(maurosr): See http://tools.ietf.org/html/rfc5952 (section 4)
        # Here we compare directly with the canonicalized format.
        self.assertEqual(self.server['accessIPv6'],
                         str(netaddr.IPAddress(self.accessIPv6)))
        self.assertEqual(self.name, self.server['name'])
        self.assertEqual(self.image_ref, self.server['image']['id'])
        self.assertEqual(self.flavor_ref, self.server['flavor']['id'])
        self.assertEqual(self.meta, self.server['metadata'])

    @test.attr(type='smoke')
    @test.idempotent_id('9a438d88-10c6-4bcd-8b5b-5b6e25e1346f')
    def test_list_servers(self):
        # The created server should be in the list of all servers
        body = self.client.list_servers()
        servers = body['servers']
        found = any([i for i in servers if i['id'] == self.server['id']])
        self.assertTrue(found)

    @test.idempotent_id('585e934c-448e-43c4-acbf-d06a9b899997')
    def test_list_servers_with_detail(self):
        # The created server should be in the detailed list of all servers
        body = self.client.list_servers(detail=True)
        servers = body['servers']
        found = any([i for i in servers if i['id'] == self.server['id']])
        self.assertTrue(found)

    @test.idempotent_id('cbc0f52f-05aa-492b-bdc1-84b575ca294b')
    @testtools.skipUnless(CONF.validation.run_validation,
                          'Instance validation tests are disabled.')
    def test_verify_created_server_vcpus(self):
        # Verify that the number of vcpus reported by the instance matches
        # the amount stated by the flavor
        flavor = self.flavors_client.show_flavor(self.flavor_ref)['flavor']
        linux_client = remote_client.RemoteClient(
            self.get_server_ip(self.server),
            self.ssh_user,
            self.password,
            self.validation_resources['keypair']['private_key'])
        self.assertEqual(flavor['vcpus'], linux_client.get_number_of_vcpus())

    @test.idempotent_id('ac1ad47f-984b-4441-9274-c9079b7a0666')
    @testtools.skipUnless(CONF.validation.run_validation,
                          'Instance validation tests are disabled.')
    def test_host_name_is_same_as_server_name(self):
        # Verify the instance host name is the same as the server name
        linux_client = remote_client.RemoteClient(
            self.get_server_ip(self.server),
            self.ssh_user,
            self.password,
            self.validation_resources['keypair']['private_key'])
        self.assertTrue(linux_client.hostname_equals_servername(self.name))

    @test.idempotent_id('ed20d3fb-9d1f-4329-b160-543fbd5d9811')
    def test_create_server_with_scheduler_hint_group(self):
        # Create a server with the scheduler hint "group".
        name = data_utils.rand_name('server_group')
        policies = ['affinity']
        body = self.server_groups_client.create_server_group(
            name=name, policies=policies)['server_group']
        group_id = body['id']
        self.addCleanup(self.server_groups_client.delete_server_group,
                        group_id)

        hints = {'group': group_id}
        server = self.create_test_server(scheduler_hints=hints,
                                         wait_until='ACTIVE')

        # Check a server is in the group
        server_group = (self.server_groups_client.show_server_group(group_id)
                        ['server_group'])
        self.assertIn(server['id'], server_group['members'])

    @test.idempotent_id('0578d144-ed74-43f8-8e57-ab10dbf9b3c2')
    @testtools.skipUnless(CONF.service_available.neutron,
                          'Neutron service must be available.')
    def test_verify_multiple_nics_order(self):
        # Verify that the networks order given at the server creation is
        # preserved within the server.
        net1 = self._create_net_subnet_ret_net_from_cidr('19.80.0.0/24')
        net2 = self._create_net_subnet_ret_net_from_cidr('19.86.0.0/24')

        networks = [{'uuid': net1['network']['id']},
                    {'uuid': net2['network']['id']}]

        server_multi_nics = self.create_test_server(
            networks=networks, wait_until='ACTIVE')

        # Cleanup server; this is needed in the test case because with the LIFO
        # nature of the cleanups, if we don't delete the server first, the port
        # will still be part of the subnet and we'll get a 409 from Neutron
        # when trying to delete the subnet. The tear down in the base class
        # will try to delete the server and get a 404 but it's ignored so
        # we're OK.
        def cleanup_server():
            self.client.delete_server(server_multi_nics['id'])
            waiters.wait_for_server_termination(self.client,
                                                server_multi_nics['id'])

        self.addCleanup(cleanup_server)

        addresses = (self.client.list_addresses(server_multi_nics['id'])
                     ['addresses'])

        # We can't predict the ip addresses assigned to the server on networks.
        # Sometimes the assigned addresses are ['19.80.0.2', '19.86.0.2'], at
        # other times ['19.80.0.3', '19.86.0.3']. So we check if the first
        # address is in first network, similarly second address is in second
        # network.
        addr = [addresses[net1['network']['name']][0]['addr'],
                addresses[net2['network']['name']][0]['addr']]
        networks = [netaddr.IPNetwork('19.80.0.0/24'),
                    netaddr.IPNetwork('19.86.0.0/24')]
        for address, network in zip(addr, networks):
            self.assertIn(address, network)

    @test.idempotent_id('1678d144-ed74-43f8-8e57-ab10dbf9b3c2')
    @testtools.skipUnless(CONF.service_available.neutron,
                          'Neutron service must be available.')
    # The below skipUnless should be removed once Kilo-eol happens.
    @testtools.skipUnless(CONF.compute_feature_enabled.
                          allow_duplicate_networks,
                          'Duplicate networks must be allowed')
    def test_verify_duplicate_network_nics(self):
        # Verify that server creation does not fail when more than one nic
        # is created on the same network.
        net1 = self._create_net_subnet_ret_net_from_cidr('19.80.0.0/24')
        net2 = self._create_net_subnet_ret_net_from_cidr('19.86.0.0/24')

        # net1 appears twice on purpose: the server gets two NICs on it.
        networks = [{'uuid': net1['network']['id']},
                    {'uuid': net2['network']['id']},
                    {'uuid': net1['network']['id']}]

        server_multi_nics = self.create_test_server(
            networks=networks, wait_until='ACTIVE')

        # Same LIFO-cleanup consideration as in test_verify_multiple_nics_order.
        def cleanup_server():
            self.client.delete_server(server_multi_nics['id'])
            waiters.wait_for_server_termination(self.client,
                                                server_multi_nics['id'])

        self.addCleanup(cleanup_server)

        addresses = (self.client.list_addresses(server_multi_nics['id'])
                     ['addresses'])

        addr = [addresses[net1['network']['name']][0]['addr'],
                addresses[net2['network']['name']][0]['addr'],
                addresses[net1['network']['name']][1]['addr']]
        networks = [netaddr.IPNetwork('19.80.0.0/24'),
                    netaddr.IPNetwork('19.86.0.0/24'),
                    netaddr.IPNetwork('19.80.0.0/24')]
        for address, network in zip(addr, networks):
            self.assertIn(address, network)
class ServersWithSpecificFlavorTestJSON(base.BaseV2ComputeAdminTest):
    """Admin test that boots servers from custom flavors (ephemeral disk)."""

    disk_config = 'AUTO'

    @classmethod
    def setup_credentials(cls):
        cls.prepare_instance_network()
        super(ServersWithSpecificFlavorTestJSON, cls).setup_credentials()

    @classmethod
    def setup_clients(cls):
        super(ServersWithSpecificFlavorTestJSON, cls).setup_clients()
        # Flavor creation requires admin credentials.
        cls.flavor_client = cls.os_adm.flavors_client
        cls.client = cls.servers_client

    @classmethod
    def resource_setup(cls):
        cls.set_validation_resources()
        super(ServersWithSpecificFlavorTestJSON, cls).resource_setup()

    @test.idempotent_id('b3c7bcfc-bb5b-4e22-b517-c7f686b802ca')
    @testtools.skipUnless(CONF.validation.run_validation,
                          'Instance validation tests are disabled.')
    def test_verify_created_server_ephemeral_disk(self):
        # Verify that the ephemeral disk is created when creating server
        flavor_base = self.flavors_client.show_flavor(
            self.flavor_ref)['flavor']

        def create_flavor_with_extra_specs():
            # Clone the base flavor but request one ephemeral disk.
            flavor_with_eph_disk_name = data_utils.rand_name('eph_flavor')
            flavor_with_eph_disk_id = data_utils.rand_int_id(start=1000)

            ram = flavor_base['ram']
            vcpus = flavor_base['vcpus']
            disk = flavor_base['disk']

            # Create a flavor with extra specs
            flavor = (self.flavor_client.
                      create_flavor(name=flavor_with_eph_disk_name,
                                    ram=ram, vcpus=vcpus, disk=disk,
                                    id=flavor_with_eph_disk_id,
                                    ephemeral=1))['flavor']
            self.addCleanup(flavor_clean_up, flavor['id'])

            return flavor['id']

        def create_flavor_without_extra_specs():
            # Clone the base flavor with no ephemeral disk (control case).
            flavor_no_eph_disk_name = data_utils.rand_name('no_eph_flavor')
            flavor_no_eph_disk_id = data_utils.rand_int_id(start=1000)

            ram = flavor_base['ram']
            vcpus = flavor_base['vcpus']
            disk = flavor_base['disk']

            # Create a flavor without extra specs
            flavor = (self.flavor_client.
                      create_flavor(name=flavor_no_eph_disk_name,
                                    ram=ram, vcpus=vcpus, disk=disk,
                                    id=flavor_no_eph_disk_id))['flavor']
            self.addCleanup(flavor_clean_up, flavor['id'])

            return flavor['id']

        def flavor_clean_up(flavor_id):
            self.flavor_client.delete_flavor(flavor_id)
            self.flavor_client.wait_for_resource_deletion(flavor_id)

        flavor_with_eph_disk_id = create_flavor_with_extra_specs()
        flavor_no_eph_disk_id = create_flavor_without_extra_specs()

        admin_pass = self.image_ssh_password

        server_no_eph_disk = self.create_test_server(
            validatable=True,
            wait_until='ACTIVE',
            adminPass=admin_pass,
            flavor=flavor_no_eph_disk_id)

        # Get partition number of server without extra specs.
        server_no_eph_disk = self.client.show_server(
            server_no_eph_disk['id'])['server']
        linux_client = remote_client.RemoteClient(
            self.get_server_ip(server_no_eph_disk),
            self.ssh_user,
            admin_pass,
            self.validation_resources['keypair']['private_key'])
        partition_num = len(linux_client.get_partitions().split('\n'))

        # Explicit server deletion necessary for Juno compatibility
        self.client.delete_server(server_no_eph_disk['id'])

        server_with_eph_disk = self.create_test_server(
            validatable=True,
            wait_until='ACTIVE',
            adminPass=admin_pass,
            flavor=flavor_with_eph_disk_id)

        server_with_eph_disk = self.client.show_server(
            server_with_eph_disk['id'])['server']
        linux_client = remote_client.RemoteClient(
            self.get_server_ip(server_with_eph_disk),
            self.ssh_user,
            admin_pass,
            self.validation_resources['keypair']['private_key'])
        partition_num_emph = len(linux_client.get_partitions().split('\n'))
        # Exactly one extra partition should appear: the ephemeral disk.
        self.assertEqual(partition_num + 1, partition_num_emph)
class ServersTestManualDisk(ServersTestJSON):
    # Re-run the whole ServersTestJSON suite with MANUAL disk partitioning.
    disk_config = 'MANUAL'

    @classmethod
    def skip_checks(cls):
        super(ServersTestManualDisk, cls).skip_checks()
        # MANUAL disk_config requires the DiskConfig API extension.
        if not CONF.compute_feature_enabled.disk_config:
            raise cls.skipException("DiskConfig extension not enabled.")
| apache-2.0 |
hellodata/hellodate | 2/site-packages/django/contrib/formtools/tests/wizard/wizardtests/forms.py | 103 | 2165 | import os
import tempfile
from django import forms
from django.contrib.auth.models import User
from django.core.files.storage import FileSystemStorage
from django.forms.formsets import formset_factory
from django.forms.models import modelformset_factory
from django.http import HttpResponse
from django.template import Template, Context
from django.contrib.formtools.wizard.views import WizardView
# File uploads made during wizard tests land in a throwaway temp directory
# (rooted at DJANGO_TEST_TEMP_DIR when that environment variable is set).
temp_storage_location = tempfile.mkdtemp(dir=os.environ.get('DJANGO_TEST_TEMP_DIR'))
temp_storage = FileSystemStorage(location=temp_storage_location)
# Per-step forms used by the wizard tests below.
class Page1(forms.Form):
    name = forms.CharField(max_length=100)
    user = forms.ModelChoiceField(queryset=User.objects.all())
    thirsty = forms.NullBooleanField()


class Page2(forms.Form):
    address1 = forms.CharField(max_length=100)
    address2 = forms.CharField(max_length=100)
    file1 = forms.FileField()


class Page3(forms.Form):
    random_crap = forms.CharField(max_length=100)


# Step 4 is a formset of Page3 with two extra blank forms.
Page4 = formset_factory(Page3, extra=2)
class ContactWizard(WizardView):
    """Test wizard that dumps each step's cleaned data into the response context."""

    # Uploaded files are stored in the module-level temp FileSystemStorage.
    file_storage = temp_storage

    def done(self, form_list, **kwargs):
        c = Context({
            'form_list': [x.cleaned_data for x in form_list],
            'all_cleaned_data': self.get_all_cleaned_data(),
        })

        for form in self.form_list.keys():
            c[form] = self.get_cleaned_data_for_step(form)

        # Deliberately query a nonexistent step to exercise the fallback path.
        c['this_will_fail'] = self.get_cleaned_data_for_step('this_will_fail')
        return HttpResponse(Template('').render(c))

    def get_context_data(self, form, **kwargs):
        context = super(ContactWizard, self).get_context_data(form, **kwargs)
        if self.storage.current_step == 'form2':
            context.update({'another_var': True})
        return context
class UserForm(forms.ModelForm):
    class Meta:
        model = User
        fields = ('username', 'email')


UserFormSet = modelformset_factory(User, form=UserForm)


# Concrete wizards binding ContactWizard to each storage backend under test.
class SessionContactWizard(ContactWizard):
    storage_name = 'django.contrib.formtools.wizard.storage.session.SessionStorage'


class CookieContactWizard(ContactWizard):
    storage_name = 'django.contrib.formtools.wizard.storage.cookie.CookieStorage'
| lgpl-3.0 |
cboling/xos | xos/core/models/instance.py | 2 | 9964 | import os
from django.db import models
from django.db.models import Q
from django.core import exceptions
from core.models import PlCoreBase,PlCoreBaseManager,PlCoreBaseDeletionManager
from core.models.plcorebase import StrippedCharField
from core.models import Image
from core.models import Slice, SlicePrivilege
from core.models import Node
from core.models import Site
from core.models import Deployment
from core.models import Controller
from core.models import User
from core.models import Tag
from core.models import Flavor
from django.contrib.contenttypes import generic
from xos.config import Config
from django.core.exceptions import PermissionDenied, ValidationError
# Module-level XOS configuration object, consulted by the managers below.
config = Config()
def get_default_flavor(controller = None):
    # Find a default flavor that can be used for an instance. This is
    # particularly useful in evolution, and is also usable from admin.py.
    # Preference order: a flavor marked default, otherwise the first flavor
    # available; None when there are no flavors at all.
    if controller:
        candidates = controller.flavors.all()
    else:
        candidates = Flavor.objects.all()

    if not candidates:
        return None

    marked_default = next((fl for fl in candidates if fl.default), None)
    if marked_default is not None:
        return marked_default
    return candidates[0]
class InstanceDeletionManager(PlCoreBaseDeletionManager):
    """Manager for deleted Instances, optionally scoped to one observer backend."""

    def get_queryset(self):
        parent = super(InstanceDeletionManager, self)
        try:
            backend_type = config.observer_backend_type
        except AttributeError:
            # No backend type configured: do not filter by controller backend.
            backend_type = None

        # The parent manager may only provide the pre-Django-1.6 spelling.
        parent_queryset = parent.get_queryset() if hasattr(parent, "get_queryset") else parent.get_query_set()

        if (backend_type):
            return parent_queryset.filter(Q(node__controller__backend_type=backend_type))
        else:
            return parent_queryset

    # deprecated in django 1.7 in favor of get_queryset().
    def get_query_set(self):
        return self.get_queryset()
class InstanceManager(PlCoreBaseManager):
    """Default Instance manager, optionally scoped to one observer backend.

    Mirrors InstanceDeletionManager's filtering for live (non-deleted) rows.
    """

    def get_queryset(self):
        parent = super(InstanceManager, self)
        try:
            backend_type = config.observer_backend_type
        except AttributeError:
            # No backend type configured: do not filter by controller backend.
            backend_type = None

        # The parent manager may only provide the pre-Django-1.6 spelling.
        parent_queryset = parent.get_queryset() if hasattr(parent, "get_queryset") else parent.get_query_set()

        if backend_type:
            return parent_queryset.filter(Q(node__controller__backend_type=backend_type))
        else:
            return parent_queryset

    # deprecated in django 1.7 in favor of get_queryset().
    def get_query_set(self):
        return self.get_queryset()
# Create your models here.
class Instance(PlCoreBase):
ISOLATION_CHOICES = (('vm', 'Virtual Machine'), ('container', 'Container'), ('container_vm', 'Container In VM'))
objects = InstanceManager()
deleted_objects = InstanceDeletionManager()
instance_id = StrippedCharField(null=True, blank=True, max_length=200, help_text="Nova instance id")
instance_uuid = StrippedCharField(null=True, blank=True, max_length=200, help_text="Nova instance uuid")
name = StrippedCharField(max_length=200, help_text="Instance name")
instance_name = StrippedCharField(blank=True, null=True, max_length=200, help_text="OpenStack generated name")
ip = models.GenericIPAddressField(help_text="Instance ip address", blank=True, null=True)
image = models.ForeignKey(Image, related_name='instances')
creator = models.ForeignKey(User, related_name='instances', blank=True, null=True)
slice = models.ForeignKey(Slice, related_name='instances')
deployment = models.ForeignKey(Deployment, verbose_name='deployment', related_name='instance_deployment')
node = models.ForeignKey(Node, related_name='instances')
numberCores = models.IntegerField(verbose_name="Number of Cores", help_text="Number of cores for instance", default=0)
flavor = models.ForeignKey(Flavor, help_text="Flavor of this instance", default=get_default_flavor)
tags = generic.GenericRelation(Tag)
userData = models.TextField(blank=True, null=True, help_text="user_data passed to instance during creation")
isolation = models.CharField(null=False, blank=False, max_length=30, choices=ISOLATION_CHOICES, default="vm")
volumes = models.TextField(null=True, blank=True, help_text="Comma-separated list of directories to expose to parent context")
parent = models.ForeignKey("Instance", null=True, blank=True, help_text="Parent Instance for containers nested inside of VMs")
def get_controller (self):
return self.node.site_deployment.controller
def tologdict(self):
d=super(Instance,self).tologdict()
try:
d['slice_name']=self.slice.name
d['controller_name']=self.get_controller().name
except:
pass
return d
def __unicode__(self):
if self.name and Slice.objects.filter(id=self.slice_id) and (self.name != self.slice.name):
# NOTE: The weird check on self.slice_id was due to a problem when
# deleting the slice before the instance.
return u'%s' % self.name
elif self.instance_name:
return u'%s' % (self.instance_name)
elif self.id:
return u'uninstantiated-%s' % str(self.id)
elif self.slice:
return u'unsaved-instance on %s' % self.slice.name
else:
return u'unsaved-instance'
def save(self, *args, **kwds):
if not self.name:
self.name = self.slice.name
if not self.creator and hasattr(self, 'caller'):
self.creator = self.caller
if not self.creator:
raise ValidationError('instance has no creator')
if (self.isolation == "container") or (self.isolation == "container_vm"):
if (self.image.kind != "container"):
raise ValidationError("Container instance must use container image")
elif (self.isolation == "vm"):
if (self.image.kind != "vm"):
raise ValidationError("VM instance must use VM image")
if (self.isolation == "container_vm") and (not self.parent):
raise ValidationError("Container-vm instance must have a parent")
if (self.parent) and (self.isolation != "container_vm"):
raise ValidationError("Parent field can only be set on Container-vm instances")
if (self.slice.creator != self.creator):
# Check to make sure there's a slice_privilege for the user. If there
# isn't, then keystone will throw an exception inside the observer.
slice_privs = SlicePrivilege.objects.filter(slice=self.slice, user=self.creator)
if not slice_privs:
raise ValidationError('instance creator has no privileges on slice')
# XXX smbaker - disabled for now, was causing fault in tenant view create slice
# if not self.controllerNetwork.test_acl(slice=self.slice):
# raise exceptions.ValidationError("Deployment %s's ACL does not allow any of this slice %s's users" % (self.controllerNetwork.name, self.slice.name))
super(Instance, self).save(*args, **kwds)
def can_update(self, user):
return user.can_update_slice(self.slice)
def all_ips(self):
ips={}
for ns in self.ports.all():
if ns.ip:
ips[ns.network.name] = ns.ip
return ips
def all_ips_string(self):
    """Comma-separated list of this instance's IP addresses, ordered by network name."""
    addresses = self.all_ips()
    return ", ".join(addresses[name] for name in sorted(addresses))
# Column label used by the Django admin for this computed field.
all_ips_string.short_description = "addresses"
def get_public_ip(self):
    """Return the first publicly visible, untranslated address on any port, or None."""
    for port in self.ports.all():
        template = port.network.template
        if port.ip and template.visibility == "public" and template.translation == "none":
            return port.ip
    return None
# Return this instance's address on the first network whose (lower-cased)
# name contains the given substring.
def get_network_ip(self, pattern):
    for port in self.ports.all():
        if pattern in port.network.name.lower():
            return port.ip
    return None
# Address the synchronizer should use to SSH to the instance:
# prefer the management network, fall back to the NAT network.
def get_ssh_ip(self):
    return self.get_network_ip("management") or self.get_network_ip("nat")
@staticmethod
def select_by_user(user):
    """Queryset of instances visible to *user*: everything for admins, otherwise
    only instances on slices the user can see."""
    if user.is_admin:
        return Instance.objects.all()
    visible_slices = Slice.select_by_user(user)
    return Instance.objects.filter(slice__in=visible_slices)
def get_cpu_stats(self):
    """Fetch this instance's CPU meter from the monitoring service."""
    # Renamed local: 'filter' shadowed the builtin of the same name.
    meter_filter = 'instance_id=%s' % self.instance_id
    return monitor.get_meter('cpu', meter_filter, None)
def get_bw_stats(self):
    """Fetch this instance's outgoing-bandwidth meter from the monitoring service."""
    # Renamed local: 'filter' shadowed the builtin of the same name.
    meter_filter = 'instance_id=%s' % self.instance_id
    return monitor.get_meter('network.outgoing.bytes', meter_filter, None)
def get_node_stats(self):
    # NOTE: placeholder metric — unclear what should be reported here;
    # currently always returns 1.
    return 1
def get_ssh_command(self):
    """Shell command to SSH into the instance via its node as a jump host,
    or None when the instance is not fully instantiated yet."""
    if self.instance_id and self.node and self.instance_name:
        return 'ssh -o "ProxyCommand ssh -q %s@%s" ubuntu@%s' % (self.instance_id, self.node.name, self.instance_name)
    return None
def get_public_keys(self):
    """Collect the SSH public keys that should be authorized on this instance:
    slice members, the instance creator, the slice creator, and the slice's
    service key (when present)."""
    memberships = SlicePrivilege.objects.filter(slice=self.slice)
    keys = {m.user.public_key for m in memberships if m.user.public_key}
    for extra_key in (self.creator.public_key, self.slice.creator.public_key):
        if extra_key:
            keys.add(extra_key)
    if self.slice.service and self.slice.service.public_key:
        keys.add(self.slice.service.public_key)
    return keys
def controller_setter(instance, **kwargs):
    """post_init hook: derive instance.controller from its node's site deployment.

    Best-effort: any failure (e.g. node is None or not yet wired up) leaves
    controller as None rather than raising during model initialization.
    """
    try:
        instance.controller = instance.node.site_deployment.controller
    except Exception:  # narrowed from bare 'except' so KeyboardInterrupt/SystemExit still propagate
        instance.controller = None
# Populate Instance.controller automatically whenever a model instance is
# initialized (covers both ORM loads and direct construction).
models.signals.post_init.connect(controller_setter, Instance)
| apache-2.0 |
kronoscode/Booktype | lib/booki/editor/management/commands/bookexport.py | 3 | 2740 | # This file is part of Booktype.
# Copyright (c) 2012 Aleksandar Erkalovic <aleksandar.erkalovic@sourcefabric.org>
#
# Booktype is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Booktype is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Booktype. If not, see <http://www.gnu.org/licenses/>.
from django.core.management.base import BaseCommand, CommandError
from optparse import make_option
from booki.editor import common, models
import shutil
class Command(BaseCommand):
help = "Export book content as a ZIP file. For now, only the content of one book version will be exported and you will not get the history data."
args = "<book name>"
option_list = BaseCommand.option_list + (
make_option('--book-version',
action='store',
dest='book_version',
default=None,
help='Book version, e.g. "1.0".'),
make_option('--output',
action='store',
dest='output_name',
default=None,
help='Output filename or -- for STDOUT, e.g. "my_book.zip".'),
)
requires_model_validation = False
def handle(self, *args, **options):
if len(args) == 0:
raise CommandError("You must specify book name!")
try:
book = models.Book.objects.get(url_title__iexact=args[0])
except models.Book.DoesNotExist:
raise CommandError('Book "%s" does not exist.' % args[0])
book_version = book.getVersion(options['book_version'])
if not book_version:
raise CommandError('Book version %s does not exist.' % options['book_version'])
fileName = common.exportBook(book_version)
exportFileName = None
if options['output_name']:
if options['output_name'] == '--':
print open(fileName,'rb').read(),
import os
os.unlink(fileName)
return
else:
exportFileName = options['output_name']
else:
exportFileName = 'export-%s.zip' % book.url_title
shutil.move(fileName, exportFileName)
if options['verbosity'] in ['1', '2']:
print 'Book successfully exported into "%s" file.' % exportFileName
| agpl-3.0 |
endlessm/chromium-browser | third_party/Python-Markdown/markdown/__init__.py | 63 | 20784 | """
Python Markdown
===============
Python Markdown converts Markdown to HTML and can be used as a library or
called from the command line.
## Basic usage as a module:
import markdown
html = markdown.markdown(your_text_string)
See <https://pythonhosted.org/Markdown/> for more
information and instructions on how to extend the functionality of
Python Markdown. Read that before you try modifying this file.
## Authors and License
Started by [Manfred Stienstra](http://www.dwerg.net/). Continued and
maintained by [Yuri Takhteyev](http://www.freewisdom.org), [Waylan
Limberg](http://achinghead.com/) and [Artem Yunusov](http://blog.splyer.com).
Contact: markdown@freewisdom.org
Copyright 2007-2013 The Python Markdown Project (v. 1.7 and later)
Copyright 200? Django Software Foundation (OrderedDict implementation)
Copyright 2004, 2005, 2006 Yuri Takhteyev (v. 0.2-1.6b)
Copyright 2004 Manfred Stienstra (the original version)
License: BSD (see LICENSE for details).
"""
from __future__ import absolute_import
from __future__ import unicode_literals
from .__version__ import version, version_info # noqa
import codecs
import sys
import logging
import warnings
import importlib
from . import util
from .preprocessors import build_preprocessors
from .blockprocessors import build_block_parser
from .treeprocessors import build_treeprocessors
from .inlinepatterns import build_inlinepatterns
from .postprocessors import build_postprocessors
from .extensions import Extension
from .serializers import to_html_string, to_xhtml_string
__all__ = ['Markdown', 'markdown', 'markdownFromFile']
logger = logging.getLogger('MARKDOWN')
class Markdown(object):
    """Convert Markdown to HTML."""

    doc_tag = "div"     # Element used to wrap document - later removed

    # Default values for instance options settable via keyword arguments.
    option_defaults = {
        'html_replacement_text': '[HTML_REMOVED]',
        'tab_length': 4,
        'enable_attributes': True,
        'smart_emphasis': True,
        'lazy_ol': True,
    }

    # Serializer callables keyed by the supported output format names.
    output_formats = {
        'html': to_html_string,
        'html4': to_html_string,
        'html5': to_html_string,
        'xhtml': to_xhtml_string,
        'xhtml1': to_xhtml_string,
        'xhtml5': to_xhtml_string,
    }

    # Characters that may be backslash-escaped in Markdown source.
    ESCAPED_CHARS = ['\\', '`', '*', '_', '{', '}', '[', ']',
                     '(', ')', '>', '#', '+', '-', '.', '!']

    def __init__(self, *args, **kwargs):
        """
        Creates a new Markdown instance.

        Keyword arguments:

        * extensions: A list of extensions.
           If they are of type string, the module mdx_name.py will be loaded.
           If they are a subclass of markdown.Extension, they will be used
           as-is.
        * extension_configs: Configuration settings for extensions.
        * output_format: Format of output. Supported formats are:
            * "xhtml1": Outputs XHTML 1.x. Default.
            * "xhtml5": Outputs XHTML style tags of HTML 5
            * "xhtml": Outputs latest supported version of XHTML
              (currently XHTML 1.1).
            * "html4": Outputs HTML 4
            * "html5": Outputs HTML style tags of HTML 5
            * "html": Outputs latest supported version of HTML
              (currently HTML 4).
            Note that it is suggested that the more specific formats ("xhtml1"
            and "html4") be used as "xhtml" or "html" may change in the future
            if it makes sense at that time.
        * safe_mode: Deprecated! Disallow raw html. One of "remove", "replace"
          or "escape".
        * html_replacement_text: Deprecated! Text used when safe_mode is set
          to "replace".
        * tab_length: Length of tabs in the source. Default: 4
        * enable_attributes: Enable the conversion of attributes. Default: True
        * smart_emphasis: Treat `_connected_words_` intelligently Default: True
        * lazy_ol: Ignore number of first item of ordered lists. Default: True
        """

        # For backward compatibility, loop through old positional args
        pos = ['extensions', 'extension_configs', 'safe_mode', 'output_format']
        for c, arg in enumerate(args):
            if pos[c] not in kwargs:
                kwargs[pos[c]] = arg
            if c+1 == len(pos):  # pragma: no cover
                # ignore any additional args
                break
        if len(args):
            warnings.warn('Positional arguments are deprecated in Markdown. '
                          'Use keyword arguments only.',
                          DeprecationWarning)

        # Loop through kwargs and assign defaults
        for option, default in self.option_defaults.items():
            setattr(self, option, kwargs.get(option, default))

        self.safeMode = kwargs.get('safe_mode', False)
        if self.safeMode and 'enable_attributes' not in kwargs:
            # Disable attributes in safeMode when not explicitly set
            self.enable_attributes = False

        if 'safe_mode' in kwargs:
            warnings.warn('"safe_mode" is deprecated in Python-Markdown. '
                          'Use an HTML sanitizer (like '
                          'Bleach http://bleach.readthedocs.org/) '
                          'if you are parsing untrusted markdown text. '
                          'See the 2.6 release notes for more info',
                          DeprecationWarning)

        if 'html_replacement_text' in kwargs:
            warnings.warn('The "html_replacement_text" keyword is '
                          'deprecated along with "safe_mode".',
                          DeprecationWarning)

        self.registeredExtensions = []
        self.docType = ""
        self.stripTopLevelTags = True

        self.build_parser()

        self.references = {}
        self.htmlStash = util.HtmlStash()
        self.registerExtensions(extensions=kwargs.get('extensions', []),
                                configs=kwargs.get('extension_configs', {}))
        self.set_output_format(kwargs.get('output_format', 'xhtml1'))
        self.reset()

    def build_parser(self):
        """ Build the parser from the various parts. """
        self.preprocessors = build_preprocessors(self)
        self.parser = build_block_parser(self)
        self.inlinePatterns = build_inlinepatterns(self)
        self.treeprocessors = build_treeprocessors(self)
        self.postprocessors = build_postprocessors(self)
        return self

    def registerExtensions(self, extensions, configs):
        """
        Register extensions with this instance of Markdown.

        Keyword arguments:

        * extensions: A list of extensions, which can either
           be strings or objects.  See the docstring on Markdown.
        * configs: A dictionary mapping module names to config options.

        """
        for ext in extensions:
            # Strings name an extension module to import and instantiate.
            if isinstance(ext, util.string_type):
                ext = self.build_extension(ext, configs.get(ext, {}))
            if isinstance(ext, Extension):
                ext.extendMarkdown(self, globals())
                logger.debug(
                    'Successfully loaded extension "%s.%s".'
                    % (ext.__class__.__module__, ext.__class__.__name__)
                )
            elif ext is not None:
                raise TypeError(
                    'Extension "%s.%s" must be of type: "markdown.Extension"'
                    % (ext.__class__.__module__, ext.__class__.__name__))

        return self

    def build_extension(self, ext_name, configs):
        """Build extension by name, then return the module.

        The extension name may contain arguments as part of the string in the
        following format: "extname(key1=value1,key2=value2)"

        """

        configs = dict(configs)

        # Parse extensions config params (ignore the order)
        pos = ext_name.find("(")  # find the first "("
        if pos > 0:
            ext_args = ext_name[pos+1:-1]
            ext_name = ext_name[:pos]
            pairs = [x.split("=") for x in ext_args.split(",")]
            configs.update([(x.strip(), y.strip()) for (x, y) in pairs])
            warnings.warn('Setting configs in the Named Extension string is '
                          'deprecated. It is recommended that you '
                          'pass an instance of the extension class to '
                          'Markdown or use the "extension_configs" keyword. '
                          'The current behavior will raise an error in version 2.7. '
                          'See the Release Notes for Python-Markdown version '
                          '2.6 for more info.', DeprecationWarning)

        # Get class name (if provided): `path.to.module:ClassName`
        ext_name, class_name = ext_name.split(':', 1) \
            if ':' in ext_name else (ext_name, '')

        # Try loading the extension first from one place, then another
        try:
            # Assume string uses dot syntax (`path.to.some.module`)
            module = importlib.import_module(ext_name)
            logger.debug(
                'Successfuly imported extension module "%s".' % ext_name
            )
            # For backward compat (until deprecation)
            # check that this is an extension.
            if ('.' not in ext_name and not (hasattr(module, 'makeExtension') or
               (class_name and hasattr(module, class_name)))):
                # We have a name conflict
                # eg: extensions=['tables'] and PyTables is installed
                raise ImportError
        except ImportError:
            # Prepend `markdown.extensions.` to name
            module_name = '.'.join(['markdown.extensions', ext_name])
            try:
                module = importlib.import_module(module_name)
                logger.debug(
                    'Successfuly imported extension module "%s".' %
                    module_name
                )
                warnings.warn('Using short names for Markdown\'s builtin '
                              'extensions is deprecated. Use the '
                              'full path to the extension with Python\'s dot '
                              'notation (eg: "%s" instead of "%s"). The '
                              'current behavior will raise an error in version '
                              '2.7. See the Release Notes for '
                              'Python-Markdown version 2.6 for more info.' %
                              (module_name, ext_name),
                              DeprecationWarning)
            except ImportError:
                # Prepend `mdx_` to name
                module_name_old_style = '_'.join(['mdx', ext_name])
                try:
                    module = importlib.import_module(module_name_old_style)
                    logger.debug(
                        'Successfuly imported extension module "%s".' %
                        module_name_old_style)
                    warnings.warn('Markdown\'s behavior of prepending "mdx_" '
                                  'to an extension name is deprecated. '
                                  'Use the full path to the '
                                  'extension with Python\'s dot notation '
                                  '(eg: "%s" instead of "%s"). The current '
                                  'behavior will raise an error in version 2.7. '
                                  'See the Release Notes for Python-Markdown '
                                  'version 2.6 for more info.' %
                                  (module_name_old_style, ext_name),
                                  DeprecationWarning)
                except ImportError as e:
                    message = "Failed loading extension '%s' from '%s', '%s' " \
                        "or '%s'" % (ext_name, ext_name, module_name,
                                     module_name_old_style)
                    e.args = (message,) + e.args[1:]
                    raise

        if class_name:
            # Load given class name from module.
            return getattr(module, class_name)(**configs)
        else:
            # Expect  makeExtension() function to return a class.
            try:
                return module.makeExtension(**configs)
            except AttributeError as e:
                message = e.args[0]
                message = "Failed to initiate extension " \
                          "'%s': %s" % (ext_name, message)
                e.args = (message,) + e.args[1:]
                raise

    def registerExtension(self, extension):
        """ This gets called by the extension """
        self.registeredExtensions.append(extension)
        return self

    def reset(self):
        """
        Resets all state variables so that we can start with a new text.
        """
        self.htmlStash.reset()
        self.references.clear()

        # Give each extension a chance to clear its own state too.
        for extension in self.registeredExtensions:
            if hasattr(extension, 'reset'):
                extension.reset()

        return self

    def set_output_format(self, format):
        """ Set the output format for the class instance. """
        self.output_format = format.lower()
        try:
            self.serializer = self.output_formats[self.output_format]
        except KeyError as e:
            valid_formats = list(self.output_formats.keys())
            valid_formats.sort()
            message = 'Invalid Output Format: "%s". Use one of %s.' \
                % (self.output_format,
                   '"' + '", "'.join(valid_formats) + '"')
            e.args = (message,) + e.args[1:]
            raise
        return self

    def convert(self, source):
        """
        Convert markdown to serialized XHTML or HTML.

        Keyword arguments:

        * source: Source text as a Unicode string.

        Markdown processing takes place in five steps:

        1. A bunch of "preprocessors" munge the input text.
        2. BlockParser() parses the high-level structural elements of the
           pre-processed text into an ElementTree.
        3. A bunch of "treeprocessors" are run against the ElementTree. One
           such treeprocessor runs InlinePatterns against the ElementTree,
           detecting inline markup.
        4. Some post-processors are run against the text after the ElementTree
           has been serialized into text.
        5. The output is written to a string.

        """

        # Fixup the source text
        if not source.strip():
            return ''  # a blank unicode string

        try:
            source = util.text_type(source)
        except UnicodeDecodeError as e:
            # Customise error message while maintaining original traceback
            e.reason += '. -- Note: Markdown only accepts unicode input!'
            raise

        # Split into lines and run the line preprocessors.
        self.lines = source.split("\n")
        for prep in self.preprocessors.values():
            self.lines = prep.run(self.lines)

        # Parse the high-level elements.
        root = self.parser.parseDocument(self.lines).getroot()

        # Run the tree-processors
        for treeprocessor in self.treeprocessors.values():
            newRoot = treeprocessor.run(root)
            if newRoot is not None:
                root = newRoot

        # Serialize _properly_.  Strip top-level tags.
        output = self.serializer(root)
        if self.stripTopLevelTags:
            try:
                start = output.index(
                    '<%s>' % self.doc_tag) + len(self.doc_tag) + 2
                end = output.rindex('</%s>' % self.doc_tag)
                output = output[start:end].strip()
            except ValueError:  # pragma: no cover
                if output.strip().endswith('<%s />' % self.doc_tag):
                    # We have an empty document
                    output = ''
                else:
                    # We have a serious problem
                    raise ValueError('Markdown failed to strip top-level '
                                     'tags. Document=%r' % output.strip())

        # Run the text post-processors
        for pp in self.postprocessors.values():
            output = pp.run(output)

        return output.strip()

    def convertFile(self, input=None, output=None, encoding=None):
        """Converts a Markdown file and returns the HTML as a Unicode string.

        Decodes the file using the provided encoding (defaults to utf-8),
        passes the file content to markdown, and outputs the html to either
        the provided stream or the file with provided name, using the same
        encoding as the source file. The 'xmlcharrefreplace' error handler is
        used when encoding the output.

        **Note:** This is the only place that decoding and encoding of Unicode
        takes place in Python-Markdown.  (All other code is Unicode-in /
        Unicode-out.)

        Keyword arguments:

        * input: File object or path. Reads from stdin if `None`.
        * output: File object or path. Writes to stdout if `None`.
        * encoding: Encoding of input and output files. Defaults to utf-8.

        """

        encoding = encoding or "utf-8"

        # Read the source
        if input:
            if isinstance(input, util.string_type):
                input_file = codecs.open(input, mode="r", encoding=encoding)
            else:
                input_file = codecs.getreader(encoding)(input)
            text = input_file.read()
            input_file.close()
        else:
            text = sys.stdin.read()
            if not isinstance(text, util.text_type):
                text = text.decode(encoding)

        text = text.lstrip('\ufeff')  # remove the byte-order mark

        # Convert
        html = self.convert(text)

        # Write to file or stdout
        if output:
            if isinstance(output, util.string_type):
                output_file = codecs.open(output, "w",
                                          encoding=encoding,
                                          errors="xmlcharrefreplace")
                output_file.write(html)
                output_file.close()
            else:
                writer = codecs.getwriter(encoding)
                output_file = writer(output, errors="xmlcharrefreplace")
                output_file.write(html)
                # Don't close here. User may want to write more.
        else:
            # Encode manually and write bytes to stdout.
            html = html.encode(encoding, "xmlcharrefreplace")
            try:
                # Write bytes directly to buffer (Python 3).
                sys.stdout.buffer.write(html)
            except AttributeError:
                # Probably Python 2, which works with bytes by default.
                sys.stdout.write(html)

        return self
"""
EXPORTED FUNCTIONS
=============================================================================
Those are the two functions we really mean to export: markdown() and
markdownFromFile().
"""
def markdown(text, *args, **kwargs):
    """Convert a Markdown string to HTML and return HTML as a Unicode string.

    Convenience wrapper around the `Markdown` class for the common one-shot
    case: build a parser with the given options and run it over *text*.

    Keyword arguments:

    * text: Markdown formatted text as Unicode or ASCII string.
    * Any arguments accepted by the Markdown class.

    Returns: An HTML document as a string.

    """
    return Markdown(*args, **kwargs).convert(text)
def markdownFromFile(*args, **kwargs):
    """Read markdown code from a file and write it to a file or a stream.

    This is a shortcut function which initializes an instance of Markdown,
    and calls the convertFile method rather than convert.

    Keyword arguments:

    * input: a file name or readable object.
    * output: a file name or writable object.
    * encoding: Encoding of input and output.
    * Any arguments accepted by the Markdown class.

    """
    # For backward compatibility loop through positional args
    pos = ['input', 'output', 'extensions', 'encoding']
    c = 0
    for arg in args:
        if pos[c] not in kwargs:
            kwargs[pos[c]] = arg
        c += 1
        if c == len(pos):
            break
    if len(args):
        # Fixed typo in the warning text ("depreacted" -> "deprecated"),
        # matching the wording of the same warning in Markdown.__init__.
        warnings.warn('Positional arguments are deprecated in '
                      'Markdown and will raise an error in version 2.7. '
                      'Use keyword arguments only.',
                      DeprecationWarning)

    md = Markdown(**kwargs)
    md.convertFile(kwargs.get('input', None),
                   kwargs.get('output', None),
                   kwargs.get('encoding', None))
| bsd-3-clause |
ged-lab/khmer | sandbox/count-kmers-single.py | 2 | 4762 | #! /usr/bin/env python
# This file is part of khmer, https://github.com/dib-lab/khmer/, and is
# Copyright (C) 2015, The Regents of the University of California.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# * Neither the name of the Michigan State University nor the names
# of its contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Contact: khmer-project@idyll.org
# pylint: disable=missing-docstring,invalid-name
"""
Produce k-mer counts for all the k-mers in the given sequence file,
using the given countgraph.
% python sandbox/count-kmers-single.py <fasta/fastq>
Use '-h' for parameter help.
"""
import sys
import khmer
import argparse
import screed
import csv
from khmer.khmer_args import (build_counting_args, report_on_config, info,
add_threading_args)
from khmer.kfile import (check_input_files, check_space,
check_space_for_graph)
import threading
def get_parser():
    """Build the command-line argument parser for this script."""
    parser = build_counting_args(
        descr="Output abundances of the k-mers in the sequence file.")
    add_threading_args(parser)

    parser.add_argument(
        'input_sequence_filename',
        help='The input FAST[AQ] sequence file.')
    parser.add_argument(
        '-o', '--out', metavar="output_file", dest='output_file',
        type=argparse.FileType('w'), default=None,
        help='output counts to this file')
    return parser
def main():
    """Count every k-mer in the input file and write <kmer,count> rows as CSV.

    Pass 1 loads all reads into a countgraph (multithreaded); pass 2 walks
    the reads again, emitting each distinct k-mer once with its final count.
    """
    info('count-kmers-single.py', ['counting'])
    args = get_parser().parse_args()

    check_input_files(args.input_sequence_filename, False)

    print ('making k-mer countgraph', file=sys.stderr)
    countgraph = khmer.Countgraph(args.ksize, args.max_tablesize,
                                  args.n_tables)
    # @CTB countgraph.set_use_bigcount(args.bigcount)

    kmer_size = countgraph.ksize()
    hashsizes = countgraph.hashsizes()
    # Presence-only graph with matching table sizes, used to make sure each
    # unique k-mer is reported exactly once in pass 2.
    tracking = khmer.Nodegraph(  # pylint: disable=protected-access
        kmer_size, 1, 1, primes=hashsizes)

    print ('kmer_size: %s' % countgraph.ksize(), file=sys.stderr)
    print ('k-mer countgraph sizes: %s' % (countgraph.hashsizes(),),
           file=sys.stderr)

    if args.output_file is None:
        args.output_file = sys.stdout
    writer = csv.writer(args.output_file)

    # start loading
    rparser = khmer.ReadParser(args.input_sequence_filename)
    threads = []
    print ('consuming input, round 1 -- %s' % (args.input_sequence_filename),
           file=sys.stderr)
    for _ in range(args.threads):
        thread = \
            threading.Thread(
                target=countgraph.consume_seqfile,
                args=(rparser, )
            )
        threads.append(thread)
        thread.start()

    for thread in threads:
        thread.join()

    # Pass 2: re-read the input, reporting each previously unseen k-mer.
    for record in screed.open(args.input_sequence_filename):
        # Replace ambiguous 'N' bases so every window is hashable.
        seq = record.sequence.replace('N', 'A')
        for i in range(len(seq) - kmer_size + 1):
            kmer = seq[i:i+kmer_size]
            if not tracking.get(kmer):
                tracking.count(kmer)
                writer.writerow([kmer, str(countgraph.get(kmer))])

    print ('Total number of unique k-mers: {0}'.format(
        countgraph.n_unique_kmers()), file=sys.stderr)
# Run only when executed as a script (not on import).
if __name__ == '__main__':
    main()

# vim: set filetype=python tabstop=4 softtabstop=4 shiftwidth=4 expandtab:
# vim: set textwidth=79:
rht/zulip | zerver/lib/response.py | 2 | 2481 | from django.http import HttpResponse, HttpResponseNotAllowed
import ujson
from typing import Optional, Any, Dict, List
from zerver.lib.exceptions import JsonableError
class HttpResponseUnauthorized(HttpResponse):
    """401 response carrying the appropriate WWW-Authenticate challenge."""
    status_code = 401

    def __init__(self, realm: str, www_authenticate: Optional[str]=None) -> None:
        HttpResponse.__init__(self)
        if www_authenticate is None:
            scheme = 'Basic'
        elif www_authenticate == "session":
            scheme = 'Session'
        else:
            raise AssertionError("Invalid www_authenticate value!")
        self["WWW-Authenticate"] = '%s realm="%s"' % (scheme, realm)
def json_unauthorized(message: str, www_authenticate: Optional[str]=None) -> HttpResponse:
    """Build a 401 JSON error response with the given message."""
    response = HttpResponseUnauthorized("zulip", www_authenticate=www_authenticate)
    body = ujson.dumps({"result": "error", "msg": message}) + "\n"
    response.content = body.encode()
    return response
def json_method_not_allowed(methods: List[str]) -> HttpResponseNotAllowed:
    """Build a 405 JSON response listing the allowed HTTP methods."""
    response = HttpResponseNotAllowed(methods)
    payload = {"result": "error",
               "msg": "Method Not Allowed",
               "allowed_methods": methods}
    response.content = ujson.dumps(payload).encode()
    return response
def json_response(res_type: str="success",
                  msg: str="",
                  data: Optional[Dict[str, Any]]=None,
                  status: int=200) -> HttpResponse:
    """Serialize a standard {"result": ..., "msg": ..., **data} payload as JSON."""
    content = {"result": res_type, "msg": msg}
    if data is not None:
        content.update(data)
    body = ujson.dumps(content) + "\n"
    return HttpResponse(content=body, content_type='application/json', status=status)
def json_success(data: Optional[Dict[str, Any]]=None) -> HttpResponse:
    # Shorthand for a 200 {"result": "success", ...} response.
    return json_response(data=data)
def json_response_from_error(exception: JsonableError) -> HttpResponse:
    '''
    This should only be needed in middleware; in app code, just raise.

    When app code raises a JsonableError, the JsonErrorHandler
    middleware takes care of transforming it into a response by
    calling this function.
    '''
    # The exception object itself supplies the message, extra data,
    # and HTTP status code for the response.
    return json_response('error',
                         msg=exception.msg,
                         data=exception.data,
                         status=exception.http_status_code)
def json_error(msg: str, data: Optional[Dict[str, Any]]=None, status: int=400) -> HttpResponse:
    # Generic JSON error response; defaults to HTTP 400.
    return json_response(res_type="error", msg=msg, data=data, status=status)
| apache-2.0 |
kislyuk/domovoi | domovoi/app.py | 1 | 11227 | from __future__ import absolute_import, division, print_function, unicode_literals
import json, gzip, base64, logging
from chalice.app import Chalice, LambdaFunction, DecoratorAPI as ChaliceDecoratorAPI
class DomovoiException(Exception):
    """Raised when an incoming Lambda event cannot be routed to a registered handler."""
    pass
class ARN:
    """Parse and format an AWS ARN of the form
    'arn:partition:service:region:account_id:resource'.

    Each colon-separated field becomes an attribute; keyword arguments
    override fields parsed from the string.
    """
    fields = "arn partition service region account_id resource".split()

    def __init__(self, arn="arn:aws::::", **kwargs):
        parsed = dict(zip(self.fields, arn.split(":", 5)))
        parsed.update(kwargs)
        self.__dict__.update(parsed)

    def __str__(self):
        parts = [getattr(self, field) for field in self.fields]
        return ":".join(parts)
class StateMachine:
    """Helper for starting executions of the Step Functions state machine
    that shares this Lambda function's name."""

    def __init__(self, app, client=None):
        self.app = app
        self._client = client

    @property
    def stepfunctions(self):
        # Create the boto3 client lazily so boto3 is only imported when
        # Step Functions is actually used.
        if self._client is None:
            import boto3
            self._client = boto3.client("stepfunctions")
        return self._client

    def start_execution(self, **input):
        """Start an execution with an auto-generated name."""
        return self.start_named_execution(None, **input)

    def start_named_execution(self, name, **input):
        """Start an execution named *name* (unnamed when None) with *input*
        serialized as the JSON payload."""
        lambda_arn = ARN(self.app.lambda_context.invoked_function_arn)
        lambda_name = lambda_arn.resource.split(":")[1]
        sfn_arn = ARN(str(lambda_arn), service="states",
                      resource="stateMachine:" + lambda_name)
        call_kwargs = dict(stateMachineArn=str(sfn_arn), input=json.dumps(input))
        if name is not None:
            call_kwargs["name"] = name
        return self.stepfunctions.start_execution(**call_kwargs)
class Domovoi(Chalice):
cloudwatch_events_rules = {}  # rule name -> dict(schedule_expression, event_pattern, func)
sns_subscribers = {}  # SNS topic name -> handler function
sqs_subscribers = {}  # SQS queue name -> dict(func, batch_size, queue_attributes)
s3_subscribers = {}  # S3 bucket name -> subscription config dict (events, prefix, func, ...)
sfn_tasks = {}  # Step Functions state name -> dict(state_name, state_machine_definition, func)
cwl_sub_filters = {}  # CloudWatch Logs group name -> dict(log_group_name, filter_pattern, func)
dynamodb_event_sources = {}  # DynamoDB table name -> dict(batch_size, func)
alb_targets = {}  # URL path prefix -> dict(func, prefix)
sqs_default_queue_attributes = {"VisibilityTimeout": "320"}  # presumably applied when creating SQS queues — usage not in view; confirm
def unsupported_decorator(*args, **kwargs):
    # Installed over every public Chalice decorator in __init__ so that
    # using one fails loudly instead of silently doing nothing.
    raise NotImplementedError("Domovoi does not support this Chalice decorator")
def __init__(self, app_name="Domovoi", configure_logs=True):
    """Initialize the underlying Chalice app and disable its decorators.

    Domovoi provides its own event-source decorators; every public Chalice
    decorator is replaced with a stub that raises NotImplementedError.
    """
    Chalice.__init__(self, app_name=app_name, configure_logs=configure_logs)
    self.pure_lambda_functions = [LambdaFunction(self, name=app_name, handler_string="app.app")]
    # Stub out all public callables of the Chalice decorator API.
    for f in dir(ChaliceDecoratorAPI):
        if callable(getattr(ChaliceDecoratorAPI, f)) and not f.startswith("_"):
            setattr(self, f, Domovoi.unsupported_decorator)
def _configure_log_level(self):
    """Apply the app's debug flag to the logger: DEBUG when debugging, else INFO."""
    self.log.setLevel(logging.DEBUG if self._debug else logging.INFO)
def alb_target(self, prefix=""):
    """Decorator: register the wrapped function to handle ALB requests
    whose path starts with *prefix*."""
    def register(func):
        self.alb_targets[prefix] = {"func": func, "prefix": prefix}
        return func
    return register
def scheduled_function(self, schedule, rule_name=None):
    """Decorator: run the wrapped function on a CloudWatch Events schedule expression."""
    return self.cloudwatch_rule(schedule_expression=schedule, event_pattern=None, rule_name=rule_name)
def sns_topic_subscriber(self, topic_name):
    """Decorator: register the wrapped function as the handler for *topic_name*."""
    def register(func):
        self.sns_subscribers[topic_name] = func
        return func
    return register
def sqs_queue_subscriber(self, queue_name, batch_size=None, queue_attributes=None):
    """Decorator: register the wrapped function to consume messages from *queue_name*."""
    def register(func):
        self.sqs_subscribers[queue_name] = {"func": func,
                                            "batch_size": batch_size,
                                            "queue_attributes": queue_attributes}
        return func
    return register
def dynamodb_stream_handler(self, table_name, batch_size=None):
    """Decorator: register the wrapped function for *table_name*'s DynamoDB stream."""
    def register(func):
        self.dynamodb_event_sources[table_name] = {"batch_size": batch_size, "func": func}
        return func
    return register
def kinesis_stream_handler(self, **kwargs):
    # Not yet implemented.
    raise NotImplementedError()
def email_receipt_handler(self):
    # Not yet implemented; would build on SES receipt rules:
    # http://boto3.readthedocs.io/en/latest/reference/services/ses.html#SES.Client.create_receipt_rule
    raise NotImplementedError()
def cloudwatch_logs_sub_filter_handler(self, log_group_name, filter_pattern):
    """Decorator: register the wrapped function for a CloudWatch Logs
    subscription filter on *log_group_name* matching *filter_pattern*."""
    def register(func):
        self.cwl_sub_filters[log_group_name] = {"log_group_name": log_group_name,
                                                "filter_pattern": filter_pattern,
                                                "func": func}
        return func
    return register
def cloudwatch_event_handler(self, **kwargs):
    """Decorator: register a CloudWatch Events rule matching the given event pattern."""
    return self.cloudwatch_rule(schedule_expression=None, event_pattern=kwargs)
def s3_event_handler(self, bucket, events, prefix=None, suffix=None, use_sns=True, use_sqs=False, sqs_batch_size=1,
                     sqs_queue_attributes=None):
    """Decorator: subscribe the wrapped function to S3 events on *bucket*,
    optionally delivered via SNS and/or SQS."""
    def register(func):
        self.s3_subscribers[bucket] = {
            "events": events,
            "prefix": prefix,
            "suffix": suffix,
            "func": func,
            "use_sns": use_sns,
            "use_sqs": use_sqs,
            "sqs_batch_size": sqs_batch_size,
            "sqs_queue_attributes": sqs_queue_attributes,
        }
        return func
    return register
def cloudwatch_rule(self, schedule_expression, event_pattern, rule_name=None):
    """Decorator: register a CloudWatch Events rule (schedule or pattern)
    for the wrapped function. The rule name defaults to the function name
    and must be unique."""
    def register(func):
        key = rule_name or func.__name__
        if key in self.cloudwatch_events_rules:
            raise KeyError(func.__name__)
        self.cloudwatch_events_rules[key] = {"schedule_expression": schedule_expression,
                                             "event_pattern": event_pattern,
                                             "func": func}
        return func
    return register
def step_function_task(self, state_name, state_machine_definition):
    """Decorator: register the wrapped function as the Step Functions task
    for *state_name*. Each state may only be registered once."""
    def register(func):
        if state_name in self.sfn_tasks:
            raise KeyError(state_name)
        self.sfn_tasks[state_name] = {"state_name": state_name,
                                      "state_machine_definition": state_machine_definition,
                                      "func": func}
        return func
    return register
def register_state_machine(self, state_machine_definition):
    """Register every state whose 'Resource' is a Python callable as a
    Step Functions task handler for this app."""
    for state_name, state_data in self.get_all_states(state_machine_definition).items():
        if callable(state_data.get("Resource", None)):
            self.step_function_task(state_name, state_machine_definition)(state_data["Resource"])
@classmethod
def get_all_states(cls, state_machine):
    """Return a flat ``{state_name: state_data}`` dict for *state_machine*,
    descending recursively into Parallel states' ``Branches``."""
    states = {}
    states.update(state_machine["States"])
    for state_data in state_machine["States"].values():
        branches = state_data.get("Branches", [])
        for sub_sm in branches:
            states.update(cls.get_all_states(sub_sm))
    return states
@property
def state_machine(self):
    # StateMachine is a project helper (imported elsewhere in this file)
    # wrapping this app's registered step-function tasks; note a fresh
    # wrapper object is constructed on every attribute access.
    return StateMachine(app=self)
def _find_forwarded_s3_event(self, s3_event_envelope, forwarding_service):
    """Unwrap an S3 notification that was forwarded through SNS or SQS.

    Returns ``(s3_event, handler)`` where *handler* is the registered
    S3 subscriber for the event's bucket, or None if the bucket has no
    subscriber. Raises (AssertionError/KeyError/JSON errors) when the
    envelope is not actually a forwarded S3 notification — callers rely
    on that to fall back to plain SNS/SQS dispatch.
    """
    assert forwarding_service in {"sns", "sqs"}
    if forwarding_service == "sns":
        # SNS wraps the S3 event as a JSON string in the message body.
        assert s3_event_envelope['Records'][0]["Sns"]["Subject"] == "Amazon S3 Notification"
        s3_event = json.loads(s3_event_envelope['Records'][0]["Sns"]["Message"])
    elif forwarding_service == "sqs":
        # SQS may carry either an SNS envelope (S3 -> SNS -> SQS) or the
        # raw S3 event (S3 -> SQS); detect which by the SNS metadata keys.
        forwarded_event = json.loads(s3_event_envelope["Records"][0]["body"])
        if forwarded_event.get("TopicArn") and forwarded_event.get("Subject") == "Amazon S3 Notification":
            s3_event = json.loads(forwarded_event["Message"])
        else:
            s3_event = forwarded_event
    # Accept S3's connectivity test event as well as real object events.
    assert s3_event.get("Event") == "s3:TestEvent" or s3_event['Records'][0].get("eventSource") == "aws:s3"
    # Test events name the bucket at top level; real events nest it.
    s3_bucket_name = s3_event.get("Bucket") or s3_event['Records'][0]["s3"]["bucket"]["name"]
    handler = self.s3_subscribers[s3_bucket_name]["func"] if s3_bucket_name in self.s3_subscribers else None
    return s3_event, handler
def __call__(self, event, context):
    """Lambda entry point: inspect *event*'s shape to decide which AWS
    event source produced it, locate the registered handler, invoke it,
    and return its result.

    Dispatch order matters: the ELB/task-name checks run first, then the
    ``Records``-based sources (S3, SNS, SQS, DynamoDB), then CloudWatch
    Logs, then Step Functions tasks (recognized via the invoked alias).
    """
    self.log.info("Domovoi dispatch of event %s", event)
    self.lambda_context = context
    invoked_function_arn = ARN(context.invoked_function_arn)
    handler = None
    if "requestContext" in event and "elb" in event["requestContext"]:
        # ALB target: pick the registered target with the longest matching
        # path prefix.
        target = None
        # TODO: use suffix tree to avoid O(N) scan of route table
        for prefix, alb_target in self.alb_targets.items():
            if event["path"].startswith(prefix):
                if target is None or len(target["prefix"]) < len(alb_target["prefix"]):
                    target = alb_target
        # NOTE(review): if no prefix matches, target is still None and
        # this raises TypeError rather than a DomovoiException — confirm
        # whether an unmatched path is possible in practice.
        handler = target["func"]
    if "task_name" in event:
        # CloudWatch Events rule dispatched with an explicit task wrapper;
        # unwrap the inner event before calling the handler.
        if event["task_name"] not in self.cloudwatch_events_rules:
            raise DomovoiException("Received CloudWatch event for a task with no known handler")
        handler = self.cloudwatch_events_rules[event["task_name"]]["func"]
        event = event["event"]
    elif "Records" in event and "s3" in event["Records"][0]:
        # Direct S3 bucket notification.
        s3_bucket_name = event["Records"][0]["s3"]["bucket"]["name"]
        if s3_bucket_name not in self.s3_subscribers:
            raise DomovoiException("Received S3 event for a bucket with no known handler")
        handler = self.s3_subscribers[s3_bucket_name]["func"]
    elif "Records" in event and "Sns" in event["Records"][0]:
        # SNS message: may be an S3 notification forwarded via SNS, else a
        # plain SNS topic subscription.
        try:
            event, handler = self._find_forwarded_s3_event(event, forwarding_service="sns")
        except Exception:
            sns_topic = ARN(event["Records"][0]["Sns"]["TopicArn"]).resource
            if sns_topic not in self.sns_subscribers:
                raise DomovoiException("Received SNS or S3-SNS event with no known handler")
            handler = self.sns_subscribers[sns_topic]
    elif "Records" in event and event["Records"][0].get("eventSource") == "aws:sqs":
        # SQS message: may be an S3 notification forwarded via SQS (or
        # S3 -> SNS -> SQS), else a plain SQS queue subscription.
        try:
            event, handler = self._find_forwarded_s3_event(event, forwarding_service="sqs")
        except Exception:
            queue_name = ARN(event["Records"][0]["eventSourceARN"]).resource
            handler = self.sqs_subscribers[queue_name]["func"]
    elif "Records" in event and "dynamodb" in event["Records"][0]:
        # DynamoDB stream: table name is the second path segment of the
        # stream ARN resource ("table/<name>/stream/...").
        event_source_arn = ARN(event["Records"][0]["eventSourceARN"])
        table_name = event_source_arn.resource.split("/")[1]
        handler = self.dynamodb_event_sources[table_name]["func"]
    elif "awslogs" in event:
        # CloudWatch Logs subscription filter: payload is base64-encoded,
        # gzip-compressed JSON.
        event = json.loads(gzip.decompress(base64.b64decode(event["awslogs"]["data"])))
        handler = self.cwl_sub_filters[event["logGroup"]]["func"]
    elif "domovoi-stepfunctions-task" in invoked_function_arn.resource:
        # Step Functions task: the task name is encoded in the invoked
        # Lambda alias ("function:<name>:domovoi-stepfunctions-task-<task>").
        _, lambda_name, lambda_alias = invoked_function_arn.resource.split(":")
        assert lambda_alias.startswith("domovoi-stepfunctions-task-")
        task_name = lambda_alias[len("domovoi-stepfunctions-task-"):]
        context.stepfunctions_task_name = task_name
        handler = self.sfn_tasks[task_name]["func"]
    if handler is None:
        raise DomovoiException("No handler found for event {}".format(event))
    result = handler(event, context)
    self.log.info("%s", result)
    return result
| apache-2.0 |
TRUFIL/erpnext | erpnext/config/crm.py | 24 | 3497 | from frappe import _
def get_data():
    """Return the CRM module's desktop configuration: a list of card
    definitions (label, icon, items) consumed by the frappe/ERPNext UI.
    Pure data — no side effects."""
    return [
        {
            "label": _("Sales Pipeline"),
            "icon": "fa fa-star",
            "items": [
                {
                    "type": "doctype",
                    "name": "Lead",
                    "description": _("Database of potential customers."),
                },
                {
                    "type": "doctype",
                    "name": "Opportunity",
                    "description": _("Potential opportunities for selling."),
                },
                {
                    "type": "doctype",
                    "name": "Customer",
                    "description": _("Customer database."),
                },
                {
                    "type": "doctype",
                    "name": "Contact",
                    "description": _("All Contacts."),
                },
            ]
        },
        {
            "label": _("Reports"),
            "icon": "fa fa-list",
            "items": [
                {
                    "type": "report",
                    "is_query_report": True,
                    "name": "Lead Details",
                    "doctype": "Lead"
                },
                {
                    "type": "page",
                    "name": "sales-funnel",
                    "label": _("Sales Funnel"),
                    "icon": "fa fa-bar-chart",
                },
                {
                    "type": "report",
                    "name": "Prospects Engaged But Not Converted",
                    "doctype": "Lead",
                    "is_query_report": True
                },
                {
                    "type": "report",
                    "name": "Minutes to First Response for Opportunity",
                    "doctype": "Opportunity",
                    "is_query_report": True
                },
                {
                    "type": "report",
                    "is_query_report": True,
                    "name": "Customer Addresses And Contacts",
                    "doctype": "Contact"
                },
                {
                    "type": "report",
                    "is_query_report": True,
                    "name": "Inactive Customers",
                    "doctype": "Sales Order"
                },
                {
                    "type": "report",
                    "is_query_report": True,
                    "name": "Campaign Efficiency",
                    "doctype": "Lead"
                },
                {
                    "type": "report",
                    "is_query_report": True,
                    "name": "Lead Owner Efficiency",
                    "doctype": "Lead"
                }
            ]
        },
        {
            "label": _("Communication"),
            "icon": "fa fa-star",
            "items": [
                {
                    "type": "doctype",
                    "name": "Communication",
                    "description": _("Record of all communications of type email, phone, chat, visit, etc."),
                },
            ]
        },
        {
            "label": _("Setup"),
            "icon": "fa fa-cog",
            "items": [
                {
                    "type": "doctype",
                    "name": "Campaign",
                    "description": _("Sales campaigns."),
                },
                {
                    "type": "doctype",
                    "label": _("Customer Group"),
                    "name": "Customer Group",
                    "icon": "fa fa-sitemap",
                    "link": "Tree/Customer Group",
                    "description": _("Manage Customer Group Tree."),
                },
                {
                    "type": "doctype",
                    "label": _("Territory"),
                    "name": "Territory",
                    "icon": "fa fa-sitemap",
                    "link": "Tree/Territory",
                    "description": _("Manage Territory Tree."),
                },
                {
                    "type": "doctype",
                    "label": _("Sales Person"),
                    "name": "Sales Person",
                    "icon": "fa fa-sitemap",
                    "link": "Tree/Sales Person",
                    "description": _("Manage Sales Person Tree."),
                },
            ]
        },
        {
            "label": _("SMS"),
            "icon": "fa fa-wrench",
            "items": [
                {
                    "type": "doctype",
                    "name": "SMS Center",
                    "description":_("Send mass SMS to your contacts"),
                },
                {
                    "type": "doctype",
                    "name": "SMS Log",
                    "description":_("Logs for maintaining sms delivery status"),
                },
                {
                    "type": "doctype",
                    "name": "SMS Settings",
                    "description": _("Setup SMS gateway settings")
                }
            ]
        },
        {
            "label": _("Help"),
            "items": [
                {
                    "type": "help",
                    "label": _("Lead to Quotation"),
                    "youtube_id": "TxYX4r4JAKA"
                },
                {
                    "type": "help",
                    "label": _("Newsletters"),
                    "youtube_id": "muLKsCrrDRo"
                },
            ]
        },
    ]
| gpl-3.0 |
gitchs/tornado | demos/benchmark/stack_context_benchmark.py | 99 | 2265 | #!/usr/bin/env python
"""Benchmark for stack_context functionality."""
import collections
import contextlib
import functools
import subprocess
import sys
from tornado import stack_context
class Benchmark(object):
    """Base class for stack_context micro-benchmarks.

    Subclasses provide ``make_context()`` returning a fresh context
    manager; the two drivers below measure entering/exiting nested
    contexts and wrapping callbacks at depth.
    """

    def enter_exit(self, count):
        """Measures the overhead of the nested "with" statements
        when using many contexts.
        """
        if count >= 0:
            with self.make_context():
                self.enter_exit(count - 1)

    def call_wrapped(self, count):
        """Wraps and calls a function at each level of stack depth
        to measure the overhead of the wrapped function.
        """
        # This queue is analogous to IOLoop.add_callback, but lets us
        # benchmark the stack_context in isolation without system call
        # overhead.
        pending = collections.deque()
        self.call_wrapped_inner(pending, count)
        while pending:
            callback = pending.popleft()
            callback()

    def call_wrapped_inner(self, queue, count):
        # Recursively enter one context per level, wrapping the next
        # level's call so it captures the current stack context.
        if count >= 0:
            with self.make_context():
                continuation = functools.partial(self.call_wrapped_inner, queue, count - 1)
                queue.append(stack_context.wrap(continuation))
class StackBenchmark(Benchmark):
    # Benchmarks tornado.stack_context.StackContext with a trivial
    # (empty-body) context manager.
    def make_context(self):
        return stack_context.StackContext(self.__context)

    # Name-mangled to _StackBenchmark__context; yields exactly once so
    # the wrapped block runs with no additional work inside the context.
    @contextlib.contextmanager
    def __context(self):
        yield
class ExceptionBenchmark(Benchmark):
    # Benchmarks tornado.stack_context.ExceptionStackContext with a
    # no-op exception handler.
    def make_context(self):
        return stack_context.ExceptionStackContext(self.__handle_exception)

    # Swallows any exception type; the benchmarks never raise, so this
    # only measures the bookkeeping overhead.
    def __handle_exception(self, typ, value, tb):
        pass
def main():
    """Run each micro-benchmark in a fresh subprocess via the timeit module."""
    setup = ('from stack_context_benchmark import StackBenchmark, '
             'ExceptionBenchmark')
    base_cmd = [sys.executable, '-m', 'timeit', '-s', setup]
    cmds = [
        'StackBenchmark().enter_exit(50)',
        'StackBenchmark().call_wrapped(50)',
        'StackBenchmark().enter_exit(500)',
        'StackBenchmark().call_wrapped(500)',
        'ExceptionBenchmark().enter_exit(50)',
        'ExceptionBenchmark().call_wrapped(50)',
        'ExceptionBenchmark().enter_exit(500)',
        'ExceptionBenchmark().call_wrapped(500)',
    ]
    for cmd in cmds:
        print(cmd)
        subprocess.check_call(base_cmd + [cmd])


if __name__ == '__main__':
    main()
| apache-2.0 |
cgar/servo | tests/wpt/css-tests/tools/wptserve/tests/functional/test_cookies.py | 109 | 1946 | import unittest
import wptserve
from .base import TestUsingServer
class TestResponseSetCookie(TestUsingServer):
    """Exercises wptserve's Response cookie helpers end-to-end against a
    live test server (base class starts/stops it)."""

    def test_name_value(self):
        # set_cookie should emit a Set-Cookie header with a default Path.
        @wptserve.handlers.handler
        def handler(request, response):
            response.set_cookie("name", "value")
            return "Test"

        route = ("GET", "/test/name_value", handler)
        self.server.router.register(*route)
        resp = self.request(route[1])
        self.assertEqual(resp.info()["Set-Cookie"], "name=value; Path=/")

    def test_unset(self):
        # unset_cookie after set_cookie should cancel the header entirely.
        @wptserve.handlers.handler
        def handler(request, response):
            response.set_cookie("name", "value")
            response.unset_cookie("name")
            return "Test"

        route = ("GET", "/test/unset", handler)
        self.server.router.register(*route)
        resp = self.request(route[1])
        self.assertTrue("Set-Cookie" not in resp.info())

    def test_delete(self):
        # delete_cookie should emit an empty-valued cookie for Path=/.
        @wptserve.handlers.handler
        def handler(request, response):
            response.delete_cookie("name")
            return "Test"

        route = ("GET", "/test/delete", handler)
        self.server.router.register(*route)
        resp = self.request(route[1])
        # Parse "k=v; k=v; ..." attributes into a dict for assertions.
        parts = dict(item.split("=") for
                     item in resp.info()["Set-Cookie"].split("; ") if item)
        self.assertEqual(parts["name"], "")
        self.assertEqual(parts["Path"], "/")
        # Should also check that expires is in the past
class TestRequestCookies(TestUsingServer):
    """Checks that cookies sent by the client are parsed into
    request.cookies on the server side."""

    def test_set_cookie(self):
        @wptserve.handlers.handler
        def handler(request, response):
            # Echo the parsed cookie value back as the response body.
            return request.cookies["name"].value

        route = ("GET", "/test/set_cookie", handler)
        self.server.router.register(*route)
        resp = self.request(route[1], headers={"Cookie": "name=value"})
        self.assertEqual(resp.read(), b"value")


if __name__ == '__main__':
    unittest.main()
| mpl-2.0 |
pombredanne/flask-babel | tests/tests.py | 12 | 6247 | # -*- coding: utf-8 -*-
from __future__ import with_statement
import sys
import os
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..'))
import unittest
from decimal import Decimal
import flask
from datetime import datetime
from flaskext import babel
from flaskext.babel import gettext, ngettext, lazy_gettext
class DateFormattingTestCase(unittest.TestCase):
    """Tests Flask-Babel's date/time formatting helpers; expected strings
    are CLDR renderings for en_US (default) and de_DE."""

    def test_basics(self):
        app = flask.Flask(__name__)
        b = babel.Babel(app)
        d = datetime(2010, 4, 12, 13, 46)

        with app.test_request_context():
            # default locale en_US, default timezone UTC
            assert babel.format_datetime(d) == 'Apr 12, 2010 1:46:00 PM'
            assert babel.format_date(d) == 'Apr 12, 2010'
            assert babel.format_time(d) == '1:46:00 PM'

        with app.test_request_context():
            # timezone config is read per-request, so setting it inside
            # the context takes effect immediately
            app.config['BABEL_DEFAULT_TIMEZONE'] = 'Europe/Vienna'
            assert babel.format_datetime(d) == 'Apr 12, 2010 3:46:00 PM'
            assert babel.format_date(d) == 'Apr 12, 2010'
            assert babel.format_time(d) == '3:46:00 PM'

        with app.test_request_context():
            app.config['BABEL_DEFAULT_LOCALE'] = 'de_DE'
            assert babel.format_datetime(d, 'long') == \
                '12. April 2010 15:46:00 MESZ'

    def test_init_app(self):
        # Same expectations as test_basics but using the deferred
        # init_app() initialization style.
        b = babel.Babel()
        app = flask.Flask(__name__)
        b.init_app(app)
        d = datetime(2010, 4, 12, 13, 46)

        with app.test_request_context():
            assert babel.format_datetime(d) == 'Apr 12, 2010 1:46:00 PM'
            assert babel.format_date(d) == 'Apr 12, 2010'
            assert babel.format_time(d) == '1:46:00 PM'

        with app.test_request_context():
            app.config['BABEL_DEFAULT_TIMEZONE'] = 'Europe/Vienna'
            assert babel.format_datetime(d) == 'Apr 12, 2010 3:46:00 PM'
            assert babel.format_date(d) == 'Apr 12, 2010'
            assert babel.format_time(d) == '3:46:00 PM'

        with app.test_request_context():
            app.config['BABEL_DEFAULT_LOCALE'] = 'de_DE'
            assert babel.format_datetime(d, 'long') == \
                '12. April 2010 15:46:00 MESZ'

    def test_custom_formats(self):
        # Custom format patterns registered on the extension override the
        # locale defaults.
        app = flask.Flask(__name__)
        app.config.update(
            BABEL_DEFAULT_LOCALE='en_US',
            BABEL_DEFAULT_TIMEZONE='Pacific/Johnston'
        )
        b = babel.Babel(app)
        b.date_formats['datetime'] = 'long'
        b.date_formats['datetime.long'] = 'MMMM d, yyyy h:mm:ss a'
        d = datetime(2010, 4, 12, 13, 46)

        with app.test_request_context():
            assert babel.format_datetime(d) == 'April 12, 2010 3:46:00 AM'

    def test_custom_locale_selector(self):
        # localeselector/timezoneselector callbacks are consulted on each
        # request; mutating the closed-over values changes the outcome.
        app = flask.Flask(__name__)
        b = babel.Babel(app)
        d = datetime(2010, 4, 12, 13, 46)

        the_timezone = 'UTC'
        the_locale = 'en_US'

        @b.localeselector
        def select_locale():
            return the_locale

        @b.timezoneselector
        def select_timezone():
            return the_timezone

        with app.test_request_context():
            assert babel.format_datetime(d) == 'Apr 12, 2010 1:46:00 PM'

        the_locale = 'de_DE'
        the_timezone = 'Europe/Vienna'

        with app.test_request_context():
            assert babel.format_datetime(d) == '12.04.2010 15:46:00'

    def test_refreshing(self):
        # babel.refresh() re-reads config changes made mid-request.
        app = flask.Flask(__name__)
        b = babel.Babel(app)
        d = datetime(2010, 4, 12, 13, 46)
        with app.test_request_context():
            assert babel.format_datetime(d) == 'Apr 12, 2010 1:46:00 PM'
            app.config['BABEL_DEFAULT_TIMEZONE'] = 'Europe/Vienna'
            babel.refresh()
            assert babel.format_datetime(d) == 'Apr 12, 2010 3:46:00 PM'
class NumberFormattingTestCase(unittest.TestCase):
    """Tests Flask-Babel's number formatting helpers under the default
    en_US locale."""

    def test_basics(self):
        app = flask.Flask(__name__)
        b = babel.Babel(app)
        n = 1099

        with app.test_request_context():
            assert babel.format_number(n) == u'1,099'
            assert babel.format_decimal(Decimal('1010.99')) == u'1,010.99'
            assert babel.format_currency(n, 'USD') == '$1,099.00'
            assert babel.format_percent(0.19) == '19%'
            assert babel.format_scientific(10000) == u'1E4'
class GettextTestCase(unittest.TestCase):
    """Tests gettext/ngettext/lazy_gettext using the bundled de_DE
    translation catalog. NOTE: uses the Python-2-only `unicode` builtin —
    this suite targets Python 2."""

    def test_basics(self):
        app = flask.Flask(__name__)
        b = babel.Babel(app, default_locale='de_DE')

        with app.test_request_context():
            assert gettext(u'Hello %(name)s!', name='Peter') == 'Hallo Peter!'
            assert ngettext(u'%(num)s Apple', u'%(num)s Apples', 3) == u'3 Äpfel'
            assert ngettext(u'%(num)s Apple', u'%(num)s Apples', 1) == u'1 Apfel'

    def test_template_basics(self):
        # Same translations, but invoked from Jinja templates ({{ }} and
        # {% trans %} blocks).
        app = flask.Flask(__name__)
        b = babel.Babel(app, default_locale='de_DE')

        t = lambda x: flask.render_template_string('{{ %s }}' % x)

        with app.test_request_context():
            assert t("gettext('Hello %(name)s!', name='Peter')") == 'Hallo Peter!'
            assert t("ngettext('%(num)s Apple', '%(num)s Apples', 3)") == u'3 Äpfel'
            assert t("ngettext('%(num)s Apple', '%(num)s Apples', 1)") == u'1 Apfel'
            assert flask.render_template_string('''
                {% trans %}Hello {{ name }}!{% endtrans %}
            ''', name='Peter').strip() == 'Hallo Peter!'
            assert flask.render_template_string('''
                {% trans num=3 %}{{ num }} Apple
                {%- pluralize %}{{ num }} Apples{% endtrans %}
            ''', name='Peter').strip() == u'3 Äpfel'

    def test_lazy_gettext(self):
        # lazy_gettext defers evaluation until string conversion, so the
        # same object renders differently as the locale changes.
        app = flask.Flask(__name__)
        b = babel.Babel(app, default_locale='de_DE')
        yes = lazy_gettext(u'Yes')

        with app.test_request_context():
            assert unicode(yes) == 'Ja'
        app.config['BABEL_DEFAULT_LOCALE'] = 'en_US'
        with app.test_request_context():
            assert unicode(yes) == 'Yes'

    def test_list_translations(self):
        app = flask.Flask(__name__)
        b = babel.Babel(app, default_locale='de_DE')
        translations = b.list_translations()
        assert len(translations) == 1
        assert str(translations[0]) == 'de'


if __name__ == '__main__':
    unittest.main()
| bsd-3-clause |
lukesanantonio/inpassing-backend | inpassing/tests/data.py | 1 | 1799 | # Copyright (c) 2016 Luke San Antonio Bialecki
# All rights reserved.
import datetime
from fixture import DataSet
class Org(DataSet):
    # Fixture: a single organization row referenced by the other fixtures.
    class locust_valley:
        name = 'Locust Valley High School'
class Daystate(DataSet):
    # Fixture: the two alternating day states (A/B schedule) for the
    # Locust Valley org.
    class a_day:
        org_id = Org.locust_valley.ref('id')
        identifier = 'A'
        greeting = 'Today is an A day'

    class b_day:
        org_id = Org.locust_valley.ref('id')
        identifier = 'B'
        greeting = 'Today is a B day'
class User(DataSet):
    # Fixture: one moderator and one regular participant of the org.
    class mod:
        first_name = 'Moddy'
        last_name = 'Moderator'
        email = 'admin@madeupdomain.com'
        # This is literally 'password' (bcrypt hash)
        password=b'$2b$12$tb.KU6CZmjXFkivFD3qSAeQW.V3JopcaPVzQK01IIiyejlryshcMC'
        moderates = [Org.locust_valley]

    class user:
        first_name = 'John'
        last_name = 'Smitch'
        email = 'testemail@madeupdomain.com'
        # Same bcrypt hash of 'password' as above
        password=b'$2b$12$tb.KU6CZmjXFkivFD3qSAeQW.V3JopcaPVzQK01IIiyejlryshcMC'
        participates = [Org.locust_valley]
class Pass(DataSet):
    # Fixture: one fully-assigned pass and one pending (unassigned) pass,
    # both owned by the regular user.
    class user_pass:
        org_id = Org.locust_valley.ref('id')
        owner_id = User.user.ref('id')
        requested_state_id = Daystate.a_day.ref('id')
        requested_spot_num = 20
        request_time = datetime.datetime.now()
        assigned_state_id = Daystate.a_day.ref('id')
        assigned_spot_num = 40
        assigned_time = datetime.datetime.now()

    class other_pass:
        org_id = Org.locust_valley.ref('id')
        owner_id = User.user.ref('id')
        requested_state_id = Daystate.b_day.ref('id')
        requested_spot_num = 30
        request_time = datetime.datetime.now()
        # Not yet assigned: all assignment fields are None
        assigned_state_id = None
        assigned_spot_num = None
        assigned_time = None

# Load order matters: referenced datasets must come before referencers.
all_data = (Org, Daystate, User, Pass)
| mit |
NinjaMSP/crossbar | crossbar/worker/worker.py | 1 | 14051 | #####################################################################################
#
# Copyright (c) Crossbar.io Technologies GmbH
#
# Unless a separate license agreement exists between you and Crossbar.io GmbH (e.g.
# you have purchased a commercial license), the license terms below apply.
#
# Should you enter into a separate license agreement after having received a copy of
# this software, then the terms of such license agreement replace the terms below at
# the time at which such license agreement becomes effective.
#
# In case a separate license agreement ends, and such agreement ends without being
# replaced by another separate license agreement, the license terms below apply
# from the time at which said agreement ends.
#
# LICENSE TERMS
#
# This program is free software: you can redistribute it and/or modify it under the
# terms of the GNU Affero General Public License, version 3, as published by the
# Free Software Foundation. This program is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
#
# See the GNU Affero General Public License Version 3 for more details.
#
# You should have received a copy of the GNU Affero General Public license along
# with this program. If not, see <http://www.gnu.org/licenses/agpl-3.0.en.html>.
#
#####################################################################################
from __future__ import absolute_import
import os
import sys
import pkg_resources
import jinja2
import signal
from twisted.internet.error import ReactorNotRunning
from twisted.internet.defer import inlineCallbacks
from autobahn.util import utcnow
from autobahn.wamp.exception import ApplicationError
from autobahn.wamp.types import PublishOptions
from autobahn import wamp
from txaio import make_logger
from crossbar.common.reloader import TrackingModuleReloader
from crossbar.common.process import NativeProcessSession
from crossbar.common.profiler import PROFILERS
__all__ = ('NativeWorkerSession',)
class NativeWorkerSession(NativeProcessSession):
    """
    A native Crossbar.io worker process. The worker will be connected
    to the node's management router running inside the node controller
    via WAMP-over-stdio, and exposes its management API under the URI
    prefix ``crossbar.worker.<worker_id>``.
    """

    WORKER_TYPE = 'native'

    log = make_logger()

    def onConnect(self):
        """
        Called when the worker has connected to the node's management router.
        """
        self._node_id = self.config.extra.node
        self._worker_id = self.config.extra.worker
        self._uri_prefix = u'crossbar.worker.{}'.format(self._worker_id)

        NativeProcessSession.onConnect(self, False)

        self._module_tracker = TrackingModuleReloader()

        # profile_id -> saved profile record (see start_profiler / get_profile)
        self._profiles = {}

        # flag indicating when worker is shutting down
        self._is_shutting_down = False

        # Jinja2 templates for Web (like WS status page et al)
        #
        templates_dir = os.path.abspath(pkg_resources.resource_filename("crossbar", "web/templates"))
        self.log.debug("Using Web templates from {templates_dir}",
                       templates_dir=templates_dir)
        self._templates = jinja2.Environment(loader=jinja2.FileSystemLoader(templates_dir))

        self.join(self.config.realm)

    @inlineCallbacks
    def onJoin(self, details, publish_ready=True):
        """
        Called when worker process has joined the node's management realm.
        """
        yield NativeProcessSession.onJoin(self, details)
        # above upcall registers all our "@wamp.register(None)" methods

        # setup SIGTERM handler to orderly shutdown the worker
        def shutdown(sig, frame):
            self.log.warn("Native worker received SIGTERM - shutting down ..")
            self.shutdown()
        signal.signal(signal.SIGTERM, shutdown)

        # the worker is ready for work!
        if publish_ready:
            yield self.publish_ready()

    def onLeave(self, details):
        self.log.debug("Worker-to-controller session detached")
        self.disconnect()

    def onDisconnect(self):
        self.log.debug("Worker-to-controller session disconnected")

        # when the native worker is done, stop the reactor
        try:
            self._reactor.stop()
        except ReactorNotRunning:
            pass

    @inlineCallbacks
    def publish_ready(self):
        # signal that this worker is ready for setup. the actual setup procedure
        # will either be sequenced from the local node configuration file or remotely
        # from a management service
        yield self.publish(
            u'{}.on_worker_ready'.format(self._uri_prefix),
            {
                u'type': self.WORKER_TYPE,
                u'id': self.config.extra.worker,
                u'pid': os.getpid(),
            },
            options=PublishOptions(acknowledge=True)
        )

        self.log.debug("Worker '{worker}' running as PID {pid}",
                       worker=self.config.extra.worker, pid=os.getpid())

    @wamp.register(None)
    @inlineCallbacks
    def shutdown(self, details=None):
        """
        Orderly shutdown of this worker.

        Registered under: ``crossbar.worker.<worker_id>.shutdown``

        Event published under: ``crossbar.worker.<worker_id>.on_shutdown_requested``
        """
        if self._is_shutting_down:
            # ignore: we are already shutting down ..
            return
        # raise ApplicationError(u'crossbar.error.operation_in_progress', 'cannot shutdown - the worker is already shutting down')
        else:
            self._is_shutting_down = True
        self.log.info("Shutdown of worker requested!")

        # publish management API event
        #
        yield self.publish(
            u'{}.on_shutdown_requested'.format(self._uri_prefix),
            {
                u'who': details.caller if details else None,
                u'when': utcnow()
            },
            options=PublishOptions(exclude=details.caller if details else None, acknowledge=True)
        )

        # we now call self.leave() to initiate the clean, orderly shutdown of the native worker.
        # the call is scheduled to run on the next reactor iteration only, because we want to first
        # return from the WAMP call when this procedure is called from the node controller
        #
        self._reactor.callLater(0, self.leave)

    @wamp.register(None)
    def set_node_id(self, node_id, details=None):
        # Set the node ID this worker believes it belongs to.
        self._node_id = node_id

    @wamp.register(None)
    def get_node_id(self, details=None):
        # Return the node ID this worker believes it belongs to.
        return self._node_id

    @wamp.register(None)
    def get_profilers(self, details=None):
        """
        Registered under: ``crossbar.worker.<worker_id>.get_profilers``

        Returns available profilers.

        :param details: WAMP call details (auto-filled by WAMP).
        :type details: obj

        :returns: A list of profilers.
        :rtype: list of unicode
        """
        # Bug fix: PROFILERS is a dict - iterating .items() yields
        # (name, profiler) tuples, which have no .marshal() method.
        # Iterate the profiler objects themselves instead.
        return [p.marshal() for p in PROFILERS.values()]

    @wamp.register(None)
    def start_profiler(self, profiler=u'vmprof', runtime=10, async_=True, details=None):
        """
        Registered under: ``crossbar.worker.<worker_id>.start_profiler``

        Start a profiler producing a profile which is stored and can be
        queried later.

        :param profiler: The profiler to start, e.g. ``vmprof``.
        :type profiler: str

        :param runtime: Profiling duration in seconds.
        :type runtime: float

        :param async_: Flag to turn on/off asynchronous mode. NOTE: renamed
            from ``async``, which became a reserved keyword in Python 3.7
            (the old name made this module a SyntaxError on modern Python).
            The event payload still uses the ``async`` key for wire
            compatibility.
        :type async_: bool

        :param details: WAMP call details (auto-filled by WAMP).
        :type details: obj

        :returns: If running in synchronous mode, the profiling result. Else
            a profile ID is returned which later can be used to retrieve the profile.
        :rtype: dict or int
        """
        if profiler not in PROFILERS:
            raise Exception("no such profiler")

        # keep the (serializable) profiler name before rebinding the
        # variable to the profiler object
        profiler_name = profiler
        profiler = PROFILERS[profiler_name]

        self.log.debug("Starting profiler {profiler}, running for {secs} seconds", profiler=profiler, secs=runtime)

        # run the selected profiler, producing a profile. "profile_finished" is a Deferred
        # that will fire with the actual profile recorded
        profile_id, profile_finished = profiler.start(runtime=runtime)

        on_profile_started = u'{}.on_profile_started'.format(self._uri_prefix)
        on_profile_finished = u'{}.on_profile_finished'.format(self._uri_prefix)

        if async_:
            publish_options = None
        else:
            publish_options = PublishOptions(exclude=details.caller)

        profile_started = {
            u'id': profile_id,
            u'who': details.caller,
            # Bug fix: publish the profiler *name*. The previous code
            # published the profiler object itself, which is not
            # WAMP-serializable.
            u'profiler': profiler_name,
            u'runtime': runtime,
            u'async': async_,
        }

        self.publish(
            on_profile_started,
            profile_started,
            options=publish_options
        )

        def on_profile_success(profile_result):
            self._profiles[profile_id] = {
                u'id': profile_id,
                u'profiler': profiler_name,
                u'runtime': runtime,
                u'profile': profile_result
            }

            self.publish(
                on_profile_finished,
                {
                    u'id': profile_id,
                    u'error': None,
                    u'profile': profile_result
                },
                options=publish_options
            )

            return profile_result

        def on_profile_failed(error):
            self.log.warn('profiling failed: {error}', error=error)

            self.publish(
                on_profile_finished,
                {
                    u'id': profile_id,
                    u'error': u'{0}'.format(error),
                    u'profile': None
                },
                options=publish_options
            )

            return error

        profile_finished.addCallbacks(on_profile_success, on_profile_failed)

        if async_:
            # if running in async mode, immediately return the ID under
            # which the profile can be retrieved later (when it is finished)
            return profile_started
        else:
            # if running in sync mode, return only when the profiling was
            # actually finished - and return the complete profile
            return profile_finished

    @wamp.register(None)
    def get_profile(self, profile_id, details=None):
        """
        Get a profile previously produced by a profiler run.

        This procedure is registered under WAMP URI
        ``crossbar.worker.<worker_id>.get_profile``.

        When no profile with given ID exists, a WAMP error
        ``crossbar.error.no_such_object`` is raised.
        """
        if profile_id in self._profiles:
            return self._profiles[profile_id]
        else:
            raise ApplicationError(u'crossbar.error.no_such_object', 'no profile with ID {} saved'.format(profile_id))

    @wamp.register(None)
    def get_pythonpath(self, details=None):
        """
        Returns the current Python module search paths.

        This procedure is registered under WAMP URI
        ``crossbar.worker.<worker_id>.get_pythonpath``.

        :returns: The current module search paths.
        :rtype: list of str
        """
        self.log.debug("{klass}.get_pythonpath", klass=self.__class__.__name__)
        return sys.path

    @wamp.register(None)
    def add_pythonpath(self, paths, prepend=True, details=None):
        """
        Add paths to Python module search paths.

        This procedure is registered under WAMP URI
        ``crossbar.worker.<worker_id>.add_pythonpath``.

        :param paths: List of paths. Relative paths will be resolved relative
            to the node directory.
        :type paths: list of unicode

        :param prepend: If `True`, prepend the given paths to the current paths.
            Otherwise append.
        :type prepend: bool
        """
        self.log.debug("{klass}.add_pythonpath", klass=self.__class__.__name__)

        paths_added = []
        for p in paths:
            # transform all paths (relative to cbdir) into absolute paths
            #
            path_to_add = os.path.abspath(os.path.join(self.config.extra.cbdir, p))
            if os.path.isdir(path_to_add):
                paths_added.append({'requested': p, 'resolved': path_to_add})
            else:
                emsg = "Cannot add Python search path '{}': resolved path '{}' is not a directory".format(p, path_to_add)
                self.log.error(emsg)
                raise ApplicationError(u'crossbar.error.invalid_argument', emsg, requested=p, resolved=path_to_add)

        # now extend python module search path
        #
        paths_added_resolved = [p['resolved'] for p in paths_added]
        if prepend:
            sys.path = paths_added_resolved + sys.path
        else:
            sys.path.extend(paths_added_resolved)

        # "It is important to note that the global working_set object is initialized from
        # sys.path when pkg_resources is first imported, but is only updated if you do all
        # future sys.path manipulation via pkg_resources APIs. If you manually modify sys.path,
        # you must invoke the appropriate methods on the working_set instance to keep it in sync."
        #
        # @see: https://pythonhosted.org/setuptools/pkg_resources.html#workingset-objects
        #
        for p in paths_added_resolved:
            pkg_resources.working_set.add_entry(p)

        # publish event "on_pythonpath_add" to all but the caller
        #
        topic = u'{}.on_pythonpath_add'.format(self._uri_prefix)
        res = {
            u'paths': sys.path,
            u'paths_added': paths_added,
            u'prepend': prepend,
            u'who': details.caller
        }
        self.publish(topic, res, options=PublishOptions(exclude=details.caller))

        return res
agallo/pyNetStuff | net1.py | 1 | 1393 | #!/usr/bin/python
import networkx as nx
# create Directed Graph called DG
DG = nx.DiGraph()
# create a list of ASNs
ASpath1=[11039, 174, 14743, 13546]
# add edges to directed graph from a list
# new nodes will be created as needed (ie, if they don't exist in DG, they'll be created, existing nodes
# will not be duplicated)
DG.add_path(ASpath1)
print 'Nodes so far: ' + str((DG.nodes()))
print 'Edges so far: ' + str((DG.edges()))
# add second path
secondpath = [11039, 174, 16657, 5061, 13546]
DG.add_path(secondpath)
print
print 'added a second path:'
print 'Nodes so far: ' + str((DG.nodes()))
print 'Edges so far: ' + str((DG.edges()))
# add a bunch of ASpaths manually
DG.add_path([11039, 174, 3356, 13546])
DG.add_path([11039, 4901, 11164, 29791, 12182, 14743, 13546])
DG.add_path([11039, 4901, 11164, 4323, 5061, 13546])
DG.add_path([11039, 6461, 12182, 12182, 12182, 12182, 12182, 12182, 12182, 12182, 12182, 12182, 12182, 14743, 13546])
DG.add_path([11039, 6461, 174, 14743, 13546])
DG.add_path([11039, 6461, 3356, 13546])
DG.add_path([11039, 6461, 4323, 5061, 13546])
DG.add_path([11039, 11557, 4436, 3257, 12182, 12182, 12182, 14743, 13546])
DG.add_path([11039, 11557, 4436, 3257, 14743, 13546])
DG.add_path([11039, 11557, 4436, 3257, 3356, 13546])
DG.add_path([11039, 11557, 4436, 3257, 3356, 16657, 5061, 13546])
print
print 'Nodes and degrees: ' + str(nx.degree(DG))
| unlicense |
shlomimatichin/inaugurator | inaugurator/udev.py | 3 | 2282 | from inaugurator import pyudev
from inaugurator import sh
import os
import fnmatch
# Extra driver modules that must be loaded after the keyed driver
# (upward dependencies that modprobe does not resolve by itself).
_ALSO = {
    'mlx4_core': ['mlx4_en']
}
def loadAllDrivers():
    """Enumerate all udev devices and modprobe a matching driver for each
    device that exposes a MODALIAS, using the kernel's modules.alias table.
    Python 2 code (iteritems / print statements)."""
    context = pyudev.Context()
    aliasTable = _loadAliasTable()
    deviceList = list(context.list_devices())
    for device in deviceList:
        # devices without a modalias cannot be matched to a module
        if u'MODALIAS' not in device:
            continue
        # dump all device properties for diagnostics
        for k, v in device.iteritems():
            print "\t%s: %s" % (k, v)
        driver = _findDriver(device, aliasTable)
        if driver is None:
            print "No driver, skipping"
        else:
            _loadDriver(driver)
def _loadDriver(driver):
"This is for upwards dependency, not modprobe like dependency"
print "Driver: %s, modprobing" % driver
sh.run("busybox modprobe %s" % driver)
if driver in _ALSO:
print "Additional drivers must be loaded for '%s': %s" % (driver, _ALSO[driver])
for also in _ALSO[driver]:
_loadDriver(also)
def _kernelVersion():
    # Running kernel release string (e.g. "4.19.0"), via busybox uname.
    output = sh.run("busybox uname -r")
    return output.strip()
def _loadAliasTable():
    """Parse /lib/modules/<kernel>/modules.alias into a nested dict:
    {subsystem: {alias_pattern: module_name}}.

    Lines look like "alias pci:v00008086d* e1000e"; '#' lines are
    comments, and aliases without a ':' (no subsystem prefix) are
    skipped.
    """
    path = os.path.join("/lib/modules/%s/modules.alias" % _kernelVersion())
    table = dict()
    with open(path) as f:
        for line in f.readlines():
            if line.startswith("#"):
                continue
            alias, driver = line.strip().split(" ")[1:]
            if ':' not in alias:
                continue
            subsystem = alias.split(":")[0]
            if subsystem not in table:
                table[subsystem] = dict()
            # bug fix: removed leftover debug "print alias", which wrote
            # one stdout line for every alias in the (huge) table
            table[subsystem][alias] = driver
    return table
def _lookLike(alias, pattern):
parts = pattern.split("*")
for part in parts:
if part not in alias:
return False
return True
def _findDriver(device, aliasTable):
alias = device[u'MODALIAS']
subsystem = alias.split(":")[0]
for pattern in aliasTable.get(subsystem, dict()):
if _lookLike(alias, pattern):
if fnmatch.fnmatch(alias, pattern):
return aliasTable[subsystem][pattern]
return None
if __name__ == "__main__":
    # Dry-run mode: freeze the kernel version and stub out sh.run so the
    # driver scan only prints the commands it would execute.
    # NOTE(review): `global` at module level is a no-op statement; the
    # rebinding below works simply because this already is module scope.
    global _kernelVersion
    ver = _kernelVersion()
    _kernelVersion = lambda: ver

    def fakeSH(command):
        print "COMMAND", command
    sh.run = fakeSH
    loadAllDrivers()
| apache-2.0 |
jeromecc/doctoctocbot | src/tagging/tasks.py | 1 | 13790 | import os
import unidecode
from typing import Optional, List
import ast
from django.utils.translation import gettext as _
from django.db import transaction, DatabaseError
from django.urls import reverse
from celery.utils.log import get_task_logger
from versions.exceptions import DeletionOfNonCurrentVersionError
from tagging.models import Process, Queue, Category, TagKeyword
from dm.api import senddm
from celery import shared_task
from conversation.models import Tweetdj
from moderation.models import SocialMedia
from community.models import Community
from community.helpers import site_url
from dm.models import DirectMessage
from optin.authorize import has_authorized, create_opt_in
from optin.models import Option
from bot.tweet import hashtag_list
from community.helpers import activate_language
logger = get_task_logger(__name__)
CATEGORY_TAG = "category"
UUID_LENGTH = 36
OPTIN_OPTION = "twitter_dm_category_self"
STOP_CATEGORY = "stop"
def get_optin_option() -> Optional[Option]:
    """Fetch the Option row named OPTIN_OPTION, or None (with an error
    logged) when the row is missing from the database."""
    try:
        return Option.objects.get(name=OPTIN_OPTION)
    except Option.DoesNotExist:
        logger.error(f"Option {OPTIN_OPTION} is not present in the database.")
        return None
def quickreply(process_id):
    """Build the Twitter quick-reply "options" payload for a tag process.

    Returns a dict of the form {"type": "options", "options": [...]} with one
    button per Category of the process's community, or None when the process
    does not exist or the community has no categories.

    Each option's metadata encodes "<CATEGORY_TAG><process uuid><tag>" so the
    answer DM can later be routed back to the right Process by poll_tag_dm().
    """
    try:
        process = Process.objects.get(id=process_id)
    except Process.DoesNotExist:
        return
    category_lst = Category.objects.filter(community=process.queue.community)
    if not category_lst:
        return
    qr = {
        "type": "options",
        "options": []
    }
    options = []
    # Template dict, shallow-copied for every category below.
    option = {
        "label": "?",
        "description": "?",
        "metadata": "?"
    }
    for cat in category_lst:
        opt = dict(option)
        # Hashtag-enabled categories are shown with a leading '#'.
        # (Previously this was a lambda bound to the name "hash", which
        # shadowed the builtin; a conditional expression is equivalent.)
        prefix = "#" if cat.hashtag else ""
        opt["label"] = f"{prefix}{cat.tag}"
        opt["description"] = cat.summary or cat.tag
        opt["metadata"] = f"{CATEGORY_TAG}{process.id}{cat.tag}"
        options.append(opt)
    qr["options"] = options
    logger.debug(f"qr: {qr}")
    return qr
def tag_dm_text(process):
    """Build the localized DM body asking the tweet's author to categorize
    their tweet; returns None when the tweet is no longer in Tweetdj.
    """
    process.refresh_from_db()
    statusid = process.queue.uid
    try:
        tweetdj = Tweetdj.objects.get(statusid=statusid)
    except Tweetdj.DoesNotExist as e:
        logger.warn(e)
        return
    activate_language(process.queue.community)
    # underscore will be replaced by Twitter with the screen name of the user
    screen_name = tweetdj.socialuser.screen_name_tag() or "_"
    logger.debug(screen_name)
    # Pick any hashtag-enabled category as a concrete example in the message.
    sample_category = Category.objects.filter(
        community=process.queue.community,
        hashtag=True,
    ).first()
    if sample_category:
        sample_tag = sample_category.tag
    else:
        sample_tag = ""
    categories_link = f"{site_url(process.queue.community)}{reverse('landing:categories')}"
    # NOTE(review): "bellow" typo in the msgid below — fixing it would
    # invalidate existing translation catalog entries, so it is left as-is.
    text = (
        _(
            "🆕 You can tag your tweet with the corresponding category hashtag.\n"
            "For instance, add #{sample_tag} in your next tweet and you "
            "will not receive this DM.\n"
            "Please help us find the best category for this tweet "
            "https://twitter.com/{screen_name}/status/{statusid}\n"
            "Please choose a category by clicking on one of the buttons bellow.\n"
            "Categories are described in full detail on this page ➡️ "
            "{categories_link}"
        ).format(
            screen_name=screen_name,
            statusid=statusid,
            categories_link=categories_link,
            sample_tag=sample_tag,
        )
    )
    logger.debug(text)
    return text
def send_tag_dm(process, user_id):
    """Send the category-request DM (with quick-reply buttons) from the
    community's bot account to Twitter user *user_id*.

    Returns the Twitter API response as JSON.
    """
    logger.info(f"process.processor.id: {process.processor.id}")
    response = senddm(
        tag_dm_text(process),
        user_id=user_id,
        screen_name=process.queue.community.account.username,
        return_json=True,
        quick_reply=quickreply(process.id)
    )
    logger.info(response)
    return response
@shared_task(bind=True)
def send_tag_request(self, process_id, user_id):
    """Celery task: deliver the tag-request DM, retrying with exponential
    backoff until the Twitter API response confirms message creation.
    """
    process_mi = Process.objects.get(id=process_id)
    if process_mi.queue.socialmedia.name == "twitter":
        response = send_tag_dm(process_mi, user_id)
        try:
            # Presence of created_timestamp means the DM was accepted.
            response["event"]["created_timestamp"]
        except KeyError:
            self.retry(countdown= 2 ** self.request.retries)
@shared_task
def handle_create_tag_queue(statusid, socialmedia_id, community_id):
    """Celery task: create a tagging Queue (and downstream Process) for a
    tweet unless one of its hashtags already matches a community category.

    Hashtags and category tags are compared case-insensitively and with
    diacritics stripped.
    """
    logger.debug(f"handle_create_tag_queue({statusid}, {socialmedia_id}, {community_id})")
    try:
        socialmedia = SocialMedia.objects.get(id=socialmedia_id)
    except SocialMedia.DoesNotExist as e:
        logger.warn(e)
        return
    logger.debug(socialmedia)
    try:
        community = Community.objects.get(id=community_id)
    except Community.DoesNotExist as e:
        logger.warn(e)
        return
    logger.debug(community)
    if not Category.objects.filter(community=community).exists():
        logger.warn("There are no tagging categories for this community.")
        return
    try:
        tweetdj = Tweetdj.objects.get(statusid=statusid)
    except Tweetdj.DoesNotExist:
        return
    category_tags = list(
        Category.objects.filter(
            community=community,
            hashtag=True,
        ).values_list('tag', flat=True)
    )
    logger.debug(f"{category_tags=}")
    category_diacriticless_tags = [unidecode.unidecode(tag).lower() for tag in category_tags]
    logger.debug(f"{category_diacriticless_tags=}")
    tweet_tags = tweetdj.tags.names()
    logger.debug(f"{tweet_tags}")
    tweet_diacriticless_tags = [unidecode.unidecode(tag).lower() for tag in tweet_tags]
    logger.debug(f"{tweet_diacriticless_tags=}")
    # if status does not have any tags corresponding to a category,
    # create queue
    logger.debug(f"{set(category_diacriticless_tags).isdisjoint(tweet_diacriticless_tags)=}")
    if set(category_diacriticless_tags).isdisjoint(tweet_diacriticless_tags):
        queue, created = Queue.objects.get_or_create(
            uid=statusid,
            socialmedia=socialmedia,
            community=community
        )
        if created:
            logger.info("Queue created.")
        create_tag_process(queue, statusid)
def create_tag_process(queue, statusid):
    """Create a tag Process for the tweet's author and schedule the DM task,
    respecting (and initializing on first contact) the author's opt-in.
    """
    try:
        tweetdj = Tweetdj.objects.get(statusid=statusid)
    except Tweetdj.DoesNotExist as e:
        logger.warn(e)
        return
    socialuser = tweetdj.socialuser
    if not socialuser:
        logger.debug("No SocialUser")
        return
    option = get_optin_option()
    has_opted_in = has_authorized(socialuser, option)
    logger.debug(f"_has_authorized: {has_opted_in}")
    # If Opt in not yet set for this user, create it as True
    if has_opted_in is None:
        create_opt_in(socialuser, option, authorize=True)
    if has_authorized(socialuser, option):
        process, created = Process.objects.get_or_create(
            queue=queue,
            processor=socialuser
        )
        if created:
            logger.info("Process created.")
        send_tag_request.apply_async(args=(process.id, socialuser.user_id))
@shared_task
def poll_tag_dm():
    """Celery task: scan received quick-reply DMs and apply the chosen tag.

    For every DM whose metadata starts with CATEGORY_TAG and refers to a
    still-current Process, either tag the tweet with the chosen category or,
    for the STOP_CATEGORY answer, opt the user out of future tagging DMs.
    Processed Process/Queue versions are deleted so they are not handled
    twice.
    """
    def delete_tag_queue(process_mi):
        # Delete the queue, then the process; deletion of an already
        # non-current version is logged and ignored.
        with transaction.atomic():
            try:
                process_mi.queue.delete()
            except DeletionOfNonCurrentVersionError as e:
                logger.info(f"Queue {process_mi.queue} was already deleted. %s" % e)
            delete_tag_process(process_mi)
    def delete_tag_process(process_mi):
        with transaction.atomic():
            try:
                process_mi.delete()
            except DeletionOfNonCurrentVersionError as e:
                logger.info(f"Tag process instance {process_mi} was already processed. %s" % e)
    def get_tag_metadata(dm):
        # DM payloads exist in two shapes (older nested "kwargs" form and
        # newer flat form); try the old shape first.
        # NOTE(review): the bare except also hides KeyError/TypeError from
        # malformed payloads — consider narrowing to KeyError.
        jsn = dm.jsn
        if isinstance(jsn, str):
            jsn = ast.literal_eval(jsn)
        try:
            return jsn['kwargs']['message_create']['message_data']['quick_reply_response']['metadata']
        except:
            return jsn['quick_reply_response']['metadata']
    def get_process_id(dm):
        # Metadata layout: "<CATEGORY_TAG><36-char uuid><tag name>".
        begin_idx = len(CATEGORY_TAG)
        end_idx = len(CATEGORY_TAG) + UUID_LENGTH
        return get_tag_metadata(dm)[begin_idx:end_idx]
    def get_tag_name(dm):
        end_idx = len(CATEGORY_TAG) + UUID_LENGTH
        return get_tag_metadata(dm)[end_idx:]
    def tag(process_mi, tag_name):
        # determine id of status
        status_id = process_mi.queue.uid
        # Tweetdj
        try:
            tweetdj = Tweetdj.objects.get(statusid=status_id)
            logger.debug(f"tweetdj:{tweetdj}")
        except Tweetdj.DoesNotExist:
            return
        #taggit
        logger.info(f"Adding tag '{tag_name}' to {tweetdj}")
        tweetdj.tags.add(tag_name)
        delete_tag_queue(process_mi)
    current_process_uuid_lst = [str(process.id) for process in Process.objects.current.all()]
    logger.debug(f"{current_process_uuid_lst=}")
    # return if no current Moderation object
    if not current_process_uuid_lst:
        return
    bot_id_lst = list(Community.objects.values_list("account__userid", flat=True))
    logger.debug(f"{bot_id_lst=}")
    dms_new = []
    dms_old = []
    try:
        dms_old = list(
            DirectMessage.objects\
            .filter(recipient_id__in=bot_id_lst)\
            .filter(jsn__kwargs__message_create__message_data__quick_reply_response__metadata__startswith=CATEGORY_TAG)
        )
        if dms_old:
            logger.debug(f"all {CATEGORY_TAG} direct messages answers: {len(dms_old)} {[(dm.id, dm.text,) for dm in dms_old]}")
    except Exception:
        logger.error("dms_old exception")
    try:
        dms_new = list(
            DirectMessage.objects\
            .filter(recipient_id__in=bot_id_lst)\
            .filter(jsn__quick_reply_response__metadata__startswith=CATEGORY_TAG)
        )
        if dms_new:
            logger.debug(f"all {CATEGORY_TAG} direct messages answers: {len(dms_new)} {[(dm.id, dm.text,) for dm in dms_new]}")
    except Exception:
        logger.error("dms_new exception")
    dms = dms_old + dms_new
    logger.debug(f"{dms=}")
    if not dms:
        logger.debug("No DM!")
        return
    logger.info(f"all {CATEGORY_TAG} direct messages answers: {len(dms)} {[(dm.id, dm.text,) for dm in dms]}")
    # Keep only DMs whose embedded process uuid refers to a current Process.
    dms_current = []
    for dm in dms:
        uid = get_process_id(dm)
        ok = uid in current_process_uuid_lst
        logger.debug(f"{uid} uid in current_process_uuid_lst: {ok}")
        if ok:
            dms_current.append(dm)
    logger.debug(f"{dms_current=}")
    for dm in dms_current:
        process_id = get_process_id(dm)
        tag_name = get_tag_name(dm)
        logger.debug(f"dm process_id: {process_id}, cat: {tag_name}")
        #retrieve moderation instance
        try:
            process_mi = Process.objects.get(pk=process_id)
        except Process.DoesNotExist as e:
            logger.error(f"Process object with id {process_id} "
                         f"does not exist. Error message: {e}")
            continue
        logger.debug(f"process_mi:{process_mi}")
        # if mod_mi is not the current version, current_version() returns None
        # and it means this moderation was already done and we pass
        is_current = bool(Process.objects.current_version(process_mi))
        if not is_current:
            logger.debug(f"Process instance {process_mi} was already processed.")
            continue
        socialuser = process_mi.processor
        if tag_name == STOP_CATEGORY:
            opt_out(socialuser)
            delete_tag_process(process_mi)
        else:
            tag(process_mi, tag_name)
def opt_out(socialuser):
    """Record a negative opt-in so *socialuser* stops receiving tagging DMs."""
    create_opt_in(
        socialuser=socialuser,
        option=get_optin_option(),
        authorize=False,
    )
def keyword_tag(statusid, community):
    """Tag a tweet from its hashtags and configured keywords, or hand it off
    to the DM-based categorization flow when it carries too few hashtags.

    NOTE(review): tweets with exactly one hashtag fall into the DM branch —
    confirm '> 1' below is intended rather than '>= 1'.
    """
    def category_tagging(tweetdj, community, hashtag):
        # Match hashtags against category tags, diacritic- and
        # case-insensitively, but store the original (accented) tag.
        tag_tpl_lst = diacriticless_category_tags(community)
        if hashtag:
            for tag in tag_tpl_lst:
                # tag[0] has no diacritic
                # tag[1] potentially has 1 (or more) diacritic(s)
                if tag[0] in hashtag:
                    tweetdj.tags.add(tag[1])
    def keyword_tagging(tweetdj, community, hashtag):
        # Apply a TagKeyword's tag when either the tag itself appears as a
        # hashtag or one of its keywords appears in the tweet text.
        qs = TagKeyword.objects.filter(community__in=[community]).distinct()
        for tk in qs:
            if hashtag:
                if tk.tag.name in hashtag:
                    tweetdj.tags.add(tk.tag.name)
            for keyword in tk.keyword:
                if keyword in tweetdj.json["full_text"].lower():
                    tweetdj.tags.add(tk.tag.name)
    try:
        tweetdj = Tweetdj.objects.get(statusid=statusid)
    except Tweetdj.DoesNotExist:
        return
    hashtag: List = hashtag_list(tweetdj.json)
    if ( len(hashtag) > 1 ):
        category_tagging(tweetdj, community, hashtag)
        keyword_tagging(tweetdj, community, hashtag)
    else:
        try:
            socialmedia = SocialMedia.objects.get(name='twitter')
        except SocialMedia.DoesNotExist:
            logger.error("Create a twitter SocialMedia object first.")
            return
        handle_create_tag_queue.apply_async(args=(statusid, socialmedia.id, community.id))
def diacriticless_category_tags(community):
    """Return [(normalized_tag, original_tag)] for every hashtag-enabled
    Category of *community*, where normalized_tag is lowercased and stripped
    of diacritics for accent-insensitive matching.
    """
    # return a list of all category tags of a community, without diacritic,
    # lowercased
    tags = Category.objects.filter(
        community=community,
        hashtag=True,
    ).values_list('tag', flat=True)
    tag_tpl_lst = [(unidecode.unidecode(tag).lower(), tag) for tag in tags]
    logger.debug(f"{tag_tpl_lst=}")
    return tag_tpl_lst
return tag_tpl_lst | mpl-2.0 |
greencoder/hopefullysunny-django | vendor/noaa/stations.py | 1 | 2310 | import os
import shutil
import noaa.models
import noaa.utils
def nearest_stations_with_distance(lat, lon, stations, radius=10.0,
                                   units="miles"):
    """Find all stations within radius of target.

    :param lat: target latitude in decimal degrees
    :param lon: target longitude in decimal degrees
    :param stations: list of station objects to scan
    :param radius: search radius, expressed in *units*
    :param units: distance units understood by noaa.utils.earth_distance
    :returns: [(dist, station)] sorted by ascending distance
    """
    matches = []
    for station in stations:
        s_lat = station.location.lat
        s_lon = station.location.lon
        dist = noaa.utils.earth_distance(
            s_lat, s_lon, lat, lon, dist_units=units)
        if dist <= radius:
            matches.append((dist, station))
    # Sort on distance only: sorting the raw tuples would fall back to
    # comparing Station objects when two distances tie, which raises
    # TypeError on Python 3 (Station defines no ordering).
    matches.sort(key=lambda match: match[0])
    return matches
def nearest_station(lat, lon, stations):
    """Find the single nearest station, or None when none are in range.

    :param lat:
    :param lon:
    :param stations: list of stations objects to scan
    """
    matches = nearest_stations_with_distance(lat, lon, stations)
    if not matches:
        return None
    _, station = matches[0]
    return station
def get_stations_from_cache(filename):
    """Return stations parsed from *filename*, first downloading and caching
    the station index there if the file does not yet exist."""
    if not os.path.exists(filename):
        resp = noaa.stations.fetch_station_data()
        with open(filename, "w") as f:
            shutil.copyfileobj(resp, f)
    stations = noaa.stations.get_stations_from_file(filename)
    return stations
def get_stations_from_web():
    """Download the current NWS station index and parse it into stations."""
    return _parse_stations(fetch_station_data())
def get_stations_from_file(filename):
    """Parse stations from a locally cached station-index XML file."""
    with open(filename) as station_file:
        return _parse_stations(station_file)
def fetch_station_data():
    """Open the NWS current-observations station index URL and return the
    response file-like object."""
    STATIONS_URL = "http://www.weather.gov/xml/current_obs/index.xml"
    resp = noaa.utils.open_url(STATIONS_URL)
    return resp
def _parse_stations(fileobj):
    """Parse the NWS station-index XML into a list of Station objects.

    Each <station> element contributes its id plus a Location built from its
    latitude, longitude and state.
    """
    stations = []
    tree = noaa.utils.parse_xml(fileobj)
    for station_e in tree.getroot().findall('station'):
        lat = float(station_e.find('latitude').text)
        lon = float(station_e.find('longitude').text)
        description = station_e.find('state').text
        location = noaa.models.Location(lat, lon, description)
        station_id = station_e.find('station_id').text
        station = noaa.models.Station(station_id, location)
        stations.append(station)
    return stations
| mit |
a-tal/pyweet | pyweet/colors.py | 1 | 2316 | """Adds some color to the tweets."""
import re
from blessings import Terminal
class Term(object):
    """Static class to store terminal color info."""
    @staticmethod
    def colors():
        """Returns the colors in use for this terminal.

        Lazily built once and cached on the class; empty when the terminal
        reports no color support.
        """
        if not hasattr(Term, "_colors"):
            Term._colors = {}
            term = Terminal()
            if term.color:
                Term._colors["text"] = term.normal
                # Prefer nicer 256-color codes when available.
                if term.number_of_colors >= 256:
                    Term._colors["name"] = term.color(35)
                    Term._colors["url"] = term.color(45)
                    Term._colors["hashtag"] = term.color(227)
                else:
                    Term._colors["name"] = term.color(4)
                    Term._colors["url"] = term.color(6)
                    Term._colors["hashtag"] = term.color(3)
        return Term._colors
    @staticmethod
    def patterns():
        """Returns the patterns used for searching.

        Lazily compiled once and cached on the class; empty when the
        terminal supports no colors (nothing to highlight).
        """
        if not hasattr(Term, "_patterns"):
            Term._patterns = {}
            if Term.colors():
                Term._patterns["url"] = re.compile(
                    r'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|'
                    r'(?:%[0-9a-fA-F][0-9a-fA-F]))+'
                )
                Term._patterns["name"] = re.compile(r'(^|[^@\w])@(\w{1,15})\b')
                Term._patterns["hashtag"] = re.compile(r'(^|[ ])#\w+')
        return Term._patterns
def highlight_tweet(tweet):
    """Highlights the tweet with console colors if supported."""
    if not Term.colors():
        return tweet
    # Applies @name, then #hashtag, then URL highlighting, in that order.
    return _re_hl(_re_hl(_re_hl(tweet, "name"), "hashtag"), "url")
def _re_hl(tweet, re_name):
    """Highlights the tweet with the color and pattern of name.

    Rebuilds *tweet*, wrapping each match of Term.patterns()[re_name] in the
    corresponding color escape followed by a reset to the text color.
    """
    words = []
    colors = Term.colors()
    patterns = Term.patterns()
    last_match = 0
    for match in re.finditer(patterns[re_name], tweet):
        span = match.span()
        # The name/hashtag regexes include one leading boundary character
        # when not anchored at the start; "bump" keeps that character outside
        # the colored region. URL matches carry no such prefix.
        bump = int(span[0] != 0) and re_name != "url"
        words.append(tweet[last_match:span[0] + bump])
        word = "{0}{1}{2}".format(
            colors[re_name],
            tweet[span[0] + bump:span[1]],
            colors["text"],
        )
        words.append(word)
        last_match = span[1]
    words.append(tweet[last_match:])
    return "".join(words)
| bsd-3-clause |
b3niup/fackup | fackup/log.py | 1 | 1387 | import logging
import sys
from fackup.config import config
def setup_logging(verbose, quiet, logger=None):
    """Attach file/stdout/stderr handlers to *logger* (root by default).

    - A FileHandler is added when config['general']['logging']['file'] is
      set, at the configured level (error/debug, defaulting to info).
    - With verbose, a stdout handler at INFO (or DEBUG when verbose >= 2).
    - With neither quiet nor verbose, a stderr handler at ERROR.
    """
    if not logger:
        logger = logging.getLogger()
    # Logger itself passes everything; handlers do the level filtering.
    logger.setLevel(logging.DEBUG)
    fmt = logging.Formatter('[%(asctime)s] [%(process)d] ' \
                            '%(levelname)-5s (%(name)s) %(message)s',
                            '%Y-%m-%d %H:%M:%S')
    logfile = None
    if 'logging' in config['general'].keys():
        logfile = config['general']['logging'].get('file')
    if logfile:
        logfile_level = config['general']['logging'].get('level', 'info')
        logfile_level = logfile_level.lower()
        f = logging.FileHandler(logfile)
        if logfile_level == "error":
            f.setLevel(logging.ERROR)
        elif logfile_level == "debug":
            f.setLevel(logging.DEBUG)
        else:
            f.setLevel(logging.INFO)
        f.setFormatter(fmt)
        logger.addHandler(f)
    if verbose:
        stdout = logging.StreamHandler(sys.stdout)
        if verbose >= 2:
            stdout.setLevel(logging.DEBUG)
        else:
            stdout.setLevel(logging.INFO)
        stdout.setFormatter(fmt)
        logger.addHandler(stdout)
    if not quiet and not verbose:
        stderr = logging.StreamHandler()
        stderr.setLevel(logging.ERROR)
        stderr.setFormatter(fmt)
        logger.addHandler(stderr)
| mit |
tkurnosova/selenium | py/selenium/webdriver/chrome/webdriver.py | 30 | 3633 | # Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import base64
from selenium.webdriver.remote.command import Command
from selenium.webdriver.remote.webdriver import WebDriver as RemoteWebDriver
from selenium.common.exceptions import WebDriverException
from .remote_connection import ChromeRemoteConnection
from .service import Service
from .options import Options
class WebDriver(RemoteWebDriver):
    """
    Controls the ChromeDriver and allows you to drive the browser.
    You will need to download the ChromeDriver executable from
    http://chromedriver.storage.googleapis.com/index.html
    """
    def __init__(self, executable_path="chromedriver", port=0,
                 chrome_options=None, service_args=None,
                 desired_capabilities=None, service_log_path=None):
        """
        Creates a new instance of the chrome driver.
        Starts the service and then creates new instance of chrome driver.
        :Args:
         - executable_path - path to the executable. If the default is used it assumes the executable is in the $PATH
         - port - port you would like the service to run, if left as 0, a free port will be found.
         - desired_capabilities: Dictionary object with non-browser specific
           capabilities only, such as "proxy" or "loggingPref".
         - chrome_options: this takes an instance of ChromeOptions
        """
        # Merge chrome_options into desired_capabilities; explicit
        # desired_capabilities entries are updated by the options.
        if chrome_options is None:
            # desired_capabilities stays as passed in
            if desired_capabilities is None:
                desired_capabilities = self.create_options().to_capabilities()
        else:
            if desired_capabilities is None:
                desired_capabilities = chrome_options.to_capabilities()
            else:
                desired_capabilities.update(chrome_options.to_capabilities())
        self.service = Service(executable_path, port=port,
            service_args=service_args, log_path=service_log_path)
        self.service.start()
        try:
            RemoteWebDriver.__init__(self,
                command_executor=ChromeRemoteConnection(
                    remote_server_addr=self.service.service_url),
                desired_capabilities=desired_capabilities)
        except:
            # Ensure the chromedriver process is not leaked if the remote
            # session fails to start.
            self.quit()
            raise
        self._is_remote = False
    def launch_app(self, id):
        """Launches Chrome app specified by id."""
        return self.execute("launchApp", {'id': id})
    def quit(self):
        """
        Closes the browser and shuts down the ChromeDriver executable
        that is started when starting the ChromeDriver
        """
        try:
            RemoteWebDriver.quit(self)
        except:
            # We don't care about the message because something probably has gone wrong
            pass
        finally:
            self.service.stop()
    def create_options(self):
        """Returns a fresh, empty Options instance."""
        return Options()
| apache-2.0 |
jamesbeebop/CouchPotatoServer | libs/suds/sax/attribute.py | 203 | 5788 | # This program is free software; you can redistribute it and/or modify
# it under the terms of the (LGPL) GNU Lesser General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Lesser General Public License for more details at
# ( http://www.gnu.org/licenses/lgpl.html ).
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# written by: Jeff Ortel ( jortel@redhat.com )
"""
Provides XML I{attribute} classes.
"""
import suds.sax
from logging import getLogger
from suds import *
from suds.sax import *
from suds.sax.text import Text
log = getLogger(__name__)
class Attribute:
    """
    An XML attribute object.
    @ivar parent: The node containing this attribute
    @type parent: L{element.Element}
    @ivar prefix: The I{optional} namespace prefix.
    @type prefix: basestring
    @ivar name: The I{unqualified} name of the attribute
    @type name: basestring
    @ivar value: The attribute's value
    @type value: basestring
    """
    def __init__(self, name, value=None):
        """
        @param name: The attribute's name with I{optional} namespace prefix.
        @type name: basestring
        @param value: The attribute's value
        @type value: basestring
        """
        self.parent = None
        self.prefix, self.name = splitPrefix(name)
        self.setValue(value)
    def clone(self, parent=None):
        """
        Clone this object.
        @param parent: The parent for the clone.
        @type parent: L{element.Element}
        @return: A copy of this object assigned to the new parent.
        @rtype: L{Attribute}
        """
        a = Attribute(self.qname(), self.value)
        a.parent = parent
        return a
    def qname(self):
        """
        Get the B{fully} qualified name of this attribute
        @return: The fully qualified name.
        @rtype: basestring
        """
        if self.prefix is None:
            return self.name
        else:
            return ':'.join((self.prefix, self.name))
    def setValue(self, value):
        """
        Set the attributes value
        @param value: The new value (may be None)
        @type value: basestring
        @return: self
        @rtype: L{Attribute}
        """
        if isinstance(value, Text):
            self.value = value
        else:
            self.value = Text(value)
        return self
    def getValue(self, default=Text('')):
        """
        Get the attributes value with optional default.
        @param default: An optional value to be return when the
            attribute's has not been set.
        @type default: basestring
        @return: The attribute's value, or I{default}
        @rtype: L{Text}
        """
        if self.hasText():
            return self.value
        else:
            return default
    def hasText(self):
        """
        Get whether the attribute has I{text} and that it is not an empty
        (zero length) string.
        @return: True when has I{text}.
        @rtype: boolean
        """
        return ( self.value is not None and len(self.value) )
    def namespace(self):
        """
        Get the attributes namespace.  This may either be the namespace
        defined by an optional prefix, or its parent's namespace.
        @return: The attribute's namespace
        @rtype: (I{prefix}, I{name})
        """
        if self.prefix is None:
            return Namespace.default
        else:
            return self.resolvePrefix(self.prefix)
    def resolvePrefix(self, prefix):
        """
        Resolve the specified prefix to a known namespace.
        @param prefix: A declared prefix
        @type prefix: basestring
        @return: The namespace that has been mapped to I{prefix}
        @rtype: (I{prefix}, I{name})
        """
        ns = Namespace.default
        if self.parent is not None:
            ns = self.parent.resolvePrefix(prefix)
        return ns
    def match(self, name=None, ns=None):
        """
        Match by (optional) name and/or (optional) namespace.
        @param name: The optional attribute tag name.
        @type name: str
        @param ns: An optional namespace.
        @type ns: (I{prefix}, I{name})
        @return: True if matched.
        @rtype: boolean
        """
        if name is None:
            byname = True
        else:
            byname = ( self.name == name )
        if ns is None:
            byns = True
        else:
            byns = ( self.namespace()[1] == ns[1] )
        return ( byname and byns )
    def __eq__(self, rhs):
        """ equals operator """
        # Bug fix: compare self.prefix against rhs.prefix — the previous
        # code compared self.prefix against rhs.name, so two equal
        # attributes almost never compared equal.
        return rhs is not None and \
            isinstance(rhs, Attribute) and \
            self.prefix == rhs.prefix and \
            self.name == rhs.name
    def __repr__(self):
        """ get a string representation """
        return \
            'attr (prefix=%s, name=%s, value=(%s))' %\
                (self.prefix, self.name, self.value)
    def __str__(self):
        """ get an xml string representation """
        return unicode(self).encode('utf-8')
    def __unicode__(self):
        """ get an xml string representation """
        n = self.qname()
        if self.hasText():
            v = self.value.escape()
        else:
            v = self.value
        return u'%s="%s"' % (n, v)
| gpl-3.0 |
CloudCoreo/cloudcoreo-client | tests/script-run-test.py | 1 | 3025 | import subprocess
import os
import time
def print_output(rc, stdo, stde):
    """Print a subprocess result: the return code plus delimited stdout and
    stderr sections (each section printed only when non-empty)."""
    print "proc_ret_code: %s" % rc
    if stdo:
        print " --- begin stdout ---"
        print stdo
        print " --- end stdout ---"
    if stde:
        print " --- begin stderr ---"
        print stde
        print " --- end stderr ---"
def run_cmd(full_script_path):
    """Run the script at *full_script_path* from its own directory and show
    its output, using one of three strategies selected by `runmode`:

    - "communicate": block until exit, then print captured stdout/stderr.
    - "poll": poll up to ~10s for exit, then print captured output.
    - "file" (active): redirect output to /tmp/<script>.log and tail the
      log while the process runs — works for daemonizing scripts too.
    """
    work_dir = os.path.dirname(full_script_path)
    command = "./%s" % os.path.basename(full_script_path)
    print "running command: %s" % command
    print "cwd=%s" % work_dir
    runmode="file"
    if runmode == "communicate":
        ios = subprocess.PIPE
        proc = subprocess.Popen(
            command,
            cwd=work_dir,
            shell=False,
            stdin=ios,
            stdout=ios,
            stderr=ios)
        (proc_stdout, proc_stderr) = proc.communicate()
        proc_ret_code = proc.returncode
        print_output(proc_ret_code, proc_stdout, proc_stderr)
    elif runmode == "poll":
        ios = subprocess.PIPE
        # ios = None
        proc = subprocess.Popen(
            command,
            cwd=work_dir,
            shell=False,
            stdin=ios,
            stdout=ios,
            stderr=ios)
        # proc_pid = proc.pid
        #
        # print "got pid: %d" % proc_pid
        rc = None
        count = 0
        while rc is None and count < 10:
            rc = proc.poll()
            if rc is not None:
                break
            count += 1
            time.sleep(1)
        print "wait time for poll: %d" % count
        print "poll returns: %s" % rc
        (proc_stdout, proc_stderr) = proc.communicate()
        print_output(rc, proc_stdout, proc_stderr)
    elif runmode == "file":
        log_filename = "/tmp/%s.log" % os.path.basename(command)
        if os.path.exists(log_filename):
            os.remove(log_filename)
        with open(log_filename, 'w+') as log_file:
            proc = subprocess.Popen(
                command,
                cwd=work_dir,
                shell=False,
                stdout=log_file,
                stderr=log_file)
            count = 0
            where = log_file.tell()
            # Tail the log file while the process runs, remembering how far
            # we have already echoed (where).
            while proc.poll() is None:
                count += 1
                if count % 50 == 0:
                    count = 0
                    print "------ still waiting for pid: %d, where: %d" % (proc.pid, where)
                log_file.seek(where)
                for line in log_file:
                    print line
                where = log_file.tell()
                time.sleep(.1)
            print "----- return code: %s" % proc.returncode
        # Echo anything written between the last tail pass and exit.
        with open(log_filename, 'r') as log_file:
            log_file.seek(where)
            for line in log_file:
                print line
# Exercise run_cmd against a short-lived script and a daemonizing script.
print "------------ starting test -------------"
# sleeper.sh will never return...
# run_cmd("testdata/sleeper.sh")
run_cmd("testdata/one-and-done.sh")
run_cmd("testdata/daemonizer.sh")
print "============= ending test =============="
| apache-2.0 |
BeDjango/intef-openedx | cms/djangoapps/course_creators/migrations/0003_auto_20161215_0843.py | 1 | 1361 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
from django.conf import settings
class Migration(migrations.Migration):
    # Django-generated migration: refreshes field metadata (help_text,
    # choices, verbose_name) on the CourseCreator model.
    dependencies = [
        ('course_creators', '0002_auto_20161213_1029'),
    ]
    operations = [
        migrations.AlterField(
            model_name='coursecreator',
            name='note',
            field=models.CharField(help_text='Optional notes about this user (for example, why course creation access was denied)', max_length=512, blank=True),
        ),
        migrations.AlterField(
            model_name='coursecreator',
            name='state',
            field=models.CharField(default=b'unrequested', help_text='Current course creator state', max_length=24, choices=[(b'unrequested', 'unrequested'), (b'pending', 'pending'), (b'granted', 'granted'), (b'denied', 'denied')]),
        ),
        migrations.AlterField(
            model_name='coursecreator',
            name='state_changed',
            field=models.DateTimeField(help_text='The date when state was last updated', verbose_name=b'state last updated', auto_now_add=True),
        ),
        migrations.AlterField(
            model_name='coursecreator',
            name='user',
            field=models.OneToOneField(to=settings.AUTH_USER_MODEL, help_text='Studio user'),
        ),
    ]
| agpl-3.0 |
jballanc/openmicroscopy | components/tools/OmeroPy/test/unit/test_model.py | 3 | 7699 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Simple unit test which makes various calls on the code
generated model.
Copyright 2007 Glencoe Software, Inc. All rights reserved.
Use is subject to license terms supplied in LICENSE.txt
"""
import pytest
import omero
import omero.clients
from omero_model_ChannelI import ChannelI
from omero_model_PixelsI import PixelsI
from omero_model_ImageI import ImageI
from omero_model_DatasetI import DatasetI
from omero_model_ExperimenterI import ExperimenterI
from omero_model_ExperimenterGroupI import ExperimenterGroupI
from omero_model_GroupExperimenterMapI import GroupExperimenterMapI
from omero_model_DatasetImageLinkI import DatasetImageLinkI
from omero_model_ScriptJobI import ScriptJobI
from omero_model_DetailsI import DetailsI
from omero_model_BooleanAnnotationI import BooleanAnnotationI
from omero_model_ImageAnnotationLinkI import ImageAnnotationLinkI
from omero.rtypes import *
class TestModel(object):
    """Unit tests for the generated OMERO model classes: load/unload
    semantics, collection handling, linking, and the Python-specific
    attribute accessor behavior."""
    def testVirtual(self):
        img = ImageI()
        imgI = ImageI()
        img.unload()
        imgI.unload()
    def testUnloadCollections(self):
        pix = PixelsI()
        assert pix.sizeOfSettings() >= 0
        pix.unloadCollections()
        assert pix.sizeOfSettings() < 0
    def testSimpleCtor(self):
        img = ImageI()
        assert img.isLoaded()
        assert img.sizeOfPixels() >= 0
    def testUnloadedCtor(self):
        img = ImageI(rlong(1),False)
        assert not img.isLoaded()
        try:
            assert img.sizeOfDatasetLinks() < 0
            assert False, "Should throw"
        except:
            # Is true, but can't test it.
            pass
    def testUnloadCheckPtr(self):
        img = ImageI()
        assert img.isLoaded()
        assert img.getDetails() # details are auto instantiated
        assert not img.getName() # no other single-valued field is
        img.unload()
        assert not img.isLoaded()
        pytest.raises( omero.UnloadedEntityException, img.getDetails )
    def testUnloadField(self):
        img = ImageI()
        assert img.getDetails()
        img.unloadDetails()
        assert not img.getDetails()
    def testSequences(self):
        img = ImageI()
        assert img.sizeOfAnnotationLinks() >= 0
        img.linkAnnotation(None)
        img.unload()
        try:
            assert not img.sizeOfAnnotationLinks() >= 0
            assert len(img.copyAnnotationLinks()) == 0
            assert False, "can't reach here"
        except:
            # These are true, but can't be tested
            pass
    def testAccessors(self):
        name = rstring("name")
        img = ImageI()
        assert not img.getName()
        img.setName( name )
        assert img.getName()
        name = img.getName()
        assert name.val == "name"
        assert name == name
        img.setName(rstring("name2"))
        assert img.getName().val == "name2"
        assert img.getName()
        img.unload()
        try:
            assert not img.getName()
            assert False, "should fail"
        except:
            # Is true, but cannot test
            pass
    def testUnloadedAccessThrows(self):
        unloaded = ImageI(rlong(1),False)
        pytest.raises( omero.UnloadedEntityException, unloaded.getName )
    def testIterators(self):
        d = DatasetI()
        image = ImageI()
        image.linkDataset(d)
        it = image.iterateDatasetLinks()
        count = 0
        for i in it:
            count += 1
        assert count == 1
    def testClearSet(self):
        img = ImageI()
        assert img.sizeOfPixels() >= 0
        img.addPixels( PixelsI() )
        assert 1==img.sizeOfPixels()
        img.clearPixels()
        assert img.sizeOfPixels() >= 0
        assert 0==img.sizeOfPixels()
    def testUnloadSet(self):
        img = ImageI()
        assert img.sizeOfPixels() >= 0
        img.addPixels( PixelsI() )
        assert 1==img.sizeOfPixels()
        img.unloadPixels()
        assert img.sizeOfPixels() < 0
        # Can't check size assert 0==img.sizeOfPixels()
    def testRemoveFromSet(self):
        pix = PixelsI()
        img = ImageI()
        assert img.sizeOfPixels() >= 0
        img.addPixels( pix )
        assert 1==img.sizeOfPixels()
        img.removePixels( pix )
        assert 0==img.sizeOfPixels()
    def testLinkGroupAndUser(self):
        user = ExperimenterI()
        group = ExperimenterGroupI()
        link = GroupExperimenterMapI()
        link.id = rlong(1)
        link.link(group,user)
        user.addGroupExperimenterMap( link, False )
        group.addGroupExperimenterMap( link, False )
        count = 0
        for i in user.iterateGroupExperimenterMap():
            count += 1
        assert count == 1
    def testLinkViaLink(self):
        user = ExperimenterI()
        user.setFirstName(rstring("test"))
        user.setLastName(rstring("user"))
        user.setOmeName(rstring("UUID"))
        # possibly setOmeName() and setOmeName(string) ??
        # and then don't need omero/types.h
        group = ExperimenterGroupI()
        # TODOuser.linkExperimenterGroup(group)
        link = GroupExperimenterMapI()
        link.parent = group
        link.child = user
    def testLinkingAndUnlinking(self):
        d = DatasetI()
        i = ImageI()
        d.linkImage(i)
        assert d.sizeOfImageLinks() == 1
        d.unlinkImage(i)
        assert d.sizeOfImageLinks() == 0
        d = DatasetI()
        i = ImageI()
        d.linkImage(i)
        assert i.sizeOfDatasetLinks() == 1
        i.unlinkDataset(d)
        assert d.sizeOfImageLinks() == 0
        d = DatasetI()
        i = ImageI()
        dil = DatasetImageLinkI()
        dil.link(d,i)
        d.addDatasetImageLink(dil, False)
        assert d.sizeOfImageLinks() == 1
        assert i.sizeOfDatasetLinks() == 0
    def testScriptJobHasLoadedCollections(self):
        s = ScriptJobI()
        assert s.sizeOfOriginalFileLinks() >= 0
    #
    # Python specific
    #
    def testGetAttrGood(self):
        i = ImageI()
        assert i.loaded
        assert i.isLoaded()
        assert not i.name
        i.name = rstring("name")
        assert i.name
        i.setName( None )
        assert not i.getName()
        i.copyAnnotationLinks()
        i.linkAnnotation( omero.model.BooleanAnnotationI() )
    def testGetAttrBad(self):
        i = ImageI()
        def assign_loaded():
            i.loaded = False
        pytest.raises( AttributeError, assign_loaded )
        pytest.raises( AttributeError, lambda: i.foo )
        def assign_foo():
            i.foo = 1
        pytest.raises( AttributeError, assign_foo )
        pytest.raises( AttributeError, lambda: i.annotationLinks )
        pytest.raises( AttributeError, lambda: i.getAnnotationLinks() )
        def assign_links():
            i.annotationLinks = []
        pytest.raises( AttributeError, assign_links)
    def testGetAttrSetAttrDetails(self):
        d = DetailsI()
        assert None == d.owner
        d.owner = ExperimenterI()
        assert d.owner
        d.owner = None
        assert None == d.owner
        d.ice_preMarshal()
    def testProxy(self):
        i = ImageI()
        pytest.raises(omero.ClientError, i.proxy)
        i = ImageI(5, False)
        i.proxy()
    def testId(self):
        i = ImageI(4)
        assert 4 == i.id.val
    def testOrderedCollectionsTicket2547(self):
        pixels = PixelsI()
        channels = [ChannelI() for x in range(3)]
        pixels.addChannel(channels[0])
        assert 1 == pixels.sizeOfChannels()
        old = pixels.setChannel(0, channels[1])
        assert old == channels[0]
        assert 1 == pixels.sizeOfChannels()
| gpl-2.0 |
vertcoin/electrum-vtc | gui/kivy/nfc_scanner/scanner_android.py | 2 | 8458 | '''This is the Android implementatoin of NFC Scanning using the
built in NFC adapter of some android phones.
'''
from kivy.app import App
from kivy.clock import Clock
#Detect which platform we are on
from kivy.utils import platform
if platform != 'android':
raise ImportError
import threading
from electrum_vtc_gui.kivy.nfc_scanner import NFCBase
from jnius import autoclass, cast
from android.runnable import run_on_ui_thread
from android import activity
BUILDVERSION = autoclass('android.os.Build$VERSION').SDK_INT
NfcAdapter = autoclass('android.nfc.NfcAdapter')
PythonActivity = autoclass('org.kivy.android.PythonActivity')
JString = autoclass('java.lang.String')
Charset = autoclass('java.nio.charset.Charset')
locale = autoclass('java.util.Locale')
Intent = autoclass('android.content.Intent')
IntentFilter = autoclass('android.content.IntentFilter')
PendingIntent = autoclass('android.app.PendingIntent')
Ndef = autoclass('android.nfc.tech.Ndef')
NdefRecord = autoclass('android.nfc.NdefRecord')
NdefMessage = autoclass('android.nfc.NdefMessage')
app = None
class ScannerAndroid(NFCBase):
''' This is the class responsible for handling the interace with the
Android NFC adapter. See Module Documentation for deatils.
'''
name = 'NFCAndroid'
def nfc_init(self):
''' This is where we initialize NFC adapter.
'''
# Initialize NFC
global app
app = App.get_running_app()
# Make sure we are listening to new intent
activity.bind(on_new_intent=self.on_new_intent)
# Configure nfc
self.j_context = context = PythonActivity.mActivity
self.nfc_adapter = NfcAdapter.getDefaultAdapter(context)
# Check if adapter exists
if not self.nfc_adapter:
return False
# specify that we want our activity to remain on top whan a new intent
# is fired
self.nfc_pending_intent = PendingIntent.getActivity(context, 0,
Intent(context, context.getClass()).addFlags(
Intent.FLAG_ACTIVITY_SINGLE_TOP), 0)
# Filter for different types of action, by default we enable all.
# These are only for handling different NFC technologies when app is in foreground
self.ndef_detected = IntentFilter(NfcAdapter.ACTION_NDEF_DISCOVERED)
#self.tech_detected = IntentFilter(NfcAdapter.ACTION_TECH_DISCOVERED)
#self.tag_detected = IntentFilter(NfcAdapter.ACTION_TAG_DISCOVERED)
# setup tag discovery for ourt tag type
try:
self.ndef_detected.addCategory(Intent.CATEGORY_DEFAULT)
# setup the foreground dispatch to detect all mime types
self.ndef_detected.addDataType('*/*')
self.ndef_exchange_filters = [self.ndef_detected]
except Exception as err:
raise Exception(repr(err))
return True
def get_ndef_details(self, tag):
''' Get all the details from the tag.
'''
details = {}
try:
#print 'id'
details['uid'] = ':'.join(['{:02x}'.format(bt & 0xff) for bt in tag.getId()])
#print 'technologies'
details['Technologies'] = tech_list = [tech.split('.')[-1] for tech in tag.getTechList()]
#print 'get NDEF tag details'
ndefTag = cast('android.nfc.tech.Ndef', Ndef.get(tag))
#print 'tag size'
details['MaxSize'] = ndefTag.getMaxSize()
#details['usedSize'] = '0'
#print 'is tag writable?'
details['writable'] = ndefTag.isWritable()
#print 'Data format'
# Can be made readonly
# get NDEF message details
ndefMesg = ndefTag.getCachedNdefMessage()
# get size of current records
details['consumed'] = len(ndefMesg.toByteArray())
#print 'tag type'
details['Type'] = ndefTag.getType()
# check if tag is empty
if not ndefMesg:
details['Message'] = None
return details
ndefrecords = ndefMesg.getRecords()
length = len(ndefrecords)
#print 'length', length
# will contain the NDEF record types
recTypes = []
for record in ndefrecords:
recTypes.append({
'type': ''.join(map(unichr, record.getType())),
'payload': ''.join(map(unichr, record.getPayload()))
})
details['recTypes'] = recTypes
except Exception as err:
print str(err)
return details
def on_new_intent(self, intent):
''' This functions is called when the application receives a
new intent, for the ones the application has registered previously,
either in the manifest or in the foreground dispatch setup in the
nfc_init function above.
'''
action_list = (NfcAdapter.ACTION_NDEF_DISCOVERED,)
# get TAG
#tag = cast('android.nfc.Tag', intent.getParcelableExtra(NfcAdapter.EXTRA_TAG))
#details = self.get_ndef_details(tag)
if intent.getAction() not in action_list:
print 'unknow action, avoid.'
return
rawmsgs = intent.getParcelableArrayExtra(NfcAdapter.EXTRA_NDEF_MESSAGES)
if not rawmsgs:
return
for message in rawmsgs:
message = cast(NdefMessage, message)
payload = message.getRecords()[0].getPayload()
print 'payload: {}'.format(''.join(map(chr, payload)))
def nfc_disable(self):
'''Disable app from handling tags.
'''
self.disable_foreground_dispatch()
def nfc_enable(self):
'''Enable app to handle tags when app in foreground.
'''
self.enable_foreground_dispatch()
def create_AAR(self):
'''Create the record responsible for linking our application to the tag.
'''
return NdefRecord.createApplicationRecord(JString("org.electrum_vtc.kivy"))
def create_TNF_EXTERNAL(self, data):
'''Create our actual payload record.
'''
if BUILDVERSION >= 14:
domain = "org.electrum_vtc"
stype = "externalType"
extRecord = NdefRecord.createExternal(domain, stype, data)
else:
# Creating the NdefRecord manually:
extRecord = NdefRecord(
NdefRecord.TNF_EXTERNAL_TYPE,
"org.electrum_vtc:externalType",
'',
data)
return extRecord
def create_ndef_message(self, *recs):
''' Create the Ndef message that will written to tag
'''
records = []
for record in recs:
if record:
records.append(record)
return NdefMessage(records)
@run_on_ui_thread
def disable_foreground_dispatch(self):
'''Disable foreground dispatch when app is paused.
'''
self.nfc_adapter.disableForegroundDispatch(self.j_context)
@run_on_ui_thread
def enable_foreground_dispatch(self):
'''Start listening for new tags
'''
self.nfc_adapter.enableForegroundDispatch(self.j_context,
self.nfc_pending_intent, self.ndef_exchange_filters, self.ndef_tech_list)
@run_on_ui_thread
def _nfc_enable_ndef_exchange(self, data):
# Enable p2p exchange
# Create record
ndef_record = NdefRecord(
NdefRecord.TNF_MIME_MEDIA,
'org.electrum_vtc.kivy', '', data)
# Create message
ndef_message = NdefMessage([ndef_record])
# Enable ndef push
self.nfc_adapter.enableForegroundNdefPush(self.j_context, ndef_message)
# Enable dispatch
self.nfc_adapter.enableForegroundDispatch(self.j_context,
self.nfc_pending_intent, self.ndef_exchange_filters, [])
@run_on_ui_thread
def _nfc_disable_ndef_exchange(self):
# Disable p2p exchange
self.nfc_adapter.disableForegroundNdefPush(self.j_context)
self.nfc_adapter.disableForegroundDispatch(self.j_context)
def nfc_enable_exchange(self, data):
'''Enable Ndef exchange for p2p
'''
self._nfc_enable_ndef_exchange()
def nfc_disable_exchange(self):
''' Disable Ndef exchange for p2p
'''
self._nfc_disable_ndef_exchange()
| mit |
ASCrookes/django | django/db/backends/mysql/features.py | 274 | 2651 | from django.db.backends.base.features import BaseDatabaseFeatures
from django.utils.functional import cached_property
from .base import Database
try:
import pytz
except ImportError:
pytz = None
class DatabaseFeatures(BaseDatabaseFeatures):
empty_fetchmany_value = ()
update_can_self_select = False
allows_group_by_pk = True
related_fields_match_type = True
allow_sliced_subqueries = False
has_bulk_insert = True
has_select_for_update = True
has_select_for_update_nowait = False
supports_forward_references = False
supports_regex_backreferencing = False
supports_date_lookup_using_string = False
can_introspect_autofield = True
can_introspect_binary_field = False
can_introspect_small_integer_field = True
supports_timezones = False
requires_explicit_null_ordering_when_grouping = True
allows_auto_pk_0 = False
uses_savepoints = True
can_release_savepoints = True
atomic_transactions = False
supports_column_check_constraints = False
@cached_property
def _mysql_storage_engine(self):
"Internal method used in Django tests. Don't rely on this from your code"
with self.connection.cursor() as cursor:
cursor.execute("SELECT ENGINE FROM INFORMATION_SCHEMA.ENGINES WHERE SUPPORT = 'DEFAULT'")
result = cursor.fetchone()
return result[0]
@cached_property
def can_introspect_foreign_keys(self):
"Confirm support for introspected foreign keys"
return self._mysql_storage_engine != 'MyISAM'
@cached_property
def supports_microsecond_precision(self):
# See https://github.com/farcepest/MySQLdb1/issues/24 for the reason
# about requiring MySQLdb 1.2.5
return self.connection.mysql_version >= (5, 6, 4) and Database.version_info >= (1, 2, 5)
@cached_property
def has_zoneinfo_database(self):
# MySQL accepts full time zones names (eg. Africa/Nairobi) but rejects
# abbreviations (eg. EAT). When pytz isn't installed and the current
# time zone is LocalTimezone (the only sensible value in this
# context), the current time zone name will be an abbreviation. As a
# consequence, MySQL cannot perform time zone conversions reliably.
if pytz is None:
return False
# Test if the time zone definitions are installed.
with self.connection.cursor() as cursor:
cursor.execute("SELECT 1 FROM mysql.time_zone LIMIT 1")
return cursor.fetchone() is not None
def introspected_boolean_field_type(self, *args, **kwargs):
return 'IntegerField'
| bsd-3-clause |
tryggvib/datapackage | datapackage/data/update_licenses.py | 3 | 1651 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
from __future__ import unicode_literals
import os
import io
import json
import codecs
try:
from urllib2 import urlopen
except ImportError:
from urllib.request import urlopen
OPENDEFINITION_LICENSES_URL = \
'http://licenses.opendefinition.org/licenses/groups/all.json'
def load_licenses_aliases():
dirname = os.path.split(os.path.realpath(__file__))[0]
filename = os.path.join(dirname, 'licenses_aliases.json')
with io.open(filename, 'r') as fh:
aliases = json.load(fh)
return aliases
def load_opendefinition_licenses(url=OPENDEFINITION_LICENSES_URL):
fh = urlopen(url)
reader = codecs.getreader('utf-8')
licenses = json.load(reader(fh))
return licenses
def generate_licenses_json():
aliases = load_licenses_aliases()
od_licenses = load_opendefinition_licenses()
licenses = {}
for _, details in od_licenses.items():
if (details.get('domain_data') or details.get('domain_content') and
details['url']):
licenses[details['id']] = details['url']
for alias in aliases.get(details['id'], []):
licenses[alias] = details['url']
return json.dumps(licenses,
sort_keys=True, indent=4, separators=(',', ': '))
def save_licenses_json():
dirname = os.path.split(os.path.realpath(__file__))[0]
filename = os.path.join(dirname, 'licenses.json')
content = generate_licenses_json()
with io.open(filename, 'w') as fh:
fh.write(content)
if __name__ == '__main__':
save_licenses_json()
| gpl-3.0 |
mikeyarce/subscriptions-checkout-for-woocommerce | node_modules/node-gyp/gyp/pylib/gyp/mac_tool.py | 1569 | 23354 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Utility functions to perform Xcode-style build steps.
These functions are executed via gyp-mac-tool when using the Makefile generator.
"""
import fcntl
import fnmatch
import glob
import json
import os
import plistlib
import re
import shutil
import string
import subprocess
import sys
import tempfile
def main(args):
executor = MacTool()
exit_code = executor.Dispatch(args)
if exit_code is not None:
sys.exit(exit_code)
class MacTool(object):
"""This class performs all the Mac tooling steps. The methods can either be
executed directly, or dispatched from an argument list."""
def Dispatch(self, args):
"""Dispatches a string command to a method."""
if len(args) < 1:
raise Exception("Not enough arguments")
method = "Exec%s" % self._CommandifyName(args[0])
return getattr(self, method)(*args[1:])
def _CommandifyName(self, name_string):
"""Transforms a tool name like copy-info-plist to CopyInfoPlist"""
return name_string.title().replace('-', '')
def ExecCopyBundleResource(self, source, dest, convert_to_binary):
"""Copies a resource file to the bundle/Resources directory, performing any
necessary compilation on each resource."""
extension = os.path.splitext(source)[1].lower()
if os.path.isdir(source):
# Copy tree.
# TODO(thakis): This copies file attributes like mtime, while the
# single-file branch below doesn't. This should probably be changed to
# be consistent with the single-file branch.
if os.path.exists(dest):
shutil.rmtree(dest)
shutil.copytree(source, dest)
elif extension == '.xib':
return self._CopyXIBFile(source, dest)
elif extension == '.storyboard':
return self._CopyXIBFile(source, dest)
elif extension == '.strings':
self._CopyStringsFile(source, dest, convert_to_binary)
else:
shutil.copy(source, dest)
def _CopyXIBFile(self, source, dest):
"""Compiles a XIB file with ibtool into a binary plist in the bundle."""
# ibtool sometimes crashes with relative paths. See crbug.com/314728.
base = os.path.dirname(os.path.realpath(__file__))
if os.path.relpath(source):
source = os.path.join(base, source)
if os.path.relpath(dest):
dest = os.path.join(base, dest)
args = ['xcrun', 'ibtool', '--errors', '--warnings', '--notices',
'--output-format', 'human-readable-text', '--compile', dest, source]
ibtool_section_re = re.compile(r'/\*.*\*/')
ibtool_re = re.compile(r'.*note:.*is clipping its content')
ibtoolout = subprocess.Popen(args, stdout=subprocess.PIPE)
current_section_header = None
for line in ibtoolout.stdout:
if ibtool_section_re.match(line):
current_section_header = line
elif not ibtool_re.match(line):
if current_section_header:
sys.stdout.write(current_section_header)
current_section_header = None
sys.stdout.write(line)
return ibtoolout.returncode
def _ConvertToBinary(self, dest):
subprocess.check_call([
'xcrun', 'plutil', '-convert', 'binary1', '-o', dest, dest])
def _CopyStringsFile(self, source, dest, convert_to_binary):
"""Copies a .strings file using iconv to reconvert the input into UTF-16."""
input_code = self._DetectInputEncoding(source) or "UTF-8"
# Xcode's CpyCopyStringsFile / builtin-copyStrings seems to call
# CFPropertyListCreateFromXMLData() behind the scenes; at least it prints
# CFPropertyListCreateFromXMLData(): Old-style plist parser: missing
# semicolon in dictionary.
# on invalid files. Do the same kind of validation.
import CoreFoundation
s = open(source, 'rb').read()
d = CoreFoundation.CFDataCreate(None, s, len(s))
_, error = CoreFoundation.CFPropertyListCreateFromXMLData(None, d, 0, None)
if error:
return
fp = open(dest, 'wb')
fp.write(s.decode(input_code).encode('UTF-16'))
fp.close()
if convert_to_binary == 'True':
self._ConvertToBinary(dest)
def _DetectInputEncoding(self, file_name):
"""Reads the first few bytes from file_name and tries to guess the text
encoding. Returns None as a guess if it can't detect it."""
fp = open(file_name, 'rb')
try:
header = fp.read(3)
except e:
fp.close()
return None
fp.close()
if header.startswith("\xFE\xFF"):
return "UTF-16"
elif header.startswith("\xFF\xFE"):
return "UTF-16"
elif header.startswith("\xEF\xBB\xBF"):
return "UTF-8"
else:
return None
def ExecCopyInfoPlist(self, source, dest, convert_to_binary, *keys):
"""Copies the |source| Info.plist to the destination directory |dest|."""
# Read the source Info.plist into memory.
fd = open(source, 'r')
lines = fd.read()
fd.close()
# Insert synthesized key/value pairs (e.g. BuildMachineOSBuild).
plist = plistlib.readPlistFromString(lines)
if keys:
plist = dict(plist.items() + json.loads(keys[0]).items())
lines = plistlib.writePlistToString(plist)
# Go through all the environment variables and replace them as variables in
# the file.
IDENT_RE = re.compile(r'[/\s]')
for key in os.environ:
if key.startswith('_'):
continue
evar = '${%s}' % key
evalue = os.environ[key]
lines = string.replace(lines, evar, evalue)
# Xcode supports various suffices on environment variables, which are
# all undocumented. :rfc1034identifier is used in the standard project
# template these days, and :identifier was used earlier. They are used to
# convert non-url characters into things that look like valid urls --
# except that the replacement character for :identifier, '_' isn't valid
# in a URL either -- oops, hence :rfc1034identifier was born.
evar = '${%s:identifier}' % key
evalue = IDENT_RE.sub('_', os.environ[key])
lines = string.replace(lines, evar, evalue)
evar = '${%s:rfc1034identifier}' % key
evalue = IDENT_RE.sub('-', os.environ[key])
lines = string.replace(lines, evar, evalue)
# Remove any keys with values that haven't been replaced.
lines = lines.split('\n')
for i in range(len(lines)):
if lines[i].strip().startswith("<string>${"):
lines[i] = None
lines[i - 1] = None
lines = '\n'.join(filter(lambda x: x is not None, lines))
# Write out the file with variables replaced.
fd = open(dest, 'w')
fd.write(lines)
fd.close()
# Now write out PkgInfo file now that the Info.plist file has been
# "compiled".
self._WritePkgInfo(dest)
if convert_to_binary == 'True':
self._ConvertToBinary(dest)
def _WritePkgInfo(self, info_plist):
"""This writes the PkgInfo file from the data stored in Info.plist."""
plist = plistlib.readPlist(info_plist)
if not plist:
return
# Only create PkgInfo for executable types.
package_type = plist['CFBundlePackageType']
if package_type != 'APPL':
return
# The format of PkgInfo is eight characters, representing the bundle type
# and bundle signature, each four characters. If that is missing, four
# '?' characters are used instead.
signature_code = plist.get('CFBundleSignature', '????')
if len(signature_code) != 4: # Wrong length resets everything, too.
signature_code = '?' * 4
dest = os.path.join(os.path.dirname(info_plist), 'PkgInfo')
fp = open(dest, 'w')
fp.write('%s%s' % (package_type, signature_code))
fp.close()
def ExecFlock(self, lockfile, *cmd_list):
"""Emulates the most basic behavior of Linux's flock(1)."""
# Rely on exception handling to report errors.
fd = os.open(lockfile, os.O_RDONLY|os.O_NOCTTY|os.O_CREAT, 0o666)
fcntl.flock(fd, fcntl.LOCK_EX)
return subprocess.call(cmd_list)
def ExecFilterLibtool(self, *cmd_list):
"""Calls libtool and filters out '/path/to/libtool: file: foo.o has no
symbols'."""
libtool_re = re.compile(r'^.*libtool: file: .* has no symbols$')
libtool_re5 = re.compile(
r'^.*libtool: warning for library: ' +
r'.* the table of contents is empty ' +
r'\(no object file members in the library define global symbols\)$')
env = os.environ.copy()
# Ref:
# http://www.opensource.apple.com/source/cctools/cctools-809/misc/libtool.c
# The problem with this flag is that it resets the file mtime on the file to
# epoch=0, e.g. 1970-1-1 or 1969-12-31 depending on timezone.
env['ZERO_AR_DATE'] = '1'
libtoolout = subprocess.Popen(cmd_list, stderr=subprocess.PIPE, env=env)
_, err = libtoolout.communicate()
for line in err.splitlines():
if not libtool_re.match(line) and not libtool_re5.match(line):
print >>sys.stderr, line
# Unconditionally touch the output .a file on the command line if present
# and the command succeeded. A bit hacky.
if not libtoolout.returncode:
for i in range(len(cmd_list) - 1):
if cmd_list[i] == "-o" and cmd_list[i+1].endswith('.a'):
os.utime(cmd_list[i+1], None)
break
return libtoolout.returncode
def ExecPackageFramework(self, framework, version):
"""Takes a path to Something.framework and the Current version of that and
sets up all the symlinks."""
# Find the name of the binary based on the part before the ".framework".
binary = os.path.basename(framework).split('.')[0]
CURRENT = 'Current'
RESOURCES = 'Resources'
VERSIONS = 'Versions'
if not os.path.exists(os.path.join(framework, VERSIONS, version, binary)):
# Binary-less frameworks don't seem to contain symlinks (see e.g.
# chromium's out/Debug/org.chromium.Chromium.manifest/ bundle).
return
# Move into the framework directory to set the symlinks correctly.
pwd = os.getcwd()
os.chdir(framework)
# Set up the Current version.
self._Relink(version, os.path.join(VERSIONS, CURRENT))
# Set up the root symlinks.
self._Relink(os.path.join(VERSIONS, CURRENT, binary), binary)
self._Relink(os.path.join(VERSIONS, CURRENT, RESOURCES), RESOURCES)
# Back to where we were before!
os.chdir(pwd)
def _Relink(self, dest, link):
"""Creates a symlink to |dest| named |link|. If |link| already exists,
it is overwritten."""
if os.path.lexists(link):
os.remove(link)
os.symlink(dest, link)
def ExecCompileXcassets(self, keys, *inputs):
"""Compiles multiple .xcassets files into a single .car file.
This invokes 'actool' to compile all the inputs .xcassets files. The
|keys| arguments is a json-encoded dictionary of extra arguments to
pass to 'actool' when the asset catalogs contains an application icon
or a launch image.
Note that 'actool' does not create the Assets.car file if the asset
catalogs does not contains imageset.
"""
command_line = [
'xcrun', 'actool', '--output-format', 'human-readable-text',
'--compress-pngs', '--notices', '--warnings', '--errors',
]
is_iphone_target = 'IPHONEOS_DEPLOYMENT_TARGET' in os.environ
if is_iphone_target:
platform = os.environ['CONFIGURATION'].split('-')[-1]
if platform not in ('iphoneos', 'iphonesimulator'):
platform = 'iphonesimulator'
command_line.extend([
'--platform', platform, '--target-device', 'iphone',
'--target-device', 'ipad', '--minimum-deployment-target',
os.environ['IPHONEOS_DEPLOYMENT_TARGET'], '--compile',
os.path.abspath(os.environ['CONTENTS_FOLDER_PATH']),
])
else:
command_line.extend([
'--platform', 'macosx', '--target-device', 'mac',
'--minimum-deployment-target', os.environ['MACOSX_DEPLOYMENT_TARGET'],
'--compile',
os.path.abspath(os.environ['UNLOCALIZED_RESOURCES_FOLDER_PATH']),
])
if keys:
keys = json.loads(keys)
for key, value in keys.iteritems():
arg_name = '--' + key
if isinstance(value, bool):
if value:
command_line.append(arg_name)
elif isinstance(value, list):
for v in value:
command_line.append(arg_name)
command_line.append(str(v))
else:
command_line.append(arg_name)
command_line.append(str(value))
# Note: actool crashes if inputs path are relative, so use os.path.abspath
# to get absolute path name for inputs.
command_line.extend(map(os.path.abspath, inputs))
subprocess.check_call(command_line)
def ExecMergeInfoPlist(self, output, *inputs):
"""Merge multiple .plist files into a single .plist file."""
merged_plist = {}
for path in inputs:
plist = self._LoadPlistMaybeBinary(path)
self._MergePlist(merged_plist, plist)
plistlib.writePlist(merged_plist, output)
def ExecCodeSignBundle(self, key, resource_rules, entitlements, provisioning):
"""Code sign a bundle.
This function tries to code sign an iOS bundle, following the same
algorithm as Xcode:
1. copy ResourceRules.plist from the user or the SDK into the bundle,
2. pick the provisioning profile that best match the bundle identifier,
and copy it into the bundle as embedded.mobileprovision,
3. copy Entitlements.plist from user or SDK next to the bundle,
4. code sign the bundle.
"""
resource_rules_path = self._InstallResourceRules(resource_rules)
substitutions, overrides = self._InstallProvisioningProfile(
provisioning, self._GetCFBundleIdentifier())
entitlements_path = self._InstallEntitlements(
entitlements, substitutions, overrides)
subprocess.check_call([
'codesign', '--force', '--sign', key, '--resource-rules',
resource_rules_path, '--entitlements', entitlements_path,
os.path.join(
os.environ['TARGET_BUILD_DIR'],
os.environ['FULL_PRODUCT_NAME'])])
def _InstallResourceRules(self, resource_rules):
"""Installs ResourceRules.plist from user or SDK into the bundle.
Args:
resource_rules: string, optional, path to the ResourceRules.plist file
to use, default to "${SDKROOT}/ResourceRules.plist"
Returns:
Path to the copy of ResourceRules.plist into the bundle.
"""
source_path = resource_rules
target_path = os.path.join(
os.environ['BUILT_PRODUCTS_DIR'],
os.environ['CONTENTS_FOLDER_PATH'],
'ResourceRules.plist')
if not source_path:
source_path = os.path.join(
os.environ['SDKROOT'], 'ResourceRules.plist')
shutil.copy2(source_path, target_path)
return target_path
def _InstallProvisioningProfile(self, profile, bundle_identifier):
"""Installs embedded.mobileprovision into the bundle.
Args:
profile: string, optional, short name of the .mobileprovision file
to use, if empty or the file is missing, the best file installed
will be used
bundle_identifier: string, value of CFBundleIdentifier from Info.plist
Returns:
A tuple containing two dictionary: variables substitutions and values
to overrides when generating the entitlements file.
"""
source_path, provisioning_data, team_id = self._FindProvisioningProfile(
profile, bundle_identifier)
target_path = os.path.join(
os.environ['BUILT_PRODUCTS_DIR'],
os.environ['CONTENTS_FOLDER_PATH'],
'embedded.mobileprovision')
shutil.copy2(source_path, target_path)
substitutions = self._GetSubstitutions(bundle_identifier, team_id + '.')
return substitutions, provisioning_data['Entitlements']
def _FindProvisioningProfile(self, profile, bundle_identifier):
"""Finds the .mobileprovision file to use for signing the bundle.
Checks all the installed provisioning profiles (or if the user specified
the PROVISIONING_PROFILE variable, only consult it) and select the most
specific that correspond to the bundle identifier.
Args:
profile: string, optional, short name of the .mobileprovision file
to use, if empty or the file is missing, the best file installed
will be used
bundle_identifier: string, value of CFBundleIdentifier from Info.plist
Returns:
A tuple of the path to the selected provisioning profile, the data of
the embedded plist in the provisioning profile and the team identifier
to use for code signing.
Raises:
SystemExit: if no .mobileprovision can be used to sign the bundle.
"""
profiles_dir = os.path.join(
os.environ['HOME'], 'Library', 'MobileDevice', 'Provisioning Profiles')
if not os.path.isdir(profiles_dir):
print >>sys.stderr, (
'cannot find mobile provisioning for %s' % bundle_identifier)
sys.exit(1)
provisioning_profiles = None
if profile:
profile_path = os.path.join(profiles_dir, profile + '.mobileprovision')
if os.path.exists(profile_path):
provisioning_profiles = [profile_path]
if not provisioning_profiles:
provisioning_profiles = glob.glob(
os.path.join(profiles_dir, '*.mobileprovision'))
valid_provisioning_profiles = {}
for profile_path in provisioning_profiles:
profile_data = self._LoadProvisioningProfile(profile_path)
app_id_pattern = profile_data.get(
'Entitlements', {}).get('application-identifier', '')
for team_identifier in profile_data.get('TeamIdentifier', []):
app_id = '%s.%s' % (team_identifier, bundle_identifier)
if fnmatch.fnmatch(app_id, app_id_pattern):
valid_provisioning_profiles[app_id_pattern] = (
profile_path, profile_data, team_identifier)
if not valid_provisioning_profiles:
print >>sys.stderr, (
'cannot find mobile provisioning for %s' % bundle_identifier)
sys.exit(1)
# If the user has multiple provisioning profiles installed that can be
# used for ${bundle_identifier}, pick the most specific one (ie. the
# provisioning profile whose pattern is the longest).
selected_key = max(valid_provisioning_profiles, key=lambda v: len(v))
return valid_provisioning_profiles[selected_key]
def _LoadProvisioningProfile(self, profile_path):
"""Extracts the plist embedded in a provisioning profile.
Args:
profile_path: string, path to the .mobileprovision file
Returns:
Content of the plist embedded in the provisioning profile as a dictionary.
"""
with tempfile.NamedTemporaryFile() as temp:
subprocess.check_call([
'security', 'cms', '-D', '-i', profile_path, '-o', temp.name])
return self._LoadPlistMaybeBinary(temp.name)
def _MergePlist(self, merged_plist, plist):
"""Merge |plist| into |merged_plist|."""
for key, value in plist.iteritems():
if isinstance(value, dict):
merged_value = merged_plist.get(key, {})
if isinstance(merged_value, dict):
self._MergePlist(merged_value, value)
merged_plist[key] = merged_value
else:
merged_plist[key] = value
else:
merged_plist[key] = value
def _LoadPlistMaybeBinary(self, plist_path):
"""Loads into a memory a plist possibly encoded in binary format.
This is a wrapper around plistlib.readPlist that tries to convert the
plist to the XML format if it can't be parsed (assuming that it is in
the binary format).
Args:
plist_path: string, path to a plist file, in XML or binary format
Returns:
Content of the plist as a dictionary.
"""
try:
# First, try to read the file using plistlib that only supports XML,
# and if an exception is raised, convert a temporary copy to XML and
# load that copy.
return plistlib.readPlist(plist_path)
except:
pass
with tempfile.NamedTemporaryFile() as temp:
shutil.copy2(plist_path, temp.name)
subprocess.check_call(['plutil', '-convert', 'xml1', temp.name])
return plistlib.readPlist(temp.name)
def _GetSubstitutions(self, bundle_identifier, app_identifier_prefix):
"""Constructs a dictionary of variable substitutions for Entitlements.plist.
Args:
bundle_identifier: string, value of CFBundleIdentifier from Info.plist
app_identifier_prefix: string, value for AppIdentifierPrefix
Returns:
Dictionary of substitutions to apply when generating Entitlements.plist.
"""
return {
'CFBundleIdentifier': bundle_identifier,
'AppIdentifierPrefix': app_identifier_prefix,
}
def _GetCFBundleIdentifier(self):
"""Extracts CFBundleIdentifier value from Info.plist in the bundle.
Returns:
Value of CFBundleIdentifier in the Info.plist located in the bundle.
"""
info_plist_path = os.path.join(
os.environ['TARGET_BUILD_DIR'],
os.environ['INFOPLIST_PATH'])
info_plist_data = self._LoadPlistMaybeBinary(info_plist_path)
return info_plist_data['CFBundleIdentifier']
def _InstallEntitlements(self, entitlements, substitutions, overrides):
"""Generates and install the ${BundleName}.xcent entitlements file.
Expands variables "$(variable)" pattern in the source entitlements file,
add extra entitlements defined in the .mobileprovision file and the copy
the generated plist to "${BundlePath}.xcent".
Args:
entitlements: string, optional, path to the Entitlements.plist template
to use, defaults to "${SDKROOT}/Entitlements.plist"
substitutions: dictionary, variable substitutions
overrides: dictionary, values to add to the entitlements
Returns:
Path to the generated entitlements file.
"""
source_path = entitlements
target_path = os.path.join(
os.environ['BUILT_PRODUCTS_DIR'],
os.environ['PRODUCT_NAME'] + '.xcent')
if not source_path:
source_path = os.path.join(
os.environ['SDKROOT'],
'Entitlements.plist')
shutil.copy2(source_path, target_path)
data = self._LoadPlistMaybeBinary(target_path)
data = self._ExpandVariables(data, substitutions)
if overrides:
for key in overrides:
if key not in data:
data[key] = overrides[key]
plistlib.writePlist(data, target_path)
return target_path
def _ExpandVariables(self, data, substitutions):
"""Expands variables "$(variable)" in data.
Args:
data: object, can be either string, list or dictionary
substitutions: dictionary, variable substitutions to perform
Returns:
Copy of data where each references to "$(variable)" has been replaced
by the corresponding value found in substitutions, or left intact if
the key was not found.
"""
if isinstance(data, str):
for key, value in substitutions.iteritems():
data = data.replace('$(%s)' % key, value)
return data
if isinstance(data, list):
return [self._ExpandVariables(v, substitutions) for v in data]
if isinstance(data, dict):
return {k: self._ExpandVariables(data[k], substitutions) for k in data}
return data
if __name__ == '__main__':
    # Forward the command-line arguments (minus the program name) to main()
    # and propagate its return value as the process exit status.
    sys.exit(main(sys.argv[1:]))
| gpl-2.0 |
skycucumber/Messaging-Gateway | webapp/venv/lib/python2.7/site-packages/twisted/internet/iocpreactor/reactor.py | 42 | 9180 | # -*- test-case-name: twisted.internet.test.test_iocp -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Reactor that uses IO completion ports
"""
import warnings, socket, sys
from zope.interface import implements
from twisted.internet import base, interfaces, main, error
from twisted.python import log, failure
from twisted.internet._dumbwin32proc import Process
from twisted.internet.win32eventreactor import _ThreadedWin32EventsMixin
from twisted.internet.iocpreactor import iocpsupport as _iocp
from twisted.internet.iocpreactor.const import WAIT_TIMEOUT
from twisted.internet.iocpreactor import tcp, udp
try:
from twisted.protocols.tls import TLSMemoryBIOFactory
except ImportError:
# Either pyOpenSSL isn't installed, or it is too old for this code to work.
# The reactor won't provide IReactorSSL.
TLSMemoryBIOFactory = None
_extraInterfaces = ()
warnings.warn(
"pyOpenSSL 0.10 or newer is required for SSL support in iocpreactor. "
"It is missing, so the reactor will not support SSL APIs.")
else:
_extraInterfaces = (interfaces.IReactorSSL,)
# Upper bound (in milliseconds) for a single completion-port wait, so
# ctrl-break can be noticed reasonably fast; see doIteration for explanation.
MAX_TIMEOUT = 2000  # 2 seconds, see doIteration for explanation
# Cap on events processed per doIteration pass, so timed calls keep firing.
EVENTS_PER_LOOP = 1000  # XXX: what's a good value here?
# keys to associate with normal and waker events
KEY_NORMAL, KEY_WAKEUP = range(2)
# Pre-built failure reasons reused by _callEventCallback.
_NO_GETHANDLE = error.ConnectionFdescWentAway(
    'Handler has no getFileHandle method')
_NO_FILEDESC = error.ConnectionFdescWentAway('Filedescriptor went away')
class IOCPReactor(base._SignalReactorMixin, base.ReactorBase,
                  _ThreadedWin32EventsMixin):
    """
    Reactor whose event loop blocks on a Windows I/O completion port.
    """
    implements(interfaces.IReactorTCP, interfaces.IReactorUDP,
               interfaces.IReactorMulticast, interfaces.IReactorProcess,
               *_extraInterfaces)

    # The _iocp.CompletionPort all I/O events are posted to; set in __init__.
    port = None

    def __init__(self):
        base.ReactorBase.__init__(self)
        self.port = _iocp.CompletionPort()
        # Handles tracked via add/removeActiveHandle; drained by removeAll().
        self.handles = set()

    def addActiveHandle(self, handle):
        # Register a handle for later cleanup reporting (see removeAll).
        self.handles.add(handle)

    def removeActiveHandle(self, handle):
        self.handles.discard(handle)

    def doIteration(self, timeout):
        """
        Poll the IO completion port for new events.
        """
        # This function sits and waits for an IO completion event.
        #
        # There are two requirements: process IO events as soon as they arrive
        # and process ctrl-break from the user in a reasonable amount of time.
        #
        # There are three kinds of waiting.
        # 1) GetQueuedCompletionStatus (self.port.getEvent) to wait for IO
        # events only.
        # 2) Msg* family of wait functions that can stop waiting when
        # ctrl-break is detected (then, I think, Python converts it into a
        # KeyboardInterrupt)
        # 3) *Ex family of wait functions that put the thread into an
        # "alertable" wait state which is supposedly triggered by IO completion
        #
        # 2) and 3) can be combined. Trouble is, my IO completion is not
        # causing 3) to trigger, possibly because I do not use an IO completion
        # callback. Windows is weird.
        # There are two ways to handle this. I could use MsgWaitForSingleObject
        # here and GetQueuedCompletionStatus in a thread. Or I could poll with
        # a reasonable interval. Guess what! Threads are hard.
        processed_events = 0
        if timeout is None:
            timeout = MAX_TIMEOUT
        else:
            # getEvent takes milliseconds; never wait longer than MAX_TIMEOUT.
            timeout = min(MAX_TIMEOUT, int(1000*timeout))
        rc, bytes, key, evt = self.port.getEvent(timeout)
        while 1:
            if rc == WAIT_TIMEOUT:
                break
            if key != KEY_WAKEUP:
                assert key == KEY_NORMAL
                # Run the event callback with the owner's log context.
                log.callWithLogger(evt.owner, self._callEventCallback,
                                   rc, bytes, evt)
                processed_events += 1
            if processed_events >= EVENTS_PER_LOOP:
                # Yield back to the reactor so timed calls are not starved.
                break
            # Subsequent polls use a zero timeout: drain, don't block again.
            rc, bytes, key, evt = self.port.getEvent(0)

    def _callEventCallback(self, rc, bytes, evt):
        # Dispatch one completion event and drop connections whose
        # underlying file handle has gone away or whose callback failed.
        owner = evt.owner
        why = None
        try:
            evt.callback(rc, bytes, evt)
            handfn = getattr(owner, 'getFileHandle', None)
            if not handfn:
                why = _NO_GETHANDLE
            elif handfn() == -1:
                why = _NO_FILEDESC
            if why:
                return # ignore handles that were closed
        except:
            # Deliberately broad: any callback failure is logged and turned
            # into a connection loss rather than killing the reactor loop.
            why = sys.exc_info()[1]
            log.err()
        if why:
            owner.loseConnection(failure.Failure(why))

    def installWaker(self):
        # No waker object needed: wakeUp() posts directly to the port.
        pass

    def wakeUp(self):
        # Post a dummy event keyed KEY_WAKEUP to break out of getEvent().
        self.port.postEvent(0, KEY_WAKEUP, None)

    def registerHandle(self, handle):
        # Associate an OS handle with the completion port for IO events.
        self.port.addHandle(handle, KEY_NORMAL)

    def createSocket(self, af, stype):
        # Create a socket already registered with the completion port.
        skt = socket.socket(af, stype)
        self.registerHandle(skt.fileno())
        return skt

    def listenTCP(self, port, factory, backlog=50, interface=''):
        """
        @see: twisted.internet.interfaces.IReactorTCP.listenTCP
        """
        p = tcp.Port(port, factory, backlog, interface, self)
        p.startListening()
        return p

    def connectTCP(self, host, port, factory, timeout=30, bindAddress=None):
        """
        @see: twisted.internet.interfaces.IReactorTCP.connectTCP
        """
        c = tcp.Connector(host, port, factory, timeout, bindAddress, self)
        c.connect()
        return c

    # SSL support is layered over TCP via TLSMemoryBIOFactory when pyOpenSSL
    # is available; otherwise the methods below raise NotImplementedError.
    if TLSMemoryBIOFactory is not None:
        def listenSSL(self, port, factory, contextFactory, backlog=50, interface=''):
            """
            @see: twisted.internet.interfaces.IReactorSSL.listenSSL
            """
            port = self.listenTCP(
                port,
                TLSMemoryBIOFactory(contextFactory, False, factory),
                backlog, interface)
            port._type = 'TLS'
            return port

        def connectSSL(self, host, port, factory, contextFactory, timeout=30, bindAddress=None):
            """
            @see: twisted.internet.interfaces.IReactorSSL.connectSSL
            """
            return self.connectTCP(
                host, port,
                TLSMemoryBIOFactory(contextFactory, True, factory),
                timeout, bindAddress)
    else:
        def listenSSL(self, port, factory, contextFactory, backlog=50, interface=''):
            """
            Non-implementation of L{IReactorSSL.listenSSL}.  Some dependency
            is not satisfied.  This implementation always raises
            L{NotImplementedError}.
            """
            raise NotImplementedError(
                "pyOpenSSL 0.10 or newer is required for SSL support in "
                "iocpreactor. It is missing, so the reactor does not support "
                "SSL APIs.")

        def connectSSL(self, host, port, factory, contextFactory, timeout=30, bindAddress=None):
            """
            Non-implementation of L{IReactorSSL.connectSSL}.  Some dependency
            is not satisfied.  This implementation always raises
            L{NotImplementedError}.
            """
            raise NotImplementedError(
                "pyOpenSSL 0.10 or newer is required for SSL support in "
                "iocpreactor. It is missing, so the reactor does not support "
                "SSL APIs.")

    def listenUDP(self, port, protocol, interface='', maxPacketSize=8192):
        """
        Connects a given L{DatagramProtocol} to the given numeric UDP port.

        @returns: object conforming to L{IListeningPort}.
        """
        p = udp.Port(port, protocol, interface, maxPacketSize, self)
        p.startListening()
        return p

    def listenMulticast(self, port, protocol, interface='', maxPacketSize=8192,
                        listenMultiple=False):
        """
        Connects a given DatagramProtocol to the given numeric UDP port.

        EXPERIMENTAL.

        @returns: object conforming to IListeningPort.
        """
        p = udp.MulticastPort(port, protocol, interface, maxPacketSize, self,
                              listenMultiple)
        p.startListening()
        return p

    def spawnProcess(self, processProtocol, executable, args=(), env={},
                     path=None, uid=None, gid=None, usePTY=0, childFDs=None):
        """
        Spawn a process.

        uid, gid, usePTY and childFDs are POSIX-only features and are
        rejected explicitly on this (Windows) reactor.
        """
        if uid is not None:
            raise ValueError("Setting UID is unsupported on this platform.")
        if gid is not None:
            raise ValueError("Setting GID is unsupported on this platform.")
        if usePTY:
            raise ValueError("PTYs are unsupported on this platform.")
        if childFDs is not None:
            raise ValueError(
                "Custom child file descriptor mappings are unsupported on "
                "this platform.")
        args, env = self._checkProcessArgs(args, env)
        return Process(self, processProtocol, executable, args, env, path)

    def removeAll(self):
        # Return (and forget) every tracked handle; used at shutdown.
        res = list(self.handles)
        self.handles.clear()
        return res
def install():
    """Create an IOCPReactor and install it as the global Twisted reactor."""
    main.installReactor(IOCPReactor())
__all__ = ['IOCPReactor', 'install']
| gpl-2.0 |
tumbl3w33d/ansible | test/units/modules/network/opx/test_opx_cps.py | 23 | 8692 | #
# (c) 2018 Red Hat Inc.
#
# (c) 2018 Dell Inc. or its subsidiaries. All Rights Reserved.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from units.compat.mock import patch, Mock
import sys
# Stub out the platform-specific OPX 'cps' bindings before opx_cps is
# imported below, so the module under test can be loaded without the real
# switch libraries.  QUALIFIERS/OPERATIONS mirror the attributes opx_cps
# reads from the real cps module.
sys.modules['cps'] = Mock(QUALIFIERS=[
    "target",
    "observed",
    "proposed",
    "realtime",
    "registration",
    "running",
    "startup"
], OPERATIONS=[
    "delete",
    "create",
    "set",
    "action",
    "get"
])
sys.modules['cps_object'] = Mock()
sys.modules['cps_utils'] = Mock()
from ansible.modules.network.opx import opx_cps
from units.modules.utils import set_module_args
from .opx_module import TestOpxModule, load_fixture
class TestOpxCpsModule(TestOpxModule):
    """Unit tests for the opx_cps module.

    Each test patches opx_cps's collaborators (cps_get, cps_transaction,
    parse_cps_parameters, get_config), feeds canned fixture data, runs the
    module via execute_module() and then checks which collaborators were
    invoked and how often.
    """

    module = opx_cps

    def setUp(self):
        # Patch every external call opx_cps makes; keep both the patcher
        # (for stop()) and the started mock (for return values / asserts).
        super(TestOpxCpsModule, self).setUp()
        self.mock_cps_get = patch('ansible.modules.network.opx.opx_cps.cps_get')
        self.cps_get = self.mock_cps_get.start()
        self.mock_cps_transaction = patch('ansible.modules.network.opx.opx_cps.cps_transaction')
        self.cps_transaction = self.mock_cps_transaction.start()
        self.mock_parse_cps_parameters = patch('ansible.modules.network.opx.opx_cps.parse_cps_parameters')
        self.parse_cps_parameters = self.mock_parse_cps_parameters.start()
        # NOTE(review): this patch target ('...opx_cps.cps_get.parse_cps_parameters')
        # looks odd next to the others -- confirm it should not be
        # 'ansible.modules.network.opx.opx_cps.get_config'.
        self.mock_get_config = patch('ansible.modules.network.opx.opx_cps.cps_get.parse_cps_parameters')
        self.get_config = self.mock_get_config.start()

    def tearDown(self):
        # Undo every patch started in setUp so state never leaks across tests.
        super(TestOpxCpsModule, self).tearDown()
        self.mock_cps_get.stop()
        self.mock_cps_transaction.stop()
        self.mock_parse_cps_parameters.stop()
        self.mock_get_config.stop()

    def test_opx_operation_create(self):
        # No existing config -> create goes through one transaction.
        resp = load_fixture('opx_operation_create.cfg')
        attr_data = {"base-if-vlan/if/interfaces/interface/id": 105,
                     "if/interfaces/interface/type": "ianaift:l2vlan"}
        module_name = "dell-base-if-cmn/if/interfaces/interface"
        set_module_args(dict(module_name=module_name, operation="create", attr_data=attr_data))
        self.get_config.return_value = dict()
        self.cps_transaction.return_value = dict(changed=True, response=resp)
        self.execute_module(changed=True, response=resp)
        self.assertEqual(self.parse_cps_parameters.call_count, 2)
        self.assertEqual(self.cps_transaction.call_count, 1)

    def test_opx_operation_set(self):
        # Existing config present -> set issues one transaction.
        resp = load_fixture('opx_operation_set.cfg')
        config_data = load_fixture('opx_get_config.cfg')
        attr_data = {"dell-if/if/interfaces/interface/untagged-ports": "e101-001-0",
                     "if/interfaces/interface/name": "br105"}
        module_name = "dell-base-if-cmn/if/interfaces/interface"
        set_module_args(dict(module_name=module_name, operation="set", attr_data=attr_data))
        self.get_config.return_value = config_data
        self.cps_transaction.return_value = dict(changed=True, response=resp)
        self.execute_module(changed=True, response=resp)
        self.assertEqual(self.parse_cps_parameters.call_count, 2)
        self.assertEqual(self.cps_transaction.call_count, 1)

    def test_opx_operation_delete(self):
        # Deleting an existing interface -> changed, one transaction.
        resp = load_fixture('opx_operation_delete.cfg')
        config_data = load_fixture('opx_get_config.cfg')
        attr_data = {"if/interfaces/interface/name": "br105"}
        module_name = "dell-base-if-cmn/if/interfaces/interface"
        set_module_args(dict(module_name=module_name, operation="delete", attr_data=attr_data))
        self.get_config.return_value = config_data
        self.cps_transaction.return_value = dict(changed=True, response=resp)
        self.execute_module(changed=True, response=resp)
        self.assertEqual(self.parse_cps_parameters.call_count, 2)
        self.assertEqual(self.cps_transaction.call_count, 1)

    def test_opx_operation_delete_fail(self):
        # Deleting something that does not exist -> no change reported.
        resp = load_fixture('opx_operation_delete.cfg')
        attr_data = {"if/interfaces/interface/name": "br105"}
        module_name = "dell-base-if-cmn/if/interfaces/interface"
        set_module_args(dict(module_name=module_name, operation="delete", attr_data=attr_data))
        self.get_config.return_value = dict()
        self.execute_module(changed=False)
        self.assertEqual(self.parse_cps_parameters.call_count, 2)
        self.assertEqual(self.cps_transaction.call_count, 1)

    def test_opx_operation_get(self):
        # "get" goes through cps_get only; no transaction is made.
        resp = load_fixture('opx_operation_get.cfg')
        attr_data = {"if/interfaces/interface/type": "ianaift:l2vlan"}
        module_name = "dell-base-if-cmn/if/interfaces/interface"
        set_module_args(dict(module_name=module_name, operation="get", attr_data=attr_data))
        self.cps_get.return_value = dict(changed=True, response=resp)
        self.cps_transaction.return_value = None
        self.execute_module(changed=True, response=resp)
        self.assertEqual(self.parse_cps_parameters.call_count, 1)
        self.assertEqual(self.cps_get.call_count, 1)
        self.cps_transaction.assert_not_called()

    def test_opx_operation_set_fail(self):
        # Transaction raising -> module fails with the wrapped message.
        attr_data = {"dell-if/if/interfaces/interface/untagged-ports": "e101-001-0",
                     "if/interfaces/interface/name": "br105"}
        exp_msg = "RuntimeError: Transaction error while set"
        module_name = "dell-base-if-cmn/if/interfaces/interface"
        set_module_args(dict(module_name=module_name, operation="set", attr_data=attr_data))
        self.get_config.return_value = dict()
        self.cps_transaction.side_effect = RuntimeError("Transaction error while set")
        self.execute_module(failed=True, msg=exp_msg)
        self.assertEqual(self.parse_cps_parameters.call_count, 2)
        self.assertEqual(self.cps_transaction.call_count, 1)

    def test_opx_operation_create_fail(self):
        # Same as above but for the "create" path.
        attr_data = {"if/interfaces/interface/type": "ianaift:l2vlan"}
        config_data = load_fixture('opx_get_config.cfg')
        exp_msg = "RuntimeError: Transaction error while create"
        module_name = "dell-base-if-cmn/if/interfaces/interface"
        set_module_args(dict(module_name=module_name, operation="create", attr_data=attr_data))
        self.get_config.return_value = config_data
        self.cps_transaction.side_effect = RuntimeError("Transaction error while create")
        self.execute_module(failed=True, msg=exp_msg)
        self.assertEqual(self.parse_cps_parameters.call_count, 2)
        self.assertEqual(self.cps_transaction.call_count, 1)

    def test_opx_operation_get_db(self):
        # "get" with db=True still uses cps_get only.
        resp = load_fixture('opx_operation_get_db.cfg')
        attr_data = {"if/interfaces/interface/name": "e101-001-0"}
        module_name = "dell-base-if-cmn/if/interfaces/interface"
        set_module_args(dict(module_name=module_name, operation="get", attr_data=attr_data, db=True))
        self.cps_get.return_value = dict(changed=True, response=resp)
        self.cps_transaction.return_value = None
        self.execute_module(changed=True, response=resp, db=True)
        self.assertEqual(self.parse_cps_parameters.call_count, 1)
        self.assertEqual(self.cps_get.call_count, 1)
        self.cps_transaction.assert_not_called()

    def test_opx_operation_set_commit_event(self):
        # commit_event=True is passed through; still a single transaction.
        resp = load_fixture('opx_operation_set.cfg')
        config_data = load_fixture('opx_get_config.cfg')
        attr_data = {"dell-if/if/interfaces/interface/untagged-ports": "e101-001-0",
                     "if/interfaces/interface/name": "br105"}
        module_name = "dell-base-if-cmn/if/interfaces/interface"
        set_module_args(dict(module_name=module_name, operation="set", attr_data=attr_data, commit_event=True))
        self.get_config.return_value = config_data
        self.cps_transaction.return_value = dict(changed=True, response=resp)
        self.execute_module(changed=True, response=resp, commit_event=True)
        self.assertEqual(self.parse_cps_parameters.call_count, 2)
        self.assertEqual(self.cps_transaction.call_count, 1)
| gpl-3.0 |
jmartinm/invenio | modules/bibformat/lib/elements/bfe_meta_opengraph_video.py | 17 | 5340 | # -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2013 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""BibFormat element - return the video of a record"""
import cgi
from invenio.config import CFG_SITE_URL, CFG_SITE_SECURE_URL, CFG_CERN_SITE
from invenio.bibdocfile import BibRecDocs, get_superformat_from_format
from invenio.config import CFG_WEBSEARCH_ENABLE_OPENGRAPH
def format_element(bfo):
    """
    Return the video of the record, suitable for the Open Graph protocol.

    Builds og:video/og:image <meta> tags (with secure-URL variants) from the
    record's attached video files and icons; on the CERN site, additional
    player-specific tags are produced from the 8567_u URLs.

    @param bfo: BibFormat object of the record being formatted
    @return: string of <meta>/<link> tags; '' when Open Graph is disabled
    """
    if not CFG_WEBSEARCH_ENABLE_OPENGRAPH:
        return ""
    bibarchive = BibRecDocs(bfo.recID)
    bibdocs = bibarchive.list_bibdocs()
    additional_tags = ""
    videos = []
    images = []
    for doc in bibdocs:
        found_icons = []
        # Holds the URL of the last video-format file found in this bibdoc.
        found_video_url = ''
        for docfile in doc.list_latest_files():
            if docfile.is_icon():
                found_icons.append((docfile.get_size(), docfile.get_url()))
            elif get_superformat_from_format(docfile.get_format()).lower() in [".mp4", '.webm', '.ogv']:
                found_video_url = docfile.get_url()
        # Sort by size so og:image tags are emitted smallest-first.
        found_icons.sort()
        for icon_size, icon_url in found_icons:
            images.append((icon_url, icon_url.replace(CFG_SITE_URL, CFG_SITE_SECURE_URL)))
        if found_video_url:
            videos.append((found_video_url, found_video_url.replace(CFG_SITE_URL, CFG_SITE_SECURE_URL)))
    if CFG_CERN_SITE:
        mp4_urls = [url.replace('http://mediaarchive.cern.ch', 'https://mediastream.cern.ch') \
                    for url in bfo.fields('8567_u') if url.endswith('.mp4')]
        img_urls = [url.replace('http://mediaarchive.cern.ch', 'https://mediastream.cern.ch') \
                    for url in bfo.fields('8567_u') if url.endswith('.jpg') or url.endswith('.png')]
        if mp4_urls:
            mp4_url = mp4_urls[0]
            # 300__b records the aspect ratio; default to 16:9 dimensions.
            if "4/3" in bfo.field("300__b"):
                width = "640"
                height = "480"
            else:
                width = "640"
                height = "360"
            additional_tags += '''
<meta property="og:video" content="%(CFG_CERN_PLAYER_URL)s?file=%(mp4_url_relative)s&streamer=%(CFG_STREAMER_URL)s&provider=rtmp&stretching=exactfit&image=%(image_url)s" />
<meta property="og:video:height" content="%(height)s" />
<meta property="og:video:width" content="%(width)s" />
<meta property="og:video:type" content="application/x-shockwave-flash" />
<meta property="og:video" content="%(mp4_url)s" />
<meta property="og:video:type" content="video/mp4" />
<meta property="og:image" content="%(image_url)s" />
<meta name="twitter:player:height" content="%(height)s" />
<meta name="twitter:player:width" content="%(width)s" />
<link rel="image_src" href="%(image_url)s" />
<link rel="video_src" href="%(CFG_CERN_PLAYER_URL)s?file=%(mp4_url_relative)s&streamer=%(CFG_STREAMER_URL)s&provider=rtmp&stretching=exactfit&image=%(image_url)s"/>
''' % {'CFG_CERN_PLAYER_URL': "https://cds.cern.ch/mediaplayer.swf",
       'CFG_STREAMER_URL': "rtmp://wowza.cern.ch:1935/vod",
       'width': width,
       'height': height,
       'image_url': img_urls and img_urls[0] or '',
       # mp4_url was already rewritten to mediastream above, so this
       # replace is normally a no-op; kept for safety.
       'mp4_url': mp4_url.replace('http://mediaarchive.cern.ch', 'https://mediastream.cern.ch'),
       'mp4_url_relative': '/' + '/'.join(mp4_url.split('/')[4:])}
            try:
                from invenio.media_utils import generate_embedding_url
                embed_url = generate_embedding_url(bfo.field('037__a'))
                additional_tags += '''<meta name="twitter:player" content="%s"/>''' % cgi.escape(embed_url, quote=True).replace('http://', 'https://', 1)
            except Exception:
                # Best effort: the embedding helper may be missing or fail;
                # the element still emits the remaining tags.
                pass
    tags = ['<meta property="og:image" content="%s" />%s' % (image_url, image_url != image_secure_url and '\n<meta property="og:image:secure_url" content="%s" />' % image_secure_url or "") for image_url, image_secure_url in images]
    tags.extend(['<meta property="og:video" content="%s" />%s' % (video_url, video_url != video_secure_url and '\n<meta property="og:video:secure_url" content="%s" />' % video_secure_url or "") for video_url, video_secure_url in videos])
    return "\n".join(tags) + additional_tags
def escape_values(bfo):
    """
    Called by BibFormat in order to check if output of this element
    should be escaped.
    """
    # Never escape: this element deliberately produces raw HTML meta tags.
    return 0
| gpl-2.0 |
jinie/sublime-wakatime | packages/wakatime/packages/pytz3/tzinfo.py | 11 | 19217 | '''Base classes and helpers for building zone specific tzinfo classes'''
from datetime import datetime, timedelta, tzinfo
from bisect import bisect_right
try:
set
except NameError:
from sets import Set as set
import pytz
from pytz.exceptions import AmbiguousTimeError, NonExistentTimeError
__all__ = []
# Interning cache: one shared timedelta object per distinct seconds value.
_timedelta_cache = {}

def memorized_timedelta(seconds):
    '''Create only one instance of each distinct timedelta'''
    cached = _timedelta_cache.get(seconds)
    if cached is None:
        cached = timedelta(seconds=seconds)
        _timedelta_cache[seconds] = cached
    return cached
_epoch = datetime.utcfromtimestamp(0)
# Interning cache: one shared naive UTC datetime per distinct seconds value.
_datetime_cache = {0: _epoch}

def memorized_datetime(seconds):
    '''Create only one instance of each distinct datetime'''
    if seconds not in _datetime_cache:
        # NB. We can't just do datetime.utcfromtimestamp(seconds) as this
        # fails with negative values under Windows (Bug #90096), so offset
        # from the epoch instead.
        _datetime_cache[seconds] = _epoch + timedelta(seconds=seconds)
    return _datetime_cache[seconds]
# Interning cache keyed by the raw argument tuple.
_ttinfo_cache = {}

def memorized_ttinfo(*args):
    '''Create only one instance of each distinct tuple'''
    # args is presumably (utcoffset_seconds, dstoffset_seconds, tzname) --
    # matching DstTzInfo._transition_info entries.
    if args not in _ttinfo_cache:
        _ttinfo_cache[args] = (
            memorized_timedelta(args[0]),
            memorized_timedelta(args[1]),
            args[2]
        )
    return _ttinfo_cache[args]
_notime = memorized_timedelta(0)
def _to_seconds(td):
'''Convert a timedelta to seconds'''
return td.seconds + td.days * 24 * 60 * 60
class BaseTzInfo(tzinfo):
    """Common base for pytz timezone classes (StaticTzInfo, DstTzInfo)."""
    # Overridden in subclass
    _utcoffset = None  # timedelta offset from UTC
    _tzname = None     # abbreviated timezone name, e.g. 'EST'
    zone = None        # zone name from the timezone database

    def __str__(self):
        return self.zone
class StaticTzInfo(BaseTzInfo):
    '''A timezone that has a constant offset from UTC

    These timezones are rare, as most locations have changed their
    offset at some point in their history
    '''
    def fromutc(self, dt):
        '''See datetime.tzinfo.fromutc'''
        if dt.tzinfo is not None and dt.tzinfo is not self:
            raise ValueError('fromutc: dt.tzinfo is not self')
        # Constant offset: simply shift and attach this zone.
        return (dt + self._utcoffset).replace(tzinfo=self)

    def utcoffset(self, dt, is_dst=None):
        '''See datetime.tzinfo.utcoffset

        is_dst is ignored for StaticTzInfo, and exists only to
        retain compatibility with DstTzInfo.
        '''
        return self._utcoffset

    def dst(self, dt, is_dst=None):
        '''See datetime.tzinfo.dst

        is_dst is ignored for StaticTzInfo, and exists only to
        retain compatibility with DstTzInfo.
        '''
        # A static zone never observes daylight savings.
        return _notime

    def tzname(self, dt, is_dst=None):
        '''See datetime.tzinfo.tzname

        is_dst is ignored for StaticTzInfo, and exists only to
        retain compatibility with DstTzInfo.
        '''
        return self._tzname

    def localize(self, dt, is_dst=False):
        '''Convert naive time to local time'''
        if dt.tzinfo is not None:
            raise ValueError('Not naive datetime (tzinfo is already set)')
        return dt.replace(tzinfo=self)

    def normalize(self, dt, is_dst=False):
        '''Correct the timezone information on the given datetime.

        This is normally a no-op, as StaticTzInfo timezones never have
        ambiguous cases to correct:

        >>> from pytz import timezone
        >>> gmt = timezone('GMT')
        >>> isinstance(gmt, StaticTzInfo)
        True
        >>> dt = datetime(2011, 5, 8, 1, 2, 3, tzinfo=gmt)
        >>> gmt.normalize(dt) is dt
        True

        The supported method of converting between timezones is to use
        datetime.astimezone(). Currently normalize() also works:

        >>> la = timezone('America/Los_Angeles')
        >>> dt = la.localize(datetime(2011, 5, 7, 1, 2, 3))
        >>> fmt = '%Y-%m-%d %H:%M:%S %Z (%z)'
        >>> gmt.normalize(dt).strftime(fmt)
        '2011-05-07 08:02:03 GMT (+0000)'
        '''
        if dt.tzinfo is self:
            return dt
        if dt.tzinfo is None:
            raise ValueError('Naive time - no tzinfo set')
        return dt.astimezone(self)

    def __repr__(self):
        return '<StaticTzInfo %r>' % (self.zone,)

    def __reduce__(self):
        # Special pickle to zone remains a singleton and to cope with
        # database changes.
        return pytz._p, (self.zone,)
class DstTzInfo(BaseTzInfo):
'''A timezone that has a variable offset from UTC
The offset might change if daylight savings time comes into effect,
or at a point in history when the region decides to change their
timezone definition.
'''
# Overridden in subclass
_utc_transition_times = None # Sorted list of DST transition times in UTC
_transition_info = None # [(utcoffset, dstoffset, tzname)] corresponding
# to _utc_transition_times entries
zone = None
# Set in __init__
_tzinfos = None
_dst = None # DST offset
def __init__(self, _inf=None, _tzinfos=None):
    """Initialize either the zone's root instance or a per-offset variant.

    With no arguments (public use), this becomes the zone's first
    transition-info instance and eagerly builds one sibling instance per
    distinct (utcoffset, dst, tzname) entry, all sharing one _tzinfos map.
    With (_inf, _tzinfos) (internal use), it becomes one such sibling.
    """
    if _inf:
        self._tzinfos = _tzinfos
        self._utcoffset, self._dst, self._tzname = _inf
    else:
        _tzinfos = {}
        self._tzinfos = _tzinfos
        self._utcoffset, self._dst, self._tzname = self._transition_info[0]
        _tzinfos[self._transition_info[0]] = self
        for inf in self._transition_info[1:]:
            if inf not in _tzinfos:
                _tzinfos[inf] = self.__class__(inf, _tzinfos)
def fromutc(self, dt):
    '''See datetime.tzinfo.fromutc'''
    # Accept any tzinfo instance belonging to this zone's family (they all
    # share the same _tzinfos mapping), not just this exact instance.
    if (dt.tzinfo is not None
        and getattr(dt.tzinfo, '_tzinfos', None) is not self._tzinfos):
        raise ValueError('fromutc: dt.tzinfo is not self')
    dt = dt.replace(tzinfo=None)
    # Find the last transition at or before dt (UTC) and apply its offset.
    idx = max(0, bisect_right(self._utc_transition_times, dt) - 1)
    inf = self._transition_info[idx]
    return (dt + inf[0]).replace(tzinfo=self._tzinfos[inf])
def normalize(self, dt):
'''Correct the timezone information on the given datetime
If date arithmetic crosses DST boundaries, the tzinfo
is not magically adjusted. This method normalizes the
tzinfo to the correct one.
To test, first we need to do some setup
>>> from pytz import timezone
>>> utc = timezone('UTC')
>>> eastern = timezone('US/Eastern')
>>> fmt = '%Y-%m-%d %H:%M:%S %Z (%z)'
We next create a datetime right on an end-of-DST transition point,
the instant when the wallclocks are wound back one hour.
>>> utc_dt = datetime(2002, 10, 27, 6, 0, 0, tzinfo=utc)
>>> loc_dt = utc_dt.astimezone(eastern)
>>> loc_dt.strftime(fmt)
'2002-10-27 01:00:00 EST (-0500)'
Now, if we subtract a few minutes from it, note that the timezone
information has not changed.
>>> before = loc_dt - timedelta(minutes=10)
>>> before.strftime(fmt)
'2002-10-27 00:50:00 EST (-0500)'
But we can fix that by calling the normalize method
>>> before = eastern.normalize(before)
>>> before.strftime(fmt)
'2002-10-27 01:50:00 EDT (-0400)'
The supported method of converting between timezones is to use
datetime.astimezone(). Currently, normalize() also works:
>>> th = timezone('Asia/Bangkok')
>>> am = timezone('Europe/Amsterdam')
>>> dt = th.localize(datetime(2011, 5, 7, 1, 2, 3))
>>> fmt = '%Y-%m-%d %H:%M:%S %Z (%z)'
>>> am.normalize(dt).strftime(fmt)
'2011-05-06 20:02:03 CEST (+0200)'
'''
if dt.tzinfo is None:
raise ValueError('Naive time - no tzinfo set')
# Convert dt in localtime to UTC
offset = dt.tzinfo._utcoffset
dt = dt.replace(tzinfo=None)
dt = dt - offset
# convert it back, and return it
return self.fromutc(dt)
def localize(self, dt, is_dst=False):
'''Convert naive time to local time.
This method should be used to construct localtimes, rather
than passing a tzinfo argument to a datetime constructor.
is_dst is used to determine the correct timezone in the ambigous
period at the end of daylight savings time.
>>> from pytz import timezone
>>> fmt = '%Y-%m-%d %H:%M:%S %Z (%z)'
>>> amdam = timezone('Europe/Amsterdam')
>>> dt = datetime(2004, 10, 31, 2, 0, 0)
>>> loc_dt1 = amdam.localize(dt, is_dst=True)
>>> loc_dt2 = amdam.localize(dt, is_dst=False)
>>> loc_dt1.strftime(fmt)
'2004-10-31 02:00:00 CEST (+0200)'
>>> loc_dt2.strftime(fmt)
'2004-10-31 02:00:00 CET (+0100)'
>>> str(loc_dt2 - loc_dt1)
'1:00:00'
Use is_dst=None to raise an AmbiguousTimeError for ambiguous
times at the end of daylight savings
>>> try:
... loc_dt1 = amdam.localize(dt, is_dst=None)
... except AmbiguousTimeError:
... print('Ambiguous')
Ambiguous
is_dst defaults to False
>>> amdam.localize(dt) == amdam.localize(dt, False)
True
is_dst is also used to determine the correct timezone in the
wallclock times jumped over at the start of daylight savings time.
>>> pacific = timezone('US/Pacific')
>>> dt = datetime(2008, 3, 9, 2, 0, 0)
>>> ploc_dt1 = pacific.localize(dt, is_dst=True)
>>> ploc_dt2 = pacific.localize(dt, is_dst=False)
>>> ploc_dt1.strftime(fmt)
'2008-03-09 02:00:00 PDT (-0700)'
>>> ploc_dt2.strftime(fmt)
'2008-03-09 02:00:00 PST (-0800)'
>>> str(ploc_dt2 - ploc_dt1)
'1:00:00'
Use is_dst=None to raise a NonExistentTimeError for these skipped
times.
>>> try:
... loc_dt1 = pacific.localize(dt, is_dst=None)
... except NonExistentTimeError:
... print('Non-existent')
Non-existent
'''
if dt.tzinfo is not None:
raise ValueError('Not naive datetime (tzinfo is already set)')
# Find the two best possibilities.
possible_loc_dt = set()
for delta in [timedelta(days=-1), timedelta(days=1)]:
loc_dt = dt + delta
idx = max(0, bisect_right(
self._utc_transition_times, loc_dt) - 1)
inf = self._transition_info[idx]
tzinfo = self._tzinfos[inf]
loc_dt = tzinfo.normalize(dt.replace(tzinfo=tzinfo))
if loc_dt.replace(tzinfo=None) == dt:
possible_loc_dt.add(loc_dt)
if len(possible_loc_dt) == 1:
return possible_loc_dt.pop()
# If there are no possibly correct timezones, we are attempting
# to convert a time that never happened - the time period jumped
# during the start-of-DST transition period.
if len(possible_loc_dt) == 0:
# If we refuse to guess, raise an exception.
if is_dst is None:
raise NonExistentTimeError(dt)
# If we are forcing the pre-DST side of the DST transition, we
# obtain the correct timezone by winding the clock forward a few
# hours.
elif is_dst:
return self.localize(
dt + timedelta(hours=6), is_dst=True) - timedelta(hours=6)
# If we are forcing the post-DST side of the DST transition, we
# obtain the correct timezone by winding the clock back.
else:
return self.localize(
dt - timedelta(hours=6), is_dst=False) + timedelta(hours=6)
# If we get this far, we have multiple possible timezones - this
# is an ambiguous case occuring during the end-of-DST transition.
# If told to be strict, raise an exception since we have an
# ambiguous case
if is_dst is None:
raise AmbiguousTimeError(dt)
# Filter out the possiblilities that don't match the requested
# is_dst
filtered_possible_loc_dt = [
p for p in possible_loc_dt
if bool(p.tzinfo._dst) == is_dst
]
# Hopefully we only have one possibility left. Return it.
if len(filtered_possible_loc_dt) == 1:
return filtered_possible_loc_dt[0]
if len(filtered_possible_loc_dt) == 0:
filtered_possible_loc_dt = list(possible_loc_dt)
# If we get this far, we have in a wierd timezone transition
# where the clocks have been wound back but is_dst is the same
# in both (eg. Europe/Warsaw 1915 when they switched to CET).
# At this point, we just have to guess unless we allow more
# hints to be passed in (such as the UTC offset or abbreviation),
# but that is just getting silly.
#
# Choose the earliest (by UTC) applicable timezone.
sorting_keys = {}
for local_dt in filtered_possible_loc_dt:
key = local_dt.replace(tzinfo=None) - local_dt.tzinfo._utcoffset
sorting_keys[key] = local_dt
first_key = sorted(sorting_keys)[0]
return sorting_keys[first_key]
def utcoffset(self, dt, is_dst=None):
'''See datetime.tzinfo.utcoffset
The is_dst parameter may be used to remove ambiguity during DST
transitions.
>>> from pytz import timezone
>>> tz = timezone('America/St_Johns')
>>> ambiguous = datetime(2009, 10, 31, 23, 30)
>>> tz.utcoffset(ambiguous, is_dst=False)
datetime.timedelta(-1, 73800)
>>> tz.utcoffset(ambiguous, is_dst=True)
datetime.timedelta(-1, 77400)
>>> try:
... tz.utcoffset(ambiguous)
... except AmbiguousTimeError:
... print('Ambiguous')
Ambiguous
'''
if dt is None:
return None
elif dt.tzinfo is not self:
dt = self.localize(dt, is_dst)
return dt.tzinfo._utcoffset
else:
return self._utcoffset
def dst(self, dt, is_dst=None):
    '''See datetime.tzinfo.dst

    The is_dst parameter may be used to remove ambiguity during DST
    transitions.

    >>> from pytz import timezone
    >>> tz = timezone('America/St_Johns')

    >>> normal = datetime(2009, 9, 1)

    >>> tz.dst(normal)
    datetime.timedelta(0, 3600)
    >>> tz.dst(normal, is_dst=False)
    datetime.timedelta(0, 3600)
    >>> tz.dst(normal, is_dst=True)
    datetime.timedelta(0, 3600)

    >>> ambiguous = datetime(2009, 10, 31, 23, 30)

    >>> tz.dst(ambiguous, is_dst=False)
    datetime.timedelta(0)
    >>> tz.dst(ambiguous, is_dst=True)
    datetime.timedelta(0, 3600)
    >>> try:
    ...     tz.dst(ambiguous)
    ... except AmbiguousTimeError:
    ...     print('Ambiguous')
    Ambiguous

    '''
    # No datetime -> no DST adjustment to report.
    if dt is None:
        return None
    # Foreign tzinfo: localize first so we look at the right transition
    # period for this zone, then read its DST component.
    if dt.tzinfo is not self:
        localized = self.localize(dt, is_dst)
        return localized.tzinfo._dst
    # dt already uses this exact tzinfo instance.
    return self._dst
def tzname(self, dt, is_dst=None):
    '''See datetime.tzinfo.tzname

    The is_dst parameter may be used to remove ambiguity during DST
    transitions.

    >>> from pytz import timezone
    >>> tz = timezone('America/St_Johns')

    >>> normal = datetime(2009, 9, 1)

    >>> tz.tzname(normal)
    'NDT'
    >>> tz.tzname(normal, is_dst=False)
    'NDT'
    >>> tz.tzname(normal, is_dst=True)
    'NDT'

    >>> ambiguous = datetime(2009, 10, 31, 23, 30)

    >>> tz.tzname(ambiguous, is_dst=False)
    'NST'
    >>> tz.tzname(ambiguous, is_dst=True)
    'NDT'
    >>> try:
    ...     tz.tzname(ambiguous)
    ... except AmbiguousTimeError:
    ...     print('Ambiguous')
    Ambiguous

    '''
    # Without a datetime there is no specific abbreviation; fall back to
    # the zone name itself.
    if dt is None:
        return self.zone
    # Foreign tzinfo: localize to find which abbreviation (e.g. NST vs
    # NDT) applies at that instant in this zone.
    if dt.tzinfo is not self:
        localized = self.localize(dt, is_dst)
        return localized.tzinfo._tzname
    # dt already uses this exact tzinfo instance.
    return self._tzname
def __repr__(self):
    """Return a debugging representation like <DstTzInfo 'Zone' ABBR+1:00:00 DST>."""
    # Positive offsets need an explicit '+'; negative timedeltas already
    # render with a leading '-', so no separator is added for them.
    mode = 'DST' if self._dst else 'STD'
    sign = '+' if self._utcoffset > _notime else ''
    return '<DstTzInfo %r %s%s%s %s>' % (
        self.zone, self._tzname, sign, self._utcoffset, mode
    )
def __reduce__(self):
    """Support pickling via the shared pytz._p factory.

    Routing through the factory keeps each zone a singleton on unpickle
    and lets old pickles survive timezone database changes.
    """
    # Offsets are stored as plain seconds so the pickle format is stable.
    state = (
        self.zone,
        _to_seconds(self._utcoffset),
        _to_seconds(self._dst),
        self._tzname,
    )
    return pytz._p, state
def unpickler(zone, utcoffset=None, dstoffset=None, tzname=None):
    """Factory function for unpickling pytz tzinfo instances.

    Shared by both StaticTzInfo and DstTzInfo pickles, because a timezone
    database update may switch a zone's implementation between those two
    base classes and old pickles must keep working across pytz upgrades.
    """
    # A KeyError here would mean the zone vanished from the database,
    # which should never happen and would be a bug.
    tz = pytz.timezone(zone)

    # StaticTzInfo pickles carry no offsets - the zone object itself is
    # the complete state.
    if utcoffset is None:
        return tz

    # DstTzInfo: pick the tzinfo instance for this zone that matches the
    # pickled state, so restored datetime instances keep behaving exactly
    # as they did before being pickled.
    utcoffset = memorized_timedelta(utcoffset)
    dstoffset = memorized_timedelta(dstoffset)
    try:
        return tz._tzinfos[(utcoffset, dstoffset, tzname)]
    except KeyError:
        # The exact requested state is gone: a corrupt pickle, a database
        # corrected violently enough to drop this (utcoffset, dstoffset)
        # pair, or a changed abbreviation. Fall through and search.
        pass

    # Tolerate a changed abbreviation: maintainers replace their initial
    # guesses with the real abbreviation once it is discovered, so try to
    # match on offsets alone.
    for candidate in list(tz._tzinfos.values()):
        if candidate._utcoffset == utcoffset and candidate._dst == dstoffset:
            return candidate

    # The (utcoffset, dstoffset) pair no longer exists in this zone at
    # all. Recreate it so datetimes pickled with the old (incorrect)
    # information continue to behave exactly as before - a purely
    # paranoid safety net that will likely never be needed in real life.
    inf = (utcoffset, dstoffset, tzname)
    tz._tzinfos[inf] = tz.__class__(inf, tz._tzinfos)
    return tz._tzinfos[inf]
| bsd-3-clause |
broferek/ansible | test/units/modules/network/f5/test_bigip_profile_dns.py | 22 | 4652 | # -*- coding: utf-8 -*-
#
# Copyright: (c) 2017, F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
import pytest
import sys
if sys.version_info < (2, 7):
pytestmark = pytest.mark.skip("F5 Ansible modules require Python >= 2.7")
from ansible.module_utils.basic import AnsibleModule
try:
from library.modules.bigip_profile_dns import ApiParameters
from library.modules.bigip_profile_dns import ModuleParameters
from library.modules.bigip_profile_dns import ModuleManager
from library.modules.bigip_profile_dns import ArgumentSpec
# In Ansible 2.8, Ansible changed import paths.
from test.units.compat import unittest
from test.units.compat.mock import Mock
from test.units.modules.utils import set_module_args
except ImportError:
from ansible.modules.network.f5.bigip_profile_dns import ApiParameters
from ansible.modules.network.f5.bigip_profile_dns import ModuleParameters
from ansible.modules.network.f5.bigip_profile_dns import ModuleManager
from ansible.modules.network.f5.bigip_profile_dns import ArgumentSpec
# Ansible 2.8 imports
from units.compat import unittest
from units.compat.mock import Mock
from units.modules.utils import set_module_args
# Directory holding test fixture files, plus a per-process memo cache
# keyed by absolute fixture path.
fixture_path = os.path.join(os.path.dirname(__file__), 'fixtures')
fixture_data = {}


def load_fixture(name):
    """Load and cache a fixture file from the fixtures directory.

    :param name: File name of the fixture, relative to ``fixture_path``.
    :return: The parsed object if the file contains valid JSON, otherwise
        the raw file contents as a string.
    """
    path = os.path.join(fixture_path, name)
    if path in fixture_data:
        return fixture_data[path]
    with open(path) as f:
        data = f.read()
    try:
        data = json.loads(data)
    except ValueError:
        # Only swallow JSON parse failures (JSONDecodeError subclasses
        # ValueError); non-JSON fixtures are returned as plain text.
        # The previous bare `except Exception` could hide real errors.
        pass
    fixture_data[path] = data
    return data
class TestParameters(unittest.TestCase):
    """Verify translation of raw inputs into module/API parameter objects."""

    def test_module_parameters(self):
        # All toggles on; 'parent' should be expanded to a full /Common/ path.
        raw = {
            'name': 'foo',
            'parent': 'bar',
            'enable_dns_express': True,
            'enable_zone_transfer': True,
            'enable_dnssec': True,
            'enable_gtm': True,
            'process_recursion_desired': True,
            'use_local_bind': True,
            'enable_dns_firewall': True,
        }
        params = ModuleParameters(params=raw)

        assert params.name == 'foo'
        assert params.parent == '/Common/bar'
        assert params.enable_dns_express is True
        assert params.enable_zone_transfer is True
        assert params.enable_dnssec is True
        assert params.enable_gtm is True
        assert params.process_recursion_desired is True
        assert params.use_local_bind is True
        assert params.enable_dns_firewall is True

    def test_api_parameters(self):
        # Fixture mirrors what BIG-IP returns for an existing DNS profile.
        params = ApiParameters(params=load_fixture('load_ltm_profile_dns_1.json'))

        assert params.name == 'foo'
        assert params.parent == '/Common/dns'
        assert params.enable_dns_express is False
        assert params.enable_zone_transfer is True
        assert params.enable_dnssec is False
        assert params.enable_gtm is False
        assert params.process_recursion_desired is True
        assert params.use_local_bind is False
        assert params.enable_dns_firewall is True
class TestManager(unittest.TestCase):
    """Exercise ModuleManager.exec_module with all device access stubbed out."""

    def setUp(self):
        self.spec = ArgumentSpec()

    def test_create(self, *args):
        # Simulate the arguments a playbook would hand to the module.
        set_module_args({
            'name': 'foo',
            'parent': 'bar',
            'enable_dns_express': True,
            'enable_zone_transfer': True,
            'enable_dnssec': True,
            'enable_gtm': True,
            'process_recursion_desired': True,
            'use_local_bind': True,
            'enable_dns_firewall': True,
            'provider': {
                'server': 'localhost',
                'password': 'password',
                'user': 'admin',
            },
        })

        module = AnsibleModule(
            argument_spec=self.spec.argument_spec,
            supports_check_mode=self.spec.supports_check_mode
        )
        manager = ModuleManager(module=module)

        # Stub device access: the profile does not yet exist, and creation
        # on the device "succeeds".
        manager.exists = Mock(return_value=False)
        manager.create_on_device = Mock(return_value=True)

        results = manager.exec_module()

        assert results['changed'] is True
        # Boolean options are reported back to Ansible in yes/no form.
        for key in ('enable_dns_express', 'enable_zone_transfer',
                    'enable_dnssec', 'enable_gtm',
                    'process_recursion_desired', 'use_local_bind',
                    'enable_dns_firewall'):
            assert results[key] == 'yes'
| gpl-3.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.