repo_name stringlengths 5 100 | path stringlengths 4 294 | copies stringclasses 990 values | size stringlengths 4 7 | content stringlengths 666 1M | license stringclasses 15 values |
|---|---|---|---|---|---|
mbalasso/mynumpy | doc/numpybook/comparison/weave/inline.py | 20 | 1191 | from scipy import weave
from numpy import zeros_like
from numpy.random import rand
def example1(a):
if not isinstance(a, list):
raise ValueError, "argument must be a list"
code = r"""
int i;
py::tuple results(2);
for (i=0; i<a.length(); i++) {
a[i] = i;
}
results[0] = 3.0;
results[1] = 4.0;
return_val = results;
"""
return weave.inline(code,['a'])
def arr(a):
if a.ndim != 2:
raise ValueError, "a must be 2-d"
code = r"""
int i,j;
for(i=1;i<Na[0]-1;i++) {
for(j=1;j<Na[1]-1;j++) {
B2(i,j) = A2(i,j) + A2(i-1,j)*0.5 +
A2(i+1,j)*0.5 + A2(i,j-1)*0.5
+ A2(i,j+1)*0.5
+ A2(i-1,j-1)*0.25
+ A2(i-1,j+1)*0.25
+ A2(i+1,j-1)*0.25
+ A2(i+1,j+1)*0.25;
}
}
"""
b = zeros_like(a)
weave.inline(code,['a','b'])
return b
a = [None]*10
print example1(a)
print a
a = rand(512,512)
b = arr(a)
h = [[0.25,0.5,0.25],[0.5,1,0.5],[0.25,0.5,0.25]]
import scipy.signal as ss
b2 = ss.convolve(a, h, 'same')  # 'same' sizes the output to the first argument
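# Hedged sanity check (not in the original example): the weave stencil only
# fills interior points, so compare against the convolution away from the
# boundary.
from numpy import allclose
print allclose(b[1:-1, 1:-1], b2[1:-1, 1:-1])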
| bsd-3-clause |
Antergos/antergos-welcome | src/client.py | 1 | 9962 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# client.py
#
# Copyright © 2015-2017 Antergos
#
# This file is part of antergos-welcome
#
# Antergos-welcome is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Antergos-welcome is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Antergos-welcome. If not, see <http://www.gnu.org/licenses/>.
import logging
import sys
import os
import gi
gi.require_version('Gtk', '3.0')
gi.require_version('Polkit', '1.0')
gi.require_version('Notify', '0.7')
from gi.repository import Gtk, GObject, Gio, GLib, Polkit, Notify
try:
from pydbus import SessionBus, SystemBus
from pydbus.generic import signal
except ImportError as err:
msg = "Can't import pydbus library: {}".format(err)
logging.error(msg)
print(msg)
sys.exit(-1)
def _(x):
return x
class SimpleWelcomed(GObject.GObject):
def __init__(self, packages, action=""):
GObject.GObject.__init__(self)
self._timeout = 100
self.packages = packages
self.action = action
self.refresh_before_install = False
self.loop = GLib.MainLoop()
self.client = WelcomedClient()
self.client.connect("command-finished", self.on_command_finished)
Notify.init("antergos-welcome")
def on_error(self, error):
my_message = str(error)
msg_dialog = Gtk.MessageDialog(transient_for=None,  # SimpleWelcomed is not a Gtk.Window
modal=True,
destroy_with_parent=True,
message_type=Gtk.MessageType.ERROR,
buttons=Gtk.ButtonsType.CLOSE,
text=_("Antergos Welcome - Error"))
msg_dialog.format_secondary_text(my_message)
msg_dialog.run()
msg_dialog.destroy()
def quit(self):
""" called when the app quits """
Notify.uninit()
self.loop.quit()
def on_command_finished(self, client, uid, command, pkgs):
# print("on_command_finished:, command)
self.notify(command, 'exit-success')
self.loop.quit()
def prepare_message(self, command, status):
dialog_type = 'dialog-information'
title = ""
msg = ""
if command in ('install', 'install_packages', 'install_package'):
if status == 'exit-success':
title = _("Installation succeeded!")
if len(self.packages) > 1:
msg = _('{} have been successfully installed').format(
' '.join(self.packages))
elif len(self.packages) == 1:
msg = _('{} has been successfully installed').format(
self.packages[0])
else:
msg = ""
elif status == 'processing':
title = _("Installation")
msg = _("Installing {} package(s)").format(
' '.join(self.packages))
else:
title = _("Installation failed!")
msg = _("Cannot install {} package(s)").format(
' '.join(self.packages))
dialog_type = 'dialog-error'
elif command in ('remove', 'remove_packages', 'remove_package'):
if status == 'exit-success':
title = _("Removal succeeded!")
if len(self.packages) > 1:
msg = _('{} have been successfully removed').format(
' '.join(self.packages))
elif len(self.packages) == 1:
msg = _('{} has been successfully removed').format(
self.packages[0])
else:
msg = ""
elif status == 'processing':
title = _("Removal")
msg = _("Removing {} package(s)").format(
' '.join(self.packages))
else:
title = _("Removal failed!")
msg = _("Cannot remove {} package(s)").format(
' '.join(self.packages))
dialog_type = 'dialog-error'
elif command in ('refresh', 'refresh_alpm'):
if status == 'exit-success':
title = _("System refresh succeeded!")
msg = _("System databases updated successfully")
elif status == 'processing':
title = _("System refresh")
msg = _("Updating system databases...")
else:
title = _("System refresh failed!")
msg = _("Cannot update system databases!")
dialog_type = 'dialog-error'
elif command == 'system_upgrade':
if status == 'exit-success':
title = _("System upgrade succeeded!")
msg = _("System upgraded successfully")
elif status == 'processing':
title = _("System upgrade")
msg = _("Upgrading system...")
else:
title = _("System upgrade failed!")
msg = _("Cannot upgrade system!")
dialog_type = 'dialog-error'
else:
title = _("Unknown action!")
msg = _("Action '{}' is unknown").format(command)
dialog_type = 'dialog-error'
return (title, msg, dialog_type)
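# Illustration of the mapping above: with self.packages == ['foo'],
# prepare_message('install', 'exit-success') returns
# ("Installation succeeded!", "foo has been successfully installed",
# 'dialog-information')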
def notify(self, command, status):
# print('Status: ' + status)
(title, msg, dialog_type) = self.prepare_message(command, status)
Notify.Notification.new(title, msg, dialog_type).show()
def _do_install_packages(self):
self.notify('install', 'processing')
self.client.install_packages(self.packages)
return False
def _do_remove_packages(self):
self.notify('remove', 'processing')
self.client.remove_packages(self.packages)
return False
def _do_refresh(self):
self.notify('refresh', 'processing')
self.client.refresh()
return False
def _do_system_upgrade(self):
self.notify('system_upgrade', 'processing')
self.client.system_upgrade()
return False
def run_action(self):
if self.client.welcomed_ok:
if self.action == "refresh":
self.refresh()
elif self.action == "system_upgrade":
self.system_upgrade()
elif self.action == "install":
self.install_packages()
elif self.action == "remove":
self.remove_packages()
def refresh(self):
GLib.timeout_add(self._timeout, self._do_refresh)
self.loop.run()
def install_packages(self):
if self.refresh_before_install:
GLib.timeout_add(self._timeout, self._do_refresh)
else:
GLib.timeout_add(self._timeout, self._do_install_packages)
self.loop.run()
def remove_packages(self):
GLib.timeout_add(self._timeout, self._do_remove_packages)
self.loop.run()
def system_upgrade(self):
GLib.timeout_add(self._timeout, self._do_system_upgrade)
self.loop.run()
class WelcomedClient(GObject.GObject):
_name = 'com.antergos.welcome'
_object_path = '/com/antergos/welcome'
_interface_name = 'com.antergos.welcome'
__gsignals__ = {
'command-finished': (GObject.SignalFlags.RUN_FIRST, None,
(str, str, GObject.TYPE_PYOBJECT))
}
def __init__(self):
GObject.GObject.__init__(self)
self.interface = None
self.welcomed_ok = False
try:
self.bus = SystemBus()
self.dbus_proxy = self.bus.get(
WelcomedClient._name,
WelcomedClient._object_path)
if not self.dbus_proxy:
self.welcomed_ok = False
else:
self.dbus_proxy.PropertiesChanged.connect(
self.on_properties_changed)
self.welcomed_ok = self.dbus_proxy.is_alpm_on()
except Exception as err:
print(err)
finally:
if not self.welcomed_ok:
msg = _("Can't find Welcome d-bus service. Is it really installed?")
Notify.init("antergos-welcome")
Notify.Notification.new(
_("ERROR!"), msg, 'dialog-error').show()
def refresh(self):
""" pacman -Sy """
return self.dbus_proxy.refresh_alpm()
def on_properties_changed(self, *params):
""" A d-bus server property has changed """
(sender, prop, not_used) = params
# print("PARAMS:", params)
if sender == WelcomedClient._name and 'command_finished' in prop.keys():
(uid, command, pkgs) = prop['command_finished']
self.emit("command-finished", uid, command, pkgs)
def install_package(self, pkg):
""" pacman -S pkg """
return self.dbus_proxy.install_package(pkg)
def install_packages(self, pkgs):
""" pacman -S pkgs """
return self.dbus_proxy.install_packages(pkgs)
def remove_package(self, package):
""" pacman -R pkg """
return self.dbus_proxy.remove_package(package)
def remove_packages(self, pkgs):
""" pacman -R pkgs """
for pkg in pkgs:
self.remove_package(pkg)
def check_updates(self):
return self.dbus_proxy.check_updates()
def system_upgrade(self):
return self.dbus_proxy.system_upgrade()
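# Hedged usage sketch (not in the original file): requires the welcomed
# d-bus service to be running; the package name is illustrative.
if __name__ == '__main__':
SimpleWelcomed(packages=['gparted'], action='install').run_action()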
| gpl-2.0 |
grap/OpenUpgrade | addons/document/content_index.py | 430 | 6619 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import logging
import os
import tempfile
from subprocess import Popen, PIPE
_logger = logging.getLogger(__name__)
class NhException(Exception):
pass
class indexer(object):
""" An indexer knows how to parse the content of some file.
Typically, one indexer should be instantiated per file
type.
Override this class to add more functionality. Note that
you should only override the Content or the File methods
that give an optimal result. """
def _getMimeTypes(self):
""" Return supported mimetypes """
return []
def _getExtensions(self):
return []
def _getDefMime(self, ext):
""" Return a mimetype for this document type, ideally the
closest to the extension ext. """
mts = self._getMimeTypes()
if len (mts):
return mts[0]
return None
def indexContent(self, content, filename=None, realfile=None):
""" Use either content or the real file, to index.
Some parsers will work better with the actual
content, others parse a file easier. Try the
optimal.
"""
res = ''
try:
if content is not None:
return self._doIndexContent(content)
except NhException:
pass
if realfile is not None:
try:
return self._doIndexFile(realfile)
except NhException:
pass
fp = open(realfile,'rb')
try:
content2 = fp.read()
finally:
fp.close()
# The not-handled exception may be raised here
return self._doIndexContent(content2)
# last try, with a tmp file
if content:
try:
fname,ext = filename and os.path.splitext(filename) or ('','')
fd, rfname = tempfile.mkstemp(suffix=ext)
os.write(fd, content)
os.close(fd)
res = self._doIndexFile(rfname)
os.unlink(rfname)
return res
except NhException:
pass
raise NhException('No appropriate method to index file.')
def _doIndexContent(self, content):
raise NhException("Content cannot be handled here.")
def _doIndexFile(self, fpath):
raise NhException("Content cannot be handled here.")
def __repr__(self):
return "<indexer %s.%s>" %(self.__module__, self.__class__.__name__)
def mime_match(mime, mdict):
if mdict.has_key(mime):
return (mime, mdict[mime])
if '/' in mime:
mpat = mime.split('/')[0]+'/*'
if mdict.has_key(mpat):
return (mime, mdict[mpat])
return (None, None)
class contentIndex(object):
def __init__(self):
self.mimes = {}
self.exts = {}
def register(self, obj):
f = False
for mime in obj._getMimeTypes():
self.mimes[mime] = obj
f = True
for ext in obj._getExtensions():
self.exts[ext] = obj
f = True
if f:
_logger.debug('Register content indexer: %r.', obj)
if not f:
raise Exception("Your indexer should at least support a mimetype or extension.")
def doIndex(self, content, filename=None, content_type=None, realfname=None, debug=False):
fobj = None
fname = None
mime = None
if content_type and self.mimes.has_key(content_type):
mime = content_type
fobj = self.mimes[content_type]
elif filename:
bname,ext = os.path.splitext(filename)
if self.exts.has_key(ext):
fobj = self.exts[ext]
mime = fobj._getDefMime(ext)
if content_type and not fobj:
mime,fobj = mime_match(content_type, self.mimes)
if not fobj:
try:
if realfname :
fname = realfname
else:
try:
bname,ext = os.path.splitext(filename or 'test.tmp')
except Exception:
bname, ext = filename, 'tmp'
fd, fname = tempfile.mkstemp(suffix=ext)
os.write(fd, content)
os.close(fd)
pop = Popen(['file','-b','--mime',fname], shell=False, stdout=PIPE)
(result, _) = pop.communicate()
mime2 = result.split(';')[0]
_logger.debug('File gives us: %s', mime2)
# Note that the temporary file still exists now.
mime,fobj = mime_match(mime2, self.mimes)
if not mime:
mime = mime2
except Exception:
_logger.exception('Cannot determine mime type.')
try:
if fobj:
res = (mime, fobj.indexContent(content,filename,fname or realfname) )
else:
_logger.debug("Have no object, return (%s, None).", mime)
res = (mime, '')
except Exception:
_logger.exception("Cannot index file %s (%s).",
filename, fname or realfname)
res = (mime, '')
# If we created a tmp file, unlink it now
if not realfname and fname:
try:
os.unlink(fname)
except Exception:
_logger.exception("Cannot unlink %s.", fname)
return res
cntIndex = contentIndex()
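# Hedged usage sketch (illustrative, not part of the original module):
# register the txt_indexer sketched above, then index a small document.
if __name__ == '__main__':
cntIndex.register(txt_indexer())
print cntIndex.doIndex('hello world', filename='note.txt')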
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
mdavid/pledgeservice | testlib/mox.py | 13 | 56547 | #!/usr/bin/python2.4
#
# Copyright 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Mox, an object-mocking framework for Python.
Mox works in the record-replay-verify paradigm. When you first create
a mock object, it is in record mode. You then programmatically set
the expected behavior of the mock object (what methods are to be
called on it, with what parameters, what they should return, and in
what order).
Once you have set up the expected mock behavior, you put it in replay
mode. Now the mock responds to method calls just as you told it to.
If an unexpected method (or an expected method with unexpected
parameters) is called, then an exception will be raised.
Once you are done interacting with the mock, you need to verify that
all the expected interactions occurred. (Maybe your code exited
prematurely without calling some cleanup method!) The verify phase
ensures that every expected method was called; otherwise, an exception
will be raised.
WARNING! Mock objects created by Mox are not thread-safe. If you call
a mock from multiple threads, it should be guarded by a mutex.
TODO(stevepm): Add the option to make mocks thread-safe!
Suggested usage / workflow:
# Create Mox factory
my_mox = Mox()
# Create a mock data access object
mock_dao = my_mox.CreateMock(DAOClass)
# Set up expected behavior
mock_dao.RetrievePersonWithIdentifier('1').AndReturn(person)
mock_dao.DeletePerson(person)
# Put mocks in replay mode
my_mox.ReplayAll()
# Inject mock object and run test
controller.SetDao(mock_dao)
controller.DeletePersonById('1')
# Verify all methods were called as expected
my_mox.VerifyAll()
"""
from collections import deque
import difflib
import inspect
import re
import types
import unittest
import stubout
class Error(AssertionError):
"""Base exception for this module."""
pass
class ExpectedMethodCallsError(Error):
"""Raised when Verify() is called before all expected methods have been called
"""
def __init__(self, expected_methods):
"""Init exception.
Args:
# expected_methods: A sequence of MockMethod objects that should have been
# called.
expected_methods: [MockMethod]
Raises:
ValueError: if expected_methods contains no methods.
"""
if not expected_methods:
raise ValueError("There must be at least one expected method")
Error.__init__(self)
self._expected_methods = expected_methods
def __str__(self):
calls = "\n".join(["%3d. %s" % (i, m)
for i, m in enumerate(self._expected_methods)])
return "Verify: Expected methods never called:\n%s" % (calls,)
class UnexpectedMethodCallError(Error):
"""Raised when an unexpected method is called.
This can occur if a method is called with incorrect parameters, or out of the
specified order.
"""
def __init__(self, unexpected_method, expected):
"""Init exception.
Args:
# unexpected_method: MockMethod that was called but was not at the head of
# the expected_method queue.
# expected: MockMethod or UnorderedGroup the method should have
# been in.
unexpected_method: MockMethod
expected: MockMethod or UnorderedGroup
"""
Error.__init__(self)
if expected is None:
self._str = "Unexpected method call %s" % (unexpected_method,)
else:
differ = difflib.Differ()
diff = differ.compare(str(unexpected_method).splitlines(True),
str(expected).splitlines(True))
self._str = ("Unexpected method call. unexpected:- expected:+\n%s"
% ("\n".join(diff),))
def __str__(self):
return self._str
class UnknownMethodCallError(Error):
"""Raised if an unknown method is requested of the mock object."""
def __init__(self, unknown_method_name):
"""Init exception.
Args:
# unknown_method_name: Method call that is not part of the mocked class's
# public interface.
unknown_method_name: str
"""
Error.__init__(self)
self._unknown_method_name = unknown_method_name
def __str__(self):
return "Method called is not a member of the object: %s" % \
self._unknown_method_name
class PrivateAttributeError(Error):
"""
Raised if a MockObject is passed a private additional attribute name.
"""
def __init__(self, attr):
Error.__init__(self)
self._attr = attr
def __str__(self):
return ("Attribute '%s' is private and should not be available in a mock "
"object." % attr)
class ExpectedMockCreationError(Error):
"""Raised if mocks should have been created by StubOutClassWithMocks."""
def __init__(self, expected_mocks):
"""Init exception.
Args:
# expected_mocks: A sequence of MockObjects that should have been
# created
Raises:
ValueError: if expected_mocks contains no mocks.
"""
if not expected_mocks:
raise ValueError("There must be at least one expected method")
Error.__init__(self)
self._expected_mocks = expected_mocks
def __str__(self):
mocks = "\n".join(["%3d. %s" % (i, m)
for i, m in enumerate(self._expected_mocks)])
return "Verify: Expected mocks never created:\n%s" % (mocks,)
class UnexpectedMockCreationError(Error):
"""Raised if too many mocks were created by StubOutClassWithMocks."""
def __init__(self, instance, *params, **named_params):
"""Init exception.
Args:
# instance: the type of object that was created
# params: parameters given during instantiation
# named_params: named parameters given during instantiation
"""
Error.__init__(self)
self._instance = instance
self._params = params
self._named_params = named_params
def __str__(self):
args = ", ".join(["%s" % v for i, v in enumerate(self._params)])
error = "Unexpected mock creation: %s(%s" % (self._instance, args)
if self._named_params:
error += ", " + ", ".join(["%s=%s" % (k, v) for k, v in
self._named_params.iteritems()])
error += ")"
return error
class Mox(object):
"""Mox: a factory for creating mock objects."""
# A list of types that should be stubbed out with MockObjects (as
# opposed to MockAnythings).
_USE_MOCK_OBJECT = [types.ClassType, types.FunctionType, types.InstanceType,
types.ModuleType, types.ObjectType, types.TypeType,
types.MethodType, types.UnboundMethodType,
]
# A list of types that may be stubbed out with a MockObjectFactory.
_USE_MOCK_FACTORY = [types.ClassType, types.ObjectType, types.TypeType]
def __init__(self):
"""Initialize a new Mox."""
self._mock_objects = []
self.stubs = stubout.StubOutForTesting()
def CreateMock(self, class_to_mock, attrs={}):
"""Create a new mock object.
Args:
# class_to_mock: the class to be mocked
class_to_mock: class
attrs: dict of attribute names to values that will be set on the mock
object. Only public attributes may be set.
Returns:
MockObject that can be used as the class_to_mock would be.
"""
new_mock = MockObject(class_to_mock, attrs=attrs)
self._mock_objects.append(new_mock)
return new_mock
def CreateMockAnything(self, description=None):
"""Create a mock that will accept any method calls.
This does not enforce an interface.
Args:
description: str. Optionally, a descriptive name for the mock object being
created, for debugging output purposes.
"""
new_mock = MockAnything(description=description)
self._mock_objects.append(new_mock)
return new_mock
def ReplayAll(self):
"""Set all mock objects to replay mode."""
for mock_obj in self._mock_objects:
mock_obj._Replay()
def VerifyAll(self):
"""Call verify on all mock objects created."""
for mock_obj in self._mock_objects:
mock_obj._Verify()
def ResetAll(self):
"""Call reset on all mock objects. This does not unset stubs."""
for mock_obj in self._mock_objects:
mock_obj._Reset()
def StubOutWithMock(self, obj, attr_name, use_mock_anything=False):
"""Replace a method, attribute, etc. with a Mock.
This will replace a class or module with a MockObject, and everything else
(method, function, etc) with a MockAnything. This can be overridden to
always use a MockAnything by setting use_mock_anything to True.
Args:
obj: A Python object (class, module, instance, callable).
attr_name: str. The name of the attribute to replace with a mock.
use_mock_anything: bool. True if a MockAnything should be used regardless
of the type of attribute.
"""
attr_to_replace = getattr(obj, attr_name)
attr_type = type(attr_to_replace)
if attr_type == MockAnything or attr_type == MockObject:
raise TypeError('Cannot mock a MockAnything! Did you remember to '
'call UnsetStubs in your previous test?')
if attr_type in self._USE_MOCK_OBJECT and not use_mock_anything:
stub = self.CreateMock(attr_to_replace)
else:
stub = self.CreateMockAnything(description='Stub for %s' % attr_to_replace)
self.stubs.Set(obj, attr_name, stub)
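# Hedged usage sketch (the stubbed names are illustrative):
# m = Mox()
# m.StubOutWithMock(os.path, 'exists')
# os.path.exists('/tmp/x').AndReturn(True)
# m.ReplayAll()
# ... exercise the code under test ...
# m.VerifyAll()
# m.UnsetStubs()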
def StubOutClassWithMocks(self, obj, attr_name):
"""Replace a class with a "mock factory" that will create mock objects.
This is useful if the code-under-test directly instantiates
dependencies. Previously some boilerplate was necessary to
create a mock that would act as a factory. Using
StubOutClassWithMocks, once you've stubbed out the class you may
use the stubbed class as you would any other mock created by mox:
during the record phase, new mock instances will be created, and
during replay, the recorded mocks will be returned.
# Example using StubOutWithMock (the old, clunky way):
mock1 = mox.CreateMock(my_import.FooClass)
mock2 = mox.CreateMock(my_import.FooClass)
foo_factory = mox.StubOutWithMock(my_import, 'FooClass',
use_mock_anything=True)
foo_factory(1, 2).AndReturn(mock1)
foo_factory(9, 10).AndReturn(mock2)
mox.ReplayAll()
my_import.FooClass(1, 2) # Returns mock1 again.
my_import.FooClass(9, 10) # Returns mock2 again.
mox.VerifyAll()
# Example using StubOutClassWithMocks:
mox.StubOutClassWithMocks(my_import, 'FooClass')
mock1 = my_import.FooClass(1, 2) # Returns a new mock of FooClass
mock2 = my_import.FooClass(9, 10) # Returns another mock instance
mox.ReplayAll()
my_import.FooClass(1, 2) # Returns mock1 again.
my_import.FooClass(9, 10) # Returns mock2 again.
mox.VerifyAll()
"""
attr_to_replace = getattr(obj, attr_name)
attr_type = type(attr_to_replace)
if attr_type == MockAnything or attr_type == MockObject:
raise TypeError('Cannot mock a MockAnything! Did you remember to '
'call UnsetStubs in your previous test?')
if attr_type not in self._USE_MOCK_FACTORY:
raise TypeError('Given attr is not a Class. Use StubOutWithMock.')
factory = _MockObjectFactory(attr_to_replace, self)
self._mock_objects.append(factory)
self.stubs.Set(obj, attr_name, factory)
def UnsetStubs(self):
"""Restore stubs to their original state."""
self.stubs.UnsetAll()
def Replay(*args):
"""Put mocks into Replay mode.
Args:
# args is any number of mocks to put into replay mode.
"""
for mock in args:
mock._Replay()
def Verify(*args):
"""Verify mocks.
Args:
# args is any number of mocks to be verified.
"""
for mock in args:
mock._Verify()
def Reset(*args):
"""Reset mocks.
Args:
# args is any number of mocks to be reset.
"""
for mock in args:
mock._Reset()
class MockAnything:
"""A mock that can be used to mock anything.
This is helpful for mocking classes that do not provide a public interface.
"""
def __init__(self, description=None):
"""Initialize a new MockAnything.
Args:
description: str. Optionally, a descriptive name for the mock object being
created, for debugging output purposes.
"""
self._description = description
self._Reset()
def __repr__(self):
if self._description:
return '<MockAnything instance of %s>' % self._description
else:
return '<MockAnything instance>'
def __getattr__(self, method_name):
"""Intercept method calls on this object.
A new MockMethod is returned that is aware of the MockAnything's
state (record or replay). The call will be recorded or replayed
by the MockMethod's __call__.
Args:
# method name: the name of the method being called.
method_name: str
Returns:
A new MockMethod aware of MockAnything's state (record or replay).
"""
return self._CreateMockMethod(method_name)
def _CreateMockMethod(self, method_name, method_to_mock=None):
"""Create a new mock method call and return it.
Args:
# method_name: the name of the method being called.
# method_to_mock: The actual method being mocked, used for introspection.
method_name: str
method_to_mock: a method object
Returns:
A new MockMethod aware of MockAnything's state (record or replay).
"""
return MockMethod(method_name, self._expected_calls_queue,
self._replay_mode, method_to_mock=method_to_mock,
description=self._description)
def __nonzero__(self):
"""Return 1 for nonzero so the mock can be used as a conditional."""
return 1
def __eq__(self, rhs):
"""Provide custom logic to compare objects."""
return (isinstance(rhs, MockAnything) and
self._replay_mode == rhs._replay_mode and
self._expected_calls_queue == rhs._expected_calls_queue)
def __ne__(self, rhs):
"""Provide custom logic to compare objects."""
return not self == rhs
def _Replay(self):
"""Start replaying expected method calls."""
self._replay_mode = True
def _Verify(self):
"""Verify that all of the expected calls have been made.
Raises:
ExpectedMethodCallsError: if there are still more method calls in the
expected queue.
"""
# If the list of expected calls is not empty, raise an exception
if self._expected_calls_queue:
# The last MultipleTimesGroup is not popped from the queue.
if (len(self._expected_calls_queue) == 1 and
isinstance(self._expected_calls_queue[0], MultipleTimesGroup) and
self._expected_calls_queue[0].IsSatisfied()):
pass
else:
raise ExpectedMethodCallsError(self._expected_calls_queue)
def _Reset(self):
"""Reset the state of this mock to record mode with an empty queue."""
# Maintain a list of method calls we are expecting
self._expected_calls_queue = deque()
# Make sure we are in setup mode, not replay mode
self._replay_mode = False
class MockObject(MockAnything, object):
"""A mock object that simulates the public/protected interface of a class."""
def __init__(self, class_to_mock, attrs={}):
"""Initialize a mock object.
This determines the methods and properties of the class and stores them.
Args:
# class_to_mock: class to be mocked
class_to_mock: class
attrs: dict of attribute names to values that will be set on the mock
object. Only public attributes may be set.
Raises:
PrivateAttributeError: if a supplied attribute is not public.
ValueError: if an attribute would mask an existing method.
"""
# This is used to hack around the mixin/inheritance of MockAnything, which
# is not a proper object (it can be anything. :-)
MockAnything.__dict__['__init__'](self)
# Get a list of all the public and special methods we should mock.
self._known_methods = set()
self._known_vars = set()
self._class_to_mock = class_to_mock
try:
self._description = class_to_mock.__name__
# If class_to_mock is a mock itself, then we'll get an UnknownMethodCall
# error here from the underlying call to __getattr__('__name__')
except (UnknownMethodCallError, AttributeError):
try:
self._description = type(class_to_mock).__name__
except AttributeError:
pass
for method in dir(class_to_mock):
attr = getattr(class_to_mock, method)
if callable(attr):
self._known_methods.add(method)
elif not (type(attr) is property):
# treating properties as class vars makes little sense.
self._known_vars.add(method)
# Set additional attributes at instantiation time; this is quicker
# than manually setting attributes that are normally created in
# __init__.
for attr, value in attrs.items():
if attr.startswith("_"):
raise PrivateAttributeError(attr)
elif attr in self._known_methods:
raise ValueError("'%s' is a method of '%s' objects." % (attr,
class_to_mock))
else:
setattr(self, attr, value)
def __getattr__(self, name):
"""Intercept attribute request on this object.
If the attribute is a public class variable, it will be returned and not
recorded as a call.
If the attribute is not a variable, it is handled like a method
call. The method name is checked against the set of mockable
methods, and a new MockMethod is returned that is aware of the
MockObject's state (record or replay). The call will be recorded
or replayed by the MockMethod's __call__.
Args:
# name: the name of the attribute being requested.
name: str
Returns:
Either a class variable or a new MockMethod that is aware of the state
of the mock (record or replay).
Raises:
UnknownMethodCallError if the MockObject does not mock the requested
method.
"""
if name in self._known_vars:
return getattr(self._class_to_mock, name)
if name in self._known_methods:
return self._CreateMockMethod(
name,
method_to_mock=getattr(self._class_to_mock, name))
raise UnknownMethodCallError(name)
def __eq__(self, rhs):
"""Provide custom logic to compare objects."""
return (isinstance(rhs, MockObject) and
self._class_to_mock == rhs._class_to_mock and
self._replay_mode == rhs._replay_mode and
self._expected_calls_queue == rhs._expected_calls_queue)
def __setitem__(self, key, value):
"""Provide custom logic for mocking classes that support item assignment.
Args:
key: Key to set the value for.
value: Value to set.
Returns:
Expected return value in replay mode. A MockMethod object for the
__setitem__ method that has already been called if not in replay mode.
Raises:
TypeError if the underlying class does not support item assignment.
UnexpectedMethodCallError if the object does not expect the call to
__setitem__.
"""
# Verify the class supports item assignment.
if '__setitem__' not in dir(self._class_to_mock):
raise TypeError('object does not support item assignment')
# If we are in replay mode then simply call the mock __setitem__ method.
if self._replay_mode:
return MockMethod('__setitem__', self._expected_calls_queue,
self._replay_mode)(key, value)
# Otherwise, create a mock method __setitem__.
return self._CreateMockMethod('__setitem__')(key, value)
def __getitem__(self, key):
"""Provide custom logic for mocking classes that are subscriptable.
Args:
key: Key to return the value for.
Returns:
Expected return value in replay mode. A MockMethod object for the
__getitem__ method that has already been called if not in replay mode.
Raises:
TypeError if the underlying class is not subscriptable.
UnexpectedMethodCallError if the object does not expect the call to
__getitem__.
"""
# Verify the class supports item assignment.
if '__getitem__' not in dir(self._class_to_mock):
raise TypeError('unsubscriptable object')
# If we are in replay mode then simply call the mock __getitem__ method.
if self._replay_mode:
return MockMethod('__getitem__', self._expected_calls_queue,
self._replay_mode)(key)
# Otherwise, create a mock method __getitem__.
return self._CreateMockMethod('__getitem__')(key)
def __iter__(self):
"""Provide custom logic for mocking classes that are iterable.
Returns:
Expected return value in replay mode. A MockMethod object for the
__iter__ method that has already been called if not in replay mode.
Raises:
TypeError if the underlying class is not iterable.
UnexpectedMethodCallError if the object does not expect the call to
__iter__.
"""
methods = dir(self._class_to_mock)
# Verify the class supports iteration.
if '__iter__' not in methods:
# If it doesn't have iter method and we are in replay method, then try to
# iterate using subscripts.
if '__getitem__' not in methods or not self._replay_mode:
raise TypeError('not iterable object')
else:
results = []
index = 0
try:
while True:
results.append(self[index])
index += 1
except IndexError:
return iter(results)
# If we are in replay mode then simply call the mock __iter__ method.
if self._replay_mode:
return MockMethod('__iter__', self._expected_calls_queue,
self._replay_mode)()
# Otherwise, create a mock method __iter__.
return self._CreateMockMethod('__iter__')()
def __contains__(self, key):
"""Provide custom logic for mocking classes that contain items.
Args:
key: Key to look in container for.
Returns:
Expected return value in replay mode. A MockMethod object for the
__contains__ method that has already been called if not in replay mode.
Raises:
TypeError if the underlying class does not implement __contains__
UnexpectedMethodCallError if the object does not expect the call to
__contains__.
"""
contains = self._class_to_mock.__dict__.get('__contains__', None)
if contains is None:
raise TypeError('unsubscriptable object')
if self._replay_mode:
return MockMethod('__contains__', self._expected_calls_queue,
self._replay_mode)(key)
return self._CreateMockMethod('__contains__')(key)
def __call__(self, *params, **named_params):
"""Provide custom logic for mocking classes that are callable."""
# Verify the class we are mocking is callable.
callable = hasattr(self._class_to_mock, '__call__')
if not callable:
raise TypeError('Not callable')
# Because the call is happening directly on this object instead of a method,
# the call on the mock method is made right here
# If we are mocking a Function, then use the function, and not the
# __call__ method
method = None
if type(self._class_to_mock) in (types.FunctionType, types.MethodType):
method = self._class_to_mock
else:
method = getattr(self._class_to_mock, '__call__')
mock_method = self._CreateMockMethod('__call__', method_to_mock=method)
return mock_method(*params, **named_params)
@property
def __class__(self):
"""Return the class that is being mocked."""
return self._class_to_mock
class _MockObjectFactory(MockObject):
"""A MockObjectFactory creates mocks and verifies __init__ params.
A MockObjectFactory removes the boilerplate code that was previously
necessary to stub out direct instantiation of a class.
The MockObjectFactory creates new MockObjects when called and verifies the
__init__ params are correct when in record mode. When replaying, existing
mocks are returned, and the __init__ params are verified.
See StubOutWithMock vs StubOutClassWithMocks for more detail.
"""
def __init__(self, class_to_mock, mox_instance):
MockObject.__init__(self, class_to_mock)
self._mox = mox_instance
self._instance_queue = deque()
def __call__(self, *params, **named_params):
"""Instantiate and record that a new mock has been created."""
method = getattr(self._class_to_mock, '__init__')
mock_method = self._CreateMockMethod('__init__', method_to_mock=method)
# Note: calling mock_method() is deferred in order to catch the
# empty instance_queue first.
if self._replay_mode:
if not self._instance_queue:
raise UnexpectedMockCreationError(self._class_to_mock, *params,
**named_params)
mock_method(*params, **named_params)
return self._instance_queue.pop()
else:
mock_method(*params, **named_params)
instance = self._mox.CreateMock(self._class_to_mock)
self._instance_queue.appendleft(instance)
return instance
def _Verify(self):
"""Verify that all mocks have been created."""
if self._instance_queue:
raise ExpectedMockCreationError(self._instance_queue)
super(_MockObjectFactory, self)._Verify()
class MethodSignatureChecker(object):
"""Ensures that methods are called correctly."""
_NEEDED, _DEFAULT, _GIVEN = range(3)
def __init__(self, method):
"""Creates a checker.
Args:
# method: A method to check.
method: function
Raises:
ValueError: method could not be inspected, so checks aren't possible.
Some methods and functions like built-ins can't be inspected.
"""
try:
self._args, varargs, varkw, defaults = inspect.getargspec(method)
except TypeError:
raise ValueError('Could not get argument specification for %r'
% (method,))
if inspect.ismethod(method):
self._args = self._args[1:] # Skip 'self'.
self._method = method
self._instance = None # May contain the instance this is bound to.
self._has_varargs = varargs is not None
self._has_varkw = varkw is not None
if defaults is None:
self._required_args = self._args
self._default_args = []
else:
self._required_args = self._args[:-len(defaults)]
self._default_args = self._args[-len(defaults):]
def _RecordArgumentGiven(self, arg_name, arg_status):
"""Mark an argument as being given.
Args:
# arg_name: The name of the argument to mark in arg_status.
# arg_status: Maps argument names to one of _NEEDED, _DEFAULT, _GIVEN.
arg_name: string
arg_status: dict
Raises:
AttributeError: arg_name is already marked as _GIVEN.
"""
if arg_status.get(arg_name, None) == MethodSignatureChecker._GIVEN:
raise AttributeError('%s provided more than once' % (arg_name,))
arg_status[arg_name] = MethodSignatureChecker._GIVEN
def Check(self, params, named_params):
"""Ensures that the parameters used while recording a call are valid.
Args:
# params: A list of positional parameters.
# named_params: A dict of named parameters.
params: list
named_params: dict
Raises:
AttributeError: the given parameters don't work with the given method.
"""
arg_status = dict((a, MethodSignatureChecker._NEEDED)
for a in self._required_args)
for arg in self._default_args:
arg_status[arg] = MethodSignatureChecker._DEFAULT
# WARNING: Suspect hack ahead.
#
# Check to see if this is an unbound method, where the instance
# should be bound as the first argument. We try to determine if
# the first argument (param[0]) is an instance of the class, or it
# is equivalent to the class (used to account for Comparators).
#
# NOTE: If a Func() comparator is used, and the signature is not
# correct, this will cause extra executions of the function.
if inspect.ismethod(self._method):
# The extra param accounts for the bound instance.
if len(params) == len(self._args) + 1:
clazz = getattr(self._method, 'im_class', None)
if isinstance(params[0], clazz) or params[0] == clazz:
params = params[1:]
# Check that each positional param is valid.
for i in range(len(params)):
try:
arg_name = self._args[i]
except IndexError:
if not self._has_varargs:
raise AttributeError('%s does not take %d or more positional '
'arguments' % (self._method.__name__, i))
else:
self._RecordArgumentGiven(arg_name, arg_status)
# Check each keyword argument.
for arg_name in named_params:
if arg_name not in arg_status and not self._has_varkw:
raise AttributeError('%s is not expecting keyword argument %s'
% (self._method.__name__, arg_name))
self._RecordArgumentGiven(arg_name, arg_status)
# Ensure all the required arguments have been given.
still_needed = [k for k, v in arg_status.iteritems()
if v == MethodSignatureChecker._NEEDED]
if still_needed:
raise AttributeError('No values given for arguments: %s'
% (' '.join(sorted(still_needed))))
class MockMethod(object):
"""Callable mock method.
A MockMethod should act exactly like the method it mocks, accepting parameters
and returning a value, or throwing an exception (as specified). When this
method is called, it can optionally verify whether the called method (name and
signature) matches the expected method.
"""
def __init__(self, method_name, call_queue, replay_mode,
method_to_mock=None, description=None):
"""Construct a new mock method.
Args:
# method_name: the name of the method
# call_queue: deque of calls, verify this call against the head, or add
# this call to the queue.
# replay_mode: False if we are recording, True if we are verifying calls
# against the call queue.
# method_to_mock: The actual method being mocked, used for introspection.
# description: optionally, a descriptive name for this method. Typically
# this is equal to the descriptive name of the method's class.
method_name: str
call_queue: list or deque
replay_mode: bool
method_to_mock: a method object
description: str or None
"""
self._name = method_name
self.__name__ = method_name
self._call_queue = call_queue
if not isinstance(call_queue, deque):
self._call_queue = deque(self._call_queue)
self._replay_mode = replay_mode
self._description = description
self._params = None
self._named_params = None
self._return_value = None
self._exception = None
self._side_effects = None
try:
self._checker = MethodSignatureChecker(method_to_mock)
except ValueError:
self._checker = None
def __call__(self, *params, **named_params):
"""Log parameters and return the specified return value.
If the Mock(Anything/Object) associated with this call is in record mode,
this MockMethod will be pushed onto the expected call queue. If the mock
is in replay mode, this will pop a MockMethod off the top of the queue and
verify this call is equal to the expected call.
Raises:
UnexpectedMethodCall if this call is supposed to match an expected method
call and it does not.
"""
self._params = params
self._named_params = named_params
if not self._replay_mode:
if self._checker is not None:
self._checker.Check(params, named_params)
self._call_queue.append(self)
return self
expected_method = self._VerifyMethodCall()
if expected_method._side_effects:
result = expected_method._side_effects(*params, **named_params)
if expected_method._return_value is None:
expected_method._return_value = result
if expected_method._exception:
raise expected_method._exception
return expected_method._return_value
def __getattr__(self, name):
"""Raise an AttributeError with a helpful message."""
raise AttributeError('MockMethod has no attribute "%s". '
'Did you remember to put your mocks in replay mode?' % name)
def __iter__(self):
"""Raise a TypeError with a helpful message."""
raise TypeError('MockMethod cannot be iterated. '
'Did you remember to put your mocks in replay mode?')
def next(self):
"""Raise a TypeError with a helpful message."""
raise TypeError('MockMethod cannot be iterated. '
'Did you remember to put your mocks in replay mode?')
def _PopNextMethod(self):
"""Pop the next method from our call queue."""
try:
return self._call_queue.popleft()
except IndexError:
raise UnexpectedMethodCallError(self, None)
def _VerifyMethodCall(self):
"""Verify the called method is expected.
This can be an ordered method, or part of an unordered set.
Returns:
The expected mock method.
Raises:
UnexpectedMethodCall if the method called was not expected.
"""
expected = self._PopNextMethod()
# Loop here, because we might have a MethodGroup followed by another
# group.
while isinstance(expected, MethodGroup):
expected, method = expected.MethodCalled(self)
if method is not None:
return method
# This is a mock method, so just check equality.
if expected != self:
raise UnexpectedMethodCallError(self, expected)
return expected
def __str__(self):
params = ', '.join(
[repr(p) for p in self._params or []] +
['%s=%r' % x for x in sorted((self._named_params or {}).items())])
full_desc = "%s(%s) -> %r" % (self._name, params, self._return_value)
if self._description:
full_desc = "%s.%s" % (self._description, full_desc)
return full_desc
def __eq__(self, rhs):
"""Test whether this MockMethod is equivalent to another MockMethod.
Args:
# rhs: the right hand side of the test
rhs: MockMethod
"""
return (isinstance(rhs, MockMethod) and
self._name == rhs._name and
self._params == rhs._params and
self._named_params == rhs._named_params)
def __ne__(self, rhs):
"""Test whether this MockMethod is not equivalent to another MockMethod.
Args:
# rhs: the right hand side of the test
rhs: MockMethod
"""
return not self == rhs
def GetPossibleGroup(self):
"""Returns a possible group from the end of the call queue or None if no
other methods are on the stack.
"""
# Remove this method from the tail of the queue so we can add it to a group.
this_method = self._call_queue.pop()
assert this_method == self
# Determine if the tail of the queue is a group, or just a regular ordered
# mock method.
group = None
try:
group = self._call_queue[-1]
except IndexError:
pass
return group
def _CheckAndCreateNewGroup(self, group_name, group_class):
"""Checks if the last method (a possible group) is an instance of our
group_class. Adds the current method to this group or creates a new one.
Args:
group_name: the name of the group.
group_class: the class used to create instance of this new group
"""
group = self.GetPossibleGroup()
# If this is a group, and it is the correct group, add the method.
if isinstance(group, group_class) and group.group_name() == group_name:
group.AddMethod(self)
return self
# Create a new group and add the method.
new_group = group_class(group_name)
new_group.AddMethod(self)
self._call_queue.append(new_group)
return self
def InAnyOrder(self, group_name="default"):
"""Move this method into a group of unordered calls.
A group of unordered calls must be defined together, and must be executed
in full before the next expected method can be called. There can be
multiple groups that are expected serially, if they are given
different group names. The same group name can be reused if there is a
standard method call, or a group with a different name, spliced between
usages.
Args:
group_name: the name of the unordered group.
Returns:
self
"""
return self._CheckAndCreateNewGroup(group_name, UnorderedGroup)
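# Hedged usage sketch (mock_dao is illustrative): the two deletes may be
# replayed in either order, but both must happen before Commit.
# mock_dao.Delete(1).InAnyOrder('deletes')
# mock_dao.Delete(2).InAnyOrder('deletes')
# mock_dao.Commit()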
def MultipleTimes(self, group_name="default"):
"""Move this method into group of calls which may be called multiple times.
A group of repeating calls must be defined together, and must be executed in
full before the next expected method can be called.
Args:
group_name: the name of the unordered group.
Returns:
self
"""
return self._CheckAndCreateNewGroup(group_name, MultipleTimesGroup)
def AndReturn(self, return_value):
"""Set the value to return when this method is called.
Args:
# return_value can be anything.
"""
self._return_value = return_value
return return_value
def AndRaise(self, exception):
"""Set the exception to raise when this method is called.
Args:
# exception: the exception to raise when this method is called.
exception: Exception
"""
self._exception = exception
def WithSideEffects(self, side_effects):
"""Set the side effects that are simulated when this method is called.
Args:
side_effects: A callable which modifies the parameters or other relevant
state which a given test case depends on.
Returns:
Self for chaining with AndReturn and AndRaise.
"""
self._side_effects = side_effects
return self
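# Hedged usage sketch (names are illustrative): the side-effect callable
# receives the call's parameters, here mutating the passed list before the
# canned value is returned.
# mock_queue.Drain(items).WithSideEffects(lambda lst: lst.append('done')).AndReturn(1)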
class Comparator:
"""Base class for all Mox comparators.
A Comparator can be used as a parameter to a mocked method when the exact
value is not known. For example, the code you are testing might build up a
long SQL string that is passed to your mock DAO. You're only interested that
the IN clause contains the proper primary keys, so you can set your mock
up as follows:
mock_dao.RunQuery(StrContains('IN (1, 2, 4, 5)')).AndReturn(mock_result)
Now whatever query is passed in must contain the string 'IN (1, 2, 4, 5)'.
A Comparator may replace one or more parameters, for example:
# return at most 10 rows
mock_dao.RunQuery(StrContains('SELECT'), 10)
or
# Return some non-deterministic number of rows
mock_dao.RunQuery(StrContains('SELECT'), IsA(int))
"""
def equals(self, rhs):
"""Special equals method that all comparators must implement.
Args:
rhs: any python object
"""
raise NotImplementedError, 'method must be implemented by a subclass.'
def __eq__(self, rhs):
return self.equals(rhs)
def __ne__(self, rhs):
return not self.equals(rhs)
class Is(Comparator):
"""Comparison class used to check identity, instead of equality."""
def __init__(self, obj):
self._obj = obj
def equals(self, rhs):
return rhs is self._obj
def __repr__(self):
return "<is %r (%s)>" % (self._obj, id(self._obj))
class IsA(Comparator):
"""This class wraps a basic Python type or class. It is used to verify
that a parameter is of the given type or class.
Example:
mock_dao.Connect(IsA(DbConnectInfo))
"""
def __init__(self, class_name):
"""Initialize IsA
Args:
class_name: basic python type or a class
"""
self._class_name = class_name
def equals(self, rhs):
"""Check to see if the RHS is an instance of class_name.
Args:
# rhs: the right hand side of the test
rhs: object
Returns:
bool
"""
try:
return isinstance(rhs, self._class_name)
except TypeError:
# Check raw types if there was a type error. This is helpful for
# things like cStringIO.StringIO.
return type(rhs) == type(self._class_name)
def __repr__(self):
return str(self._class_name)
class IsAlmost(Comparator):
"""Comparison class used to check whether a parameter is nearly equal
to a given value. Generally useful for floating point numbers.
Example: mock_dao.SetTimeout(IsAlmost(3.9))
"""
def __init__(self, float_value, places=7):
"""Initialize IsAlmost.
Args:
float_value: The value for making the comparison.
places: The number of decimal places to round to.
"""
self._float_value = float_value
self._places = places
def equals(self, rhs):
"""Check to see if RHS is almost equal to float_value
Args:
rhs: the value to compare to float_value
Returns:
bool
"""
try:
return round(rhs-self._float_value, self._places) == 0
except TypeError:
# This is probably because either float_value or rhs is not a number.
return False
def __repr__(self):
return str(self._float_value)
class StrContains(Comparator):
"""Comparison class used to check whether a substring exists in a
string parameter. This can be useful in mocking a database with SQL
passed in as a string parameter, for example.
Example:
mock_dao.RunQuery(StrContains('IN (1, 2, 4, 5)')).AndReturn(mock_result)
"""
def __init__(self, search_string):
"""Initialize.
Args:
# search_string: the string you are searching for
search_string: str
"""
self._search_string = search_string
def equals(self, rhs):
"""Check to see if the search_string is contained in the rhs string.
Args:
# rhs: the right hand side of the test
rhs: object
Returns:
bool
"""
try:
return rhs.find(self._search_string) > -1
except Exception:
return False
def __repr__(self):
return '<str containing \'%s\'>' % self._search_string
class Regex(Comparator):
"""Checks if a string matches a regular expression.
This uses a given regular expression to determine equality.
"""
def __init__(self, pattern, flags=0):
"""Initialize.
Args:
# pattern is the regular expression to search for
pattern: str
# flags passed to re.compile function as the second argument
flags: int
"""
self.regex = re.compile(pattern, flags=flags)
def equals(self, rhs):
"""Check to see if rhs matches regular expression pattern.
Returns:
bool
"""
return self.regex.search(rhs) is not None
def __repr__(self):
s = '<regular expression \'%s\'' % self.regex.pattern
if self.regex.flags:
s += ', flags=%d' % self.regex.flags
s += '>'
return s
class In(Comparator):
"""Checks whether an item (or key) is in a list (or dict) parameter.
Example:
mock_dao.GetUsersInfo(In('expectedUserName')).AndReturn(mock_result)
"""
def __init__(self, key):
"""Initialize.
Args:
# key is any thing that could be in a list or a key in a dict
"""
self._key = key
def equals(self, rhs):
"""Check to see whether key is in rhs.
Args:
rhs: dict
Returns:
bool
"""
return self._key in rhs
def __repr__(self):
return '<sequence or map containing \'%s\'>' % str(self._key)
class Not(Comparator):
Checks whether a predicate is False.
Example:
mock_dao.UpdateUsers(Not(ContainsKeyValue('stevepm', stevepm_user_info)))
"""
def __init__(self, predicate):
"""Initialize.
Args:
# predicate: a Comparator instance.
"""
assert isinstance(predicate, Comparator), ("predicate %r must be a"
" Comparator." % predicate)
self._predicate = predicate
def equals(self, rhs):
"""Check to see whether the predicate is False.
Args:
rhs: A value that will be given in argument of the predicate.
Returns:
bool
"""
return not self._predicate.equals(rhs)
def __repr__(self):
return '<not \'%s\'>' % self._predicate
class ContainsKeyValue(Comparator):
"""Checks whether a key/value pair is in a dict parameter.
Example:
mock_dao.UpdateUsers(ContainsKeyValue('stevepm', stevepm_user_info))
"""
def __init__(self, key, value):
"""Initialize.
Args:
# key: a key in a dict
# value: the corresponding value
"""
self._key = key
self._value = value
def equals(self, rhs):
"""Check whether the given key/value pair is in the rhs dict.
Returns:
bool
"""
try:
return rhs[self._key] == self._value
except Exception:
return False
def __repr__(self):
return '<map containing the entry \'%s: %s\'>' % (str(self._key),
str(self._value))
class ContainsAttributeValue(Comparator):
"""Checks whether a passed parameter contains attributes with a given value.
Example:
mock_dao.UpdateSomething(ContainsAttributeValue('stevepm', stevepm_user_info))
"""
def __init__(self, key, value):
"""Initialize.
Args:
# key: an attribute name of an object
# value: the corresponding value
"""
self._key = key
self._value = value
def equals(self, rhs):
"""Check whether the given attribute has a matching value in the rhs object.
Returns:
bool
"""
try:
return getattr(rhs, self._key) == self._value
except Exception:
return False
class SameElementsAs(Comparator):
"""Checks whether iterables contain the same elements (ignoring order).
Example:
mock_dao.ProcessUsers(SameElementsAs(['stevepm', 'salomaki']))
"""
def __init__(self, expected_seq):
"""Initialize.
Args:
expected_seq: a sequence
"""
self._expected_seq = expected_seq
def equals(self, actual_seq):
"""Check to see whether actual_seq has same elements as expected_seq.
Args:
actual_seq: sequence
Returns:
bool
"""
try:
expected = dict([(element, None) for element in self._expected_seq])
actual = dict([(element, None) for element in actual_seq])
except TypeError:
# Fall back to slower list-compare if any of the objects are unhashable.
expected = list(self._expected_seq)
actual = list(actual_seq)
expected.sort()
actual.sort()
return expected == actual
def __repr__(self):
return '<sequence with same elements as \'%s\'>' % self._expected_seq
class And(Comparator):
"""Evaluates one or more Comparators on RHS and returns an AND of the results.
"""
def __init__(self, *args):
"""Initialize.
Args:
*args: One or more Comparator
"""
self._comparators = args
def equals(self, rhs):
"""Checks whether all Comparators are equal to rhs.
Args:
# rhs: can be anything
Returns:
bool
"""
for comparator in self._comparators:
if not comparator.equals(rhs):
return False
return True
def __repr__(self):
return '<AND %s>' % str(self._comparators)
class Or(Comparator):
"""Evaluates one or more Comparators on RHS and returns an OR of the results.
"""
def __init__(self, *args):
"""Initialize.
Args:
*args: One or more Mox comparators
"""
self._comparators = args
def equals(self, rhs):
"""Checks whether any Comparator is equal to rhs.
Args:
# rhs: can be anything
Returns:
bool
"""
for comparator in self._comparators:
if comparator.equals(rhs):
return True
return False
def __repr__(self):
return '<OR %s>' % str(self._comparators)
class Func(Comparator):
"""Call a function that should verify the parameter passed in is correct.
You may need the ability to perform more advanced operations on the parameter
in order to validate it. You can use this to have a callable validate any
parameter. The callable should return either True or False.
Example:
def myParamValidator(param):
# Advanced logic here
return True
mock_dao.DoSomething(Func(myParamValidator), True)
"""
def __init__(self, func):
"""Initialize.
Args:
func: callable that takes one parameter and returns a bool
"""
self._func = func
def equals(self, rhs):
"""Test whether rhs passes the function test.
rhs is passed into func.
Args:
rhs: any python object
Returns:
the result of func(rhs)
"""
return self._func(rhs)
def __repr__(self):
return str(self._func)
class IgnoreArg(Comparator):
"""Ignore an argument.
This can be used when we don't care about an argument of a method call.
Example:
# Check if CastMagic is called with 3 as first arg and 'disappear' as third.
mymock.CastMagic(3, IgnoreArg(), 'disappear')
"""
def equals(self, unused_rhs):
"""Ignores arguments and returns True.
Args:
unused_rhs: any python object
Returns:
always returns True
"""
return True
def __repr__(self):
return '<IgnoreArg>'
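# Illustrative sketch (not part of the original module): every comparator
# above exposes equals(rhs), so compositions can be exercised directly,
# without constructing a mock.
def _comparator_examples():
  # And() requires every sub-comparator to accept the same value.
  assert And(Func(lambda n: n > 0), Func(lambda n: n % 2 == 0)).equals(4)
  # Or() matches when at least one sub-comparator does.
  assert Or(Func(lambda n: n < 0), Func(lambda n: n == 3)).equals(3)
  # IgnoreArg() matches anything; SameElementsAs() ignores ordering.
  assert IgnoreArg().equals(object())
  assert SameElementsAs([1, 2, 3]).equals([3, 1, 2])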
class MethodGroup(object):
"""Base class containing common behaviour for MethodGroups."""
def __init__(self, group_name):
self._group_name = group_name
def group_name(self):
return self._group_name
def __str__(self):
return '<%s "%s">' % (self.__class__.__name__, self._group_name)
def AddMethod(self, mock_method):
raise NotImplementedError
def MethodCalled(self, mock_method):
raise NotImplementedError
def IsSatisfied(self):
raise NotImplementedError
class UnorderedGroup(MethodGroup):
"""UnorderedGroup holds a set of method calls that may occur in any order.
This construct is helpful for non-deterministic events, such as iterating
over the keys of a dict.
"""
def __init__(self, group_name):
super(UnorderedGroup, self).__init__(group_name)
self._methods = []
def AddMethod(self, mock_method):
"""Add a method to this group.
Args:
mock_method: A mock method to be added to this group.
"""
self._methods.append(mock_method)
def MethodCalled(self, mock_method):
"""Remove a method call from the group.
If the method is not in the set, an UnexpectedMethodCallError will be
raised.
Args:
mock_method: a mock method that should be equal to a method in the group.
Returns:
The mock method from the group
Raises:
UnexpectedMethodCallError if the mock_method was not in the group.
"""
# Check to see if this method exists, and if so, remove it from the set
# and return it.
for method in self._methods:
if method == mock_method:
# Remove the called mock_method instead of the method in the group.
# The called method will match any comparators when equality is checked
# during removal. The method in the group could pass a comparator to
# another comparator during the equality check.
self._methods.remove(mock_method)
# If this group is not empty, put it back at the head of the queue.
if not self.IsSatisfied():
mock_method._call_queue.appendleft(self)
return self, method
raise UnexpectedMethodCallError(mock_method, self)
def IsSatisfied(self):
"""Return True if there are not any methods in this group."""
return len(self._methods) == 0
class MultipleTimesGroup(MethodGroup):
"""MultipleTimesGroup holds methods that may be called any number of times.
Note: Each method must be called at least once.
This is helpful, if you don't know or care how many times a method is called.
"""
def __init__(self, group_name):
super(MultipleTimesGroup, self).__init__(group_name)
self._methods = set()
self._methods_left = set()
def AddMethod(self, mock_method):
"""Add a method to this group.
Args:
mock_method: A mock method to be added to this group.
"""
self._methods.add(mock_method)
self._methods_left.add(mock_method)
def MethodCalled(self, mock_method):
"""Remove a method call from the group.
If the method is not in the set, an UnexpectedMethodCallError will be
raised.
Args:
mock_method: a mock method that should be equal to a method in the group.
Returns:
The mock method from the group
Raises:
UnexpectedMethodCallError if the mock_method was not in the group.
"""
# Check to see if this method exists, and if so add it to the set of
# called methods.
for method in self._methods:
if method == mock_method:
self._methods_left.discard(method)
# Always put this group back on top of the queue, because we don't know
# when we are done.
mock_method._call_queue.appendleft(self)
return self, method
if self.IsSatisfied():
      next_method = mock_method._PopNextMethod()
return next_method, None
else:
raise UnexpectedMethodCallError(mock_method, self)
def IsSatisfied(self):
"""Return True if all methods in this group are called at least once."""
return len(self._methods_left) == 0
class MoxMetaTestBase(type):
"""Metaclass to add mox cleanup and verification to every test.
As the mox unit testing class is being constructed (MoxTestBase or a
subclass), this metaclass will modify all test functions to call the
CleanUpMox method of the test class after they finish. This means that
unstubbing and verifying will happen for every test with no additional code,
and any failures will result in test failures as opposed to errors.
"""
def __init__(cls, name, bases, d):
type.__init__(cls, name, bases, d)
# also get all the attributes from the base classes to account
# for a case when test class is not the immediate child of MoxTestBase
for base in bases:
for attr_name in dir(base):
if attr_name not in d:
d[attr_name] = getattr(base, attr_name)
for func_name, func in d.items():
if func_name.startswith('test') and callable(func):
setattr(cls, func_name, MoxMetaTestBase.CleanUpTest(cls, func))
@staticmethod
def CleanUpTest(cls, func):
"""Adds Mox cleanup code to any MoxTestBase method.
Always unsets stubs after a test. Will verify all mocks for tests that
otherwise pass.
Args:
cls: MoxTestBase or subclass; the class whose test method we are altering.
func: method; the method of the MoxTestBase test class we wish to alter.
Returns:
The modified method.
"""
def new_method(self, *args, **kwargs):
mox_obj = getattr(self, 'mox', None)
stubout_obj = getattr(self, 'stubs', None)
cleanup_mox = False
cleanup_stubout = False
if mox_obj and isinstance(mox_obj, Mox):
cleanup_mox = True
if stubout_obj and isinstance(stubout_obj, stubout.StubOutForTesting):
cleanup_stubout = True
try:
func(self, *args, **kwargs)
finally:
if cleanup_mox:
mox_obj.UnsetStubs()
if cleanup_stubout:
stubout_obj.UnsetAll()
stubout_obj.SmartUnsetAll()
if cleanup_mox:
mox_obj.VerifyAll()
new_method.__name__ = func.__name__
new_method.__doc__ = func.__doc__
new_method.__module__ = func.__module__
return new_method
class MoxTestBase(unittest.TestCase):
"""Convenience test class to make stubbing easier.
Sets up a "mox" attribute which is an instance of Mox (any mox tests will
want this), and a "stubs" attribute that is an instance of StubOutForTesting
(needed at times). Also automatically unsets any stubs and verifies that all
mock methods have been called at the end of each test, eliminating boilerplate
code.
"""
__metaclass__ = MoxMetaTestBase
def setUp(self):
super(MoxTestBase, self).setUp()
self.mox = Mox()
self.stubs = stubout.StubOutForTesting()
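# Illustrative usage sketch (not part of the original module): a test class
# inheriting MoxTestBase gets UnsetStubs()/VerifyAll() run automatically
# after each test; ``my_module`` below is a hypothetical module under test.
#
#   class ExampleTest(MoxTestBase):
#     def test_fetch(self):
#       self.mox.StubOutWithMock(my_module, 'fetch')
#       my_module.fetch('key').AndReturn('value')
#       self.mox.ReplayAll()
#       self.assertEqual(my_module.fetch('key'), 'value')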
| agpl-3.0 |
datasciencedev/locality-sensitive-hashing | lsh_matrix.py | 2 | 11266 | import sys, struct, os, time, types, re, math, random, operator, hashlib, pdb
import logging, settings
logging.basicConfig(filename=settings.LOG_FILENAME, level=logging.DEBUG)
from lsh.shingles.shingles import _get_list_of_shingles
from lsh.utils.similarity import compute_positive_hash
DbType = settings.DATABASES['default']['ENGINE']
if DbType == 'cassandra':
from db_cassandra import DbInt, Table
elif DbType == 'datastore':
from db_datastore import DbInt, Table
else:
from db_in_memory import DbInt, Table
class UnknownException(Exception):
pass
class Matrix(object):
__metaclass__ = Table
attrs = [
'ds_key text',
'source text',
'filename text',
'file_key text',
'lsh_output text',
'eval_output text',
'count_output text',
'random_seeds list<bigint>',
'buckets list<int>',
'rows int',
'bands int',
'shingle_type ascii',
'minhash_modulo int',
]
p_keys = ['ds_key']
def __init__(self):
return
@classmethod
def get(cls, ds_key):
cls._initialize()
if ds_key:
ds = cls.select_row(ds_key = ds_key)
if ds:
for attr in ds:
if attr in ('random_seeds', 'buckets'):
if ds[attr]:
logging.info('retrieved dataset[%s][0] type %s, value %s', attr, type(ds[attr][0]), settings.max_mask & ds[attr][0])
else:
logging.info('retrieved dataset[%s] type %s, value %s', attr, type(ds[attr]), ds[attr])
return ds
return None
@classmethod
def find(cls, ds_key):
cls._initialize()
matrix = Matrix.select_row(ds_key = ds_key)
if not matrix:
logging.warning('Matrix.find failed to find matrix with ds_key %s', ds_key)
return matrix
def find_child_rows(self):
MatrixRow._initialize()
return MatrixRow.select_all(parent = self)
@classmethod
def _initialize(cls):
matrix = Matrix(name = cls.__name__, attrs = cls.attrs, p_keys = cls.p_keys)
return matrix
@classmethod
def make_new_id(cls, source, filename):
cls._initialize()
max_iters = 4
for iter_count in xrange(max_iters):
ds_key = 'k%04d' % (int(hashlib.md5(source + filename + ' ' * iter_count).hexdigest(), 16) % 10000)
try:
# Does a dataset with this ID already exist?
this_ds = Matrix.select_row(ds_key = ds_key)
if not this_ds:
break
if this_ds.filename == filename:
logging.debug("A dataset with %s already exists, reusing", filename)
return ds_key
except ValueError:
                raise Exception('Unexpected ValueError while checking for an existing dataset')
if iter_count == max_iters - 1:
raise Exception("Unable to create Dataset ID")
return ds_key
@classmethod
def create(cls, source, filename, file_key = '',
rows = settings.rows,
bands = settings.bands,
shingle_type = settings.shingle_type,
minhash_modulo = settings.minhash_modulo):
# logging.debug('Matrix.create cls = %s, vars = %s', cls, vars(cls))
Matrix._initialize()
# logging.debug('Matrix.create inputs %s, %s, %s', source, filename, file_key)
ds_key = cls.make_new_id(source, filename)
# logging.debug('Matrix.create ds_key %s', ds_key)
max_hashes = rows * bands
data = {
'ds_key': '%s' % ds_key,
'source': '%s' % source,
'filename': '%s' % filename,
'file_key': '%s' % file_key,
'random_seeds': [(settings.max_mask & random.getrandbits(settings.max_bits)) for _ in xrange(max_hashes)],
'rows': rows,
'bands': bands,
'shingle_type': '%s' % shingle_type,
'minhash_modulo': minhash_modulo,
}
Matrix.insert_row(data = data)
matrix = Matrix.find(ds_key)
# logging.debug('Matrix.create returning %s', matrix)
return matrix
def str(self):
txt = '<Matrix ds_key={ds_key} />'.format(ds_key = self.ds_key)
return txt
def get_else_create_doc(self, doc_id):
try:
doc = MatrixRow.select_row(ds_key = self.ds_key, doc_id = doc_id)
if doc:
return True, doc
except:
pass
doc = MatrixRow(name = 'MatrixRow', attrs = MatrixRow.attrs, p_keys = MatrixRow.p_keys)
doc.ds_key = self.ds_key
doc.doc_id = doc_id
return False, doc
def get_doc(self, doc_id):
try:
doc = MatrixRow.select_row(ds_key = self.ds_key, doc_id = doc_id)
if doc:
doc.ds_key = self.ds_key
doc.doc_id = doc_id
return doc
except:
pass
return None
def docs_iterator(self):
return MatrixRow.select_all(ds_key = self.ds_key)
def purge(self):
MatrixRow._initialize()
MatrixRow.delete_all(parent = self)
Matrix.delete_row(ds_key = self.ds_key)
def get_nns(self, doc_id):
doc = self.get_doc(doc_id)
if not doc:
return []
bkts = [DbInt.fm_db(bkt) for bkt in doc.buckets]
mhs = {}
for bkt in bkts:
bkt_docs = session.execute(self.nns_select, [self.ds_key, DbInt.to_db(bkt)])
for bkt_doc in bkt_docs:
mhs[bkt_doc['doc_id']] = bkt_doc['minhashes']
del mhs[doc_id]
jac = {}
for doc_id2 in mhs.keys():
jac_min = reduce(lambda x, y: x+y, map(lambda a,b: a == b, doc.minhashes,mhs[doc_id2])) / float(len(doc.minhashes))
jac[doc_id2] = 1.0 - jac_min
if 0 == int(1000*time.time()) % 100:
logging.info('Sampling (1%%) Jaccard distance %s | %s: %6.2f', doc_id, doc_id2, jac[doc_id2])
return jac
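    # Note: the fraction of agreeing minhash positions computed above is the
    # standard unbiased estimate of the Jaccard similarity of the two shingle
    # sets, so jac[doc_id2] approximates the Jaccard *distance*. This method
    # also assumes a Cassandra-style `session` and a prepared
    # `self.nns_select` statement defined elsewhere; neither appears in this
    # file, so get_nns only works against the Cassandra backend.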
def create_doc(self, _id, text, stats):
(found, doc) = self.get_else_create_doc(_id)
stats['found'] = found
if found:
# if 0 == int(1000*time.time()) % 20:
# # print 5% of the documents on average
# logging.info('%s %s',doc['ds_key'], doc['doc_id'])
return doc
### Parse
t0 = time.time()
doc.text = text
tParse = time.time() - t0
stats['parse'] = tParse
doc.dataset = self
doc.rows = self.rows
doc.hashes = doc.rows * self.bands
doc.seeds = list(self.random_seeds)
doc.modulo = self.minhash_modulo
doc.sh_type = self.shingle_type
max_hashes = self.rows * self.bands
doc.minhashes = doc.calc_minhashes()
tMinhash = time.time() - t0 - tParse
stats['minhash'] = tMinhash
doc.buckets = doc.bucketize(doc.minhashes)
tBucketize = time.time() - t0 - tParse - tMinhash
stats['bucketize'] = tBucketize
# if 0 == int(1000*time.time()) % 20:
# # print 5% of the documents on average
# logging.info('%s %s %s', doc.ds_key, doc.doc_id, doc.buckets)
data = {
'ds_key': '%s' % doc.ds_key,
'doc_id': '%s' % doc.doc_id,
# Don't need to store minhashes
# 'minhashes': doc.minhashes,
'buckets': doc.buckets,
}
MatrixRow.insert_row(data = data)
tDbWrite = time.time() - t0 - tParse - tMinhash - tBucketize
stats['database'] = tDbWrite
return doc
class MatrixRow(object):
__metaclass__ = Table
attrs = [
'ds_key text',
'doc_id text',
'buckets list<int>',
# Don't need to store minhashes
# 'minhashes list<int>',
]
p_keys = ['doc_id', 'ds_key']
indexes = [('doc_buckets', 'buckets',)]
@classmethod
def _initialize(cls):
# Make sure the underlying tables or data structures exist
matrix_row = MatrixRow(name = cls.__name__, attrs = cls.attrs, p_keys = cls.p_keys, indexes = cls.indexes)
return matrix_row
@classmethod
def create(cls):
return cls._initialize()
def calc_minhashes(self):
def minhashes_for_shingles(shingles):
def calc_onehash(shingle, seed):
def c4_hash(shingle):
try:
h = struct.unpack('<i',shingle)[0]
except struct.error:
# We land here when the shingle has non-ascii characters in it.
size = 4
encoded = shingle.encode('utf-8')
int_hashes = [int(encoded[i:i + size].encode('hex'), 16) for i in range(len(encoded)/size)]
h = reduce(operator.xor, int_hashes)
hash_val = h & settings.max_mask
return hash_val
if self.sh_type == 'c4':
return operator.xor(c4_hash(shingle), long(seed)) % self.modulo
else:
return operator.xor(compute_positive_hash(shingle), long(seed)) % self.modulo
minhashes = [settings.max_mask for _ in xrange(self.hashes)]
for shingle in shingles:
for hno in xrange(self.hashes):
h_value = calc_onehash(shingle, self.seeds[hno])
minhashes[hno] = min(h_value, minhashes[hno])
return minhashes
##########################################
shingles = self.shingles()
minhashes = minhashes_for_shingles(shingles)
return minhashes
def shingles(self):
return MatrixRow.shingle_text(self.text, self.sh_type)
@staticmethod
def shingle_text(text, sh_type):
retval = set(text.split()) if sh_type=='w' else set(_get_list_of_shingles(text))
return retval
def bucketize(self, minhashes):
buckets = []
try:
band_bits = self.dataset.band_bits
except AttributeError:
matrix = self.dataset
band_bits = int(math.ceil(math.log(matrix.bands, 2)))
band_mask = (2**band_bits - 1)
setattr(matrix, 'band_bits', band_bits)
setattr(matrix, 'band_mask', band_mask)
setattr(matrix, 'hash_mask', 2**(settings.max_bits - band_bits)-1)
band_mask = self.dataset.band_mask
hash_mask = self.dataset.hash_mask
for band in xrange(self.dataset.bands):
band_hash = (band_mask & band) * (hash_mask + 1)
minhashes_in_band = [minhashes[band*self.rows + row] for row in xrange(self.rows)]
minhashes_into_a_string = '-'.join([str(mh) for mh in minhashes_in_band])
bucket = band_hash | (hash_mask & int(hashlib.md5(minhashes_into_a_string).hexdigest(), 16))
buckets.append(DbInt.to_db(bucket))
return buckets
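    # Worked example (illustrative numbers): with bands=16 and rows=8,
    # band_bits = ceil(log2(16)) = 4, so the top 4 bits of every bucket id
    # encode the band number and the remaining bits hold an md5-derived hash
    # of that band's 8 minhashes. Two documents become nearest-neighbour
    # candidates when they share at least one bucket id.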
| apache-2.0 |
NaturalGIS/naturalgis_qgis | tests/src/python/test_qgspointclusterrenderer.py | 43 | 8110 | # -*- coding: utf-8 -*-
"""
***************************************************************************
test_qgspointclusterrenderer.py
-----------------------------
Date : September 2016
Copyright : (C) 2016 by Nyall Dawson
Email : nyall dot dawson at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
From build dir, run: ctest -R PyQgsPointClusterRenderer -V
"""
__author__ = 'Nyall Dawson'
__date__ = 'September 2016'
__copyright__ = '(C) 2016, Nyall Dawson'
import os
from qgis.PyQt.QtCore import QSize
from qgis.PyQt.QtGui import QColor
from qgis.PyQt.QtXml import QDomDocument
from qgis.core import (QgsVectorLayer,
QgsProject,
QgsRectangle,
QgsMultiRenderChecker,
QgsPointClusterRenderer,
QgsUnitTypes,
QgsMapUnitScale,
QgsMarkerSymbol,
QgsSingleSymbolRenderer,
QgsReadWriteContext,
QgsPointDisplacementRenderer,
QgsMapSettings,
QgsProperty,
QgsSymbolLayer,
QgsRenderContext
)
from qgis.testing import start_app, unittest
from utilities import (unitTestDataPath)
# Convenience instances in case you need them
# (not used in this test)
start_app()
TEST_DATA_DIR = unitTestDataPath()
class TestQgsPointClusterRenderer(unittest.TestCase):
def setUp(self):
myShpFile = os.path.join(TEST_DATA_DIR, 'points.shp')
self.layer = QgsVectorLayer(myShpFile, 'Points', 'ogr')
QgsProject.instance().addMapLayer(self.layer)
self.renderer = QgsPointClusterRenderer()
sym1 = QgsMarkerSymbol.createSimple({'color': '#ff00ff', 'size': '3', 'outline_style': 'no'})
renderer = QgsSingleSymbolRenderer(sym1)
self.renderer.setEmbeddedRenderer(renderer)
self.renderer.setClusterSymbol(QgsMarkerSymbol.createSimple({'color': '#ffff00', 'size': '3', 'outline_style': 'no'}))
self.layer.setRenderer(self.renderer)
rendered_layers = [self.layer]
self.mapsettings = QgsMapSettings()
self.mapsettings.setOutputSize(QSize(400, 400))
self.mapsettings.setOutputDpi(96)
self.mapsettings.setExtent(QgsRectangle(-123, 18, -70, 52))
self.mapsettings.setLayers(rendered_layers)
def tearDown(self):
QgsProject.instance().removeAllMapLayers()
def _setProperties(self, r):
""" set properties for a renderer for testing with _checkProperties"""
r.setTolerance(5)
r.setToleranceUnit(QgsUnitTypes.RenderMapUnits)
r.setToleranceMapUnitScale(QgsMapUnitScale(5, 15))
m = QgsMarkerSymbol()
m.setColor(QColor(0, 255, 0))
r.setClusterSymbol(m)
sym1 = QgsMarkerSymbol.createSimple({'color': '#fdbf6f'})
renderer = QgsSingleSymbolRenderer(sym1)
r.setEmbeddedRenderer(renderer)
def _checkProperties(self, r):
""" test properties of renderer against expected"""
self.assertEqual(r.tolerance(), 5)
self.assertEqual(r.toleranceUnit(), QgsUnitTypes.RenderMapUnits)
self.assertEqual(r.toleranceMapUnitScale(), QgsMapUnitScale(5, 15))
self.assertEqual(r.clusterSymbol().color(), QColor(0, 255, 0))
self.assertEqual(r.embeddedRenderer().symbol().color().name(), '#fdbf6f')
def testGettersSetters(self):
""" test getters and setters """
r = QgsPointClusterRenderer()
self._setProperties(r)
self._checkProperties(r)
def testClone(self):
""" test cloning renderer """
r = QgsPointClusterRenderer()
self._setProperties(r)
c = r.clone()
self._checkProperties(c)
def testSaveCreate(self):
""" test saving and recreating from XML """
r = QgsPointClusterRenderer()
self._setProperties(r)
doc = QDomDocument("testdoc")
elem = r.save(doc, QgsReadWriteContext())
c = QgsPointClusterRenderer.create(elem, QgsReadWriteContext())
self._checkProperties(c)
def testConvert(self):
""" test renderer conversion """
# same type, should clone
r = QgsPointClusterRenderer()
self._setProperties(r)
c = QgsPointClusterRenderer.convertFromRenderer(r)
self._checkProperties(c)
# test conversion from displacement renderer
r = QgsPointDisplacementRenderer()
r.setTolerance(5)
r.setToleranceUnit(QgsUnitTypes.RenderMapUnits)
r.setToleranceMapUnitScale(QgsMapUnitScale(5, 15))
m = QgsMarkerSymbol()
m.setColor(QColor(0, 255, 0))
r.setCenterSymbol(m)
sym1 = QgsMarkerSymbol.createSimple({'color': '#fdbf6f'})
renderer = QgsSingleSymbolRenderer(sym1)
r.setEmbeddedRenderer(renderer)
# want to keep as many settings as possible when converting between cluster and displacement renderer
d = QgsPointClusterRenderer.convertFromRenderer(r)
self.assertEqual(d.tolerance(), 5)
self.assertEqual(d.toleranceUnit(), QgsUnitTypes.RenderMapUnits)
self.assertEqual(d.toleranceMapUnitScale(), QgsMapUnitScale(5, 15))
self.assertEqual(d.clusterSymbol().color(), QColor(0, 255, 0))
self.assertEqual(d.embeddedRenderer().symbol().color().name(), '#fdbf6f')
def testRenderNoCluster(self):
self.layer.renderer().setTolerance(1)
renderchecker = QgsMultiRenderChecker()
renderchecker.setMapSettings(self.mapsettings)
renderchecker.setControlPathPrefix('cluster_renderer')
renderchecker.setControlName('expected_cluster_no_cluster')
self.assertTrue(renderchecker.runTest('cluster_no_cluster'))
def testRenderWithin(self):
self.layer.renderer().setTolerance(10)
renderchecker = QgsMultiRenderChecker()
renderchecker.setMapSettings(self.mapsettings)
renderchecker.setControlPathPrefix('cluster_renderer')
renderchecker.setControlName('expected_cluster_cluster')
self.assertTrue(renderchecker.runTest('expected_cluster_cluster'))
def testRenderVariables(self):
""" test rendering with expression variables in marker """
self.layer.renderer().setTolerance(10)
old_marker = self.layer.renderer().clusterSymbol().clone()
new_marker = QgsMarkerSymbol.createSimple({'color': '#ffff00', 'size': '3', 'outline_style': 'no'})
new_marker.symbolLayer(0).setDataDefinedProperty(QgsSymbolLayer.PropertyFillColor, QgsProperty.fromExpression('@cluster_color'))
new_marker.symbolLayer(0).setDataDefinedProperty(QgsSymbolLayer.PropertySize, QgsProperty.fromExpression('@cluster_size*2'))
self.layer.renderer().setClusterSymbol(new_marker)
renderchecker = QgsMultiRenderChecker()
renderchecker.setMapSettings(self.mapsettings)
renderchecker.setControlPathPrefix('cluster_renderer')
renderchecker.setControlName('expected_cluster_variables')
result = renderchecker.runTest('expected_cluster_variables')
self.layer.renderer().setClusterSymbol(old_marker)
self.assertTrue(result)
def testUsedAttributes(self):
ctx = QgsRenderContext.fromMapSettings(self.mapsettings)
self.assertCountEqual(self.renderer.usedAttributes(ctx), {})
if __name__ == '__main__':
unittest.main()
| gpl-2.0 |
kernevil/samba | third_party/waf/waflib/extras/swig.py | 39 | 6252 | #! /usr/bin/env python
# encoding: UTF-8
# Petar Forai
# Thomas Nagy 2008-2010 (ita)
import re
from waflib import Task, Logs
from waflib.TaskGen import extension, feature, after_method
from waflib.Configure import conf
from waflib.Tools import c_preproc
"""
tasks have to be added dynamically:
- swig interface files may be created at runtime
- the module name may be unknown in advance
"""
SWIG_EXTS = ['.swig', '.i']
re_module = re.compile(r'%module(?:\s*\(.*\))?\s+(.+)', re.M)
re_1 = re.compile(r'^%module.*?\s+([\w]+)\s*?$', re.M)
re_2 = re.compile(r'[#%](?:include|import(?:\(module=".*"\))+|python(?:begin|code)) [<"](.*)[">]', re.M)
class swig(Task.Task):
color = 'BLUE'
run_str = '${SWIG} ${SWIGFLAGS} ${SWIGPATH_ST:INCPATHS} ${SWIGDEF_ST:DEFINES} ${SRC}'
ext_out = ['.h'] # might produce .h files although it is not mandatory
vars = ['SWIG_VERSION', 'SWIGDEPS']
def runnable_status(self):
for t in self.run_after:
if not t.hasrun:
return Task.ASK_LATER
if not getattr(self, 'init_outputs', None):
self.init_outputs = True
if not getattr(self, 'module', None):
# search the module name
txt = self.inputs[0].read()
m = re_module.search(txt)
if not m:
raise ValueError("could not find the swig module name")
self.module = m.group(1)
swig_c(self)
# add the language-specific output files as nodes
# call funs in the dict swig_langs
for x in self.env['SWIGFLAGS']:
# obtain the language
x = x[1:]
try:
fun = swig_langs[x]
except KeyError:
pass
else:
fun(self)
return super(swig, self).runnable_status()
def scan(self):
"scan for swig dependencies, climb the .i files"
lst_src = []
seen = []
missing = []
to_see = [self.inputs[0]]
while to_see:
node = to_see.pop(0)
if node in seen:
continue
seen.append(node)
lst_src.append(node)
# read the file
code = node.read()
code = c_preproc.re_nl.sub('', code)
code = c_preproc.re_cpp.sub(c_preproc.repl, code)
# find .i files and project headers
names = re_2.findall(code)
for n in names:
for d in self.generator.includes_nodes + [node.parent]:
u = d.find_resource(n)
if u:
to_see.append(u)
break
else:
missing.append(n)
return (lst_src, missing)
# provide additional language processing
swig_langs = {}
def swigf(fun):
swig_langs[fun.__name__.replace('swig_', '')] = fun
return fun
swig.swigf = swigf
def swig_c(self):
ext = '.swigwrap_%d.c' % self.generator.idx
flags = self.env['SWIGFLAGS']
if '-c++' in flags:
ext += 'xx'
out_node = self.inputs[0].parent.find_or_declare(self.module + ext)
if '-c++' in flags:
c_tsk = self.generator.cxx_hook(out_node)
else:
c_tsk = self.generator.c_hook(out_node)
c_tsk.set_run_after(self)
# transfer weights from swig task to c task
if getattr(self, 'weight', None):
c_tsk.weight = self.weight
if getattr(self, 'tree_weight', None):
c_tsk.tree_weight = self.tree_weight
try:
self.more_tasks.append(c_tsk)
except AttributeError:
self.more_tasks = [c_tsk]
try:
ltask = self.generator.link_task
except AttributeError:
pass
else:
ltask.set_run_after(c_tsk)
# setting input nodes does not declare the build order
# because the build already started, but it sets
# the dependency to enable rebuilds
ltask.inputs.append(c_tsk.outputs[0])
self.outputs.append(out_node)
	if '-o' not in self.env['SWIGFLAGS']:
self.env.append_value('SWIGFLAGS', ['-o', self.outputs[0].abspath()])
@swigf
def swig_python(tsk):
node = tsk.inputs[0].parent
if tsk.outdir:
node = tsk.outdir
tsk.set_outputs(node.find_or_declare(tsk.module+'.py'))
@swigf
def swig_ocaml(tsk):
node = tsk.inputs[0].parent
if tsk.outdir:
node = tsk.outdir
tsk.set_outputs(node.find_or_declare(tsk.module+'.ml'))
tsk.set_outputs(node.find_or_declare(tsk.module+'.mli'))
@extension(*SWIG_EXTS)
def i_file(self, node):
# the task instance
tsk = self.create_task('swig')
tsk.set_inputs(node)
tsk.module = getattr(self, 'swig_module', None)
flags = self.to_list(getattr(self, 'swig_flags', []))
tsk.env.append_value('SWIGFLAGS', flags)
tsk.outdir = None
if '-outdir' in flags:
outdir = flags[flags.index('-outdir')+1]
outdir = tsk.generator.bld.bldnode.make_node(outdir)
outdir.mkdir()
tsk.outdir = outdir
@feature('c', 'cxx', 'd', 'fc', 'asm')
@after_method('apply_link', 'process_source')
def enforce_swig_before_link(self):
try:
link_task = self.link_task
except AttributeError:
pass
else:
for x in self.tasks:
if x.__class__.__name__ == 'swig':
link_task.run_after.add(x)
@conf
def check_swig_version(conf, minver=None):
"""
Check if the swig tool is found matching a given minimum version.
	minver should be a tuple, e.g. to check for swig >= 1.3.28 pass (1, 3, 28) as minver.
If successful, SWIG_VERSION is defined as 'MAJOR.MINOR'
(eg. '1.3') of the actual swig version found.
:param minver: minimum version
:type minver: tuple of int
:return: swig version
:rtype: tuple of int
"""
assert minver is None or isinstance(minver, tuple)
swigbin = conf.env['SWIG']
if not swigbin:
conf.fatal('could not find the swig executable')
# Get swig version string
cmd = swigbin + ['-version']
Logs.debug('swig: Running swig command %r', cmd)
reg_swig = re.compile(r'SWIG Version\s(.*)', re.M)
swig_out = conf.cmd_and_log(cmd)
swigver_tuple = tuple([int(s) for s in reg_swig.findall(swig_out)[0].split('.')])
# Compare swig version with the minimum required
result = (minver is None) or (swigver_tuple >= minver)
if result:
# Define useful environment variables
swigver = '.'.join([str(x) for x in swigver_tuple[:2]])
conf.env['SWIG_VERSION'] = swigver
# Feedback
swigver_full = '.'.join(map(str, swigver_tuple[:3]))
if minver is None:
conf.msg('Checking for swig version', swigver_full)
else:
minver_str = '.'.join(map(str, minver))
conf.msg('Checking for swig version >= %s' % (minver_str,), swigver_full, color=result and 'GREEN' or 'YELLOW')
if not result:
conf.fatal('The swig version is too old, expecting %r' % (minver,))
return swigver_tuple
def configure(conf):
conf.find_program('swig', var='SWIG')
conf.env.SWIGPATH_ST = '-I%s'
conf.env.SWIGDEF_ST = '-D%s'
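# Illustrative project wscript sketch (not part of this tool file), assuming
# waf's standard tool loading:
#
#   def configure(conf):
#       conf.load('swig')
#       conf.check_swig_version((1, 3, 28))  # fatal if swig < 1.3.28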
| gpl-3.0 |
askeing/servo | tests/wpt/web-platform-tests/webvtt/parsing/file-parsing/tools/build.py | 89 | 3474 | import os
import glob
import shutil
from os import path
TEST_FILE_PATTERN = "support/**.test"
TEST_OUTPUT_PATH = "tests"
TEMPLATE = """\
<!doctype html>
<!-- DO NOT EDIT! This file and %vtt_file_rel_path are generated. -->
<!-- See /webvtt/parsing/file-parsing/README.md -->
<meta charset=utf-8>
<title>WebVTT parser test: %test_name</title>
%test_headers
<script src="/resources/testharness.js"></script>
<script src="/resources/testharnessreport.js"></script>
<div id=log></div>
<script>
var t = async_test('%test_name');
t.step(function(){
var video = document.createElement('video');
var track = document.createElement('track');
assert_true('src' in track, 'track element not supported');
track.src = '%vtt_file_rel_path';
track['default'] = true;
track.kind = 'subtitles';
track.onload = this.step_func(trackLoaded);
track.onerror = this.step_func(trackError);
video.appendChild(track);
document.body.appendChild(video);
});
function trackLoaded(event) {
var track = event.target;
var video = track.parentNode;
var cues = video.textTracks[0].cues;
{
%test_js
}
this.done();
}
function trackError(e) {
assert_unreached('got unexpected error event');
}
</script>
"""
def generate_test(test_path, output_dir):
# Read test file
test_filename = path.basename(test_path)
test_basefilename = path.splitext(test_filename)[0]
with open(test_path, 'r') as test:
test_source = test.read()
# Split test header
splits = test_source.split('\n\n', 1)
if len(splits) != 2:
raise ValueError("Leave an empty line between the test header and body")
test_header, test_body = splits
# Split header into name + html headers
splits = test_header.split('\n', 1)
test_name = splits[0]
    # The headers section is optional; default to empty so test_headers is
    # always bound when the template is filled in below.
    test_headers = splits[1] if len(splits) == 2 else ''
# Split body into js + vtt
splits = test_body.split('\n===\n', 1)
if len(splits) != 2:
raise ValueError("Use === to separate the js and vtt parts")
test_js, test_vtt = splits
# Get output paths
os.makedirs(output_dir, exist_ok=True)
html_file_path = path.join(output_dir, test_basefilename + '.html')
vtt_file_dir = path.join(output_dir, 'support')
os.makedirs(vtt_file_dir, exist_ok=True)
vtt_file_name = test_basefilename + '.vtt'
vtt_file_path = path.join(vtt_file_dir, vtt_file_name)
vtt_file_rel_path = path.join('support', vtt_file_name)
# Write html file
with open(html_file_path, 'w') as output:
html = (TEMPLATE.replace('%test_name', test_name)
.replace('%test_headers', test_headers)
.replace('%test_js', test_js)
.replace('%vtt_file_rel_path', vtt_file_rel_path))
output.write(html)
# Write vtt file
with open(vtt_file_path, 'w') as output:
encoded = bytes(test_vtt, "utf-8").decode("unicode_escape")
output.write(encoded)
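# Illustrative ``.test`` input (reconstructed from the parsing above; not a
# file shipped with this script). Line 1 is the test name, any further lines
# up to the first blank line are extra HTML headers, then the JS assertions,
# a ``===`` separator, and the raw VTT payload (which may use \n escapes,
# since it is decoded via unicode_escape):
#
#   simple cue
#   <blank line>
#   assert_equals(cues.length, 1);
#   ===
#   WEBVTT\n\n00:00:00.000 --> 00:00:01.000\ntext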
def main():
file_parsing_path = path.normpath(path.join(path.dirname(__file__), ".."))
test_output_path = path.join(file_parsing_path, TEST_OUTPUT_PATH)
tests_pattern = path.join(file_parsing_path, TEST_FILE_PATTERN)
# Clean test directory
shutil.rmtree(test_output_path)
# Generate tests
for file in glob.glob(tests_pattern):
print('Building test files for: ' + file)
generate_test(file, test_output_path)
if __name__ == '__main__':
main()
| mpl-2.0 |
jrrembert/django | django/utils/deconstruct.py | 502 | 2047 | from importlib import import_module
from django.utils.version import get_docs_version
def deconstructible(*args, **kwargs):
"""
    Class decorator that allows the decorated class to be serialized
by the migrations subsystem.
Accepts an optional kwarg `path` to specify the import path.
"""
path = kwargs.pop('path', None)
def decorator(klass):
def __new__(cls, *args, **kwargs):
# We capture the arguments to make returning them trivial
obj = super(klass, cls).__new__(cls)
obj._constructor_args = (args, kwargs)
return obj
def deconstruct(obj):
"""
Returns a 3-tuple of class import path, positional arguments,
and keyword arguments.
"""
# Python 2/fallback version
if path:
module_name, _, name = path.rpartition('.')
else:
module_name = obj.__module__
name = obj.__class__.__name__
# Make sure it's actually there and not an inner class
module = import_module(module_name)
if not hasattr(module, name):
raise ValueError(
"Could not find object %s in %s.\n"
"Please note that you cannot serialize things like inner "
"classes. Please move the object into the main module "
"body to use migrations.\n"
"For more information, see "
"https://docs.djangoproject.com/en/%s/topics/migrations/#serializing-values"
% (name, module_name, get_docs_version()))
return (
path or '%s.%s' % (obj.__class__.__module__, name),
obj._constructor_args[0],
obj._constructor_args[1],
)
klass.__new__ = staticmethod(__new__)
klass.deconstruct = deconstruct
return klass
if not args:
return decorator
return decorator(*args, **kwargs)
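# Illustrative usage sketch (module path hypothetical):
#
#   @deconstructible(path='myapp.validators.RangeValidator')
#   class RangeValidator(object):
#       def __init__(self, low, high):
#           self.low, self.high = low, high
#
#   RangeValidator(1, 10).deconstruct()
#   # -> ('myapp.validators.RangeValidator', (1, 10), {})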
| bsd-3-clause |
iShoto/testpy | codes/20200112_pytorch_cifar10/src/models/googlenet.py | 4 | 3221 | '''GoogLeNet with PyTorch.'''
import torch
import torch.nn as nn
import torch.nn.functional as F
class Inception(nn.Module):
def __init__(self, in_planes, n1x1, n3x3red, n3x3, n5x5red, n5x5, pool_planes):
super(Inception, self).__init__()
# 1x1 conv branch
self.b1 = nn.Sequential(
nn.Conv2d(in_planes, n1x1, kernel_size=1),
nn.BatchNorm2d(n1x1),
nn.ReLU(True),
)
# 1x1 conv -> 3x3 conv branch
self.b2 = nn.Sequential(
nn.Conv2d(in_planes, n3x3red, kernel_size=1),
nn.BatchNorm2d(n3x3red),
nn.ReLU(True),
nn.Conv2d(n3x3red, n3x3, kernel_size=3, padding=1),
nn.BatchNorm2d(n3x3),
nn.ReLU(True),
)
# 1x1 conv -> 5x5 conv branch
self.b3 = nn.Sequential(
nn.Conv2d(in_planes, n5x5red, kernel_size=1),
nn.BatchNorm2d(n5x5red),
nn.ReLU(True),
nn.Conv2d(n5x5red, n5x5, kernel_size=3, padding=1),
nn.BatchNorm2d(n5x5),
nn.ReLU(True),
nn.Conv2d(n5x5, n5x5, kernel_size=3, padding=1),
nn.BatchNorm2d(n5x5),
nn.ReLU(True),
)
# 3x3 pool -> 1x1 conv branch
self.b4 = nn.Sequential(
nn.MaxPool2d(3, stride=1, padding=1),
nn.Conv2d(in_planes, pool_planes, kernel_size=1),
nn.BatchNorm2d(pool_planes),
nn.ReLU(True),
)
def forward(self, x):
y1 = self.b1(x)
y2 = self.b2(x)
y3 = self.b3(x)
y4 = self.b4(x)
return torch.cat([y1,y2,y3,y4], 1)
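# Channel bookkeeping (verifiable from the constructor calls below): each
# Inception block emits n1x1 + n3x3 + n5x5 + pool_planes channels, e.g.
# a3 = Inception(192, 64, 96, 128, 16, 32, 32) outputs 64+128+32+32 = 256
# channels, which is exactly the in_planes of b3.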
class GoogLeNet(nn.Module):
def __init__(self):
super(GoogLeNet, self).__init__()
self.pre_layers = nn.Sequential(
nn.Conv2d(3, 192, kernel_size=3, padding=1),
nn.BatchNorm2d(192),
nn.ReLU(True),
)
self.a3 = Inception(192, 64, 96, 128, 16, 32, 32)
self.b3 = Inception(256, 128, 128, 192, 32, 96, 64)
self.maxpool = nn.MaxPool2d(3, stride=2, padding=1)
self.a4 = Inception(480, 192, 96, 208, 16, 48, 64)
self.b4 = Inception(512, 160, 112, 224, 24, 64, 64)
self.c4 = Inception(512, 128, 128, 256, 24, 64, 64)
self.d4 = Inception(512, 112, 144, 288, 32, 64, 64)
self.e4 = Inception(528, 256, 160, 320, 32, 128, 128)
self.a5 = Inception(832, 256, 160, 320, 32, 128, 128)
self.b5 = Inception(832, 384, 192, 384, 48, 128, 128)
self.avgpool = nn.AvgPool2d(8, stride=1)
self.linear = nn.Linear(1024, 10)
def forward(self, x):
out = self.pre_layers(x)
out = self.a3(out)
out = self.b3(out)
out = self.maxpool(out)
out = self.a4(out)
out = self.b4(out)
out = self.c4(out)
out = self.d4(out)
out = self.e4(out)
out = self.maxpool(out)
out = self.a5(out)
out = self.b5(out)
out = self.avgpool(out)
out = out.view(out.size(0), -1)
out = self.linear(out)
return out
def test():
net = GoogLeNet()
x = torch.randn(1,3,32,32)
y = net(x)
print(y.size())
# test()
| mit |
ABaldwinHunter/django-clone | tests/builtin_server/tests.py | 368 | 5230 | from __future__ import unicode_literals
import sys
import traceback
from io import BytesIO
from unittest import TestCase
from wsgiref import simple_server
# If data is too large, socket will choke, so write chunks no larger than 32MB
# at a time. The rationale behind the 32MB can be found on Django's Trac:
# https://code.djangoproject.com/ticket/5596#comment:4
MAX_SOCKET_CHUNK_SIZE = 32 * 1024 * 1024 # 32 MB
class ServerHandler(simple_server.ServerHandler, object):
error_status = str("500 INTERNAL SERVER ERROR")
def write(self, data):
"""'write()' callable as specified by PEP 3333"""
assert isinstance(data, bytes), "write() argument must be bytestring"
if not self.status:
raise AssertionError("write() before start_response()")
elif not self.headers_sent:
# Before the first output, send the stored headers
self.bytes_sent = len(data) # make sure we know content-length
self.send_headers()
else:
self.bytes_sent += len(data)
# XXX check Content-Length and truncate if too many bytes written?
data = BytesIO(data)
for chunk in iter(lambda: data.read(MAX_SOCKET_CHUNK_SIZE), b''):
self._write(chunk)
self._flush()
def error_output(self, environ, start_response):
super(ServerHandler, self).error_output(environ, start_response)
return ['\n'.join(traceback.format_exception(*sys.exc_info()))]
# Backport of http://hg.python.org/cpython/rev/d5af1b235dab. See #16241.
# This can be removed when support for Python <= 2.7.3 is deprecated.
def finish_response(self):
try:
if not self.result_is_file() or not self.sendfile():
for data in self.result:
self.write(data)
self.finish_content()
finally:
self.close()
class DummyHandler(object):
def log_request(self, *args, **kwargs):
pass
class FileWrapperHandler(ServerHandler):
def __init__(self, *args, **kwargs):
super(FileWrapperHandler, self).__init__(*args, **kwargs)
self.request_handler = DummyHandler()
self._used_sendfile = False
def sendfile(self):
self._used_sendfile = True
return True
def wsgi_app(environ, start_response):
start_response(str('200 OK'), [(str('Content-Type'), str('text/plain'))])
return [b'Hello World!']
def wsgi_app_file_wrapper(environ, start_response):
start_response(str('200 OK'), [(str('Content-Type'), str('text/plain'))])
return environ['wsgi.file_wrapper'](BytesIO(b'foo'))
class WSGIFileWrapperTests(TestCase):
"""
    Test that the wsgi.file_wrapper works for the builtin server.
    Tests for #9659: wsgi.file_wrapper in the builtin server.
    We need to mock a couple of handlers and keep track of what
    gets called when using a couple of kinds of WSGI apps.
"""
def test_file_wrapper_uses_sendfile(self):
env = {'SERVER_PROTOCOL': 'HTTP/1.0'}
handler = FileWrapperHandler(None, BytesIO(), BytesIO(), env)
handler.run(wsgi_app_file_wrapper)
self.assertTrue(handler._used_sendfile)
self.assertEqual(handler.stdout.getvalue(), b'')
self.assertEqual(handler.stderr.getvalue(), b'')
def test_file_wrapper_no_sendfile(self):
env = {'SERVER_PROTOCOL': 'HTTP/1.0'}
handler = FileWrapperHandler(None, BytesIO(), BytesIO(), env)
handler.run(wsgi_app)
self.assertFalse(handler._used_sendfile)
self.assertEqual(handler.stdout.getvalue().splitlines()[-1], b'Hello World!')
self.assertEqual(handler.stderr.getvalue(), b'')
class WriteChunkCounterHandler(ServerHandler):
"""
Server handler that counts the number of chunks written after headers were
sent. Used to make sure large response body chunking works properly.
"""
def __init__(self, *args, **kwargs):
super(WriteChunkCounterHandler, self).__init__(*args, **kwargs)
self.request_handler = DummyHandler()
self.headers_written = False
self.write_chunk_counter = 0
def send_headers(self):
super(WriteChunkCounterHandler, self).send_headers()
self.headers_written = True
def _write(self, data):
if self.headers_written:
self.write_chunk_counter += 1
self.stdout.write(data)
def send_big_data_app(environ, start_response):
start_response(str('200 OK'), [(str('Content-Type'), str('text/plain'))])
# Return a blob of data that is 1.5 times the maximum chunk size.
return [b'x' * (MAX_SOCKET_CHUNK_SIZE + MAX_SOCKET_CHUNK_SIZE // 2)]
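# With MAX_SOCKET_CHUNK_SIZE = 32 MB, the 48 MB body above is written in two
# chunks (32 MB + 16 MB), which is exactly what test_chunked_data asserts.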
class ServerHandlerChunksProperly(TestCase):
"""
Test that the ServerHandler chunks data properly.
Tests for #18972: The logic that performs the math to break data into
32MB (MAX_SOCKET_CHUNK_SIZE) chunks was flawed, BUT it didn't actually
cause any problems.
"""
def test_chunked_data(self):
env = {'SERVER_PROTOCOL': 'HTTP/1.0'}
handler = WriteChunkCounterHandler(None, BytesIO(), BytesIO(), env)
handler.run(send_big_data_app)
self.assertEqual(handler.write_chunk_counter, 2)
| bsd-3-clause |
dlazz/ansible | lib/ansible/plugins/inventory/vultr.py | 39 | 5042 | # (c) 2018, Yanis Guenane <yanis+ansible@guenane.org>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = r'''
name: vultr
plugin_type: inventory
author:
- "Yanis Guenane (@Spredzy)"
short_description: Vultr inventory source
description:
- Get inventory hosts from Vultr public cloud.
- Uses C(api_config), C(~/.vultr.ini), C(./vultr.ini) or VULTR_API_CONFIG path to config file.
options:
plugin:
description: token that ensures this is a source file for the 'vultr' plugin.
required: True
choices: ['vultr']
api_account:
description: Specify the account to be used.
default: default
api_config:
description: Path to the vultr configuration file. If not specified will be taken from regular Vultr configuration.
env:
- name: VULTR_API_CONFIG
api_key:
description: Vultr API key. If not specified will be taken from regular Vultr configuration.
env:
- name: VULTR_API_KEY
hostname:
description: Field to match the hostname. Note v4_main_ip corresponds to the main_ip field returned from the API and name to label.
type: string
default: v4_main_ip
choices:
- v4_main_ip
- v6_main_ip
- name
'''
EXAMPLES = r'''
# vultr_inventory.yml file in YAML format
# Example command line: ansible-inventory --list -i vultr_inventory.yml
plugin: vultr
'''
import json
from ansible.errors import AnsibleError
from ansible.plugins.inventory import BaseInventoryPlugin
from ansible.module_utils.six.moves import configparser
from ansible.module_utils.urls import open_url
from ansible.module_utils._text import to_native
from ansible.module_utils.vultr import Vultr, VULTR_API_ENDPOINT, VULTR_USER_AGENT
SCHEMA = {
'SUBID': dict(key='id'),
'label': dict(key='name'),
'date_created': dict(),
'allowed_bandwidth_gb': dict(convert_to='int'),
'auto_backups': dict(key='auto_backup_enabled', convert_to='bool'),
'current_bandwidth_gb': dict(),
'kvm_url': dict(),
'default_password': dict(),
'internal_ip': dict(),
'disk': dict(),
'cost_per_month': dict(convert_to='float'),
'location': dict(key='region'),
'main_ip': dict(key='v4_main_ip'),
'network_v4': dict(key='v4_network'),
'gateway_v4': dict(key='v4_gateway'),
'os': dict(),
'pending_charges': dict(convert_to='float'),
'power_status': dict(),
'ram': dict(),
'plan': dict(),
'server_state': dict(),
'status': dict(),
'firewall_group': dict(),
'tag': dict(),
'v6_main_ip': dict(),
'v6_network': dict(),
'v6_network_size': dict(),
'v6_networks': dict(),
'vcpu_count': dict(convert_to='int'),
}
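# Judging by the key/convert_to hints above, Vultr.normalize_result() renames
# and coerces fields; e.g. (illustrative values):
#   {'SUBID': '100', 'label': 'web1', 'vcpu_count': '2'}
#   -> {'id': '100', 'name': 'web1', 'vcpu_count': 2}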
def _load_conf(path, account):
if path:
conf = configparser.ConfigParser()
conf.read(path)
if not conf._sections.get(account):
return None
return dict(conf.items(account))
else:
return Vultr.read_ini_config(account)
def _retrieve_servers(api_key):
api_url = '%s/v1/server/list' % VULTR_API_ENDPOINT
try:
response = open_url(
api_url, headers={'API-Key': api_key, 'Content-type': 'application/json'},
http_agent=VULTR_USER_AGENT,
)
servers_list = json.loads(response.read())
return servers_list.values() if servers_list else []
except ValueError:
raise AnsibleError("Incorrect JSON payload")
except Exception as e:
raise AnsibleError("Error while fetching %s: %s" % (api_url, to_native(e)))
class InventoryModule(BaseInventoryPlugin):
NAME = 'vultr'
def parse(self, inventory, loader, path, cache=True):
super(InventoryModule, self).parse(inventory, loader, path)
self._read_config_data(path=path)
conf = _load_conf(self.get_option('api_config'), self.get_option('api_account'))
try:
api_key = self.get_option('api_key') or conf.get('key')
except Exception:
raise AnsibleError('Could not find an API key. Check inventory file and Vultr configuration files.')
hostname_preference = self.get_option('hostname')
for server in _retrieve_servers(api_key):
server = Vultr.normalize_result(server, SCHEMA)
for group in ['region', 'os']:
self.inventory.add_group(group=server[group])
self.inventory.add_host(group=server[group], host=server['name'])
for attribute, value in server.items():
self.inventory.set_variable(server['name'], attribute, value)
if hostname_preference != 'name':
self.inventory.set_variable(server['name'], 'ansible_host', server[hostname_preference])
| gpl-3.0 |
Morgan-Stanley/treadmill | lib/python/treadmill/cli/admin/diag/psmem.py | 2 | 4422 | """Reports memory utilization details for given container."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import os
import click
from treadmill import psmem
from treadmill import cgroups
from treadmill import cgutils
from treadmill import metrics
from treadmill import utils
from treadmill import cli
class PsmemProcPrettyFormatter:
"""Pretty table formatter for psmem processes."""
@staticmethod
def format(item):
"""Return pretty-formatted item."""
schema = [
('name', None, None),
('tgid', None, None),
('threads', None, None),
('private', None, None),
('shared', None, None),
('total', None, None),
]
format_item = cli.make_dict_to_table(schema)
format_list = cli.make_list_to_table(schema)
if isinstance(item, list):
return format_list(item)
else:
return format_item(item)
class PsmemTotalPrettyFormatter:
"""Pretty table formatter for psmem processes."""
@staticmethod
def format(item):
"""Return pretty-formatted item."""
schema = [
('memory-type', None, None),
('value', None, None),
]
format_list = cli.make_list_to_table(schema, False)
return format_list(item)
def init():
"""Top level command handler.
"""
@click.command(name='psmem')
@click.option('--fast', is_flag=True, help='Disable statm/pss analysis.')
@click.option('-v', '--verbose', is_flag=True, help='Verbose')
@click.option('--percent', is_flag=True)
@click.option('--root-cgroup', default='treadmill',
envvar='TREADMILL_ROOT_CGROUP', required=False)
@click.argument('app')
def psmem_cmd(fast, app, verbose, percent, root_cgroup):
"""Reports memory utilization details for given container.
"""
if app.find('#') == -1:
raise click.BadParameter('Specify full instance name: xxx#nnn')
app = app.replace('#', '-')
cgroup = None
apps_group = cgutils.apps_group_name(root_cgroup)
apps = os.listdir(os.path.join(cgroups.CG_ROOT, 'memory', apps_group))
for entry in apps:
if app in entry:
cgroup = os.path.join(apps_group, entry)
if not cgroup:
raise click.BadParameter('Could not find corresponding cgroup')
pids = cgutils.pids_in_cgroup('memory', cgroup)
use_pss = not fast
memusage = psmem.get_memory_usage(pids, verbose, use_pss=use_pss)
total = sum([info['total'] for info in memusage])
def _readable(value):
return utils.bytes_to_readable(value, power='B')
def _percentage(value, total):
return '{:.1%}'.format(value / total)
to_format = ['private', 'shared', 'total']
for info in memusage:
for key, val in info.items():
if key in to_format:
if percent:
info[key] = _percentage(val, total)
else:
info[key] = _readable(val)
proc_table = PsmemProcPrettyFormatter()
print(proc_table.format(memusage))
metric = metrics.read_memory_stats(cgroup)
total_list = []
# Actual memory usage is without the disk cache
total_list.append({'memory-type': 'usage', 'value':
_readable(metric['memory.usage_in_bytes'] -
metric['memory.stat']['cache'])})
total_list.append({'memory-type': '', 'value':
_percentage(metric['memory.usage_in_bytes'],
metric['memory.limit_in_bytes'])})
total_list.append({'memory-type': 'diskcache', 'value':
_readable(metric['memory.stat']['cache'])})
total_list.append({'memory-type': 'softlimit', 'value':
_readable(metric['memory.soft_limit_in_bytes'])})
total_list.append({'memory-type': 'hardlimit', 'value':
_readable(metric['memory.limit_in_bytes'])})
total_table = PsmemTotalPrettyFormatter()
print('')
print(total_table.format(total_list))
return psmem_cmd
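# Illustrative invocation (instance name hypothetical); the full xxx#nnn form
# is required, and '#' is rewritten to '-' when locating the cgroup. Given
# this module's path, the command is presumably exposed as:
#   treadmill admin diag psmem --percent proid.myapp#0000000001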
| apache-2.0 |
polyrabbit/polyglot | polyglot/cli.py | 1 | 2433 | #!/usr/bin/env python
#coding: utf-8
import os
import sys
import logging
import click
from .model import LanguageModel
from .classifier import Classifier
logging.basicConfig(format='%(asctime)s -- %(message)s')
@click.group()
def run():
"""I am a computer language savant"""
@run.command()
@click.option('-c', '--corpus', default='corpus', type=click.Path(exists=True),
help='The corpus folder for training(default corpus).')
@click.option('-n', '--ngram', default=3, type=click.INT,
help='The size of grams to use, the larger the better, but more expensive(default 3).')
@click.option('-v', '--verbose', is_flag=True, help='Run in debug mode.')
@click.option('-o', '--output', type=click.File('w'), default='-',
help='File to store training result(default to stdout).')
def train(corpus, ngram, output, verbose):
"""Train polyglot from the corpus folder, each sub-folder represents a language
which contains many files written in that language(excluding files starting with "." of course)."""
if verbose:
logging.getLogger().setLevel(logging.DEBUG)
if not os.path.isdir(corpus):
print >> sys.stderr, '%s is not a folder.' % corpus
return
db = LanguageModel(output)
master = Classifier(db, ngram)
master.train(corpus)
@run.command()
@click.argument('file', type=click.File('r'))
# help='Input source code to classify (or standard input if no files are named, or if a single hyphen-minus (-) is given as file name).')
@click.option('-n', '--ngram', default=3, type=click.INT,
help='The size of grams to use, the larger the better, but more expensive(default 3).')
@click.option('-t', '--top', default=3, type=click.INT,
help='Output top N most likely language, -1 means all(default 3).')
@click.option('-v', '--verbose', is_flag=True, help='Run in debug mode.')
@click.option('-m', '--model', type=click.File('r', lazy=True), default='model.json',
help='Language model file which holds the training result(default model.json).')
def classify(file, model, ngram, top, verbose):
"""Do a Naive Bayes classifier on the given FILE, top N most likely languages in descending order with their scores"""
if verbose:
logging.getLogger().setLevel(logging.DEBUG)
db = LanguageModel(model)
master = Classifier(db, ngram)
print master.classify(file.read())[:top]
if __name__ == '__main__':
run()
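# Illustrative shell usage, assuming the installed console entry point is
# named ``polyglot`` (file names hypothetical):
#   polyglot train -c corpus -n 3 -o model.json
#   polyglot classify -m model.json -t 3 some_source_file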
| bsd-3-clause |
shangvven/Wox | PythonHome/Lib/site-packages/pip/_vendor/distlib/_backport/tarfile.py | 1005 | 92627 | #-------------------------------------------------------------------
# tarfile.py
#-------------------------------------------------------------------
# Copyright (C) 2002 Lars Gustaebel <lars@gustaebel.de>
# All rights reserved.
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following
# conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
from __future__ import print_function
"""Read from and write to tar format archives.
"""
__version__ = "$Revision$"
version = "0.9.0"
__author__ = "Lars Gust\u00e4bel (lars@gustaebel.de)"
__date__ = "$Date: 2011-02-25 17:42:01 +0200 (Fri, 25 Feb 2011) $"
__cvsid__ = "$Id: tarfile.py 88586 2011-02-25 15:42:01Z marc-andre.lemburg $"
__credits__ = "Gustavo Niemeyer, Niels Gust\u00e4bel, Richard Townsend."
#---------
# Imports
#---------
import sys
import os
import stat
import errno
import time
import struct
import copy
import re
try:
import grp, pwd
except ImportError:
grp = pwd = None
# os.symlink on Windows prior to 6.0 raises NotImplementedError
symlink_exception = (AttributeError, NotImplementedError)
try:
# WindowsError (1314) will be raised if the caller does not hold the
# SeCreateSymbolicLinkPrivilege privilege
symlink_exception += (WindowsError,)
except NameError:
pass
# from tarfile import *
__all__ = ["TarFile", "TarInfo", "is_tarfile", "TarError"]
if sys.version_info[0] < 3:
import __builtin__ as builtins
else:
import builtins
_open = builtins.open # Since 'open' is TarFile.open
#---------------------------------------------------------
# tar constants
#---------------------------------------------------------
NUL = b"\0" # the null character
BLOCKSIZE = 512 # length of processing blocks
RECORDSIZE = BLOCKSIZE * 20 # length of records
GNU_MAGIC = b"ustar \0" # magic gnu tar string
POSIX_MAGIC = b"ustar\x0000" # magic posix tar string
LENGTH_NAME = 100 # maximum length of a filename
LENGTH_LINK = 100 # maximum length of a linkname
LENGTH_PREFIX = 155 # maximum length of the prefix field
REGTYPE = b"0" # regular file
AREGTYPE = b"\0" # regular file
LNKTYPE = b"1" # link (inside tarfile)
SYMTYPE = b"2" # symbolic link
CHRTYPE = b"3" # character special device
BLKTYPE = b"4" # block special device
DIRTYPE = b"5" # directory
FIFOTYPE = b"6" # fifo special device
CONTTYPE = b"7" # contiguous file
GNUTYPE_LONGNAME = b"L" # GNU tar longname
GNUTYPE_LONGLINK = b"K" # GNU tar longlink
GNUTYPE_SPARSE = b"S" # GNU tar sparse file
XHDTYPE = b"x" # POSIX.1-2001 extended header
XGLTYPE = b"g" # POSIX.1-2001 global header
SOLARIS_XHDTYPE = b"X" # Solaris extended header
USTAR_FORMAT = 0 # POSIX.1-1988 (ustar) format
GNU_FORMAT = 1 # GNU tar format
PAX_FORMAT = 2 # POSIX.1-2001 (pax) format
DEFAULT_FORMAT = GNU_FORMAT
#---------------------------------------------------------
# tarfile constants
#---------------------------------------------------------
# File types that tarfile supports:
SUPPORTED_TYPES = (REGTYPE, AREGTYPE, LNKTYPE,
SYMTYPE, DIRTYPE, FIFOTYPE,
CONTTYPE, CHRTYPE, BLKTYPE,
GNUTYPE_LONGNAME, GNUTYPE_LONGLINK,
GNUTYPE_SPARSE)
# File types that will be treated as a regular file.
REGULAR_TYPES = (REGTYPE, AREGTYPE,
CONTTYPE, GNUTYPE_SPARSE)
# File types that are part of the GNU tar format.
GNU_TYPES = (GNUTYPE_LONGNAME, GNUTYPE_LONGLINK,
GNUTYPE_SPARSE)
# Fields from a pax header that override a TarInfo attribute.
PAX_FIELDS = ("path", "linkpath", "size", "mtime",
"uid", "gid", "uname", "gname")
# Fields from a pax header that are affected by hdrcharset.
PAX_NAME_FIELDS = set(("path", "linkpath", "uname", "gname"))
# Fields in a pax header that are numbers, all other fields
# are treated as strings.
PAX_NUMBER_FIELDS = {
"atime": float,
"ctime": float,
"mtime": float,
"uid": int,
"gid": int,
"size": int
}
#---------------------------------------------------------
# Bits used in the mode field, values in octal.
#---------------------------------------------------------
S_IFLNK = 0o120000 # symbolic link
S_IFREG = 0o100000 # regular file
S_IFBLK = 0o060000 # block device
S_IFDIR = 0o040000 # directory
S_IFCHR = 0o020000 # character device
S_IFIFO = 0o010000 # fifo
TSUID = 0o4000 # set UID on execution
TSGID = 0o2000 # set GID on execution
TSVTX = 0o1000 # reserved
TUREAD = 0o400 # read by owner
TUWRITE = 0o200 # write by owner
TUEXEC = 0o100 # execute/search by owner
TGREAD = 0o040 # read by group
TGWRITE = 0o020 # write by group
TGEXEC = 0o010 # execute/search by group
TOREAD = 0o004 # read by other
TOWRITE = 0o002 # write by other
TOEXEC = 0o001 # execute/search by other
#---------------------------------------------------------
# initialization
#---------------------------------------------------------
if os.name in ("nt", "ce"):
ENCODING = "utf-8"
else:
ENCODING = sys.getfilesystemencoding()
#---------------------------------------------------------
# Some useful functions
#---------------------------------------------------------
def stn(s, length, encoding, errors):
"""Convert a string to a null-terminated bytes object.
"""
s = s.encode(encoding, errors)
return s[:length] + (length - len(s)) * NUL
def nts(s, encoding, errors):
"""Convert a null-terminated bytes object to a string.
"""
p = s.find(b"\0")
if p != -1:
s = s[:p]
return s.decode(encoding, errors)
def nti(s):
"""Convert a number field to a python number.
"""
# There are two possible encodings for a number field, see
# itn() below.
if s[0] != chr(0o200):
try:
n = int(nts(s, "ascii", "strict") or "0", 8)
except ValueError:
raise InvalidHeaderError("invalid header")
else:
n = 0
for i in range(len(s) - 1):
n <<= 8
n += ord(s[i + 1])
return n
def itn(n, digits=8, format=DEFAULT_FORMAT):
"""Convert a python number to a number field.
"""
# POSIX 1003.1-1988 requires numbers to be encoded as a string of
# octal digits followed by a null-byte, this allows values up to
# (8**(digits-1))-1. GNU tar allows storing numbers greater than
# that if necessary. A leading 0o200 byte indicates this particular
# encoding, the following digits-1 bytes are a big-endian
# representation. This allows values up to (256**(digits-1))-1.
if 0 <= n < 8 ** (digits - 1):
s = ("%0*o" % (digits - 1, n)).encode("ascii") + NUL
else:
if format != GNU_FORMAT or n >= 256 ** (digits - 1):
raise ValueError("overflow in number field")
if n < 0:
# XXX We mimic GNU tar's behaviour with negative numbers,
# this could raise OverflowError.
n = struct.unpack("L", struct.pack("l", n))[0]
s = bytearray()
for i in range(digits - 1):
s.insert(0, n & 0o377)
n >>= 8
s.insert(0, 0o200)
return s
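# Illustrative round-trip (added example): values that fit use the
# octal text encoding, larger values fall back to GNU base-256, which
# is only written for GNU_FORMAT:
#
#     >>> itn(0o755, 8, GNU_FORMAT)
#     b'0000755\x00'
#     >>> nti(itn(10**15, 12, GNU_FORMAT)) == 10**15
#     True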
def calc_chksums(buf):
"""Calculate the checksum for a member's header by summing up all
characters except for the chksum field which is treated as if
it was filled with spaces. According to the GNU tar sources,
some tars (Sun and NeXT) calculate chksum with signed char,
which will be different if there are chars in the buffer with
the high bit set. So we calculate two checksums, unsigned and
signed.
"""
unsigned_chksum = 256 + sum(struct.unpack("148B", buf[:148]) + struct.unpack("356B", buf[156:512]))
signed_chksum = 256 + sum(struct.unpack("148b", buf[:148]) + struct.unpack("356b", buf[156:512]))
return unsigned_chksum, signed_chksum
def copyfileobj(src, dst, length=None):
"""Copy length bytes from fileobj src to fileobj dst.
If length is None, copy the entire content.
"""
if length == 0:
return
if length is None:
while True:
buf = src.read(16*1024)
if not buf:
break
dst.write(buf)
return
BUFSIZE = 16 * 1024
blocks, remainder = divmod(length, BUFSIZE)
for b in range(blocks):
buf = src.read(BUFSIZE)
if len(buf) < BUFSIZE:
raise IOError("end of file reached")
dst.write(buf)
if remainder != 0:
buf = src.read(remainder)
if len(buf) < remainder:
raise IOError("end of file reached")
dst.write(buf)
return
filemode_table = (
((S_IFLNK, "l"),
(S_IFREG, "-"),
(S_IFBLK, "b"),
(S_IFDIR, "d"),
(S_IFCHR, "c"),
(S_IFIFO, "p")),
((TUREAD, "r"),),
((TUWRITE, "w"),),
((TUEXEC|TSUID, "s"),
(TSUID, "S"),
(TUEXEC, "x")),
((TGREAD, "r"),),
((TGWRITE, "w"),),
((TGEXEC|TSGID, "s"),
(TSGID, "S"),
(TGEXEC, "x")),
((TOREAD, "r"),),
((TOWRITE, "w"),),
((TOEXEC|TSVTX, "t"),
(TSVTX, "T"),
(TOEXEC, "x"))
)
def filemode(mode):
"""Convert a file's mode to a string of the form
-rwxrwxrwx.
Used by TarFile.list()
"""
perm = []
for table in filemode_table:
for bit, char in table:
if mode & bit == bit:
perm.append(char)
break
else:
perm.append("-")
return "".join(perm)
class TarError(Exception):
"""Base exception."""
pass
class ExtractError(TarError):
"""General exception for extract errors."""
pass
class ReadError(TarError):
"""Exception for unreadble tar archives."""
pass
class CompressionError(TarError):
"""Exception for unavailable compression methods."""
pass
class StreamError(TarError):
"""Exception for unsupported operations on stream-like TarFiles."""
pass
class HeaderError(TarError):
"""Base exception for header errors."""
pass
class EmptyHeaderError(HeaderError):
"""Exception for empty headers."""
pass
class TruncatedHeaderError(HeaderError):
"""Exception for truncated headers."""
pass
class EOFHeaderError(HeaderError):
"""Exception for end of file headers."""
pass
class InvalidHeaderError(HeaderError):
"""Exception for invalid headers."""
pass
class SubsequentHeaderError(HeaderError):
"""Exception for missing and invalid extended headers."""
pass
#---------------------------
# internal stream interface
#---------------------------
class _LowLevelFile(object):
"""Low-level file object. Supports reading and writing.
It is used instead of a regular file object for streaming
access.
"""
def __init__(self, name, mode):
mode = {
"r": os.O_RDONLY,
"w": os.O_WRONLY | os.O_CREAT | os.O_TRUNC,
}[mode]
if hasattr(os, "O_BINARY"):
mode |= os.O_BINARY
self.fd = os.open(name, mode, 0o666)
def close(self):
os.close(self.fd)
def read(self, size):
return os.read(self.fd, size)
def write(self, s):
os.write(self.fd, s)
class _Stream(object):
"""Class that serves as an adapter between TarFile and
a stream-like object. The stream-like object only
needs to have a read() or write() method and is accessed
blockwise. Use of gzip or bzip2 compression is possible.
A stream-like object could be for example: sys.stdin,
sys.stdout, a socket, a tape device etc.
_Stream is intended to be used only internally.
"""
def __init__(self, name, mode, comptype, fileobj, bufsize):
"""Construct a _Stream object.
"""
self._extfileobj = True
if fileobj is None:
fileobj = _LowLevelFile(name, mode)
self._extfileobj = False
if comptype == '*':
# Enable transparent compression detection for the
# stream interface
fileobj = _StreamProxy(fileobj)
comptype = fileobj.getcomptype()
self.name = name or ""
self.mode = mode
self.comptype = comptype
self.fileobj = fileobj
self.bufsize = bufsize
self.buf = b""
self.pos = 0
self.closed = False
try:
if comptype == "gz":
try:
import zlib
except ImportError:
raise CompressionError("zlib module is not available")
self.zlib = zlib
self.crc = zlib.crc32(b"")
if mode == "r":
self._init_read_gz()
else:
self._init_write_gz()
if comptype == "bz2":
try:
import bz2
except ImportError:
raise CompressionError("bz2 module is not available")
if mode == "r":
self.dbuf = b""
self.cmp = bz2.BZ2Decompressor()
else:
self.cmp = bz2.BZ2Compressor()
except:
if not self._extfileobj:
self.fileobj.close()
self.closed = True
raise
def __del__(self):
if hasattr(self, "closed") and not self.closed:
self.close()
def _init_write_gz(self):
"""Initialize for writing with gzip compression.
"""
self.cmp = self.zlib.compressobj(9, self.zlib.DEFLATED,
-self.zlib.MAX_WBITS,
self.zlib.DEF_MEM_LEVEL,
0)
timestamp = struct.pack("<L", int(time.time()))
self.__write(b"\037\213\010\010" + timestamp + b"\002\377")
if self.name.endswith(".gz"):
self.name = self.name[:-3]
# RFC1952 says we must use ISO-8859-1 for the FNAME field.
self.__write(self.name.encode("iso-8859-1", "replace") + NUL)
def write(self, s):
"""Write string s to the stream.
"""
if self.comptype == "gz":
self.crc = self.zlib.crc32(s, self.crc)
self.pos += len(s)
if self.comptype != "tar":
s = self.cmp.compress(s)
self.__write(s)
def __write(self, s):
"""Write string s to the stream if a whole new block
is ready to be written.
"""
self.buf += s
while len(self.buf) > self.bufsize:
self.fileobj.write(self.buf[:self.bufsize])
self.buf = self.buf[self.bufsize:]
def close(self):
"""Close the _Stream object. No operation should be
done on it afterwards.
"""
if self.closed:
return
if self.mode == "w" and self.comptype != "tar":
self.buf += self.cmp.flush()
if self.mode == "w" and self.buf:
self.fileobj.write(self.buf)
self.buf = b""
if self.comptype == "gz":
# The native zlib crc is an unsigned 32-bit integer, but
# the Python wrapper implicitly casts that to a signed C
# long. So, on a 32-bit box self.crc may "look negative",
# while the same crc on a 64-bit box may "look positive".
# To avoid irksome warnings from the `struct` module, force
# it to look positive on all boxes.
self.fileobj.write(struct.pack("<L", self.crc & 0xffffffff))
self.fileobj.write(struct.pack("<L", self.pos & 0xffffFFFF))
if not self._extfileobj:
self.fileobj.close()
self.closed = True
def _init_read_gz(self):
"""Initialize for reading a gzip compressed fileobj.
"""
self.cmp = self.zlib.decompressobj(-self.zlib.MAX_WBITS)
self.dbuf = b""
# taken from gzip.GzipFile with some alterations
if self.__read(2) != b"\037\213":
raise ReadError("not a gzip file")
if self.__read(1) != b"\010":
raise CompressionError("unsupported compression method")
flag = ord(self.__read(1))
self.__read(6)
        if flag & 4:
            xlen = ord(self.__read(1)) + 256 * ord(self.__read(1))
            # The extra field is part of the raw gzip header, so skip it
            # with __read() instead of read(), which would try to
            # decompress it.
            self.__read(xlen)
if flag & 8:
while True:
s = self.__read(1)
if not s or s == NUL:
break
if flag & 16:
while True:
s = self.__read(1)
if not s or s == NUL:
break
if flag & 2:
self.__read(2)
def tell(self):
"""Return the stream's file pointer position.
"""
return self.pos
def seek(self, pos=0):
"""Set the stream's file pointer to pos. Negative seeking
is forbidden.
"""
if pos - self.pos >= 0:
blocks, remainder = divmod(pos - self.pos, self.bufsize)
for i in range(blocks):
self.read(self.bufsize)
self.read(remainder)
else:
raise StreamError("seeking backwards is not allowed")
return self.pos
def read(self, size=None):
"""Return the next size number of bytes from the stream.
If size is not defined, return all bytes of the stream
up to EOF.
"""
if size is None:
t = []
while True:
buf = self._read(self.bufsize)
if not buf:
break
t.append(buf)
buf = "".join(t)
else:
buf = self._read(size)
self.pos += len(buf)
return buf
def _read(self, size):
"""Return size bytes from the stream.
"""
if self.comptype == "tar":
return self.__read(size)
c = len(self.dbuf)
while c < size:
buf = self.__read(self.bufsize)
if not buf:
break
try:
buf = self.cmp.decompress(buf)
except IOError:
raise ReadError("invalid compressed data")
self.dbuf += buf
c += len(buf)
buf = self.dbuf[:size]
self.dbuf = self.dbuf[size:]
return buf
def __read(self, size):
"""Return size bytes from stream. If internal buffer is empty,
read another block from the stream.
"""
c = len(self.buf)
while c < size:
buf = self.fileobj.read(self.bufsize)
if not buf:
break
self.buf += buf
c += len(buf)
buf = self.buf[:size]
self.buf = self.buf[size:]
return buf
# class _Stream
class _StreamProxy(object):
"""Small proxy class that enables transparent compression
detection for the Stream interface (mode 'r|*').
"""
def __init__(self, fileobj):
self.fileobj = fileobj
self.buf = self.fileobj.read(BLOCKSIZE)
def read(self, size):
self.read = self.fileobj.read
return self.buf
def getcomptype(self):
if self.buf.startswith(b"\037\213\010"):
return "gz"
if self.buf.startswith(b"BZh91"):
return "bz2"
return "tar"
def close(self):
self.fileobj.close()
# class StreamProxy
class _BZ2Proxy(object):
"""Small proxy class that enables external file object
support for "r:bz2" and "w:bz2" modes. This is actually
a workaround for a limitation in bz2 module's BZ2File
class which (unlike gzip.GzipFile) has no support for
a file object argument.
"""
blocksize = 16 * 1024
def __init__(self, fileobj, mode):
self.fileobj = fileobj
self.mode = mode
self.name = getattr(self.fileobj, "name", None)
self.init()
def init(self):
import bz2
self.pos = 0
if self.mode == "r":
self.bz2obj = bz2.BZ2Decompressor()
self.fileobj.seek(0)
self.buf = b""
else:
self.bz2obj = bz2.BZ2Compressor()
def read(self, size):
x = len(self.buf)
while x < size:
raw = self.fileobj.read(self.blocksize)
if not raw:
break
data = self.bz2obj.decompress(raw)
self.buf += data
x += len(data)
buf = self.buf[:size]
self.buf = self.buf[size:]
self.pos += len(buf)
return buf
def seek(self, pos):
if pos < self.pos:
self.init()
self.read(pos - self.pos)
def tell(self):
return self.pos
def write(self, data):
self.pos += len(data)
raw = self.bz2obj.compress(data)
self.fileobj.write(raw)
def close(self):
if self.mode == "w":
raw = self.bz2obj.flush()
self.fileobj.write(raw)
# class _BZ2Proxy
#------------------------
# Extraction file object
#------------------------
class _FileInFile(object):
"""A thin wrapper around an existing file object that
provides a part of its data as an individual file
object.
"""
def __init__(self, fileobj, offset, size, blockinfo=None):
self.fileobj = fileobj
self.offset = offset
self.size = size
self.position = 0
if blockinfo is None:
blockinfo = [(0, size)]
# Construct a map with data and zero blocks.
self.map_index = 0
self.map = []
lastpos = 0
realpos = self.offset
for offset, size in blockinfo:
if offset > lastpos:
self.map.append((False, lastpos, offset, None))
self.map.append((True, offset, offset + size, realpos))
realpos += size
lastpos = offset + size
if lastpos < self.size:
self.map.append((False, lastpos, self.size, None))
def seekable(self):
if not hasattr(self.fileobj, "seekable"):
# XXX gzip.GzipFile and bz2.BZ2File
return True
return self.fileobj.seekable()
def tell(self):
"""Return the current file position.
"""
return self.position
def seek(self, position):
"""Seek to a position in the file.
"""
self.position = position
def read(self, size=None):
"""Read data from the file.
"""
if size is None:
size = self.size - self.position
else:
size = min(size, self.size - self.position)
buf = b""
while size > 0:
while True:
data, start, stop, offset = self.map[self.map_index]
if start <= self.position < stop:
break
else:
self.map_index += 1
if self.map_index == len(self.map):
self.map_index = 0
length = min(size, stop - self.position)
if data:
self.fileobj.seek(offset + (self.position - start))
buf += self.fileobj.read(length)
else:
buf += NUL * length
size -= length
self.position += length
return buf
#class _FileInFile
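# Worked example (added): for size=20 and blockinfo=[(0, 3), (10, 5)],
# the constructor above builds the map
#
#     [(True, 0, 3, offset), (False, 3, 10, None),
#      (True, 10, 15, offset + 3), (False, 15, 20, None)]
#
# where `offset' is the start of the member's data in the archive, so
# read() returns real data for positions [0, 3) and [10, 15) and NUL
# bytes for the sparse holes [3, 10) and [15, 20).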
class ExFileObject(object):
"""File-like object for reading an archive member.
Is returned by TarFile.extractfile().
"""
blocksize = 1024
def __init__(self, tarfile, tarinfo):
self.fileobj = _FileInFile(tarfile.fileobj,
tarinfo.offset_data,
tarinfo.size,
tarinfo.sparse)
self.name = tarinfo.name
self.mode = "r"
self.closed = False
self.size = tarinfo.size
self.position = 0
self.buffer = b""
def readable(self):
return True
def writable(self):
return False
def seekable(self):
return self.fileobj.seekable()
def read(self, size=None):
"""Read at most size bytes from the file. If size is not
present or None, read all data until EOF is reached.
"""
if self.closed:
raise ValueError("I/O operation on closed file")
buf = b""
if self.buffer:
if size is None:
buf = self.buffer
self.buffer = b""
else:
buf = self.buffer[:size]
self.buffer = self.buffer[size:]
if size is None:
buf += self.fileobj.read()
else:
buf += self.fileobj.read(size - len(buf))
self.position += len(buf)
return buf
# XXX TextIOWrapper uses the read1() method.
read1 = read
def readline(self, size=-1):
"""Read one entire line from the file. If size is present
and non-negative, return a string with at most that
size, which may be an incomplete line.
"""
if self.closed:
raise ValueError("I/O operation on closed file")
pos = self.buffer.find(b"\n") + 1
if pos == 0:
# no newline found.
while True:
buf = self.fileobj.read(self.blocksize)
self.buffer += buf
if not buf or b"\n" in buf:
pos = self.buffer.find(b"\n") + 1
if pos == 0:
# no newline found.
pos = len(self.buffer)
break
if size != -1:
pos = min(size, pos)
buf = self.buffer[:pos]
self.buffer = self.buffer[pos:]
self.position += len(buf)
return buf
def readlines(self):
"""Return a list with all remaining lines.
"""
result = []
while True:
line = self.readline()
if not line: break
result.append(line)
return result
def tell(self):
"""Return the current file position.
"""
if self.closed:
raise ValueError("I/O operation on closed file")
return self.position
def seek(self, pos, whence=os.SEEK_SET):
"""Seek to a position in the file.
"""
if self.closed:
raise ValueError("I/O operation on closed file")
if whence == os.SEEK_SET:
self.position = min(max(pos, 0), self.size)
elif whence == os.SEEK_CUR:
if pos < 0:
self.position = max(self.position + pos, 0)
else:
self.position = min(self.position + pos, self.size)
elif whence == os.SEEK_END:
self.position = max(min(self.size + pos, self.size), 0)
else:
raise ValueError("Invalid argument")
self.buffer = b""
self.fileobj.seek(self.position)
def close(self):
"""Close the file object.
"""
self.closed = True
def __iter__(self):
"""Get an iterator over the file's lines.
"""
while True:
line = self.readline()
if not line:
break
yield line
#class ExFileObject
#------------------
# Exported Classes
#------------------
class TarInfo(object):
"""Informational class which holds the details about an
archive member given by a tar header block.
TarInfo objects are returned by TarFile.getmember(),
TarFile.getmembers() and TarFile.gettarinfo() and are
usually created internally.
"""
__slots__ = ("name", "mode", "uid", "gid", "size", "mtime",
"chksum", "type", "linkname", "uname", "gname",
"devmajor", "devminor",
"offset", "offset_data", "pax_headers", "sparse",
"tarfile", "_sparse_structs", "_link_target")
def __init__(self, name=""):
"""Construct a TarInfo object. name is the optional name
of the member.
"""
self.name = name # member name
self.mode = 0o644 # file permissions
self.uid = 0 # user id
self.gid = 0 # group id
self.size = 0 # file size
self.mtime = 0 # modification time
self.chksum = 0 # header checksum
self.type = REGTYPE # member type
self.linkname = "" # link name
self.uname = "" # user name
self.gname = "" # group name
self.devmajor = 0 # device major number
self.devminor = 0 # device minor number
self.offset = 0 # the tar header starts here
self.offset_data = 0 # the file's data starts here
self.sparse = None # sparse member information
self.pax_headers = {} # pax header information
# In pax headers the "name" and "linkname" field are called
# "path" and "linkpath".
def _getpath(self):
return self.name
def _setpath(self, name):
self.name = name
path = property(_getpath, _setpath)
def _getlinkpath(self):
return self.linkname
def _setlinkpath(self, linkname):
self.linkname = linkname
linkpath = property(_getlinkpath, _setlinkpath)
def __repr__(self):
return "<%s %r at %#x>" % (self.__class__.__name__,self.name,id(self))
def get_info(self):
"""Return the TarInfo's attributes as a dictionary.
"""
info = {
"name": self.name,
"mode": self.mode & 0o7777,
"uid": self.uid,
"gid": self.gid,
"size": self.size,
"mtime": self.mtime,
"chksum": self.chksum,
"type": self.type,
"linkname": self.linkname,
"uname": self.uname,
"gname": self.gname,
"devmajor": self.devmajor,
"devminor": self.devminor
}
if info["type"] == DIRTYPE and not info["name"].endswith("/"):
info["name"] += "/"
return info
def tobuf(self, format=DEFAULT_FORMAT, encoding=ENCODING, errors="surrogateescape"):
"""Return a tar header as a string of 512 byte blocks.
"""
info = self.get_info()
if format == USTAR_FORMAT:
return self.create_ustar_header(info, encoding, errors)
elif format == GNU_FORMAT:
return self.create_gnu_header(info, encoding, errors)
elif format == PAX_FORMAT:
return self.create_pax_header(info, encoding)
else:
raise ValueError("invalid format")
def create_ustar_header(self, info, encoding, errors):
"""Return the object as a ustar header block.
"""
info["magic"] = POSIX_MAGIC
if len(info["linkname"]) > LENGTH_LINK:
raise ValueError("linkname is too long")
if len(info["name"]) > LENGTH_NAME:
info["prefix"], info["name"] = self._posix_split_name(info["name"])
return self._create_header(info, USTAR_FORMAT, encoding, errors)
def create_gnu_header(self, info, encoding, errors):
"""Return the object as a GNU header block sequence.
"""
info["magic"] = GNU_MAGIC
buf = b""
if len(info["linkname"]) > LENGTH_LINK:
buf += self._create_gnu_long_header(info["linkname"], GNUTYPE_LONGLINK, encoding, errors)
if len(info["name"]) > LENGTH_NAME:
buf += self._create_gnu_long_header(info["name"], GNUTYPE_LONGNAME, encoding, errors)
return buf + self._create_header(info, GNU_FORMAT, encoding, errors)
def create_pax_header(self, info, encoding):
"""Return the object as a ustar header block. If it cannot be
represented this way, prepend a pax extended header sequence
           with supplemental information.
"""
info["magic"] = POSIX_MAGIC
pax_headers = self.pax_headers.copy()
# Test string fields for values that exceed the field length or cannot
# be represented in ASCII encoding.
for name, hname, length in (
("name", "path", LENGTH_NAME), ("linkname", "linkpath", LENGTH_LINK),
("uname", "uname", 32), ("gname", "gname", 32)):
if hname in pax_headers:
# The pax header has priority.
continue
# Try to encode the string as ASCII.
try:
info[name].encode("ascii", "strict")
except UnicodeEncodeError:
pax_headers[hname] = info[name]
continue
if len(info[name]) > length:
pax_headers[hname] = info[name]
        # Test number fields for values that exceed the field limit or
        # that need to be stored as a float.
for name, digits in (("uid", 8), ("gid", 8), ("size", 12), ("mtime", 12)):
if name in pax_headers:
# The pax header has priority. Avoid overflow.
info[name] = 0
continue
val = info[name]
if not 0 <= val < 8 ** (digits - 1) or isinstance(val, float):
pax_headers[name] = str(val)
info[name] = 0
# Create a pax extended header if necessary.
if pax_headers:
buf = self._create_pax_generic_header(pax_headers, XHDTYPE, encoding)
else:
buf = b""
return buf + self._create_header(info, USTAR_FORMAT, "ascii", "replace")
@classmethod
def create_pax_global_header(cls, pax_headers):
"""Return the object as a pax global header block sequence.
"""
return cls._create_pax_generic_header(pax_headers, XGLTYPE, "utf8")
def _posix_split_name(self, name):
"""Split a name longer than 100 chars into a prefix
and a name part.
"""
prefix = name[:LENGTH_PREFIX + 1]
while prefix and prefix[-1] != "/":
prefix = prefix[:-1]
name = name[len(prefix):]
prefix = prefix[:-1]
if not prefix or len(name) > LENGTH_NAME:
raise ValueError("name is too long")
return prefix, name
@staticmethod
def _create_header(info, format, encoding, errors):
"""Return a header block. info is a dictionary with file
information, format must be one of the *_FORMAT constants.
"""
parts = [
stn(info.get("name", ""), 100, encoding, errors),
itn(info.get("mode", 0) & 0o7777, 8, format),
itn(info.get("uid", 0), 8, format),
itn(info.get("gid", 0), 8, format),
itn(info.get("size", 0), 12, format),
itn(info.get("mtime", 0), 12, format),
b" ", # checksum field
info.get("type", REGTYPE),
stn(info.get("linkname", ""), 100, encoding, errors),
info.get("magic", POSIX_MAGIC),
stn(info.get("uname", ""), 32, encoding, errors),
stn(info.get("gname", ""), 32, encoding, errors),
itn(info.get("devmajor", 0), 8, format),
itn(info.get("devminor", 0), 8, format),
stn(info.get("prefix", ""), 155, encoding, errors)
]
buf = struct.pack("%ds" % BLOCKSIZE, b"".join(parts))
chksum = calc_chksums(buf[-BLOCKSIZE:])[0]
buf = buf[:-364] + ("%06o\0" % chksum).encode("ascii") + buf[-357:]
return buf
@staticmethod
def _create_payload(payload):
"""Return the string payload filled with zero bytes
up to the next 512 byte border.
"""
blocks, remainder = divmod(len(payload), BLOCKSIZE)
if remainder > 0:
payload += (BLOCKSIZE - remainder) * NUL
return payload
@classmethod
def _create_gnu_long_header(cls, name, type, encoding, errors):
"""Return a GNUTYPE_LONGNAME or GNUTYPE_LONGLINK sequence
for name.
"""
name = name.encode(encoding, errors) + NUL
info = {}
info["name"] = "././@LongLink"
info["type"] = type
info["size"] = len(name)
info["magic"] = GNU_MAGIC
# create extended header + name blocks.
return cls._create_header(info, USTAR_FORMAT, encoding, errors) + \
cls._create_payload(name)
@classmethod
def _create_pax_generic_header(cls, pax_headers, type, encoding):
"""Return a POSIX.1-2008 extended or global header sequence
that contains a list of keyword, value pairs. The values
must be strings.
"""
# Check if one of the fields contains surrogate characters and thereby
# forces hdrcharset=BINARY, see _proc_pax() for more information.
binary = False
for keyword, value in pax_headers.items():
try:
value.encode("utf8", "strict")
except UnicodeEncodeError:
binary = True
break
records = b""
if binary:
# Put the hdrcharset field at the beginning of the header.
records += b"21 hdrcharset=BINARY\n"
for keyword, value in pax_headers.items():
keyword = keyword.encode("utf8")
if binary:
                # Try to restore the original byte representation of `value'.
                # Needless to say, the encoding must match the string.
value = value.encode(encoding, "surrogateescape")
else:
value = value.encode("utf8")
l = len(keyword) + len(value) + 3 # ' ' + '=' + '\n'
n = p = 0
while True:
n = l + len(str(p))
if n == p:
break
p = n
records += bytes(str(p), "ascii") + b" " + keyword + b"=" + value + b"\n"
# We use a hardcoded "././@PaxHeader" name like star does
# instead of the one that POSIX recommends.
info = {}
info["name"] = "././@PaxHeader"
info["type"] = type
info["size"] = len(records)
info["magic"] = POSIX_MAGIC
# Create pax header + record blocks.
return cls._create_header(info, USTAR_FORMAT, "ascii", "replace") + \
cls._create_payload(records)
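    # Worked example for the record-length loop above (added): for
    # keyword "path" and value "hello.txt", l = 4 + 9 + 3 = 16 and the
    # loop converges on p = 18, because the decimal length "18" itself
    # occupies two bytes; the record becomes b"18 path=hello.txt\n",
    # which is exactly 18 bytes long.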
@classmethod
def frombuf(cls, buf, encoding, errors):
"""Construct a TarInfo object from a 512 byte bytes object.
"""
if len(buf) == 0:
raise EmptyHeaderError("empty header")
if len(buf) != BLOCKSIZE:
raise TruncatedHeaderError("truncated header")
if buf.count(NUL) == BLOCKSIZE:
raise EOFHeaderError("end of file header")
chksum = nti(buf[148:156])
if chksum not in calc_chksums(buf):
raise InvalidHeaderError("bad checksum")
obj = cls()
obj.name = nts(buf[0:100], encoding, errors)
obj.mode = nti(buf[100:108])
obj.uid = nti(buf[108:116])
obj.gid = nti(buf[116:124])
obj.size = nti(buf[124:136])
obj.mtime = nti(buf[136:148])
obj.chksum = chksum
obj.type = buf[156:157]
obj.linkname = nts(buf[157:257], encoding, errors)
obj.uname = nts(buf[265:297], encoding, errors)
obj.gname = nts(buf[297:329], encoding, errors)
obj.devmajor = nti(buf[329:337])
obj.devminor = nti(buf[337:345])
prefix = nts(buf[345:500], encoding, errors)
# Old V7 tar format represents a directory as a regular
# file with a trailing slash.
if obj.type == AREGTYPE and obj.name.endswith("/"):
obj.type = DIRTYPE
# The old GNU sparse format occupies some of the unused
# space in the buffer for up to 4 sparse structures.
        # Save them for later processing in _proc_sparse().
if obj.type == GNUTYPE_SPARSE:
pos = 386
structs = []
for i in range(4):
try:
offset = nti(buf[pos:pos + 12])
numbytes = nti(buf[pos + 12:pos + 24])
except ValueError:
break
structs.append((offset, numbytes))
pos += 24
isextended = bool(buf[482])
origsize = nti(buf[483:495])
obj._sparse_structs = (structs, isextended, origsize)
# Remove redundant slashes from directories.
if obj.isdir():
obj.name = obj.name.rstrip("/")
# Reconstruct a ustar longname.
if prefix and obj.type not in GNU_TYPES:
obj.name = prefix + "/" + obj.name
return obj
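    # Illustrative round-trip with tobuf() (added example):
    #
    #     ti = TarInfo("hello.txt")
    #     ti.size = 11
    #     buf = ti.tobuf(USTAR_FORMAT)
    #     clone = TarInfo.frombuf(buf[:BLOCKSIZE], ENCODING,
    #                             "surrogateescape")
    #     assert clone.name == "hello.txt" and clone.size == 11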
@classmethod
def fromtarfile(cls, tarfile):
"""Return the next TarInfo object from TarFile object
tarfile.
"""
buf = tarfile.fileobj.read(BLOCKSIZE)
obj = cls.frombuf(buf, tarfile.encoding, tarfile.errors)
obj.offset = tarfile.fileobj.tell() - BLOCKSIZE
return obj._proc_member(tarfile)
#--------------------------------------------------------------------------
# The following are methods that are called depending on the type of a
# member. The entry point is _proc_member() which can be overridden in a
# subclass to add custom _proc_*() methods. A _proc_*() method MUST
# implement the following
# operations:
# 1. Set self.offset_data to the position where the data blocks begin,
# if there is data that follows.
# 2. Set tarfile.offset to the position where the next member's header will
# begin.
# 3. Return self or another valid TarInfo object.
def _proc_member(self, tarfile):
"""Choose the right processing method depending on
the type and call it.
"""
if self.type in (GNUTYPE_LONGNAME, GNUTYPE_LONGLINK):
return self._proc_gnulong(tarfile)
elif self.type == GNUTYPE_SPARSE:
return self._proc_sparse(tarfile)
elif self.type in (XHDTYPE, XGLTYPE, SOLARIS_XHDTYPE):
return self._proc_pax(tarfile)
else:
return self._proc_builtin(tarfile)
def _proc_builtin(self, tarfile):
"""Process a builtin type or an unknown type which
will be treated as a regular file.
"""
self.offset_data = tarfile.fileobj.tell()
offset = self.offset_data
if self.isreg() or self.type not in SUPPORTED_TYPES:
# Skip the following data blocks.
offset += self._block(self.size)
tarfile.offset = offset
# Patch the TarInfo object with saved global
# header information.
self._apply_pax_info(tarfile.pax_headers, tarfile.encoding, tarfile.errors)
return self
def _proc_gnulong(self, tarfile):
"""Process the blocks that hold a GNU longname
or longlink member.
"""
buf = tarfile.fileobj.read(self._block(self.size))
# Fetch the next header and process it.
try:
next = self.fromtarfile(tarfile)
except HeaderError:
raise SubsequentHeaderError("missing or bad subsequent header")
# Patch the TarInfo object from the next header with
# the longname information.
next.offset = self.offset
if self.type == GNUTYPE_LONGNAME:
next.name = nts(buf, tarfile.encoding, tarfile.errors)
elif self.type == GNUTYPE_LONGLINK:
next.linkname = nts(buf, tarfile.encoding, tarfile.errors)
return next
def _proc_sparse(self, tarfile):
"""Process a GNU sparse header plus extra headers.
"""
# We already collected some sparse structures in frombuf().
structs, isextended, origsize = self._sparse_structs
del self._sparse_structs
# Collect sparse structures from extended header blocks.
while isextended:
buf = tarfile.fileobj.read(BLOCKSIZE)
pos = 0
for i in range(21):
try:
offset = nti(buf[pos:pos + 12])
numbytes = nti(buf[pos + 12:pos + 24])
except ValueError:
break
if offset and numbytes:
structs.append((offset, numbytes))
pos += 24
isextended = bool(buf[504])
self.sparse = structs
self.offset_data = tarfile.fileobj.tell()
tarfile.offset = self.offset_data + self._block(self.size)
self.size = origsize
return self
def _proc_pax(self, tarfile):
"""Process an extended or global header as described in
POSIX.1-2008.
"""
# Read the header information.
buf = tarfile.fileobj.read(self._block(self.size))
# A pax header stores supplemental information for either
# the following file (extended) or all following files
# (global).
if self.type == XGLTYPE:
pax_headers = tarfile.pax_headers
else:
pax_headers = tarfile.pax_headers.copy()
        # Check if the pax header contains a hdrcharset field. This tells us
        # the encoding of the path, linkpath, uname and gname fields. Normally,
        # these fields are UTF-8 encoded, but POSIX.1-2008 allows tar
        # implementations to store them as raw binary strings if the
        # translation to UTF-8 fails.
match = re.search(br"\d+ hdrcharset=([^\n]+)\n", buf)
if match is not None:
pax_headers["hdrcharset"] = match.group(1).decode("utf8")
# For the time being, we don't care about anything other than "BINARY".
# The only other value that is currently allowed by the standard is
# "ISO-IR 10646 2000 UTF-8" in other words UTF-8.
hdrcharset = pax_headers.get("hdrcharset")
if hdrcharset == "BINARY":
encoding = tarfile.encoding
else:
encoding = "utf8"
        # Parse pax header information. A record looks like this:
# "%d %s=%s\n" % (length, keyword, value). length is the size
# of the complete record including the length field itself and
# the newline. keyword and value are both UTF-8 encoded strings.
regex = re.compile(br"(\d+) ([^=]+)=")
pos = 0
while True:
match = regex.match(buf, pos)
if not match:
break
length, keyword = match.groups()
length = int(length)
value = buf[match.end(2) + 1:match.start(1) + length - 1]
# Normally, we could just use "utf8" as the encoding and "strict"
# as the error handler, but we better not take the risk. For
# example, GNU tar <= 1.23 is known to store filenames it cannot
# translate to UTF-8 as raw strings (unfortunately without a
# hdrcharset=BINARY header).
# We first try the strict standard encoding, and if that fails we
# fall back on the user's encoding and error handler.
keyword = self._decode_pax_field(keyword, "utf8", "utf8",
tarfile.errors)
if keyword in PAX_NAME_FIELDS:
value = self._decode_pax_field(value, encoding, tarfile.encoding,
tarfile.errors)
else:
value = self._decode_pax_field(value, "utf8", "utf8",
tarfile.errors)
pax_headers[keyword] = value
pos += length
# Fetch the next header.
try:
next = self.fromtarfile(tarfile)
except HeaderError:
raise SubsequentHeaderError("missing or bad subsequent header")
# Process GNU sparse information.
if "GNU.sparse.map" in pax_headers:
# GNU extended sparse format version 0.1.
self._proc_gnusparse_01(next, pax_headers)
elif "GNU.sparse.size" in pax_headers:
# GNU extended sparse format version 0.0.
self._proc_gnusparse_00(next, pax_headers, buf)
elif pax_headers.get("GNU.sparse.major") == "1" and pax_headers.get("GNU.sparse.minor") == "0":
# GNU extended sparse format version 1.0.
self._proc_gnusparse_10(next, pax_headers, tarfile)
if self.type in (XHDTYPE, SOLARIS_XHDTYPE):
# Patch the TarInfo object with the extended header info.
next._apply_pax_info(pax_headers, tarfile.encoding, tarfile.errors)
next.offset = self.offset
if "size" in pax_headers:
# If the extended header replaces the size field,
# we need to recalculate the offset where the next
# header starts.
offset = next.offset_data
if next.isreg() or next.type not in SUPPORTED_TYPES:
offset += next._block(next.size)
tarfile.offset = offset
return next
def _proc_gnusparse_00(self, next, pax_headers, buf):
"""Process a GNU tar extended sparse header, version 0.0.
"""
offsets = []
for match in re.finditer(br"\d+ GNU.sparse.offset=(\d+)\n", buf):
offsets.append(int(match.group(1)))
numbytes = []
for match in re.finditer(br"\d+ GNU.sparse.numbytes=(\d+)\n", buf):
numbytes.append(int(match.group(1)))
next.sparse = list(zip(offsets, numbytes))
def _proc_gnusparse_01(self, next, pax_headers):
"""Process a GNU tar extended sparse header, version 0.1.
"""
sparse = [int(x) for x in pax_headers["GNU.sparse.map"].split(",")]
next.sparse = list(zip(sparse[::2], sparse[1::2]))
def _proc_gnusparse_10(self, next, pax_headers, tarfile):
"""Process a GNU tar extended sparse header, version 1.0.
"""
fields = None
sparse = []
buf = tarfile.fileobj.read(BLOCKSIZE)
fields, buf = buf.split(b"\n", 1)
fields = int(fields)
while len(sparse) < fields * 2:
if b"\n" not in buf:
buf += tarfile.fileobj.read(BLOCKSIZE)
number, buf = buf.split(b"\n", 1)
sparse.append(int(number))
next.offset_data = tarfile.fileobj.tell()
next.sparse = list(zip(sparse[::2], sparse[1::2]))
def _apply_pax_info(self, pax_headers, encoding, errors):
"""Replace fields with supplemental information from a previous
pax extended or global header.
"""
for keyword, value in pax_headers.items():
if keyword == "GNU.sparse.name":
setattr(self, "path", value)
elif keyword == "GNU.sparse.size":
setattr(self, "size", int(value))
elif keyword == "GNU.sparse.realsize":
setattr(self, "size", int(value))
elif keyword in PAX_FIELDS:
if keyword in PAX_NUMBER_FIELDS:
try:
value = PAX_NUMBER_FIELDS[keyword](value)
except ValueError:
value = 0
if keyword == "path":
value = value.rstrip("/")
setattr(self, keyword, value)
self.pax_headers = pax_headers.copy()
def _decode_pax_field(self, value, encoding, fallback_encoding, fallback_errors):
"""Decode a single field from a pax record.
"""
try:
return value.decode(encoding, "strict")
except UnicodeDecodeError:
return value.decode(fallback_encoding, fallback_errors)
def _block(self, count):
"""Round up a byte count by BLOCKSIZE and return it,
e.g. _block(834) => 1024.
"""
blocks, remainder = divmod(count, BLOCKSIZE)
if remainder:
blocks += 1
return blocks * BLOCKSIZE
def isreg(self):
return self.type in REGULAR_TYPES
def isfile(self):
return self.isreg()
def isdir(self):
return self.type == DIRTYPE
def issym(self):
return self.type == SYMTYPE
def islnk(self):
return self.type == LNKTYPE
def ischr(self):
return self.type == CHRTYPE
def isblk(self):
return self.type == BLKTYPE
def isfifo(self):
return self.type == FIFOTYPE
def issparse(self):
return self.sparse is not None
def isdev(self):
return self.type in (CHRTYPE, BLKTYPE, FIFOTYPE)
# class TarInfo
class TarFile(object):
"""The TarFile Class provides an interface to tar archives.
"""
debug = 0 # May be set from 0 (no msgs) to 3 (all msgs)
dereference = False # If true, add content of linked file to the
# tar file, else the link.
ignore_zeros = False # If true, skips empty or invalid blocks and
# continues processing.
errorlevel = 1 # If 0, fatal errors only appear in debug
# messages (if debug >= 0). If > 0, errors
# are passed to the caller as exceptions.
format = DEFAULT_FORMAT # The format to use when creating an archive.
encoding = ENCODING # Encoding for 8-bit character strings.
errors = None # Error handler for unicode conversion.
tarinfo = TarInfo # The default TarInfo class to use.
fileobject = ExFileObject # The default ExFileObject class to use.
def __init__(self, name=None, mode="r", fileobj=None, format=None,
tarinfo=None, dereference=None, ignore_zeros=None, encoding=None,
errors="surrogateescape", pax_headers=None, debug=None, errorlevel=None):
"""Open an (uncompressed) tar archive `name'. `mode' is either 'r' to
read from an existing archive, 'a' to append data to an existing
file or 'w' to create a new file overwriting an existing one. `mode'
defaults to 'r'.
If `fileobj' is given, it is used for reading or writing data. If it
can be determined, `mode' is overridden by `fileobj's mode.
`fileobj' is not closed, when TarFile is closed.
"""
if len(mode) > 1 or mode not in "raw":
raise ValueError("mode must be 'r', 'a' or 'w'")
self.mode = mode
self._mode = {"r": "rb", "a": "r+b", "w": "wb"}[mode]
if not fileobj:
if self.mode == "a" and not os.path.exists(name):
# Create nonexistent files in append mode.
self.mode = "w"
self._mode = "wb"
fileobj = bltn_open(name, self._mode)
self._extfileobj = False
else:
if name is None and hasattr(fileobj, "name"):
name = fileobj.name
if hasattr(fileobj, "mode"):
self._mode = fileobj.mode
self._extfileobj = True
self.name = os.path.abspath(name) if name else None
self.fileobj = fileobj
# Init attributes.
if format is not None:
self.format = format
if tarinfo is not None:
self.tarinfo = tarinfo
if dereference is not None:
self.dereference = dereference
if ignore_zeros is not None:
self.ignore_zeros = ignore_zeros
if encoding is not None:
self.encoding = encoding
self.errors = errors
if pax_headers is not None and self.format == PAX_FORMAT:
self.pax_headers = pax_headers
else:
self.pax_headers = {}
if debug is not None:
self.debug = debug
if errorlevel is not None:
self.errorlevel = errorlevel
# Init datastructures.
self.closed = False
self.members = [] # list of members as TarInfo objects
self._loaded = False # flag if all members have been read
self.offset = self.fileobj.tell()
# current position in the archive file
self.inodes = {} # dictionary caching the inodes of
# archive members already added
try:
if self.mode == "r":
self.firstmember = None
self.firstmember = self.next()
if self.mode == "a":
# Move to the end of the archive,
# before the first empty block.
while True:
self.fileobj.seek(self.offset)
try:
tarinfo = self.tarinfo.fromtarfile(self)
self.members.append(tarinfo)
except EOFHeaderError:
self.fileobj.seek(self.offset)
break
except HeaderError as e:
raise ReadError(str(e))
if self.mode in "aw":
self._loaded = True
if self.pax_headers:
buf = self.tarinfo.create_pax_global_header(self.pax_headers.copy())
self.fileobj.write(buf)
self.offset += len(buf)
except:
if not self._extfileobj:
self.fileobj.close()
self.closed = True
raise
#--------------------------------------------------------------------------
# Below are the classmethods which act as alternate constructors to the
# TarFile class. The open() method is the only one that is needed for
# public use; it is the "super"-constructor and is able to select an
# adequate "sub"-constructor for a particular compression using the mapping
# from OPEN_METH.
#
# This concept allows one to subclass TarFile without losing the comfort of
# the super-constructor. A sub-constructor is registered and made available
# by adding it to the mapping in OPEN_METH.
@classmethod
def open(cls, name=None, mode="r", fileobj=None, bufsize=RECORDSIZE, **kwargs):
"""Open a tar archive for reading, writing or appending. Return
an appropriate TarFile class.
mode:
'r' or 'r:*' open for reading with transparent compression
'r:' open for reading exclusively uncompressed
'r:gz' open for reading with gzip compression
'r:bz2' open for reading with bzip2 compression
'a' or 'a:' open for appending, creating the file if necessary
'w' or 'w:' open for writing without compression
'w:gz' open for writing with gzip compression
'w:bz2' open for writing with bzip2 compression
'r|*' open a stream of tar blocks with transparent compression
'r|' open an uncompressed stream of tar blocks for reading
'r|gz' open a gzip compressed stream of tar blocks
'r|bz2' open a bzip2 compressed stream of tar blocks
'w|' open an uncompressed stream for writing
'w|gz' open a gzip compressed stream for writing
'w|bz2' open a bzip2 compressed stream for writing
"""
if not name and not fileobj:
raise ValueError("nothing to open")
if mode in ("r", "r:*"):
# Find out which *open() is appropriate for opening the file.
for comptype in cls.OPEN_METH:
func = getattr(cls, cls.OPEN_METH[comptype])
if fileobj is not None:
saved_pos = fileobj.tell()
try:
return func(name, "r", fileobj, **kwargs)
except (ReadError, CompressionError) as e:
if fileobj is not None:
fileobj.seek(saved_pos)
continue
raise ReadError("file could not be opened successfully")
elif ":" in mode:
filemode, comptype = mode.split(":", 1)
filemode = filemode or "r"
comptype = comptype or "tar"
# Select the *open() function according to
# given compression.
if comptype in cls.OPEN_METH:
func = getattr(cls, cls.OPEN_METH[comptype])
else:
raise CompressionError("unknown compression type %r" % comptype)
return func(name, filemode, fileobj, **kwargs)
elif "|" in mode:
filemode, comptype = mode.split("|", 1)
filemode = filemode or "r"
comptype = comptype or "tar"
if filemode not in "rw":
raise ValueError("mode must be 'r' or 'w'")
stream = _Stream(name, filemode, comptype, fileobj, bufsize)
try:
t = cls(name, filemode, stream, **kwargs)
except:
stream.close()
raise
t._extfileobj = False
return t
elif mode in "aw":
return cls.taropen(name, mode, fileobj, **kwargs)
raise ValueError("undiscernible mode")
@classmethod
def taropen(cls, name, mode="r", fileobj=None, **kwargs):
"""Open uncompressed tar archive name for reading or writing.
"""
if len(mode) > 1 or mode not in "raw":
raise ValueError("mode must be 'r', 'a' or 'w'")
return cls(name, mode, fileobj, **kwargs)
@classmethod
def gzopen(cls, name, mode="r", fileobj=None, compresslevel=9, **kwargs):
"""Open gzip compressed tar archive name for reading or writing.
Appending is not allowed.
"""
if len(mode) > 1 or mode not in "rw":
raise ValueError("mode must be 'r' or 'w'")
try:
import gzip
gzip.GzipFile
except (ImportError, AttributeError):
raise CompressionError("gzip module is not available")
extfileobj = fileobj is not None
try:
fileobj = gzip.GzipFile(name, mode + "b", compresslevel, fileobj)
t = cls.taropen(name, mode, fileobj, **kwargs)
except IOError:
if not extfileobj and fileobj is not None:
fileobj.close()
if fileobj is None:
raise
raise ReadError("not a gzip file")
except:
if not extfileobj and fileobj is not None:
fileobj.close()
raise
t._extfileobj = extfileobj
return t
@classmethod
def bz2open(cls, name, mode="r", fileobj=None, compresslevel=9, **kwargs):
"""Open bzip2 compressed tar archive name for reading or writing.
Appending is not allowed.
"""
if len(mode) > 1 or mode not in "rw":
raise ValueError("mode must be 'r' or 'w'.")
try:
import bz2
except ImportError:
raise CompressionError("bz2 module is not available")
if fileobj is not None:
fileobj = _BZ2Proxy(fileobj, mode)
else:
fileobj = bz2.BZ2File(name, mode, compresslevel=compresslevel)
try:
t = cls.taropen(name, mode, fileobj, **kwargs)
except (IOError, EOFError):
fileobj.close()
raise ReadError("not a bzip2 file")
t._extfileobj = False
return t
# All *open() methods are registered here.
OPEN_METH = {
"tar": "taropen", # uncompressed tar
"gz": "gzopen", # gzip compressed tar
"bz2": "bz2open" # bzip2 compressed tar
}
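    # Illustrative usage of the mode strings above (added; the file
    # name and the `pipe' object are hypothetical):
    #
    #     tf = TarFile.open("backup.tar.gz", "r:gz")    # seekable file
    #     tf = TarFile.open(fileobj=pipe, mode="r|gz")  # one-way stream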
#--------------------------------------------------------------------------
# The public methods which TarFile provides:
def close(self):
"""Close the TarFile. In write-mode, two finishing zero blocks are
appended to the archive.
"""
if self.closed:
return
if self.mode in "aw":
self.fileobj.write(NUL * (BLOCKSIZE * 2))
self.offset += (BLOCKSIZE * 2)
# fill up the end with zero-blocks
# (like option -b20 for tar does)
blocks, remainder = divmod(self.offset, RECORDSIZE)
if remainder > 0:
self.fileobj.write(NUL * (RECORDSIZE - remainder))
if not self._extfileobj:
self.fileobj.close()
self.closed = True
def getmember(self, name):
"""Return a TarInfo object for member `name'. If `name' can not be
found in the archive, KeyError is raised. If a member occurs more
than once in the archive, its last occurrence is assumed to be the
most up-to-date version.
"""
tarinfo = self._getmember(name)
if tarinfo is None:
raise KeyError("filename %r not found" % name)
return tarinfo
def getmembers(self):
"""Return the members of the archive as a list of TarInfo objects. The
list has the same order as the members in the archive.
"""
self._check()
if not self._loaded: # if we want to obtain a list of
self._load() # all members, we first have to
# scan the whole archive.
return self.members
def getnames(self):
"""Return the members of the archive as a list of their names. It has
the same order as the list returned by getmembers().
"""
return [tarinfo.name for tarinfo in self.getmembers()]
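    # Illustrative usage (added; archive and member names are
    # hypothetical):
    #
    #     tf = TarFile.open("project.tar")
    #     print(tf.getnames())            # e.g. ['src', 'src/main.py']
    #     info = tf.getmember("src/main.py")
    #     print(info.size, info.mtime)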
def gettarinfo(self, name=None, arcname=None, fileobj=None):
"""Create a TarInfo object for either the file `name' or the file
object `fileobj' (using os.fstat on its file descriptor). You can
modify some of the TarInfo's attributes before you add it using
addfile(). If given, `arcname' specifies an alternative name for the
file in the archive.
"""
self._check("aw")
# When fileobj is given, replace name by
# fileobj's real name.
if fileobj is not None:
name = fileobj.name
# Building the name of the member in the archive.
# Backward slashes are converted to forward slashes,
# Absolute paths are turned to relative paths.
if arcname is None:
arcname = name
drv, arcname = os.path.splitdrive(arcname)
arcname = arcname.replace(os.sep, "/")
arcname = arcname.lstrip("/")
# Now, fill the TarInfo object with
# information specific for the file.
tarinfo = self.tarinfo()
tarinfo.tarfile = self
# Use os.stat or os.lstat, depending on platform
# and if symlinks shall be resolved.
if fileobj is None:
if hasattr(os, "lstat") and not self.dereference:
statres = os.lstat(name)
else:
statres = os.stat(name)
else:
statres = os.fstat(fileobj.fileno())
linkname = ""
stmd = statres.st_mode
if stat.S_ISREG(stmd):
inode = (statres.st_ino, statres.st_dev)
if not self.dereference and statres.st_nlink > 1 and \
inode in self.inodes and arcname != self.inodes[inode]:
# Is it a hardlink to an already
# archived file?
type = LNKTYPE
linkname = self.inodes[inode]
else:
                # The inode is added only if it is valid.
# For win32 it is always 0.
type = REGTYPE
if inode[0]:
self.inodes[inode] = arcname
elif stat.S_ISDIR(stmd):
type = DIRTYPE
elif stat.S_ISFIFO(stmd):
type = FIFOTYPE
elif stat.S_ISLNK(stmd):
type = SYMTYPE
linkname = os.readlink(name)
elif stat.S_ISCHR(stmd):
type = CHRTYPE
elif stat.S_ISBLK(stmd):
type = BLKTYPE
else:
return None
# Fill the TarInfo object with all
# information we can get.
tarinfo.name = arcname
tarinfo.mode = stmd
tarinfo.uid = statres.st_uid
tarinfo.gid = statres.st_gid
if type == REGTYPE:
tarinfo.size = statres.st_size
else:
tarinfo.size = 0
tarinfo.mtime = statres.st_mtime
tarinfo.type = type
tarinfo.linkname = linkname
if pwd:
try:
tarinfo.uname = pwd.getpwuid(tarinfo.uid)[0]
except KeyError:
pass
if grp:
try:
tarinfo.gname = grp.getgrgid(tarinfo.gid)[0]
except KeyError:
pass
if type in (CHRTYPE, BLKTYPE):
if hasattr(os, "major") and hasattr(os, "minor"):
tarinfo.devmajor = os.major(statres.st_rdev)
tarinfo.devminor = os.minor(statres.st_rdev)
return tarinfo
def list(self, verbose=True):
"""Print a table of contents to sys.stdout. If `verbose' is False, only
the names of the members are printed. If it is True, an `ls -l'-like
output is produced.
"""
self._check()
for tarinfo in self:
if verbose:
print(filemode(tarinfo.mode), end=' ')
print("%s/%s" % (tarinfo.uname or tarinfo.uid,
tarinfo.gname or tarinfo.gid), end=' ')
if tarinfo.ischr() or tarinfo.isblk():
print("%10s" % ("%d,%d" \
% (tarinfo.devmajor, tarinfo.devminor)), end=' ')
else:
print("%10d" % tarinfo.size, end=' ')
print("%d-%02d-%02d %02d:%02d:%02d" \
% time.localtime(tarinfo.mtime)[:6], end=' ')
print(tarinfo.name + ("/" if tarinfo.isdir() else ""), end=' ')
if verbose:
if tarinfo.issym():
print("->", tarinfo.linkname, end=' ')
if tarinfo.islnk():
print("link to", tarinfo.linkname, end=' ')
print()
def add(self, name, arcname=None, recursive=True, exclude=None, filter=None):
"""Add the file `name' to the archive. `name' may be any type of file
(directory, fifo, symbolic link, etc.). If given, `arcname'
specifies an alternative name for the file in the archive.
Directories are added recursively by default. This can be avoided by
setting `recursive' to False. `exclude' is a function that should
return True for each filename to be excluded. `filter' is a function
that expects a TarInfo object argument and returns the changed
TarInfo object, if it returns None the TarInfo object will be
excluded from the archive.
"""
self._check("aw")
if arcname is None:
arcname = name
# Exclude pathnames.
if exclude is not None:
import warnings
warnings.warn("use the filter argument instead",
DeprecationWarning, 2)
if exclude(name):
self._dbg(2, "tarfile: Excluded %r" % name)
return
# Skip if somebody tries to archive the archive...
if self.name is not None and os.path.abspath(name) == self.name:
self._dbg(2, "tarfile: Skipped %r" % name)
return
self._dbg(1, name)
# Create a TarInfo object from the file.
tarinfo = self.gettarinfo(name, arcname)
if tarinfo is None:
self._dbg(1, "tarfile: Unsupported type %r" % name)
return
# Change or exclude the TarInfo object.
if filter is not None:
tarinfo = filter(tarinfo)
if tarinfo is None:
self._dbg(2, "tarfile: Excluded %r" % name)
return
# Append the tar header and data to the archive.
if tarinfo.isreg():
f = bltn_open(name, "rb")
self.addfile(tarinfo, f)
f.close()
elif tarinfo.isdir():
self.addfile(tarinfo)
if recursive:
for f in os.listdir(name):
self.add(os.path.join(name, f), os.path.join(arcname, f),
recursive, exclude, filter=filter)
else:
self.addfile(tarinfo)
def addfile(self, tarinfo, fileobj=None):
"""Add the TarInfo object `tarinfo' to the archive. If `fileobj' is
given, tarinfo.size bytes are read from it and added to the archive.
You can create TarInfo objects using gettarinfo().
On Windows platforms, `fileobj' should always be opened with mode
'rb' to avoid irritation about the file size.
"""
self._check("aw")
tarinfo = copy.copy(tarinfo)
buf = tarinfo.tobuf(self.format, self.encoding, self.errors)
self.fileobj.write(buf)
self.offset += len(buf)
# If there's data to follow, append it.
if fileobj is not None:
copyfileobj(fileobj, self.fileobj, tarinfo.size)
blocks, remainder = divmod(tarinfo.size, BLOCKSIZE)
if remainder > 0:
self.fileobj.write(NUL * (BLOCKSIZE - remainder))
blocks += 1
self.offset += blocks * BLOCKSIZE
self.members.append(tarinfo)
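    # Illustrative sketch (added; assumes an archive opened for
    # writing, names are hypothetical):
    #
    #     import io
    #     tf = TarFile.open("out.tar", "w")
    #     data = b"hello world\n"
    #     ti = TarInfo("greeting.txt")
    #     ti.size = len(data)             # size must be set by hand
    #     tf.addfile(ti, io.BytesIO(data))
    #     tf.close()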
def extractall(self, path=".", members=None):
"""Extract all members from the archive to the current working
directory and set owner, modification time and permissions on
directories afterwards. `path' specifies a different directory
to extract to. `members' is optional and must be a subset of the
list returned by getmembers().
"""
directories = []
if members is None:
members = self
for tarinfo in members:
if tarinfo.isdir():
# Extract directories with a safe mode.
directories.append(tarinfo)
tarinfo = copy.copy(tarinfo)
tarinfo.mode = 0o700
# Do not set_attrs directories, as we will do that further down
self.extract(tarinfo, path, set_attrs=not tarinfo.isdir())
# Reverse sort directories.
directories.sort(key=lambda a: a.name)
directories.reverse()
# Set correct owner, mtime and filemode on directories.
for tarinfo in directories:
dirpath = os.path.join(path, tarinfo.name)
try:
self.chown(tarinfo, dirpath)
self.utime(tarinfo, dirpath)
self.chmod(tarinfo, dirpath)
except ExtractError as e:
if self.errorlevel > 1:
raise
else:
self._dbg(1, "tarfile: %s" % e)
def extract(self, member, path="", set_attrs=True):
"""Extract a member from the archive to the current working directory,
using its full name. Its file information is extracted as accurately
as possible. `member' may be a filename or a TarInfo object. You can
specify a different directory using `path'. File attributes (owner,
mtime, mode) are set unless `set_attrs' is False.
"""
self._check("r")
if isinstance(member, str):
tarinfo = self.getmember(member)
else:
tarinfo = member
# Prepare the link target for makelink().
if tarinfo.islnk():
tarinfo._link_target = os.path.join(path, tarinfo.linkname)
try:
self._extract_member(tarinfo, os.path.join(path, tarinfo.name),
set_attrs=set_attrs)
except EnvironmentError as e:
if self.errorlevel > 0:
raise
else:
if e.filename is None:
self._dbg(1, "tarfile: %s" % e.strerror)
else:
self._dbg(1, "tarfile: %s %r" % (e.strerror, e.filename))
except ExtractError as e:
if self.errorlevel > 1:
raise
else:
self._dbg(1, "tarfile: %s" % e)
def extractfile(self, member):
"""Extract a member from the archive as a file object. `member' may be
a filename or a TarInfo object. If `member' is a regular file, a
file-like object is returned. If `member' is a link, a file-like
object is constructed from the link's target. If `member' is none of
the above, None is returned.
The file-like object is read-only and provides the following
methods: read(), readline(), readlines(), seek() and tell()
"""
self._check("r")
if isinstance(member, str):
tarinfo = self.getmember(member)
else:
tarinfo = member
if tarinfo.isreg():
return self.fileobject(self, tarinfo)
elif tarinfo.type not in SUPPORTED_TYPES:
# If a member's type is unknown, it is treated as a
# regular file.
return self.fileobject(self, tarinfo)
elif tarinfo.islnk() or tarinfo.issym():
if isinstance(self.fileobj, _Stream):
# A small but ugly workaround for the case that someone tries
# to extract a (sym)link as a file-object from a non-seekable
# stream of tar blocks.
raise StreamError("cannot extract (sym)link as file object")
else:
# A (sym)link's file object is its target's file object.
return self.extractfile(self._find_link_target(tarinfo))
else:
# If there's no data associated with the member (directory, chrdev,
# blkdev, etc.), return None instead of a file object.
return None
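    # Illustrative usage (added; member name is hypothetical):
    #
    #     f = tf.extractfile("src/main.py")
    #     if f is not None:               # None for dirs, devices etc.
    #         first_line = f.readline()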
def _extract_member(self, tarinfo, targetpath, set_attrs=True):
"""Extract the TarInfo object tarinfo to a physical
file called targetpath.
"""
# Fetch the TarInfo object for the given name
# and build the destination pathname, replacing
# forward slashes to platform specific separators.
targetpath = targetpath.rstrip("/")
targetpath = targetpath.replace("/", os.sep)
# Create all upper directories.
upperdirs = os.path.dirname(targetpath)
if upperdirs and not os.path.exists(upperdirs):
# Create directories that are not part of the archive with
# default permissions.
os.makedirs(upperdirs)
if tarinfo.islnk() or tarinfo.issym():
self._dbg(1, "%s -> %s" % (tarinfo.name, tarinfo.linkname))
else:
self._dbg(1, tarinfo.name)
if tarinfo.isreg():
self.makefile(tarinfo, targetpath)
elif tarinfo.isdir():
self.makedir(tarinfo, targetpath)
elif tarinfo.isfifo():
self.makefifo(tarinfo, targetpath)
elif tarinfo.ischr() or tarinfo.isblk():
self.makedev(tarinfo, targetpath)
elif tarinfo.islnk() or tarinfo.issym():
self.makelink(tarinfo, targetpath)
elif tarinfo.type not in SUPPORTED_TYPES:
self.makeunknown(tarinfo, targetpath)
else:
self.makefile(tarinfo, targetpath)
if set_attrs:
self.chown(tarinfo, targetpath)
if not tarinfo.issym():
self.chmod(tarinfo, targetpath)
self.utime(tarinfo, targetpath)
#--------------------------------------------------------------------------
# Below are the different file methods. They are called via
# _extract_member() when extract() is called. They can be replaced in a
# subclass to implement other functionality.
def makedir(self, tarinfo, targetpath):
"""Make a directory called targetpath.
"""
try:
# Use a safe mode for the directory, the real mode is set
# later in _extract_member().
os.mkdir(targetpath, 0o700)
except EnvironmentError as e:
if e.errno != errno.EEXIST:
raise
def makefile(self, tarinfo, targetpath):
"""Make a file called targetpath.
"""
source = self.fileobj
source.seek(tarinfo.offset_data)
target = bltn_open(targetpath, "wb")
if tarinfo.sparse is not None:
for offset, size in tarinfo.sparse:
target.seek(offset)
copyfileobj(source, target, size)
else:
copyfileobj(source, target, tarinfo.size)
target.seek(tarinfo.size)
target.truncate()
target.close()
def makeunknown(self, tarinfo, targetpath):
"""Make a file from a TarInfo object with an unknown type
at targetpath.
"""
self.makefile(tarinfo, targetpath)
self._dbg(1, "tarfile: Unknown file type %r, " \
"extracted as regular file." % tarinfo.type)
def makefifo(self, tarinfo, targetpath):
"""Make a fifo called targetpath.
"""
if hasattr(os, "mkfifo"):
os.mkfifo(targetpath)
else:
raise ExtractError("fifo not supported by system")
def makedev(self, tarinfo, targetpath):
"""Make a character or block device called targetpath.
"""
if not hasattr(os, "mknod") or not hasattr(os, "makedev"):
raise ExtractError("special devices not supported by system")
mode = tarinfo.mode
if tarinfo.isblk():
mode |= stat.S_IFBLK
else:
mode |= stat.S_IFCHR
os.mknod(targetpath, mode,
os.makedev(tarinfo.devmajor, tarinfo.devminor))
def makelink(self, tarinfo, targetpath):
"""Make a (symbolic) link called targetpath. If it cannot be created
(platform limitation), we try to make a copy of the referenced file
instead of a link.
"""
try:
# For systems that support symbolic and hard links.
if tarinfo.issym():
os.symlink(tarinfo.linkname, targetpath)
else:
# See extract().
if os.path.exists(tarinfo._link_target):
os.link(tarinfo._link_target, targetpath)
else:
self._extract_member(self._find_link_target(tarinfo),
targetpath)
        except symlink_exception:
            # Link creation failed (platform limitation), so fall back to
            # extracting a copy of the link's target from the archive.
            try:
                self._extract_member(self._find_link_target(tarinfo),
                                     targetpath)
            except KeyError:
                raise ExtractError("unable to resolve link inside archive")
def chown(self, tarinfo, targetpath):
"""Set owner of targetpath according to tarinfo.
"""
if pwd and hasattr(os, "geteuid") and os.geteuid() == 0:
# We have to be root to do so.
try:
g = grp.getgrnam(tarinfo.gname)[2]
except KeyError:
g = tarinfo.gid
try:
u = pwd.getpwnam(tarinfo.uname)[2]
except KeyError:
u = tarinfo.uid
try:
if tarinfo.issym() and hasattr(os, "lchown"):
os.lchown(targetpath, u, g)
else:
if sys.platform != "os2emx":
os.chown(targetpath, u, g)
except EnvironmentError as e:
raise ExtractError("could not change owner")
def chmod(self, tarinfo, targetpath):
"""Set file permissions of targetpath according to tarinfo.
"""
if hasattr(os, 'chmod'):
try:
os.chmod(targetpath, tarinfo.mode)
except EnvironmentError as e:
raise ExtractError("could not change mode")
def utime(self, tarinfo, targetpath):
"""Set modification time of targetpath according to tarinfo.
"""
if not hasattr(os, 'utime'):
return
try:
os.utime(targetpath, (tarinfo.mtime, tarinfo.mtime))
except EnvironmentError as e:
raise ExtractError("could not change modification time")
#--------------------------------------------------------------------------
def next(self):
"""Return the next member of the archive as a TarInfo object, when
           TarFile is opened for reading. Return None if no more members
           are available.
"""
self._check("ra")
if self.firstmember is not None:
m = self.firstmember
self.firstmember = None
return m
# Read the next block.
self.fileobj.seek(self.offset)
tarinfo = None
while True:
try:
tarinfo = self.tarinfo.fromtarfile(self)
except EOFHeaderError as e:
if self.ignore_zeros:
self._dbg(2, "0x%X: %s" % (self.offset, e))
self.offset += BLOCKSIZE
continue
except InvalidHeaderError as e:
if self.ignore_zeros:
self._dbg(2, "0x%X: %s" % (self.offset, e))
self.offset += BLOCKSIZE
continue
elif self.offset == 0:
raise ReadError(str(e))
except EmptyHeaderError:
if self.offset == 0:
raise ReadError("empty file")
except TruncatedHeaderError as e:
if self.offset == 0:
raise ReadError(str(e))
except SubsequentHeaderError as e:
raise ReadError(str(e))
break
if tarinfo is not None:
self.members.append(tarinfo)
else:
self._loaded = True
return tarinfo
#--------------------------------------------------------------------------
# Little helper methods:
def _getmember(self, name, tarinfo=None, normalize=False):
"""Find an archive member by name from bottom to top.
If tarinfo is given, it is used as the starting point.
"""
# Ensure that all members have been loaded.
members = self.getmembers()
# Limit the member search list up to tarinfo.
if tarinfo is not None:
members = members[:members.index(tarinfo)]
if normalize:
name = os.path.normpath(name)
for member in reversed(members):
if normalize:
member_name = os.path.normpath(member.name)
else:
member_name = member.name
if name == member_name:
return member
def _load(self):
"""Read through the entire archive file and look for readable
members.
"""
while True:
tarinfo = self.next()
if tarinfo is None:
break
self._loaded = True
def _check(self, mode=None):
"""Check if TarFile is still open, and if the operation's mode
corresponds to TarFile's mode.
"""
if self.closed:
raise IOError("%s is closed" % self.__class__.__name__)
if mode is not None and self.mode not in mode:
raise IOError("bad operation for mode %r" % self.mode)
def _find_link_target(self, tarinfo):
"""Find the target member of a symlink or hardlink member in the
archive.
"""
if tarinfo.issym():
# Always search the entire archive.
linkname = os.path.dirname(tarinfo.name) + "/" + tarinfo.linkname
limit = None
else:
# Search the archive before the link, because a hard link is
# just a reference to an already archived file.
linkname = tarinfo.linkname
limit = tarinfo
member = self._getmember(linkname, tarinfo=limit, normalize=True)
if member is None:
raise KeyError("linkname %r not found" % linkname)
return member
def __iter__(self):
"""Provide an iterator object.
"""
if self._loaded:
return iter(self.members)
else:
return TarIter(self)
def _dbg(self, level, msg):
"""Write debugging output to sys.stderr.
"""
if level <= self.debug:
print(msg, file=sys.stderr)
def __enter__(self):
self._check()
return self
def __exit__(self, type, value, traceback):
if type is None:
self.close()
else:
# An exception occurred. We must not call close() because
# it would try to write end-of-archive blocks and padding.
if not self._extfileobj:
self.fileobj.close()
self.closed = True
# class TarFile
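# Illustrative sketch (not part of the original module): TarFile above is
# normally driven through the context-manager protocol it defines; the
# archive and destination names here are hypothetical placeholders.
def _demo_extract_all(archive="example.tar", dest="out"):
    """Extract every member of `archive` into `dest`."""
    with TarFile.open(archive) as tf:       # __enter__ returns the TarFile
        for member in tf:                   # iteration goes through TarIter
            tf.extract(member, path=dest)   # dispatches to _extract_member()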
class TarIter(object):
"""Iterator Class.
for tarinfo in TarFile(...):
suite...
"""
def __init__(self, tarfile):
"""Construct a TarIter object.
"""
self.tarfile = tarfile
self.index = 0
def __iter__(self):
"""Return iterator object.
"""
return self
def __next__(self):
"""Return the next item using TarFile's next() method.
When all members have been read, set TarFile as _loaded.
"""
# Fix for SF #1100429: Under rare circumstances it can
# happen that getmembers() is called during iteration,
# which will cause TarIter to stop prematurely.
if not self.tarfile._loaded:
tarinfo = self.tarfile.next()
if not tarinfo:
self.tarfile._loaded = True
raise StopIteration
else:
try:
tarinfo = self.tarfile.members[self.index]
except IndexError:
raise StopIteration
self.index += 1
return tarinfo
next = __next__ # for Python 2.x
#--------------------
# exported functions
#--------------------
def is_tarfile(name):
"""Return True if name points to a tar archive that we
are able to handle, else return False.
"""
try:
t = open(name)
t.close()
return True
except TarError:
return False
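# Hedged usage sketch for is_tarfile(); "archive.tar" is a placeholder path.
def _demo_is_tarfile(path="archive.tar"):
    if is_tarfile(path):
        print("%s looks like a tar archive this module can read" % path)
    else:
        print("%s cannot be handled by this module" % path)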
bltn_open = open
open = TarFile.open
| mit |
uglyboxer/linear_neuron | net-p3/lib/python3.5/site-packages/pip/download.py | 279 | 31936 | from __future__ import absolute_import
import cgi
import email.utils
import hashlib
import getpass
import json
import logging
import mimetypes
import os
import platform
import re
import shutil
import sys
import tempfile
try:
import ssl # noqa
HAS_TLS = True
except ImportError:
HAS_TLS = False
from pip._vendor.six.moves.urllib import parse as urllib_parse
from pip._vendor.six.moves.urllib import request as urllib_request
import pip
from pip.exceptions import InstallationError, HashMismatch
from pip.models import PyPI
from pip.utils import (splitext, rmtree, format_size, display_path,
backup_dir, ask_path_exists, unpack_file,
call_subprocess, ARCHIVE_EXTENSIONS)
from pip.utils.filesystem import check_path_owner
from pip.utils.logging import indent_log
from pip.utils.ui import DownloadProgressBar, DownloadProgressSpinner
from pip.locations import write_delete_marker_file
from pip.vcs import vcs
from pip._vendor import requests, six
from pip._vendor.requests.adapters import BaseAdapter, HTTPAdapter
from pip._vendor.requests.auth import AuthBase, HTTPBasicAuth
from pip._vendor.requests.models import Response
from pip._vendor.requests.structures import CaseInsensitiveDict
from pip._vendor.requests.packages import urllib3
from pip._vendor.cachecontrol import CacheControlAdapter
from pip._vendor.cachecontrol.caches import FileCache
from pip._vendor.lockfile import LockError
from pip._vendor.six.moves import xmlrpc_client
__all__ = ['get_file_content',
'is_url', 'url_to_path', 'path_to_url',
'is_archive_file', 'unpack_vcs_link',
'unpack_file_url', 'is_vcs_url', 'is_file_url',
'unpack_http_url', 'unpack_url']
logger = logging.getLogger(__name__)
def user_agent():
"""
Return a string representing the user agent.
"""
data = {
"installer": {"name": "pip", "version": pip.__version__},
"python": platform.python_version(),
"implementation": {
"name": platform.python_implementation(),
},
}
if data["implementation"]["name"] == 'CPython':
data["implementation"]["version"] = platform.python_version()
elif data["implementation"]["name"] == 'PyPy':
if sys.pypy_version_info.releaselevel == 'final':
pypy_version_info = sys.pypy_version_info[:3]
else:
pypy_version_info = sys.pypy_version_info
data["implementation"]["version"] = ".".join(
[str(x) for x in pypy_version_info]
)
elif data["implementation"]["name"] == 'Jython':
# Complete Guess
data["implementation"]["version"] = platform.python_version()
elif data["implementation"]["name"] == 'IronPython':
# Complete Guess
data["implementation"]["version"] = platform.python_version()
if sys.platform.startswith("linux"):
distro = dict(filter(
lambda x: x[1],
zip(["name", "version", "id"], platform.linux_distribution()),
))
libc = dict(filter(
lambda x: x[1],
zip(["lib", "version"], platform.libc_ver()),
))
if libc:
distro["libc"] = libc
if distro:
data["distro"] = distro
if sys.platform.startswith("darwin") and platform.mac_ver()[0]:
data["distro"] = {"name": "OS X", "version": platform.mac_ver()[0]}
if platform.system():
data.setdefault("system", {})["name"] = platform.system()
if platform.release():
data.setdefault("system", {})["release"] = platform.release()
if platform.machine():
data["cpu"] = platform.machine()
return "{data[installer][name]}/{data[installer][version]} {json}".format(
data=data,
json=json.dumps(data, separators=(",", ":"), sort_keys=True),
)
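# Quick sketch (not part of pip): user_agent() simply returns a string, so
# it can be inspected directly; the exact JSON payload varies per interpreter.
def _demo_user_agent():
    ua = user_agent()
    print(ua)    # e.g. "pip/6.0.8 {...sorted JSON metadata...}"
    return ua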
class MultiDomainBasicAuth(AuthBase):
def __init__(self, prompting=True):
self.prompting = prompting
self.passwords = {}
def __call__(self, req):
parsed = urllib_parse.urlparse(req.url)
# Get the netloc without any embedded credentials
netloc = parsed.netloc.rsplit("@", 1)[-1]
# Set the url of the request to the url without any credentials
req.url = urllib_parse.urlunparse(parsed[:1] + (netloc,) + parsed[2:])
# Use any stored credentials that we have for this netloc
username, password = self.passwords.get(netloc, (None, None))
# Extract credentials embedded in the url if we have none stored
if username is None:
username, password = self.parse_credentials(parsed.netloc)
if username or password:
# Store the username and password
self.passwords[netloc] = (username, password)
# Send the basic auth with this request
req = HTTPBasicAuth(username or "", password or "")(req)
# Attach a hook to handle 401 responses
req.register_hook("response", self.handle_401)
return req
def handle_401(self, resp, **kwargs):
# We only care about 401 responses, anything else we want to just
# pass through the actual response
if resp.status_code != 401:
return resp
        # We are not able to prompt the user, so simply return the response
if not self.prompting:
return resp
parsed = urllib_parse.urlparse(resp.url)
# Prompt the user for a new username and password
username = six.moves.input("User for %s: " % parsed.netloc)
password = getpass.getpass("Password: ")
# Store the new username and password to use for future requests
if username or password:
self.passwords[parsed.netloc] = (username, password)
# Consume content and release the original connection to allow our new
# request to reuse the same one.
resp.content
resp.raw.release_conn()
# Add our new username and password to the request
req = HTTPBasicAuth(username or "", password or "")(resp.request)
# Send our new request
new_resp = resp.connection.send(req, **kwargs)
new_resp.history.append(resp)
return new_resp
def parse_credentials(self, netloc):
if "@" in netloc:
userinfo = netloc.rsplit("@", 1)[0]
if ":" in userinfo:
return userinfo.split(":", 1)
return userinfo, None
return None, None
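# Hedged sketch: the handler above plugs into any requests session; the URL
# below (and its embedded credentials) are hypothetical.
def _demo_multidomain_auth():
    session = requests.Session()
    session.auth = MultiDomainBasicAuth(prompting=False)
    # Credentials embedded in the URL are stripped, stored per-netloc and
    # re-sent as HTTP basic auth by __call__ above.
    return session.get("https://user:secret@index.example.org/simple/")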
class LocalFSAdapter(BaseAdapter):
def send(self, request, stream=None, timeout=None, verify=None, cert=None,
proxies=None):
pathname = url_to_path(request.url)
resp = Response()
resp.status_code = 200
resp.url = request.url
try:
stats = os.stat(pathname)
except OSError as exc:
resp.status_code = 404
resp.raw = exc
else:
modified = email.utils.formatdate(stats.st_mtime, usegmt=True)
content_type = mimetypes.guess_type(pathname)[0] or "text/plain"
resp.headers = CaseInsensitiveDict({
"Content-Type": content_type,
"Content-Length": stats.st_size,
"Last-Modified": modified,
})
resp.raw = open(pathname, "rb")
resp.close = resp.raw.close
return resp
def close(self):
pass
class SafeFileCache(FileCache):
"""
A file based cache which is safe to use even when the target directory may
not be accessible or writable.
"""
def __init__(self, *args, **kwargs):
super(SafeFileCache, self).__init__(*args, **kwargs)
# Check to ensure that the directory containing our cache directory
        # is owned by the user currently executing pip. If it does not exist
# we will check the parent directory until we find one that does exist.
# If it is not owned by the user executing pip then we will disable
# the cache and log a warning.
if not check_path_owner(self.directory):
logger.warning(
"The directory '%s' or its parent directory is not owned by "
"the current user and the cache has been disabled. Please "
"check the permissions and owner of that directory. If "
"executing pip with sudo, you may want sudo's -H flag.",
self.directory,
)
# Set our directory to None to disable the Cache
self.directory = None
def get(self, *args, **kwargs):
# If we don't have a directory, then the cache should be a no-op.
if self.directory is None:
return
try:
return super(SafeFileCache, self).get(*args, **kwargs)
except (LockError, OSError, IOError):
# We intentionally silence this error, if we can't access the cache
# then we can just skip caching and process the request as if
# caching wasn't enabled.
pass
def set(self, *args, **kwargs):
# If we don't have a directory, then the cache should be a no-op.
if self.directory is None:
return
try:
return super(SafeFileCache, self).set(*args, **kwargs)
except (LockError, OSError, IOError):
# We intentionally silence this error, if we can't access the cache
# then we can just skip caching and process the request as if
# caching wasn't enabled.
pass
def delete(self, *args, **kwargs):
# If we don't have a directory, then the cache should be a no-op.
if self.directory is None:
return
try:
return super(SafeFileCache, self).delete(*args, **kwargs)
except (LockError, OSError, IOError):
# We intentionally silence this error, if we can't access the cache
# then we can just skip caching and process the request as if
# caching wasn't enabled.
pass
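# Minimal sketch: SafeFileCache degrades to a no-op instead of raising when
# its directory is unusable; "/tmp/pip-http-cache" is a placeholder path.
def _demo_safe_cache():
    cache = SafeFileCache("/tmp/pip-http-cache")
    cache.set("key", b"value")   # silently skipped if the cache was disabled
    return cache.get("key")      # None whenever caching is unavailable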
class InsecureHTTPAdapter(HTTPAdapter):
def cert_verify(self, conn, url, verify, cert):
conn.cert_reqs = 'CERT_NONE'
conn.ca_certs = None
class PipSession(requests.Session):
timeout = None
def __init__(self, *args, **kwargs):
retries = kwargs.pop("retries", 0)
cache = kwargs.pop("cache", None)
insecure_hosts = kwargs.pop("insecure_hosts", [])
super(PipSession, self).__init__(*args, **kwargs)
# Attach our User Agent to the request
self.headers["User-Agent"] = user_agent()
# Attach our Authentication handler to the session
self.auth = MultiDomainBasicAuth()
# Create our urllib3.Retry instance which will allow us to customize
# how we handle retries.
retries = urllib3.Retry(
# Set the total number of retries that a particular request can
# have.
total=retries,
# A 503 error from PyPI typically means that the Fastly -> Origin
            # connection got interrupted in some way. A 503 error in general
# is typically considered a transient error so we'll go ahead and
# retry it.
status_forcelist=[503],
# Add a small amount of back off between failed requests in
# order to prevent hammering the service.
backoff_factor=0.25,
)
# We want to _only_ cache responses on securely fetched origins. We do
# this because we can't validate the response of an insecurely fetched
# origin, and we don't want someone to be able to poison the cache and
        # require manual eviction from the cache to fix it.
if cache:
secure_adapter = CacheControlAdapter(
cache=SafeFileCache(cache, use_dir_lock=True),
max_retries=retries,
)
else:
secure_adapter = HTTPAdapter(max_retries=retries)
# Our Insecure HTTPAdapter disables HTTPS validation. It does not
# support caching (see above) so we'll use it for all http:// URLs as
# well as any https:// host that we've marked as ignoring TLS errors
# for.
insecure_adapter = InsecureHTTPAdapter(max_retries=retries)
self.mount("https://", secure_adapter)
self.mount("http://", insecure_adapter)
# Enable file:// urls
self.mount("file://", LocalFSAdapter())
# We want to use a non-validating adapter for any requests which are
# deemed insecure.
for host in insecure_hosts:
self.mount("https://{0}/".format(host), insecure_adapter)
def request(self, method, url, *args, **kwargs):
# Allow setting a default timeout on a session
kwargs.setdefault("timeout", self.timeout)
# Dispatch the actual request
return super(PipSession, self).request(method, url, *args, **kwargs)
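# Hedged construction sketch for PipSession; every argument value here is an
# arbitrary example rather than a pip default.
def _demo_pip_session():
    session = PipSession(
        retries=3,                            # feeds urllib3.Retry above
        cache="/tmp/pip-http-cache",          # enables CacheControlAdapter
        insecure_hosts=["internal.example"],  # TLS validation skipped there
    )
    session.timeout = 15                      # default timeout for request()
    return session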
def get_file_content(url, comes_from=None, session=None):
"""Gets the content of a file; it may be a filename, file: URL, or
http: URL. Returns (location, content). Content is unicode."""
if session is None:
raise TypeError(
"get_file_content() missing 1 required keyword argument: 'session'"
)
match = _scheme_re.search(url)
if match:
scheme = match.group(1).lower()
if (scheme == 'file' and comes_from and
comes_from.startswith('http')):
raise InstallationError(
'Requirements file %s references URL %s, which is local'
% (comes_from, url))
if scheme == 'file':
path = url.split(':', 1)[1]
path = path.replace('\\', '/')
match = _url_slash_drive_re.match(path)
if match:
path = match.group(1) + ':' + path.split('|', 1)[1]
path = urllib_parse.unquote(path)
if path.startswith('/'):
path = '/' + path.lstrip('/')
url = path
else:
# FIXME: catch some errors
resp = session.get(url)
resp.raise_for_status()
if six.PY3:
return resp.url, resp.text
else:
return resp.url, resp.content
try:
with open(url) as f:
content = f.read()
except IOError as exc:
raise InstallationError(
'Could not open requirements file: %s' % str(exc)
)
return url, content
_scheme_re = re.compile(r'^(http|https|file):', re.I)
_url_slash_drive_re = re.compile(r'/*([a-z])\|', re.I)
def is_url(name):
"""Returns true if the name looks like a URL"""
if ':' not in name:
return False
scheme = name.split(':', 1)[0].lower()
return scheme in ['http', 'https', 'file', 'ftp'] + vcs.all_schemes
def url_to_path(url):
"""
Convert a file: URL to a path.
"""
assert url.startswith('file:'), (
"You can only turn file: urls into filenames (not %r)" % url)
_, netloc, path, _, _ = urllib_parse.urlsplit(url)
# if we have a UNC path, prepend UNC share notation
if netloc:
netloc = '\\\\' + netloc
path = urllib_request.url2pathname(netloc + path)
return path
def path_to_url(path):
"""
Convert a path to a file: URL. The path will be made absolute and have
quoted path parts.
"""
path = os.path.normpath(os.path.abspath(path))
url = urllib_parse.urljoin('file:', urllib_request.pathname2url(path))
return url
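# Round-trip sketch for the two helpers above; the path is a placeholder and
# the exact URL form differs between POSIX and Windows.
def _demo_url_roundtrip(path="/tmp/some package/file.tar.gz"):
    url = path_to_url(path)   # e.g. "file:///tmp/some%20package/..."
    assert url_to_path(url) == os.path.normpath(os.path.abspath(path))
    return url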
def is_archive_file(name):
"""Return True if `name` is a considered as an archive file."""
ext = splitext(name)[1].lower()
if ext in ARCHIVE_EXTENSIONS:
return True
return False
def unpack_vcs_link(link, location):
vcs_backend = _get_used_vcs_backend(link)
vcs_backend.unpack(location)
def _get_used_vcs_backend(link):
for backend in vcs.backends:
if link.scheme in backend.schemes:
vcs_backend = backend(link.url)
return vcs_backend
def is_vcs_url(link):
return bool(_get_used_vcs_backend(link))
def is_file_url(link):
return link.url.lower().startswith('file:')
def _check_hash(download_hash, link):
if download_hash.digest_size != hashlib.new(link.hash_name).digest_size:
logger.critical(
"Hash digest size of the package %d (%s) doesn't match the "
"expected hash name %s!",
download_hash.digest_size, link, link.hash_name,
)
raise HashMismatch('Hash name mismatch for package %s' % link)
if download_hash.hexdigest() != link.hash:
logger.critical(
"Hash of the package %s (%s) doesn't match the expected hash %s!",
link, download_hash.hexdigest(), link.hash,
)
raise HashMismatch(
'Bad %s hash for package %s' % (link.hash_name, link)
)
def _get_hash_from_file(target_file, link):
try:
download_hash = hashlib.new(link.hash_name)
except (ValueError, TypeError):
logger.warning(
"Unsupported hash name %s for package %s", link.hash_name, link,
)
return None
with open(target_file, 'rb') as fp:
while True:
chunk = fp.read(4096)
if not chunk:
break
download_hash.update(chunk)
return download_hash
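# Hedged sketch of the chunked hashing above, driven through a stand-in
# "link" object; HashStub and the file path are purely illustrative.
def _demo_file_hash(target_file="/tmp/pkg.tar.gz"):
    class HashStub(object):
        hash_name = "sha256"   # any algorithm hashlib.new() accepts
    digest = _get_hash_from_file(target_file, HashStub())
    return digest.hexdigest() if digest else None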
def _progress_indicator(iterable, *args, **kwargs):
return iterable
def _download_url(resp, link, content_file):
download_hash = None
if link.hash and link.hash_name:
try:
download_hash = hashlib.new(link.hash_name)
except ValueError:
logger.warning(
"Unsupported hash name %s for package %s",
link.hash_name, link,
)
try:
total_length = int(resp.headers['content-length'])
except (ValueError, KeyError, TypeError):
total_length = 0
cached_resp = getattr(resp, "from_cache", False)
if logger.getEffectiveLevel() > logging.INFO:
show_progress = False
elif cached_resp:
show_progress = False
elif total_length > (40 * 1000):
show_progress = True
elif not total_length:
show_progress = True
else:
show_progress = False
show_url = link.show_url
def resp_read(chunk_size):
try:
# Special case for urllib3.
for chunk in resp.raw.stream(
chunk_size,
                    # We use decode_content=False here because we don't
# want urllib3 to mess with the raw bytes we get
# from the server. If we decompress inside of
# urllib3 then we cannot verify the checksum
# because the checksum will be of the compressed
# file. This breakage will only occur if the
# server adds a Content-Encoding header, which
# depends on how the server was configured:
# - Some servers will notice that the file isn't a
# compressible file and will leave the file alone
# and with an empty Content-Encoding
# - Some servers will notice that the file is
# already compressed and will leave the file
# alone and will add a Content-Encoding: gzip
# header
# - Some servers won't notice anything at all and
# will take a file that's already been compressed
# and compress it again and set the
# Content-Encoding: gzip header
#
# By setting this not to decode automatically we
# hope to eliminate problems with the second case.
decode_content=False):
yield chunk
except AttributeError:
# Standard file-like object.
while True:
chunk = resp.raw.read(chunk_size)
if not chunk:
break
yield chunk
progress_indicator = _progress_indicator
if link.netloc == PyPI.netloc:
url = show_url
else:
url = link.url_without_fragment
if show_progress: # We don't show progress on cached responses
if total_length:
logger.info(
"Downloading %s (%s)", url, format_size(total_length),
)
progress_indicator = DownloadProgressBar(
max=total_length,
).iter
else:
logger.info("Downloading %s", url)
progress_indicator = DownloadProgressSpinner().iter
elif cached_resp:
logger.info("Using cached %s", url)
else:
logger.info("Downloading %s", url)
logger.debug('Downloading from URL %s', link)
for chunk in progress_indicator(resp_read(4096), 4096):
if download_hash is not None:
download_hash.update(chunk)
content_file.write(chunk)
if link.hash and link.hash_name:
_check_hash(download_hash, link)
return download_hash
def _copy_file(filename, location, content_type, link):
copy = True
download_location = os.path.join(location, link.filename)
if os.path.exists(download_location):
response = ask_path_exists(
'The file %s exists. (i)gnore, (w)ipe, (b)ackup ' %
display_path(download_location), ('i', 'w', 'b'))
if response == 'i':
copy = False
elif response == 'w':
logger.warning('Deleting %s', display_path(download_location))
os.remove(download_location)
elif response == 'b':
dest_file = backup_dir(download_location)
logger.warning(
'Backing up %s to %s',
display_path(download_location),
display_path(dest_file),
)
shutil.move(download_location, dest_file)
if copy:
shutil.copy(filename, download_location)
logger.info('Saved %s', display_path(download_location))
def unpack_http_url(link, location, download_dir=None, session=None):
if session is None:
raise TypeError(
"unpack_http_url() missing 1 required keyword argument: 'session'"
)
temp_dir = tempfile.mkdtemp('-unpack', 'pip-')
# If a download dir is specified, is the file already downloaded there?
already_downloaded_path = None
if download_dir:
already_downloaded_path = _check_download_dir(link, download_dir)
if already_downloaded_path:
from_path = already_downloaded_path
content_type = mimetypes.guess_type(from_path)[0]
else:
# let's download to a tmp dir
from_path, content_type = _download_http_url(link, session, temp_dir)
# unpack the archive to the build dir location. even when only downloading
# archives, they have to be unpacked to parse dependencies
unpack_file(from_path, location, content_type, link)
# a download dir is specified; let's copy the archive there
if download_dir and not already_downloaded_path:
_copy_file(from_path, download_dir, content_type, link)
if not already_downloaded_path:
os.unlink(from_path)
rmtree(temp_dir)
def unpack_file_url(link, location, download_dir=None):
"""Unpack link into location.
If download_dir is provided and link points to a file, make a copy
of the link file inside download_dir."""
link_path = url_to_path(link.url_without_fragment)
# If it's a url to a local directory
if os.path.isdir(link_path):
if os.path.isdir(location):
rmtree(location)
shutil.copytree(link_path, location, symlinks=True)
if download_dir:
logger.info('Link is a directory, ignoring download_dir')
return
# if link has a hash, let's confirm it matches
if link.hash:
link_path_hash = _get_hash_from_file(link_path, link)
_check_hash(link_path_hash, link)
# If a download dir is specified, is the file already there and valid?
already_downloaded_path = None
if download_dir:
already_downloaded_path = _check_download_dir(link, download_dir)
if already_downloaded_path:
from_path = already_downloaded_path
else:
from_path = link_path
content_type = mimetypes.guess_type(from_path)[0]
# unpack the archive to the build dir location. even when only downloading
# archives, they have to be unpacked to parse dependencies
unpack_file(from_path, location, content_type, link)
# a download dir is specified and not already downloaded
if download_dir and not already_downloaded_path:
_copy_file(from_path, download_dir, content_type, link)
def _copy_dist_from_dir(link_path, location):
"""Copy distribution files in `link_path` to `location`.
Invoked when user requests to install a local directory. E.g.:
pip install .
pip install ~/dev/git-repos/python-prompt-toolkit
"""
# Note: This is currently VERY SLOW if you have a lot of data in the
# directory, because it copies everything with `shutil.copytree`.
# What it should really do is build an sdist and install that.
# See https://github.com/pypa/pip/issues/2195
if os.path.isdir(location):
rmtree(location)
# build an sdist
setup_py = 'setup.py'
sdist_args = [sys.executable]
sdist_args.append('-c')
sdist_args.append(
"import setuptools, tokenize;__file__=%r;"
"exec(compile(getattr(tokenize, 'open', open)(__file__).read()"
".replace('\\r\\n', '\\n'), __file__, 'exec'))" % setup_py)
sdist_args.append('sdist')
sdist_args += ['--dist-dir', location]
logger.info('Running setup.py sdist for %s', link_path)
with indent_log():
call_subprocess(sdist_args, cwd=link_path, show_stdout=False)
# unpack sdist into `location`
sdist = os.path.join(location, os.listdir(location)[0])
logger.info('Unpacking sdist %s into %s', sdist, location)
unpack_file(sdist, location, content_type=None, link=None)
class PipXmlrpcTransport(xmlrpc_client.Transport):
"""Provide a `xmlrpclib.Transport` implementation via a `PipSession`
object.
"""
def __init__(self, index_url, session, use_datetime=False):
xmlrpc_client.Transport.__init__(self, use_datetime)
index_parts = urllib_parse.urlparse(index_url)
self._scheme = index_parts.scheme
self._session = session
def request(self, host, handler, request_body, verbose=False):
parts = (self._scheme, host, handler, None, None, None)
url = urllib_parse.urlunparse(parts)
try:
headers = {'Content-Type': 'text/xml'}
response = self._session.post(url, data=request_body,
headers=headers, stream=True)
response.raise_for_status()
self.verbose = verbose
return self.parse_response(response.raw)
except requests.HTTPError as exc:
logger.critical(
"HTTP error %s while getting %s",
exc.response.status_code, url,
)
raise
def unpack_url(link, location, download_dir=None,
only_download=False, session=None):
"""Unpack link.
If link is a VCS link:
if only_download, export into download_dir and ignore location
else unpack into location
for other types of link:
- unpack into location
- if download_dir, copy the file into download_dir
- if only_download, mark location for deletion
"""
# non-editable vcs urls
if is_vcs_url(link):
unpack_vcs_link(link, location)
# file urls
elif is_file_url(link):
unpack_file_url(link, location, download_dir)
# http urls
else:
if session is None:
session = PipSession()
unpack_http_url(
link,
location,
download_dir,
session,
)
if only_download:
write_delete_marker_file(location)
def _download_http_url(link, session, temp_dir):
"""Download link url into temp_dir using provided session"""
target_url = link.url.split('#', 1)[0]
try:
resp = session.get(
target_url,
# We use Accept-Encoding: identity here because requests
# defaults to accepting compressed responses. This breaks in
# a variety of ways depending on how the server is configured.
# - Some servers will notice that the file isn't a compressible
# file and will leave the file alone and with an empty
# Content-Encoding
# - Some servers will notice that the file is already
# compressed and will leave the file alone and will add a
# Content-Encoding: gzip header
# - Some servers won't notice anything at all and will take
# a file that's already been compressed and compress it again
# and set the Content-Encoding: gzip header
            # By setting this to request only the identity encoding we're
# hoping to eliminate the third case. Hopefully there does not
# exist a server which when given a file will notice it is
# already compressed and that you're not asking for a
# compressed file and will then decompress it before sending
# because if that's the case I don't think it'll ever be
# possible to make this work.
headers={"Accept-Encoding": "identity"},
stream=True,
)
resp.raise_for_status()
except requests.HTTPError as exc:
logger.critical(
"HTTP error %s while getting %s", exc.response.status_code, link,
)
raise
content_type = resp.headers.get('content-type', '')
filename = link.filename # fallback
# Have a look at the Content-Disposition header for a better guess
content_disposition = resp.headers.get('content-disposition')
if content_disposition:
type, params = cgi.parse_header(content_disposition)
# We use ``or`` here because we don't want to use an "empty" value
# from the filename param.
filename = params.get('filename') or filename
ext = splitext(filename)[1]
if not ext:
ext = mimetypes.guess_extension(content_type)
if ext:
filename += ext
if not ext and link.url != resp.url:
ext = os.path.splitext(resp.url)[1]
if ext:
filename += ext
file_path = os.path.join(temp_dir, filename)
with open(file_path, 'wb') as content_file:
_download_url(resp, link, content_file)
return file_path, content_type
def _check_download_dir(link, download_dir):
""" Check download_dir for previously downloaded file with correct hash
If a correct file is found return its path else None
"""
download_path = os.path.join(download_dir, link.filename)
if os.path.exists(download_path):
# If already downloaded, does its hash match?
logger.info('File was already downloaded %s', download_path)
if link.hash:
download_hash = _get_hash_from_file(download_path, link)
try:
_check_hash(download_hash, link)
except HashMismatch:
logger.warning(
'Previously-downloaded file %s has bad hash, '
're-downloading.',
download_path
)
os.unlink(download_path)
return None
return download_path
return None
| mit |
Jajcus/pyxmpp | tests/vcard.py | 1 | 5613 | #!/usr/bin/python
import unittest
import libxml2
from pyxmpp.jabber import vcard
def vcard2txt(vcard):
"""Extract data from VCard object for text comparision.
Separate function defined here to test the API (attribute access)."""
ret="Full name: %r\n" % (vcard.fn.value,)
ret+="Structural name:\n"
ret+=" Family name: %r\n" % (vcard.n.family,)
ret+=" Given name: %r\n" % (vcard.n.given,)
ret+=" Middle name: %r\n" % (vcard.n.middle,)
ret+=" Prefix: %r\n" % (vcard.n.prefix,)
ret+=" Suffix: %r\n" % (vcard.n.suffix,)
for nickname in vcard.nickname:
ret+="Nickname: %r\n" % (nickname.value,)
for photo in vcard.photo:
ret+="Photo:\n"
ret+=" Type: %r\n" % (photo.type,)
ret+=" Image: %r\n" % (photo.image,)
ret+=" URI: %r\n" % (photo.uri,)
for bday in vcard.bday:
ret+="Birthday: %r\n" % (bday.value,)
for adr in vcard.adr:
ret+="Address:\n"
ret+=" Type: %r\n" % (adr.type,)
ret+=" POBox: %r\n" % (adr.pobox,)
ret+=" Extended: %r\n" % (adr.extadr,)
ret+=" Street: %r\n" % (adr.street,)
ret+=" Locality: %r\n" % (adr.locality,)
ret+=" Region: %r\n" % (adr.region,)
ret+=" Postal code: %r\n" % (adr.pcode,)
ret+=" Country: %r\n" % (adr.ctry,)
for label in vcard.label:
ret+="Label:\n"
ret+=" Type: %r\n" % (label.type,)
ret+=" Lines: %r\n" % (label.lines,)
for tel in vcard.tel:
ret+="Telephone:\n"
ret+=" Type: %r\n" % (tel.type,)
ret+=" Number: %r\n" % (tel.number,)
for email in vcard.email:
ret+="E-mail:\n"
ret+=" Type: %r\n" % (email.type,)
ret+=" Address: %r\n" % (email.address,)
for jid in vcard.jabberid:
ret+="JID: %r\n" % (jid.value,)
for mailer in vcard.mailer:
ret+="Mailer: %r\n" % (mailer.value,)
for tz in vcard.tz:
ret+="Timezone: %r\n" % (tz.value,)
for geo in vcard.geo:
ret+="Geographical location:\n"
ret+=" Latitude: %r\n" % (geo.lat,)
ret+=" Longitude: %r\n" % (geo.lon,)
for title in vcard.title:
ret+="Title: %r\n" % (title.value,)
for role in vcard.role:
ret+="Role: %r\n" % (role.value,)
for logo in vcard.logo:
ret+="Logo:\n"
ret+=" Type: %r\n" % (logo.type,)
ret+=" Image: %r\n" % (logo.image,)
ret+=" URI: %r\n" % (logo.uri,)
for org in vcard.org:
ret+="Organization:\n"
ret+=" Name: %r\n" % (org.name,)
ret+=" Unit: %r\n" % (org.unit,)
for cat in vcard.categories:
ret+="Categories: %r\n" % (cat.keywords,)
for note in vcard.note:
ret+="Note: %r\n" % (note.value,)
for prodid in vcard.prodid:
ret+="Product id: %r\n" % (prodid.value,)
for rev in vcard.rev:
ret+="Revision: %r\n" % (rev.value,)
for sort_string in vcard.sort_string:
ret+="Sort string: %r\n" % (sort_string.value,)
for sound in vcard.sound:
ret+="Sound:\n"
ret+=" Sound: %r\n" % (sound.sound,)
ret+=" URI: %r\n" % (sound.uri,)
ret+=" Phonetic: %r\n" % (sound.phonetic,)
for uid in vcard.uid:
ret+="User id: %r\n" % (uid.value,)
for url in vcard.url:
ret+="URL: %r\n" % (url.value,)
try:
for cls in vcard["CLASS"]:
ret+="Class: %r\n" % (cls.value,)
except KeyError:
pass
for key in vcard.key:
ret+="Key:\n"
ret+=" Type: %r\n" % (key.type,)
ret+=" Value: %r\n" % (key.cred,)
for desc in vcard.desc:
ret+="Description: %r\n" % (desc.value,)
return ret
def xml_error_handler(ctx,error):
pass
class TestVCard(unittest.TestCase):
def setUp(self):
libxml2.registerErrorHandler(xml_error_handler,None)
def tearDown(self):
libxml2.registerErrorHandler(None,None)
def test_xml_input1(self):
xmldata=libxml2.parseFile("data/vcard1.xml")
vc=vcard.VCard(xmldata.getRootElement())
should_be=file("data/vcard1.txt").read()
self.failUnlessEqual(vcard2txt(vc),should_be)
def test_xml_without_n(self):
xmldata=libxml2.parseFile("data/vcard_without_n.xml")
vc=vcard.VCard(xmldata.getRootElement())
should_be=file("data/vcard_without_n.txt").read()
self.failUnlessEqual(vcard2txt(vc),should_be)
def test_xml_without_fn(self):
xmldata=libxml2.parseFile("data/vcard_without_n.xml")
vc=vcard.VCard(xmldata.getRootElement())
should_be=file("data/vcard_without_n.txt").read()
self.failUnlessEqual(vcard2txt(vc),should_be)
def test_xml_with_semicolon(self):
xmldata = libxml2.parseFile("data/vcard_with_semicolon.xml")
vc = vcard.VCard(xmldata.getRootElement())
first = vc.rfc2426()
second = vcard.VCard(first).rfc2426()
self.failUnlessEqual(first, second)
def test_vcf_input1(self):
input=file("data/vcard2.vcf").read()
vc=vcard.VCard(input)
should_be=file("data/vcard2.txt").read()
self.failUnlessEqual(vcard2txt(vc),should_be)
def test_vcf_input2(self):
input=file("data/vcard3.vcf").read()
vc=vcard.VCard(input)
should_be=file("data/vcard3.txt").read()
self.failUnlessEqual(vcard2txt(vc),should_be)
#TODO: test_xml_output
def suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(TestVCard))
return suite
if __name__ == '__main__':
unittest.TextTestRunner(verbosity=2).run(suite())
# vi: sts=4 et sw=4
| lgpl-2.1 |
yajiedesign/mxnet | tests/nightly/common.py | 5 | 4064 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import functools
import logging
import os
import random
import mxnet as mx
import numpy as np
def with_seed(seed=None):
"""
A decorator for test functions that manages rng seeds.
Parameters
----------
seed : the seed to pass to np.random and mx.random
    This test decorator sets the np, mx and python random seeds identically
prior to each test, then outputs those seeds if the test fails or
if the test requires a fixed seed (as a reminder to make the test
more robust against random data).
@with_seed()
def test_ok_with_random_data():
...
@with_seed(1234)
def test_not_ok_with_random_data():
...
    Using the @with_seed() decorator on all tests provides
    test isolation and reproducibility of failures. When a
test fails, the decorator outputs the seed used. The user
can then set the environment variable MXNET_TEST_SEED to
the value reported, then rerun the test with:
pytest --verbose --capture=no <test_module_name.py>::<failing_test>
To run a test repeatedly, set MXNET_TEST_COUNT=<NNN> in the environment.
To see the seeds of even the passing tests, add '--log-level=DEBUG' to pytest.
"""
def test_helper(orig_test):
@functools.wraps(orig_test)
def test_new(*args, **kwargs):
test_count = int(os.getenv('MXNET_TEST_COUNT', '1'))
env_seed_str = os.getenv('MXNET_TEST_SEED')
for i in range(test_count):
if seed is not None:
this_test_seed = seed
log_level = logging.INFO
elif env_seed_str is not None:
this_test_seed = int(env_seed_str)
log_level = logging.INFO
else:
this_test_seed = np.random.randint(0, np.iinfo(np.int32).max)
log_level = logging.DEBUG
post_test_state = np.random.get_state()
np.random.seed(this_test_seed)
mx.random.seed(this_test_seed)
random.seed(this_test_seed)
                # 'pytest --log-level=DEBUG' shows this msg even with an ensuing core dump.
test_count_msg = '{} of {}: '.format(i+1,test_count) if test_count > 1 else ''
pre_test_msg = ('{}Setting test np/mx/python random seeds, use MXNET_TEST_SEED={}'
' to reproduce.').format(test_count_msg, this_test_seed)
on_err_test_msg = ('{}Error seen with seeded test, use MXNET_TEST_SEED={}'
' to reproduce.').format(test_count_msg, this_test_seed)
logging.log(log_level, pre_test_msg)
try:
orig_test(*args, **kwargs)
except:
# With exceptions, repeat test_msg at WARNING level to be sure it's seen.
if log_level < logging.WARNING:
logging.warning(on_err_test_msg)
raise
finally:
# Provide test-isolation for any test having this decorator
mx.nd.waitall()
np.random.set_state(post_test_state)
return test_new
return test_helper
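# Hedged usage sketch (mirrors the docstring above); the assertion is a
# trivial stand-in for a real nightly test body.
@with_seed()
def _demo_seeded_test():
    data = mx.nd.array(np.random.uniform(size=(2, 2)))
    assert data.shape == (2, 2)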
| apache-2.0 |
Ssawa/Diamond | src/collectors/ping/test/testping.py | 26 | 6033 | #!/usr/bin/python
# coding=utf-8
##########################################################################
from test import CollectorTestCase
from test import get_collector_config
from test import unittest
from mock import Mock
from mock import patch
from diamond.collector import Collector
from ping import PingCollector
##########################################################################
class TestPingCollector(CollectorTestCase):
def setUp(self):
config = get_collector_config('PingCollector', {
'interval': 10,
'target_a': 'localhost',
'bin': 'true'
})
self.collector = PingCollector(config, None)
def test_import(self):
self.assertTrue(PingCollector)
@patch('os.access', Mock(return_value=True))
@patch.object(Collector, 'publish')
def test_should_work_with_real_data_bad_gentoo(self, publish_mock):
patch_communicate = patch(
'subprocess.Popen.communicate',
Mock(return_value=(
self.getFixture('bad_gentoo').getvalue(),
'')))
patch_communicate.start()
self.collector.collect()
patch_communicate.stop()
self.assertPublishedMany(publish_mock, {
'localhost': 10000
})
@patch('os.access', Mock(return_value=True))
@patch.object(Collector, 'publish')
def test_should_work_with_real_data_host_gentoo(self, publish_mock):
patch_communicate = patch(
'subprocess.Popen.communicate',
Mock(return_value=(
self.getFixture('host_gentoo').getvalue(),
'')))
patch_communicate.start()
self.collector.collect()
patch_communicate.stop()
metrics = {
'localhost': 11
}
self.setDocExample(collector=self.collector.__class__.__name__,
metrics=metrics,
defaultpath=self.collector.config['path'])
self.assertPublishedMany(publish_mock, metrics)
@patch('os.access', Mock(return_value=True))
@patch.object(Collector, 'publish')
def test_should_work_with_real_data_ip_gentoo(self, publish_mock):
patch_communicate = patch(
'subprocess.Popen.communicate',
Mock(return_value=(
self.getFixture('ip_gentoo').getvalue(),
'')))
patch_communicate.start()
self.collector.collect()
patch_communicate.stop()
self.assertPublishedMany(publish_mock, {
'localhost': 0
})
@patch('os.access', Mock(return_value=True))
@patch.object(Collector, 'publish')
def test_should_work_with_real_data_longhost_gentoo(self, publish_mock):
patch_communicate = patch(
'subprocess.Popen.communicate',
Mock(return_value=(
self.getFixture(
'longhost_gentoo').getvalue(),
'')))
patch_communicate.start()
self.collector.collect()
patch_communicate.stop()
self.assertPublishedMany(publish_mock, {
'localhost': 10
})
@patch('os.access', Mock(return_value=True))
@patch.object(Collector, 'publish')
def test_should_work_with_real_data_timeout_gentoo(self, publish_mock):
patch_communicate = patch(
'subprocess.Popen.communicate',
Mock(return_value=(
self.getFixture(
'timeout_gentoo').getvalue(),
'')))
patch_communicate.start()
self.collector.collect()
patch_communicate.stop()
self.assertPublishedMany(publish_mock, {
'localhost': 10000
})
@patch('os.access', Mock(return_value=True))
@patch.object(Collector, 'publish')
def test_should_work_with_real_data_host_osx(self, publish_mock):
patch_communicate = patch(
'subprocess.Popen.communicate',
Mock(return_value=(
self.getFixture('host_osx').getvalue(),
'')))
patch_communicate.start()
self.collector.collect()
patch_communicate.stop()
self.assertPublishedMany(publish_mock, {
'localhost': 38
})
@patch('os.access', Mock(return_value=True))
@patch.object(Collector, 'publish')
def test_should_work_with_real_data_ip_osx(self, publish_mock):
patch_communicate = patch(
'subprocess.Popen.communicate',
Mock(return_value=(
self.getFixture('ip_osx').getvalue(),
'')))
patch_communicate.start()
self.collector.collect()
patch_communicate.stop()
self.assertPublishedMany(publish_mock, {
'localhost': 0
})
@patch('os.access', Mock(return_value=True))
@patch.object(Collector, 'publish')
def test_should_work_with_real_data_longhost_osx(self, publish_mock):
patch_communicate = patch(
'subprocess.Popen.communicate',
Mock(return_value=(
self.getFixture('longhost_osx').getvalue(),
'')))
patch_communicate.start()
self.collector.collect()
patch_communicate.stop()
self.assertPublishedMany(publish_mock, {
'localhost': 42
})
@patch('os.access', Mock(return_value=True))
@patch.object(Collector, 'publish')
def test_should_work_with_real_data_timeout_osx(self, publish_mock):
patch_communicate = patch(
'subprocess.Popen.communicate',
Mock(return_value=(
self.getFixture('timeout_osx').getvalue(),
'')))
patch_communicate.start()
self.collector.collect()
patch_communicate.stop()
self.assertPublishedMany(publish_mock, {
'localhost': 10000
})
##########################################################################
if __name__ == "__main__":
unittest.main()
| mit |
Grirrane/odoo | openerp/addons/base/ir/ir_values.py | 5 | 26147 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import pickle
from openerp import tools
from openerp.osv import osv, fields
from openerp.exceptions import AccessError
from openerp.tools.translate import _
EXCLUDED_FIELDS = set((
'report_sxw_content', 'report_rml_content', 'report_sxw', 'report_rml',
'report_sxw_content_data', 'report_rml_content_data', 'search_view', ))
#: Possible slots to bind an action to with :meth:`~.set_action`
ACTION_SLOTS = [
"client_action_multi", # sidebar wizard action
"client_print_multi", # sidebar report printing button
"client_action_relate", # sidebar related link
"tree_but_open", # double-click on item in tree view
"tree_but_action", # deprecated: same as tree_but_open
]
class ir_values(osv.osv):
"""Holds internal model-specific action bindings and user-defined default
       field value definitions. This is a legacy internal model, mixing
two different concepts, and will likely be updated or replaced in a
future version by cleaner, separate models. You should not depend
explicitly on it.
The purpose of each ``ir.values`` entry depends on its type, defined
by the ``key`` column:
* 'default': user-defined default values, used when creating new
         records of this model.
* 'action': binding of an action to a particular *action slot* of
this model, making the action easily available in the user
interface for this model.
The ``key2`` column acts as a qualifier, further refining the type
of the entry. The possible values are:
* for 'default' entries: an optional condition restricting the
cases where this particular default value will be applicable,
or ``False`` for no condition
* for 'action' entries: the ``key2`` qualifier is one of the available
action slots, defining how this action can be invoked:
* ``'client_print_multi'`` for report printing actions that will
be available on views displaying items from this model
* ``'client_action_multi'`` for assistants (wizards) actions
that will be available in views displaying objects of this model
* ``'client_action_relate'`` for links towards related documents
that should be available in views displaying objects of this model
* ``'tree_but_open'`` for actions that will be triggered when
double-clicking an item from this model in a hierarchical tree view
Each entry is specific to a model (``model`` column), and for ``'actions'``
type, may even be made specific to a given record of that model when the
``res_id`` column contains a record ID (``False`` means it's global for
all records).
The content of the entry is defined by the ``value`` column, which may either
contain an arbitrary value, or a reference string defining the action that
should be executed.
.. rubric:: Usage: default values
The ``'default'`` entries are usually defined manually by the
users, and set by their UI clients calling :meth:`~.set_default`.
These default values are then automatically used by the
ORM every time a new record is about to be created, i.e. when
:meth:`~openerp.osv.osv.osv.default_get`
or :meth:`~openerp.osv.osv.osv.create` are called.
.. rubric:: Usage: action bindings
Business applications will usually bind their actions during
installation, and OpenERP UI clients will apply them as defined,
based on the list of actions included in the result of
:meth:`~openerp.osv.osv.osv.fields_view_get`,
or directly returned by explicit calls to :meth:`~.get_actions`.
"""
_name = 'ir.values'
def _value_unpickle(self, cursor, user, ids, name, arg, context=None):
res = {}
for record in self.browse(cursor, user, ids, context=context):
value = record[name[:-9]]
if record.key == 'default' and value:
# default values are pickled on the fly
try:
value = str(pickle.loads(value))
except Exception:
pass
res[record.id] = value
return res
def _value_pickle(self, cursor, user, id, name, value, arg, context=None):
if context is None:
context = {}
ctx = context.copy()
if self.CONCURRENCY_CHECK_FIELD in ctx:
del ctx[self.CONCURRENCY_CHECK_FIELD]
record = self.browse(cursor, user, id, context=context)
if record.key == 'default':
# default values are pickled on the fly
value = pickle.dumps(value)
self.write(cursor, user, id, {name[:-9]: value}, context=ctx)
def onchange_object_id(self, cr, uid, ids, object_id, context=None):
if not object_id: return {}
act = self.pool.get('ir.model').browse(cr, uid, object_id, context=context)
return {
'value': {'model': act.model}
}
def onchange_action_id(self, cr, uid, ids, action_id, context=None):
if not action_id: return {}
act = self.pool.get('ir.actions.actions').browse(cr, uid, action_id, context=context)
return {
'value': {'value_unpickle': act.type+','+str(act.id)}
}
_columns = {
'name': fields.char('Name', required=True),
'model': fields.char('Model Name', select=True, required=True,
help="Model to which this entry applies"),
# TODO: model_id and action_id should be read-write function fields
'model_id': fields.many2one('ir.model', 'Model (change only)', size=128,
help="Model to which this entry applies - "
"helper field for setting a model, will "
"automatically set the correct model name"),
'action_id': fields.many2one('ir.actions.actions', 'Action (change only)',
help="Action bound to this entry - "
"helper field for binding an action, will "
"automatically set the correct reference"),
'value': fields.text('Value', help="Default value (pickled) or reference to an action"),
'value_unpickle': fields.function(_value_unpickle, fnct_inv=_value_pickle,
type='text',
string='Default value or action reference'),
'key': fields.selection([('action','Action'),('default','Default')],
'Type', select=True, required=True,
help="- Action: an action attached to one slot of the given model\n"
"- Default: a default value for a model field"),
'key2' : fields.char('Qualifier', select=True,
help="For actions, one of the possible action slots: \n"
" - client_action_multi\n"
" - client_print_multi\n"
" - client_action_relate\n"
" - tree_but_open\n"
"For defaults, an optional condition"
,),
'res_id': fields.integer('Record ID', select=True,
help="Database identifier of the record to which this applies. "
"0 = for all records"),
'user_id': fields.many2one('res.users', 'User', ondelete='cascade', select=True,
help="If set, action binding only applies for this user."),
'company_id': fields.many2one('res.company', 'Company', ondelete='cascade', select=True,
help="If set, action binding only applies for this company")
}
_defaults = {
'key': 'action',
'key2': 'tree_but_open',
}
def _auto_init(self, cr, context=None):
super(ir_values, self)._auto_init(cr, context)
cr.execute('SELECT indexname FROM pg_indexes WHERE indexname = \'ir_values_key_model_key2_res_id_user_id_idx\'')
if not cr.fetchone():
cr.execute('CREATE INDEX ir_values_key_model_key2_res_id_user_id_idx ON ir_values (key, model, key2, res_id, user_id)')
def create(self, cr, uid, vals, context=None):
res = super(ir_values, self).create(cr, uid, vals, context=context)
self.get_defaults_dict.clear_cache(self)
return res
def write(self, cr, uid, ids, vals, context=None):
res = super(ir_values, self).write(cr, uid, ids, vals, context=context)
self.get_defaults_dict.clear_cache(self)
return res
def unlink(self, cr, uid, ids, context=None):
res = super(ir_values, self).unlink(cr, uid, ids, context=context)
self.get_defaults_dict.clear_cache(self)
return res
def set_default(self, cr, uid, model, field_name, value, for_all_users=True, company_id=False, condition=False):
"""Defines a default value for the given model and field_name. Any previous
default for the same scope (model, field_name, value, for_all_users, company_id, condition)
will be replaced and lost in the process.
Defaults can be later retrieved via :meth:`~.get_defaults`, which will return
the highest priority default for any given field. Defaults that are more specific
have a higher priority, in the following order (highest to lowest):
* specific to user and company
* specific to user only
* specific to company only
* global to everyone
:param string model: model name
:param string field_name: field name to which the default applies
:param value: the default field value to set
:type value: any serializable Python value
:param bool for_all_users: whether the default should apply to everybody or only
the user calling the method
:param int company_id: optional ID of the company to which the default should
apply. If omitted, the default will be global. If True
is passed, the current user's company will be used.
:param string condition: optional condition specification that can be used to
restrict the applicability of the default values
(e.g. based on another field's value). This is an
opaque string as far as the API is concerned, but client
stacks typically use single-field conditions in the
form ``'key=stringified_value'``.
(Currently, the condition is trimmed to 200 characters,
so values that share the same first 200 characters always
match)
:return: id of the newly created ir.values entry
"""
if isinstance(value, unicode):
value = value.encode('utf8')
if company_id is True:
# should be company-specific, need to get company id
user = self.pool.get('res.users').browse(cr, uid, uid)
company_id = user.company_id.id
# remove existing defaults for the same scope
search_criteria = [
('key', '=', 'default'),
('key2', '=', condition and condition[:200]),
('model', '=', model),
('name', '=', field_name),
('user_id', '=', False if for_all_users else uid),
('company_id','=', company_id)
]
self.unlink(cr, uid, self.search(cr, uid, search_criteria))
return self.create(cr, uid, {
'name': field_name,
'value': pickle.dumps(value),
'model': model,
'key': 'default',
'key2': condition and condition[:200],
'user_id': False if for_all_users else uid,
'company_id': company_id,
})
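    # Hedged usage sketch (model, field and values are illustrative, not
    # from the original module): make 'draft' the company-wide default for
    # a hypothetical 'state' field.
    #
    #   self.pool['ir.values'].set_default(cr, uid, 'res.partner',
    #       'state', 'draft', for_all_users=True, company_id=1)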
def get_default(self, cr, uid, model, field_name, for_all_users=True, company_id=False, condition=False):
""" Return the default value defined for model, field_name, users, company and condition.
Return ``None`` if no such default exists.
"""
search_criteria = [
('key', '=', 'default'),
('key2', '=', condition and condition[:200]),
('model', '=', model),
('name', '=', field_name),
('user_id', '=', False if for_all_users else uid),
('company_id','=', company_id)
]
defaults = self.browse(cr, uid, self.search(cr, uid, search_criteria))
return pickle.loads(defaults[0].value.encode('utf-8')) if defaults else None
def get_defaults(self, cr, uid, model, condition=False):
"""Returns any default values that are defined for the current model and user,
(and match ``condition``, if specified), previously registered via
:meth:`~.set_default`.
Defaults are global to a model, not field-specific, but an optional
``condition`` can be provided to restrict matching default values
to those that were defined for the same condition (usually based
on another field's value).
Default values also have priorities depending on whom they apply
to: only the highest priority value will be returned for any
field. See :meth:`~.set_default` for more details.
:param string model: model name
:param string condition: optional condition specification that can be used to
restrict the applicability of the default values
(e.g. based on another field's value). This is an
opaque string as far as the API is concerned, but client
stacks typically use single-field conditions in the
form ``'key=stringified_value'``.
(Currently, the condition is trimmed to 200 characters,
so values that share the same first 200 characters always
match)
:return: list of default values tuples of the form ``(id, field_name, value)``
(``id`` is the ID of the default entry, usually irrelevant)
"""
# use a direct SQL query for performance reasons,
# this is called very often
query = """SELECT v.id, v.name, v.value FROM ir_values v
LEFT JOIN res_users u ON (v.user_id = u.id)
WHERE v.key = %%s AND v.model = %%s
AND (v.user_id = %%s OR v.user_id IS NULL)
AND (v.company_id IS NULL OR
v.company_id =
(SELECT company_id from res_users where id = %%s)
)
%s
ORDER BY v.user_id, u.company_id"""
params = ('default', model, uid, uid)
if condition:
query %= 'AND v.key2 = %s'
params += (condition[:200],)
else:
query %= 'AND v.key2 is NULL'
cr.execute(query, params)
# keep only the highest priority default for each field
defaults = {}
for row in cr.dictfetchall():
defaults.setdefault(row['name'],
(row['id'], row['name'], pickle.loads(row['value'].encode('utf-8'))))
return defaults.values()
# use ormcache: this is called a lot by BaseModel.default_get()!
@tools.ormcache(skiparg=2)
def get_defaults_dict(self, cr, uid, model, condition=False):
""" Returns a dictionary mapping field names with their corresponding
default value. This method simply improves the returned value of
:meth:`~.get_defaults`.
"""
return dict((f, v) for i, f, v in self.get_defaults(cr, uid, model, condition))
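    # Illustrative usage (hedged sketch; model and values are hypothetical):
    #
    #   ir_values.get_defaults(cr, uid, 'sale.order')
    #   # -> [(17, 'payment_term', '30 days'), ...]   (entry ids are arbitrary)
    #   ir_values.get_defaults_dict(cr, uid, 'sale.order')
    #   # -> {'payment_term': '30 days', ...}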
def set_action(self, cr, uid, name, action_slot, model, action, res_id=False):
"""Binds an the given action to the given model's action slot - for later
retrieval via :meth:`~.get_actions`. Any existing binding of the same action
to the same slot is first removed, allowing an update of the action's name.
See the class description for more details about the various action
slots: :class:`~ir_values`.
:param string name: action label, usually displayed by UI client
:param string action_slot: the action slot to which the action should be
                                   bound - one of ``client_action_multi``,
``client_print_multi``, ``client_action_relate``,
``tree_but_open``.
:param string model: model name
:param string action: action reference, in the form ``'model,id'``
:param int res_id: optional record id - will bind the action only to a
specific record of the model, not all records.
:return: id of the newly created ir.values entry
"""
assert isinstance(action, basestring) and ',' in action, \
'Action definition must be an action reference, e.g. "ir.actions.act_window,42"'
assert action_slot in ACTION_SLOTS, \
'Action slot (%s) must be one of: %r' % (action_slot, ACTION_SLOTS)
# remove existing action definition of same slot and value
search_criteria = [
('key', '=', 'action'),
('key2', '=', action_slot),
('model', '=', model),
('res_id', '=', res_id or 0), # int field -> NULL == 0
('value', '=', action),
]
self.unlink(cr, uid, self.search(cr, uid, search_criteria))
return self.create(cr, uid, {
'key': 'action',
'key2': action_slot,
'model': model,
'res_id': res_id,
'name': name,
'value': action,
})
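    # Illustrative usage (hedged sketch; the action id 42 is hypothetical):
    #
    #   ir_values.set_action(cr, uid, 'Send Greetings',
    #                        action_slot='client_action_multi',
    #                        model='res.partner',
    #                        action='ir.actions.act_window,42')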
def get_actions(self, cr, uid, action_slot, model, res_id=False, context=None):
"""Retrieves the list of actions bound to the given model's action slot.
See the class description for more details about the various action
slots: :class:`~.ir_values`.
:param string action_slot: the action slot to which the actions should be
                                   bound - one of ``client_action_multi``,
``client_print_multi``, ``client_action_relate``,
``tree_but_open``.
:param string model: model name
        :param int res_id: optional record id - restricts the results to actions
                           bound to that specific record, in addition to actions
                           bound to all records of the model.
:return: list of action tuples of the form ``(id, name, action_def)``,
where ``id`` is the ID of the default entry, ``name`` is the
action label, and ``action_def`` is a dict containing the
action definition as obtained by calling
:meth:`~openerp.osv.osv.osv.read` on the action record.
"""
assert action_slot in ACTION_SLOTS, 'Illegal action slot value: %s' % action_slot
# use a direct SQL query for performance reasons,
# this is called very often
query = """SELECT v.id, v.name, v.value FROM ir_values v
WHERE v.key = %s AND v.key2 = %s
AND v.model = %s
AND (v.res_id = %s
OR v.res_id IS NULL
OR v.res_id = 0)
ORDER BY v.id"""
cr.execute(query, ('action', action_slot, model, res_id or None))
results = {}
for action in cr.dictfetchall():
if not action['value']:
continue # skip if undefined
action_model_name, action_id = action['value'].split(',')
if action_model_name not in self.pool:
                continue # unknown model? skip it
action_model = self.pool[action_model_name]
fields = [field for field in action_model._fields if field not in EXCLUDED_FIELDS]
# FIXME: needs cleanup
try:
action_def = action_model.read(cr, uid, int(action_id), fields, context)
if action_def:
if action_model_name in ('ir.actions.report.xml', 'ir.actions.act_window'):
groups = action_def.get('groups_id')
if groups:
cr.execute('SELECT 1 FROM res_groups_users_rel WHERE gid IN %s AND uid=%s',
(tuple(groups), uid))
if not cr.fetchone():
if action['name'] == 'Menuitem':
                                    raise AccessError(_('You do not have permission to perform this operation.'))
continue
# keep only the first action registered for each action name
results[action['name']] = (action['id'], action['name'], action_def)
except AccessError:
continue
return sorted(results.values())
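    # Illustrative usage (hedged sketch, mirroring the set_action example above):
    #
    #   ir_values.get_actions(cr, uid, 'client_action_multi', 'res.partner')
    #   # -> [(id, 'Send Greetings', {...fields read from the action record...})]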
def _map_legacy_model_list(self, model_list, map_fn, merge_results=False):
"""Apply map_fn to the various models passed, according to
legacy way to specify models/records.
"""
assert isinstance(model_list, (list, tuple)), \
"model_list should be in the form [model,..] or [(model,res_id), ..]"
results = []
for model in model_list:
res_id = False
if isinstance(model, (list, tuple)):
model, res_id = model
result = map_fn(model, res_id)
# some of the functions return one result at a time (tuple or id)
# and some return a list of many of them - care for both
if merge_results:
results.extend(result)
else:
results.append(result)
return results
    # Backwards-compatibility adapter layer to retrofit into the split API
def set(self, cr, uid, key, key2, name, models, value, replace=True, isobject=False, meta=False, preserve_user=False, company=False):
"""Deprecated legacy method to set default values and bind actions to models' action slots.
Now dispatches to the newer API methods according to the value of ``key``: :meth:`~.set_default`
(``key=='default'``) or :meth:`~.set_action` (``key == 'action'``).
:deprecated: As of v6.1, ``set_default()`` or ``set_action()`` should be used directly.
"""
assert key in ['default', 'action'], "ir.values entry keys must be in ['default','action']"
if key == 'default':
def do_set(model,res_id):
return self.set_default(cr, uid, model, field_name=name, value=value,
for_all_users=(not preserve_user), company_id=company,
condition=key2)
elif key == 'action':
def do_set(model,res_id):
return self.set_action(cr, uid, name, action_slot=key2, model=model, action=value, res_id=res_id)
return self._map_legacy_model_list(models, do_set)
def get(self, cr, uid, key, key2, models, meta=False, context=None, res_id_req=False, without_user=True, key2_req=True):
"""Deprecated legacy method to get the list of default values or actions bound to models' action slots.
Now dispatches to the newer API methods according to the value of ``key``: :meth:`~.get_defaults`
(``key=='default'``) or :meth:`~.get_actions` (``key == 'action'``)
:deprecated: As of v6.1, ``get_defaults()`` or ``get_actions()`` should be used directly.
"""
assert key in ['default', 'action'], "ir.values entry keys must be in ['default','action']"
if key == 'default':
def do_get(model,res_id):
return self.get_defaults(cr, uid, model, condition=key2)
elif key == 'action':
def do_get(model,res_id):
return self.get_actions(cr, uid, action_slot=key2, model=model, res_id=res_id, context=context)
return self._map_legacy_model_list(models, do_get, merge_results=True)
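    # Illustrative usage of the deprecated entry points (hedged sketch): both
    # calls dispatch on ``key`` and are equivalent to the new API shown above.
    #
    #   ir_values.set(cr, uid, 'default', False, 'payment_term',
    #                 ['sale.order'], '30 days')
    #   ir_values.get(cr, uid, 'default', False, ['sale.order'])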
| agpl-3.0 |
kittiu/odoo | openerp/addons/base/tests/test_view_validation.py | 396 | 3427 | # This test can be run stand-alone with something like:
# > PYTHONPATH=. python2 openerp/tests/test_view_validation.py
from lxml import etree
from StringIO import StringIO
import unittest2
from openerp.tools.view_validation import (valid_page_in_book, valid_att_in_form, valid_type_in_colspan,
valid_type_in_col, valid_att_in_field, valid_att_in_label,
valid_field_in_graph, valid_field_in_tree
)
invalid_form = etree.parse(StringIO('''\
<form>
<label></label>
<group>
<div>
<page></page>
<label colspan="True"></label>
<field></field>
</div>
</group>
<notebook>
<page>
<group col="Two">
<div>
<label></label>
<field colspan="Five"> </field>
</div>
</group>
</page>
</notebook>
</form>
''')).getroot()
valid_form = etree.parse(StringIO('''\
<form string="">
<field name=""></field>
<field name=""></field>
<notebook>
<page>
<field name=""></field>
<label string=""></label>
<field name=""></field>
</page>
<page>
<group colspan="5" col="2">
<label for=""></label>
<label string="" colspan="5"></label>
</group>
</page>
</notebook>
</form>
''')).getroot()
invalid_graph = etree.parse(StringIO('''\
<graph>
<label/>
<group>
<div>
<field></field>
<field></field>
</div>
</group>
</graph>
''')).getroot()
valid_graph = etree.parse(StringIO('''\
<graph string="">
<field name=""></field>
<field name=""></field>
</graph>
''')).getroot()
invalid_tree = etree.parse(StringIO('''\
<tree>
<group>
<div>
<field></field>
<field></field>
</div>
</group>
</tree>
''')).getroot()
valid_tree = etree.parse(StringIO('''\
<tree string="">
<field name=""></field>
<field name=""></field>
<button/>
<field name=""></field>
</tree>
''')).getroot()
class test_view_validation(unittest2.TestCase):
""" Test the view validation code (but not the views themselves). """
def test_page_validation(self):
assert not valid_page_in_book(invalid_form)
assert valid_page_in_book(valid_form)
def test_all_field_validation(self):
assert not valid_att_in_field(invalid_form)
assert valid_att_in_field(valid_form)
def test_all_label_validation(self):
assert not valid_att_in_label(invalid_form)
assert valid_att_in_label(valid_form)
def test_form_string_validation(self):
assert valid_att_in_form(valid_form)
def test_graph_validation(self):
assert not valid_field_in_graph(invalid_graph)
assert valid_field_in_graph(valid_graph)
def test_tree_validation(self):
assert not valid_field_in_tree(invalid_tree)
assert valid_field_in_tree(valid_tree)
def test_colspan_datatype_validation(self):
assert not valid_type_in_colspan(invalid_form)
assert valid_type_in_colspan(valid_form)
def test_col_datatype_validation(self):
assert not valid_type_in_col(invalid_form)
assert valid_type_in_col(valid_form)
if __name__ == '__main__':
unittest2.main()
| agpl-3.0 |
beniwohli/apm-agent-python | tests/instrumentation/urllib3_tests.py | 1 | 12732 | # BSD 3-Clause License
#
# Copyright (c) 2019, Elasticsearch BV
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import pytest
import urllib3
from elasticapm.conf import constants
from elasticapm.conf.constants import TRANSACTION
from elasticapm.traces import capture_span
from elasticapm.utils.compat import urlparse
from elasticapm.utils.disttracing import TraceParent
def test_urllib3(instrument, elasticapm_client, waiting_httpserver):
waiting_httpserver.serve_content("")
url = waiting_httpserver.url + "/hello_world"
parsed_url = urlparse.urlparse(url)
elasticapm_client.begin_transaction("transaction")
expected_sig = "GET {0}".format(parsed_url.netloc)
with capture_span("test_name", "test_type"):
pool = urllib3.PoolManager(timeout=0.1)
url = "http://{0}/hello_world".format(parsed_url.netloc)
r = pool.request("GET", url)
elasticapm_client.end_transaction("MyView")
transactions = elasticapm_client.events[TRANSACTION]
spans = elasticapm_client.spans_for_transaction(transactions[0])
expected_signatures = {"test_name", expected_sig}
assert {t["name"] for t in spans} == expected_signatures
assert len(spans) == 2
assert spans[0]["name"] == expected_sig
assert spans[0]["type"] == "external"
assert spans[0]["subtype"] == "http"
assert spans[0]["context"]["http"]["url"] == url
assert spans[0]["context"]["http"]["status_code"] == 200
assert spans[0]["context"]["destination"]["service"] == {
"name": "http://127.0.0.1:%d" % parsed_url.port,
"resource": "127.0.0.1:%d" % parsed_url.port,
"type": "external",
}
assert spans[0]["parent_id"] == spans[1]["id"]
assert spans[0]["outcome"] == "success"
assert spans[1]["name"] == "test_name"
assert spans[1]["type"] == "test_type"
assert spans[1]["parent_id"] == transactions[0]["id"]
@pytest.mark.parametrize("status_code", [400, 500])
def test_urllib3_error(instrument, elasticapm_client, waiting_httpserver, status_code):
waiting_httpserver.serve_content("", code=status_code)
url = waiting_httpserver.url + "/hello_world"
parsed_url = urlparse.urlparse(url)
elasticapm_client.begin_transaction("transaction")
expected_sig = "GET {0}".format(parsed_url.netloc)
with capture_span("test_name", "test_type"):
pool = urllib3.PoolManager(timeout=0.1)
url = "http://{0}/hello_world".format(parsed_url.netloc)
r = pool.request("GET", url)
elasticapm_client.end_transaction("MyView")
transactions = elasticapm_client.events[TRANSACTION]
spans = elasticapm_client.spans_for_transaction(transactions[0])
assert spans[0]["name"] == expected_sig
assert spans[0]["type"] == "external"
assert spans[0]["subtype"] == "http"
assert spans[0]["context"]["http"]["url"] == url
assert spans[0]["context"]["http"]["status_code"] == status_code
assert spans[0]["context"]["destination"]["service"] == {
"name": "http://127.0.0.1:%d" % parsed_url.port,
"resource": "127.0.0.1:%d" % parsed_url.port,
"type": "external",
}
assert spans[0]["parent_id"] == spans[1]["id"]
assert spans[0]["outcome"] == "failure"
@pytest.mark.parametrize(
"elasticapm_client",
[
pytest.param({"use_elastic_traceparent_header": True}, id="use_elastic_traceparent_header-True"),
pytest.param({"use_elastic_traceparent_header": False}, id="use_elastic_traceparent_header-False"),
],
indirect=True,
)
def test_trace_parent_propagation_sampled(instrument, elasticapm_client, waiting_httpserver):
waiting_httpserver.serve_content("")
url = waiting_httpserver.url + "/hello_world"
elasticapm_client.begin_transaction("transaction")
pool = urllib3.PoolManager(timeout=0.1)
r = pool.request("GET", url)
elasticapm_client.end_transaction("MyView")
transactions = elasticapm_client.events[TRANSACTION]
spans = elasticapm_client.spans_for_transaction(transactions[0])
headers = waiting_httpserver.requests[0].headers
assert constants.TRACEPARENT_HEADER_NAME in headers
trace_parent = TraceParent.from_string(
headers[constants.TRACEPARENT_HEADER_NAME], tracestate_string=headers[constants.TRACESTATE_HEADER_NAME]
)
assert trace_parent.trace_id == transactions[0]["trace_id"]
assert trace_parent.span_id == spans[0]["id"]
assert trace_parent.trace_options.recorded
# Check that sample_rate was correctly placed in the tracestate
assert constants.TRACESTATE.SAMPLE_RATE in trace_parent.tracestate_dict
if elasticapm_client.config.use_elastic_traceparent_header:
assert constants.TRACEPARENT_LEGACY_HEADER_NAME in headers
assert headers[constants.TRACEPARENT_HEADER_NAME] == headers[constants.TRACEPARENT_LEGACY_HEADER_NAME]
else:
assert constants.TRACEPARENT_LEGACY_HEADER_NAME not in headers
@pytest.mark.parametrize(
"elasticapm_client",
[
pytest.param({"use_elastic_traceparent_header": True}, id="use_elastic_traceparent_header-True"),
pytest.param({"use_elastic_traceparent_header": False}, id="use_elastic_traceparent_header-False"),
],
indirect=True,
)
def test_trace_parent_propagation_unsampled(instrument, elasticapm_client, waiting_httpserver):
waiting_httpserver.serve_content("")
url = waiting_httpserver.url + "/hello_world"
transaction_object = elasticapm_client.begin_transaction("transaction")
transaction_object.is_sampled = False
pool = urllib3.PoolManager(timeout=0.1)
r = pool.request("GET", url)
elasticapm_client.end_transaction("MyView")
transactions = elasticapm_client.events[TRANSACTION]
spans = elasticapm_client.spans_for_transaction(transactions[0])
assert not spans
headers = waiting_httpserver.requests[0].headers
assert constants.TRACEPARENT_HEADER_NAME in headers
trace_parent = TraceParent.from_string(
headers[constants.TRACEPARENT_HEADER_NAME], tracestate_string=headers[constants.TRACESTATE_HEADER_NAME]
)
assert trace_parent.trace_id == transactions[0]["trace_id"]
assert trace_parent.span_id == transaction_object.id
assert not trace_parent.trace_options.recorded
# Check that sample_rate was correctly placed in the tracestate
assert constants.TRACESTATE.SAMPLE_RATE in trace_parent.tracestate_dict
if elasticapm_client.config.use_elastic_traceparent_header:
assert constants.TRACEPARENT_LEGACY_HEADER_NAME in headers
assert headers[constants.TRACEPARENT_HEADER_NAME] == headers[constants.TRACEPARENT_LEGACY_HEADER_NAME]
else:
assert constants.TRACEPARENT_LEGACY_HEADER_NAME not in headers
@pytest.mark.parametrize(
"is_sampled", [pytest.param(True, id="is_sampled-True"), pytest.param(False, id="is_sampled-False")]
)
def test_tracestate_propagation(instrument, elasticapm_client, waiting_httpserver, is_sampled):
traceparent = TraceParent.from_string(
"00-0af7651916cd43dd8448eb211c80319c-b7ad6b7169203331-03", "foo=bar,baz=bazzinga"
)
waiting_httpserver.serve_content("")
url = waiting_httpserver.url + "/hello_world"
transaction_object = elasticapm_client.begin_transaction("transaction", trace_parent=traceparent)
transaction_object.is_sampled = is_sampled
pool = urllib3.PoolManager(timeout=0.1)
r = pool.request("GET", url)
elasticapm_client.end_transaction("MyView")
headers = waiting_httpserver.requests[0].headers
assert headers[constants.TRACESTATE_HEADER_NAME] == "foo=bar,baz=bazzinga"
@pytest.mark.parametrize("elasticapm_client", [{"transaction_max_spans": 1}], indirect=True)
def test_span_only_dropped(instrument, elasticapm_client, waiting_httpserver):
"""test that urllib3 instrumentation does not fail if no parent span can be found"""
waiting_httpserver.serve_content("")
url = waiting_httpserver.url + "/hello_world"
transaction_object = elasticapm_client.begin_transaction("transaction")
for i in range(2):
with capture_span("test", "test"):
pool = urllib3.PoolManager(timeout=0.1)
pool.request("GET", url)
elasticapm_client.end_transaction("bla", "OK")
trace_parent_1 = TraceParent.from_string(waiting_httpserver.requests[0].headers[constants.TRACEPARENT_HEADER_NAME])
trace_parent_2 = TraceParent.from_string(waiting_httpserver.requests[1].headers[constants.TRACEPARENT_HEADER_NAME])
assert trace_parent_1.span_id != transaction_object.id
# second request should use transaction id as span id because there is no span
assert trace_parent_2.span_id == transaction_object.id
def test_url_sanitization(instrument, elasticapm_client, waiting_httpserver):
waiting_httpserver.serve_content("")
url = waiting_httpserver.url + "/hello_world"
url = url.replace("http://", "http://user:pass@")
transaction_object = elasticapm_client.begin_transaction("transaction")
pool = urllib3.PoolManager(timeout=0.1)
r = pool.request("GET", url)
elasticapm_client.end_transaction("MyView")
transactions = elasticapm_client.events[TRANSACTION]
span = elasticapm_client.spans_for_transaction(transactions[0])[0]
assert "pass" not in span["context"]["http"]["url"]
@pytest.mark.parametrize(
"is_sampled", [pytest.param(True, id="is_sampled-True"), pytest.param(False, id="is_sampled-False")]
)
@pytest.mark.parametrize(
"instance_headers",
[pytest.param(True, id="instance-headers-set"), pytest.param(False, id="instance-headers-not-set")],
)
@pytest.mark.parametrize(
"header_arg,header_kwarg",
[
pytest.param(True, False, id="args-set"),
pytest.param(False, True, id="kwargs-set"),
pytest.param(False, False, id="both-not-set"),
],
)
def test_instance_headers_are_respected(
instrument, elasticapm_client, waiting_httpserver, is_sampled, instance_headers, header_arg, header_kwarg
):
traceparent = TraceParent.from_string(
"00-0af7651916cd43dd8448eb211c80319c-b7ad6b7169203331-03", "foo=bar,baz=bazzinga"
)
waiting_httpserver.serve_content("")
url = waiting_httpserver.url + "/hello_world"
parsed_url = urlparse.urlparse(url)
transaction_object = elasticapm_client.begin_transaction("transaction", trace_parent=traceparent)
transaction_object.is_sampled = is_sampled
pool = urllib3.HTTPConnectionPool(
parsed_url.hostname,
parsed_url.port,
maxsize=1,
block=True,
headers={"instance": "true"} if instance_headers else None,
)
if header_arg:
args = ("GET", url, None, {"args": "true"})
else:
args = ("GET", url)
if header_kwarg:
kwargs = {"headers": {"kwargs": "true"}}
else:
kwargs = {}
r = pool.urlopen(*args, **kwargs)
request_headers = waiting_httpserver.requests[0].headers
# all combinations should have the "traceparent" header
assert "traceparent" in request_headers, (instance_headers, header_arg, header_kwarg)
if header_arg:
assert "args" in request_headers
if header_kwarg:
assert "kwargs" in request_headers
if instance_headers and not (header_arg or header_kwarg):
assert "instance" in request_headers
| bsd-3-clause |
MrLoick/python-for-android | python-modules/twisted/twisted/persisted/aot.py | 60 | 17451 | # -*- test-case-name: twisted.test.test_persisted -*-
# Copyright (c) 2001-2004 Twisted Matrix Laboratories.
# See LICENSE for details.
"""
AOT: Abstract Object Trees
The source-code-marshallin'est abstract-object-serializin'est persister
this side of Marmalade!
"""
import types, new, string, copy_reg, tokenize, re
from twisted.python import reflect, log
from twisted.persisted import crefutil
###########################
# Abstract Object Classes #
###########################
#"\0" in a getSource means "insert variable-width indention here".
#see `indentify'.
class Named:
def __init__(self, name):
self.name = name
class Class(Named):
def getSource(self):
return "Class(%r)" % self.name
class Function(Named):
def getSource(self):
return "Function(%r)" % self.name
class Module(Named):
def getSource(self):
return "Module(%r)" % self.name
class InstanceMethod:
def __init__(self, name, klass, inst):
if not (isinstance(inst, Ref) or isinstance(inst, Instance) or isinstance(inst, Deref)):
raise TypeError("%s isn't an Instance, Ref, or Deref!" % inst)
self.name = name
self.klass = klass
self.instance = inst
def getSource(self):
return "InstanceMethod(%r, %r, \n\0%s)" % (self.name, self.klass, prettify(self.instance))
class _NoStateObj:
pass
NoStateObj = _NoStateObj()
_SIMPLE_BUILTINS = [
types.StringType, types.UnicodeType, types.IntType, types.FloatType,
types.ComplexType, types.LongType, types.NoneType, types.SliceType,
types.EllipsisType]
try:
_SIMPLE_BUILTINS.append(types.BooleanType)
except AttributeError:
pass
class Instance:
def __init__(self, className, __stateObj__=NoStateObj, **state):
if not isinstance(className, types.StringType):
raise TypeError("%s isn't a string!" % className)
self.klass = className
if __stateObj__ is not NoStateObj:
self.state = __stateObj__
self.stateIsDict = 0
else:
self.state = state
self.stateIsDict = 1
def getSource(self):
#XXX make state be foo=bar instead of a dict.
if self.stateIsDict:
stateDict = self.state
elif isinstance(self.state, Ref) and isinstance(self.state.obj, types.DictType):
stateDict = self.state.obj
else:
stateDict = None
if stateDict is not None:
try:
return "Instance(%r, %s)" % (self.klass, dictToKW(stateDict))
except NonFormattableDict:
return "Instance(%r, %s)" % (self.klass, prettify(stateDict))
return "Instance(%r, %s)" % (self.klass, prettify(self.state))
class Ref:
def __init__(self, *args):
#blargh, lame.
if len(args) == 2:
self.refnum = args[0]
self.obj = args[1]
elif not args:
self.refnum = None
self.obj = None
def setRef(self, num):
if self.refnum:
raise ValueError("Error setting id %s, I already have %s" % (num, self.refnum))
self.refnum = num
def setObj(self, obj):
if self.obj:
raise ValueError("Error setting obj %s, I already have %s" % (obj, self.obj))
self.obj = obj
def getSource(self):
if self.obj is None:
raise RuntimeError("Don't try to display me before setting an object on me!")
if self.refnum:
return "Ref(%d, \n\0%s)" % (self.refnum, prettify(self.obj))
return prettify(self.obj)
class Deref:
def __init__(self, num):
self.refnum = num
def getSource(self):
return "Deref(%d)" % self.refnum
__repr__ = getSource
class Copyreg:
def __init__(self, loadfunc, state):
self.loadfunc = loadfunc
self.state = state
def getSource(self):
return "Copyreg(%r, %s)" % (self.loadfunc, prettify(self.state))
###############
# Marshalling #
###############
def getSource(ao):
"""Pass me an AO, I'll return a nicely-formatted source representation."""
return indentify("app = " + prettify(ao))
class NonFormattableDict(Exception):
"""A dictionary was not formattable.
"""
r = re.compile('[a-zA-Z_][a-zA-Z0-9_]*$')
def dictToKW(d):
out = []
items = d.items()
items.sort()
for k,v in items:
if not isinstance(k, types.StringType):
raise NonFormattableDict("%r ain't a string" % k)
if not r.match(k):
raise NonFormattableDict("%r ain't an identifier" % k)
out.append(
"\n\0%s=%s," % (k, prettify(v))
)
return string.join(out, '')
def prettify(obj):
if hasattr(obj, 'getSource'):
return obj.getSource()
else:
#basic type
t = type(obj)
if t in _SIMPLE_BUILTINS:
return repr(obj)
elif t is types.DictType:
out = ['{']
for k,v in obj.items():
out.append('\n\0%s: %s,' % (prettify(k), prettify(v)))
out.append(len(obj) and '\n\0}' or '}')
return string.join(out, '')
elif t is types.ListType:
out = ["["]
for x in obj:
out.append('\n\0%s,' % prettify(x))
out.append(len(obj) and '\n\0]' or ']')
return string.join(out, '')
elif t is types.TupleType:
out = ["("]
for x in obj:
out.append('\n\0%s,' % prettify(x))
out.append(len(obj) and '\n\0)' or ')')
return string.join(out, '')
else:
raise TypeError("Unsupported type %s when trying to prettify %s." % (t, obj))
def indentify(s):
out = []
stack = []
def eater(type, val, r, c, l, out=out, stack=stack):
#import sys
#sys.stdout.write(val)
if val in ['[', '(', '{']:
stack.append(val)
elif val in [']', ')', '}']:
stack.pop()
if val == '\0':
out.append(' '*len(stack))
else:
out.append(val)
l = ['', s]
tokenize.tokenize(l.pop, eater)
return string.join(out, '')
###########
# Unjelly #
###########
def unjellyFromAOT(aot):
"""
Pass me an Abstract Object Tree, and I'll unjelly it for you.
"""
return AOTUnjellier().unjelly(aot)
def unjellyFromSource(stringOrFile):
"""
Pass me a string of code or a filename that defines an 'app' variable (in
terms of Abstract Objects!), and I'll execute it and unjelly the resulting
AOT for you, returning a newly unpersisted Application object!
"""
ns = {"Instance": Instance,
"InstanceMethod": InstanceMethod,
"Class": Class,
"Function": Function,
"Module": Module,
"Ref": Ref,
"Deref": Deref,
"Copyreg": Copyreg,
}
if hasattr(stringOrFile, "read"):
exec stringOrFile.read() in ns
else:
exec stringOrFile in ns
if ns.has_key('app'):
return unjellyFromAOT(ns['app'])
else:
raise ValueError("%s needs to define an 'app', it didn't!" % stringOrFile)
class AOTUnjellier:
"""I handle the unjellying of an Abstract Object Tree.
See AOTUnjellier.unjellyAO
"""
def __init__(self):
self.references = {}
self.stack = []
self.afterUnjelly = []
##
# unjelly helpers (copied pretty much directly from (now deleted) marmalade)
##
def unjellyLater(self, node):
"""Unjelly a node, later.
"""
d = crefutil._Defer()
self.unjellyInto(d, 0, node)
return d
def unjellyInto(self, obj, loc, ao):
"""Utility method for unjellying one object into another.
This automates the handling of backreferences.
"""
o = self.unjellyAO(ao)
obj[loc] = o
if isinstance(o, crefutil.NotKnown):
o.addDependant(obj, loc)
return o
def callAfter(self, callable, result):
if isinstance(result, crefutil.NotKnown):
l = [None]
result.addDependant(l, 1)
else:
l = [result]
self.afterUnjelly.append((callable, l))
def unjellyAttribute(self, instance, attrName, ao):
#XXX this is unused????
"""Utility method for unjellying into instances of attributes.
Use this rather than unjellyAO unless you like surprising bugs!
Alternatively, you can use unjellyInto on your instance's __dict__.
"""
self.unjellyInto(instance.__dict__, attrName, ao)
def unjellyAO(self, ao):
"""Unjelly an Abstract Object and everything it contains.
I return the real object.
"""
self.stack.append(ao)
t = type(ao)
if t is types.InstanceType:
#Abstract Objects
c = ao.__class__
if c is Module:
return reflect.namedModule(ao.name)
elif c in [Class, Function] or issubclass(c, type):
return reflect.namedObject(ao.name)
elif c is InstanceMethod:
im_name = ao.name
im_class = reflect.namedObject(ao.klass)
im_self = self.unjellyAO(ao.instance)
if im_name in im_class.__dict__:
if im_self is None:
return getattr(im_class, im_name)
elif isinstance(im_self, crefutil.NotKnown):
return crefutil._InstanceMethod(im_name, im_self, im_class)
else:
return new.instancemethod(im_class.__dict__[im_name],
im_self,
im_class)
else:
raise TypeError("instance method changed")
elif c is Instance:
klass = reflect.namedObject(ao.klass)
state = self.unjellyAO(ao.state)
if hasattr(klass, "__setstate__"):
inst = new.instance(klass, {})
self.callAfter(inst.__setstate__, state)
else:
inst = new.instance(klass, state)
return inst
elif c is Ref:
o = self.unjellyAO(ao.obj) #THIS IS CHANGING THE REF OMG
refkey = ao.refnum
ref = self.references.get(refkey)
if ref is None:
self.references[refkey] = o
elif isinstance(ref, crefutil.NotKnown):
ref.resolveDependants(o)
self.references[refkey] = o
elif refkey is None:
# This happens when you're unjellying from an AOT not read from source
pass
else:
raise ValueError("Multiple references with the same ID: %s, %s, %s!" % (ref, refkey, ao))
return o
elif c is Deref:
num = ao.refnum
ref = self.references.get(num)
if ref is None:
der = crefutil._Dereference(num)
self.references[num] = der
return der
return ref
elif c is Copyreg:
loadfunc = reflect.namedObject(ao.loadfunc)
d = self.unjellyLater(ao.state).addCallback(
lambda result, _l: apply(_l, result), loadfunc)
return d
#Types
elif t in _SIMPLE_BUILTINS:
return ao
elif t is types.ListType:
l = []
for x in ao:
l.append(None)
self.unjellyInto(l, len(l)-1, x)
return l
elif t is types.TupleType:
l = []
tuple_ = tuple
for x in ao:
l.append(None)
if isinstance(self.unjellyInto(l, len(l)-1, x), crefutil.NotKnown):
tuple_ = crefutil._Tuple
return tuple_(l)
elif t is types.DictType:
d = {}
for k,v in ao.items():
kvd = crefutil._DictKeyAndValue(d)
self.unjellyInto(kvd, 0, k)
self.unjellyInto(kvd, 1, v)
return d
else:
raise TypeError("Unsupported AOT type: %s" % t)
del self.stack[-1]
def unjelly(self, ao):
try:
l = [None]
self.unjellyInto(l, 0, ao)
for callable, v in self.afterUnjelly:
callable(v[0])
return l[0]
except:
log.msg("Error jellying object! Stacktrace follows::")
log.msg(string.join(map(repr, self.stack), "\n"))
raise
#########
# Jelly #
#########
def jellyToAOT(obj):
"""Convert an object to an Abstract Object Tree."""
return AOTJellier().jelly(obj)
def jellyToSource(obj, file=None):
"""
Pass me an object and, optionally, a file object.
I'll convert the object to an AOT either return it (if no file was
specified) or write it to the file.
"""
aot = jellyToAOT(obj)
if file:
file.write(getSource(aot))
else:
return getSource(aot)
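# A minimal round-trip sketch (not part of the original module): jelly a plain
# object tree to source text and unjelly it back. Uses only names defined in
# this module.
def _exampleRoundTrip():
    data = {'spam': [1, 2, 3], 'eggs': (4.0, None)}
    source = jellyToSource(data)        # "app = ..." source text
    restored = unjellyFromSource(source)
    assert restored == data
    return source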
class AOTJellier:
def __init__(self):
# dict of {id(obj): (obj, node)}
self.prepared = {}
self._ref_id = 0
self.stack = []
def prepareForRef(self, aoref, object):
"""I prepare an object for later referencing, by storing its id()
and its _AORef in a cache."""
self.prepared[id(object)] = aoref
def jellyToAO(self, obj):
"""I turn an object into an AOT and return it."""
objType = type(obj)
self.stack.append(repr(obj))
#immutable: We don't care if these have multiple refs!
if objType in _SIMPLE_BUILTINS:
retval = obj
elif objType is types.MethodType:
# TODO: make methods 'prefer' not to jelly the object internally,
# so that the object will show up where it's referenced first NOT
# by a method.
retval = InstanceMethod(obj.im_func.__name__, reflect.qual(obj.im_class),
self.jellyToAO(obj.im_self))
elif objType is types.ModuleType:
retval = Module(obj.__name__)
elif objType is types.ClassType:
retval = Class(reflect.qual(obj))
elif issubclass(objType, type):
retval = Class(reflect.qual(obj))
elif objType is types.FunctionType:
retval = Function(reflect.fullFuncName(obj))
else: #mutable! gotta watch for refs.
#Marmalade had the nicety of being able to just stick a 'reference' attribute
#on any Node object that was referenced, but in AOT, the referenced object
#is *inside* of a Ref call (Ref(num, obj) instead of
#<objtype ... reference="1">). The problem is, especially for built-in types,
#I can't just assign some attribute to them to give them a refnum. So, I have
#to "wrap" a Ref(..) around them later -- that's why I put *everything* that's
#mutable inside one. The Ref() class will only print the "Ref(..)" around an
#object if it has a Reference explicitly attached.
if self.prepared.has_key(id(obj)):
oldRef = self.prepared[id(obj)]
if oldRef.refnum:
# it's been referenced already
key = oldRef.refnum
else:
# it hasn't been referenced yet
self._ref_id = self._ref_id + 1
key = self._ref_id
oldRef.setRef(key)
return Deref(key)
retval = Ref()
self.prepareForRef(retval, obj)
if objType is types.ListType:
retval.setObj(map(self.jellyToAO, obj)) #hah!
elif objType is types.TupleType:
retval.setObj(tuple(map(self.jellyToAO, obj)))
elif objType is types.DictionaryType:
d = {}
for k,v in obj.items():
d[self.jellyToAO(k)] = self.jellyToAO(v)
retval.setObj(d)
elif objType is types.InstanceType:
if hasattr(obj, "__getstate__"):
state = self.jellyToAO(obj.__getstate__())
else:
state = self.jellyToAO(obj.__dict__)
retval.setObj(Instance(reflect.qual(obj.__class__), state))
elif copy_reg.dispatch_table.has_key(objType):
unpickleFunc, state = copy_reg.dispatch_table[objType](obj)
retval.setObj(Copyreg( reflect.fullFuncName(unpickleFunc),
self.jellyToAO(state)))
else:
raise TypeError("Unsupported type: %s" % objType.__name__)
del self.stack[-1]
return retval
def jelly(self, obj):
try:
ao = self.jellyToAO(obj)
return ao
except:
log.msg("Error jellying object! Stacktrace follows::")
log.msg(string.join(self.stack, '\n'))
raise
| apache-2.0 |
XXMrHyde/android_external_chromium_org | chrome/test/functional/chromeos_onc.py | 35 | 4836 | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import pyauto_functional # must come before pyauto.
import policy_base
import pyauto
class ChromeosONC(policy_base.PolicyTestBase):
"""
Tests for Open Network Configuration (ONC).
  An Open Network Configuration (ONC) file is a JSON dictionary
  that contains network configurations and is pulled via policies.
  These tests verify that correctly formatted ONC files
  add the network/certificate to the device.
"""
ONC_PATH = os.path.join(pyauto.PyUITest.ChromeOSDataDir(), 'network')
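  # For reference, a minimal ONC file for an open wifi network looks roughly
  # like the JSON sketched below (a hedged sketch of the Chromium ONC format;
  # field values are illustrative, see the real .onc fixtures under ONC_PATH):
  #
  #   {
  #     "NetworkConfigurations": [{
  #       "GUID": "{example-guid}",
  #       "Name": "ssid-none",
  #       "Type": "WiFi",
  #       "WiFi": {"SSID": "ssid-none", "Security": "None"}
  #     }]
  #   }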
def setUp(self):
self.CleanupFlimflamDirsOnChromeOS()
policy_base.PolicyTestBase.setUp(self)
self.LoginWithTestAccount()
def _ReadONCFileAndSet(self, filename):
"""Reads the specified ONC file and sends it as a policy.
Inputs:
filename: The filename of the ONC file. ONC files should
all be stored in the path defined by ONC_PATH.
"""
with open(os.path.join(self.ONC_PATH, filename)) as fp:
self.SetUserPolicy({'OpenNetworkConfiguration': fp.read()})
def _VerifyRememberedWifiNetworks(self, wifi_expect):
"""Verify the list of remembered networks contains those in wifi_expect.
Inputs:
wifi_expect: A dictionary of wifi networks where the key is the ssid
and the value is the encryption type of the network.
"""
# Sometimes there is a race condition where upon restarting chrome
# NetworkScan has not populated the network lists yet. We should
# scan until the device is online.
self.WaitUntil(lambda: not self.NetworkScan().get('offline_mode', True))
networks = self.NetworkScan()
    # Temporary dictionary to keep track of which wifi networks
# have been visited by removing them as we see them.
wifi_expect_temp = dict(wifi_expect)
for service, wifi_dict in networks['remembered_wifi'].iteritems():
if isinstance(wifi_dict, dict) and \
'encryption' in wifi_dict and \
'name' in wifi_dict:
msg = ('Wifi network %s was in the remembered_network list but '
'shouldn\'t be.' % wifi_dict['name'])
# wifi_dict['encryption'] will always be a string and not None.
self.assertTrue(wifi_expect.get(wifi_dict['name'], None) ==
wifi_dict['encryption'], msg)
del wifi_expect_temp[wifi_dict['name']]
# Error if wifi_expect_temp is not empty.
self.assertFalse(wifi_expect_temp, 'The following networks '
'were not remembered: %s' % self.pformat(wifi_expect_temp))
def testONCAddOpenWifi(self):
"""Test adding open network."""
wifi_networks = {
'ssid-none': '',
}
self._ReadONCFileAndSet('toplevel_wifi_open.onc')
self._VerifyRememberedWifiNetworks(wifi_networks)
def testONCAddWEPWifi(self):
"""Test adding WEP network."""
wifi_networks = {
'ssid-wep': 'WEP',
}
self._ReadONCFileAndSet('toplevel_wifi_wep_proxy.onc')
self._VerifyRememberedWifiNetworks(wifi_networks)
def testONCAddPSKWifi(self):
"""Test adding WPA network."""
wifi_networks = {
'ssid-wpa': 'WPA',
}
self._ReadONCFileAndSet('toplevel_wifi_wpa_psk.onc')
self._VerifyRememberedWifiNetworks(wifi_networks)
def testAddBacktoBackONC(self):
"""Test adding three different ONC files one after the other."""
test_dict = {
'toplevel_wifi_open.onc': { 'ssid-none': '' },
'toplevel_wifi_wep_proxy.onc': { 'ssid-wep': 'WEP' },
'toplevel_wifi_wpa_psk.onc': { 'ssid-wpa': 'WPA' },
}
for onc, wifi_networks in test_dict.iteritems():
self._ReadONCFileAndSet(onc)
self._VerifyRememberedWifiNetworks(wifi_networks)
def testAddBacktoBackONC2(self):
"""Test adding three different ONC files one after the other.
Due to inconsistent behaviors as addressed in crosbug.com/27862
this test does not perform a network scan/verification between
the setting of policies.
"""
wifi_networks = {
'ssid-wpa': 'WPA',
}
self._ReadONCFileAndSet('toplevel_wifi_open.onc')
self._ReadONCFileAndSet('toplevel_wifi_wep_proxy.onc')
self._ReadONCFileAndSet('toplevel_wifi_wpa_psk.onc')
# Verify that only the most recent onc is updated.
self._VerifyRememberedWifiNetworks(wifi_networks)
def testAddONCWithUnknownFields(self):
"""Test adding an ONC file with unknown fields."""
wifi_networks = {
'ssid-none': '',
'ssid-wpa': 'WPA'
}
self._ReadONCFileAndSet('toplevel_with_unknown_fields.onc')
self._VerifyRememberedWifiNetworks(wifi_networks)
if __name__ == '__main__':
pyauto_functional.Main()
| bsd-3-clause |
mhdella/scikit-learn | sklearn/utils/class_weight.py | 140 | 7206 | # Authors: Andreas Mueller
# Manoj Kumar
# License: BSD 3 clause
import warnings
import numpy as np
from ..externals import six
from ..utils.fixes import in1d
from .fixes import bincount
def compute_class_weight(class_weight, classes, y):
"""Estimate class weights for unbalanced datasets.
Parameters
----------
class_weight : dict, 'balanced' or None
If 'balanced', class weights will be given by
``n_samples / (n_classes * np.bincount(y))``.
If a dictionary is given, keys are classes and values
are corresponding class weights.
If None is given, the class weights will be uniform.
classes : ndarray
Array of the classes occurring in the data, as given by
``np.unique(y_org)`` with ``y_org`` the original class labels.
y : array-like, shape (n_samples,)
        Array of original class labels per sample.
Returns
-------
class_weight_vect : ndarray, shape (n_classes,)
Array with class_weight_vect[i] the weight for i-th class
References
----------
The "balanced" heuristic is inspired by
    Logistic Regression in Rare Events Data, King, Zeng, 2001.
"""
# Import error caused by circular imports.
from ..preprocessing import LabelEncoder
if class_weight is None or len(class_weight) == 0:
# uniform class weights
weight = np.ones(classes.shape[0], dtype=np.float64, order='C')
elif class_weight in ['auto', 'balanced']:
# Find the weight of each class as present in y.
le = LabelEncoder()
y_ind = le.fit_transform(y)
if not all(np.in1d(classes, le.classes_)):
raise ValueError("classes should have valid labels that are in y")
# inversely proportional to the number of samples in the class
if class_weight == 'auto':
recip_freq = 1. / bincount(y_ind)
weight = recip_freq[le.transform(classes)] / np.mean(recip_freq)
warnings.warn("The class_weight='auto' heuristic is deprecated in"
" favor of a new heuristic class_weight='balanced'."
" 'auto' will be removed in 0.18", DeprecationWarning)
else:
recip_freq = len(y) / (len(le.classes_) *
bincount(y_ind).astype(np.float64))
weight = recip_freq[le.transform(classes)]
else:
# user-defined dictionary
weight = np.ones(classes.shape[0], dtype=np.float64, order='C')
if not isinstance(class_weight, dict):
raise ValueError("class_weight must be dict, 'auto', or None,"
" got: %r" % class_weight)
for c in class_weight:
i = np.searchsorted(classes, c)
if classes[i] != c:
raise ValueError("Class label %d not present." % c)
else:
weight[i] = class_weight[c]
return weight
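def _example_compute_class_weight():
    # Worked example (a sanity-check sketch, not part of the public API):
    # with y = [0, 0, 0, 1] the "balanced" heuristic gives
    # n_samples / (n_classes * np.bincount(y)) = 4 / (2 * [3, 1]),
    # i.e. weights of about [0.667, 2.0] for classes 0 and 1.
    classes = np.array([0, 1])
    y = np.array([0, 0, 0, 1])
    return compute_class_weight('balanced', classes, y)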
def compute_sample_weight(class_weight, y, indices=None):
"""Estimate sample weights by class for unbalanced datasets.
Parameters
----------
class_weight : dict, list of dicts, "balanced", or None, optional
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one. For
multi-output problems, a list of dicts can be provided in the same
order as the columns of y.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data:
``n_samples / (n_classes * np.bincount(y))``.
For multi-output, the weights of each column of y will be multiplied.
y : array-like, shape = [n_samples] or [n_samples, n_outputs]
Array of original class labels per sample.
indices : array-like, shape (n_subsample,), or None
Array of indices to be used in a subsample. Can be of length less than
n_samples in the case of a subsample, or equal to n_samples in the
case of a bootstrap subsample with repeated indices. If None, the
        sample weight will be calculated over the full sample. Only "balanced"
        (or the deprecated "auto") is supported for class_weight if this is
        provided.
Returns
-------
sample_weight_vect : ndarray, shape (n_samples,)
Array with sample weights as applied to the original y
"""
y = np.atleast_1d(y)
if y.ndim == 1:
y = np.reshape(y, (-1, 1))
n_outputs = y.shape[1]
if isinstance(class_weight, six.string_types):
if class_weight not in ['balanced', 'auto']:
raise ValueError('The only valid preset for class_weight is '
'"balanced". Given "%s".' % class_weight)
elif (indices is not None and
not isinstance(class_weight, six.string_types)):
raise ValueError('The only valid class_weight for subsampling is '
'"balanced". Given "%s".' % class_weight)
elif n_outputs > 1:
if (not hasattr(class_weight, "__iter__") or
isinstance(class_weight, dict)):
raise ValueError("For multi-output, class_weight should be a "
"list of dicts, or a valid string.")
if len(class_weight) != n_outputs:
raise ValueError("For multi-output, number of elements in "
"class_weight should match number of outputs.")
expanded_class_weight = []
for k in range(n_outputs):
y_full = y[:, k]
classes_full = np.unique(y_full)
classes_missing = None
if class_weight in ['balanced', 'auto'] or n_outputs == 1:
class_weight_k = class_weight
else:
class_weight_k = class_weight[k]
if indices is not None:
# Get class weights for the subsample, covering all classes in
# case some labels that were present in the original data are
# missing from the sample.
y_subsample = y[indices, k]
classes_subsample = np.unique(y_subsample)
weight_k = np.choose(np.searchsorted(classes_subsample,
classes_full),
compute_class_weight(class_weight_k,
classes_subsample,
y_subsample),
mode='clip')
classes_missing = set(classes_full) - set(classes_subsample)
else:
weight_k = compute_class_weight(class_weight_k,
classes_full,
y_full)
weight_k = weight_k[np.searchsorted(classes_full, y_full)]
if classes_missing:
# Make missing classes' weight zero
weight_k[in1d(y_full, list(classes_missing))] = 0.
expanded_class_weight.append(weight_k)
expanded_class_weight = np.prod(expanded_class_weight,
axis=0,
dtype=np.float64)
return expanded_class_weight
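def _example_compute_sample_weight():
    # Multi-output sketch (not part of the public API): per-column "balanced"
    # weights are computed independently and multiplied. Column 0 is constant
    # (weight 1 everywhere); column 1 yields 3 / (2 * [2, 1]) = [0.75, 1.5],
    # so the expected result is [0.75, 0.75, 1.5].
    y = np.array([[1, 0], [1, 0], [1, 1]])
    return compute_sample_weight('balanced', y)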
| bsd-3-clause |
smalls257/VRvisu | Library/External.LCA_RESTRICTED/Languages/CPython/27/Lib/idlelib/FileList.py | 123 | 3656 | import os
from Tkinter import *
import tkMessageBox
class FileList:
    # N.B. this import is overridden in PyShellFileList.
from idlelib.EditorWindow import EditorWindow
def __init__(self, root):
self.root = root
self.dict = {}
self.inversedict = {}
self.vars = {} # For EditorWindow.getrawvar (shared Tcl variables)
def open(self, filename, action=None):
assert filename
filename = self.canonize(filename)
if os.path.isdir(filename):
# This can happen when bad filename is passed on command line:
tkMessageBox.showerror(
"File Error",
"%r is a directory." % (filename,),
master=self.root)
return None
key = os.path.normcase(filename)
if key in self.dict:
edit = self.dict[key]
edit.top.wakeup()
return edit
if action:
# Don't create window, perform 'action', e.g. open in same window
return action(filename)
else:
return self.EditorWindow(self, filename, key)
def gotofileline(self, filename, lineno=None):
edit = self.open(filename)
if edit is not None and lineno is not None:
edit.gotoline(lineno)
def new(self, filename=None):
return self.EditorWindow(self, filename)
def close_all_callback(self, *args, **kwds):
for edit in self.inversedict.keys():
reply = edit.close()
if reply == "cancel":
break
return "break"
def unregister_maybe_terminate(self, edit):
try:
key = self.inversedict[edit]
except KeyError:
print "Don't know this EditorWindow object. (close)"
return
if key:
del self.dict[key]
del self.inversedict[edit]
if not self.inversedict:
self.root.quit()
def filename_changed_edit(self, edit):
edit.saved_change_hook()
try:
key = self.inversedict[edit]
except KeyError:
print "Don't know this EditorWindow object. (rename)"
return
filename = edit.io.filename
if not filename:
if key:
del self.dict[key]
self.inversedict[edit] = None
return
filename = self.canonize(filename)
newkey = os.path.normcase(filename)
if newkey == key:
return
if newkey in self.dict:
conflict = self.dict[newkey]
self.inversedict[conflict] = None
tkMessageBox.showerror(
"Name Conflict",
"You now have multiple edit windows open for %r" % (filename,),
master=self.root)
self.dict[newkey] = edit
self.inversedict[edit] = newkey
if key:
try:
del self.dict[key]
except KeyError:
pass
def canonize(self, filename):
if not os.path.isabs(filename):
try:
pwd = os.getcwd()
except os.error:
pass
else:
filename = os.path.join(pwd, filename)
return os.path.normpath(filename)
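    # Example (illustrative): with a current working directory of
    # '/home/user', canonize('docs/../notes.py') returns
    # '/home/user/notes.py' -- relative paths are anchored to the cwd and
    # then collapsed with os.path.normpath.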
def _test():
from idlelib.EditorWindow import fixwordbreaks
import sys
root = Tk()
fixwordbreaks(root)
root.withdraw()
flist = FileList(root)
if sys.argv[1:]:
for filename in sys.argv[1:]:
flist.open(filename)
else:
flist.new()
if flist.inversedict:
root.mainloop()
if __name__ == '__main__':
_test()
| gpl-3.0 |
DeltaEpsilon-HackFMI2/FMICalendar-REST | venv/lib/python2.7/site-packages/django/utils/unittest/result.py | 570 | 6105 | """Test result object"""
import sys
import traceback
import unittest
from StringIO import StringIO
from django.utils.unittest import util
from django.utils.unittest.compatibility import wraps
__unittest = True
def failfast(method):
@wraps(method)
def inner(self, *args, **kw):
if getattr(self, 'failfast', False):
self.stop()
return method(self, *args, **kw)
return inner
STDOUT_LINE = '\nStdout:\n%s'
STDERR_LINE = '\nStderr:\n%s'
class TestResult(unittest.TestResult):
"""Holder for test result information.
Test results are automatically managed by the TestCase and TestSuite
classes, and do not need to be explicitly manipulated by writers of tests.
Each instance holds the total number of tests run, and collections of
failures and errors that occurred among those test runs. The collections
contain tuples of (testcase, exceptioninfo), where exceptioninfo is the
formatted traceback of the error that occurred.
"""
_previousTestClass = None
_moduleSetUpFailed = False
def __init__(self):
self.failfast = False
self.failures = []
self.errors = []
self.testsRun = 0
self.skipped = []
self.expectedFailures = []
self.unexpectedSuccesses = []
self.shouldStop = False
self.buffer = False
self._stdout_buffer = None
self._stderr_buffer = None
self._original_stdout = sys.stdout
self._original_stderr = sys.stderr
self._mirrorOutput = False
def startTest(self, test):
"Called when the given test is about to be run"
self.testsRun += 1
self._mirrorOutput = False
if self.buffer:
if self._stderr_buffer is None:
self._stderr_buffer = StringIO()
self._stdout_buffer = StringIO()
sys.stdout = self._stdout_buffer
sys.stderr = self._stderr_buffer
def startTestRun(self):
"""Called once before any tests are executed.
See startTest for a method called before each test.
"""
def stopTest(self, test):
"""Called when the given test has been run"""
if self.buffer:
if self._mirrorOutput:
output = sys.stdout.getvalue()
error = sys.stderr.getvalue()
if output:
if not output.endswith('\n'):
output += '\n'
self._original_stdout.write(STDOUT_LINE % output)
if error:
if not error.endswith('\n'):
error += '\n'
self._original_stderr.write(STDERR_LINE % error)
sys.stdout = self._original_stdout
sys.stderr = self._original_stderr
self._stdout_buffer.seek(0)
self._stdout_buffer.truncate()
self._stderr_buffer.seek(0)
self._stderr_buffer.truncate()
self._mirrorOutput = False
def stopTestRun(self):
"""Called once after all tests are executed.
See stopTest for a method called after each test.
"""
@failfast
def addError(self, test, err):
"""Called when an error has occurred. 'err' is a tuple of values as
returned by sys.exc_info().
"""
self.errors.append((test, self._exc_info_to_string(err, test)))
self._mirrorOutput = True
@failfast
def addFailure(self, test, err):
"""Called when an error has occurred. 'err' is a tuple of values as
returned by sys.exc_info()."""
self.failures.append((test, self._exc_info_to_string(err, test)))
self._mirrorOutput = True
def addSuccess(self, test):
"Called when a test has completed successfully"
pass
def addSkip(self, test, reason):
"""Called when a test is skipped."""
self.skipped.append((test, reason))
def addExpectedFailure(self, test, err):
"""Called when an expected failure/error occured."""
self.expectedFailures.append(
(test, self._exc_info_to_string(err, test)))
@failfast
def addUnexpectedSuccess(self, test):
"""Called when a test was expected to fail, but succeed."""
self.unexpectedSuccesses.append(test)
def wasSuccessful(self):
"Tells whether or not this result was a success"
return (len(self.failures) + len(self.errors) == 0)
def stop(self):
"Indicates that the tests should be aborted"
self.shouldStop = True
def _exc_info_to_string(self, err, test):
"""Converts a sys.exc_info()-style tuple of values into a string."""
exctype, value, tb = err
# Skip test runner traceback levels
while tb and self._is_relevant_tb_level(tb):
tb = tb.tb_next
if exctype is test.failureException:
# Skip assert*() traceback levels
length = self._count_relevant_tb_levels(tb)
msgLines = traceback.format_exception(exctype, value, tb, length)
else:
msgLines = traceback.format_exception(exctype, value, tb)
if self.buffer:
output = sys.stdout.getvalue()
error = sys.stderr.getvalue()
if output:
if not output.endswith('\n'):
output += '\n'
msgLines.append(STDOUT_LINE % output)
if error:
if not error.endswith('\n'):
error += '\n'
msgLines.append(STDERR_LINE % error)
return ''.join(msgLines)
def _is_relevant_tb_level(self, tb):
return '__unittest' in tb.tb_frame.f_globals
def _count_relevant_tb_levels(self, tb):
length = 0
while tb and not self._is_relevant_tb_level(tb):
length += 1
tb = tb.tb_next
return length
def __repr__(self):
return "<%s run=%i errors=%i failures=%i>" % \
(util.strclass(self.__class__), self.testsRun, len(self.errors),
len(self.failures))
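# A minimal usage sketch (not from the original module): results are normally
# driven by a test runner, but a TestCase can be run against a TestResult
# directly.
def _example_run():
    import unittest
    class _Trivial(unittest.TestCase):
        def test_nothing(self):
            pass
    result = TestResult()
    _Trivial('test_nothing').run(result)
    return result.testsRun, result.wasSuccessful()   # -> (1, True)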
| mit |
demisto/content | Packs/Netcraft/Integrations/Netcraft/Netcraft.py | 1 | 20261 |
import demistomock as demisto
from CommonServerPython import *
from CommonServerUserPython import *
''' IMPORTS '''
import requests
from requests.auth import HTTPBasicAuth
# Disable insecure warnings
requests.packages.urllib3.disable_warnings()
''' GLOBALS/PARAMS '''
USERNAME = demisto.params()['credentials']['identifier']
PASSWORD = demisto.params()['credentials']['password']
LIMIT = int(demisto.params().get('limit'))
USE_SSL = not demisto.params().get('unsecure', False)
# Service base URL
BASE_URL = "https://takedown.netcraft.com/"
# codes for malicious site report
MALICIOUS_REPORT_SUCCESS = "TD_OK"
MALICIOUS_REPORT_ALREADY_EXISTS = "TD_EXISTS"
MALICIOUS_REPORT_URL_IS_WILDCARD = "TD_WILDCARD"
MALICIOUS_REPORT_ACCESS_DENIED = "TD_DENIED"
MALICIOUS_REPORT_ERROR = "TD_ERROR"
# suffix endpoints
REPORT_MALICIOUS_SUFFIX = "authorise.php"
GET_TAKEDOWN_INFO_SUFFIX = "apis/get-info.php"
ACCESS_TAKEDOWN_NOTES_SUFFIX = "apis/note.php"
ESCALATE_TAKEDOWN_SUFFIX = "apis/escalate.php"
TEST_MODULE_SUFFIX = "authorise-test.php"
# Table Headers
TAKEDOWN_INFO_HEADER = ["ID", "Status", "Attack Type", "Date Submitted", "Last Updated", "Reporter", "Group ID",
"Region", "Evidence URL", "Attack URL", "IP", "Domain", "Hostname", "Country Code",
"Domain Attack", "Targeted URL", "Certificate"]
TAKEDOWN_NOTE_HEADERS = ["Takedown ID", "Note ID", "Note", "Author", "Time", "Group ID"]
# Titles for human readables
TAKEDOWN_INFO_TITLE = "Takedowns information found:"
REPORT_MALICIOUS_SUCCESS_TITLE = "New takedown successfully created"
''' HELPER FUNCTIONS '''
@logger
def http_request(method, request_suffix, params=None, data=None, should_convert_to_json=True):
    # A wrapper around the requests lib to send our requests and handle responses consistently.
    # The Netcraft API expects the arguments as query params for GET requests and as form data for POST requests.
res = requests.request(
method,
BASE_URL + request_suffix,
verify=USE_SSL,
params=params,
data=data,
auth=HTTPBasicAuth(USERNAME, PASSWORD)
)
if should_convert_to_json:
return res.json()
else:
return res.text.splitlines()
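# Illustrative call shape (hedged sketch; the endpoint suffix comes from this
# module, but the 'id' parameter name and the takedown id are assumptions):
#
#   info = http_request('GET', GET_TAKEDOWN_INFO_SUFFIX, params={'id': '12345'})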
@logger
def filter_by_id(result_list_to_filter, filtering_id_field, desired_id):
""" Given a list of results, returns only the ones that are tied to a given ID.
Args:
result_list_to_filter (list): list of dictionaries, containing data about entries.
filtering_id_field: The name of the field containing the IDs to filter.
desired_id: The ID to keep when filtering.
Returns:
list: A copy of the input list, containing only entries with the desired ID.
"""
new_results_list = [result for result in result_list_to_filter if result[filtering_id_field] == desired_id]
return new_results_list
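def _example_filter_by_id():
    # Tiny self-contained demo of filter_by_id (not used by any command).
    notes = [{'takedown_id': '1', 'note': 'a'}, {'takedown_id': '2', 'note': 'b'}]
    return filter_by_id(notes, 'takedown_id', '1')  # -> [{'takedown_id': '1', 'note': 'a'}]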
@logger
def generate_report_malicious_site_human_readable(response_lines_array):
response_status_code = response_lines_array[0]
human_readable = ""
if response_status_code == MALICIOUS_REPORT_ALREADY_EXISTS:
human_readable = "### Takedown not submitted.\n " \
"A takedown for this URL already exists.\n" \
"ID number of the existing takedown: {}.".format(response_lines_array[1])
elif response_status_code == MALICIOUS_REPORT_URL_IS_WILDCARD:
human_readable = "### Takedown not submitted\n " \
"This URL is a wildcard sub-domain variation of an existing takedown.\n"
elif response_status_code == MALICIOUS_REPORT_ACCESS_DENIED:
human_readable = "### Takedown not submitted\n Access is denied."
elif response_status_code == MALICIOUS_REPORT_ERROR:
human_readable = "### Takedown not submitted\n " \
"An error has occurred while submitting your takedown.\n" \
"Error is: {}".format(" ".join(response_lines_array))
return human_readable
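# Note on the response shape (inferred from the handling above and from
# report_attack_command below): the takedown endpoint replies with plain
# text, so after splitlines() the first line is a TD_* status code and,
# for TD_OK or TD_EXISTS, the second line is the takedown ID,
# e.g. ["TD_EXISTS", "12345"].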
@logger
def return_dict_without_none_values(dict_with_none_values):
""" Removes all keys from given dict which have None as a value.
Args:
dict_with_none_values (dict): dict which may include keys with None as their value.
Returns:
dict: A new copy of the input dictionary, from which all keys with None as a value were removed.
"""
new_dict = {key: dict_with_none_values[key] for key in dict_with_none_values if
dict_with_none_values[key] is not None}
return new_dict
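# Illustrative example (data is hypothetical):
#   return_dict_without_none_values({"id": 7, "region": None})  ->  {"id": 7}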
@logger
def generate_takedown_info_context(takedown_info):
takedown_info_context = {
"ID": takedown_info.get("id"),
"GroupID": takedown_info.get("group_id"),
"Status": takedown_info.get("status"),
"AttackType": takedown_info.get("attack_type"),
"AttackURL": takedown_info.get("attack_url"),
"Region": takedown_info.get("region"),
"DateSubmitted": takedown_info.get("date_submitted"),
"LastUpdated": takedown_info.get("last_updated"),
"EvidenceURL": takedown_info.get("evidence_url"),
"Reporter": takedown_info.get("reporter"),
"IP": takedown_info.get("ip"),
"Domain": takedown_info.get("domain"),
"Hostname": takedown_info.get("hostname"),
"CountryCode": takedown_info.get("country_code"),
"DomainAttack": takedown_info.get("domain_attack"),
"TargetedURL": takedown_info.get("targeted_url"),
"Certificate": takedown_info.get("certificate")
}
return createContext(takedown_info_context, removeNull=True)
@logger
def gen_takedown_info_human_readable(list_of_takedowns_contexts, title=TAKEDOWN_INFO_TITLE):
contexts_in_human_readable_format = []
for takedown_info_context in list_of_takedowns_contexts:
human_readable_dict = {
"ID": takedown_info_context.get("ID"),
"Status": takedown_info_context.get("Status"),
"Attack Type": takedown_info_context.get("AttackType"),
"Date Submitted": takedown_info_context.get("DateSubmitted"),
"Last Updated": takedown_info_context.get("LastUpdated"),
"Reporter": takedown_info_context.get("Reporter"),
"Group ID": takedown_info_context.get("GroupID"),
"Region": takedown_info_context.get("Region"),
"Evidence URL": takedown_info_context.get("EvidenceURL"),
"Attack URL": takedown_info_context.get("AttackURL"),
"IP": takedown_info_context.get("IP"),
"Domain": takedown_info_context.get("Domain"),
"Hostname": takedown_info_context.get("Hostname"),
"Country Code": takedown_info_context.get("CountryCode"),
"Domain Attack": takedown_info_context.get("DomainAttack"),
"Targeted URL": takedown_info_context.get("TargetedURL"),
"Certificate": takedown_info_context.get("Certificate")
}
contexts_in_human_readable_format.append(human_readable_dict)
human_readable = tableToMarkdown(title, contexts_in_human_readable_format,
headers=TAKEDOWN_INFO_HEADER, removeNull=True)
return human_readable
@logger
def generate_list_of_takedowns_context(list_of_takedowns_infos):
takedowns_contexts_list = []
for takedown_info in list_of_takedowns_infos:
takedown_context = generate_takedown_info_context(takedown_info)
takedowns_contexts_list.append(takedown_context)
return takedowns_contexts_list
@logger
def generate_takedown_note_context(takedown_note_json):
takedown_note_context = {
"TakedownID": takedown_note_json.get("takedown_id"),
"NoteID": takedown_note_json.get("note_id"),
"GroupID": takedown_note_json.get("group_id"),
"Author": takedown_note_json.get("author"),
"Note": takedown_note_json.get("note"),
"Time": takedown_note_json.get("time")
}
takedown_note_context = return_dict_without_none_values(takedown_note_context)
return takedown_note_context
@logger
def generate_list_of_takedown_notes_contexts(list_of_takedowns_notes):
takedown_notes_contexts_list = []
for takedown_note in list_of_takedowns_notes:
takedown_note_context = generate_takedown_note_context(takedown_note)
takedown_notes_contexts_list.append(takedown_note_context)
return takedown_notes_contexts_list
@logger
def gen_takedown_notes_human_readable(entry_context):
contexts_in_human_readable_format = []
for takedown_note_context in entry_context:
human_readable_dict = {
"Takedown ID": takedown_note_context.get("TakedownID"),
"Note ID": takedown_note_context.get("NoteID"),
"Group ID": takedown_note_context.get("GroupID"),
"Author": takedown_note_context.get("Author"),
"Note": takedown_note_context.get("Note"),
"Time": takedown_note_context.get("Time")
}
human_readable_dict = return_dict_without_none_values(human_readable_dict)
contexts_in_human_readable_format.append(human_readable_dict)
human_readable = tableToMarkdown(TAKEDOWN_INFO_TITLE, contexts_in_human_readable_format,
headers=TAKEDOWN_NOTE_HEADERS)
return human_readable
@logger
def generate_add_note_human_readable(response):
# if the request was successful, the response includes the id of the created note
if "note_id" in response:
        human_readable = "### Note added successfully\n" \
                         "ID of the note created: {0}".format(response["note_id"])
    else:
        human_readable = "### Failed to add note\n" \
                         "An error occurred while trying to add the note.\n" \
                         "The error code is: {0}.\n" \
                         "The error message is: {1}.".format(response["error_code"], response["error_message"])
return human_readable
@logger
def string_to_bool(string_representing_bool):
return string_representing_bool.lower() == "true"
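# Illustrative behavior: the comparison is case-insensitive, so "True" and
# "TRUE" both map to True, while any other string (including "1") maps to False.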
@logger
def generate_escalate_takedown_human_readable(response):
if "status" in response:
human_readable = "### Takedown escalated successfully"
else:
human_readable = "### Takedown escalation failed\n" \
"An error occured on the takedown escalation attempt.\n" \
"Error code is: {0}\n" \
"Error message from Netcraft is: {1}".format(response["error_code"], response["error_message"])
return human_readable
def add_or_update_note_context_in_takedown(note_context, cur_notes_in_takedown):
if isinstance(cur_notes_in_takedown, dict):
return [note_context]
else:
note_already_in_context = False
        for i, cur_note_context in enumerate(cur_notes_in_takedown):
if cur_note_context["NoteID"] == note_context["NoteID"]:
note_already_in_context = True
cur_notes_in_takedown[i] = note_context
if not note_already_in_context:
cur_notes_in_takedown.append(note_context)
return cur_notes_in_takedown
def add_note_to_suitable_takedown_in_context(note_context, all_takedowns_entry_context):
note_takedown_index = -1
if isinstance(all_takedowns_entry_context, dict):
new_takedown_entry_context = {
"ID": note_context["TakedownID"],
"Note": [note_context]
}
all_takedowns_entry_context = [all_takedowns_entry_context, new_takedown_entry_context] \
if all_takedowns_entry_context else [new_takedown_entry_context]
else:
for i in range(len(all_takedowns_entry_context)):
cur_takedown_context = all_takedowns_entry_context[i]
if cur_takedown_context["ID"] == note_context["TakedownID"]:
note_takedown_index = i
if note_takedown_index == -1:
new_takedown_entry_context = {
"ID": note_context["TakedownID"],
"Note": [note_context]
}
all_takedowns_entry_context.append(new_takedown_entry_context)
else:
takedown_context_to_change = all_takedowns_entry_context[note_takedown_index]
cur_notes_in_takedown = takedown_context_to_change["Note"]
takedown_context_to_change["Note"] = add_or_update_note_context_in_takedown(note_context,
cur_notes_in_takedown)
all_takedowns_entry_context[note_takedown_index] = takedown_context_to_change
return all_takedowns_entry_context
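# Sketch of the merge behavior implemented above (data is hypothetical):
# a note whose TakedownID matches an existing takedown context is appended
# to (or, on a NoteID match, replaces an entry in) that takedown's "Note"
# list; otherwise a new entry {"ID": <TakedownID>, "Note": [<note>]} is
# appended to the context list.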
def generate_netcraft_context_with_notes(list_of_notes_contexts):
all_takedowns_entry_context = demisto.context().get("Netcraft", {}).get("Takedown", {})
for note_context in list_of_notes_contexts:
all_takedowns_entry_context = add_note_to_suitable_takedown_in_context(note_context,
all_takedowns_entry_context)
return all_takedowns_entry_context
''' COMMANDS + REQUESTS FUNCTIONS '''
@logger
def escalate_takedown(takedown_id):
data_for_request = {
"takedown_id": takedown_id
}
request_result = http_request("POST", ESCALATE_TAKEDOWN_SUFFIX, data=data_for_request)
return request_result
def escalate_takedown_command():
args = demisto.args()
response = escalate_takedown(args["takedown_id"])
human_readable = generate_escalate_takedown_human_readable(response)
return_outputs(
readable_output=human_readable,
outputs={},
raw_response=response
)
@logger
def add_notes_to_takedown(takedown_id, note, notify):
data_for_request = {
"takedown_id": takedown_id,
"note": note,
"notify": notify
}
data_for_request = return_dict_without_none_values(data_for_request)
request_result = http_request("POST", ACCESS_TAKEDOWN_NOTES_SUFFIX, data=data_for_request)
return request_result
def add_notes_to_takedown_command():
args = demisto.args()
note = args.get("note")
notify = string_to_bool(args.get("notify")) if args.get("notify") else None
takedown_id = int(args["takedown_id"])
response = add_notes_to_takedown(takedown_id, note, notify)
human_readable = generate_add_note_human_readable(response)
return_outputs(
readable_output=human_readable,
outputs=response
)
def get_takedown_notes(takedown_id, group_id, date_from, date_to, author):
params_for_request = {
"takedown_id": takedown_id,
"group_id": group_id,
"date_to": date_to,
"date_from": date_from,
"author": author
}
params_for_request = return_dict_without_none_values(params_for_request)
request_result = http_request("GET", ACCESS_TAKEDOWN_NOTES_SUFFIX, params=params_for_request)
return request_result
def get_takedown_notes_command():
args = demisto.args()
takedown_id = int(args.get("takedown_id")) if args.get("takedown_id") else None
group_id = int(args.get("group_id")) if args.get("group_id") else None
date_from = args.get("date_from")
date_to = args.get("date_to")
author = args.get("author")
list_of_takedowns_notes = get_takedown_notes(takedown_id, group_id, date_from, date_to, author)
list_of_takedowns_notes = list_of_takedowns_notes[:LIMIT]
if takedown_id:
list_of_takedowns_notes = filter_by_id(list_of_takedowns_notes, "takedown_id", int(takedown_id))
list_of_notes_contexts = generate_list_of_takedown_notes_contexts(list_of_takedowns_notes)
entry_context = {
"Netcraft.Takedown(val.ID == obj.ID)": generate_netcraft_context_with_notes(list_of_notes_contexts)
}
human_readable = gen_takedown_notes_human_readable(list_of_notes_contexts)
return_outputs(
readable_output=human_readable,
outputs=entry_context,
raw_response=list_of_takedowns_notes
)
@logger
def get_takedown_info(takedown_id, ip, url, updated_since, date_from, region):
params_for_request = {
"id": takedown_id,
"ip": ip,
"url": url,
"updated_since": updated_since,
"date_from": date_from,
"region": region,
}
params_for_request = return_dict_without_none_values(params_for_request)
request_result = http_request("GET", GET_TAKEDOWN_INFO_SUFFIX, params=params_for_request)
return request_result
def get_takedown_info_command():
args = demisto.args()
takedown_id = int(args.get("id")) if args.get("id") else None
ip = args.get("ip")
url = args.get("url")
updated_since = args.get("updated_since")
date_from = args.get("date_from")
region = args.get("region")
list_of_takedowns_infos = get_takedown_info(takedown_id, ip, url, updated_since, date_from, region)
list_of_takedowns_infos = list_of_takedowns_infos[:LIMIT]
if takedown_id:
list_of_takedowns_infos = filter_by_id(list_of_takedowns_infos, "id", str(takedown_id))
list_of_takedowns_contexts = generate_list_of_takedowns_context(list_of_takedowns_infos)
human_readable = gen_takedown_info_human_readable(list_of_takedowns_contexts)
entry_context = {
'Netcraft.Takedown(val.ID == obj.ID)': list_of_takedowns_contexts
}
return_outputs(
readable_output=human_readable,
raw_response=list_of_takedowns_infos,
outputs=entry_context,
)
@logger
def report_attack(malicious_site_url, comment, is_test_request=False):
data_for_request = {
"attack": malicious_site_url,
"comment": comment
}
if is_test_request:
request_url_suffix = TEST_MODULE_SUFFIX
else:
request_url_suffix = REPORT_MALICIOUS_SUFFIX
request_result = http_request("POST", request_url_suffix, data=data_for_request, should_convert_to_json=False)
return request_result
def report_attack_command():
args = demisto.args()
entry_context: dict = {}
response_lines_array = report_attack(args["attack"], args["comment"])
result_answer = response_lines_array[0]
if result_answer == MALICIOUS_REPORT_SUCCESS:
new_takedown_id = response_lines_array[1]
# Until the API bug is fixed, this list will include info of all takedowns and not just the new one
new_takedown_infos = get_takedown_info(new_takedown_id, None, None, None, None, None)
new_takedown_infos = new_takedown_infos[:LIMIT]
new_takedown_infos = filter_by_id(new_takedown_infos, "id", new_takedown_id)
list_of_new_takedown_contexts = generate_list_of_takedowns_context(new_takedown_infos)
human_readable = gen_takedown_info_human_readable(list_of_new_takedown_contexts, REPORT_MALICIOUS_SUCCESS_TITLE)
entry_context = {
'Netcraft.Takedown(val.ID == obj.ID)': list_of_new_takedown_contexts
}
else:
human_readable = generate_report_malicious_site_human_readable(response_lines_array)
return_outputs(
readable_output=human_readable,
outputs=entry_context,
raw_response=entry_context
)
def test_module():
"""
Performs basic get request to get item samples
"""
test_result = report_attack("https://www.test.com", "test", True)
if test_result[0] != MALICIOUS_REPORT_SUCCESS:
raise Exception("Test request failed.")
demisto.results("ok")
''' COMMANDS MANAGER / SWITCH PANEL '''
LOG('Command being called is %s' % (demisto.command()))
try:
# Remove proxy if not set to true in params
handle_proxy()
if demisto.command() == 'test-module':
test_module()
elif demisto.command() == 'netcraft-report-attack':
report_attack_command()
elif demisto.command() == 'netcraft-get-takedown-info':
get_takedown_info_command()
elif demisto.command() == 'netcraft-get-takedown-notes':
get_takedown_notes_command()
elif demisto.command() == 'netcraft-add-notes-to-takedown':
add_notes_to_takedown_command()
elif demisto.command() == 'netcraft-escalate-takedown':
escalate_takedown_command()
# Log exceptions
except Exception as e:
return_error(str(e))
| mit |
pratikmallya/heat | heat/tests/engine/service/test_stack_snapshot.py | 7 | 11653 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
import mock
from oslo_messaging.rpc import dispatcher
import six
from heat.common import exception
from heat.common import template_format
from heat.engine import service
from heat.engine import stack
from heat.objects import snapshot as snapshot_objects
from heat.tests import common
from heat.tests.engine import tools
from heat.tests import utils
class SnapshotServiceTest(common.HeatTestCase):
# TODO(Qiming): Rework this test to handle OS::Nova::Server which
# has a real snapshot support.
def setUp(self):
super(SnapshotServiceTest, self).setUp()
self.ctx = utils.dummy_context()
self.engine = service.EngineService('a-host', 'a-topic')
self.engine.create_periodic_tasks()
utils.setup_dummy_db()
def _create_stack(self, stack_name, files=None):
t = template_format.parse(tools.wp_template)
stk = utils.parse_stack(t, stack_name=stack_name, files=files)
stk.state_set(stk.CREATE, stk.COMPLETE, 'mock completion')
return stk
def test_show_snapshot_not_found(self):
stk = self._create_stack('stack_snapshot_not_found')
snapshot_id = str(uuid.uuid4())
ex = self.assertRaises(dispatcher.ExpectedException,
self.engine.show_snapshot,
self.ctx, stk.identifier(),
snapshot_id)
expected = 'Snapshot with id %s not found' % snapshot_id
self.assertEqual(exception.NotFound, ex.exc_info[0])
self.assertIn(expected, six.text_type(ex.exc_info[1]))
def test_show_snapshot_not_belong_to_stack(self):
        stk1 = self._create_stack('stack_snapshot_not_belong_to_stack_1')
stk1._persist_state()
snapshot1 = self.engine.stack_snapshot(
self.ctx, stk1.identifier(), 'snap1')
self.engine.thread_group_mgr.groups[stk1.id].wait()
snapshot_id = snapshot1['id']
        stk2 = self._create_stack('stack_snapshot_not_belong_to_stack_2')
stk2._persist_state()
ex = self.assertRaises(dispatcher.ExpectedException,
self.engine.show_snapshot,
self.ctx, stk2.identifier(),
snapshot_id)
expected = ('The Snapshot (%(snapshot)s) for Stack (%(stack)s) '
'could not be found') % {'snapshot': snapshot_id,
'stack': stk2.name}
self.assertEqual(exception.SnapshotNotFound, ex.exc_info[0])
self.assertIn(expected, six.text_type(ex.exc_info[1]))
@mock.patch.object(stack.Stack, 'load')
def test_create_snapshot(self, mock_load):
files = {'a_file': 'the contents'}
stk = self._create_stack('stack_snapshot_create', files=files)
mock_load.return_value = stk
snapshot = self.engine.stack_snapshot(
self.ctx, stk.identifier(), 'snap1')
self.assertIsNotNone(snapshot['id'])
self.assertIsNotNone(snapshot['creation_time'])
self.assertEqual('snap1', snapshot['name'])
self.assertEqual("IN_PROGRESS", snapshot['status'])
self.engine.thread_group_mgr.groups[stk.id].wait()
snapshot = self.engine.show_snapshot(
self.ctx, stk.identifier(), snapshot['id'])
self.assertEqual("COMPLETE", snapshot['status'])
self.assertEqual("SNAPSHOT", snapshot['data']['action'])
self.assertEqual("COMPLETE", snapshot['data']['status'])
self.assertEqual(files, snapshot['data']['files'])
self.assertEqual(stk.id, snapshot['data']['id'])
self.assertIsNotNone(stk.updated_time)
self.assertIsNotNone(snapshot['creation_time'])
mock_load.assert_called_once_with(self.ctx, stack=mock.ANY)
@mock.patch.object(stack.Stack, 'load')
def test_create_snapshot_action_in_progress(self, mock_load):
stack_name = 'stack_snapshot_action_in_progress'
stk = self._create_stack(stack_name)
mock_load.return_value = stk
stk.state_set(stk.UPDATE, stk.IN_PROGRESS, 'test_override')
ex = self.assertRaises(dispatcher.ExpectedException,
self.engine.stack_snapshot,
self.ctx, stk.identifier(), 'snap_none')
self.assertEqual(exception.ActionInProgress, ex.exc_info[0])
msg = ("Stack %(stack)s already has an action (%(action)s) "
"in progress.") % {'stack': stack_name, 'action': stk.action}
self.assertEqual(msg, six.text_type(ex.exc_info[1]))
mock_load.assert_called_once_with(self.ctx, stack=mock.ANY)
@mock.patch.object(stack.Stack, 'load')
def test_delete_snapshot_not_found(self, mock_load):
stk = self._create_stack('stack_snapshot_delete_not_found')
mock_load.return_value = stk
snapshot_id = str(uuid.uuid4())
ex = self.assertRaises(dispatcher.ExpectedException,
self.engine.delete_snapshot,
self.ctx, stk.identifier(), snapshot_id)
self.assertEqual(exception.NotFound, ex.exc_info[0])
mock_load.assert_called_once_with(self.ctx, stack=mock.ANY)
@mock.patch.object(stack.Stack, 'load')
def test_delete_snapshot_not_belong_to_stack(self, mock_load):
stk1 = self._create_stack('stack_snapshot_delete_not_belong_1')
mock_load.return_value = stk1
snapshot1 = self.engine.stack_snapshot(
self.ctx, stk1.identifier(), 'snap1')
self.engine.thread_group_mgr.groups[stk1.id].wait()
snapshot_id = snapshot1['id']
mock_load.assert_called_once_with(self.ctx, stack=mock.ANY)
mock_load.reset_mock()
stk2 = self._create_stack('stack_snapshot_delete_not_belong_2')
mock_load.return_value = stk2
ex = self.assertRaises(dispatcher.ExpectedException,
self.engine.delete_snapshot,
self.ctx,
stk2.identifier(),
snapshot_id)
expected = ('The Snapshot (%(snapshot)s) for Stack (%(stack)s) '
'could not be found') % {'snapshot': snapshot_id,
'stack': stk2.name}
self.assertEqual(exception.SnapshotNotFound, ex.exc_info[0])
self.assertIn(expected, six.text_type(ex.exc_info[1]))
mock_load.assert_called_once_with(self.ctx, stack=mock.ANY)
mock_load.reset_mock()
@mock.patch.object(stack.Stack, 'load')
def test_delete_snapshot_in_progress(self, mock_load):
# can not delete the snapshot in snapshotting
stk = self._create_stack('test_delete_snapshot_in_progress')
mock_load.return_value = stk
snapshot = mock.Mock()
snapshot.id = str(uuid.uuid4())
snapshot.status = 'IN_PROGRESS'
self.patchobject(snapshot_objects.Snapshot,
'get_snapshot_by_stack').return_value = snapshot
ex = self.assertRaises(dispatcher.ExpectedException,
self.engine.delete_snapshot,
self.ctx, stk.identifier(), snapshot.id)
msg = 'Deleting in-progress snapshot is not supported'
self.assertIn(msg, six.text_type(ex.exc_info[1]))
self.assertEqual(exception.NotSupported, ex.exc_info[0])
@mock.patch.object(stack.Stack, 'load')
def test_delete_snapshot(self, mock_load):
stk = self._create_stack('stack_snapshot_delete_normal')
mock_load.return_value = stk
snapshot = self.engine.stack_snapshot(
self.ctx, stk.identifier(), 'snap1')
self.engine.thread_group_mgr.groups[stk.id].wait()
snapshot_id = snapshot['id']
self.engine.delete_snapshot(self.ctx, stk.identifier(), snapshot_id)
self.engine.thread_group_mgr.groups[stk.id].wait()
ex = self.assertRaises(dispatcher.ExpectedException,
self.engine.show_snapshot, self.ctx,
stk.identifier(), snapshot_id)
self.assertEqual(exception.NotFound, ex.exc_info[0])
        self.assertEqual(2, mock_load.call_count)
@mock.patch.object(stack.Stack, 'load')
def test_list_snapshots(self, mock_load):
stk = self._create_stack('stack_snapshot_list')
mock_load.return_value = stk
snapshot = self.engine.stack_snapshot(
self.ctx, stk.identifier(), 'snap1')
self.assertIsNotNone(snapshot['id'])
self.assertEqual("IN_PROGRESS", snapshot['status'])
self.engine.thread_group_mgr.groups[stk.id].wait()
snapshots = self.engine.stack_list_snapshots(
self.ctx, stk.identifier())
expected = {
"id": snapshot["id"],
"name": "snap1",
"status": "COMPLETE",
"status_reason": "Stack SNAPSHOT completed successfully",
"data": stk.prepare_abandon(),
"creation_time": snapshot['creation_time']}
self.assertEqual([expected], snapshots)
mock_load.assert_called_once_with(self.ctx, stack=mock.ANY)
@mock.patch.object(stack.Stack, 'load')
def test_restore_snapshot(self, mock_load):
stk = self._create_stack('stack_snapshot_restore_normal')
mock_load.return_value = stk
snapshot = self.engine.stack_snapshot(
self.ctx, stk.identifier(), 'snap1')
self.engine.thread_group_mgr.groups[stk.id].wait()
snapshot_id = snapshot['id']
self.engine.stack_restore(self.ctx, stk.identifier(), snapshot_id)
self.engine.thread_group_mgr.groups[stk.id].wait()
self.assertEqual((stk.RESTORE, stk.COMPLETE), stk.state)
self.assertEqual(2, mock_load.call_count)
@mock.patch.object(stack.Stack, 'load')
def test_restore_snapshot_other_stack(self, mock_load):
stk1 = self._create_stack('stack_snapshot_restore_other_stack_1')
mock_load.return_value = stk1
snapshot1 = self.engine.stack_snapshot(
self.ctx, stk1.identifier(), 'snap1')
self.engine.thread_group_mgr.groups[stk1.id].wait()
snapshot_id = snapshot1['id']
mock_load.assert_called_once_with(self.ctx, stack=mock.ANY)
mock_load.reset_mock()
        stk2 = self._create_stack('stack_snapshot_restore_other_stack_2')
mock_load.return_value = stk2
ex = self.assertRaises(dispatcher.ExpectedException,
self.engine.stack_restore,
self.ctx, stk2.identifier(),
snapshot_id)
expected = ('The Snapshot (%(snapshot)s) for Stack (%(stack)s) '
'could not be found') % {'snapshot': snapshot_id,
'stack': stk2.name}
self.assertEqual(exception.SnapshotNotFound, ex.exc_info[0])
self.assertIn(expected, six.text_type(ex.exc_info[1]))
mock_load.assert_called_once_with(self.ctx, stack=mock.ANY)
| apache-2.0 |
byt3bl33d3r/mitmproxy | libmproxy/console/__init__.py | 11 | 21599 | from __future__ import absolute_import
import mailcap
import mimetypes
import tempfile
import os
import os.path
import shlex
import signal
import stat
import subprocess
import sys
import traceback
import urwid
import weakref
from .. import controller, flow, script
from . import flowlist, flowview, help, window, signals, options
from . import grideditor, palettes, contentview, statusbar, palettepicker
EVENTLOG_SIZE = 500
class ConsoleState(flow.State):
def __init__(self):
flow.State.__init__(self)
self.focus = None
self.follow_focus = None
self.default_body_view = contentview.get("Auto")
self.flowsettings = weakref.WeakKeyDictionary()
self.last_search = None
def __setattr__(self, name, value):
self.__dict__[name] = value
signals.update_settings.send(self)
def add_flow_setting(self, flow, key, value):
d = self.flowsettings.setdefault(flow, {})
d[key] = value
def get_flow_setting(self, flow, key, default=None):
d = self.flowsettings.get(flow, {})
return d.get(key, default)
def add_flow(self, f):
super(ConsoleState, self).add_flow(f)
if self.focus is None:
self.set_focus(0)
elif self.follow_focus:
self.set_focus(len(self.view) - 1)
self.set_flow_marked(f, False)
return f
def update_flow(self, f):
super(ConsoleState, self).update_flow(f)
if self.focus is None:
self.set_focus(0)
return f
def set_limit(self, limit):
ret = flow.State.set_limit(self, limit)
self.set_focus(self.focus)
return ret
def get_focus(self):
if not self.view or self.focus is None:
return None, None
return self.view[self.focus], self.focus
def set_focus(self, idx):
if self.view:
if idx >= len(self.view):
idx = len(self.view) - 1
elif idx < 0:
idx = 0
self.focus = idx
else:
self.focus = None
def set_focus_flow(self, f):
self.set_focus(self.view.index(f))
def get_from_pos(self, pos):
if len(self.view) <= pos or pos < 0:
return None, None
return self.view[pos], pos
def get_next(self, pos):
return self.get_from_pos(pos + 1)
def get_prev(self, pos):
return self.get_from_pos(pos - 1)
def delete_flow(self, f):
if f in self.view and self.view.index(f) <= self.focus:
self.focus -= 1
if self.focus < 0:
self.focus = None
ret = flow.State.delete_flow(self, f)
self.set_focus(self.focus)
return ret
def clear(self):
marked_flows = []
for f in self.flows:
if self.flow_marked(f):
marked_flows.append(f)
super(ConsoleState, self).clear()
for f in marked_flows:
self.add_flow(f)
self.set_flow_marked(f, True)
if len(self.flows.views) == 0:
self.focus = None
else:
self.focus = 0
self.set_focus(self.focus)
def flow_marked(self, flow):
return self.get_flow_setting(flow, "marked", False)
def set_flow_marked(self, flow, marked):
self.add_flow_setting(flow, "marked", marked)
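# Illustrative use of the per-flow settings above (`f` is a hypothetical
# flow object):
#   state.add_flow_setting(f, "marked", True)
#   state.get_flow_setting(f, "marked", False)  # -> True
# Settings live in a WeakKeyDictionary, so they disappear with the flow.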
class Options(object):
attributes = [
"app",
"app_domain",
"app_ip",
"anticache",
"anticomp",
"client_replay",
"eventlog",
"keepserving",
"kill",
"intercept",
"limit",
"no_server",
"refresh_server_playback",
"rfile",
"scripts",
"showhost",
"replacements",
"rheaders",
"setheaders",
"server_replay",
"stickycookie",
"stickyauth",
"stream_large_bodies",
"verbosity",
"wfile",
"nopop",
"palette",
"palette_transparent"
]
def __init__(self, **kwargs):
for k, v in kwargs.items():
setattr(self, k, v)
for i in self.attributes:
if not hasattr(self, i):
setattr(self, i, None)
class ConsoleMaster(flow.FlowMaster):
palette = []
def __init__(self, server, options):
flow.FlowMaster.__init__(self, server, ConsoleState())
self.stream_path = None
self.options = options
for i in options.replacements:
self.replacehooks.add(*i)
for i in options.setheaders:
self.setheaders.add(*i)
r = self.set_intercept(options.intercept)
if r:
print >> sys.stderr, "Intercept error:", r
sys.exit(1)
if options.limit:
self.set_limit(options.limit)
r = self.set_stickycookie(options.stickycookie)
if r:
print >> sys.stderr, "Sticky cookies error:", r
sys.exit(1)
r = self.set_stickyauth(options.stickyauth)
if r:
print >> sys.stderr, "Sticky auth error:", r
sys.exit(1)
self.set_stream_large_bodies(options.stream_large_bodies)
self.refresh_server_playback = options.refresh_server_playback
self.anticache = options.anticache
self.anticomp = options.anticomp
self.killextra = options.kill
self.rheaders = options.rheaders
self.nopop = options.nopop
self.showhost = options.showhost
self.palette = options.palette
self.palette_transparent = options.palette_transparent
self.eventlog = options.eventlog
self.eventlist = urwid.SimpleListWalker([])
if options.client_replay:
self.client_playback_path(options.client_replay)
if options.server_replay:
self.server_playback_path(options.server_replay)
if options.scripts:
for i in options.scripts:
err = self.load_script(i)
if err:
print >> sys.stderr, "Script load error:", err
sys.exit(1)
if options.outfile:
err = self.start_stream_to_path(
options.outfile[0],
options.outfile[1]
)
if err:
print >> sys.stderr, "Stream file error:", err
sys.exit(1)
self.view_stack = []
if options.app:
self.start_app(self.options.app_host, self.options.app_port)
signals.call_in.connect(self.sig_call_in)
signals.pop_view_state.connect(self.sig_pop_view_state)
signals.push_view_state.connect(self.sig_push_view_state)
signals.sig_add_event.connect(self.sig_add_event)
def __setattr__(self, name, value):
self.__dict__[name] = value
signals.update_settings.send(self)
def sig_add_event(self, sender, e, level):
needed = dict(error=0, info=1, debug=2).get(level, 1)
if self.options.verbosity < needed:
return
if level == "error":
e = urwid.Text(("error", str(e)))
else:
e = urwid.Text(str(e))
self.eventlist.append(e)
if len(self.eventlist) > EVENTLOG_SIZE:
self.eventlist.pop(0)
self.eventlist.set_focus(len(self.eventlist) - 1)
def add_event(self, e, level):
signals.add_event(e, level)
def sig_call_in(self, sender, seconds, callback, args=()):
def cb(*_):
return callback(*args)
self.loop.set_alarm_in(seconds, cb)
def sig_pop_view_state(self, sender):
if len(self.view_stack) > 1:
self.view_stack.pop()
self.loop.widget = self.view_stack[-1]
else:
signals.status_prompt_onekey.send(
self,
prompt = "Quit",
keys = (
("yes", "y"),
("no", "n"),
),
callback = self.quit,
)
def sig_push_view_state(self, sender, window):
self.view_stack.append(window)
self.loop.widget = window
self.loop.draw_screen()
def start_stream_to_path(self, path, mode="wb"):
path = os.path.expanduser(path)
try:
f = file(path, mode)
self.start_stream(f, None)
except IOError as v:
return str(v)
self.stream_path = path
def _run_script_method(self, method, s, f):
status, val = s.run(method, f)
if val:
if status:
signals.add_event("Method %s return: %s" % (method, val), "debug")
else:
signals.add_event(
"Method %s error: %s" %
(method, val[1]), "error")
def run_script_once(self, command, f):
if not command:
return
signals.add_event("Running script on flow: %s" % command, "debug")
try:
s = script.Script(command, self)
except script.ScriptError as v:
signals.status_message.send(
message = "Error loading script."
)
signals.add_event("Error loading script:\n%s" % v.args[0], "error")
return
if f.request:
self._run_script_method("request", s, f)
if f.response:
self._run_script_method("response", s, f)
if f.error:
self._run_script_method("error", s, f)
s.unload()
signals.flow_change.send(self, flow = f)
def set_script(self, command):
if not command:
return
ret = self.load_script(command)
if ret:
signals.status_message.send(message=ret)
def toggle_eventlog(self):
self.eventlog = not self.eventlog
signals.pop_view_state.send(self)
self.view_flowlist()
def _readflows(self, path):
"""
        Utility function that reads a list of flows
or prints an error to the UI if that fails.
Returns
- None, if there was an error.
- a list of flows, otherwise.
"""
try:
return flow.read_flows_from_paths(path)
except flow.FlowReadError as e:
signals.status_message.send(message=e.strerror)
def client_playback_path(self, path):
if not isinstance(path, list):
path = [path]
flows = self._readflows(path)
if flows:
self.start_client_playback(flows, False)
def server_playback_path(self, path):
if not isinstance(path, list):
path = [path]
flows = self._readflows(path)
if flows:
self.start_server_playback(
flows,
self.killextra, self.rheaders,
False, self.nopop,
self.options.replay_ignore_params,
self.options.replay_ignore_content,
self.options.replay_ignore_payload_params,
self.options.replay_ignore_host
)
def spawn_editor(self, data):
fd, name = tempfile.mkstemp('', "mproxy")
os.write(fd, data)
os.close(fd)
c = os.environ.get("EDITOR")
# if no EDITOR is set, assume 'vi'
if not c:
c = "vi"
cmd = shlex.split(c)
cmd.append(name)
self.ui.stop()
try:
subprocess.call(cmd)
except:
signals.status_message.send(
message = "Can't start editor: %s" % " ".join(c)
)
else:
data = open(name, "rb").read()
self.ui.start()
os.unlink(name)
return data
def spawn_external_viewer(self, data, contenttype):
if contenttype:
contenttype = contenttype.split(";")[0]
ext = mimetypes.guess_extension(contenttype) or ""
else:
ext = ""
fd, name = tempfile.mkstemp(ext, "mproxy")
os.write(fd, data)
os.close(fd)
# read-only to remind the user that this is a view function
os.chmod(name, stat.S_IREAD)
cmd = None
shell = False
if contenttype:
c = mailcap.getcaps()
cmd, _ = mailcap.findmatch(c, contenttype, filename=name)
if cmd:
shell = True
if not cmd:
# hm which one should get priority?
c = os.environ.get("PAGER") or os.environ.get("EDITOR")
if not c:
c = "less"
cmd = shlex.split(c)
cmd.append(name)
self.ui.stop()
try:
subprocess.call(cmd, shell=shell)
except:
signals.status_message.send(
message="Can't start external viewer: %s" % " ".join(c)
)
self.ui.start()
os.unlink(name)
def set_palette(self, name):
self.palette = name
self.ui.register_palette(
palettes.palettes[name].palette(self.palette_transparent)
)
self.ui.clear()
def ticker(self, *userdata):
changed = self.tick(self.masterq, timeout=0)
if changed:
self.loop.draw_screen()
signals.update_settings.send()
self.loop.set_alarm_in(0.01, self.ticker)
def run(self):
self.ui = urwid.raw_display.Screen()
self.ui.set_mouse_tracking()
self.ui.set_terminal_properties(256)
self.set_palette(self.palette)
self.loop = urwid.MainLoop(
urwid.SolidFill("x"),
screen = self.ui,
)
self.server.start_slave(
controller.Slave,
controller.Channel(self.masterq, self.should_exit)
)
if self.options.rfile:
ret = self.load_flows_path(self.options.rfile)
if ret and self.state.flow_count():
signals.add_event(
"File truncated or corrupted. "
"Loaded as many flows as possible.",
"error"
)
elif ret and not self.state.flow_count():
self.shutdown()
print >> sys.stderr, "Could not load file:", ret
sys.exit(1)
self.loop.set_alarm_in(0.01, self.ticker)
# It's not clear why we need to handle this explicitly - without this,
# mitmproxy hangs on keyboard interrupt. Remove if we ever figure it
# out.
def exit(s, f):
raise urwid.ExitMainLoop
signal.signal(signal.SIGINT, exit)
self.loop.set_alarm_in(
0.0001,
lambda *args: self.view_flowlist()
)
try:
self.loop.run()
except Exception:
self.loop.stop()
sys.stdout.flush()
print >> sys.stderr, traceback.format_exc()
print >> sys.stderr, "mitmproxy has crashed!"
print >> sys.stderr, "Please lodge a bug report at:"
print >> sys.stderr, "\thttps://github.com/mitmproxy/mitmproxy"
print >> sys.stderr, "Shutting down..."
sys.stderr.flush()
self.shutdown()
def view_help(self, helpctx):
signals.push_view_state.send(
self,
window = window.Window(
self,
help.HelpView(helpctx),
None,
statusbar.StatusBar(self, help.footer),
None
)
)
def view_options(self):
for i in self.view_stack:
if isinstance(i["body"], options.Options):
return
signals.push_view_state.send(
self,
window = window.Window(
self,
options.Options(self),
None,
statusbar.StatusBar(self, options.footer),
options.help_context,
)
)
def view_palette_picker(self):
signals.push_view_state.send(
self,
window = window.Window(
self,
palettepicker.PalettePicker(self),
None,
statusbar.StatusBar(self, palettepicker.footer),
palettepicker.help_context,
)
)
def view_grideditor(self, ge):
signals.push_view_state.send(
self,
window = window.Window(
self,
ge,
None,
statusbar.StatusBar(self, grideditor.FOOTER),
ge.make_help()
)
)
def view_flowlist(self):
if self.ui.started:
self.ui.clear()
if self.state.follow_focus:
self.state.set_focus(self.state.flow_count())
if self.eventlog:
body = flowlist.BodyPile(self)
else:
body = flowlist.FlowListBox(self)
signals.push_view_state.send(
self,
window = window.Window(
self,
body,
None,
statusbar.StatusBar(self, flowlist.footer),
flowlist.help_context
)
)
def view_flow(self, flow, tab_offset=0):
self.state.set_focus_flow(flow)
signals.push_view_state.send(
self,
window = window.Window(
self,
flowview.FlowView(self, self.state, flow, tab_offset),
flowview.FlowViewHeader(self, flow),
statusbar.StatusBar(self, flowview.footer),
flowview.help_context
)
)
def _write_flows(self, path, flows):
if not path:
return
path = os.path.expanduser(path)
try:
f = file(path, "wb")
fw = flow.FlowWriter(f)
for i in flows:
fw.add(i)
f.close()
except IOError as v:
signals.status_message.send(message=v.strerror)
def save_one_flow(self, path, flow):
return self._write_flows(path, [flow])
def save_flows(self, path):
return self._write_flows(path, self.state.view)
def save_marked_flows(self, path):
marked_flows = []
for f in self.state.view:
if self.state.flow_marked(f):
marked_flows.append(f)
return self._write_flows(path, marked_flows)
def load_flows_callback(self, path):
if not path:
return
ret = self.load_flows_path(path)
return ret or "Flows loaded from %s" % path
def load_flows_path(self, path):
reterr = None
try:
flow.FlowMaster.load_flows_file(self, path)
except flow.FlowReadError as v:
reterr = str(v)
signals.flowlist_change.send(self)
return reterr
def accept_all(self):
self.state.accept_all(self)
def set_limit(self, txt):
v = self.state.set_limit(txt)
signals.flowlist_change.send(self)
return v
def set_intercept(self, txt):
return self.state.set_intercept(txt)
def change_default_display_mode(self, t):
v = contentview.get_by_shortcut(t)
self.state.default_body_view = v
self.refresh_focus()
def edit_scripts(self, scripts):
commands = [x[0] for x in scripts] # remove outer array
if commands == [s.command for s in self.scripts]:
return
self.unload_scripts()
for command in commands:
self.load_script(command)
signals.update_settings.send(self)
def stop_client_playback_prompt(self, a):
if a != "n":
self.stop_client_playback()
def stop_server_playback_prompt(self, a):
if a != "n":
self.stop_server_playback()
def quit(self, a):
if a != "n":
raise urwid.ExitMainLoop
def shutdown(self):
self.state.killall(self)
flow.FlowMaster.shutdown(self)
def clear_flows(self):
self.state.clear()
signals.flowlist_change.send(self)
def toggle_follow_flows(self):
# toggle flow follow
self.state.follow_focus = not self.state.follow_focus
# jump to most recent flow if follow is now on
if self.state.follow_focus:
self.state.set_focus(self.state.flow_count())
signals.flowlist_change.send(self)
def delete_flow(self, f):
self.state.delete_flow(f)
signals.flowlist_change.send(self)
def refresh_focus(self):
if self.state.view:
signals.flow_change.send(
self,
flow = self.state.view[self.state.focus]
)
def process_flow(self, f):
if self.state.intercept and f.match(
self.state.intercept) and not f.request.is_replay:
f.intercept(self)
else:
f.reply()
signals.flowlist_change.send(self)
signals.flow_change.send(self, flow = f)
def clear_events(self):
self.eventlist[:] = []
# Handlers
def handle_error(self, f):
f = flow.FlowMaster.handle_error(self, f)
if f:
self.process_flow(f)
return f
def handle_request(self, f):
f = flow.FlowMaster.handle_request(self, f)
if f:
self.process_flow(f)
return f
def handle_response(self, f):
f = flow.FlowMaster.handle_response(self, f)
if f:
self.process_flow(f)
return f
| mit |
jiangzhuo/kbengine | kbe/src/lib/python/Lib/tkinter/ttk.py | 61 | 55714 | """Ttk wrapper.
This module provides classes to allow using the Tk themed widget set.
Ttk is based on a revised and enhanced version of the style engine
specified in TIP #48 (http://tip.tcl.tk/48).
Its basic idea is to separate, to the extent possible, the code
implementing a widget's behavior from the code implementing its
appearance. Widget class bindings are primarily responsible for
maintaining the widget state and invoking callbacks; all aspects
of the widget's appearance lie in Themes.
"""
__version__ = "0.3.1"
__author__ = "Guilherme Polo <ggpolo@gmail.com>"
__all__ = ["Button", "Checkbutton", "Combobox", "Entry", "Frame", "Label",
"Labelframe", "LabelFrame", "Menubutton", "Notebook", "Panedwindow",
"PanedWindow", "Progressbar", "Radiobutton", "Scale", "Scrollbar",
"Separator", "Sizegrip", "Style", "Treeview",
# Extensions
"LabeledScale", "OptionMenu",
# functions
"tclobjs_to_py", "setup_master"]
import tkinter
from tkinter import _flatten, _join, _stringify, _splitdict
# Verify if Tk is new enough to not need the Tile package
_REQUIRE_TILE = True if tkinter.TkVersion < 8.5 else False
def _load_tile(master):
if _REQUIRE_TILE:
import os
tilelib = os.environ.get('TILE_LIBRARY')
if tilelib:
# append custom tile path to the list of directories that
# Tcl uses when attempting to resolve packages with the package
# command
master.tk.eval(
'global auto_path; '
'lappend auto_path {%s}' % tilelib)
master.tk.eval('package require tile') # TclError may be raised here
master._tile_loaded = True
def _format_optvalue(value, script=False):
"""Internal function."""
if script:
# if caller passes a Tcl script to tk.call, all the values need to
# be grouped into words (arguments to a command in Tcl dialect)
value = _stringify(value)
elif isinstance(value, (list, tuple)):
value = _join(value)
return value
def _format_optdict(optdict, script=False, ignore=None):
"""Formats optdict to a tuple to pass it to tk.call.
E.g. (script=False):
{'foreground': 'blue', 'padding': [1, 2, 3, 4]} returns:
('-foreground', 'blue', '-padding', '1 2 3 4')"""
opts = []
for opt, value in optdict.items():
if not ignore or opt not in ignore:
opts.append("-%s" % opt)
if value is not None:
opts.append(_format_optvalue(value, script))
return _flatten(opts)
def _mapdict_values(items):
# each value in mapdict is expected to be a sequence, where each item
# is another sequence containing a state (or several) and a value
# E.g. (script=False):
# [('active', 'selected', 'grey'), ('focus', [1, 2, 3, 4])]
# returns:
# ['active selected', 'grey', 'focus', [1, 2, 3, 4]]
opt_val = []
for *state, val in items:
        # hacks for backward compatibility
state[0] # raise IndexError if empty
if len(state) == 1:
# if it is empty (something that evaluates to False), then
# format it to Tcl code to denote the "normal" state
state = state[0] or ''
else:
# group multiple states
state = ' '.join(state) # raise TypeError if not str
opt_val.append(state)
if val is not None:
opt_val.append(val)
return opt_val
def _format_mapdict(mapdict, script=False):
"""Formats mapdict to pass it to tk.call.
E.g. (script=False):
{'expand': [('active', 'selected', 'grey'), ('focus', [1, 2, 3, 4])]}
returns:
('-expand', '{active selected} grey focus {1, 2, 3, 4}')"""
opts = []
for opt, value in mapdict.items():
opts.extend(("-%s" % opt,
_format_optvalue(_mapdict_values(value), script)))
return _flatten(opts)
def _format_elemcreate(etype, script=False, *args, **kw):
"""Formats args and kw according to the given element factory etype."""
spec = None
opts = ()
if etype in ("image", "vsapi"):
if etype == "image": # define an element based on an image
# first arg should be the default image name
iname = args[0]
# next args, if any, are statespec/value pairs which is almost
# a mapdict, but we just need the value
imagespec = _join(_mapdict_values(args[1:]))
spec = "%s %s" % (iname, imagespec)
else:
# define an element whose visual appearance is drawn using the
# Microsoft Visual Styles API which is responsible for the
# themed styles on Windows XP and Vista.
# Availability: Tk 8.6, Windows XP and Vista.
class_name, part_id = args[:2]
statemap = _join(_mapdict_values(args[2:]))
spec = "%s %s %s" % (class_name, part_id, statemap)
opts = _format_optdict(kw, script)
elif etype == "from": # clone an element
# it expects a themename and optionally an element to clone from,
# otherwise it will clone {} (empty element)
spec = args[0] # theme name
if len(args) > 1: # elementfrom specified
opts = (_format_optvalue(args[1], script),)
if script:
spec = '{%s}' % spec
opts = ' '.join(opts)
return spec, opts
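# Illustrative (values are hypothetical): an "image" element pairs a default
# image name with statespec/image fallbacks, e.g.
#   _format_elemcreate('image', False, 'img1', ('pressed', 'img2'))
#   -> ('img1 pressed img2', ())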
def _format_layoutlist(layout, indent=0, indent_size=2):
"""Formats a layout list so we can pass the result to ttk::style
    layout and ttk::style settings. Note that the layout doesn't have to
be a list necessarily.
E.g.:
[("Menubutton.background", None),
("Menubutton.button", {"children":
[("Menubutton.focus", {"children":
[("Menubutton.padding", {"children":
[("Menubutton.label", {"side": "left", "expand": 1})]
})]
})]
}),
("Menubutton.indicator", {"side": "right"})
]
returns:
Menubutton.background
Menubutton.button -children {
Menubutton.focus -children {
Menubutton.padding -children {
Menubutton.label -side left -expand 1
}
}
}
Menubutton.indicator -side right"""
script = []
for layout_elem in layout:
elem, opts = layout_elem
opts = opts or {}
fopts = ' '.join(_format_optdict(opts, True, ("children",)))
head = "%s%s%s" % (' ' * indent, elem, (" %s" % fopts) if fopts else '')
if "children" in opts:
script.append(head + " -children {")
indent += indent_size
newscript, indent = _format_layoutlist(opts['children'], indent,
indent_size)
script.append(newscript)
indent -= indent_size
script.append('%s}' % (' ' * indent))
else:
script.append(head)
return '\n'.join(script), indent
def _script_from_settings(settings):
"""Returns an appropriate script, based on settings, according to
theme_settings definition to be used by theme_settings and
theme_create."""
script = []
# a script will be generated according to settings passed, which
# will then be evaluated by Tcl
for name, opts in settings.items():
# will format specific keys according to Tcl code
if opts.get('configure'): # format 'configure'
s = ' '.join(_format_optdict(opts['configure'], True))
script.append("ttk::style configure %s %s;" % (name, s))
if opts.get('map'): # format 'map'
s = ' '.join(_format_mapdict(opts['map'], True))
script.append("ttk::style map %s %s;" % (name, s))
if 'layout' in opts: # format 'layout' which may be empty
if not opts['layout']:
s = 'null' # could be any other word, but this one makes sense
else:
s, _ = _format_layoutlist(opts['layout'])
script.append("ttk::style layout %s {\n%s\n}" % (name, s))
if opts.get('element create'): # format 'element create'
eopts = opts['element create']
etype = eopts[0]
# find where args end, and where kwargs start
argc = 1 # etype was the first one
while argc < len(eopts) and not hasattr(eopts[argc], 'items'):
argc += 1
elemargs = eopts[1:argc]
elemkw = eopts[argc] if argc < len(eopts) and eopts[argc] else {}
spec, opts = _format_elemcreate(etype, True, *elemargs, **elemkw)
script.append("ttk::style element create %s %s %s %s" % (
name, etype, spec, opts))
return '\n'.join(script)
def _list_from_statespec(stuple):
"""Construct a list from the given statespec tuple according to the
accepted statespec accepted by _format_mapdict."""
nval = []
for val in stuple:
typename = getattr(val, 'typename', None)
if typename is None:
nval.append(val)
else: # this is a Tcl object
val = str(val)
if typename == 'StateSpec':
val = val.split()
nval.append(val)
it = iter(nval)
return [_flatten(spec) for spec in zip(it, it)]
def _list_from_layouttuple(tk, ltuple):
"""Construct a list from the tuple returned by ttk::layout, this is
somewhat the reverse of _format_layoutlist."""
ltuple = tk.splitlist(ltuple)
res = []
indx = 0
while indx < len(ltuple):
name = ltuple[indx]
opts = {}
res.append((name, opts))
indx += 1
while indx < len(ltuple): # grab name's options
opt, val = ltuple[indx:indx + 2]
if not opt.startswith('-'): # found next name
break
opt = opt[1:] # remove the '-' from the option
indx += 2
if opt == 'children':
val = _list_from_layouttuple(tk, val)
opts[opt] = val
return res
def _val_or_dict(tk, options, *args):
"""Format options then call Tk command with args and options and return
the appropriate result.
If no option is specified, a dict is returned. If a option is
specified with the None value, the value for that option is returned.
Otherwise, the function just sets the passed options and the caller
shouldn't be expecting a return value anyway."""
options = _format_optdict(options)
res = tk.call(*(args + options))
if len(options) % 2: # option specified without a value, return its value
return res
return _splitdict(tk, res, conv=_tclobj_to_py)
def _convert_stringval(value):
"""Converts a value to, hopefully, a more appropriate Python object."""
value = str(value)
try:
value = int(value)
except (ValueError, TypeError):
pass
return value
def _to_number(x):
if isinstance(x, str):
if '.' in x:
x = float(x)
else:
x = int(x)
return x
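# e.g. (illustrative): _to_number("8.5") -> 8.5, _to_number("8") -> 8,
# and non-string inputs such as 8.5 pass through unchanged.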
def _tclobj_to_py(val):
"""Return value converted from Tcl object to Python object."""
if val and hasattr(val, '__len__') and not isinstance(val, str):
if getattr(val[0], 'typename', None) == 'StateSpec':
val = _list_from_statespec(val)
else:
val = list(map(_convert_stringval, val))
elif hasattr(val, 'typename'): # some other (single) Tcl object
val = _convert_stringval(val)
return val
def tclobjs_to_py(adict):
"""Returns adict with its values converted from Tcl objects to Python
objects."""
for opt, val in adict.items():
adict[opt] = _tclobj_to_py(val)
return adict
def setup_master(master=None):
"""If master is not None, itself is returned. If master is None,
the default master is returned if there is one, otherwise a new
master is created and returned.
If it is not allowed to use the default root and master is None,
RuntimeError is raised."""
if master is None:
if tkinter._support_default_root:
master = tkinter._default_root or tkinter.Tk()
else:
raise RuntimeError(
"No master specified and tkinter is "
"configured to not support default root")
return master
class Style(object):
"""Manipulate style database."""
_name = "ttk::style"
def __init__(self, master=None):
master = setup_master(master)
if not getattr(master, '_tile_loaded', False):
# Load tile now, if needed
_load_tile(master)
self.master = master
self.tk = self.master.tk
def configure(self, style, query_opt=None, **kw):
"""Query or sets the default value of the specified option(s) in
style.
Each key in kw is an option and each value is either a string or
a sequence identifying the value for that option."""
if query_opt is not None:
kw[query_opt] = None
return _val_or_dict(self.tk, kw, self._name, "configure", style)
def map(self, style, query_opt=None, **kw):
"""Query or sets dynamic values of the specified option(s) in
style.
Each key in kw is an option and each value should be a list or a
tuple (usually) containing statespecs grouped in tuples, or list,
or something else of your preference. A statespec is compound of
one or more states and then a value."""
if query_opt is not None:
return _list_from_statespec(self.tk.splitlist(
self.tk.call(self._name, "map", style, '-%s' % query_opt)))
return _splitdict(
self.tk,
self.tk.call(self._name, "map", style, *_format_mapdict(kw)),
conv=_tclobj_to_py)
def lookup(self, style, option, state=None, default=None):
"""Returns the value specified for option in style.
If state is specified it is expected to be a sequence of one
or more states. If the default argument is set, it is used as
a fallback value in case no specification for option is found."""
state = ' '.join(state) if state else ''
return self.tk.call(self._name, "lookup", style, '-%s' % option,
state, default)
def layout(self, style, layoutspec=None):
"""Define the widget layout for given style. If layoutspec is
omitted, return the layout specification for given style.
layoutspec is expected to be a list or an object different than
None that evaluates to False if you want to "turn off" that style.
If it is a list (or tuple, or something else), each item should be
a tuple where the first item is the layout name and the second item
should have the format described below:
LAYOUTS
        A layout can contain the value None, if it takes no options, or
a dict of options specifying how to arrange the element.
The layout mechanism uses a simplified version of the pack
geometry manager: given an initial cavity, each element is
allocated a parcel. Valid options/values are:
side: whichside
Specifies which side of the cavity to place the
element; one of top, right, bottom or left. If
omitted, the element occupies the entire cavity.
sticky: nswe
Specifies where the element is placed inside its
allocated parcel.
children: [sublayout... ]
Specifies a list of elements to place inside the
element. Each element is a tuple (or other sequence)
where the first item is the layout name, and the other
is a LAYOUT."""
lspec = None
if layoutspec:
lspec = _format_layoutlist(layoutspec)[0]
elif layoutspec is not None: # will disable the layout ({}, '', etc)
lspec = "null" # could be any other word, but this may make sense
# when calling layout(style) later
return _list_from_layouttuple(self.tk,
self.tk.call(self._name, "layout", style, lspec))
def element_create(self, elementname, etype, *args, **kw):
"""Create a new element in the current theme of given etype."""
spec, opts = _format_elemcreate(etype, False, *args, **kw)
self.tk.call(self._name, "element", "create", elementname, etype,
spec, *opts)
def element_names(self):
"""Returns the list of elements defined in the current theme."""
return self.tk.splitlist(self.tk.call(self._name, "element", "names"))
def element_options(self, elementname):
"""Return the list of elementname's options."""
return self.tk.splitlist(self.tk.call(self._name, "element", "options", elementname))
def theme_create(self, themename, parent=None, settings=None):
"""Creates a new theme.
It is an error if themename already exists. If parent is
specified, the new theme will inherit styles, elements and
layouts from the specified parent theme. If settings are present,
they are expected to have the same syntax used for theme_settings."""
script = _script_from_settings(settings) if settings else ''
if parent:
self.tk.call(self._name, "theme", "create", themename,
"-parent", parent, "-settings", script)
else:
self.tk.call(self._name, "theme", "create", themename,
"-settings", script)
def theme_settings(self, themename, settings):
"""Temporarily sets the current theme to themename, apply specified
settings and then restore the previous theme.
Each key in settings is a style and each value may contain the
keys 'configure', 'map', 'layout' and 'element create' and they
are expected to have the same format as specified by the methods
configure, map, layout and element_create respectively."""
script = _script_from_settings(settings)
self.tk.call(self._name, "theme", "settings", themename, script)
def theme_names(self):
"""Returns a list of all known themes."""
return self.tk.splitlist(self.tk.call(self._name, "theme", "names"))
def theme_use(self, themename=None):
"""If themename is None, returns the theme in use, otherwise, set
the current theme to themename, refreshes all widgets and emits
a <<ThemeChanged>> event."""
if themename is None:
# Starting on Tk 8.6, checking this global is no longer needed
# since it allows doing self.tk.call(self._name, "theme", "use")
return self.tk.eval("return $ttk::currentTheme")
# using "ttk::setTheme" instead of "ttk::style theme use" causes
# the variable currentTheme to be updated, also, ttk::setTheme calls
# "ttk::style theme use" in order to change theme.
self.tk.call("ttk::setTheme", themename)
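    # Illustrative usage sketch (assumes a Tk interpreter is available):
    #   import tkinter
    #   from tkinter import ttk
    #   root = tkinter.Tk()
    #   style = ttk.Style(root)
    #   style.theme_names()        # e.g. ('clam', 'alt', 'default', 'classic')
    #   style.theme_use('clam')    # refreshes all widgets, emits <<ThemeChanged>>
    #   style.configure('TButton', padding=6)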
class Widget(tkinter.Widget):
"""Base class for Tk themed widgets."""
def __init__(self, master, widgetname, kw=None):
"""Constructs a Ttk Widget with the parent master.
STANDARD OPTIONS
class, cursor, takefocus, style
SCROLLABLE WIDGET OPTIONS
xscrollcommand, yscrollcommand
LABEL WIDGET OPTIONS
text, textvariable, underline, image, compound, width
WIDGET STATES
active, disabled, focus, pressed, selected, background,
readonly, alternate, invalid
"""
master = setup_master(master)
if not getattr(master, '_tile_loaded', False):
# Load tile now, if needed
_load_tile(master)
tkinter.Widget.__init__(self, master, widgetname, kw=kw)
def identify(self, x, y):
"""Returns the name of the element at position x, y, or the empty
string if the point does not lie within any element.
x and y are pixel coordinates relative to the widget."""
return self.tk.call(self._w, "identify", x, y)
def instate(self, statespec, callback=None, *args, **kw):
"""Test the widget's state.
If callback is not specified, returns True if the widget state
matches statespec and False otherwise. If callback is specified,
then it will be invoked with *args, **kw if the widget state
matches statespec. statespec is expected to be a sequence."""
ret = self.tk.getboolean(
self.tk.call(self._w, "instate", ' '.join(statespec)))
if ret and callback:
return callback(*args, **kw)
return bool(ret)
def state(self, statespec=None):
"""Modify or inquire widget state.
Widget state is returned if statespec is None, otherwise it is
set according to the statespec flags and then a new state spec
is returned indicating which flags were changed. statespec is
expected to be a sequence."""
if statespec is not None:
statespec = ' '.join(statespec)
return self.tk.splitlist(str(self.tk.call(self._w, "state", statespec)))
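    # Hedged usage sketch (assumes an existing ttk widget "btn"):
    #
    #   btn.state(['disabled'])    # set the disabled flag
    #   btn.state(['!disabled'])   # clear it again
    #   btn.instate(['!disabled'], btn.invoke)  # invoke only while enabled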
class Button(Widget):
"""Ttk Button widget, displays a textual label and/or image, and
evaluates a command when pressed."""
def __init__(self, master=None, **kw):
"""Construct a Ttk Button widget with the parent master.
STANDARD OPTIONS
class, compound, cursor, image, state, style, takefocus,
text, textvariable, underline, width
WIDGET-SPECIFIC OPTIONS
command, default, width
"""
Widget.__init__(self, master, "ttk::button", kw)
def invoke(self):
"""Invokes the command associated with the button."""
return self.tk.call(self._w, "invoke")
class Checkbutton(Widget):
"""Ttk Checkbutton widget which is either in on- or off-state."""
def __init__(self, master=None, **kw):
"""Construct a Ttk Checkbutton widget with the parent master.
STANDARD OPTIONS
class, compound, cursor, image, state, style, takefocus,
text, textvariable, underline, width
WIDGET-SPECIFIC OPTIONS
command, offvalue, onvalue, variable
"""
Widget.__init__(self, master, "ttk::checkbutton", kw)
def invoke(self):
"""Toggles between the selected and deselected states and
invokes the associated command. If the widget is currently
selected, sets the option variable to the offvalue option
and deselects the widget; otherwise, sets the option variable
to the option onvalue.
Returns the result of the associated command."""
return self.tk.call(self._w, "invoke")
class Entry(Widget, tkinter.Entry):
"""Ttk Entry widget displays a one-line text string and allows that
string to be edited by the user."""
def __init__(self, master=None, widget=None, **kw):
"""Constructs a Ttk Entry widget with the parent master.
STANDARD OPTIONS
class, cursor, style, takefocus, xscrollcommand
WIDGET-SPECIFIC OPTIONS
exportselection, invalidcommand, justify, show, state,
textvariable, validate, validatecommand, width
VALIDATION MODES
none, key, focus, focusin, focusout, all
"""
Widget.__init__(self, master, widget or "ttk::entry", kw)
def bbox(self, index):
"""Return a tuple of (x, y, width, height) which describes the
bounding box of the character given by index."""
return self._getints(self.tk.call(self._w, "bbox", index))
def identify(self, x, y):
"""Returns the name of the element at position x, y, or the
empty string if the coordinates are outside the window."""
return self.tk.call(self._w, "identify", x, y)
def validate(self):
"""Force revalidation, independent of the conditions specified
by the validate option. Returns False if validation fails, True
if it succeeds. Sets or clears the invalid state accordingly."""
return bool(self.tk.getboolean(self.tk.call(self._w, "validate")))
class Combobox(Entry):
"""Ttk Combobox widget combines a text field with a pop-down list of
values."""
def __init__(self, master=None, **kw):
"""Construct a Ttk Combobox widget with the parent master.
STANDARD OPTIONS
class, cursor, style, takefocus
WIDGET-SPECIFIC OPTIONS
exportselection, justify, height, postcommand, state,
textvariable, values, width
"""
Entry.__init__(self, master, "ttk::combobox", **kw)
def current(self, newindex=None):
"""If newindex is supplied, sets the combobox value to the
element at position newindex in the list of values. Otherwise,
returns the index of the current value in the list of values
or -1 if the current value does not appear in the list."""
if newindex is None:
return self.tk.getint(self.tk.call(self._w, "current"))
return self.tk.call(self._w, "current", newindex)
def set(self, value):
"""Sets the value of the combobox to value."""
self.tk.call(self._w, "set", value)
class Frame(Widget):
"""Ttk Frame widget is a container, used to group other widgets
together."""
def __init__(self, master=None, **kw):
"""Construct a Ttk Frame with parent master.
STANDARD OPTIONS
class, cursor, style, takefocus
WIDGET-SPECIFIC OPTIONS
borderwidth, relief, padding, width, height
"""
Widget.__init__(self, master, "ttk::frame", kw)
class Label(Widget):
"""Ttk Label widget displays a textual label and/or image."""
def __init__(self, master=None, **kw):
"""Construct a Ttk Label with parent master.
STANDARD OPTIONS
class, compound, cursor, image, style, takefocus, text,
textvariable, underline, width
WIDGET-SPECIFIC OPTIONS
anchor, background, font, foreground, justify, padding,
relief, text, wraplength
"""
Widget.__init__(self, master, "ttk::label", kw)
class Labelframe(Widget):
"""Ttk Labelframe widget is a container used to group other widgets
together. It has an optional label, which may be a plain text string
or another widget."""
def __init__(self, master=None, **kw):
"""Construct a Ttk Labelframe with parent master.
STANDARD OPTIONS
class, cursor, style, takefocus
WIDGET-SPECIFIC OPTIONS
labelanchor, text, underline, padding, labelwidget, width,
height
"""
Widget.__init__(self, master, "ttk::labelframe", kw)
LabelFrame = Labelframe # tkinter name compatibility
class Menubutton(Widget):
"""Ttk Menubutton widget displays a textual label and/or image, and
displays a menu when pressed."""
def __init__(self, master=None, **kw):
"""Construct a Ttk Menubutton with parent master.
STANDARD OPTIONS
class, compound, cursor, image, state, style, takefocus,
text, textvariable, underline, width
WIDGET-SPECIFIC OPTIONS
direction, menu
"""
Widget.__init__(self, master, "ttk::menubutton", kw)
class Notebook(Widget):
"""Ttk Notebook widget manages a collection of windows and displays
a single one at a time. Each child window is associated with a tab,
which the user may select to change the currently-displayed window."""
def __init__(self, master=None, **kw):
"""Construct a Ttk Notebook with parent master.
STANDARD OPTIONS
class, cursor, style, takefocus
WIDGET-SPECIFIC OPTIONS
height, padding, width
TAB OPTIONS
state, sticky, padding, text, image, compound, underline
TAB IDENTIFIERS (tab_id)
The tab_id argument found in several methods may take any of
the following forms:
* An integer between zero and the number of tabs
* The name of a child window
* A positional specification of the form "@x,y", which
defines the tab
* The string "current", which identifies the
currently-selected tab
* The string "end", which returns the number of tabs (only
valid for method index)
"""
Widget.__init__(self, master, "ttk::notebook", kw)
def add(self, child, **kw):
"""Adds a new tab to the notebook.
If window is currently managed by the notebook but hidden, it is
restored to its previous position."""
self.tk.call(self._w, "add", child, *(_format_optdict(kw)))
def forget(self, tab_id):
"""Removes the tab specified by tab_id, unmaps and unmanages the
associated window."""
self.tk.call(self._w, "forget", tab_id)
def hide(self, tab_id):
"""Hides the tab specified by tab_id.
The tab will not be displayed, but the associated window remains
managed by the notebook and its configuration remembered. Hidden
tabs may be restored with the add command."""
self.tk.call(self._w, "hide", tab_id)
def identify(self, x, y):
"""Returns the name of the tab element at position x, y, or the
empty string if none."""
return self.tk.call(self._w, "identify", x, y)
def index(self, tab_id):
"""Returns the numeric index of the tab specified by tab_id, or
the total number of tabs if tab_id is the string "end"."""
return self.tk.getint(self.tk.call(self._w, "index", tab_id))
def insert(self, pos, child, **kw):
"""Inserts a pane at the specified position.
pos is either the string end, an integer index, or the name of
a managed child. If child is already managed by the notebook,
moves it to the specified position."""
self.tk.call(self._w, "insert", pos, child, *(_format_optdict(kw)))
def select(self, tab_id=None):
"""Selects the specified tab.
The associated child window will be displayed, and the
previously-selected window (if different) is unmapped. If tab_id
is omitted, returns the widget name of the currently selected
pane."""
return self.tk.call(self._w, "select", tab_id)
def tab(self, tab_id, option=None, **kw):
"""Query or modify the options of the specific tab_id.
If kw is not given, returns a dict of the tab option values. If option
is specified, returns the value of that option. Otherwise, sets the
options to the corresponding values."""
if option is not None:
kw[option] = None
return _val_or_dict(self.tk, kw, self._w, "tab", tab_id)
def tabs(self):
"""Returns a list of windows managed by the notebook."""
return self.tk.splitlist(self.tk.call(self._w, "tabs") or ())
def enable_traversal(self):
"""Enable keyboard traversal for a toplevel window containing
this notebook.
This will extend the bindings for the toplevel window containing
this notebook as follows:
Control-Tab: selects the tab following the currently selected
one
Shift-Control-Tab: selects the tab preceding the currently
selected one
Alt-K: where K is the mnemonic (underlined) character of any
tab, will select that tab.
Multiple notebooks in a single toplevel may be enabled for
traversal, including nested notebooks. However, notebook traversal
only works properly if all panes are direct children of the
notebook."""
        # The only, and good, difference I see is about mnemonics, which work
        # after calling this method. Control-Tab and Shift-Control-Tab always
        # work (here at least).
self.tk.call("ttk::notebook::enableTraversal", self._w)
class Panedwindow(Widget, tkinter.PanedWindow):
"""Ttk Panedwindow widget displays a number of subwindows, stacked
either vertically or horizontally."""
def __init__(self, master=None, **kw):
"""Construct a Ttk Panedwindow with parent master.
STANDARD OPTIONS
class, cursor, style, takefocus
WIDGET-SPECIFIC OPTIONS
orient, width, height
PANE OPTIONS
weight
"""
Widget.__init__(self, master, "ttk::panedwindow", kw)
forget = tkinter.PanedWindow.forget # overrides Pack.forget
def insert(self, pos, child, **kw):
"""Inserts a pane at the specified positions.
pos is either the string end, and integer index, or the name
of a child. If child is already managed by the paned window,
moves it to the specified position."""
self.tk.call(self._w, "insert", pos, child, *(_format_optdict(kw)))
def pane(self, pane, option=None, **kw):
"""Query or modify the options of the specified pane.
pane is either an integer index or the name of a managed subwindow.
If kw is not given, returns a dict of the pane option values. If
option is specified then the value for that option is returned.
Otherwise, sets the options to the corresponding values."""
if option is not None:
kw[option] = None
return _val_or_dict(self.tk, kw, self._w, "pane", pane)
def sashpos(self, index, newpos=None):
"""If newpos is specified, sets the position of sash number index.
May adjust the positions of adjacent sashes to ensure that
positions are monotonically increasing. Sash positions are further
constrained to be between 0 and the total size of the widget.
Returns the new position of sash number index."""
return self.tk.getint(self.tk.call(self._w, "sashpos", index, newpos))
PanedWindow = Panedwindow # tkinter name compatibility
class Progressbar(Widget):
"""Ttk Progressbar widget shows the status of a long-running
operation. They can operate in two modes: determinate mode shows the
amount completed relative to the total amount of work to be done, and
indeterminate mode provides an animated display to let the user know
that something is happening."""
def __init__(self, master=None, **kw):
"""Construct a Ttk Progressbar with parent master.
STANDARD OPTIONS
class, cursor, style, takefocus
WIDGET-SPECIFIC OPTIONS
orient, length, mode, maximum, value, variable, phase
"""
Widget.__init__(self, master, "ttk::progressbar", kw)
def start(self, interval=None):
"""Begin autoincrement mode: schedules a recurring timer event
that calls method step every interval milliseconds.
        interval defaults to 50 milliseconds (20 steps/second) if omitted."""
self.tk.call(self._w, "start", interval)
def step(self, amount=None):
"""Increments the value option by amount.
amount defaults to 1.0 if omitted."""
self.tk.call(self._w, "step", amount)
def stop(self):
"""Stop autoincrement mode: cancels any recurring timer event
initiated by start."""
self.tk.call(self._w, "stop")
class Radiobutton(Widget):
"""Ttk Radiobutton widgets are used in groups to show or change a
set of mutually-exclusive options."""
def __init__(self, master=None, **kw):
"""Construct a Ttk Radiobutton with parent master.
STANDARD OPTIONS
class, compound, cursor, image, state, style, takefocus,
text, textvariable, underline, width
WIDGET-SPECIFIC OPTIONS
command, value, variable
"""
Widget.__init__(self, master, "ttk::radiobutton", kw)
def invoke(self):
"""Sets the option variable to the option value, selects the
widget, and invokes the associated command.
Returns the result of the command, or an empty string if
no command is specified."""
return self.tk.call(self._w, "invoke")
class Scale(Widget, tkinter.Scale):
"""Ttk Scale widget is typically used to control the numeric value of
a linked variable that varies uniformly over some range."""
def __init__(self, master=None, **kw):
"""Construct a Ttk Scale with parent master.
STANDARD OPTIONS
class, cursor, style, takefocus
WIDGET-SPECIFIC OPTIONS
command, from, length, orient, to, value, variable
"""
Widget.__init__(self, master, "ttk::scale", kw)
def configure(self, cnf=None, **kw):
"""Modify or query scale options.
Setting a value for any of the "from", "from_" or "to" options
generates a <<RangeChanged>> event."""
if cnf:
kw.update(cnf)
Widget.configure(self, **kw)
if any(['from' in kw, 'from_' in kw, 'to' in kw]):
self.event_generate('<<RangeChanged>>')
def get(self, x=None, y=None):
"""Get the current value of the value option, or the value
corresponding to the coordinates x, y if they are specified.
x and y are pixel coordinates relative to the scale widget
origin."""
return self.tk.call(self._w, 'get', x, y)
class Scrollbar(Widget, tkinter.Scrollbar):
"""Ttk Scrollbar controls the viewport of a scrollable widget."""
def __init__(self, master=None, **kw):
"""Construct a Ttk Scrollbar with parent master.
STANDARD OPTIONS
class, cursor, style, takefocus
WIDGET-SPECIFIC OPTIONS
command, orient
"""
Widget.__init__(self, master, "ttk::scrollbar", kw)
class Separator(Widget):
"""Ttk Separator widget displays a horizontal or vertical separator
bar."""
def __init__(self, master=None, **kw):
"""Construct a Ttk Separator with parent master.
STANDARD OPTIONS
class, cursor, style, takefocus
WIDGET-SPECIFIC OPTIONS
orient
"""
Widget.__init__(self, master, "ttk::separator", kw)
class Sizegrip(Widget):
"""Ttk Sizegrip allows the user to resize the containing toplevel
window by pressing and dragging the grip."""
def __init__(self, master=None, **kw):
"""Construct a Ttk Sizegrip with parent master.
STANDARD OPTIONS
class, cursor, state, style, takefocus
"""
Widget.__init__(self, master, "ttk::sizegrip", kw)
class Treeview(Widget, tkinter.XView, tkinter.YView):
"""Ttk Treeview widget displays a hierarchical collection of items.
Each item has a textual label, an optional image, and an optional list
of data values. The data values are displayed in successive columns
after the tree label."""
def __init__(self, master=None, **kw):
"""Construct a Ttk Treeview with parent master.
STANDARD OPTIONS
class, cursor, style, takefocus, xscrollcommand,
yscrollcommand
WIDGET-SPECIFIC OPTIONS
columns, displaycolumns, height, padding, selectmode, show
ITEM OPTIONS
text, image, values, open, tags
TAG OPTIONS
foreground, background, font, image
"""
Widget.__init__(self, master, "ttk::treeview", kw)
def bbox(self, item, column=None):
"""Returns the bounding box (relative to the treeview widget's
window) of the specified item in the form x y width height.
If column is specified, returns the bounding box of that cell.
If the item is not visible (i.e., if it is a descendant of a
closed item or is scrolled offscreen), returns an empty string."""
return self._getints(self.tk.call(self._w, "bbox", item, column)) or ''
def get_children(self, item=None):
"""Returns a tuple of children belonging to item.
If item is not specified, returns root children."""
return self.tk.splitlist(
self.tk.call(self._w, "children", item or '') or ())
def set_children(self, item, *newchildren):
"""Replaces item's child with newchildren.
Children present in item that are not present in newchildren
are detached from tree. No items in newchildren may be an
ancestor of item."""
self.tk.call(self._w, "children", item, newchildren)
def column(self, column, option=None, **kw):
"""Query or modify the options for the specified column.
If kw is not given, returns a dict of the column option values. If
option is specified then the value for that option is returned.
Otherwise, sets the options to the corresponding values."""
if option is not None:
kw[option] = None
return _val_or_dict(self.tk, kw, self._w, "column", column)
def delete(self, *items):
"""Delete all specified items and all their descendants. The root
item may not be deleted."""
self.tk.call(self._w, "delete", items)
def detach(self, *items):
"""Unlinks all of the specified items from the tree.
The items and all of their descendants are still present, and may
be reinserted at another point in the tree, but will not be
displayed. The root item may not be detached."""
self.tk.call(self._w, "detach", items)
def exists(self, item):
"""Returns True if the specified item is present in the tree,
False otherwise."""
return bool(self.tk.getboolean(self.tk.call(self._w, "exists", item)))
def focus(self, item=None):
"""If item is specified, sets the focus item to item. Otherwise,
returns the current focus item, or '' if there is none."""
return self.tk.call(self._w, "focus", item)
def heading(self, column, option=None, **kw):
"""Query or modify the heading options for the specified column.
If kw is not given, returns a dict of the heading option values. If
option is specified then the value for that option is returned.
Otherwise, sets the options to the corresponding values.
Valid options/values are:
text: text
The text to display in the column heading
image: image_name
Specifies an image to display to the right of the column
heading
anchor: anchor
Specifies how the heading text should be aligned. One of
the standard Tk anchor values
command: callback
A callback to be invoked when the heading label is
pressed.
To configure the tree column heading, call this with column = "#0" """
cmd = kw.get('command')
if cmd and not isinstance(cmd, str):
# callback not registered yet, do it now
kw['command'] = self.master.register(cmd, self._substitute)
if option is not None:
kw[option] = None
return _val_or_dict(self.tk, kw, self._w, 'heading', column)
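    # Hedged usage sketch (a Treeview "tv" with a 'size' column is assumed;
    # a plain callable passed as command is auto-registered above):
    #
    #   tv.heading('#0', text='Name')
    #   tv.heading('size', text='Size',
    #              command=lambda: print('size heading pressed'))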
def identify(self, component, x, y):
"""Returns a description of the specified component under the
point given by x and y, or the empty string if no such component
is present at that position."""
return self.tk.call(self._w, "identify", component, x, y)
def identify_row(self, y):
"""Returns the item ID of the item at position y."""
return self.identify("row", 0, y)
def identify_column(self, x):
"""Returns the data column identifier of the cell at position x.
The tree column has ID #0."""
return self.identify("column", x, 0)
def identify_region(self, x, y):
"""Returns one of:
heading: Tree heading area.
        separator: Space between two column headings.
tree: The tree area.
cell: A data cell.
* Availability: Tk 8.6"""
return self.identify("region", x, y)
def identify_element(self, x, y):
"""Returns the element at position x, y.
* Availability: Tk 8.6"""
return self.identify("element", x, y)
def index(self, item):
"""Returns the integer index of item within its parent's list
of children."""
return self.tk.getint(self.tk.call(self._w, "index", item))
def insert(self, parent, index, iid=None, **kw):
"""Creates a new item and return the item identifier of the newly
created item.
parent is the item ID of the parent item, or the empty string
to create a new top-level item. index is an integer, or the value
end, specifying where in the list of parent's children to insert
the new item. If index is less than or equal to zero, the new node
is inserted at the beginning, if index is greater than or equal to
the current number of children, it is inserted at the end. If iid
is specified, it is used as the item identifier, iid must not
already exist in the tree. Otherwise, a new unique identifier
is generated."""
opts = _format_optdict(kw)
if iid:
res = self.tk.call(self._w, "insert", parent, index,
"-id", iid, *opts)
else:
res = self.tk.call(self._w, "insert", parent, index, *opts)
return res
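    # Hedged usage sketch (parent widget "root" is assumed):
    #
    #   tv = Treeview(root, columns=('size',))
    #   top = tv.insert('', 'end', text='folder', open=True)
    #   tv.insert(top, 'end', text='file.txt', values=('12 kB',))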
def item(self, item, option=None, **kw):
"""Query or modify the options for the specified item.
If no options are given, a dict with options/values for the item
is returned. If option is specified then the value for that option
is returned. Otherwise, sets the options to the corresponding
values as given by kw."""
if option is not None:
kw[option] = None
return _val_or_dict(self.tk, kw, self._w, "item", item)
def move(self, item, parent, index):
"""Moves item to position index in parent's list of children.
It is illegal to move an item under one of its descendants. If
index is less than or equal to zero, item is moved to the
beginning, if greater than or equal to the number of children,
it is moved to the end. If item was detached it is reattached."""
self.tk.call(self._w, "move", item, parent, index)
reattach = move # A sensible method name for reattaching detached items
def next(self, item):
"""Returns the identifier of item's next sibling, or '' if item
is the last child of its parent."""
return self.tk.call(self._w, "next", item)
def parent(self, item):
"""Returns the ID of the parent of item, or '' if item is at the
top level of the hierarchy."""
return self.tk.call(self._w, "parent", item)
def prev(self, item):
"""Returns the identifier of item's previous sibling, or '' if
item is the first child of its parent."""
return self.tk.call(self._w, "prev", item)
def see(self, item):
"""Ensure that item is visible.
Sets all of item's ancestors open option to True, and scrolls
the widget if necessary so that item is within the visible
portion of the tree."""
self.tk.call(self._w, "see", item)
def selection(self, selop=None, items=None):
"""If selop is not specified, returns selected items."""
return self.tk.call(self._w, "selection", selop, items)
def selection_set(self, items):
"""items becomes the new selection."""
self.selection("set", items)
def selection_add(self, items):
"""Add items to the selection."""
self.selection("add", items)
def selection_remove(self, items):
"""Remove items from the selection."""
self.selection("remove", items)
def selection_toggle(self, items):
"""Toggle the selection state of each item in items."""
self.selection("toggle", items)
def set(self, item, column=None, value=None):
"""Query or set the value of given item.
With one argument, return a dictionary of column/value pairs
for the specified item. With two arguments, return the current
value of the specified column. With three arguments, set the
value of given column in given item to the specified value."""
res = self.tk.call(self._w, "set", item, column, value)
if column is None and value is None:
return _splitdict(self.tk, res,
cut_minus=False, conv=_tclobj_to_py)
else:
return res
def tag_bind(self, tagname, sequence=None, callback=None):
"""Bind a callback for the given event sequence to the tag tagname.
When an event is delivered to an item, the callbacks for each
of the item's tags option are called."""
self._bind((self._w, "tag", "bind", tagname), sequence, callback, add=0)
def tag_configure(self, tagname, option=None, **kw):
"""Query or modify the options for the specified tagname.
If kw is not given, returns a dict of the option settings for tagname.
If option is specified, returns the value for that option for the
specified tagname. Otherwise, sets the options to the corresponding
values for the given tagname."""
if option is not None:
kw[option] = None
return _val_or_dict(self.tk, kw, self._w, "tag", "configure",
tagname)
def tag_has(self, tagname, item=None):
"""If item is specified, returns 1 or 0 depending on whether the
specified item has the given tagname. Otherwise, returns a list of
all items which have the specified tag.
* Availability: Tk 8.6"""
return self.tk.getboolean(
self.tk.call(self._w, "tag", "has", tagname, item))
# Extensions
class LabeledScale(Frame):
"""A Ttk Scale widget with a Ttk Label widget indicating its
current value.
The Ttk Scale can be accessed through instance.scale, and Ttk Label
can be accessed through instance.label"""
def __init__(self, master=None, variable=None, from_=0, to=10, **kw):
"""Construct an horizontal LabeledScale with parent master, a
variable to be associated with the Ttk Scale widget and its range.
If variable is not specified, a tkinter.IntVar is created.
WIDGET-SPECIFIC OPTIONS
compound: 'top' or 'bottom'
Specifies how to display the label relative to the scale.
Defaults to 'top'.
"""
self._label_top = kw.pop('compound', 'top') == 'top'
Frame.__init__(self, master, **kw)
self._variable = variable or tkinter.IntVar(master)
self._variable.set(from_)
self._last_valid = from_
self.label = Label(self)
self.scale = Scale(self, variable=self._variable, from_=from_, to=to)
self.scale.bind('<<RangeChanged>>', self._adjust)
# position scale and label according to the compound option
scale_side = 'bottom' if self._label_top else 'top'
label_side = 'top' if scale_side == 'bottom' else 'bottom'
self.scale.pack(side=scale_side, fill='x')
tmp = Label(self).pack(side=label_side) # place holder
self.label.place(anchor='n' if label_side == 'top' else 's')
# update the label as scale or variable changes
self.__tracecb = self._variable.trace_variable('w', self._adjust)
self.bind('<Configure>', self._adjust)
self.bind('<Map>', self._adjust)
def destroy(self):
"""Destroy this widget and possibly its associated variable."""
try:
self._variable.trace_vdelete('w', self.__tracecb)
except AttributeError:
# widget has been destroyed already
pass
else:
del self._variable
Frame.destroy(self)
def _adjust(self, *args):
"""Adjust the label position according to the scale."""
def adjust_label():
self.update_idletasks() # "force" scale redraw
x, y = self.scale.coords()
if self._label_top:
y = self.scale.winfo_y() - self.label.winfo_reqheight()
else:
y = self.scale.winfo_reqheight() + self.label.winfo_reqheight()
self.label.place_configure(x=x, y=y)
from_ = _to_number(self.scale['from'])
to = _to_number(self.scale['to'])
if to < from_:
from_, to = to, from_
newval = self._variable.get()
if not from_ <= newval <= to:
# value outside range, set value back to the last valid one
self.value = self._last_valid
return
self._last_valid = newval
self.label['text'] = newval
self.after_idle(adjust_label)
def _get_value(self):
"""Return current scale value."""
return self._variable.get()
def _set_value(self, val):
"""Set new scale value."""
self._variable.set(val)
value = property(_get_value, _set_value)
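    # Hedged usage sketch (parent widget "root" is assumed):
    #
    #   ls = LabeledScale(root, from_=0, to=100, compound='bottom')
    #   ls.pack(fill='x')
    #   ls.value = 50   # moves the scale; the label follows via the trace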
class OptionMenu(Menubutton):
"""Themed OptionMenu, based after tkinter's OptionMenu, which allows
the user to select a value from a menu."""
def __init__(self, master, variable, default=None, *values, **kwargs):
"""Construct a themed OptionMenu widget with master as the parent,
the resource textvariable set to variable, the initially selected
value specified by the default parameter, the menu values given by
*values and additional keywords.
WIDGET-SPECIFIC OPTIONS
style: stylename
Menubutton style.
direction: 'above', 'below', 'left', 'right', or 'flush'
Menubutton direction.
command: callback
A callback that will be invoked after selecting an item.
"""
kw = {'textvariable': variable, 'style': kwargs.pop('style', None),
'direction': kwargs.pop('direction', None)}
Menubutton.__init__(self, master, **kw)
self['menu'] = tkinter.Menu(self, tearoff=False)
self._variable = variable
self._callback = kwargs.pop('command', None)
if kwargs:
raise tkinter.TclError('unknown option -%s' % (
next(iter(kwargs.keys()))))
self.set_menu(default, *values)
def __getitem__(self, item):
if item == 'menu':
return self.nametowidget(Menubutton.__getitem__(self, item))
return Menubutton.__getitem__(self, item)
def set_menu(self, default=None, *values):
"""Build a new menu of radiobuttons with *values and optionally
a default value."""
menu = self['menu']
menu.delete(0, 'end')
for val in values:
menu.add_radiobutton(label=val,
command=tkinter._setit(self._variable, val, self._callback))
if default:
self._variable.set(default)
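    # Hedged usage sketch (parent "root" and a tkinter.StringVar "var" are
    # assumed):
    #
    #   om = OptionMenu(root, var, 'b', 'a', 'b', 'c',
    #                   command=lambda v: print('picked', v))
    #   om.set_menu('x', 'x', 'y')   # rebuild the menu with new values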
def destroy(self):
"""Destroy this widget and its associated variable."""
del self._variable
Menubutton.destroy(self)
| lgpl-3.0 |
philsch/ansible | lib/ansible/modules/network/lenovo/cnos_reload.py | 59 | 4933 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2017 Lenovo, Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
# Module to reload Lenovo Switches
# Lenovo Networking
#
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: cnos_reload
author: "Dave Kasberg (@dkasberg)"
short_description: Perform switch restart on devices running Lenovo CNOS
description:
- This module allows you to restart the switch using the current startup configuration.
The module is usually invoked after the running configuration has been saved over the startup configuration.
This module uses SSH to manage network device configuration.
The results of the operation can be viewed in results directory.
    For more information about this module from Lenovo and customizing its usage for your
use cases, please visit U(http://systemx.lenovofiles.com/help/index.jsp?topic=%2Fcom.lenovo.switchmgt.ansible.doc%2Fcnos_reload.html)
version_added: "2.3"
extends_documentation_fragment: cnos
options: {}
'''
EXAMPLES = '''
Tasks : The following are examples of using the module cnos_reload. These are written in the main.yml file of the tasks directory.
---
- name: Test Reload
cnos_reload:
host: "{{ inventory_hostname }}"
username: "{{ hostvars[inventory_hostname]['username'] }}"
password: "{{ hostvars[inventory_hostname]['password'] }}"
deviceType: "{{ hostvars[inventory_hostname]['deviceType'] }}"
enablePassword: "{{ hostvars[inventory_hostname]['enablePassword'] }}"
outputfile: "./results/test_reload_{{ inventory_hostname }}_output.txt"
'''
RETURN = '''
msg:
description: Success or failure message
returned: always
type: string
sample: "Device is Reloading. Please wait..."
'''
import sys
import paramiko
import time
import argparse
import socket
import array
import json
import re
try:
from ansible.module_utils import cnos
HAS_LIB = True
except:
HAS_LIB = False
from ansible.module_utils.basic import AnsibleModule
from collections import defaultdict
def main():
module = AnsibleModule(
argument_spec=dict(
outputfile=dict(required=True),
host=dict(required=True),
username=dict(required=True),
password=dict(required=True, no_log=True),
enablePassword=dict(required=False, no_log=True),
deviceType=dict(required=True),),
supports_check_mode=False)
username = module.params['username']
password = module.params['password']
enablePassword = module.params['enablePassword']
cliCommand = "reload \n"
outputfile = module.params['outputfile']
hostIP = module.params['host']
deviceType = module.params['deviceType']
output = ""
# Create instance of SSHClient object
remote_conn_pre = paramiko.SSHClient()
    # Automatically add untrusted hosts (make sure this is okay for the security policy in your environment)
remote_conn_pre.set_missing_host_key_policy(paramiko.AutoAddPolicy())
# initiate SSH connection with the switch
remote_conn_pre.connect(hostIP, username=username, password=password)
time.sleep(2)
# Use invoke_shell to establish an 'interactive session'
remote_conn = remote_conn_pre.invoke_shell()
time.sleep(2)
# Enable and enter configure terminal then send command
output = output + cnos.waitForDeviceResponse("\n", ">", 2, remote_conn)
output = output + cnos.enterEnableModeForDevice(enablePassword, 3, remote_conn)
# Make terminal length = 0
output = output + cnos.waitForDeviceResponse("terminal length 0\n", "#", 2, remote_conn)
# Send the CLi command
output = output + cnos.waitForDeviceResponse(cliCommand, "(y/n):", 2, remote_conn)
# Send the Confirmation y
output = output + cnos.waitForDeviceResponse("y\n", "#", 2, remote_conn)
# Save it into the file
file = open(outputfile, "a")
file.write(output)
file.close()
    errorMsg = cnos.checkOutputForError(output)
if(errorMsg is None):
module.exit_json(changed=True, msg="Device is Reloading. Please wait...")
else:
module.fail_json(msg=errorMsg)
if __name__ == '__main__':
main()
| gpl-3.0 |
fyfcauc/android_external_chromium-org | remoting/tools/build/remoting_localize.py | 48 | 26864 | #!/usr/bin/env python
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
localize.py -- Generates an output file from the given template replacing
variables and localizing strings.
The script uses Jinja2 template processing library (src/third_party/jinja2).
Variables available to the templates:
- |languages| - the list of languages passed on the command line.
- Each NAME=VALUE define ('-d') can be accessed as {{ NAME }}.
- |official_build| is set to '1' when CHROME_BUILD_TYPE environment variable
is set to "_official".
Filters:
- GetCodepage - returns the code page for the given language.
- GetCodepageDecimal same as GetCodepage, but returns a decimal value.
- GetLangId - returns Win32 LANGID.
  - GetPrimaryLanguage - returns a named Win32 constant specifying the primary
    language ID.
  - GetSublanguage - returns a named Win32 constant specifying the sublanguage
ID.
Globals:
- IsRtlLanguage(language) - returns True if the language is right-to-left.
- SelectLanguage(language) - allows to select the language to the used by
{% trans %}{% endtrans %} statements.
"""
import io
import json
from optparse import OptionParser
import os
import sys
from string import Template
# Win32 primary languages IDs.
_LANGUAGE_PRIMARY = {
'LANG_NEUTRAL' : 0x00,
'LANG_INVARIANT' : 0x7f,
'LANG_AFRIKAANS' : 0x36,
'LANG_ALBANIAN' : 0x1c,
'LANG_ALSATIAN' : 0x84,
'LANG_AMHARIC' : 0x5e,
'LANG_ARABIC' : 0x01,
'LANG_ARMENIAN' : 0x2b,
'LANG_ASSAMESE' : 0x4d,
'LANG_AZERI' : 0x2c,
'LANG_BASHKIR' : 0x6d,
'LANG_BASQUE' : 0x2d,
'LANG_BELARUSIAN' : 0x23,
'LANG_BENGALI' : 0x45,
'LANG_BRETON' : 0x7e,
'LANG_BOSNIAN' : 0x1a,
'LANG_BULGARIAN' : 0x02,
'LANG_CATALAN' : 0x03,
'LANG_CHINESE' : 0x04,
'LANG_CORSICAN' : 0x83,
'LANG_CROATIAN' : 0x1a,
'LANG_CZECH' : 0x05,
'LANG_DANISH' : 0x06,
'LANG_DARI' : 0x8c,
'LANG_DIVEHI' : 0x65,
'LANG_DUTCH' : 0x13,
'LANG_ENGLISH' : 0x09,
'LANG_ESTONIAN' : 0x25,
'LANG_FAEROESE' : 0x38,
'LANG_FILIPINO' : 0x64,
'LANG_FINNISH' : 0x0b,
'LANG_FRENCH' : 0x0c,
'LANG_FRISIAN' : 0x62,
'LANG_GALICIAN' : 0x56,
'LANG_GEORGIAN' : 0x37,
'LANG_GERMAN' : 0x07,
'LANG_GREEK' : 0x08,
'LANG_GREENLANDIC' : 0x6f,
'LANG_GUJARATI' : 0x47,
'LANG_HAUSA' : 0x68,
'LANG_HEBREW' : 0x0d,
'LANG_HINDI' : 0x39,
'LANG_HUNGARIAN' : 0x0e,
'LANG_ICELANDIC' : 0x0f,
'LANG_IGBO' : 0x70,
'LANG_INDONESIAN' : 0x21,
'LANG_INUKTITUT' : 0x5d,
'LANG_IRISH' : 0x3c,
'LANG_ITALIAN' : 0x10,
'LANG_JAPANESE' : 0x11,
'LANG_KANNADA' : 0x4b,
'LANG_KASHMIRI' : 0x60,
'LANG_KAZAK' : 0x3f,
'LANG_KHMER' : 0x53,
'LANG_KICHE' : 0x86,
'LANG_KINYARWANDA' : 0x87,
'LANG_KONKANI' : 0x57,
'LANG_KOREAN' : 0x12,
'LANG_KYRGYZ' : 0x40,
'LANG_LAO' : 0x54,
'LANG_LATVIAN' : 0x26,
'LANG_LITHUANIAN' : 0x27,
'LANG_LOWER_SORBIAN' : 0x2e,
'LANG_LUXEMBOURGISH' : 0x6e,
'LANG_MACEDONIAN' : 0x2f,
'LANG_MALAY' : 0x3e,
'LANG_MALAYALAM' : 0x4c,
'LANG_MALTESE' : 0x3a,
'LANG_MANIPURI' : 0x58,
'LANG_MAORI' : 0x81,
'LANG_MAPUDUNGUN' : 0x7a,
'LANG_MARATHI' : 0x4e,
'LANG_MOHAWK' : 0x7c,
'LANG_MONGOLIAN' : 0x50,
'LANG_NEPALI' : 0x61,
'LANG_NORWEGIAN' : 0x14,
'LANG_OCCITAN' : 0x82,
'LANG_ORIYA' : 0x48,
'LANG_PASHTO' : 0x63,
'LANG_PERSIAN' : 0x29,
'LANG_POLISH' : 0x15,
'LANG_PORTUGUESE' : 0x16,
'LANG_PUNJABI' : 0x46,
'LANG_QUECHUA' : 0x6b,
'LANG_ROMANIAN' : 0x18,
'LANG_ROMANSH' : 0x17,
'LANG_RUSSIAN' : 0x19,
'LANG_SAMI' : 0x3b,
'LANG_SANSKRIT' : 0x4f,
'LANG_SCOTTISH_GAELIC' : 0x91,
'LANG_SERBIAN' : 0x1a,
'LANG_SINDHI' : 0x59,
'LANG_SINHALESE' : 0x5b,
'LANG_SLOVAK' : 0x1b,
'LANG_SLOVENIAN' : 0x24,
'LANG_SOTHO' : 0x6c,
'LANG_SPANISH' : 0x0a,
'LANG_SWAHILI' : 0x41,
'LANG_SWEDISH' : 0x1d,
'LANG_SYRIAC' : 0x5a,
'LANG_TAJIK' : 0x28,
'LANG_TAMAZIGHT' : 0x5f,
'LANG_TAMIL' : 0x49,
'LANG_TATAR' : 0x44,
'LANG_TELUGU' : 0x4a,
'LANG_THAI' : 0x1e,
'LANG_TIBETAN' : 0x51,
'LANG_TIGRIGNA' : 0x73,
'LANG_TSWANA' : 0x32,
'LANG_TURKISH' : 0x1f,
'LANG_TURKMEN' : 0x42,
'LANG_UIGHUR' : 0x80,
'LANG_UKRAINIAN' : 0x22,
'LANG_UPPER_SORBIAN' : 0x2e,
'LANG_URDU' : 0x20,
'LANG_UZBEK' : 0x43,
'LANG_VIETNAMESE' : 0x2a,
'LANG_WELSH' : 0x52,
'LANG_WOLOF' : 0x88,
'LANG_XHOSA' : 0x34,
'LANG_YAKUT' : 0x85,
'LANG_YI' : 0x78,
'LANG_YORUBA' : 0x6a,
'LANG_ZULU' : 0x35,
}
# Win32 sublanguage IDs.
_LANGUAGE_SUB = {
'SUBLANG_NEUTRAL' : 0x00,
'SUBLANG_DEFAULT' : 0x01,
'SUBLANG_SYS_DEFAULT' : 0x02,
'SUBLANG_CUSTOM_DEFAULT' : 0x03,
'SUBLANG_CUSTOM_UNSPECIFIED' : 0x04,
'SUBLANG_UI_CUSTOM_DEFAULT' : 0x05,
'SUBLANG_AFRIKAANS_SOUTH_AFRICA' : 0x01,
'SUBLANG_ALBANIAN_ALBANIA' : 0x01,
'SUBLANG_ALSATIAN_FRANCE' : 0x01,
'SUBLANG_AMHARIC_ETHIOPIA' : 0x01,
'SUBLANG_ARABIC_SAUDI_ARABIA' : 0x01,
'SUBLANG_ARABIC_IRAQ' : 0x02,
'SUBLANG_ARABIC_EGYPT' : 0x03,
'SUBLANG_ARABIC_LIBYA' : 0x04,
'SUBLANG_ARABIC_ALGERIA' : 0x05,
'SUBLANG_ARABIC_MOROCCO' : 0x06,
'SUBLANG_ARABIC_TUNISIA' : 0x07,
'SUBLANG_ARABIC_OMAN' : 0x08,
'SUBLANG_ARABIC_YEMEN' : 0x09,
'SUBLANG_ARABIC_SYRIA' : 0x0a,
'SUBLANG_ARABIC_JORDAN' : 0x0b,
'SUBLANG_ARABIC_LEBANON' : 0x0c,
'SUBLANG_ARABIC_KUWAIT' : 0x0d,
'SUBLANG_ARABIC_UAE' : 0x0e,
'SUBLANG_ARABIC_BAHRAIN' : 0x0f,
'SUBLANG_ARABIC_QATAR' : 0x10,
'SUBLANG_ARMENIAN_ARMENIA' : 0x01,
'SUBLANG_ASSAMESE_INDIA' : 0x01,
'SUBLANG_AZERI_LATIN' : 0x01,
'SUBLANG_AZERI_CYRILLIC' : 0x02,
'SUBLANG_BASHKIR_RUSSIA' : 0x01,
'SUBLANG_BASQUE_BASQUE' : 0x01,
'SUBLANG_BELARUSIAN_BELARUS' : 0x01,
'SUBLANG_BENGALI_INDIA' : 0x01,
'SUBLANG_BENGALI_BANGLADESH' : 0x02,
'SUBLANG_BOSNIAN_BOSNIA_HERZEGOVINA_LATIN' : 0x05,
'SUBLANG_BOSNIAN_BOSNIA_HERZEGOVINA_CYRILLIC' : 0x08,
'SUBLANG_BRETON_FRANCE' : 0x01,
'SUBLANG_BULGARIAN_BULGARIA' : 0x01,
'SUBLANG_CATALAN_CATALAN' : 0x01,
'SUBLANG_CHINESE_TRADITIONAL' : 0x01,
'SUBLANG_CHINESE_SIMPLIFIED' : 0x02,
'SUBLANG_CHINESE_HONGKONG' : 0x03,
'SUBLANG_CHINESE_SINGAPORE' : 0x04,
'SUBLANG_CHINESE_MACAU' : 0x05,
'SUBLANG_CORSICAN_FRANCE' : 0x01,
'SUBLANG_CZECH_CZECH_REPUBLIC' : 0x01,
'SUBLANG_CROATIAN_CROATIA' : 0x01,
'SUBLANG_CROATIAN_BOSNIA_HERZEGOVINA_LATIN' : 0x04,
'SUBLANG_DANISH_DENMARK' : 0x01,
'SUBLANG_DARI_AFGHANISTAN' : 0x01,
'SUBLANG_DIVEHI_MALDIVES' : 0x01,
'SUBLANG_DUTCH' : 0x01,
'SUBLANG_DUTCH_BELGIAN' : 0x02,
'SUBLANG_ENGLISH_US' : 0x01,
'SUBLANG_ENGLISH_UK' : 0x02,
'SUBLANG_ENGLISH_AUS' : 0x03,
'SUBLANG_ENGLISH_CAN' : 0x04,
'SUBLANG_ENGLISH_NZ' : 0x05,
'SUBLANG_ENGLISH_EIRE' : 0x06,
'SUBLANG_ENGLISH_SOUTH_AFRICA' : 0x07,
'SUBLANG_ENGLISH_JAMAICA' : 0x08,
'SUBLANG_ENGLISH_CARIBBEAN' : 0x09,
'SUBLANG_ENGLISH_BELIZE' : 0x0a,
'SUBLANG_ENGLISH_TRINIDAD' : 0x0b,
'SUBLANG_ENGLISH_ZIMBABWE' : 0x0c,
'SUBLANG_ENGLISH_PHILIPPINES' : 0x0d,
'SUBLANG_ENGLISH_INDIA' : 0x10,
'SUBLANG_ENGLISH_MALAYSIA' : 0x11,
'SUBLANG_ENGLISH_SINGAPORE' : 0x12,
'SUBLANG_ESTONIAN_ESTONIA' : 0x01,
'SUBLANG_FAEROESE_FAROE_ISLANDS' : 0x01,
'SUBLANG_FILIPINO_PHILIPPINES' : 0x01,
'SUBLANG_FINNISH_FINLAND' : 0x01,
'SUBLANG_FRENCH' : 0x01,
'SUBLANG_FRENCH_BELGIAN' : 0x02,
'SUBLANG_FRENCH_CANADIAN' : 0x03,
'SUBLANG_FRENCH_SWISS' : 0x04,
'SUBLANG_FRENCH_LUXEMBOURG' : 0x05,
'SUBLANG_FRENCH_MONACO' : 0x06,
'SUBLANG_FRISIAN_NETHERLANDS' : 0x01,
'SUBLANG_GALICIAN_GALICIAN' : 0x01,
'SUBLANG_GEORGIAN_GEORGIA' : 0x01,
'SUBLANG_GERMAN' : 0x01,
'SUBLANG_GERMAN_SWISS' : 0x02,
'SUBLANG_GERMAN_AUSTRIAN' : 0x03,
'SUBLANG_GERMAN_LUXEMBOURG' : 0x04,
'SUBLANG_GERMAN_LIECHTENSTEIN' : 0x05,
'SUBLANG_GREEK_GREECE' : 0x01,
'SUBLANG_GREENLANDIC_GREENLAND' : 0x01,
'SUBLANG_GUJARATI_INDIA' : 0x01,
'SUBLANG_HAUSA_NIGERIA_LATIN' : 0x01,
'SUBLANG_HEBREW_ISRAEL' : 0x01,
'SUBLANG_HINDI_INDIA' : 0x01,
'SUBLANG_HUNGARIAN_HUNGARY' : 0x01,
'SUBLANG_ICELANDIC_ICELAND' : 0x01,
'SUBLANG_IGBO_NIGERIA' : 0x01,
'SUBLANG_INDONESIAN_INDONESIA' : 0x01,
'SUBLANG_INUKTITUT_CANADA' : 0x01,
'SUBLANG_INUKTITUT_CANADA_LATIN' : 0x02,
'SUBLANG_IRISH_IRELAND' : 0x02,
'SUBLANG_ITALIAN' : 0x01,
'SUBLANG_ITALIAN_SWISS' : 0x02,
'SUBLANG_JAPANESE_JAPAN' : 0x01,
'SUBLANG_KANNADA_INDIA' : 0x01,
'SUBLANG_KASHMIRI_SASIA' : 0x02,
'SUBLANG_KASHMIRI_INDIA' : 0x02,
'SUBLANG_KAZAK_KAZAKHSTAN' : 0x01,
'SUBLANG_KHMER_CAMBODIA' : 0x01,
'SUBLANG_KICHE_GUATEMALA' : 0x01,
'SUBLANG_KINYARWANDA_RWANDA' : 0x01,
'SUBLANG_KONKANI_INDIA' : 0x01,
'SUBLANG_KOREAN' : 0x01,
'SUBLANG_KYRGYZ_KYRGYZSTAN' : 0x01,
'SUBLANG_LAO_LAO' : 0x01,
'SUBLANG_LATVIAN_LATVIA' : 0x01,
'SUBLANG_LITHUANIAN' : 0x01,
'SUBLANG_LOWER_SORBIAN_GERMANY' : 0x02,
'SUBLANG_LUXEMBOURGISH_LUXEMBOURG' : 0x01,
'SUBLANG_MACEDONIAN_MACEDONIA' : 0x01,
'SUBLANG_MALAY_MALAYSIA' : 0x01,
'SUBLANG_MALAY_BRUNEI_DARUSSALAM' : 0x02,
'SUBLANG_MALAYALAM_INDIA' : 0x01,
'SUBLANG_MALTESE_MALTA' : 0x01,
'SUBLANG_MAORI_NEW_ZEALAND' : 0x01,
'SUBLANG_MAPUDUNGUN_CHILE' : 0x01,
'SUBLANG_MARATHI_INDIA' : 0x01,
'SUBLANG_MOHAWK_MOHAWK' : 0x01,
'SUBLANG_MONGOLIAN_CYRILLIC_MONGOLIA' : 0x01,
'SUBLANG_MONGOLIAN_PRC' : 0x02,
'SUBLANG_NEPALI_INDIA' : 0x02,
'SUBLANG_NEPALI_NEPAL' : 0x01,
'SUBLANG_NORWEGIAN_BOKMAL' : 0x01,
'SUBLANG_NORWEGIAN_NYNORSK' : 0x02,
'SUBLANG_OCCITAN_FRANCE' : 0x01,
'SUBLANG_ORIYA_INDIA' : 0x01,
'SUBLANG_PASHTO_AFGHANISTAN' : 0x01,
'SUBLANG_PERSIAN_IRAN' : 0x01,
'SUBLANG_POLISH_POLAND' : 0x01,
'SUBLANG_PORTUGUESE' : 0x02,
'SUBLANG_PORTUGUESE_BRAZILIAN' : 0x01,
'SUBLANG_PUNJABI_INDIA' : 0x01,
'SUBLANG_QUECHUA_BOLIVIA' : 0x01,
'SUBLANG_QUECHUA_ECUADOR' : 0x02,
'SUBLANG_QUECHUA_PERU' : 0x03,
'SUBLANG_ROMANIAN_ROMANIA' : 0x01,
'SUBLANG_ROMANSH_SWITZERLAND' : 0x01,
'SUBLANG_RUSSIAN_RUSSIA' : 0x01,
'SUBLANG_SAMI_NORTHERN_NORWAY' : 0x01,
'SUBLANG_SAMI_NORTHERN_SWEDEN' : 0x02,
'SUBLANG_SAMI_NORTHERN_FINLAND' : 0x03,
'SUBLANG_SAMI_LULE_NORWAY' : 0x04,
'SUBLANG_SAMI_LULE_SWEDEN' : 0x05,
'SUBLANG_SAMI_SOUTHERN_NORWAY' : 0x06,
'SUBLANG_SAMI_SOUTHERN_SWEDEN' : 0x07,
'SUBLANG_SAMI_SKOLT_FINLAND' : 0x08,
'SUBLANG_SAMI_INARI_FINLAND' : 0x09,
'SUBLANG_SANSKRIT_INDIA' : 0x01,
'SUBLANG_SCOTTISH_GAELIC' : 0x01,
'SUBLANG_SERBIAN_BOSNIA_HERZEGOVINA_LATIN' : 0x06,
'SUBLANG_SERBIAN_BOSNIA_HERZEGOVINA_CYRILLIC' : 0x07,
'SUBLANG_SERBIAN_MONTENEGRO_LATIN' : 0x0b,
'SUBLANG_SERBIAN_MONTENEGRO_CYRILLIC' : 0x0c,
'SUBLANG_SERBIAN_SERBIA_LATIN' : 0x09,
'SUBLANG_SERBIAN_SERBIA_CYRILLIC' : 0x0a,
'SUBLANG_SERBIAN_CROATIA' : 0x01,
'SUBLANG_SERBIAN_LATIN' : 0x02,
'SUBLANG_SERBIAN_CYRILLIC' : 0x03,
'SUBLANG_SINDHI_INDIA' : 0x01,
'SUBLANG_SINDHI_PAKISTAN' : 0x02,
'SUBLANG_SINDHI_AFGHANISTAN' : 0x02,
'SUBLANG_SINHALESE_SRI_LANKA' : 0x01,
'SUBLANG_SOTHO_NORTHERN_SOUTH_AFRICA' : 0x01,
'SUBLANG_SLOVAK_SLOVAKIA' : 0x01,
'SUBLANG_SLOVENIAN_SLOVENIA' : 0x01,
'SUBLANG_SPANISH' : 0x01,
'SUBLANG_SPANISH_MEXICAN' : 0x02,
'SUBLANG_SPANISH_MODERN' : 0x03,
'SUBLANG_SPANISH_GUATEMALA' : 0x04,
'SUBLANG_SPANISH_COSTA_RICA' : 0x05,
'SUBLANG_SPANISH_PANAMA' : 0x06,
'SUBLANG_SPANISH_DOMINICAN_REPUBLIC' : 0x07,
'SUBLANG_SPANISH_VENEZUELA' : 0x08,
'SUBLANG_SPANISH_COLOMBIA' : 0x09,
'SUBLANG_SPANISH_PERU' : 0x0a,
'SUBLANG_SPANISH_ARGENTINA' : 0x0b,
'SUBLANG_SPANISH_ECUADOR' : 0x0c,
'SUBLANG_SPANISH_CHILE' : 0x0d,
'SUBLANG_SPANISH_URUGUAY' : 0x0e,
'SUBLANG_SPANISH_PARAGUAY' : 0x0f,
'SUBLANG_SPANISH_BOLIVIA' : 0x10,
'SUBLANG_SPANISH_EL_SALVADOR' : 0x11,
'SUBLANG_SPANISH_HONDURAS' : 0x12,
'SUBLANG_SPANISH_NICARAGUA' : 0x13,
'SUBLANG_SPANISH_PUERTO_RICO' : 0x14,
'SUBLANG_SPANISH_US' : 0x15,
'SUBLANG_SWAHILI_KENYA' : 0x01,
'SUBLANG_SWEDISH' : 0x01,
'SUBLANG_SWEDISH_FINLAND' : 0x02,
'SUBLANG_SYRIAC_SYRIA' : 0x01,
'SUBLANG_TAJIK_TAJIKISTAN' : 0x01,
'SUBLANG_TAMAZIGHT_ALGERIA_LATIN' : 0x02,
'SUBLANG_TAMIL_INDIA' : 0x01,
'SUBLANG_TATAR_RUSSIA' : 0x01,
'SUBLANG_TELUGU_INDIA' : 0x01,
'SUBLANG_THAI_THAILAND' : 0x01,
'SUBLANG_TIBETAN_PRC' : 0x01,
'SUBLANG_TIGRIGNA_ERITREA' : 0x02,
'SUBLANG_TSWANA_SOUTH_AFRICA' : 0x01,
'SUBLANG_TURKISH_TURKEY' : 0x01,
'SUBLANG_TURKMEN_TURKMENISTAN' : 0x01,
'SUBLANG_UIGHUR_PRC' : 0x01,
'SUBLANG_UKRAINIAN_UKRAINE' : 0x01,
'SUBLANG_UPPER_SORBIAN_GERMANY' : 0x01,
'SUBLANG_URDU_PAKISTAN' : 0x01,
'SUBLANG_URDU_INDIA' : 0x02,
'SUBLANG_UZBEK_LATIN' : 0x01,
'SUBLANG_UZBEK_CYRILLIC' : 0x02,
'SUBLANG_VIETNAMESE_VIETNAM' : 0x01,
'SUBLANG_WELSH_UNITED_KINGDOM' : 0x01,
'SUBLANG_WOLOF_SENEGAL' : 0x01,
'SUBLANG_XHOSA_SOUTH_AFRICA' : 0x01,
'SUBLANG_YAKUT_RUSSIA' : 0x01,
'SUBLANG_YI_PRC' : 0x01,
'SUBLANG_YORUBA_NIGERIA' : 0x01,
'SUBLANG_ZULU_SOUTH_AFRICA' : 0x01,
}
'''
This dictionary defines the language lookup table. The key is the language ISO
country code, and the value specifies the corresponding code page, primary
language and sublanguage.
LCID resource: http://msdn.microsoft.com/en-us/library/ms776294.aspx
Codepage resource: http://www.science.co.il/language/locale-codes.asp
Language ID resource: http://msdn.microsoft.com/en-us/library/ms776294.aspx
There is no appropriate sublang for Spanish (Latin America) [es-419], so we
use Mexico. SUBLANG_DEFAULT would incorrectly map to Spain. Unlike other
Latin American countries, Mexican Spanish is supported by VERSIONINFO:
http://msdn.microsoft.com/en-us/library/aa381058.aspx
'''
_LANGUAGE_MAP = {
# Language neutral LCID, unicode(1200) code page.
'neutral' : [ 1200, 'LANG_NEUTRAL', 'SUBLANG_NEUTRAL' ],
# LANG_USER_DEFAULT LCID, unicode(1200) code page.
'userdefault' : [ 1200, 'LANG_NEUTRAL', 'SUBLANG_DEFAULT' ],
'fake-bidi' : [ 1255, 'LANG_HEBREW', 'SUBLANG_DEFAULT' ],
'af' : [ 1252, 'LANG_AFRIKAANS', 'SUBLANG_DEFAULT' ],
'am' : [ 1200, 'LANG_AMHARIC', 'SUBLANG_DEFAULT' ],
'ar' : [ 1256, 'LANG_ARABIC', 'SUBLANG_DEFAULT' ],
'bg' : [ 1251, 'LANG_BULGARIAN', 'SUBLANG_DEFAULT' ],
'bn' : [ 1200, 'LANG_BENGALI', 'SUBLANG_DEFAULT' ],
'ca' : [ 1252, 'LANG_CATALAN', 'SUBLANG_DEFAULT' ],
'cs' : [ 1250, 'LANG_CZECH', 'SUBLANG_DEFAULT' ],
'da' : [ 1252, 'LANG_DANISH', 'SUBLANG_DEFAULT' ],
'de' : [ 1252, 'LANG_GERMAN', 'SUBLANG_GERMAN' ],
'el' : [ 1253, 'LANG_GREEK', 'SUBLANG_DEFAULT' ],
'en' : [ 1200, 'LANG_ENGLISH', 'SUBLANG_ENGLISH_US' ],
'en-GB' : [ 1038, 'LANG_ENGLISH', 'SUBLANG_ENGLISH_UK' ],
'es' : [ 1252, 'LANG_SPANISH', 'SUBLANG_SPANISH_MODERN' ],
# LCID for Mexico; Windows does not support L.A. LCID.
'es-419' : [ 1252, 'LANG_SPANISH', 'SUBLANG_SPANISH_MEXICAN' ],
'et' : [ 1257, 'LANG_ESTONIAN', 'SUBLANG_DEFAULT' ],
'eu' : [ 1252, 'LANG_BASQUE', 'SUBLANG_DEFAULT' ],
'fa' : [ 1256, 'LANG_PERSIAN', 'SUBLANG_DEFAULT' ],
'fi' : [ 1252, 'LANG_FINNISH', 'SUBLANG_DEFAULT' ],
'fil' : [ 1252, 'LANG_FILIPINO', 'SUBLANG_DEFAULT' ],
'fr' : [ 1252, 'LANG_FRENCH', 'SUBLANG_FRENCH' ],
'fr-CA' : [ 1252, 'LANG_FRENCH', 'SUBLANG_FRENCH_CANADIAN' ],
'gl' : [ 1252, 'LANG_GALICIAN', 'SUBLANG_DEFAULT' ],
'gu' : [ 1200, 'LANG_GUJARATI', 'SUBLANG_DEFAULT' ],
'he' : [ 1255, 'LANG_HEBREW', 'SUBLANG_DEFAULT' ],
'hi' : [ 1200, 'LANG_HINDI', 'SUBLANG_DEFAULT' ],
'hr' : [ 1252, 'LANG_CROATIAN', 'SUBLANG_DEFAULT' ],
'hu' : [ 1250, 'LANG_HUNGARIAN', 'SUBLANG_DEFAULT' ],
'id' : [ 1252, 'LANG_INDONESIAN', 'SUBLANG_DEFAULT' ],
'is' : [ 1252, 'LANG_ICELANDIC', 'SUBLANG_DEFAULT' ],
'it' : [ 1252, 'LANG_ITALIAN', 'SUBLANG_DEFAULT' ],
'iw' : [ 1255, 'LANG_HEBREW', 'SUBLANG_DEFAULT' ],
'ja' : [ 932, 'LANG_JAPANESE', 'SUBLANG_DEFAULT' ],
'kn' : [ 1200, 'LANG_KANNADA', 'SUBLANG_DEFAULT' ],
'ko' : [ 949, 'LANG_KOREAN', 'SUBLANG_KOREAN' ],
'lt' : [ 1257, 'LANG_LITHUANIAN', 'SUBLANG_LITHUANIAN' ],
'lv' : [ 1257, 'LANG_LATVIAN', 'SUBLANG_DEFAULT' ],
'ml' : [ 1200, 'LANG_MALAYALAM', 'SUBLANG_DEFAULT' ],
'mr' : [ 1200, 'LANG_MARATHI', 'SUBLANG_DEFAULT' ],
# Malay (Malaysia) [ms-MY]
'ms' : [ 1252, 'LANG_MALAY', 'SUBLANG_DEFAULT' ],
'nb' : [ 1252, 'LANG_NORWEGIAN', 'SUBLANG_NORWEGIAN_BOKMAL' ],
'ne' : [ 1200, 'LANG_NEPALI', 'SUBLANG_NEPALI_NEPAL' ],
'nl' : [ 1252, 'LANG_DUTCH', 'SUBLANG_DEFAULT' ],
'nn' : [ 1252, 'LANG_NORWEGIAN', 'SUBLANG_NORWEGIAN_NYNORSK' ],
'no' : [ 1252, 'LANG_NORWEGIAN', 'SUBLANG_DEFAULT' ],
'or' : [ 1200, 'LANG_ORIYA', 'SUBLANG_DEFAULT' ],
'pa' : [ 1200, 'LANG_PUNJABI', 'SUBLANG_PUNJABI_INDIA' ],
'pl' : [ 1250, 'LANG_POLISH', 'SUBLANG_DEFAULT' ],
'pt-BR' : [ 1252, 'LANG_PORTUGUESE', 'SUBLANG_DEFAULT' ],
'pt-PT' : [ 1252, 'LANG_PORTUGUESE', 'SUBLANG_PORTUGUESE' ],
'ro' : [ 1250, 'LANG_ROMANIAN', 'SUBLANG_DEFAULT' ],
'ru' : [ 1251, 'LANG_RUSSIAN', 'SUBLANG_DEFAULT' ],
'sa' : [ 1200, 'LANG_SANSKRIT', 'SUBLANG_SANSKRIT_INDIA' ],
'si' : [ 1200, 'LANG_SINHALESE', 'SUBLANG_SINHALESE_SRI_LANKA' ],
'sk' : [ 1250, 'LANG_SLOVAK', 'SUBLANG_DEFAULT' ],
'sl' : [ 1250, 'LANG_SLOVENIAN', 'SUBLANG_DEFAULT' ],
'sr' : [ 1250, 'LANG_SERBIAN', 'SUBLANG_SERBIAN_LATIN' ],
'sv' : [ 1252, 'LANG_SWEDISH', 'SUBLANG_SWEDISH' ],
'sw' : [ 1252, 'LANG_SWAHILI', 'SUBLANG_DEFAULT' ],
'ta' : [ 1200, 'LANG_TAMIL', 'SUBLANG_DEFAULT' ],
'te' : [ 1200, 'LANG_TELUGU', 'SUBLANG_DEFAULT' ],
'th' : [ 874, 'LANG_THAI', 'SUBLANG_DEFAULT' ],
'ti' : [ 1200, 'LANG_TIGRIGNA', 'SUBLANG_TIGRIGNA_ERITREA' ],
'tr' : [ 1254, 'LANG_TURKISH', 'SUBLANG_DEFAULT' ],
'uk' : [ 1251, 'LANG_UKRAINIAN', 'SUBLANG_DEFAULT' ],
'ur' : [ 1200, 'LANG_URDU', 'SUBLANG_DEFAULT' ],
'vi' : [ 1258, 'LANG_VIETNAMESE', 'SUBLANG_DEFAULT' ],
'zh-CN' : [ 936, 'LANG_CHINESE', 'SUBLANG_CHINESE_SIMPLIFIED' ],
'zh-HK' : [ 950, 'LANG_CHINESE', 'SUBLANG_CHINESE_HONGKONG' ],
'zh-TW' : [ 950, 'LANG_CHINESE', 'SUBLANG_CHINESE_TRADITIONAL' ],
'zu' : [ 1200, 'LANG_ZULU', 'SUBLANG_DEFAULT' ],
}
# Right-To-Left languages
_RTL_LANGUAGES = (
'ar', # Arabic
'fa', # Farsi
'iw', # Hebrew
'ks', # Kashmiri
'ku', # Kurdish
'ps', # Pashto
'ur', # Urdu
'yi', # Yiddish
)
def GetCodepage(language):
""" Returns the codepage for the given |language|. """
lang = _LANGUAGE_MAP[language]
return "%04x" % lang[0]
def GetCodepageDecimal(language):
""" Returns the codepage for the given |language| as a decimal value. """
lang = _LANGUAGE_MAP[language]
return "%d" % lang[0]
def GetLangId(language):
""" Returns the language id for the given |language|. """
lang = _LANGUAGE_MAP[language]
return "%04x" % (_LANGUAGE_PRIMARY[lang[1]] | (_LANGUAGE_SUB[lang[2]] << 10))
def GetPrimaryLanguage(language):
""" Returns the primary language ID for the given |language|. """
lang = _LANGUAGE_MAP[language]
return _LANGUAGE_PRIMARY[lang[1]]
def GetSublanguage(language):
""" Returns the sublanguage ID for the given |language|. """
lang = _LANGUAGE_MAP[language]
return _LANGUAGE_SUB[lang[2]]
def IsRtlLanguage(language):
  return language in _RTL_LANGUAGES
def NormalizeLanguageCode(language):
lang = language.replace('_', '-', 1)
if lang == 'en-US':
lang = 'en'
return lang
def GetDataPackageSuffix(language):
lang = NormalizeLanguageCode(language)
if lang == 'en':
lang = 'en-US'
return lang
def GetJsonSuffix(language):
return language.replace('-', '_', 1)
def ReadValuesFromFile(values_dict, file_name):
"""
Reads NAME=VALUE settings from the specified file.
Everything to the left of the first '=' is the keyword,
everything to the right is the value. No stripping of
white space, so beware.
The file must exist, otherwise you get the Python exception from open().
"""
for line in open(file_name, 'r').readlines():
key, val = line.rstrip('\r\n').split('=', 1)
values_dict[key] = val
def ReadMessagesFromFile(file_name):
"""
Reads messages from a 'chrome_messages_json' file.
The file must exist, otherwise you get the Python exception from open().
"""
messages_file = io.open(file_name, encoding='utf-8-sig')
messages = json.load(messages_file)
messages_file.close()
values = {}
for key in messages.keys():
    values[key] = unicode(messages[key]['message'])
return values
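# The input follows Chrome's messages.json shape, e.g. (illustrative key):
#   { "IDS_PRODUCT_NAME": { "message": "Chromoting" } }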
def WriteIfChanged(file_name, contents, encoding='utf-16'):
"""
Writes the specified contents to the specified file_name
iff the contents are different than the current contents.
"""
try:
target = io.open(file_name, 'r')
old_contents = target.read()
except EnvironmentError:
pass
except UnicodeDecodeError:
target.close()
os.unlink(file_name)
else:
if contents == old_contents:
return
target.close()
os.unlink(file_name)
io.open(file_name, 'w', encoding=encoding).write(contents)
class MessageMap:
""" Provides a dictionary of localized messages for each language."""
def __init__(self, languages, locale_dir):
self.language = None
self.message_map = {}
# Populate the message map
if locale_dir:
for language in languages:
file_name = os.path.join(locale_dir,
GetJsonSuffix(language),
'messages.json')
self.message_map[language] = ReadMessagesFromFile(file_name)
def GetText(self, message):
""" Returns a localized message for the current language. """
return self.message_map[self.language][message]
def SelectLanguage(self, language):
""" Selects the language to be used when retrieving localized messages. """
self.language = language
def MakeSelectLanguage(self):
""" Returns a function that can be used to select the current language. """
return lambda language: self.SelectLanguage(language)
def MakeGetText(self):
""" Returns a function that can be used to retrieve a localized message. """
return lambda message: self.GetText(message)
# Use '@' as a delimiter for string templates instead of '$' to avoid unintended
# expansion when passing the string from GYP.
class GypTemplate(Template):
delimiter = '@'
def Localize(source, locales, options):
# Set the list of languages to use.
languages = map(NormalizeLanguageCode, locales)
context = { 'languages' : languages }
# Load the localized messages.
message_map = MessageMap(languages, options.locale_dir)
# Add OFFICIAL_BUILD variable the same way chrome/tools/build/version.py
# does.
if os.environ.get('CHROME_BUILD_TYPE') == '_official':
context['official_build'] = '1'
else:
context['official_build'] = '0'
# Add all variables defined in the command line.
if options.define:
for define in options.define:
      context.update(dict([define.split('=', 1)]))
# Read NAME=VALUE variables from file.
if options.variables:
for file_name in options.variables:
ReadValuesFromFile(context, file_name)
env = None
template = None
if source:
# Load jinja2 library.
if options.jinja2:
jinja2_path = os.path.normpath(options.jinja2)
else:
jinja2_path = os.path.normpath(
os.path.join(os.path.abspath(__file__),
'../../../../third_party/jinja2'))
sys.path.append(os.path.split(jinja2_path)[0])
from jinja2 import Environment, FileSystemLoader
# Create jinja2 environment.
(template_path, template_name) = os.path.split(source)
env = Environment(loader=FileSystemLoader(template_path),
extensions=['jinja2.ext.do', 'jinja2.ext.i18n'])
# Register custom filters.
env.filters['GetCodepage'] = GetCodepage
env.filters['GetCodepageDecimal'] = GetCodepageDecimal
env.filters['GetLangId'] = GetLangId
env.filters['GetPrimaryLanguage'] = GetPrimaryLanguage
env.filters['GetSublanguage'] = GetSublanguage
# Register the message map with jinja2.i18n extension.
env.globals['IsRtlLanguage'] = IsRtlLanguage
env.globals['SelectLanguage'] = message_map.MakeSelectLanguage()
env.install_gettext_callables(message_map.MakeGetText(),
                                  message_map.MakeGetText())
template = env.get_template(template_name)
# Generate a separate file per each locale if requested.
outputs = []
if options.locale_output:
target = GypTemplate(options.locale_output)
for lang in languages:
context['languages'] = [ lang ]
context['language'] = lang
context['pak_suffix'] = GetDataPackageSuffix(lang)
context['json_suffix'] = GetJsonSuffix(lang)
message_map.SelectLanguage(lang)
template_file_name = target.safe_substitute(context)
outputs.append(template_file_name)
if not options.print_only:
WriteIfChanged(template_file_name, template.render(context),
options.encoding)
else:
outputs.append(options.output)
if not options.print_only:
WriteIfChanged(options.output, template.render(context), options.encoding)
if options.print_only:
# Quote each element so filename spaces don't mess up gyp's attempt to parse
# it into a list.
return " ".join(['"%s"' % x for x in outputs])
return
def DoMain(argv):
usage = "Usage: localize [options] locales"
parser = OptionParser(usage=usage)
parser.add_option(
'-d', '--define', dest='define', action='append', type='string',
help='define a variable (NAME=VALUE).')
parser.add_option(
'--encoding', dest='encoding', type='string', default='utf-16',
help="set the encoding of <output>. 'utf-16' is the default.")
parser.add_option(
'--jinja2', dest='jinja2', type='string',
help="specifies path to the jinja2 library.")
parser.add_option(
'--locale_dir', dest='locale_dir', type='string',
help="set path to localized message files.")
parser.add_option(
'--locale_output', dest='locale_output', type='string',
help='specify the per-locale output file name.')
parser.add_option(
'-o', '--output', dest='output', type='string',
help="specify the output file name.")
parser.add_option(
'--print_only', dest='print_only', action='store_true',
default=False, help='print the output file names only.')
parser.add_option(
'-t', '--template', dest='template', type='string',
help="specify the template file name.")
parser.add_option(
'--variables', dest='variables', action='append', type='string',
help='read variables (NAME=VALUE) from file.')
options, locales = parser.parse_args(argv)
if not locales:
parser.error('At least one locale must be specified')
if bool(options.output) == bool(options.locale_output):
parser.error(
'Either --output or --locale_output must be specified but not both')
if not options.template and not options.print_only:
parser.error('The template name is required unless --print_only is used')
return Localize(options.template, locales, options)
if __name__ == '__main__':
sys.exit(DoMain(sys.argv[1:]))
| bsd-3-clause |
mrquim/mrquimrepo | plugin.video.salts/scrapers/streamdor_scraper.py | 5 | 3428 | """
SALTS XBMC Addon
Copyright (C) 2014 tknorris
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import urllib
import scraper
import kodi
import log_utils # @UnusedImport
from salts_lib import scraper_utils
from salts_lib.constants import VIDEO_TYPES
from salts_lib.constants import FORCE_NO_MATCH
from salts_lib.constants import QUALITIES
from salts_lib.constants import XHR
BASE_URL = 'https://www.streamdor.com'
Q_MAP = {'hd': QUALITIES.HD720, 'sd': QUALITIES.HIGH}
class Scraper(scraper.Scraper):
base_url = BASE_URL
def __init__(self, timeout=scraper.DEFAULT_TIMEOUT):
self.timeout = timeout
self.base_url = kodi.get_setting('%s-base_url' % (self.get_name()))
@classmethod
def provides(cls):
return frozenset([VIDEO_TYPES.MOVIE])
@classmethod
def get_name(cls):
return 'StreamDor'
def get_sources(self, video):
source_url = self.get_url(video)
hosters = []
if not source_url or source_url == FORCE_NO_MATCH: return hosters
url = scraper_utils.urljoin(self.base_url, source_url)
html = self._http_get(url, headers=XHR, cache_limit=8)
js_data = scraper_utils.parse_json(html, url)
quality = Q_MAP.get(js_data.get('Key', {}).get('MovieDefinition'), QUALITIES.HIGH)
value = js_data.get('Value', {})
stream_url = value.get('VideoLink')
if stream_url and value.get('ProviderSource', '').lower() == 'youtube':
host = 'youtube.com'
source = {'multi-part': False, 'url': stream_url, 'host': host, 'class': self, 'quality': quality, 'views': None, 'rating': None, 'direct': False}
hosters.append(source)
return hosters
def search(self, video_type, title, year, season=''): # @UnusedVariable
results = []
search_url = scraper_utils.urljoin(self.base_url, '/search/searchBoxSuggestion')
html = self._http_get(search_url, params={'top': 8, 'query': title}, cache_limit=8)
js_data = scraper_utils.parse_json(html, search_url)
for item in js_data:
entityName = match_title_year = item.get('Value', '')
if entityName:
match_title, match_year2 = scraper_utils.extra_year(match_title_year)
match_year = str(item.get('ReleaseYear', ''))
if not match_year: match_year = match_year2
match_url = '/ontology/EntityDetails?' + urllib.urlencode({'entityName': entityName, 'ignoreMediaLinkError': 'false'})
if not year or not match_year or year == match_year:
result = {'title': scraper_utils.cleanse_title(match_title), 'year': match_year, 'url': scraper_utils.pathify_url(match_url)}
results.append(result)
return results
| gpl-2.0 |
LPM-HMS/COSMOS-2.0 | cosmos/models/Workflow.py | 2 | 33046 | """
Tools for defining, running and terminating Cosmos workflows.
"""
import atexit
import datetime
import getpass
import os
import re
import signal
import sys
import time
import warnings
import funcsigs
import networkx as nx
from flask import url_for
from networkx.algorithms.dag import descendants, topological_sort
from sqlalchemy import orm
from sqlalchemy.exc import SQLAlchemyError
from sqlalchemy.ext.declarative import declared_attr
from sqlalchemy.orm import validates, synonym, relationship
from sqlalchemy.schema import Column
from sqlalchemy.types import Boolean, Integer, String, DateTime, VARCHAR
from cosmos import (
TaskStatus,
StageStatus,
WorkflowStatus,
signal_workflow_status_change,
)
from cosmos.core.cmd_fxn import signature
from cosmos.db import Base
from cosmos.models.Task import Task
from cosmos.util.helpers import duplicates, get_logger, mkdir
from cosmos.util.iterstuff import only_one
from cosmos.util.sqla import Enum_ColumnType, MutableDict, JSONEncodedDict
from cosmos.constants import TERMINATION_SIGNALS
opj = os.path.join
WORKFLOW_LOG_AWKWARD_SILENCE_INTERVAL = 300
class DuplicateUid(Exception):
pass
class InvalidParams(Exception):
pass
def default_task_log_output_dir(task, subdir="", prefix=""):
"""The default function for computing Task.log_output_dir"""
return os.path.abspath(opj(prefix, "log", subdir, task.stage.name, str(task.uid)))
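# Illustrative result (hypothetical task fields): with prefix='/work', a task
# whose stage is named 'align' and whose uid is 'sample1' maps to
# '/work/log/align/sample1'.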
@signal_workflow_status_change.connect
def _workflow_status_changed(workflow):
if workflow.status in [
WorkflowStatus.successful,
WorkflowStatus.failed,
WorkflowStatus.killed,
]:
logfunc = (
workflow.log.warning
if workflow.status in [WorkflowStatus.failed, WorkflowStatus.killed]
else workflow.log.info
)
workflow.finished_on = datetime.datetime.now()
logfunc(
"%s %s (%s/%s Tasks completed) in %s"
% (
workflow,
workflow.status,
sum(t.successful for t in workflow.tasks),
len(workflow.tasks),
workflow.wall_time,
)
)
if workflow.status == WorkflowStatus.successful:
workflow.successful = True
workflow.finished_on = datetime.datetime.now()
class Workflow(Base):
"""
A collection of Stages and Tasks encoded as a DAG
"""
__tablename__ = "workflow"
id = Column(Integer, primary_key=True)
name = Column(VARCHAR(200), unique=True, nullable=False)
successful = Column(Boolean, nullable=False)
created_on = Column(DateTime)
started_on = Column(DateTime)
finished_on = Column(DateTime)
max_cores = Column(Integer)
max_gpus = Column(Integer)
primary_log_path = Column(String(255))
_log = None
info = Column(MutableDict.as_mutable(JSONEncodedDict))
_status = Column(Enum_ColumnType(WorkflowStatus, length=255), default=WorkflowStatus.no_attempt)
stages = relationship(
"Stage",
cascade="all, merge, delete-orphan",
order_by="Stage.number",
passive_deletes=True,
backref="workflow",
)
exclude_from_dict = ["info"]
_dont_garbage_collect = None
termination_signal = None
@property
def wall_time(self):
if self.started_on is None or self.finished_on is None:
return None
else:
return self.finished_on - self.started_on
@declared_attr
def status(cls):
def get_status(self):
return self._status
def set_status(self, value):
if self._status != value:
self._status = value
signal_workflow_status_change.send(self)
return synonym("_status", descriptor=property(get_status, set_status))
@validates("name")
def validate_name(self, key, name):
assert re.match(r"^[\w-]+$", name), (
"Invalid workflow name, characters are limited to letters, numbers, " "hyphens and underscores"
)
return name
@orm.reconstructor
def constructor(self):
self.__init__(manual_instantiation=False)
def __init__(self, manual_instantiation=True, *args, **kwargs):
# FIXME provide the cosmos_app instance?
if manual_instantiation:
raise TypeError("Do not instantiate a Workflow manually. Use the Cosmos.start method.")
super(Workflow, self).__init__(*args, **kwargs)
# assert self.output_dir is not None, 'output_dir cannot be None'
if self.info is None:
# mutable dict column defaults to None
self.info = dict()
self.jobmanager = None
if not self.created_on:
self.created_on = datetime.datetime.now()
self._dont_garbage_collect = []
@property
def log(self):
if self._log is None:
self._log = get_logger("%s" % self, self.primary_log_path)
return self._log
def make_output_dirs(self):
"""
Create directory paths of all output files
"""
dirs = set()
for task in self.tasks:
for out_name, v in list(task.output_map.items()):
dirname = lambda p: p if out_name.endswith("dir") or p is None else os.path.dirname(p)
if isinstance(v, (tuple, list)):
dirs.update(list(map(dirname, v)))
elif isinstance(v, dict):
raise NotImplementedError()
else:
dirs.add(dirname(v))
for d in dirs:
# don't add urls
if d is not None and "://" not in d:
mkdir(d)
def add_task(
self,
func,
params=None,
parents=None,
stage_name=None,
uid=None,
drm=None,
queue=None,
must_succeed=True,
time_req=None,
core_req=None,
mem_req=None,
gpu_req=None,
max_attempts=None,
noop=False,
job_class=None,
drm_options=None,
environment_variables=None,
if_duplicate="raise",
):
"""
Adds a new Task to the Workflow. If the Task already exists (and was successful), return the successful Task stored in the database
:param callable func: A function which returns a string which will get converted to a shell script to be executed. `func` will not get called until
all of its dependencies have completed.
:param dict params: Parameters to `func`. Must be jsonable so that it can be stored in the database. Any Dependency objects will get resolved into
a string, and the Dependency.task will be added to this Task's parents.
:param list[Tasks] parents: A list of dependent Tasks.
:param str uid: A unique identifier for this Task, primarily used for skipping previously successful Tasks.
If a Task with this stage_name and uid already exists in the database (and was successful), the
database version will be returned and a new one will not be created.
:param str stage_name: The name of the Stage to add this Task to. Defaults to `func.__name__`.
:param str drm: The drm to use for this Task (example 'local', 'ge' or 'drmaa:lsf'). Defaults to the `default_drm` parameter of :meth:`Cosmos.start`
:param job_class: The name of a job_class to submit to; defaults to the `default_job_class` parameter of :meth:`Cosmos.start`
:param queue: The name of a queue to submit to; defaults to the `default_queue` parameter of :meth:`Cosmos.start`
:param bool must_succeed: Default True. If False, the Workflow will not fail if this Task does not succeed. Dependent Jobs will not be executed.
:param time_req: The time requirement; will set the Task.time_req attribute which is intended to be used by :func:`get_submit_args` to request resources.
:param int core_req: Number of cpus required for this Task. Can also be set in the `params` dict or the default value of the Task function signature, but this value takes precedence.
Warning! In future versions, this will be the only way to set it.
:param int mem_req: Number of MB of RAM required for this Task. Can also be set in the `params` dict or the default value of the Task function signature, but this value takes precedence.
Warning! In future versions, this will be the only way to set it.
:param int gpu_req: Number of gpus required for this Task.
:param int max_attempts: The maximum number of times to retry a failed job. Defaults to the `default_max_attempts` parameter of :meth:`Cosmos.start`
:param bool noop: Task is a No-op and will always be marked as successful.
:param dict drm_options: Options for Distributed Resource Management (cluster).
:param dict environment_variables: Environment variables to pass to the DRM (if supported).
:param str if_duplicate: If "raise", raises an error if a Task with the same UID has already been added to this
Workflow. If "return", return that Task, allowing for an easy way to avoid duplicate work.
:rtype: cosmos.api.Task
"""
# Avoid cyclical import dependencies
from cosmos.job.drm.DRM_Base import DRM
from cosmos.models.Stage import Stage
from cosmos import recursive_resolve_dependency
# parents
if parents is None:
parents = []
elif isinstance(parents, Task):
parents = [parents]
else:
parents = list(parents)
# params
if params is None:
params = dict()
for k, v in list(params.items()):
# decompose `Dependency` objects to values and parents
new_val, parent_tasks = recursive_resolve_dependency(v)
params[k] = new_val
parents.extend(parent_tasks - set(parents))
# uid
if uid is None:
raise AssertionError("uid parameter must be specified")
# Fix me assert params are all JSONable
# uid = str(params)
else:
assert isinstance(uid, str), "uid must be a string"
if stage_name is None:
stage_name = str(func.__name__)
# Get the right Stage
stage = only_one((s for s in self.stages if s.name == stage_name), None)
if stage is None:
stage = Stage(workflow=self, name=stage_name, status=StageStatus.no_attempt)
self.session.add(stage)
# Check if task is already in stage
task = stage.get_task(uid, None)
if task is not None:
# if task is already in stage, but unsuccessful, raise an error (duplicate params) since unsuccessful tasks
# were already removed on workflow load
if task.successful:
# If the user manually edited the DAG and this is a resume, parents might need to be re-added
task.parents.extend(set(parents).difference(set(task.parents)))
for p in parents:
if p.stage not in stage.parents:
stage.parents.append(p.stage)
return task
else:
if if_duplicate == "raise":
raise DuplicateUid(
"Duplicate uid, you have added a Task to Stage %s with the uid (unique identifier) `%s` twice. "
"Task uids must be unique within the same Stage." % (stage_name, uid)
)
elif if_duplicate == "return":
if task.params != params:
raise InvalidParams(
f"Tried to add a task with the same uid, but different parameters."
)
return task
else:
raise ValueError(f"{if_duplicate} is not valid")
else:
# Create Task
sig = funcsigs.signature(func)
def params_or_signature_default_or(name, default):
if name in params:
return params[name]
if name in sig.parameters:
param_default = sig.parameters[name].default
if param_default is funcsigs._empty:
return default
else:
return param_default
return default
task = Task(
stage=stage,
params=params,
parents=parents,
uid=uid,
drm=drm if drm is not None else self.cosmos_app.default_drm,
job_class=job_class if job_class is not None else self.cosmos_app.default_job_class,
queue=queue if queue is not None else self.cosmos_app.default_queue,
must_succeed=must_succeed,
core_req=core_req if core_req is not None else params_or_signature_default_or("core_req", 1),
mem_req=mem_req if mem_req is not None else params_or_signature_default_or("mem_req", None),
time_req=time_req if time_req is not None else self.cosmos_app.default_time_req,
successful=False,
max_attempts=max_attempts
if max_attempts is not None
else self.cosmos_app.default_max_attempts,
attempt=1,
NOOP=noop,
gpu_req=gpu_req if gpu_req is not None else params_or_signature_default_or("gpu_req", 0),
environment_variables=environment_variables
if environment_variables is not None
else self.cosmos_app.default_environment_variables,
)
task.cmd_fxn = func
if drm_options is None:
task.drm_options = {}
else:
task.drm_options = drm_options
# use default for any keys not set
if self.cosmos_app.default_drm_options is not None:
for key, val in list(self.cosmos_app.default_drm_options.items()):
if key not in task.drm_options:
task.drm_options[key] = val
DRM.validate_drm_options(task.drm, task.drm_options)
# Add Stage Dependencies
for p in parents:
if p.stage not in stage.parents:
stage.parents.append(p.stage)
self._dont_garbage_collect.append(task)
return task
def run(
self,
max_cores=None,
dry=False,
set_successful=True,
cmd_wrapper=signature.default_cmd_fxn_wrapper,
log_out_dir_func=default_task_log_output_dir,
max_gpus=None,
do_cleanup_atexit=True,
lethal_signals=TERMINATION_SIGNALS,
):
"""
Runs this Workflow's DAG
:param int max_cores: The maximum number of cores to use at once. A value of None indicates no maximum.
Note: the maximum number of times to retry a failed job is configured per Task
via Workflow.add_task(..., max_attempts=N, ...); run() takes no max_attempts parameter.
:param callable log_out_dir_func: A function that returns a Task's logging directory (must be unique).
It receives one parameter: the Task instance.
By default a Task's log output is stored in log/stage_name/task_id.
See _default_task_log_output_dir for more info.
:param callable cmd_wrapper: A decorator which will be applied to every Task's cmd_fxn.
:param bool dry: If True, do not actually run any jobs.
:param bool set_successful: Sets this workflow as successful if all tasks finish without a failure.
You might set this to False if you intend to add and
run more tasks in this workflow later.
:param do_cleanup_atexit: if False, do not attempt to clean up after unhandled exits.
:param lethal_signals: signals to catch and shut down on.
Returns True if all tasks in the workflow ran successfully, False otherwise.
If dry is specified, returns None.
"""
if cmd_wrapper == signature.default_cmd_fxn_wrapper:
warnings.warn(
f"Having functions return bash strings as the default behavior is deprecated. While "
f"this behavior will be supported, it is recommended that you set cmd_wrapper to "
f"cosmos.api.py_call which will be the new default."
f"See examples/ex3.py. "
)
try:
try:
assert os.path.exists(os.getcwd()), "current working dir does not exist! %s" % os.getcwd()
assert hasattr(
self, "cosmos_app"
), "Workflow was not initialized using the Workflow.start method"
assert hasattr(log_out_dir_func, "__call__"), "log_out_dir_func must be a function"
assert self.session, "Workflow must be part of a sqlalchemy session"
session = self.session
self.log.info(
"Preparing to run %s using DRM `%s`, cwd is `%s`",
self,
self.cosmos_app.default_drm,
os.getcwd(),
)
try:
user = getpass.getuser()
except:
# fall back to uid if we can't resolve a user name
user = os.getuid()
self.log.info("Running as %s@%s, pid %s", user, os.uname()[1], os.getpid())
self.max_cores = max_cores
self.max_gpus = max_gpus
#
# Run some validation checks
#
# check GPU env variables are set correctly
if self.max_gpus is not None and self.cosmos_app.default_drm == "local":
if "COSMOS_LOCAL_GPU_DEVICES" not in os.environ:
raise EnvironmentError(
"COSMOS_LOCAL_GPU_DEVICES environment variable must be set to a "
"comma delimited list of gpu devices if using a local DRM to manage "
"GPUs"
)
# check for duplicate output files
output_fnames_to_task_and_key = dict()
for task in self.tasks:
for key, fname in list(task.output_map.items()):
current_value = output_fnames_to_task_and_key.setdefault(fname, (task, key))
if current_value != (task, key):
task2, key2 = current_value
raise ValueError(
"Duplicate output files detected!: "
'{task}.params["{key}"] == {task2}.params["{key2}"] == {fname}'.format(
**locals()
)
)
output_fnames_to_task_and_key[fname] = (task, key)
from ..job.JobManager import JobManager
if self.jobmanager is None:
self.jobmanager = JobManager(
get_submit_args=self.cosmos_app.get_submit_args,
cmd_wrapper=cmd_wrapper,
log_out_dir_func=log_out_dir_func,
logger=self.log,
session=self.session,
workflow=self,
)
self.status = WorkflowStatus.running
self.successful = False
if self.started_on is None:
self.started_on = datetime.datetime.now()
task_graph = self.task_graph()
stage_graph = self.stage_graph()
assert len(set(self.stages)) == len(self.stages), "duplicate stage name detected: %s" % (
next(duplicates(self.stages))
)
# renumber stages
stage_graph_no_cycles = nx.DiGraph()
stage_graph_no_cycles.add_nodes_from(stage_graph.nodes())
stage_graph_no_cycles.add_edges_from(stage_graph.edges())
for cycle in nx.simple_cycles(stage_graph):
stage_graph_no_cycles.remove_edge(cycle[-1], cycle[0])
for i, s in enumerate(topological_sort(stage_graph_no_cycles)):
s.number = i + 1
if s.status != StageStatus.successful:
s.status = StageStatus.no_attempt
# Make sure everything is in the sqlalchemy session
session.add(self)
successful = list([t for t in task_graph.nodes() if t.successful])
# print stages
for s in sorted(self.stages, key=lambda s: s.number):
self.log.info("%s %s" % (s, s.status))
# Create Task Queue
task_queue = _copy_graph(task_graph)
self.log.info("Skipping %s successful tasks..." % len(successful))
task_queue.remove_nodes_from(successful)
if do_cleanup_atexit:
handle_exits(self)
if self.max_cores is not None:
self.log.info("Ensuring there are enough cores...")
# make sure we've got enough cores
for t in task_queue:
assert int(t.core_req) <= self.max_cores, (
"%s requires more cpus (%s) than `max_cores` (%s)"
% (t, t.core_req, self.max_cores,)
)
# Run this thing!
self.log.info("Committing to SQL db...")
session.commit()
except KeyboardInterrupt:
# haven't started submitting yet, just raise the exception
self.log.fatal("ctrl+c caught")
self.terminate(due_to_failure=False)
raise
if not dry:
_run(self, session, task_queue, lethal_signals=lethal_signals)
# set status
if self.status == WorkflowStatus.failed_but_running:
self.status = WorkflowStatus.failed
# set stage status to failed
for s in self.stages:
if s.status == StageStatus.running_but_failed:
s.status = StageStatus.failed
session.commit()
return False
elif self.status == WorkflowStatus.running:
if set_successful:
self.status = WorkflowStatus.successful
session.commit()
return True
else:
self.log.warning('%s exited with status "%s"', self, self.status)
session.commit()
return False
else:
self.log.info("Workflow dry run is complete")
return None
except Exception as ex:
self.log.fatal("Exception was raised")
self.log.fatal(ex, exc_info=True)
self.terminate(due_to_failure=False)
raise
def terminate(self, due_to_failure=True):
self.log.info("Terminating %s, due_to_failure=%s" % (self, due_to_failure))
if self.jobmanager:
self.log.info(
"Processing finished tasks and terminating {num_running_tasks} running tasks".format(
num_running_tasks=len(self.jobmanager.running_tasks),
)
)
_process_finished_tasks(self.jobmanager)
self.jobmanager.terminate()
if due_to_failure:
self.status = WorkflowStatus.failed
else:
self.status = WorkflowStatus.killed
self.session.commit()
@property
def tasks(self):
return [t for s in self.stages for t in s.tasks]
# return session.query(Task).join(Stage).filter(Stage.workflow == ex).all()
def stage_graph(self):
"""
:return: (networkx.DiGraph) a DAG of the stages
"""
g = nx.DiGraph()
g.add_nodes_from(self.stages)
g.add_edges_from((s, c) for s in self.stages for c in s.children if c)
return g
def task_graph(self):
"""
:return: (networkx.DiGraph) a DAG of the tasks
"""
g = nx.DiGraph()
g.add_nodes_from(self.tasks)
g.add_edges_from([(t, c) for t in self.tasks for c in t.children])
return g
def get_stage(self, name_or_id):
if isinstance(name_or_id, int):
f = lambda s: s.id == name_or_id
else:
f = lambda s: s.name == name_or_id
for stage in self.stages:
if f(stage):
return stage
raise ValueError("Stage with name %s does not exist" % name_or_id)
@property
def url(self):
return url_for("cosmos.workflow", name=self.name)
def __repr__(self):
return "<Workflow[%s] %s>" % (self.id or "", self.name)
def __unicode__(self):
return self.__repr__()
def delete(self, delete_files=False):
"""
:param delete_files: (bool) If True, delete :attr:`output_dir` directory and all contents on the filesystem
"""
if hasattr(self, "log"):
self.log.info("Deleting %s, delete_files=%s" % (self, delete_files))
for h in self.log.handlers:
h.flush()
h.close()
self.log.removeHandler(h)
if delete_files:
raise NotImplementedError("This should delete all Task.output_files")
print("%s Deleting from SQL..." % self, file=sys.stderr)
self.session.delete(self)
self.session.commit()
print("%s Deleted" % self, file=sys.stderr)
def get_first_failed_task(self, key=lambda t: t.finished_on):
"""
Return the first failed Task (chronologically).
If no Task failed, return None.
"""
for t in sorted([t for t in self.tasks if key(t) is not None], key=key):
if t.exit_status:
return t
return None
def _run(workflow, session, task_queue, lethal_signals):
"""
Do the workflow!
"""
def signal_handler(signum, frame):
workflow.log.critical(f"caught signal: {signum}, shutdown procedure will initiate shortly")
workflow.termination_signal = signum
for sig in lethal_signals:
# catch lethal signals (like a ctrl+c)
signal.signal(sig, signal_handler)
workflow.log.info("Executing TaskGraph")
available_cores = True
last_log_timestamp = time.time()
while len(task_queue) > 0:
if available_cores:
_run_queued_and_ready_tasks(task_queue, workflow)
available_cores = False
for task in _process_finished_tasks(workflow.jobmanager):
if task.status == TaskStatus.failed and not task.must_succeed:
pass # it's ok if the task failed
elif task.status == TaskStatus.failed and task.must_succeed:
if workflow.info["fail_fast"]:
workflow.log.info(
"%s Exiting run loop at first Task failure, exit_status: %s: %s",
workflow,
task.exit_status,
task,
)
workflow.terminate(due_to_failure=True)
return
# pop all descendents when a task fails; the rest of the graph can still execute
remove_nodes = descendants(task_queue, task).union({task,})
# graph_failed.add_edges(task_queue.subgraph(remove_nodes).edges())
task_queue.remove_nodes_from(remove_nodes)
workflow.status = WorkflowStatus.failed_but_running
workflow.log.info("%s tasks left in the queue" % len(task_queue))
elif task.status == TaskStatus.successful:
# just pop this task
task_queue.remove_node(task)
elif task.status == TaskStatus.no_attempt:
# the task must have failed, and is being reattempted
pass
else:
raise AssertionError("Unexpected finished task status %s for %s" % (task.status, task))
available_cores = True
last_log_timestamp = time.time()
# only commit Task changes after processing a batch of finished ones
session.commit()
if last_log_timestamp + WORKFLOW_LOG_AWKWARD_SILENCE_INTERVAL < time.time():
num_running = len(list(workflow.jobmanager.running_tasks))
workflow.log.info(
"Cosmos is still alive, just waiting on %d running_tasks, task_queue is len %d",
num_running,
len(task_queue),
)
last_log_timestamp = time.time()
# conveniently, this returns early if we catch a signal
time.sleep(workflow.jobmanager.poll_interval)
if workflow.termination_signal:
workflow.log.info(
"%s Early termination requested (%d): stopping workflow",
workflow,
workflow.termination_signal,
)
workflow.terminate(due_to_failure=False)
return
def _get_one_submittable_task_given_resource_constraints(tasks, cores_left, gpus_left):
tasks = sorted(tasks, key=lambda t: (t.gpu_req, t.core_req, t.id))
for task in tasks:
if task.gpu_req <= gpus_left and task.core_req <= cores_left:
return task
else:
return None
def _get_all_submittable_tasks_given_resource_constraints(workflow, ready_tasks):
ready_tasks = list(ready_tasks)
# get the list of submittable tasks given resource constraints
cores_used = sum([t.core_req for t in workflow.jobmanager.running_tasks])
gpus_used = sum([t.gpu_req for t in workflow.jobmanager.running_tasks])
if workflow.max_cores is None:
cores_left = float("inf")
else:
cores_left = workflow.max_cores - cores_used
if workflow.max_gpus is None:
gpus_left = float("inf")
else:
gpus_left = workflow.max_gpus - gpus_used
submittable_tasks = []
while len(ready_tasks) > 0:
task = _get_one_submittable_task_given_resource_constraints(ready_tasks, cores_left, gpus_left)
if task is None:
break
else:
ready_tasks.remove(task)
cores_left -= task.core_req
gpus_left -= task.gpu_req
submittable_tasks.append(task)
return submittable_tasks
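# Worked example (assumed numbers): with max_cores=4 and running tasks already
# using 3 cores, cores_left is 1, so only ready tasks with core_req <= 1 can be
# drained; the helper above always picks the cheapest eligible task first
# (fewest gpus, then cores, then lowest id).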
def _run_queued_and_ready_tasks(task_queue, workflow):
ready_tasks = [
task
for task, degree in list(task_queue.in_degree())
if degree == 0 and task.status == TaskStatus.no_attempt
]
if workflow.max_cores is None and workflow.max_gpus is None:
submittable_tasks = sorted(ready_tasks, key=lambda t: t.id)
else:
submittable_tasks = _get_all_submittable_tasks_given_resource_constraints(workflow, ready_tasks)
# submit in a batch for speed
workflow.jobmanager.run_tasks(submittable_tasks)
if len(submittable_tasks) < len(ready_tasks):
workflow.log.info(
"Reached resource limits of max_cores of {workflow.max_cores}, "
"or max_gpu of {workflow.max_gpus}, "
"waiting for a task to finish...".format(**locals())
)
# only commit submitted Tasks after submitting a batch
workflow.session.commit()
def _process_finished_tasks(jobmanager):
for task in jobmanager.get_finished_tasks():
if task.NOOP or task.exit_status == 0:
task.status = TaskStatus.successful
yield task
else:
task.status = TaskStatus.failed
yield task
def handle_exits(workflow):
@atexit.register
def cleanup_check():
try:
if workflow is not None and workflow.status in {
WorkflowStatus.running,
WorkflowStatus.failed_but_running,
}:
workflow.log.error("%s Still running when atexit() was called, terminating" % workflow)
workflow.terminate(due_to_failure=True)
except SQLAlchemyError:
workflow.log.error(
"%s Unknown status when atexit() was called (SQL error), terminating" % workflow
)
workflow.terminate(due_to_failure=True)
def _copy_graph(graph):
graph2 = nx.DiGraph()
graph2.add_edges_from(graph.edges())
graph2.add_nodes_from(graph.nodes())
return graph2
| gpl-3.0 |
eXistenZNL/SickRage | autoProcessTV/lib/requests/packages/urllib3/packages/six.py | 2375 | 11628 | """Utilities for writing code that runs on Python 2 and 3"""
#Copyright (c) 2010-2011 Benjamin Peterson
#Permission is hereby granted, free of charge, to any person obtaining a copy of
#this software and associated documentation files (the "Software"), to deal in
#the Software without restriction, including without limitation the rights to
#use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
#the Software, and to permit persons to whom the Software is furnished to do so,
#subject to the following conditions:
#The above copyright notice and this permission notice shall be included in all
#copies or substantial portions of the Software.
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
#FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
#COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
#IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
#CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import operator
import sys
import types
__author__ = "Benjamin Peterson <benjamin@python.org>"
__version__ = "1.2.0" # Revision 41c74fef2ded
# True if we are running on Python 3.
PY3 = sys.version_info[0] == 3
if PY3:
string_types = str,
integer_types = int,
class_types = type,
text_type = str
binary_type = bytes
MAXSIZE = sys.maxsize
else:
string_types = basestring,
integer_types = (int, long)
class_types = (type, types.ClassType)
text_type = unicode
binary_type = str
if sys.platform.startswith("java"):
# Jython always uses 32 bits.
MAXSIZE = int((1 << 31) - 1)
else:
# It's possible to have sizeof(long) != sizeof(Py_ssize_t).
class X(object):
def __len__(self):
return 1 << 31
try:
len(X())
except OverflowError:
# 32-bit
MAXSIZE = int((1 << 31) - 1)
else:
# 64-bit
MAXSIZE = int((1 << 63) - 1)
del X
def _add_doc(func, doc):
"""Add documentation to a function."""
func.__doc__ = doc
def _import_module(name):
"""Import module, returning the module after the last dot."""
__import__(name)
return sys.modules[name]
class _LazyDescr(object):
def __init__(self, name):
self.name = name
def __get__(self, obj, tp):
result = self._resolve()
setattr(obj, self.name, result)
# This is a bit ugly, but it avoids running this again.
delattr(tp, self.name)
return result
class MovedModule(_LazyDescr):
def __init__(self, name, old, new=None):
super(MovedModule, self).__init__(name)
if PY3:
if new is None:
new = name
self.mod = new
else:
self.mod = old
def _resolve(self):
return _import_module(self.mod)
class MovedAttribute(_LazyDescr):
def __init__(self, name, old_mod, new_mod, old_attr=None, new_attr=None):
super(MovedAttribute, self).__init__(name)
if PY3:
if new_mod is None:
new_mod = name
self.mod = new_mod
if new_attr is None:
if old_attr is None:
new_attr = name
else:
new_attr = old_attr
self.attr = new_attr
else:
self.mod = old_mod
if old_attr is None:
old_attr = name
self.attr = old_attr
def _resolve(self):
module = _import_module(self.mod)
return getattr(module, self.attr)
class _MovedItems(types.ModuleType):
"""Lazy loading of moved objects"""
_moved_attributes = [
MovedAttribute("cStringIO", "cStringIO", "io", "StringIO"),
MovedAttribute("filter", "itertools", "builtins", "ifilter", "filter"),
MovedAttribute("input", "__builtin__", "builtins", "raw_input", "input"),
MovedAttribute("map", "itertools", "builtins", "imap", "map"),
MovedAttribute("reload_module", "__builtin__", "imp", "reload"),
MovedAttribute("reduce", "__builtin__", "functools"),
MovedAttribute("StringIO", "StringIO", "io"),
MovedAttribute("xrange", "__builtin__", "builtins", "xrange", "range"),
MovedAttribute("zip", "itertools", "builtins", "izip", "zip"),
MovedModule("builtins", "__builtin__"),
MovedModule("configparser", "ConfigParser"),
MovedModule("copyreg", "copy_reg"),
MovedModule("http_cookiejar", "cookielib", "http.cookiejar"),
MovedModule("http_cookies", "Cookie", "http.cookies"),
MovedModule("html_entities", "htmlentitydefs", "html.entities"),
MovedModule("html_parser", "HTMLParser", "html.parser"),
MovedModule("http_client", "httplib", "http.client"),
MovedModule("BaseHTTPServer", "BaseHTTPServer", "http.server"),
MovedModule("CGIHTTPServer", "CGIHTTPServer", "http.server"),
MovedModule("SimpleHTTPServer", "SimpleHTTPServer", "http.server"),
MovedModule("cPickle", "cPickle", "pickle"),
MovedModule("queue", "Queue"),
MovedModule("reprlib", "repr"),
MovedModule("socketserver", "SocketServer"),
MovedModule("tkinter", "Tkinter"),
MovedModule("tkinter_dialog", "Dialog", "tkinter.dialog"),
MovedModule("tkinter_filedialog", "FileDialog", "tkinter.filedialog"),
MovedModule("tkinter_scrolledtext", "ScrolledText", "tkinter.scrolledtext"),
MovedModule("tkinter_simpledialog", "SimpleDialog", "tkinter.simpledialog"),
MovedModule("tkinter_tix", "Tix", "tkinter.tix"),
MovedModule("tkinter_constants", "Tkconstants", "tkinter.constants"),
MovedModule("tkinter_dnd", "Tkdnd", "tkinter.dnd"),
MovedModule("tkinter_colorchooser", "tkColorChooser",
"tkinter.colorchooser"),
MovedModule("tkinter_commondialog", "tkCommonDialog",
"tkinter.commondialog"),
MovedModule("tkinter_tkfiledialog", "tkFileDialog", "tkinter.filedialog"),
MovedModule("tkinter_font", "tkFont", "tkinter.font"),
MovedModule("tkinter_messagebox", "tkMessageBox", "tkinter.messagebox"),
MovedModule("tkinter_tksimpledialog", "tkSimpleDialog",
"tkinter.simpledialog"),
MovedModule("urllib_robotparser", "robotparser", "urllib.robotparser"),
MovedModule("winreg", "_winreg"),
]
for attr in _moved_attributes:
setattr(_MovedItems, attr.name, attr)
del attr
moves = sys.modules[__name__ + ".moves"] = _MovedItems("moves")
def add_move(move):
"""Add an item to six.moves."""
setattr(_MovedItems, move.name, move)
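# Hypothetical example (not part of six 1.2.0 itself):
#
#   add_move(MovedModule("dbm_gnu", "gdbm", "dbm.gnu"))
#   from six.moves import dbm_gnu # gdbm on Python 2, dbm.gnu on Python 3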
def remove_move(name):
"""Remove item from six.moves."""
try:
delattr(_MovedItems, name)
except AttributeError:
try:
del moves.__dict__[name]
except KeyError:
raise AttributeError("no such move, %r" % (name,))
if PY3:
_meth_func = "__func__"
_meth_self = "__self__"
_func_code = "__code__"
_func_defaults = "__defaults__"
_iterkeys = "keys"
_itervalues = "values"
_iteritems = "items"
else:
_meth_func = "im_func"
_meth_self = "im_self"
_func_code = "func_code"
_func_defaults = "func_defaults"
_iterkeys = "iterkeys"
_itervalues = "itervalues"
_iteritems = "iteritems"
try:
advance_iterator = next
except NameError:
def advance_iterator(it):
return it.next()
next = advance_iterator
if PY3:
def get_unbound_function(unbound):
return unbound
Iterator = object
def callable(obj):
return any("__call__" in klass.__dict__ for klass in type(obj).__mro__)
else:
def get_unbound_function(unbound):
return unbound.im_func
class Iterator(object):
def next(self):
return type(self).__next__(self)
callable = callable
_add_doc(get_unbound_function,
"""Get the function out of a possibly unbound function""")
get_method_function = operator.attrgetter(_meth_func)
get_method_self = operator.attrgetter(_meth_self)
get_function_code = operator.attrgetter(_func_code)
get_function_defaults = operator.attrgetter(_func_defaults)
def iterkeys(d):
"""Return an iterator over the keys of a dictionary."""
return iter(getattr(d, _iterkeys)())
def itervalues(d):
"""Return an iterator over the values of a dictionary."""
return iter(getattr(d, _itervalues)())
def iteritems(d):
"""Return an iterator over the (key, value) pairs of a dictionary."""
return iter(getattr(d, _iteritems)())
if PY3:
def b(s):
return s.encode("latin-1")
def u(s):
return s
if sys.version_info[1] <= 1:
def int2byte(i):
return bytes((i,))
else:
# This is about 2x faster than the implementation above on 3.2+
int2byte = operator.methodcaller("to_bytes", 1, "big")
import io
StringIO = io.StringIO
BytesIO = io.BytesIO
else:
def b(s):
return s
def u(s):
return unicode(s, "unicode_escape")
int2byte = chr
import StringIO
StringIO = BytesIO = StringIO.StringIO
_add_doc(b, """Byte literal""")
_add_doc(u, """Text literal""")
if PY3:
import builtins
exec_ = getattr(builtins, "exec")
def reraise(tp, value, tb=None):
if value.__traceback__ is not tb:
raise value.with_traceback(tb)
raise value
print_ = getattr(builtins, "print")
del builtins
else:
def exec_(code, globs=None, locs=None):
"""Execute code in a namespace."""
if globs is None:
frame = sys._getframe(1)
globs = frame.f_globals
if locs is None:
locs = frame.f_locals
del frame
elif locs is None:
locs = globs
exec("""exec code in globs, locs""")
exec_("""def reraise(tp, value, tb=None):
raise tp, value, tb
""")
def print_(*args, **kwargs):
"""The new-style print function."""
fp = kwargs.pop("file", sys.stdout)
if fp is None:
return
def write(data):
if not isinstance(data, basestring):
data = str(data)
fp.write(data)
want_unicode = False
sep = kwargs.pop("sep", None)
if sep is not None:
if isinstance(sep, unicode):
want_unicode = True
elif not isinstance(sep, str):
raise TypeError("sep must be None or a string")
end = kwargs.pop("end", None)
if end is not None:
if isinstance(end, unicode):
want_unicode = True
elif not isinstance(end, str):
raise TypeError("end must be None or a string")
if kwargs:
raise TypeError("invalid keyword arguments to print()")
if not want_unicode:
for arg in args:
if isinstance(arg, unicode):
want_unicode = True
break
if want_unicode:
newline = unicode("\n")
space = unicode(" ")
else:
newline = "\n"
space = " "
if sep is None:
sep = space
if end is None:
end = newline
for i, arg in enumerate(args):
if i:
write(sep)
write(arg)
write(end)
_add_doc(reraise, """Reraise an exception.""")
def with_metaclass(meta, base=object):
"""Create a base class with a metaclass."""
return meta("NewBase", (base,), {})
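# Usage sketch: avoids both the py2-only __metaclass__ attribute and the
# py3-only class C(metaclass=Meta) syntax.
#
#   class Meta(type):
#       pass
#
#   class MyClass(with_metaclass(Meta)):
#       pass
#
#   assert type(MyClass) is Meta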
| gpl-3.0 |
arguman/arguman.org | web/blog/views.py | 8 | 1267 | from django.conf import settings
from django.contrib.syndication.views import Feed
from django.utils.feedgenerator import Atom1Feed
from django.views.generic import ListView
from django.views.generic.detail import DetailView
from blog.models import Post
class BlogIndexView(ListView):
template_name = "blog/index.html"
queryset = Post.published_objects.all()
context_object_name = "posts"
paginate_by = 30
class BlogDetailView(DetailView):
template_name = "blog/detail.html"
model = Post
context_object_name = "post"
def get_queryset(self):
if self.request.user.is_superuser:
return self.model.objects.all()
return self.model.published_objects.all()
class BlogPostsRssFeed(Feed):
title = settings.BLOG_FEED_TITLE
link = settings.BLOG_URL
description = settings.BLOG_FEED_DESCRIPTION
def items(self):
return Post.objects.all()[:20]
def item_description(self, post):
return post.content
def item_pubdate(self, post):
return post.date_created
def item_categories(self, post):
return [tag.name for tag in post.tags.all()]
class BlogPostsAtomFeed(BlogPostsRssFeed):
feed_type = Atom1Feed
subtitle = settings.BLOG_FEED_DESCRIPTION
| agpl-3.0 |
HLFH/CouchPotatoServer | libs/requests/packages/charade/gb2312prober.py | 2994 | 1681 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .mbcharsetprober import MultiByteCharSetProber
from .codingstatemachine import CodingStateMachine
from .chardistribution import GB2312DistributionAnalysis
from .mbcssm import GB2312SMModel
class GB2312Prober(MultiByteCharSetProber):
def __init__(self):
MultiByteCharSetProber.__init__(self)
self._mCodingSM = CodingStateMachine(GB2312SMModel)
self._mDistributionAnalyzer = GB2312DistributionAnalysis()
self.reset()
def get_charset_name(self):
return "GB2312"
| gpl-3.0 |
f0rki/cb-multios | original-challenges/CGC_Video_Format_Parser_and_Viewer/support/pollerGen.py | 1 | 14513 | '''
Copyright (c) 2014 Cromulence LLC
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
'''
import struct
import sys
import random
import string
import time
''' This Python script generates 1000 polls at a time. The first argument may be
a seed; if one is not provided, a seed is generated from time(). All polls
are written to poller/for-testing, and from there they can be partitioned out.
'''
def bitsNeeded(cnt):
if cnt == 0:
return 1
z = 0
while ( cnt ):
z += 1
cnt >>= 1
return z
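# Examples: bitsNeeded(0) -> 1, bitsNeeded(7) -> 3, bitsNeeded(8) -> 4
# (i.e. the bit length of cnt, with 0 mapped to 1).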
class bitstream:
def __init__( self ):
### pdata is the final output stream
### It is written to when the bits reach 8
self.pdata = ''
### Current number of bits in the queue
self.cbit = 0
### Current value being appended to
self.tvalue = 0
return
def appendBits( self, value, bitCount ):
for bc in range(bitCount):
nextBit = (value >> ( bitCount - (bc+1) ) ) & 1
self.tvalue |= (nextBit << (7-self.cbit))
self.cbit += 1
### If we have filled up a byte then pack it up
if self.cbit == 8:
self.pdata += struct.pack('B', self.tvalue)
self.cbit = 0
self.tvalue = 0
return bitCount
def padItOut( self ):
### If there are remaining bits pack them on
if self.cbit != 0:
self.pdata += struct.pack('B', self.tvalue)
self.cbit = 0
self.tvalue = 0
return
def outStream( self ):
return self.pdata
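# Usage sketch (illustrative values): packing 0b101 then 0b01 yields a
# single byte, 0b10101000.
#
#   bs = bitstream()
#   bs.appendBits(0b101, 3)
#   bs.appendBits(0b01, 2)
#   bs.padItOut()
#   bs.outStream() # -> '\xa8'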
class cvf:
def __init__( self ):
self.height = 0;
self.width = 0;
self.data = ''
self.customAvailable = [ 0,1,2,3,4,5,6,7]
self.customUsed = []
### This is needed in case the size of the video
### will go beyond 4096
self.frameCountOffset = 0
self.pixelDicts = []
## Append empty lists for custom dictionary place holders
self.pixelDicts.append( [ [], [], [], [], [], [], [], [] ] )
## Append standard dictionaries
self.pixelDicts.append( [' ', '.'] )
self.pixelDicts.append( [' ', '.', '|', '#'] )
self.pixelDicts.append( [' ', '.', '|', '#', '@', '$', '(', ')'])
self.pixelDicts.append( [ ' ', '.', '|', '#', '@', '$', '(', ')', '*', 'H', 'O', 'E', 'W', 'M', '%', '&'])
self.pixelDicts.append( [ ' ', '.', '|', '#', '@', '$', '(', ')', '*', 'H', 'O', 'E', 'W', 'M', '%', '&', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u','v', 'w', 'x', 'y','z'] )
self.pixelDicts.append( ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9'])
self.pixelDicts.append( [ ' ', '!', '"', '#', '$', '%', '&', '\'', '(', ')', '*', '+', ',', '-', '.', '/', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', ':', ';', '<', '=', '>', '?', '@', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z', '[', '\\', ']', '^', '_', '`', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', '{', '|', '}', '~'] )
self.frameCount = 0
self.frames = []
self.viewableFrames = []
self.name = ''
self.description = ''
### Set magic
self.data = "\x00CVF"
### Generate the header
self.genHeader()
self.genDesc()
self.genCustomPixelDict()
for x in range(self.frameCount):
self.genFrame()
def genIncFrame( self ):
tempFrame = struct.pack('H', 0x5555)
frame_type = 1
index_type = 0
dict_type = random.randint( 0, 7 )
### If no custom dictionaries were generated, fall back to a standard one
if dict_type == 0 and len(self.customUsed) == 0:
dict_type = random.randint( 1, 7 )
### If a custom dictionary is selected then select which one
if dict_type == 0:
custom_dict = random.choice( self.customUsed )
dictionary = self.pixelDicts[0][ custom_dict ][:]
else:
custom_dict = 0
dictionary = self.pixelDicts[dict_type][:]
### Write the flag
flag = ( frame_type << 7 ) | ( index_type << 6 ) | (dict_type << 3) | ( custom_dict)
tempFrame += struct.pack('B', flag)
### This is the maximum number of pixels that we
### can generate
maxPixels = self.height * self.width
genMax = maxPixels / 2
if genMax <= 1:
genMax = maxPixels
### This is the number that we will actually generate
cntPixelsToGenerate = random.randint( 1, genMax )
### The count must be encoded in bits.
### Since this is not 0 indexed we do not subtract 1
pixelCountEncodingBitCount = bitsNeeded( maxPixels )
### This is the size of each index field
pixelIndexEncodingBitCount = bitsNeeded( maxPixels - 1)
### The actual pixel value bit length needs to be calcd
pixelValueEncodingBitCount = bitsNeeded( len(dictionary) - 1)
### Get a copy of the previous image.
pixelImage = list( self.frames[-1] )
pixelIndexList = []
pixelValueList = []
### Generate the new pixel data
for x in range(cntPixelsToGenerate):
### Select image index
ic = random.randint(0, maxPixels-1)
### Select pixel value
iv = random.choice( dictionary )
### Pull out index of the pixel
pi = dictionary.index( iv )
pixelIndexList.append( ic )
pixelValueList.append( pi )
### Update the image for later
pixelImage[ ic ] = iv
### The bitstream is needed for handling the appending
### of individual bits.
bs = bitstream()
### Append the pixel count
bs.appendBits( cntPixelsToGenerate, pixelCountEncodingBitCount)
for x in range(cntPixelsToGenerate):
bs.appendBits( pixelIndexList[x], pixelIndexEncodingBitCount)
bs.appendBits( pixelValueList[x], pixelValueEncodingBitCount)
### No more additions so pad it out then append the stream
bs.padItOut()
tempFrame += bs.outStream()
if len(self.data) + len(tempFrame) > 4096:
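### This frame would push the file past the 4096-byte limit: discard it
### and decrement the big-endian frame count written just before
### frameCountOffset so the header matches the frames actually emitted.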
ti = struct.unpack('>H', self.data[ self.frameCountOffset-2: self.frameCountOffset])[0]
cl = list(self.data)
self.data = cl[:self.frameCountOffset-2]
self.data += struct.pack('>H', ti-1)
self.data += cl[self.frameCountOffset:]
self.data = ''.join( self.data )
return
else:
self.data += tempFrame
### Fix the frame
img = ''
for x in range(len(pixelImage)):
if x != 0 and x % self.width == 0:
img += '\n'
img += pixelImage[x]
img += '\n'
self.viewableFrames.append( img )
self.frames.append(''.join(pixelImage))
return
def genFullFrame( self ):
### A temp frame is needed in case it will pass size limits
tempFrame = struct.pack('H', 0x5555)
### Frame type is statically 0
frame_type = 0
### Index is not used with full frame
index_type = 0
### Select the dictionary to use 0 for custom
dict_type = random.randint( 0, 7 )
### If no custom dictionaries were generated, fall back to a standard one
if dict_type == 0 and len(self.customUsed) == 0:
dict_type = random.randint( 1, 7 )
### If a custom dictionary is selected then select which one
if dict_type == 0:
custom_dict = random.choice( self.customUsed )
dictionary = self.pixelDicts[0][ custom_dict ][:]
else:
custom_dict = 0
dictionary = self.pixelDicts[dict_type][:]
### Write the flag
flag = ( frame_type << 7 ) | ( index_type << 6 ) | (dict_type << 3) | ( custom_dict)
tempFrame += struct.pack('B', flag)
### Randomly select a pixel and generate both
### the pixel image and the index array
pixel_image = []
index_array = []
length = self.height * self.width
for p in range(length):
q = random.choice( dictionary )
pixel_image.append( q )
index_array.append( dictionary.index( q ) )
### Calculate the number of bits for each pixel
pixel_bits_required = bitsNeeded( len(dictionary) - 1)
### Initialize the bit counter and bit data
bit_counter = 0
bit_data = 0
### Loop through each index and write the data
for i in range( len(index_array) ):
for j in range(pixel_bits_required):
cb = (index_array[i] >> ( (pixel_bits_required-1)-j)) & 1
bit_data |= ( cb << ( 7-bit_counter) )
bit_counter += 1
if bit_counter == 8:
tempFrame += struct.pack('B', bit_data)
bit_counter = 0
bit_data = 0
### Add in any extra padding
if bit_counter != 0:
tempFrame += struct.pack('B', bit_data)
if len(self.data) + len(tempFrame) > 4096:
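### Same 4096-byte limit handling as in genIncFrame: drop the frame and
### patch the frame count in the header.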
ti = struct.unpack('>H', self.data[ self.frameCountOffset-2: self.frameCountOffset])[0]
cl = list(self.data)
self.data = cl[:self.frameCountOffset-2]
self.data += struct.pack('>H', ti-1)
self.data += cl[self.frameCountOffset:]
self.data = ''.join( self.data )
return
else:
self.data += tempFrame
img = ''
for x in range(len(pixel_image)):
if x != 0 and x % self.width == 0:
img += '\n'
img += pixel_image[x]
img += '\n'
self.viewableFrames.append( img )
self.frames.append( ''.join( pixel_image ))
return
def genFrame(self):
### First frame must be a full frame
if len(self.frames) == 0:
frame_type = 0
else:
frame_type = (random.randint(0, 100)) & 1
### If frame type is full then generate all the pixels
if frame_type == 0:
self.genFullFrame()
else:
self.genIncFrame()
return
def genDesc( self ):
length = random.randint(10,255)
self.data += struct.pack('H', 0x3333)
self.data += struct.pack('B', length)
n = ''
for i in range(length):
n += random.choice( string.lowercase + string.uppercase + string.digits + ' \t\n')
self.data += n
self.description = n
return
def genName(self):
length = random.randint(10,255)
self.data = struct.pack('H', 0x2222)
self.data += struct.pack('B', length)
n = ''
for i in range(length):
n += random.choice( string.lowercase + string.uppercase + string.digits)
self.data += n
self.name = n
return
def genHeader( self ):
self.width = random.randint( 1, 128 )
self.height = random.randint(1, 35 )
self.frameCount = random.randint( 1, 30 )
self.data += struct.pack('H', 0x1111)
self.data += struct.pack('B', self.height)
self.data += struct.pack('B', self.width)
self.data += struct.pack('>H', self.frameCount)
### Save this in case it must be modified
self.frameCountOffset = len(self.data)
return
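### Resulting layout: 2-byte tag 0x1111, 1-byte height, 1-byte width,
### 2-byte big-endian frame count; e.g. height 10, width 20, 3 frames
### emits '\x11\x11\x0a\x14\x00\x03'.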
def genCustomPixelDict( self ):
### number of dicts to generate
toGen = random.randint( 0, 7)
for i in range(toGen):
c = string.lowercase + string.uppercase + string.digits
c = list(c)
## This will be the number of available characters
count = random.randint(10,len(c))
pixels = []
### Randomly select the pixel dictionary index
dictInd = random.choice( self.customAvailable )
for x in range(count):
ch = random.choice( c )
pixels.append( ch )
c.remove(ch)
self.customUsed.append( dictInd )
self.customAvailable.remove(dictInd)
self.data += struct.pack('>H', 0x4444 + dictInd )
self.data += struct.pack('B', count)
self.data += ''.join(pixels)
self.pixelDicts[0][ dictInd] = pixels
return
if __name__ == '__main__':
seed = int(time.time())
if len(sys.argv) >= 2:
try:
seed = int(sys.argv[1])
except:
print '[ERROR] Converting seed to int failed'
sys.exit(0)
random.seed( seed )
for p in range(1000):
x = cvf()
pollxml = '<?xml version="1.0" standalone="no" ?>\n'
pollxml += '<!DOCTYPE pov SYSTEM "/usr/share/cgc-replay/replay.dtd">\n'
pollxml += '<pov>\n'
pollxml += '\t<cbid>CROMU_00032</cbid>\n'
pollxml += '\t<replay>\n'
pollxml += '\t\t<read echo="ascii"><delim>----------------Stream Me Your Video----------------\\n</delim><match><data>----------------Stream Me Your Video----------------\\n</data></match></read>\n'
pollxml += '\t\t<write echo = "ascii"><data>'
sz = struct.pack('I', len(x.data))
for z in sz:
pollxml += '\\x%.2x' %ord(z)
pollxml += '</data></write>\n'
pollxml += '\t\t<write echo = "ascii"><data>'
for z in x.data:
pollxml += '\\x%.2x' %ord(z)
pollxml += '</data></write>\n'
pollxml += '\t\t<read echo="ascii"><delim>--------------------Playing video-------------------\\n</delim><match><data>--------------------Playing video-------------------\\n</data></match></read>\n'
pollxml += '\t\t<read echo="ascii"><delim>\\n</delim><match><data>INFO: Height: %d Width: %d Frames: %d\\n</data></match></read>\n' %(x.height, x.width, len(x.frames))
pollxml += '\t\t<read echo="ascii"><delim>\\n</delim><match><data>INFO: Set your terminal height so that only the \'|\'s show.\\n</data></match></read>\n'
pollxml += '\t\t<read echo="ascii"><delim>\\n</delim><match><data>INFO: This will provide a better viewing experience\\n</data></match></read>\n'
pollxml += '\t\t<read echo="ascii"><delim>\\n</delim><match><data>INFO: When ready press a key...\\n</data></match></read>\n'
pollxml += '\t\t<write echo="ascii"><data>%c</data></write>\n' %(random.choice(string.lowercase))
for i in range(x.height):
pollxml += '\t\t<read echo="ascii"><delim>\\n</delim><match><data>|</data></match></read>\n'
for i in x.viewableFrames:
dataString = ''
for j in i:
dataString += '\\x%.2x' %(ord(j))
pollxml += '\t\t<read echo = "ascii"><delim>%s</delim><match><data>%s</data></match></read>\n' %(dataString, dataString)
pollxml += '\t</replay>\n'
pollxml += '</pov>\n'
nm = "poller/for-testing/POLL%.5d.xml" %p
f = open(nm, 'wb')
f.write(pollxml)
f.close()
### Uncomment this to write the actual image
#f = open('images/img%.5d.cvf' %(p), 'wb')
#f.write(struct.pack('I', len(x.data)))
#f.write(x.data)
#f.close()
| mit |
helldorado/ansible | test/units/modules/network/onyx/test_onyx_pfc_interface.py | 68 | 4284 | #
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from units.compat.mock import patch
from ansible.modules.network.onyx import onyx_pfc_interface
from units.modules.utils import set_module_args
from .onyx_module import TestOnyxModule, load_fixture
class TestOnyxPfcInterfaceModule(TestOnyxModule):
module = onyx_pfc_interface
def setUp(self):
super(TestOnyxPfcInterfaceModule, self).setUp()
self._pfc_enabled = True
self.mock_get_config = patch.object(
onyx_pfc_interface.OnyxPfcInterfaceModule,
"_get_pfc_config")
self.get_config = self.mock_get_config.start()
self.mock_load_config = patch(
'ansible.module_utils.network.onyx.onyx.load_config')
self.load_config = self.mock_load_config.start()
self.mock_get_version = patch.object(
onyx_pfc_interface.OnyxPfcInterfaceModule, "_get_os_version")
self.get_version = self.mock_get_version.start()
def tearDown(self):
super(TestOnyxPfcInterfaceModule, self).tearDown()
self.mock_get_config.stop()
self.mock_load_config.stop()
self.mock_get_version.stop()
def load_fixtures(self, commands=None, transport='cli'):
if self._pfc_enabled:
suffix = 'enabled'
else:
suffix = 'disabled'
config_file = 'onyx_pfc_interface_%s.cfg' % suffix
self.get_config.return_value = load_fixture(config_file)
self.load_config.return_value = None
self.get_version.return_value = "3.6.5000"
def _test_pfc_if(self, if_name, enabled, changed, commands):
state = 'enabled' if enabled else 'disabled'
set_module_args(dict(name=if_name, state=state))
self.execute_module(changed=changed, commands=commands)
def _test_pfc_no_change(self, enabled):
interfaces = ('Eth1/1', 'Eth1/1/2', 'Po1', 'Mpo2')
changed = False
commands = None
for ifc in interfaces:
self._test_pfc_if(ifc, enabled, changed, commands)
def test_pfc_enabled_no_change(self):
self._pfc_enabled = True
enabled = True
self._test_pfc_no_change(enabled)
def test_pfc_disabled_no_change(self):
self._pfc_enabled = False
enabled = False
self._test_pfc_no_change(enabled)
def _test_pfc_change(self, enabled):
cmd_list = [
('Eth1/1', 'interface ethernet 1/1'),
('Eth1/1/2', 'interface ethernet 1/1/2'),
('Po1', 'interface port-channel 1'),
('Mpo2', 'interface mlag-port-channel 2'),
]
changed = True
suffix = ' dcb priority-flow-control mode on force'
if not enabled:
suffix = ' no dcb priority-flow-control mode force'
for (if_name, cmd) in cmd_list:
commands = [cmd + suffix]
self._test_pfc_if(if_name, enabled, changed, commands)
def test_pfc_disabled_change(self):
self._pfc_enabled = False
enabled = True
self._test_pfc_change(enabled)
def test_pfc_enabled_change(self):
self._pfc_enabled = True
enabled = False
self._test_pfc_change(enabled)
def test_pfc_aggregate(self):
self._pfc_enabled = False
aggregate = [dict(name='Eth1/1'), dict(name='Eth1/1/2')]
set_module_args(dict(aggregate=aggregate, state='enabled'))
commands = [
'interface ethernet 1/1 dcb priority-flow-control mode on force',
'interface ethernet 1/1/2 dcb priority-flow-control mode on force']
self.execute_module(changed=True, commands=commands)
def test_pfc_aggregate_purge(self):
self._pfc_enabled = True
aggregate = [dict(name='Po1'), dict(name='Mpo2')]
set_module_args(dict(aggregate=aggregate, state='enabled', purge=True))
commands = [
'interface ethernet 1/1 no dcb priority-flow-control mode force',
'interface ethernet 1/1/2 no dcb priority-flow-control mode force']
self.execute_module(changed=True, commands=commands)
| gpl-3.0 |
hunch/hunch-sample-app | django/template/debug.py | 9 | 3894 | from django.conf import settings
from django.template import Lexer, Parser, tag_re, NodeList, VariableNode, TemplateSyntaxError
from django.utils.encoding import force_unicode
from django.utils.html import escape
from django.utils.safestring import SafeData, EscapeData
from django.utils.formats import localize
class DebugLexer(Lexer):
def __init__(self, template_string, origin):
super(DebugLexer, self).__init__(template_string, origin)
def tokenize(self):
"Return a list of tokens from a given template_string"
result, upto = [], 0
for match in tag_re.finditer(self.template_string):
start, end = match.span()
if start > upto:
result.append(self.create_token(self.template_string[upto:start], (upto, start), False))
upto = start
result.append(self.create_token(self.template_string[start:end], (start, end), True))
upto = end
last_bit = self.template_string[upto:]
if last_bit:
result.append(self.create_token(last_bit, (upto, upto + len(last_bit)), False))
return result
def create_token(self, token_string, source, in_tag):
token = super(DebugLexer, self).create_token(token_string, in_tag)
token.source = self.origin, source
return token
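# A token's .source attribute is the pair (origin, (start, end)); DebugParser
# and DebugNodeList attach it to TemplateSyntaxError instances so an error can
# be traced back to the exact span of the template that produced it.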
class DebugParser(Parser):
def __init__(self, lexer):
super(DebugParser, self).__init__(lexer)
self.command_stack = []
def enter_command(self, command, token):
self.command_stack.append( (command, token.source) )
def exit_command(self):
self.command_stack.pop()
def error(self, token, msg):
return self.source_error(token.source, msg)
def source_error(self, source, msg):
e = TemplateSyntaxError(msg)
e.source = source
return e
def create_nodelist(self):
return DebugNodeList()
def create_variable_node(self, contents):
return DebugVariableNode(contents)
def extend_nodelist(self, nodelist, node, token):
node.source = token.source
super(DebugParser, self).extend_nodelist(nodelist, node, token)
def unclosed_block_tag(self, parse_until):
command, source = self.command_stack.pop()
msg = "Unclosed tag '%s'. Looking for one of: %s " % (command, ', '.join(parse_until))
raise self.source_error(source, msg)
def compile_function_error(self, token, e):
if not hasattr(e, 'source'):
e.source = token.source
class DebugNodeList(NodeList):
def render_node(self, node, context):
try:
result = node.render(context)
except TemplateSyntaxError, e:
if not hasattr(e, 'source'):
e.source = node.source
raise
except Exception, e:
from sys import exc_info
wrapped = TemplateSyntaxError(u'Caught %s while rendering: %s' %
(e.__class__.__name__, force_unicode(e, errors='replace')))
wrapped.source = node.source
wrapped.exc_info = exc_info()
raise wrapped, None, wrapped.exc_info[2]
return result
class DebugVariableNode(VariableNode):
def render(self, context):
try:
output = self.filter_expression.resolve(context)
output = localize(output, use_l10n=context.use_l10n)
output = force_unicode(output)
except TemplateSyntaxError, e:
if not hasattr(e, 'source'):
e.source = self.source
raise
except UnicodeDecodeError:
return ''
if (context.autoescape and not isinstance(output, SafeData)) or isinstance(output, EscapeData):
return escape(output)
else:
return output
| mit |
oposs/check_mk_mirror | web/htdocs/main.py | 1 | 2879 | #!/usr/bin/python
# -*- encoding: utf-8; py-indent-offset: 4 -*-
# +------------------------------------------------------------------+
# | ____ _ _ __ __ _ __ |
# | / ___| |__ ___ ___| | __ | \/ | |/ / |
# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / |
# | | |___| | | | __/ (__| < | | | | . \ |
# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ |
# | |
# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de |
# +------------------------------------------------------------------+
#
# This file is part of Check_MK.
# The official homepage is at http://mathias-kettner.de/check_mk.
#
# check_mk is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation in version 2. check_mk is distributed
# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with-
# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A
# PARTICULAR PURPOSE. See the GNU General Public License for more de-
# tails. You should have received a copy of the GNU General Public
# License along with GNU Make; see the file COPYING. If not, write
# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
# Boston, MA 02110-1301 USA.
import defaults, config
def page_index():
default_start_url = config.user.get("start_url") or config.start_url
start_url = html.var("start_url", default_start_url).strip()
# Prevent redirecting to an absolute URL, which could be used to redirect
# users to compromised pages.
if '://' in start_url:
start_url = default_start_url
# Also prevent use of "javascript:" URLs, which could be used to inject code
if start_url.lower().startswith('javascript:'):
start_url = default_start_url
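# For example, a request carrying start_url=http://evil.example or
# start_url=javascript:alert(1) is forced back to the configured default,
# while a relative target such as dashboard.py passes through unchanged.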
if "%s" in config.page_heading:
heading = config.page_heading % (config.site(defaults.omd_site).get('alias', _("Multisite")))
else:
heading = config.page_heading
html.write('<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Frameset//EN" "http://www.w3.org/TR/html4/frameset.dtd">\n'
'<html><head>\n')
html.default_html_headers()
html.write("""<title>%s</title>
</head>
<frameset cols="280,*" frameborder="0" framespacing="0" border="0">
<frame src="side.py" name="side" noresize scrolling="no">
<frame src="%s" name="main" noresize>
</frameset>
</html>
""" % (html.attrencode(heading), html.attrencode(start_url)))
# This function does almost nothing. It just makes sure that
# a livestatus-connection is built up, since connect_to_livestatus()
# handles the _site_switch variable.
def ajax_switch_site():
html.live
| gpl-2.0 |
FCH808/FCH808.github.io | Intro to Machine Learning/ud120-projects/feature_selection/find_signature.py | 2 | 1243 | #!/usr/bin/python
import pickle
import numpy
numpy.random.seed(42)
### the words (features) and authors (labels), already largely processed
words_file = "word_data_overfit.pkl" ### like the file you made in the last mini-project
authors_file = "email_authors_overfit.pkl" ### this too
word_data = pickle.load( open(words_file, "r"))
authors = pickle.load( open(authors_file, "r") )
### test_size is the percentage of events assigned to the test set (remainder go into training)
from sklearn import cross_validation
features_train, features_test, labels_train, labels_test = cross_validation.train_test_split(word_data, authors, test_size=0.1, random_state=42)
from sklearn.feature_extraction.text import TfidfVectorizer
vectorizer = TfidfVectorizer(sublinear_tf=True, max_df=0.5,
stop_words='english')
features_train = vectorizer.fit_transform(features_train).toarray()
features_test = vectorizer.transform(features_test).toarray()
### a classic way to overfit is to use a small number
### of data points and a large number of features
### train on only 150 events to put ourselves in this regime
features_train = features_train[:150]
labels_train = labels_train[:150]
### your code goes here
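### One possible continuation (a sketch assuming a decision-tree classifier,
### which this overfitting mini-project conventionally uses; the accuracy
### check is illustrative):
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import accuracy_score
clf = DecisionTreeClassifier()
clf.fit(features_train, labels_train)
pred = clf.predict(features_test)
print "test accuracy:", accuracy_score(labels_test, pred)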
| mit |
nju520/django | tests/indexes/tests.py | 321 | 3037 | from unittest import skipUnless
from django.db import connection
from django.test import TestCase
from .models import Article, ArticleTranslation, IndexTogetherSingleList
class SchemaIndexesTests(TestCase):
"""
Test index handling by the db.backends.schema infrastructure.
"""
def test_index_name_hash(self):
"""
Index names should be deterministic.
"""
with connection.schema_editor() as editor:
index_name = editor._create_index_name(
model=Article,
column_names=("c1", "c2", "c3"),
suffix="123",
)
self.assertEqual(index_name, "indexes_article_c1_7ce4cc86123")
def test_index_together(self):
editor = connection.schema_editor()
index_sql = editor._model_indexes_sql(Article)
self.assertEqual(len(index_sql), 1)
# Ensure the index name is properly quoted
self.assertIn(
connection.ops.quote_name(
editor._create_index_name(Article, ['headline', 'pub_date'], suffix='_idx')
),
index_sql[0]
)
def test_index_together_single_list(self):
# Test for using index_together with a single list (#22172)
index_sql = connection.schema_editor()._model_indexes_sql(IndexTogetherSingleList)
self.assertEqual(len(index_sql), 1)
@skipUnless(connection.vendor == 'postgresql',
"This is a postgresql-specific issue")
def test_postgresql_text_indexes(self):
"""Test creation of PostgreSQL-specific text indexes (#12234)"""
from .models import IndexedArticle
index_sql = connection.schema_editor()._model_indexes_sql(IndexedArticle)
self.assertEqual(len(index_sql), 5)
self.assertIn('("headline" varchar_pattern_ops)', index_sql[2])
self.assertIn('("body" text_pattern_ops)', index_sql[3])
# unique=True and db_index=True should only create the varchar-specific
# index (#19441).
self.assertIn('("slug" varchar_pattern_ops)', index_sql[4])
@skipUnless(connection.vendor == 'postgresql',
"This is a postgresql-specific issue")
def test_postgresql_virtual_relation_indexes(self):
"""Test indexes are not created for related objects"""
index_sql = connection.schema_editor()._model_indexes_sql(Article)
self.assertEqual(len(index_sql), 1)
@skipUnless(connection.vendor == 'mysql', "This is a mysql-specific issue")
def test_no_index_for_foreignkey(self):
"""
MySQL on InnoDB already creates indexes automatically for foreign keys.
(#14180).
"""
storage = connection.introspection.get_storage_engine(
connection.cursor(), ArticleTranslation._meta.db_table
)
if storage != "InnoDB":
self.skipTest("This test only applies to the InnoDB storage engine")
index_sql = connection.schema_editor()._model_indexes_sql(ArticleTranslation)
self.assertEqual(index_sql, [])
| bsd-3-clause |
EventBuck/EventBuck | conf.py | 1 | 5585 | #!/usr/bin/env python
# -*- coding:utf-8 -*-
# Canvas Page name.
FACEBOOK_CANVAS_NAME = 'ojo-ticket'
# A random token for use with the Real-time API.
FACEBOOK_REALTIME_VERIFY_TOKEN = 'RANDOM TOKEN'
# The external URL this application is available at where the Real-time API will
# send its pings.
EXTERNAL_HREF = 'https://ojo-ticket.appspot.com/'
# Facebook User IDs of admins. The poor man's admin system.
ADMIN_USER_IDS = ['100002448856234']
UL_DEPARTEMENT = {'ACT':u'Actuariat\
', 'AEE':u'Admin. et éval. en éducation\
', 'ADM':u'Administration\
', 'ADS':u'Administration scolaire\
', 'APR':u'Affaires publ. et représ. int.\
', 'AGC':u'Agroéconomie\
', 'AGF':u'Agroforesterie\
', 'AGN':u'Agronomie\
', 'ALL':u'Allemand\
', 'AME':u'Aménagement du territoire\
', 'ANM':u'Anatomie\
', 'ANG':u'Anglais\
', 'ANL':u'Anglais (langue)\
', 'ANT':u'Anthropologie\
', 'ARA':u'Arabe\
', 'ARL':u'Archéologie\
', 'ARC':u'Architecture\
', 'GAD':u'Archivistique\
', 'ARD':u'Art dramatique\
', 'ANI':u'Art et science de l\'animation\
', 'ART':u'Arts\
', 'ARV':u'Arts visuels\
', 'ASR':u'Assurances\
', 'BCM':u'Biochimie\
', 'BCX':u'Biochimie médicale\
', 'BIF':u'Bio-informatique\
', 'BIO':u'Biologie\
', 'BMO':u'Biologie cell. et moléculaire\
', 'BVG':u'Biologie végétale\
', 'BPH':u'Biophotonique\
', 'CAT':u'Catéchése\
', 'CHM':u'Chimie\
', 'CHN':u'Chinois\
', 'CIN':u'Cinéma\
', 'COM':u'Communication\
', 'CTB':u'Comptabilité\
', 'CNS':u'Consommation\
', 'CSO':u'Counseling et orientation\
', 'CRL':u'Création littéraire\
', 'CRI':u'Criminologie\
', 'DES':u'Design graphique\
', 'DDU':u'Développement durable\
', 'DVE':u'Développement économique\
', 'DRI':u'Développement rural intégré\
', 'DID':u'Didactique\
', 'DRT':u'Droit\
', 'ERU':u'Économie rurale\
', 'ECN':u'Économique\
', 'EDC':u'Éducation\
', 'EPS':u'Éducation physique\
', 'ENP':u'Enseignement préscol. et prim.\
', 'ENS':u'Enseignement secondaire\
', 'EER':u'Ens. en éthique et cult. rel.\
', 'ENT':u'Entrepreneuriat\
', 'ENV':u'Environnement\
', 'EPM':u'Épidémiologie\
', 'EGN':u'Ergonomie\
', 'ERG':u'Ergothérapie\
', 'ESP':u'Espagnol\
', 'ESG':u'Espagnol (langue)\
', 'ETH':u'Éthique\
', 'EFN':u'Ethn. francoph. en Am. du N.\
', 'ETN':u'Ethnologie\
', 'EAN':u'Études anciennes\
', 'FEM':u'Études féministes\
', 'ETI':u'Études internationales\
', 'PTR':u'Études patrimoniales\
', 'GPL':u'Études pluridisciplinaires\
', 'EXD':u'Examen de doctorat\
', 'FOR':u'Foresterie\
', 'FIS':u'Formation interdisc. en santé\
', 'FPT':u'Formation prof. et technique\
', 'FRN':u'Français\
', 'FLE':u'Français lang. étr. ou seconde\
', 'FLS':u'Français langue seconde\
', 'GNT':u'Génétique\
', 'GAA':u'Génie agroalimentaire\
', 'GAE':u'Génie agroenvironnemental\
', 'GAL':u'Génie alimentaire\
', 'GCH':u'Génie chimique\
', 'GCI':u'Génie civil\
', 'GPG':u'Génie de la plasturgie\
', 'GEX':u'Génie des eaux\
', 'GEL':u'Génie électrique\
', 'GSC':u'Génie et sciences\
', 'GGL':u'Génie géologique\
', 'GIN':u'Génie industriel\
', 'GIF':u'Génie informatique\
', 'GLO':u'Génie logiciel\
', 'GMC':u'Génie mécanique\
', 'GML':u'Génie métallurgique\
', 'GMN':u'Génie minier\
', 'GPH':u'Génie physique\
', 'GGR':u'Géographie\
', 'GLG':u'Géologie\
', 'GMT':u'Géomatique\
', 'GSO':u'Gestion des opérations\
', 'GRH':u'Gestion des ress. humaines\
', 'GSE':u'Gestion économique\
', 'GSF':u'Gestion financière\
', 'GIE':u'Gestion internationale\
', 'GUI':u'Gest. urbaine et immobilière\
', 'GRC':u'Grec\
', 'HST':u'Histoire\
', 'HAR':u'Histoire de l\'art\
', 'HTL':u'Histologie\
', 'IFT':u'Informatique\
', 'IDI':u'Interv. en déficience intell.\
', 'IED':u'Intervention éducative\
', 'ITL':u'Italien\
', 'JAP':u'Japonais\
', 'JOU':u'Journalisme\
', 'KIN':u'Kinésiologie\
', 'LMO':u'Langues modernes\
', 'LOA':u'Langues orientales anciennes\
', 'LAT':u'Latin\
', 'LNG':u'Linguistique\
', 'LIT':u'Littérature\
', 'MNG':u'Management\
', 'MRK':u'Marketing\
', 'MAT':u'Mathématiques\
', 'MED':u'Médecine\
', 'MDD':u'Médecine dentaire\
', 'MDX':u'Médecine expérimentale\
', 'MEV':u'Mesure et évaluation\
', 'MQT':u'Méthodes quantitatives\
', 'MET':u'Méthodologie\
', 'MCB':u'Microbiologie\
', 'MSL':u'Muséologie\
', 'MUS':u'Musique\
', 'NRB':u'Neurobiologie\
', 'NUT':u'Nutrition\
', 'OCE':u'Océanographie\
', 'OPV':u'Optique et santé de la vue\
', 'ORT':u'Orthophonie\
', 'PST':u'Pastorale\
', 'PAT':u'Pathologie\
', 'PUN':u'Pédagogie universitaire\
', 'PHA':u'Pharmacie\
', 'PHC':u'Pharmacologie\
', 'PHI':u'Philosophie\
', 'PHS':u'Physiologie\
', 'PHT':u'Physiothérapie\
', 'PHY':u'Physique\
', 'PLG':u'Phytologie\
', 'PFP':u'Planif. financière personnelle\
', 'POR':u'Portugais\
', 'PSA':u'Psychiatrie\
', 'PSE':u'Psychoéducation\
', 'PSY':u'Psychologie\
', 'PPG':u'Psychopédagogie\
', 'RLT':u'Relations industrielles\
', 'RUS':u'Russe\
', 'SAT':u'Santé au travail\
', 'SAC':u'Santé communautaire\
', 'POL':u'Science politique\
', 'SAN':u'Sciences animales\
', 'SBM':u'Sciences biomédicales\
', 'SCR':u'Sciences des religions\
', 'SBO':u'Sciences du bois\
', 'SCG':u'Sciences géomatiques\
', 'SIN':u'Sciences infirmiéres\
', 'STA':u'Sciences, technologie aliments\
', 'SVS':u'Service social\
', 'SOC':u'Sociologie\
', 'SLS':u'Sols\
', 'STT':u'Statistique\
', 'SIO':u'Système information organisat.\
', 'TEN':u'Technologie éducative\
', 'THT':u'Théâtre\
', 'THL':u'Théologie\
', 'TCF':u'Thérapie conjug. et familiale\
', 'TRE':u'Thése, recherche, mémoire\
', 'TXM':u'Toxicomanie\
', 'TRD':u'Traduction\
', 'TED':u'Troubles envahissants du dév.\
'}
| mit |
mlaidouni/Artistfunds | venv/lib/python2.7/site-packages/pip/req/req_set.py | 25 | 32236 | from __future__ import absolute_import
from collections import defaultdict
from itertools import chain
import logging
import os
from pip._vendor import pkg_resources
from pip._vendor import requests
from pip.compat import expanduser
from pip.download import (is_file_url, is_dir_url, is_vcs_url, url_to_path,
unpack_url)
from pip.exceptions import (InstallationError, BestVersionAlreadyInstalled,
DistributionNotFound, PreviousBuildDirError,
HashError, HashErrors, HashUnpinned,
DirectoryUrlHashUnsupported, VcsHashUnsupported)
from pip.req.req_install import InstallRequirement
from pip.utils import (
display_path, dist_in_usersite, ensure_dir, normalize_path)
from pip.utils.hashes import MissingHashes
from pip.utils.logging import indent_log
from pip.vcs import vcs
logger = logging.getLogger(__name__)
class Requirements(object):
def __init__(self):
self._keys = []
self._dict = {}
def keys(self):
return self._keys
def values(self):
return [self._dict[key] for key in self._keys]
def __contains__(self, item):
return item in self._keys
def __setitem__(self, key, value):
if key not in self._keys:
self._keys.append(key)
self._dict[key] = value
def __getitem__(self, key):
return self._dict[key]
def __repr__(self):
values = ['%s: %s' % (repr(k), repr(self[k])) for k in self.keys()]
return 'Requirements({%s})' % ', '.join(values)
class DistAbstraction(object):
"""Abstracts out the wheel vs non-wheel prepare_files logic.
The requirements for anything installable are as follows:
- we must be able to determine the requirement name
(or we can't correctly handle the non-upgrade case).
- we must be able to generate a list of run-time dependencies
without installing any additional packages (or we would
have to either burn time by doing temporary isolated installs
or alternatively violate pip's 'don't start installing unless
all requirements are available' rule - neither of which are
desirable).
- for packages with setup requirements, we must also be able
to determine their requirements without installing additional
packages (for the same reason as run-time dependencies)
- we must be able to create a Distribution object exposing the
above metadata.
"""
def __init__(self, req_to_install):
self.req_to_install = req_to_install
def dist(self, finder):
"""Return a setuptools Dist object."""
raise NotImplementedError(self.dist)
def prep_for_dist(self):
"""Ensure that we can get a Dist for this requirement."""
raise NotImplementedError(self.dist)
def make_abstract_dist(req_to_install):
"""Factory to make an abstract dist object.
Preconditions: Either an editable req with a source_dir, or satisfied_by or
a wheel link, or a non-editable req with a source_dir.
:return: A concrete DistAbstraction.
"""
if req_to_install.editable:
return IsSDist(req_to_install)
elif req_to_install.link and req_to_install.link.is_wheel:
return IsWheel(req_to_install)
else:
return IsSDist(req_to_install)
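# For example, a requirement whose link points at a .whl file is handled by
# IsWheel (its metadata can be read without a build step), while editable
# checkouts and sdists both go through IsSDist, whose prep_for_dist() runs
# egg_info to obtain the metadata.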
class IsWheel(DistAbstraction):
def dist(self, finder):
return list(pkg_resources.find_distributions(
self.req_to_install.source_dir))[0]
def prep_for_dist(self):
# FIXME:https://github.com/pypa/pip/issues/1112
pass
class IsSDist(DistAbstraction):
def dist(self, finder):
dist = self.req_to_install.get_dist()
# FIXME: shouldn't be globally added:
if dist.has_metadata('dependency_links.txt'):
finder.add_dependency_links(
dist.get_metadata_lines('dependency_links.txt')
)
return dist
def prep_for_dist(self):
self.req_to_install.run_egg_info()
self.req_to_install.assert_source_matches_version()
class Installed(DistAbstraction):
def dist(self, finder):
return self.req_to_install.satisfied_by
def prep_for_dist(self):
pass
class RequirementSet(object):
def __init__(self, build_dir, src_dir, download_dir, upgrade=False,
ignore_installed=False, as_egg=False, target_dir=None,
ignore_dependencies=False, force_reinstall=False,
use_user_site=False, session=None, pycompile=True,
isolated=False, wheel_download_dir=None,
wheel_cache=None, require_hashes=False):
"""Create a RequirementSet.
:param wheel_download_dir: Where still-packed .whl files should be
written to. If None they are written to the download_dir parameter.
Separate to download_dir to permit only keeping wheel archives for
pip wheel.
:param download_dir: Where still packed archives should be written to.
If None they are not saved, and are deleted immediately after
unpacking.
:param wheel_cache: The pip wheel cache, for passing to
InstallRequirement.
"""
if session is None:
raise TypeError(
"RequirementSet() missing 1 required keyword argument: "
"'session'"
)
self.build_dir = build_dir
self.src_dir = src_dir
# XXX: download_dir and wheel_download_dir overlap semantically and may
# be combined if we're willing to have non-wheel archives present in
# the wheelhouse output by 'pip wheel'.
self.download_dir = download_dir
self.upgrade = upgrade
self.ignore_installed = ignore_installed
self.force_reinstall = force_reinstall
self.requirements = Requirements()
# Mapping of alias: real_name
self.requirement_aliases = {}
self.unnamed_requirements = []
self.ignore_dependencies = ignore_dependencies
self.successfully_downloaded = []
self.successfully_installed = []
self.reqs_to_cleanup = []
self.as_egg = as_egg
self.use_user_site = use_user_site
self.target_dir = target_dir # set from --target option
self.session = session
self.pycompile = pycompile
self.isolated = isolated
if wheel_download_dir:
wheel_download_dir = normalize_path(wheel_download_dir)
self.wheel_download_dir = wheel_download_dir
self._wheel_cache = wheel_cache
self.require_hashes = require_hashes
# Maps from install_req -> dependencies_of_install_req
self._dependencies = defaultdict(list)
def __str__(self):
reqs = [req for req in self.requirements.values()
if not req.comes_from]
reqs.sort(key=lambda req: req.name.lower())
return ' '.join([str(req.req) for req in reqs])
def __repr__(self):
reqs = [req for req in self.requirements.values()]
reqs.sort(key=lambda req: req.name.lower())
reqs_str = ', '.join([str(req.req) for req in reqs])
return ('<%s object; %d requirement(s): %s>'
% (self.__class__.__name__, len(reqs), reqs_str))
def add_requirement(self, install_req, parent_req_name=None):
"""Add install_req as a requirement to install.
:param parent_req_name: The name of the requirement that needed this
added. The name is used because when multiple unnamed requirements
resolve to the same name, we could otherwise end up with dependency
links that point outside the Requirements set. parent_req must
already be added. Note that None implies that this is a user
supplied requirement, vs an inferred one.
:return: Additional requirements to scan. That is either [] if
the requirement is not applicable, or [install_req] if the
requirement is applicable and has just been added.
"""
name = install_req.name
if not install_req.match_markers():
logger.warning("Ignoring %s: markers %r don't match your "
"environment", install_req.name,
install_req.markers)
return []
install_req.as_egg = self.as_egg
install_req.use_user_site = self.use_user_site
install_req.target_dir = self.target_dir
install_req.pycompile = self.pycompile
if not name:
# url or path requirement w/o an egg fragment
self.unnamed_requirements.append(install_req)
return [install_req]
else:
try:
existing_req = self.get_requirement(name)
except KeyError:
existing_req = None
if (parent_req_name is None and existing_req and not
existing_req.constraint and
existing_req.extras == install_req.extras):
raise InstallationError(
'Double requirement given: %s (already in %s, name=%r)'
% (install_req, existing_req, name))
if not existing_req:
# Add requirement
self.requirements[name] = install_req
# FIXME: what about other normalizations? E.g., _ vs. -?
if name.lower() != name:
self.requirement_aliases[name.lower()] = name
result = [install_req]
else:
# Assume there's no need to scan, and that we've already
# encountered this for scanning.
result = []
if not install_req.constraint and existing_req.constraint:
if (install_req.link and not (existing_req.link and
install_req.link.path == existing_req.link.path)):
self.reqs_to_cleanup.append(install_req)
raise InstallationError(
"Could not satisfy constraints for '%s': "
"installation from path or url cannot be "
"constrained to a version" % name)
# If we're now installing a constraint, mark the existing
# object for real installation.
existing_req.constraint = False
existing_req.extras = tuple(
sorted(set(existing_req.extras).union(
set(install_req.extras))))
logger.debug("Setting %s extras to: %s",
existing_req, existing_req.extras)
# And now we need to scan this.
result = [existing_req]
# Canonicalise to the already-added object for the backref
# check below.
install_req = existing_req
if parent_req_name:
parent_req = self.get_requirement(parent_req_name)
self._dependencies[parent_req].append(install_req)
return result
def has_requirement(self, project_name):
name = project_name.lower()
if (name in self.requirements and
not self.requirements[name].constraint or
name in self.requirement_aliases and
not self.requirements[self.requirement_aliases[name]].constraint):
return True
return False
@property
def has_requirements(self):
return list(req for req in self.requirements.values() if not
req.constraint) or self.unnamed_requirements
@property
def is_download(self):
if self.download_dir:
self.download_dir = expanduser(self.download_dir)
if os.path.exists(self.download_dir):
return True
else:
logger.critical('Could not find download directory')
raise InstallationError(
"Could not find or access download directory '%s'"
% display_path(self.download_dir))
return False
def get_requirement(self, project_name):
for name in project_name, project_name.lower():
if name in self.requirements:
return self.requirements[name]
if name in self.requirement_aliases:
return self.requirements[self.requirement_aliases[name]]
raise KeyError("No project with the name %r" % project_name)
def uninstall(self, auto_confirm=False):
for req in self.requirements.values():
if req.constraint:
continue
req.uninstall(auto_confirm=auto_confirm)
req.commit_uninstall()
def prepare_files(self, finder):
"""
Prepare process. Create temp directories, download and/or unpack files.
"""
# make the wheelhouse
if self.wheel_download_dir:
ensure_dir(self.wheel_download_dir)
# If any top-level requirement has a hash specified, enter
# hash-checking mode, which requires hashes from all.
root_reqs = self.unnamed_requirements + self.requirements.values()
require_hashes = (self.require_hashes or
any(req.has_hash_options for req in root_reqs))
if require_hashes and self.as_egg:
raise InstallationError(
'--egg is not allowed with --require-hashes mode, since it '
'delegates dependency resolution to setuptools and could thus '
'result in installation of unhashed packages.')
# Actually prepare the files, and collect any exceptions. Most hash
# exceptions cannot be checked ahead of time, because
# req.populate_link() needs to be called before we can make decisions
# based on link type.
discovered_reqs = []
hash_errors = HashErrors()
for req in chain(root_reqs, discovered_reqs):
try:
discovered_reqs.extend(self._prepare_file(
finder,
req,
require_hashes=require_hashes,
ignore_dependencies=self.ignore_dependencies))
except HashError as exc:
exc.req = req
hash_errors.append(exc)
if hash_errors:
raise hash_errors
def _check_skip_installed(self, req_to_install, finder):
"""Check if req_to_install should be skipped.
This will check if the req is installed, and whether we should upgrade
or reinstall it, taking into account all the relevant user options.
After calling this req_to_install will only have satisfied_by set to
None if the req_to_install is to be upgraded/reinstalled etc. Any
other value will be a dist recording the current thing installed that
satisfies the requirement.
Note that for vcs urls and the like we can't assess skipping in this
routine - we simply identify that we need to pull the thing down,
then later on it is pulled down and introspected to assess upgrade/
reinstalls etc.
:return: A text reason for why it was skipped, or None.
"""
# Check whether to upgrade/reinstall this req or not.
req_to_install.check_if_exists()
if req_to_install.satisfied_by:
skip_reason = 'satisfied (use --upgrade to upgrade)'
if self.upgrade:
best_installed = False
# For link based requirements we have to pull the
# tree down and inspect to assess the version #, so
# its handled way down.
if not (self.force_reinstall or req_to_install.link):
try:
finder.find_requirement(req_to_install, self.upgrade)
except BestVersionAlreadyInstalled:
skip_reason = 'up-to-date'
best_installed = True
except DistributionNotFound:
# No distribution found, so we squash the
# error - it will be raised later when we
# re-try later to do the install.
# Why don't we just raise here?
pass
if not best_installed:
# don't uninstall conflict if user install and
# conflict is not user install
if not (self.use_user_site and not
dist_in_usersite(req_to_install.satisfied_by)):
req_to_install.conflicts_with = \
req_to_install.satisfied_by
req_to_install.satisfied_by = None
return skip_reason
else:
return None
def _prepare_file(self,
finder,
req_to_install,
require_hashes=False,
ignore_dependencies=False):
"""Prepare a single requirements file.
:return: A list of additional InstallRequirements to also install.
"""
# Tell user what we are doing for this requirement:
# obtain (editable), skipping, processing (local url), collecting
# (remote url or package name)
if req_to_install.constraint or req_to_install.prepared:
return []
req_to_install.prepared = True
# ###################### #
# # print log messages # #
# ###################### #
if req_to_install.editable:
logger.info('Obtaining %s', req_to_install)
else:
# satisfied_by is only evaluated by calling _check_skip_installed,
# so it must be None here.
assert req_to_install.satisfied_by is None
if not self.ignore_installed:
skip_reason = self._check_skip_installed(
req_to_install, finder)
if req_to_install.satisfied_by:
assert skip_reason is not None, (
'_check_skip_installed returned None but '
'req_to_install.satisfied_by is set to %r'
% (req_to_install.satisfied_by,))
logger.info(
'Requirement already %s: %s', skip_reason,
req_to_install)
else:
if (req_to_install.link and
req_to_install.link.scheme == 'file'):
path = url_to_path(req_to_install.link.url)
logger.info('Processing %s', display_path(path))
else:
logger.info('Collecting %s', req_to_install)
with indent_log():
# ################################ #
# # vcs update or unpack archive # #
# ################################ #
if req_to_install.editable:
if require_hashes:
raise InstallationError(
'The editable requirement %s cannot be installed when '
'requiring hashes, because there is no single file to '
'hash.' % req_to_install)
req_to_install.ensure_has_source_dir(self.src_dir)
req_to_install.update_editable(not self.is_download)
abstract_dist = make_abstract_dist(req_to_install)
abstract_dist.prep_for_dist()
if self.is_download:
req_to_install.archive(self.download_dir)
elif req_to_install.satisfied_by:
if require_hashes:
logger.debug(
'Since it is already installed, we are trusting this '
'package without checking its hash. To ensure a '
'completely repeatable environment, install into an '
'empty virtualenv.')
abstract_dist = Installed(req_to_install)
else:
# @@ if filesystem packages are not marked
# editable in a req, a non deterministic error
# occurs when the script attempts to unpack the
# build directory
req_to_install.ensure_has_source_dir(self.build_dir)
# If a checkout exists, it's unwise to keep going. version
# inconsistencies are logged later, but do not fail the
# installation.
# FIXME: this won't upgrade when there's an existing
# package unpacked in `req_to_install.source_dir`
if os.path.exists(
os.path.join(req_to_install.source_dir, 'setup.py')):
raise PreviousBuildDirError(
"pip can't proceed with requirements '%s' due to a"
" pre-existing build directory (%s). This is "
"likely due to a previous installation that failed"
". pip is being responsible and not assuming it "
"can delete this. Please delete it and try again."
% (req_to_install, req_to_install.source_dir)
)
req_to_install.populate_link(
finder, self.upgrade, require_hashes)
# We can't hit this spot and have populate_link return None.
# req_to_install.satisfied_by is None here (because we're
# guarded) and upgrade has no impact except when satisfied_by
# is not None.
# Then inside find_requirement existing_applicable -> False
# If no new versions are found, DistributionNotFound is raised,
# otherwise a result is guaranteed.
assert req_to_install.link
link = req_to_install.link
# Now that we have the real link, we can tell what kind of
# requirements we have and raise some more informative errors
# than otherwise. (For example, we can raise VcsHashUnsupported
# for a VCS URL rather than HashMissing.)
if require_hashes:
# We could check these first 2 conditions inside
# unpack_url and save repetition of conditions, but then
# we would report less-useful error messages for
# unhashable requirements, complaining that there's no
# hash provided.
if is_vcs_url(link):
raise VcsHashUnsupported()
elif is_file_url(link) and is_dir_url(link):
raise DirectoryUrlHashUnsupported()
if (not req_to_install.original_link and
not req_to_install.is_pinned):
# Unpinned packages are asking for trouble when a new
# version is uploaded. This isn't a security check, but
# it saves users a surprising hash mismatch in the
# future.
#
# file:/// URLs aren't pinnable, so don't complain
# about them not being pinned.
raise HashUnpinned()
hashes = req_to_install.hashes(
trust_internet=not require_hashes)
if require_hashes and not hashes:
# Known-good hashes are missing for this requirement, so
# shim it with a facade object that will provoke hash
# computation and then raise a HashMissing exception
# showing the user what the hash should be.
hashes = MissingHashes()
try:
download_dir = self.download_dir
# We always delete unpacked sdists after pip ran.
autodelete_unpacked = True
if req_to_install.link.is_wheel \
and self.wheel_download_dir:
# when doing 'pip wheel` we download wheels to a
# dedicated dir.
download_dir = self.wheel_download_dir
if req_to_install.link.is_wheel:
if download_dir:
# When downloading, we only unpack wheels to get
# metadata.
autodelete_unpacked = True
else:
# When installing a wheel, we use the unpacked
# wheel.
autodelete_unpacked = False
unpack_url(
req_to_install.link, req_to_install.source_dir,
download_dir, autodelete_unpacked,
session=self.session, hashes=hashes)
except requests.HTTPError as exc:
logger.critical(
'Could not install requirement %s because '
'of error %s',
req_to_install,
exc,
)
raise InstallationError(
'Could not install requirement %s because '
'of HTTP error %s for URL %s' %
(req_to_install, exc, req_to_install.link)
)
abstract_dist = make_abstract_dist(req_to_install)
abstract_dist.prep_for_dist()
if self.is_download:
# Make a .zip of the source_dir we already created.
if req_to_install.link.scheme in vcs.all_schemes:
req_to_install.archive(self.download_dir)
# req_to_install.req is only avail after unpack for URL
# pkgs repeat check_if_exists to uninstall-on-upgrade
# (#14)
if not self.ignore_installed:
req_to_install.check_if_exists()
if req_to_install.satisfied_by:
if self.upgrade or self.ignore_installed:
# don't uninstall conflict if user install and
# conflict is not user install
if not (self.use_user_site and not
dist_in_usersite(
req_to_install.satisfied_by)):
req_to_install.conflicts_with = \
req_to_install.satisfied_by
req_to_install.satisfied_by = None
else:
logger.info(
'Requirement already satisfied (use '
'--upgrade to upgrade): %s',
req_to_install,
)
# ###################### #
# # parse dependencies # #
# ###################### #
dist = abstract_dist.dist(finder)
more_reqs = []
def add_req(subreq):
sub_install_req = InstallRequirement(
str(subreq),
req_to_install,
isolated=self.isolated,
wheel_cache=self._wheel_cache,
)
more_reqs.extend(self.add_requirement(
sub_install_req, req_to_install.name))
# We add req_to_install before its dependencies, so that we
# can refer to it when adding dependencies.
if not self.has_requirement(req_to_install.name):
# 'unnamed' requirements will get added here
self.add_requirement(req_to_install, None)
if not ignore_dependencies:
if (req_to_install.extras):
logger.debug(
"Installing extra requirements: %r",
','.join(req_to_install.extras),
)
missing_requested = sorted(
set(req_to_install.extras) - set(dist.extras)
)
for missing in missing_requested:
logger.warning(
'%s does not provide the extra \'%s\'',
dist, missing
)
available_requested = sorted(
set(dist.extras) & set(req_to_install.extras)
)
for subreq in dist.requires(available_requested):
add_req(subreq)
# cleanup tmp src
self.reqs_to_cleanup.append(req_to_install)
if not req_to_install.editable and not req_to_install.satisfied_by:
# XXX: --no-install leads this to report 'Successfully
# downloaded' for only non-editable reqs, even though we took
# action on them.
self.successfully_downloaded.append(req_to_install)
return more_reqs
def cleanup_files(self):
"""Clean up files, remove builds."""
logger.debug('Cleaning up...')
with indent_log():
for req in self.reqs_to_cleanup:
req.remove_temporary_source()
def _to_install(self):
"""Create the installation order.
The installation order is topological - requirements are installed
before the requiring thing. We break cycles at an arbitrary point,
and make no other guarantees.
"""
# The current implementation, which we may change at any point
# installs the user specified things in the order given, except when
# dependencies must come earlier to achieve topological order.
order = []
ordered_reqs = set()
def schedule(req):
if req.satisfied_by or req in ordered_reqs:
return
if req.constraint:
return
ordered_reqs.add(req)
for dep in self._dependencies[req]:
schedule(dep)
order.append(req)
for install_req in self.requirements.values():
schedule(install_req)
return order
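# For example, if self._dependencies maps A -> [B] and B -> [C], then
# schedule(A) recurses into B and then C before appending, so the returned
# order is [C, B, A]: every dependency precedes the package that needs it.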
def install(self, install_options, global_options=(), *args, **kwargs):
"""
Install everything in this set (after having downloaded and unpacked
the packages)
"""
to_install = self._to_install()
if to_install:
logger.info(
'Installing collected packages: %s',
', '.join([req.name for req in to_install]),
)
with indent_log():
for requirement in to_install:
if requirement.conflicts_with:
logger.info(
'Found existing installation: %s',
requirement.conflicts_with,
)
with indent_log():
requirement.uninstall(auto_confirm=True)
try:
requirement.install(
install_options,
global_options,
*args,
**kwargs
)
except:
# if install did not succeed, rollback previous uninstall
if (requirement.conflicts_with and not
requirement.install_succeeded):
requirement.rollback_uninstall()
raise
else:
if (requirement.conflicts_with and
requirement.install_succeeded):
requirement.commit_uninstall()
requirement.remove_temporary_source()
self.successfully_installed = to_install
| mit |
ssanderson/numpy | numpy/core/defchararray.py | 21 | 67393 | """
This module contains a set of functions for vectorized string
operations and methods.
.. note::
The `chararray` class exists for backwards compatibility with
Numarray, it is not recommended for new development. Starting from numpy
1.4, if one needs arrays of strings, it is recommended to use arrays of
`dtype` `object_`, `string_` or `unicode_`, and use the free functions
in the `numpy.char` module for fast vectorized string operations.
Some methods will only be available if the corresponding string method is
available in your version of Python.
The preferred alias for `defchararray` is `numpy.char`.
"""
from __future__ import division, absolute_import, print_function
import sys
from .numerictypes import string_, unicode_, integer, object_, bool_, character
from .numeric import ndarray, compare_chararrays
from .numeric import array as narray
from numpy.core.multiarray import _vec_string
from numpy.compat import asbytes, long
import numpy
__all__ = [
'chararray', 'equal', 'not_equal', 'greater_equal', 'less_equal',
'greater', 'less', 'str_len', 'add', 'multiply', 'mod', 'capitalize',
'center', 'count', 'decode', 'encode', 'endswith', 'expandtabs',
'find', 'index', 'isalnum', 'isalpha', 'isdigit', 'islower', 'isspace',
'istitle', 'isupper', 'join', 'ljust', 'lower', 'lstrip', 'partition',
'replace', 'rfind', 'rindex', 'rjust', 'rpartition', 'rsplit',
'rstrip', 'split', 'splitlines', 'startswith', 'strip', 'swapcase',
'title', 'translate', 'upper', 'zfill', 'isnumeric', 'isdecimal',
'array', 'asarray'
]
_globalvar = 0
if sys.version_info[0] >= 3:
_unicode = str
_bytes = bytes
else:
_unicode = unicode
_bytes = str
_len = len
def _use_unicode(*args):
"""
Helper function for determining the output type of some string
operations.
For an operation on two ndarrays, if at least one is unicode, the
result should be unicode.
"""
for x in args:
if (isinstance(x, _unicode) or
issubclass(numpy.asarray(x).dtype.type, unicode_)):
return unicode_
return string_
def _to_string_or_unicode_array(result):
"""
Helper function to cast a result back into a string or unicode array
if an object array must be used as an intermediary.
"""
return numpy.asarray(result.tolist())
def _clean_args(*args):
"""
Helper function for delegating arguments to Python string
functions.
Many of the Python string operations that have optional arguments
do not use 'None' to indicate a default value. In these cases,
we need to remove all `None` arguments, and those following them.
"""
newargs = []
for chk in args:
if chk is None:
break
newargs.append(chk)
return newargs
def _get_num_chars(a):
"""
Helper function that returns the number of characters per field in
a string or unicode array. This is to abstract out the fact that
for a unicode array this is itemsize / 4.
"""
if issubclass(a.dtype.type, unicode_):
return a.itemsize // 4
return a.itemsize
def equal(x1, x2):
"""
Return (x1 == x2) element-wise.
Unlike `numpy.equal`, this comparison is performed by first
stripping whitespace characters from the end of the string. This
behavior is provided for backward-compatibility with numarray.
Parameters
----------
x1, x2 : array_like of str or unicode
Input arrays of the same shape.
Returns
-------
out : ndarray or bool
Output array of bools, or a single bool if x1 and x2 are scalars.
See Also
--------
not_equal, greater_equal, less_equal, greater, less
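Examples
--------
A short illustration (trailing whitespace is stripped before comparing;
the boolean repr shown may differ slightly between numpy versions):
>>> np.char.equal(np.array(['hello ', 'world']), np.array(['hello', 'word']))
array([ True, False], dtype=bool)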
"""
return compare_chararrays(x1, x2, '==', True)
def not_equal(x1, x2):
"""
Return (x1 != x2) element-wise.
Unlike `numpy.not_equal`, this comparison is performed by first
stripping whitespace characters from the end of the string. This
behavior is provided for backward-compatibility with numarray.
Parameters
----------
x1, x2 : array_like of str or unicode
Input arrays of the same shape.
Returns
-------
out : ndarray or bool
Output array of bools, or a single bool if x1 and x2 are scalars.
See Also
--------
equal, greater_equal, less_equal, greater, less
"""
return compare_chararrays(x1, x2, '!=', True)
def greater_equal(x1, x2):
"""
Return (x1 >= x2) element-wise.
Unlike `numpy.greater_equal`, this comparison is performed by
first stripping whitespace characters from the end of the string.
This behavior is provided for backward-compatibility with
numarray.
Parameters
----------
x1, x2 : array_like of str or unicode
Input arrays of the same shape.
Returns
-------
out : ndarray or bool
Output array of bools, or a single bool if x1 and x2 are scalars.
See Also
--------
equal, not_equal, less_equal, greater, less
"""
return compare_chararrays(x1, x2, '>=', True)
def less_equal(x1, x2):
"""
Return (x1 <= x2) element-wise.
Unlike `numpy.less_equal`, this comparison is performed by first
stripping whitespace characters from the end of the string. This
behavior is provided for backward-compatibility with numarray.
Parameters
----------
x1, x2 : array_like of str or unicode
Input arrays of the same shape.
Returns
-------
out : ndarray or bool
Output array of bools, or a single bool if x1 and x2 are scalars.
See Also
--------
equal, not_equal, greater_equal, greater, less
"""
return compare_chararrays(x1, x2, '<=', True)
def greater(x1, x2):
"""
Return (x1 > x2) element-wise.
Unlike `numpy.greater`, this comparison is performed by first
stripping whitespace characters from the end of the string. This
behavior is provided for backward-compatibility with numarray.
Parameters
----------
x1, x2 : array_like of str or unicode
Input arrays of the same shape.
Returns
-------
out : ndarray or bool
Output array of bools, or a single bool if x1 and x2 are scalars.
See Also
--------
equal, not_equal, greater_equal, less_equal, less
"""
return compare_chararrays(x1, x2, '>', True)
def less(x1, x2):
"""
Return (x1 < x2) element-wise.
Unlike `numpy.less`, this comparison is performed by first
stripping whitespace characters from the end of the string. This
behavior is provided for backward-compatibility with numarray.
Parameters
----------
x1, x2 : array_like of str or unicode
Input arrays of the same shape.
Returns
-------
out : ndarray or bool
Output array of bools, or a single bool if x1 and x2 are scalars.
See Also
--------
equal, not_equal, greater_equal, less_equal, greater
"""
return compare_chararrays(x1, x2, '<', True)
def str_len(a):
"""
Return len(a) element-wise.
Parameters
----------
a : array_like of str or unicode
Returns
-------
out : ndarray
Output array of integers
See also
--------
__builtin__.len
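Examples
--------
A short illustration (the integer dtype of the result is platform
dependent):
>>> np.char.str_len(np.array(['numpy', 'char']))
array([5, 4])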
"""
return _vec_string(a, integer, '__len__')
def add(x1, x2):
"""
Return element-wise string concatenation for two arrays of str or unicode.
Arrays `x1` and `x2` must have the same shape.
Parameters
----------
x1 : array_like of str or unicode
Input array.
x2 : array_like of str or unicode
Input array.
Returns
-------
add : ndarray
Output array of `string_` or `unicode_`, depending on input types
of the same shape as `x1` and `x2`.
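Examples
--------
A short illustration (the dtype shown assumes a unicode input array; a
bytes input would give '|S6' instead):
>>> np.char.add(np.array(['Hello', 'World']), np.array([' ', '!']))
array(['Hello ', 'World!'], dtype='<U6')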
"""
arr1 = numpy.asarray(x1)
arr2 = numpy.asarray(x2)
out_size = _get_num_chars(arr1) + _get_num_chars(arr2)
dtype = _use_unicode(arr1, arr2)
return _vec_string(arr1, (dtype, out_size), '__add__', (arr2,))
def multiply(a, i):
"""
Return (a * i), that is, the string repeated `i` times,
element-wise.
Values in `i` of less than 0 are treated as 0 (which yields an
empty string).
Parameters
----------
a : array_like of str or unicode
i : array_like of ints
Returns
-------
out : ndarray
Output array of str or unicode, depending on input types
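Examples
--------
A short illustration (the dtype shown assumes a unicode input array):
>>> a = np.array(['a', 'b', 'c'])
>>> np.char.multiply(a, 3)
array(['aaa', 'bbb', 'ccc'], dtype='<U3')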
"""
a_arr = numpy.asarray(a)
i_arr = numpy.asarray(i)
if not issubclass(i_arr.dtype.type, integer):
raise ValueError("Can only multiply by integers")
out_size = _get_num_chars(a_arr) * max(long(i_arr.max()), 0)
return _vec_string(
a_arr, (a_arr.dtype.type, out_size), '__mul__', (i_arr,))
def mod(a, values):
"""
Return (a % values), that is, pre-Python 2.6 string formatting
(interpolation), element-wise for a pair of array_likes of str
or unicode.
Parameters
----------
a : array_like of str or unicode
values : array_like of values
These values will be element-wise interpolated into the string.
Returns
-------
out : ndarray
Output array of str or unicode, depending on input types
See also
--------
str.__mod__
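Examples
--------
A short illustration (the dtype shown assumes a unicode input array):
>>> np.char.mod(np.array(['%d bottles', '%d cans']), np.array([99, 12]))
array(['99 bottles', '12 cans'], dtype='<U10')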
"""
return _to_string_or_unicode_array(
_vec_string(a, object_, '__mod__', (values,)))
def capitalize(a):
"""
Return a copy of `a` with only the first character of each element
capitalized.
Calls `str.capitalize` element-wise.
For 8-bit strings, this method is locale-dependent.
Parameters
----------
a : array_like of str or unicode
Input array of strings to capitalize.
Returns
-------
out : ndarray
Output array of str or unicode, depending on input
types
See also
--------
str.capitalize
Examples
--------
>>> c = np.array(['a1b2','1b2a','b2a1','2a1b'],'S4'); c
array(['a1b2', '1b2a', 'b2a1', '2a1b'],
dtype='|S4')
>>> np.char.capitalize(c)
array(['A1b2', '1b2a', 'B2a1', '2a1b'],
dtype='|S4')
"""
a_arr = numpy.asarray(a)
return _vec_string(a_arr, a_arr.dtype, 'capitalize')
def center(a, width, fillchar=' '):
"""
Return a copy of `a` with its elements centered in a string of
length `width`.
Calls `str.center` element-wise.
Parameters
----------
a : array_like of str or unicode
width : int
The length of the resulting strings
fillchar : str or unicode, optional
The padding character to use (default is space).
Returns
-------
out : ndarray
Output array of str or unicode, depending on input
types
See also
--------
str.center
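Examples
--------
A short illustration (widths are chosen so the padding splits evenly;
the dtype shown assumes a unicode input array):
>>> np.char.center(np.array(['a', 'abc']), 5, fillchar='*')
array(['**a**', '*abc*'], dtype='<U5')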
"""
a_arr = numpy.asarray(a)
width_arr = numpy.asarray(width)
size = long(numpy.max(width_arr.flat))
if numpy.issubdtype(a_arr.dtype, numpy.string_):
fillchar = asbytes(fillchar)
return _vec_string(
a_arr, (a_arr.dtype.type, size), 'center', (width_arr, fillchar))
def count(a, sub, start=0, end=None):
"""
Returns an array with the number of non-overlapping occurrences of
substring `sub` in the range [`start`, `end`].
Calls `str.count` element-wise.
Parameters
----------
a : array_like of str or unicode
sub : str or unicode
The substring to search for.
start, end : int, optional
Optional arguments `start` and `end` are interpreted as slice
notation to specify the range in which to count.
Returns
-------
out : ndarray
Output array of ints.
See also
--------
str.count
Examples
--------
>>> c = np.array(['aAaAaA', ' aA ', 'abBABba'])
>>> c
array(['aAaAaA', ' aA ', 'abBABba'],
dtype='|S7')
>>> np.char.count(c, 'A')
array([3, 1, 1])
>>> np.char.count(c, 'aA')
array([3, 1, 0])
>>> np.char.count(c, 'A', start=1, end=4)
array([2, 1, 1])
>>> np.char.count(c, 'A', start=1, end=3)
array([1, 0, 0])
"""
return _vec_string(a, integer, 'count', [sub, start] + _clean_args(end))
def decode(a, encoding=None, errors=None):
"""
Calls `str.decode` element-wise.
The set of available codecs comes from the Python standard library,
and may be extended at runtime. For more information, see the
:mod:`codecs` module.
Parameters
----------
a : array_like of str or unicode
encoding : str, optional
The name of an encoding
errors : str, optional
Specifies how to handle encoding errors
Returns
-------
out : ndarray
See also
--------
str.decode
Notes
-----
The type of the result will depend on the encoding specified.
Examples
--------
>>> c = np.array(['aAaAaA', ' aA ', 'abBABba'])
>>> c
array(['aAaAaA', ' aA ', 'abBABba'],
dtype='|S7')
>>> np.char.encode(c, encoding='cp037')
array(['\\x81\\xc1\\x81\\xc1\\x81\\xc1', '@@\\x81\\xc1@@',
'\\x81\\x82\\xc2\\xc1\\xc2\\x82\\x81'],
dtype='|S7')
"""
return _to_string_or_unicode_array(
_vec_string(a, object_, 'decode', _clean_args(encoding, errors)))
def encode(a, encoding=None, errors=None):
"""
Calls `str.encode` element-wise.
The set of available codecs comes from the Python standard library,
and may be extended at runtime. For more information, see the codecs
module.
Parameters
----------
a : array_like of str or unicode
encoding : str, optional
The name of an encoding
errors : str, optional
Specifies how to handle encoding errors
Returns
-------
out : ndarray
See also
--------
str.encode
Notes
-----
The type of the result will depend on the encoding specified.
"""
return _to_string_or_unicode_array(
_vec_string(a, object_, 'encode', _clean_args(encoding, errors)))
def endswith(a, suffix, start=0, end=None):
"""
Returns a boolean array which is `True` where the string element
in `a` ends with `suffix`, otherwise `False`.
Calls `str.endswith` element-wise.
Parameters
----------
a : array_like of str or unicode
suffix : str
start, end : int, optional
With optional `start`, test beginning at that position. With
optional `end`, stop comparing at that position.
Returns
-------
out : ndarray
Outputs an array of bools.
See also
--------
str.endswith
Examples
--------
>>> s = np.array(['foo', 'bar'])
>>> s[0] = 'foo'
>>> s[1] = 'bar'
>>> s
array(['foo', 'bar'],
dtype='|S3')
>>> np.char.endswith(s, 'ar')
array([False, True], dtype=bool)
>>> np.char.endswith(s, 'a', start=1, end=2)
array([False, True], dtype=bool)
"""
return _vec_string(
a, bool_, 'endswith', [suffix, start] + _clean_args(end))
def expandtabs(a, tabsize=8):
"""
Return a copy of each string element where all tab characters are
replaced by one or more spaces.
Calls `str.expandtabs` element-wise.
Return a copy of each string element where all tab characters are
replaced by one or more spaces, depending on the current column
and the given `tabsize`. The column number is reset to zero after
each newline occurring in the string. This doesn't understand other
non-printing characters or escape sequences.
Parameters
----------
a : array_like of str or unicode
Input array
tabsize : int, optional
Replace tabs with `tabsize` number of spaces. If not given defaults
to 8 spaces.
Returns
-------
out : ndarray
Output array of str or unicode, depending on input type
See also
--------
str.expandtabs
"""
return _to_string_or_unicode_array(
_vec_string(a, object_, 'expandtabs', (tabsize,)))
def find(a, sub, start=0, end=None):
"""
For each element, return the lowest index in the string where
substring `sub` is found.
Calls `str.find` element-wise.
For each element, return the lowest index in the string where
substring `sub` is found, such that `sub` is contained in the
range [`start`, `end`].
Parameters
----------
a : array_like of str or unicode
sub : str or unicode
start, end : int, optional
Optional arguments `start` and `end` are interpreted as in
slice notation.
Returns
-------
out : ndarray or int
Output array of ints. Returns -1 if `sub` is not found.
See also
--------
str.find
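Examples
--------
A short illustration:
>>> np.char.find(np.array(['banana', 'apple']), 'an')
array([ 1, -1])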
"""
return _vec_string(
a, integer, 'find', [sub, start] + _clean_args(end))
def index(a, sub, start=0, end=None):
"""
Like `find`, but raises `ValueError` when the substring is not found.
Calls `str.index` element-wise.
Parameters
----------
a : array_like of str or unicode
sub : str or unicode
start, end : int, optional
Returns
-------
out : ndarray
Output array of ints.
See also
--------
find, str.find
"""
return _vec_string(
a, integer, 'index', [sub, start] + _clean_args(end))
def isalnum(a):
"""
Returns true for each element if all characters in the string are
alphanumeric and there is at least one character, false otherwise.
Calls `str.isalnum` element-wise.
For 8-bit strings, this method is locale-dependent.
Parameters
----------
a : array_like of str or unicode
Returns
-------
out : ndarray
Output array of str or unicode, depending on input type
See also
--------
str.isalnum
"""
return _vec_string(a, bool_, 'isalnum')
def isalpha(a):
"""
Returns true for each element if all characters in the string are
alphabetic and there is at least one character, false otherwise.
Calls `str.isalpha` element-wise.
For 8-bit strings, this method is locale-dependent.
Parameters
----------
a : array_like of str or unicode
Returns
-------
out : ndarray
Output array of bools
See also
--------
str.isalpha
"""
return _vec_string(a, bool_, 'isalpha')
def isdigit(a):
"""
Returns true for each element if all characters in the string are
digits and there is at least one character, false otherwise.
Calls `str.isdigit` element-wise.
For 8-bit strings, this method is locale-dependent.
Parameters
----------
a : array_like of str or unicode
Returns
-------
out : ndarray
Output array of bools
See also
--------
str.isdigit
"""
return _vec_string(a, bool_, 'isdigit')
def islower(a):
"""
Returns true for each element if all cased characters in the
string are lowercase and there is at least one cased character,
false otherwise.
Calls `str.islower` element-wise.
For 8-bit strings, this method is locale-dependent.
Parameters
----------
a : array_like of str or unicode
Returns
-------
out : ndarray
Output array of bools
See also
--------
str.islower
"""
return _vec_string(a, bool_, 'islower')
def isspace(a):
"""
Returns true for each element if there are only whitespace
characters in the string and there is at least one character,
false otherwise.
Calls `str.isspace` element-wise.
For 8-bit strings, this method is locale-dependent.
Parameters
----------
a : array_like of str or unicode
Returns
-------
out : ndarray
Output array of bools
See also
--------
str.isspace
"""
return _vec_string(a, bool_, 'isspace')
def istitle(a):
"""
Returns true for each element if the element is a titlecased
string and there is at least one character, false otherwise.
Call `str.istitle` element-wise.
For 8-bit strings, this method is locale-dependent.
Parameters
----------
a : array_like of str or unicode
Returns
-------
out : ndarray
Output array of bools
See also
--------
str.istitle
"""
return _vec_string(a, bool_, 'istitle')
def isupper(a):
"""
Returns true for each element if all cased characters in the
string are uppercase and there is at least one character, false
otherwise.
Call `str.isupper` element-wise.
For 8-bit strings, this method is locale-dependent.
Parameters
----------
a : array_like of str or unicode
Returns
-------
out : ndarray
Output array of bools
See also
--------
str.isupper
"""
return _vec_string(a, bool_, 'isupper')
def join(sep, seq):
"""
Return a string which is the concatenation of the strings in the
sequence `seq`.
Calls `str.join` element-wise.
Parameters
----------
sep : array_like of str or unicode
seq : array_like of str or unicode
Returns
-------
out : ndarray
Output array of str or unicode, depending on input types
See also
--------
str.join
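Examples
--------
A small sketch; `sep` and `seq` broadcast against each other
element-wise:
>>> np.char.join(['-', '.'], ['ghc', 'osd'])
array(['g-h-c', 'o.s.d'],
dtype='|S5')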
"""
return _to_string_or_unicode_array(
_vec_string(sep, object_, 'join', (seq,)))
def ljust(a, width, fillchar=' '):
"""
Return an array with the elements of `a` left-justified in a
string of length `width`.
Calls `str.ljust` element-wise.
Parameters
----------
a : array_like of str or unicode
width : int
The length of the resulting strings
fillchar : str or unicode, optional
The character to use for padding
Returns
-------
out : ndarray
Output array of str or unicode, depending on input type
See also
--------
str.ljust
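Examples
--------
For illustration (the repr's dtype field may differ by NumPy
version):
>>> np.char.ljust(np.array(['abc', 'de']), 5, fillchar='_')
array(['abc__', 'de___'],
dtype='|S5')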
"""
a_arr = numpy.asarray(a)
width_arr = numpy.asarray(width)
size = long(numpy.max(width_arr.flat))
if numpy.issubdtype(a_arr.dtype, numpy.string_):
fillchar = asbytes(fillchar)
return _vec_string(
a_arr, (a_arr.dtype.type, size), 'ljust', (width_arr, fillchar))
def lower(a):
"""
Return an array with the elements converted to lowercase.
Call `str.lower` element-wise.
For 8-bit strings, this method is locale-dependent.
Parameters
----------
a : array_like, {str, unicode}
Input array.
Returns
-------
out : ndarray, {str, unicode}
Output array of str or unicode, depending on input type
See also
--------
str.lower
Examples
--------
>>> c = np.array(['A1B C', '1BCA', 'BCA1']); c
array(['A1B C', '1BCA', 'BCA1'],
dtype='|S5')
>>> np.char.lower(c)
array(['a1b c', '1bca', 'bca1'],
dtype='|S5')
"""
a_arr = numpy.asarray(a)
return _vec_string(a_arr, a_arr.dtype, 'lower')
def lstrip(a, chars=None):
"""
For each element in `a`, return a copy with the leading characters
removed.
Calls `str.lstrip` element-wise.
Parameters
----------
a : array-like, {str, unicode}
Input array.
chars : {str, unicode}, optional
The `chars` argument is a string specifying the set of
characters to be removed. If omitted or None, the `chars`
argument defaults to removing whitespace. The `chars` argument
is not a prefix; rather, all combinations of its values are
stripped.
Returns
-------
out : ndarray, {str, unicode}
Output array of str or unicode, depending on input type
See also
--------
str.lstrip
Examples
--------
>>> c = np.array(['aAaAaA', ' aA ', 'abBABba'])
>>> c
array(['aAaAaA', ' aA ', 'abBABba'],
dtype='|S7')
The 'a' is not stripped from c[1] because of its leading whitespace.
>>> np.char.lstrip(c, 'a')
array(['AaAaA', ' aA ', 'bBABba'],
dtype='|S7')
>>> np.char.lstrip(c, 'A') # leaves c unchanged
array(['aAaAaA', ' aA ', 'abBABba'],
dtype='|S7')
>>> (np.char.lstrip(c, ' ') == np.char.lstrip(c, '')).all()
... # XXX: is this a regression? this line now returns False
... # np.char.lstrip(c,'') does not modify c at all.
True
>>> (np.char.lstrip(c, ' ') == np.char.lstrip(c, None)).all()
True
"""
a_arr = numpy.asarray(a)
return _vec_string(a_arr, a_arr.dtype, 'lstrip', (chars,))
def partition(a, sep):
"""
Partition each element in `a` around `sep`.
Calls `str.partition` element-wise.
For each element in `a`, split the element at the first
occurrence of `sep`, and return 3 strings containing the part
before the separator, the separator itself, and the part after
the separator. If the separator is not found, return 3 strings
containing the string itself, followed by two empty strings.
Parameters
----------
a : array_like, {str, unicode}
Input array
sep : {str, unicode}
Separator to split each string element in `a`.
Returns
-------
out : ndarray, {str, unicode}
Output array of str or unicode, depending on input type.
The output array will have an extra dimension with 3
elements per input element.
See also
--------
str.partition
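Examples
--------
A sketch showing the extra trailing dimension of size 3, and the
two empty strings produced when `sep` is absent:
>>> x = np.array(['Linux rocks', 'NoSep'])
>>> np.char.partition(x, ' ')
array([['Linux', ' ', 'rocks'],
['NoSep', '', '']],
dtype='|S5')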
"""
return _to_string_or_unicode_array(
_vec_string(a, object_, 'partition', (sep,)))
def replace(a, old, new, count=None):
"""
For each element in `a`, return a copy of the string with all
occurrences of substring `old` replaced by `new`.
Calls `str.replace` element-wise.
Parameters
----------
a : array-like of str or unicode
old, new : str or unicode
count : int, optional
If the optional argument `count` is given, only the first
`count` occurrences are replaced.
Returns
-------
out : ndarray
Output array of str or unicode, depending on input type
See also
--------
str.replace
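Examples
--------
An illustrative case; pass `count` to cap replacements per
element:
>>> a = np.array(['That is a mango', 'Monkeys eat mangoes'])
>>> np.char.replace(a, 'mango', 'banana')
array(['That is a banana', 'Monkeys eat bananaes'],
dtype='|S20')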
"""
return _to_string_or_unicode_array(
_vec_string(
a, object_, 'replace', [old, new] + _clean_args(count)))
def rfind(a, sub, start=0, end=None):
"""
For each element in `a`, return the highest index in the string
where substring `sub` is found, such that `sub` is contained
within [`start`, `end`].
Calls `str.rfind` element-wise.
Parameters
----------
a : array-like of str or unicode
sub : str or unicode
start, end : int, optional
Optional arguments `start` and `end` are interpreted as in
slice notation.
Returns
-------
out : ndarray
Output array of ints. Returns -1 on failure.
See also
--------
str.rfind
"""
return _vec_string(
a, integer, 'rfind', [sub, start] + _clean_args(end))
def rindex(a, sub, start=0, end=None):
"""
Like `rfind`, but raises `ValueError` when the substring `sub` is
not found.
Calls `str.rindex` element-wise.
Parameters
----------
a : array-like of str or unicode
sub : str or unicode
start, end : int, optional
Returns
-------
out : ndarray
Output array of ints.
See also
--------
rfind, str.rindex
"""
return _vec_string(
a, integer, 'rindex', [sub, start] + _clean_args(end))
def rjust(a, width, fillchar=' '):
"""
Return an array with the elements of `a` right-justified in a
string of length `width`.
Calls `str.rjust` element-wise.
Parameters
----------
a : array_like of str or unicode
width : int
The length of the resulting strings
fillchar : str or unicode, optional
The character to use for padding
Returns
-------
out : ndarray
Output array of str or unicode, depending on input type
See also
--------
str.rjust
"""
a_arr = numpy.asarray(a)
width_arr = numpy.asarray(width)
size = long(numpy.max(width_arr.flat))
if numpy.issubdtype(a_arr.dtype, numpy.string_):
fillchar = asbytes(fillchar)
return _vec_string(
a_arr, (a_arr.dtype.type, size), 'rjust', (width_arr, fillchar))
def rpartition(a, sep):
"""
Partition (split) each element around the right-most separator.
Calls `str.rpartition` element-wise.
For each element in `a`, split the element at the last
occurrence of `sep`, and return 3 strings containing the part
before the separator, the separator itself, and the part after
the separator. If the separator is not found, return 3 strings
containing the string itself, followed by two empty strings.
Parameters
----------
a : array_like of str or unicode
Input array
sep : str or unicode
Right-most separator to split each element in array.
Returns
-------
out : ndarray
Output array of string or unicode, depending on input
type. The output array will have an extra dimension with
3 elements per input element.
See also
--------
str.rpartition
"""
return _to_string_or_unicode_array(
_vec_string(a, object_, 'rpartition', (sep,)))
def rsplit(a, sep=None, maxsplit=None):
"""
For each element in `a`, return a list of the words in the
string, using `sep` as the delimiter string.
Calls `str.rsplit` element-wise.
Except for splitting from the right, `rsplit`
behaves like `split`.
Parameters
----------
a : array_like of str or unicode
sep : str or unicode, optional
If `sep` is not specified or `None`, any whitespace string
is a separator.
maxsplit : int, optional
If `maxsplit` is given, at most `maxsplit` splits are done,
the rightmost ones.
Returns
-------
out : ndarray
Array of list objects
See also
--------
str.rsplit, split
"""
# This will return an array of lists of different sizes, so we
# leave it as an object array
return _vec_string(
a, object_, 'rsplit', [sep] + _clean_args(maxsplit))
def rstrip(a, chars=None):
"""
For each element in `a`, return a copy with the trailing
characters removed.
Calls `str.rstrip` element-wise.
Parameters
----------
a : array-like of str or unicode
chars : str or unicode, optional
The `chars` argument is a string specifying the set of
characters to be removed. If omitted or None, the `chars`
argument defaults to removing whitespace. The `chars` argument
is not a suffix; rather, all combinations of its values are
stripped.
Returns
-------
out : ndarray
Output array of str or unicode, depending on input type
See also
--------
str.rstrip
Examples
--------
>>> c = np.array(['aAaAaA', 'abBABba'], dtype='S7'); c
array(['aAaAaA', 'abBABba'],
dtype='|S7')
>>> np.char.rstrip(c, 'a')
array(['aAaAaA', 'abBABb'],
dtype='|S7')
>>> np.char.rstrip(c, 'A')
array(['aAaAa', 'abBABba'],
dtype='|S7')
"""
a_arr = numpy.asarray(a)
return _vec_string(a_arr, a_arr.dtype, 'rstrip', (chars,))
def split(a, sep=None, maxsplit=None):
"""
For each element in `a`, return a list of the words in the
string, using `sep` as the delimiter string.
Calls `str.split` element-wise.
Parameters
----------
a : array_like of str or unicode
sep : str or unicode, optional
If `sep` is not specified or `None`, any whitespace string is a
separator.
maxsplit : int, optional
If `maxsplit` is given, at most `maxsplit` splits are done.
Returns
-------
out : ndarray
Array of list objects
See also
--------
str.split, rsplit
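Examples
--------
A sketch; since the per-element lists may have different lengths,
the result is an object array holding Python lists:
>>> x = np.array(['a b c', 'd e'])
>>> np.char.split(x)
array([['a', 'b', 'c'], ['d', 'e']], dtype=object)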
"""
# This will return an array of lists of different sizes, so we
# leave it as an object array
return _vec_string(
a, object_, 'split', [sep] + _clean_args(maxsplit))
def splitlines(a, keepends=None):
"""
For each element in `a`, return a list of the lines in the
element, breaking at line boundaries.
Calls `str.splitlines` element-wise.
Parameters
----------
a : array_like of str or unicode
keepends : bool, optional
Line breaks are not included in the resulting list unless
keepends is given and true.
Returns
-------
out : ndarray
Array of list objects
See also
--------
str.splitlines
"""
return _vec_string(
a, object_, 'splitlines', _clean_args(keepends))
def startswith(a, prefix, start=0, end=None):
"""
Returns a boolean array which is `True` where the string element
in `a` starts with `prefix`, otherwise `False`.
Calls `str.startswith` element-wise.
Parameters
----------
a : array_like of str or unicode
prefix : str
start, end : int, optional
With optional `start`, test beginning at that position. With
optional `end`, stop comparing at that position.
Returns
-------
out : ndarray
Array of booleans
See also
--------
str.startswith
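Examples
--------
A quick sketch, mirroring the `endswith` example earlier in this
module:
>>> s = np.array(['foo', 'bar'])
>>> np.char.startswith(s, 'fo')
array([ True, False], dtype=bool)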
"""
return _vec_string(
a, bool_, 'startswith', [prefix, start] + _clean_args(end))
def strip(a, chars=None):
"""
For each element in `a`, return a copy with the leading and
trailing characters removed.
Calls `str.strip` element-wise.
Parameters
----------
a : array-like of str or unicode
chars : str or unicode, optional
The `chars` argument is a string specifying the set of
characters to be removed. If omitted or None, the `chars`
argument defaults to removing whitespace. The `chars` argument
is not a prefix or suffix; rather, all combinations of its
values are stripped.
Returns
-------
out : ndarray
Output array of str or unicode, depending on input type
See also
--------
str.strip
Examples
--------
>>> c = np.array(['aAaAaA', ' aA ', 'abBABba'])
>>> c
array(['aAaAaA', ' aA ', 'abBABba'],
dtype='|S7')
>>> np.char.strip(c)
array(['aAaAaA', 'aA', 'abBABba'],
dtype='|S7')
>>> np.char.strip(c, 'a') # 'a' unstripped from c[1] because whitespace leads
array(['AaAaA', ' aA ', 'bBABb'],
dtype='|S7')
>>> np.char.strip(c, 'A') # 'A' unstripped from c[1] because (unprinted) ws trails
array(['aAaAa', ' aA ', 'abBABba'],
dtype='|S7')
"""
a_arr = numpy.asarray(a)
return _vec_string(a_arr, a_arr.dtype, 'strip', _clean_args(chars))
def swapcase(a):
"""
Return element-wise a copy of the string with
uppercase characters converted to lowercase and vice versa.
Calls `str.swapcase` element-wise.
For 8-bit strings, this method is locale-dependent.
Parameters
----------
a : array_like, {str, unicode}
Input array.
Returns
-------
out : ndarray, {str, unicode}
Output array of str or unicode, depending on input type
See also
--------
str.swapcase
Examples
--------
>>> c=np.array(['a1B c','1b Ca','b Ca1','cA1b'],'S5'); c
array(['a1B c', '1b Ca', 'b Ca1', 'cA1b'],
dtype='|S5')
>>> np.char.swapcase(c)
array(['A1b C', '1B cA', 'B cA1', 'Ca1B'],
dtype='|S5')
"""
a_arr = numpy.asarray(a)
return _vec_string(a_arr, a_arr.dtype, 'swapcase')
def title(a):
"""
Return element-wise title cased version of string or unicode.
Title case words start with uppercase characters; all remaining
cased characters are lowercase.
Calls `str.title` element-wise.
For 8-bit strings, this method is locale-dependent.
Parameters
----------
a : array_like, {str, unicode}
Input array.
Returns
-------
out : ndarray
Output array of str or unicode, depending on input type
See also
--------
str.title
Examples
--------
>>> c=np.array(['a1b c','1b ca','b ca1','ca1b'],'S5'); c
array(['a1b c', '1b ca', 'b ca1', 'ca1b'],
dtype='|S5')
>>> np.char.title(c)
array(['A1B C', '1B Ca', 'B Ca1', 'Ca1B'],
dtype='|S5')
"""
a_arr = numpy.asarray(a)
return _vec_string(a_arr, a_arr.dtype, 'title')
def translate(a, table, deletechars=None):
"""
For each element in `a`, return a copy of the string where all
characters occurring in the optional argument `deletechars` are
removed, and the remaining characters have been mapped through the
given translation table.
Calls `str.translate` element-wise.
Parameters
----------
a : array-like of str or unicode
table : str of length 256
deletechars : str
Returns
-------
out : ndarray
Output array of str or unicode, depending on input type
See also
--------
str.translate
"""
a_arr = numpy.asarray(a)
if issubclass(a_arr.dtype.type, unicode_):
return _vec_string(
a_arr, a_arr.dtype, 'translate', (table,))
else:
return _vec_string(
a_arr, a_arr.dtype, 'translate', [table] + _clean_args(deletechars))
def upper(a):
"""
Return an array with the elements converted to uppercase.
Calls `str.upper` element-wise.
For 8-bit strings, this method is locale-dependent.
Parameters
----------
a : array_like, {str, unicode}
Input array.
Returns
-------
out : ndarray, {str, unicode}
Output array of str or unicode, depending on input type
See also
--------
str.upper
Examples
--------
>>> c = np.array(['a1b c', '1bca', 'bca1']); c
array(['a1b c', '1bca', 'bca1'],
dtype='|S5')
>>> np.char.upper(c)
array(['A1B C', '1BCA', 'BCA1'],
dtype='|S5')
"""
a_arr = numpy.asarray(a)
return _vec_string(a_arr, a_arr.dtype, 'upper')
def zfill(a, width):
"""
Return the numeric string left-filled with zeros.
Calls `str.zfill` element-wise.
Parameters
----------
a : array_like, {str, unicode}
Input array.
width : int
Width of string to left-fill elements in `a`.
Returns
-------
out : ndarray, {str, unicode}
Output array of str or unicode, depending on input type
See also
--------
str.zfill
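Examples
--------
For illustration; a leading sign is kept in front of the fill,
as with `str.zfill`:
>>> np.char.zfill(np.array(['5', '-5']), 3)
array(['005', '-05'],
dtype='|S3')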
"""
a_arr = numpy.asarray(a)
width_arr = numpy.asarray(width)
size = long(numpy.max(width_arr.flat))
return _vec_string(
a_arr, (a_arr.dtype.type, size), 'zfill', (width_arr,))
def isnumeric(a):
"""
For each element, return True if there are only numeric
characters in the element.
Calls `unicode.isnumeric` element-wise.
Numeric characters include digit characters, and all characters
that have the Unicode numeric value property, e.g. ``U+2155,
VULGAR FRACTION ONE FIFTH``.
Parameters
----------
a : array_like, unicode
Input array.
Returns
-------
out : ndarray, bool
Array of booleans of same shape as `a`.
See also
--------
unicode.isnumeric
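Examples
--------
A sketch; the input must already be unicode, since byte-string
input raises the TypeError below:
>>> np.char.isnumeric(np.array([u'123', u'12a']))
array([ True, False], dtype=bool)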
"""
if _use_unicode(a) != unicode_:
raise TypeError("isnumeric is only available for Unicode strings and arrays")
return _vec_string(a, bool_, 'isnumeric')
def isdecimal(a):
"""
For each element, return True if there are only decimal
characters in the element.
Calls `unicode.isdecimal` element-wise.
Decimal characters include digit characters, and all characters
that can be used to form decimal-radix numbers,
e.g. ``U+0660, ARABIC-INDIC DIGIT ZERO``.
Parameters
----------
a : array_like, unicode
Input array.
Returns
-------
out : ndarray, bool
Array of booleans identical in shape to `a`.
See also
--------
unicode.isdecimal
"""
if _use_unicode(a) != unicode_:
raise TypeError("isnumeric is only available for Unicode strings and arrays")
return _vec_string(a, bool_, 'isdecimal')
class chararray(ndarray):
"""
chararray(shape, itemsize=1, unicode=False, buffer=None, offset=0,
strides=None, order=None)
Provides a convenient view on arrays of string and unicode values.
.. note::
The `chararray` class exists for backwards compatibility with
Numarray, it is not recommended for new development. Starting from numpy
1.4, if one needs arrays of strings, it is recommended to use arrays of
`dtype` `object_`, `string_` or `unicode_`, and use the free functions
in the `numpy.char` module for fast vectorized string operations.
Versus a regular NumPy array of type `str` or `unicode`, this
class adds the following functionality:
1) values automatically have whitespace removed from the end
when indexed
2) comparison operators automatically remove whitespace from the
end when comparing values
3) vectorized string operations are provided as methods
(e.g. `.endswith`) and infix operators (e.g. ``"+", "*", "%"``)
chararrays should be created using `numpy.char.array` or
`numpy.char.asarray`, rather than this constructor directly.
This constructor creates the array, using `buffer` (with `offset`
and `strides`) if it is not ``None``. If `buffer` is ``None``, then
constructs a new array with `strides` in "C order", unless both
``len(shape) >= 2`` and ``order='Fortran'``, in which case `strides`
is in "Fortran order".
Methods
-------
astype
argsort
copy
count
decode
dump
dumps
encode
endswith
expandtabs
fill
find
flatten
getfield
index
isalnum
isalpha
isdecimal
isdigit
islower
isnumeric
isspace
istitle
isupper
item
join
ljust
lower
lstrip
nonzero
put
ravel
repeat
replace
reshape
resize
rfind
rindex
rjust
rsplit
rstrip
searchsorted
setfield
setflags
sort
split
splitlines
squeeze
startswith
strip
swapaxes
swapcase
take
title
tofile
tolist
tostring
translate
transpose
upper
view
zfill
Parameters
----------
shape : tuple
Shape of the array.
itemsize : int, optional
Length of each array element, in number of characters. Default is 1.
unicode : bool, optional
Are the array elements of type unicode (True) or string (False).
Default is False.
buffer : int, optional
Memory address of the start of the array data. Default is None,
in which case a new array is created.
offset : int, optional
Offset (in bytes) from the start of the memory referenced by
`buffer` at which the array data begins. Default is 0. Needs
to be >= 0.
strides : array_like of ints, optional
Strides for the array (see `ndarray.strides` for full description).
Default is None.
order : {'C', 'F'}, optional
The order in which the array data is stored in memory: 'C' ->
"row major" order (the default), 'F' -> "column major"
(Fortran) order.
Examples
--------
>>> charar = np.chararray((3, 3))
>>> charar[:] = 'a'
>>> charar
chararray([['a', 'a', 'a'],
['a', 'a', 'a'],
['a', 'a', 'a']],
dtype='|S1')
>>> charar = np.chararray(charar.shape, itemsize=5)
>>> charar[:] = 'abc'
>>> charar
chararray([['abc', 'abc', 'abc'],
['abc', 'abc', 'abc'],
['abc', 'abc', 'abc']],
dtype='|S5')
"""
def __new__(subtype, shape, itemsize=1, unicode=False, buffer=None,
offset=0, strides=None, order='C'):
global _globalvar
if unicode:
dtype = unicode_
else:
dtype = string_
# force itemsize to be a Python long, since using NumPy integer
# types results in itemsize.itemsize being used as the size of
# strings in the new array.
itemsize = long(itemsize)
if sys.version_info[0] >= 3 and isinstance(buffer, _unicode):
# On Py3, unicode objects do not have the buffer interface
filler = buffer
buffer = None
else:
filler = None
_globalvar = 1
if buffer is None:
self = ndarray.__new__(subtype, shape, (dtype, itemsize),
order=order)
else:
self = ndarray.__new__(subtype, shape, (dtype, itemsize),
buffer=buffer,
offset=offset, strides=strides,
order=order)
if filler is not None:
self[...] = filler
_globalvar = 0
return self
def __array_finalize__(self, obj):
# The 'b' dtype char is a special case because it is used when
# reconstructing (unpickling) existing arrays.
if not _globalvar and self.dtype.char not in 'SUbc':
raise ValueError("Can only create a chararray from string data.")
def __getitem__(self, obj):
val = ndarray.__getitem__(self, obj)
if isinstance(val, character):
temp = val.rstrip()
if _len(temp) == 0:
val = ''
else:
val = temp
return val
# IMPLEMENTATION NOTE: Most of the methods of this class are
# direct delegations to the free functions in this module.
# However, those that return an array of strings should instead
# return a chararray, so some extra wrapping is required.
def __eq__(self, other):
"""
Return (self == other) element-wise.
See also
--------
equal
"""
return equal(self, other)
def __ne__(self, other):
"""
Return (self != other) element-wise.
See also
--------
not_equal
"""
return not_equal(self, other)
def __ge__(self, other):
"""
Return (self >= other) element-wise.
See also
--------
greater_equal
"""
return greater_equal(self, other)
def __le__(self, other):
"""
Return (self <= other) element-wise.
See also
--------
less_equal
"""
return less_equal(self, other)
def __gt__(self, other):
"""
Return (self > other) element-wise.
See also
--------
greater
"""
return greater(self, other)
def __lt__(self, other):
"""
Return (self < other) element-wise.
See also
--------
less
"""
return less(self, other)
def __add__(self, other):
"""
Return (self + other), that is string concatenation,
element-wise for a pair of array_likes of str or unicode.
See also
--------
add
"""
return asarray(add(self, other))
def __radd__(self, other):
"""
Return (other + self), that is string concatenation,
element-wise for a pair of array_likes of `string_` or `unicode_`.
See also
--------
add
"""
return asarray(add(numpy.asarray(other), self))
def __mul__(self, i):
"""
Return (self * i), that is string multiple concatenation,
element-wise.
See also
--------
multiply
"""
return asarray(multiply(self, i))
def __rmul__(self, i):
"""
Return (self * i), that is string multiple concatenation,
element-wise.
See also
--------
multiply
"""
return asarray(multiply(self, i))
def __mod__(self, i):
"""
Return (self % i), that is pre-Python 2.6 string formatting
(interpolation), element-wise for a pair of array_likes of `string_`
or `unicode_`.
See also
--------
mod
"""
return asarray(mod(self, i))
def __rmod__(self, other):
return NotImplemented
def argsort(self, axis=-1, kind='quicksort', order=None):
"""
Return the indices that sort the array lexicographically.
For full documentation see `numpy.argsort`, for which this method is
in fact merely a "thin wrapper."
Examples
--------
>>> c = np.array(['a1b c', '1b ca', 'b ca1', 'Ca1b'], 'S5')
>>> c = c.view(np.chararray); c
chararray(['a1b c', '1b ca', 'b ca1', 'Ca1b'],
dtype='|S5')
>>> c[c.argsort()]
chararray(['1b ca', 'Ca1b', 'a1b c', 'b ca1'],
dtype='|S5')
"""
return self.__array__().argsort(axis, kind, order)
argsort.__doc__ = ndarray.argsort.__doc__
def capitalize(self):
"""
Return a copy of `self` with only the first character of each element
capitalized.
See also
--------
char.capitalize
"""
return asarray(capitalize(self))
def center(self, width, fillchar=' '):
"""
Return a copy of `self` with its elements centered in a
string of length `width`.
See also
--------
center
"""
return asarray(center(self, width, fillchar))
def count(self, sub, start=0, end=None):
"""
Returns an array with the number of non-overlapping occurrences of
substring `sub` in the range [`start`, `end`].
See also
--------
char.count
"""
return count(self, sub, start, end)
def decode(self, encoding=None, errors=None):
"""
Calls `str.decode` element-wise.
See also
--------
char.decode
"""
return decode(self, encoding, errors)
def encode(self, encoding=None, errors=None):
"""
Calls `str.encode` element-wise.
See also
--------
char.encode
"""
return encode(self, encoding, errors)
def endswith(self, suffix, start=0, end=None):
"""
Returns a boolean array which is `True` where the string element
in `self` ends with `suffix`, otherwise `False`.
See also
--------
char.endswith
"""
return endswith(self, suffix, start, end)
def expandtabs(self, tabsize=8):
"""
Return a copy of each string element where all tab characters are
replaced by one or more spaces.
See also
--------
char.expandtabs
"""
return asarray(expandtabs(self, tabsize))
def find(self, sub, start=0, end=None):
"""
For each element, return the lowest index in the string where
substring `sub` is found.
See also
--------
char.find
"""
return find(self, sub, start, end)
def index(self, sub, start=0, end=None):
"""
Like `find`, but raises `ValueError` when the substring is not found.
See also
--------
char.index
"""
return index(self, sub, start, end)
def isalnum(self):
"""
Returns true for each element if all characters in the string
are alphanumeric and there is at least one character, false
otherwise.
See also
--------
char.isalnum
"""
return isalnum(self)
def isalpha(self):
"""
Returns true for each element if all characters in the string
are alphabetic and there is at least one character, false
otherwise.
See also
--------
char.isalpha
"""
return isalpha(self)
def isdigit(self):
"""
Returns true for each element if all characters in the string are
digits and there is at least one character, false otherwise.
See also
--------
char.isdigit
"""
return isdigit(self)
def islower(self):
"""
Returns true for each element if all cased characters in the
string are lowercase and there is at least one cased character,
false otherwise.
See also
--------
char.islower
"""
return islower(self)
def isspace(self):
"""
Returns true for each element if there are only whitespace
characters in the string and there is at least one character,
false otherwise.
See also
--------
char.isspace
"""
return isspace(self)
def istitle(self):
"""
Returns true for each element if the element is a titlecased
string and there is at least one character, false otherwise.
See also
--------
char.istitle
"""
return istitle(self)
def isupper(self):
"""
Returns true for each element if all cased characters in the
string are uppercase and there is at least one character, false
otherwise.
See also
--------
char.isupper
"""
return isupper(self)
def join(self, seq):
"""
Return a string which is the concatenation of the strings in the
sequence `seq`.
See also
--------
char.join
"""
return join(self, seq)
def ljust(self, width, fillchar=' '):
"""
Return an array with the elements of `self` left-justified in a
string of length `width`.
See also
--------
char.ljust
"""
return asarray(ljust(self, width, fillchar))
def lower(self):
"""
Return an array with the elements of `self` converted to
lowercase.
See also
--------
char.lower
"""
return asarray(lower(self))
def lstrip(self, chars=None):
"""
For each element in `self`, return a copy with the leading characters
removed.
See also
--------
char.lstrip
"""
return asarray(lstrip(self, chars))
def partition(self, sep):
"""
Partition each element in `self` around `sep`.
See also
--------
partition
"""
return asarray(partition(self, sep))
def replace(self, old, new, count=None):
"""
For each element in `self`, return a copy of the string with all
occurrences of substring `old` replaced by `new`.
See also
--------
char.replace
"""
return asarray(replace(self, old, new, count))
def rfind(self, sub, start=0, end=None):
"""
For each element in `self`, return the highest index in the string
where substring `sub` is found, such that `sub` is contained
within [`start`, `end`].
See also
--------
char.rfind
"""
return rfind(self, sub, start, end)
def rindex(self, sub, start=0, end=None):
"""
Like `rfind`, but raises `ValueError` when the substring `sub` is
not found.
See also
--------
char.rindex
"""
return rindex(self, sub, start, end)
def rjust(self, width, fillchar=' '):
"""
Return an array with the elements of `self`
right-justified in a string of length `width`.
See also
--------
char.rjust
"""
return asarray(rjust(self, width, fillchar))
def rpartition(self, sep):
"""
Partition each element in `self` around `sep`.
See also
--------
rpartition
"""
return asarray(rpartition(self, sep))
def rsplit(self, sep=None, maxsplit=None):
"""
For each element in `self`, return a list of the words in
the string, using `sep` as the delimiter string.
See also
--------
char.rsplit
"""
return rsplit(self, sep, maxsplit)
def rstrip(self, chars=None):
"""
For each element in `self`, return a copy with the trailing
characters removed.
See also
--------
char.rstrip
"""
return asarray(rstrip(self, chars))
def split(self, sep=None, maxsplit=None):
"""
For each element in `self`, return a list of the words in the
string, using `sep` as the delimiter string.
See also
--------
char.split
"""
return split(self, sep, maxsplit)
def splitlines(self, keepends=None):
"""
For each element in `self`, return a list of the lines in the
element, breaking at line boundaries.
See also
--------
char.splitlines
"""
return splitlines(self, keepends)
def startswith(self, prefix, start=0, end=None):
"""
Returns a boolean array which is `True` where the string element
in `self` starts with `prefix`, otherwise `False`.
See also
--------
char.startswith
"""
return startswith(self, prefix, start, end)
def strip(self, chars=None):
"""
For each element in `self`, return a copy with the leading and
trailing characters removed.
See also
--------
char.strip
"""
return asarray(strip(self, chars))
def swapcase(self):
"""
For each element in `self`, return a copy of the string with
uppercase characters converted to lowercase and vice versa.
See also
--------
char.swapcase
"""
return asarray(swapcase(self))
def title(self):
"""
For each element in `self`, return a titlecased version of the
string: words start with uppercase characters; all remaining cased
characters are lowercase.
See also
--------
char.title
"""
return asarray(title(self))
def translate(self, table, deletechars=None):
"""
For each element in `self`, return a copy of the string where
all characters occurring in the optional argument
`deletechars` are removed, and the remaining characters have
been mapped through the given translation table.
See also
--------
char.translate
"""
return asarray(translate(self, table, deletechars))
def upper(self):
"""
Return an array with the elements of `self` converted to
uppercase.
See also
--------
char.upper
"""
return asarray(upper(self))
def zfill(self, width):
"""
Return the numeric string left-filled with zeros in a string of
length `width`.
See also
--------
char.zfill
"""
return asarray(zfill(self, width))
def isnumeric(self):
"""
For each element in `self`, return True if there are only
numeric characters in the element.
See also
--------
char.isnumeric
"""
return isnumeric(self)
def isdecimal(self):
"""
For each element in `self`, return True if there are only
decimal characters in the element.
See also
--------
char.isdecimal
"""
return isdecimal(self)
def array(obj, itemsize=None, copy=True, unicode=None, order=None):
"""
Create a `chararray`.
.. note::
This class is provided for numarray backward-compatibility.
New code (not concerned with numarray compatibility) should use
arrays of type `string_` or `unicode_` and use the free functions
in :mod:`numpy.char <numpy.core.defchararray>` for fast
vectorized string operations instead.
Versus a regular NumPy array of type `str` or `unicode`, this
class adds the following functionality:
1) values automatically have whitespace removed from the end
when indexed
2) comparison operators automatically remove whitespace from the
end when comparing values
3) vectorized string operations are provided as methods
(e.g. `str.endswith`) and infix operators (e.g. ``+, *, %``)
Parameters
----------
obj : array of str or unicode-like
itemsize : int, optional
`itemsize` is the number of characters per scalar in the
resulting array. If `itemsize` is None, and `obj` is an
object array or a Python list, the `itemsize` will be
automatically determined. If `itemsize` is provided and `obj`
is of type str or unicode, then the `obj` string will be
chunked into `itemsize` pieces.
copy : bool, optional
If true (default), then the object is copied. Otherwise, a copy
will only be made if __array__ returns a copy, if obj is a
nested sequence, or if a copy is needed to satisfy any of the other
requirements (`itemsize`, unicode, `order`, etc.).
unicode : bool, optional
When true, the resulting `chararray` can contain Unicode
characters, when false only 8-bit characters. If unicode is
`None` and `obj` is one of the following:
- a `chararray`,
- an ndarray of type `str` or `unicode`
- a Python str or unicode object,
then the unicode setting of the output array will be
automatically determined.
order : {'C', 'F', 'A'}, optional
Specify the order of the array. If order is 'C' (default), then the
array will be in C-contiguous order (last-index varies the
fastest). If order is 'F', then the returned array
will be in Fortran-contiguous order (first-index varies the
fastest). If order is 'A', then the returned array may
be in any order (either C-, Fortran-contiguous, or even
discontiguous).
"""
if isinstance(obj, (_bytes, _unicode)):
if unicode is None:
if isinstance(obj, _unicode):
unicode = True
else:
unicode = False
if itemsize is None:
itemsize = _len(obj)
shape = _len(obj) // itemsize
if unicode:
if sys.maxunicode == 0xffff:
# On a narrow Python build, the buffer for Unicode
# strings is UCS2, which doesn't match the buffer for
# NumPy Unicode types, which is ALWAYS UCS4.
# Therefore, we need to convert the buffer. On Python
# 2.6 and later, we can use the utf_32 codec. Earlier
# versions don't have that codec, so we convert to a
# numerical array that matches the input buffer, and
# then use NumPy to convert it to UCS4. All of this
# should happen in native endianness.
obj = obj.encode('utf_32')
else:
obj = _unicode(obj)
else:
# Let the default Unicode -> string encoding (if any) take
# precedence.
obj = _bytes(obj)
return chararray(shape, itemsize=itemsize, unicode=unicode,
buffer=obj, order=order)
if isinstance(obj, (list, tuple)):
obj = numpy.asarray(obj)
if isinstance(obj, ndarray) and issubclass(obj.dtype.type, character):
# If we just have a vanilla chararray, create a chararray
# view around it.
if not isinstance(obj, chararray):
obj = obj.view(chararray)
if itemsize is None:
itemsize = obj.itemsize
# itemsize is in 8-bit chars, so for Unicode, we need
# to divide by the size of a single Unicode character,
# which for NumPy is always 4
if issubclass(obj.dtype.type, unicode_):
itemsize //= 4
if unicode is None:
if issubclass(obj.dtype.type, unicode_):
unicode = True
else:
unicode = False
if unicode:
dtype = unicode_
else:
dtype = string_
if order is not None:
obj = numpy.asarray(obj, order=order)
if (copy or
(itemsize != obj.itemsize) or
(not unicode and isinstance(obj, unicode_)) or
(unicode and isinstance(obj, string_))):
obj = obj.astype((dtype, long(itemsize)))
return obj
if isinstance(obj, ndarray) and issubclass(obj.dtype.type, object):
if itemsize is None:
# Since no itemsize was specified, convert the input array to
# a list so the ndarray constructor will automatically
# determine the itemsize for us.
obj = obj.tolist()
# Fall through to the default case
if unicode:
dtype = unicode_
else:
dtype = string_
if itemsize is None:
val = narray(obj, dtype=dtype, order=order, subok=True)
else:
val = narray(obj, dtype=(dtype, itemsize), order=order, subok=True)
return val.view(chararray)
def asarray(obj, itemsize=None, unicode=None, order=None):
"""
Convert the input to a `chararray`, copying the data only if
necessary.
Versus a regular NumPy array of type `str` or `unicode`, this
class adds the following functionality:
1) values automatically have whitespace removed from the end
when indexed
2) comparison operators automatically remove whitespace from the
end when comparing values
3) vectorized string operations are provided as methods
(e.g. `str.endswith`) and infix operators (e.g. ``+``, ``*``, ``%``)
Parameters
----------
obj : array of str or unicode-like
itemsize : int, optional
`itemsize` is the number of characters per scalar in the
resulting array. If `itemsize` is None, and `obj` is an
object array or a Python list, the `itemsize` will be
automatically determined. If `itemsize` is provided and `obj`
is of type str or unicode, then the `obj` string will be
chunked into `itemsize` pieces.
unicode : bool, optional
When true, the resulting `chararray` can contain Unicode
characters, when false only 8-bit characters. If unicode is
`None` and `obj` is one of the following:
- a `chararray`,
- an ndarray of type `str` or `unicode`
- a Python str or unicode object,
then the unicode setting of the output array will be
automatically determined.
order : {'C', 'F'}, optional
Specify the order of the array. If order is 'C' (default), then the
array will be in C-contiguous order (last-index varies the
fastest). If order is 'F', then the returned array
will be in Fortran-contiguous order (first-index varies the
fastest).
"""
return array(obj, itemsize, copy=False,
unicode=unicode, order=order)
| bsd-3-clause |
tangfeixiong/nova | nova/api/openstack/compute/plugins/v3/cloudpipe.py | 25 | 7410 | # Copyright 2011 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Connect your vlan to the world."""
from oslo_config import cfg
from oslo_utils import timeutils
from webob import exc
from nova.api.openstack.compute.schemas.v3 import cloudpipe
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova.api import validation
from nova.cloudpipe import pipelib
from nova import compute
from nova.compute import utils as compute_utils
from nova.compute import vm_states
from nova import exception
from nova.i18n import _
from nova import network
from nova import objects
from nova.openstack.common import fileutils
from nova import utils
CONF = cfg.CONF
CONF.import_opt('keys_path', 'nova.crypto')
ALIAS = 'os-cloudpipe'
authorize = extensions.os_compute_authorizer(ALIAS)
class CloudpipeController(wsgi.Controller):
"""Handle creating and listing cloudpipe instances."""
def __init__(self):
self.compute_api = compute.API(skip_policy_check=True)
self.network_api = network.API(skip_policy_check=True)
self.cloudpipe = pipelib.CloudPipe(skip_policy_check=True)
self.setup()
def setup(self):
"""Ensure the keychains and folders exist."""
# NOTE(vish): One of the drawbacks of doing this in the api is
# the keys will only be on the api node that launched
# the cloudpipe.
fileutils.ensure_tree(CONF.keys_path)
def _get_all_cloudpipes(self, context):
"""Get all cloudpipes."""
instances = self.compute_api.get_all(context,
search_opts={'deleted': False},
want_objects=True)
return [instance for instance in instances
if pipelib.is_vpn_image(instance.image_ref)
and instance.vm_state != vm_states.DELETED]
def _get_cloudpipe_for_project(self, context):
"""Get the cloudpipe instance for a project from context."""
cloudpipes = self._get_all_cloudpipes(context) or [None]
return cloudpipes[0]
def _vpn_dict(self, context, project_id, instance):
elevated = context.elevated()
rv = {'project_id': project_id}
if not instance:
rv['state'] = 'pending'
return rv
rv['instance_id'] = instance.uuid
rv['created_at'] = timeutils.isotime(instance.created_at)
nw_info = compute_utils.get_nw_info_for_instance(instance)
if not nw_info:
return rv
vif = nw_info[0]
ips = [ip for ip in vif.fixed_ips() if ip['version'] == 4]
if ips:
rv['internal_ip'] = ips[0]['address']
# NOTE(vish): Currently network_api.get does an owner check on
# project_id. This is probably no longer necessary
# but rather than risk changes in the db layer,
# we are working around it here by changing the
# project_id in the context. This can be removed
# if we remove the project_id check in the db.
elevated.project_id = project_id
network = self.network_api.get(elevated, vif['network']['id'])
if network:
vpn_ip = network['vpn_public_address']
vpn_port = network['vpn_public_port']
rv['public_ip'] = vpn_ip
rv['public_port'] = vpn_port
if vpn_ip and vpn_port:
if utils.vpn_ping(vpn_ip, vpn_port):
rv['state'] = 'running'
else:
rv['state'] = 'down'
else:
rv['state'] = 'invalid'
return rv
@extensions.expected_errors((400, 403))
@validation.schema(cloudpipe.create)
def create(self, req, body):
"""Create a new cloudpipe instance, if none exists.
Parameters: {cloudpipe: {'project_id': ''}}
"""
context = req.environ['nova.context']
authorize(context)
params = body.get('cloudpipe', {})
project_id = params.get('project_id', context.project_id)
# NOTE(vish): downgrade to project context. Note that we keep
# the same token so we can still talk to glance
context.project_id = project_id
context.user_id = 'project-vpn'
context.is_admin = False
context.roles = []
instance = self._get_cloudpipe_for_project(context)
if not instance:
try:
result = self.cloudpipe.launch_vpn_instance(context)
instance = result[0][0]
except exception.NoMoreNetworks:
msg = _("Unable to claim IP for VPN instances, ensure it "
"isn't running, and try again in a few minutes")
raise exc.HTTPBadRequest(explanation=msg)
return {'instance_id': instance.uuid}
@extensions.expected_errors((400, 403, 404))
def index(self, req):
"""List running cloudpipe instances."""
context = req.environ['nova.context']
authorize(context)
vpns = [self._vpn_dict(context, x['project_id'], x)
for x in self._get_all_cloudpipes(context)]
return {'cloudpipes': vpns}
@wsgi.response(202)
@extensions.expected_errors(400)
@validation.schema(cloudpipe.update)
def update(self, req, id, body):
"""Configure cloudpipe parameters for the project."""
context = req.environ['nova.context']
authorize(context)
if id != "configure-project":
msg = _("Unknown action %s") % id
raise exc.HTTPBadRequest(explanation=msg)
project_id = context.project_id
networks = objects.NetworkList.get_by_project(context, project_id)
params = body['configure_project']
vpn_ip = params['vpn_ip']
vpn_port = params['vpn_port']
for nw in networks:
nw.vpn_public_address = vpn_ip
nw.vpn_public_port = vpn_port
nw.save()
class Cloudpipe(extensions.V3APIExtensionBase):
"""Adds actions to create cloudpipe instances.
When running with the Vlan network mode, you need a mechanism to route
from the public Internet to your vlans. This mechanism is known as a
cloudpipe.
At the time of creating this class, only OpenVPN is supported. Support for
a SSH Bastion host is forthcoming.
"""
name = "Cloudpipe"
alias = ALIAS
version = 1
def get_resources(self):
resource = [extensions.ResourceExtension(ALIAS,
CloudpipeController())]
return resource
def get_controller_extensions(self):
"""It's an abstract function V3APIExtensionBase and the extension
will not be loaded without it.
"""
return []
| apache-2.0 |
microwaveabletoaster/STEPlang | step.py | 1 | 4577 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from argparse import ArgumentParser
import sys, copy, os, glob, time
reserved_methods = ['pause','output', 'input', 'finput', 'sto', 'dupe' ,'+', '-', '/', '*', '%', '?', 'goto', 'end', 'rand', '>', '<', '==', "!="]
parser = ArgumentParser()
parser.add_argument('file',help='the path to the step file')
class StepSyntaxError(Exception):
def __init__(self,message):
super(Exception,self).__init__(message + ' ¯\_(ツ)_/¯')
class Interpreter:
def __init__(self, li):
self.global_index = 0L
self.li = li
self.computering = True
self.memory = []
self.parseStrings()
def parseStrings(self):
comprehending = False
startind = 0
endind = 0
choplist = []
for a in range(0,len(self.li)):
if '\'' in self.li[a] and not comprehending:
self.li[a] = self.li[a][self.li[a].index('\'')+1:]+self.li[a][:self.li[a].index('\'')]
comprehending = True
startind = a
elif comprehending:
if not '\'' in self.li[a]:
self.li[startind] += (" %s" % self.li[a])
else:
self.li[a] = self.li[a][self.li[a].index('\'')+1:]+self.li[a][:self.li[a].index('\'')]
self.li[startind] += (" %s" % self.li[a])
comprehending = False
self.li = self.li[:startind+1] + self.li[a+1:]
# print self.li
return self.parseStrings()
return '¯\_(ツ)_/¯ bush did 9/11 ¯\_(ツ)_/¯'
def interpret(self):
while self.computering:
# print self.computering
try:
self.li.index(str(self.global_index)+'.')
try:
self.li.index('.'+str(self.global_index))
self.execute(self.li.index(str(self.global_index)+'.'),self.li.index('.'+str(self.global_index)))
except ValueError as e:
raise StepSyntaxError('you forgot to close operation %d!!' % self.global_index)
except ValueError as e:
pass
self.global_index += 1
def execute(self, begin, end):
for index in range(begin+1,end):
if '?' in self.li[index]:
if self.memory.pop() != 'yea':
self.li[index] = 'nu uh'
else:
ind = self.li[index].index('?')
self.li[index] = self.li[index][:ind] + self.li[index][ind+1:]
if self.li[index] not in reserved_methods and self.li[index] != 'nu uh' and '.' not in self.li[index]:
self.memory.append(self.li[index])
# print 'SOMETHING STORED'
elif self.li[index] == 'output':
sys.stdout.write(str(self.memory.pop())+'\n')
# print 'OUTPUT CALLED'
elif self.li[index] == 'goto':
test = self.memory.pop()
try:
test = int(test)
except ValueError:
raise StepSyntaxError("whoa! that goto value is not a number!")
self.global_index = test
# print 'GOTO'
elif self.li[index] == 'end':
# print 'ENDED'
self.computering = False
elif self.li[index] == '+':
self.memory.append(self.memory.pop() + self.memory.pop())
elif self.li[index] == '*':
self.memory.append(self.memory.pop() * self.memory.pop())
elif self.li[index] == '/':
self.memory.append(self.memory.pop() / self.memory.pop())
elif self.li[index] == '-':
self.memory.append(self.memory.pop() - self.memory.pop())
elif self.li[index] == '%':
self.memory.append(self.memory.pop() % self.memory.pop())
elif self.li[index] == '<':
if self.memory.pop() < self.memory.pop():
self.memory.append('yea')
else:
self.memory.append('nope')
elif self.li[index] == '>':
if self.memory.pop() > self.memory.pop():
self.memory.append('yea')
else:
self.memory.append('nope')
elif self.li[index] == '==':
if self.memory.pop() == self.memory.pop():
self.memory.append('yea')
else:
self.memory.append('nope')
elif self.li[index] == '!=':
if self.memory.pop() != self.memory.pop():
self.memory.append('yea')
else:
self.memory.append('nope')
elif self.li[index] == 'input':
self.memory.append(raw_input())
elif self.li[index] == 'finput':
try:
self.memory.append(int(raw_input()))
except ValueError:
raise StepSyntaxError('finput requires you to input a number!!')
elif self.li[index] == 'dupe':
self.memory.append(self.memory[len(self.memory)-1])
elif self.li[index] == 'pause':
test = self.memory.pop()
try:
test = float(test)
except ValueError as e:
raise StepSyntaxError('whoops! you need a number for that \"pause\"')
time.sleep(test)
# print self.memory
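# An example STEP program, sketched from the interpreter logic above
# (illustrative only -- not taken from the repo's own docs). Blocks
# are delimited by "N." and ".N" and run when global_index reaches N;
# bare words are pushed on the stack, reserved words consume it:
#
#   0. 'hello world' output .0
#   1. end .1
#
# Block 0 pushes the quoted string and "output" pops and prints it;
# block 1 runs "end", which stops the interpreter loop.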
if __name__ == '__main__':
res = parser.parse_args()
with open(res.file,'r+') as x:
m = Interpreter(x.read().replace('\n',' ').replace('\t',' ').split(' '))
m.interpret()
| unlicense |
elky/django | tests/db_utils/tests.py | 17 | 2050 | """Tests for django.db.utils."""
import unittest
from django.core.exceptions import ImproperlyConfigured
from django.db import DEFAULT_DB_ALIAS, connection
from django.db.utils import ConnectionHandler, ProgrammingError, load_backend
from django.test import SimpleTestCase, TestCase
class ConnectionHandlerTests(SimpleTestCase):
def test_connection_handler_no_databases(self):
"""Empty DATABASES setting defaults to the dummy backend."""
DATABASES = {}
conns = ConnectionHandler(DATABASES)
self.assertEqual(conns[DEFAULT_DB_ALIAS].settings_dict['ENGINE'], 'django.db.backends.dummy')
msg = (
'settings.DATABASES is improperly configured. Please supply the '
'ENGINE value. Check settings documentation for more details.'
)
with self.assertRaisesMessage(ImproperlyConfigured, msg):
conns[DEFAULT_DB_ALIAS].ensure_connection()
class DatabaseErrorWrapperTests(TestCase):
@unittest.skipUnless(connection.vendor == 'postgresql', 'PostgreSQL test')
def test_reraising_backend_specific_database_exception(self):
cursor = connection.cursor()
msg = 'table "X" does not exist'
with self.assertRaisesMessage(ProgrammingError, msg) as cm:
cursor.execute('DROP TABLE "X"')
self.assertNotEqual(type(cm.exception), type(cm.exception.__cause__))
self.assertIsNotNone(cm.exception.__cause__)
self.assertIsNotNone(cm.exception.__cause__.pgcode)
self.assertIsNotNone(cm.exception.__cause__.pgerror)
class LoadBackendTests(SimpleTestCase):
def test_load_backend_invalid_name(self):
msg = (
"'foo' isn't an available database backend.\n"
"Try using 'django.db.backends.XXX', where XXX is one of:\n"
" 'mysql', 'oracle', 'postgresql', 'sqlite3'"
)
with self.assertRaisesMessage(ImproperlyConfigured, msg) as cm:
load_backend('foo')
self.assertEqual(str(cm.exception.__cause__), "No module named 'foo'")
| bsd-3-clause |
trezorg/django | tests/regressiontests/queries/models.py | 51 | 8305 | """
Various complex queries that have been problematic in the past.
"""
import threading
from django.db import models
class DumbCategory(models.Model):
pass
class NamedCategory(DumbCategory):
name = models.CharField(max_length=10)
class Tag(models.Model):
name = models.CharField(max_length=10)
parent = models.ForeignKey('self', blank=True, null=True,
related_name='children')
category = models.ForeignKey(NamedCategory, null=True, default=None)
class Meta:
ordering = ['name']
def __unicode__(self):
return self.name
class Note(models.Model):
note = models.CharField(max_length=100)
misc = models.CharField(max_length=10)
class Meta:
ordering = ['note']
def __unicode__(self):
return self.note
def __init__(self, *args, **kwargs):
super(Note, self).__init__(*args, **kwargs)
# Regression for #13227 -- having an attribute that
# is unpickleable doesn't stop you from cloning queries
# that use objects of that type as an argument.
self.lock = threading.Lock()
class Annotation(models.Model):
name = models.CharField(max_length=10)
tag = models.ForeignKey(Tag)
notes = models.ManyToManyField(Note)
def __unicode__(self):
return self.name
class ExtraInfo(models.Model):
info = models.CharField(max_length=100)
note = models.ForeignKey(Note)
class Meta:
ordering = ['info']
def __unicode__(self):
return self.info
class Author(models.Model):
name = models.CharField(max_length=10)
num = models.IntegerField(unique=True)
extra = models.ForeignKey(ExtraInfo)
class Meta:
ordering = ['name']
def __unicode__(self):
return self.name
class Item(models.Model):
name = models.CharField(max_length=10)
created = models.DateTimeField()
modified = models.DateTimeField(blank=True, null=True)
tags = models.ManyToManyField(Tag, blank=True, null=True)
creator = models.ForeignKey(Author)
note = models.ForeignKey(Note)
class Meta:
ordering = ['-note', 'name']
def __unicode__(self):
return self.name
class Report(models.Model):
name = models.CharField(max_length=10)
creator = models.ForeignKey(Author, to_field='num', null=True)
def __unicode__(self):
return self.name
class Ranking(models.Model):
rank = models.IntegerField()
author = models.ForeignKey(Author)
class Meta:
# A complex ordering specification. Should stress the system a bit.
ordering = ('author__extra__note', 'author__name', 'rank')
def __unicode__(self):
return '%d: %s' % (self.rank, self.author.name)
class Cover(models.Model):
title = models.CharField(max_length=50)
item = models.ForeignKey(Item)
class Meta:
ordering = ['item']
def __unicode__(self):
return self.title
class Number(models.Model):
num = models.IntegerField()
def __unicode__(self):
return unicode(self.num)
# Symmetrical m2m field with a normal field using the reverse accessor name
# ("valid").
class Valid(models.Model):
valid = models.CharField(max_length=10)
parent = models.ManyToManyField('self')
class Meta:
ordering = ['valid']
# Some funky cross-linked models for testing a couple of infinite recursion
# cases.
class X(models.Model):
y = models.ForeignKey('Y')
class Y(models.Model):
x1 = models.ForeignKey(X, related_name='y1')
# Some models with a cycle in the default ordering. This would be bad if we
# didn't catch the infinite loop.
class LoopX(models.Model):
y = models.ForeignKey('LoopY')
class Meta:
ordering = ['y']
class LoopY(models.Model):
x = models.ForeignKey(LoopX)
class Meta:
ordering = ['x']
class LoopZ(models.Model):
z = models.ForeignKey('self')
class Meta:
ordering = ['z']
# A model and custom default manager combination.
class CustomManager(models.Manager):
def get_query_set(self):
qs = super(CustomManager, self).get_query_set()
return qs.filter(public=True, tag__name='t1')
class ManagedModel(models.Model):
data = models.CharField(max_length=10)
tag = models.ForeignKey(Tag)
public = models.BooleanField(default=True)
objects = CustomManager()
normal_manager = models.Manager()
def __unicode__(self):
return self.data
# An inter-related setup with multiple paths from Child to Detail.
class Detail(models.Model):
data = models.CharField(max_length=10)
class MemberManager(models.Manager):
def get_query_set(self):
return super(MemberManager, self).get_query_set().select_related("details")
class Member(models.Model):
name = models.CharField(max_length=10)
details = models.OneToOneField(Detail, primary_key=True)
objects = MemberManager()
class Child(models.Model):
person = models.OneToOneField(Member, primary_key=True)
parent = models.ForeignKey(Member, related_name="children")
# Custom primary keys interfered with ordering in the past.
class CustomPk(models.Model):
name = models.CharField(max_length=10, primary_key=True)
extra = models.CharField(max_length=10)
class Meta:
ordering = ['name', 'extra']
class Related(models.Model):
custom = models.ForeignKey(CustomPk)
# An inter-related setup with a model subclass that has a nullable
# path to another model, and a return path from that model.
class Celebrity(models.Model):
name = models.CharField("Name", max_length=20)
greatest_fan = models.ForeignKey("Fan", null=True, unique=True)
class TvChef(Celebrity):
pass
class Fan(models.Model):
fan_of = models.ForeignKey(Celebrity)
# Multiple foreign keys
class LeafA(models.Model):
data = models.CharField(max_length=10)
def __unicode__(self):
return self.data
class LeafB(models.Model):
data = models.CharField(max_length=10)
class Join(models.Model):
a = models.ForeignKey(LeafA)
b = models.ForeignKey(LeafB)
class ReservedName(models.Model):
name = models.CharField(max_length=20)
order = models.IntegerField()
def __unicode__(self):
return self.name
# A simpler shared-foreign-key setup that can expose some problems.
class SharedConnection(models.Model):
data = models.CharField(max_length=10)
class PointerA(models.Model):
connection = models.ForeignKey(SharedConnection)
class PointerB(models.Model):
connection = models.ForeignKey(SharedConnection)
# Multi-layer ordering
class SingleObject(models.Model):
name = models.CharField(max_length=10)
class Meta:
ordering = ['name']
def __unicode__(self):
return self.name
class RelatedObject(models.Model):
single = models.ForeignKey(SingleObject)
class Meta:
ordering = ['single']
class Plaything(models.Model):
name = models.CharField(max_length=10)
others = models.ForeignKey(RelatedObject, null=True)
class Meta:
ordering = ['others']
def __unicode__(self):
return self.name
class Article(models.Model):
name = models.CharField(max_length=20)
created = models.DateTimeField()
class Food(models.Model):
name = models.CharField(max_length=20, unique=True)
def __unicode__(self):
return self.name
class Eaten(models.Model):
food = models.ForeignKey(Food, to_field="name")
meal = models.CharField(max_length=20)
def __unicode__(self):
return u"%s at %s" % (self.food, self.meal)
class Node(models.Model):
num = models.IntegerField(unique=True)
parent = models.ForeignKey("self", to_field="num", null=True)
def __unicode__(self):
return u"%s" % self.num
# Bug #12252
class ObjectA(models.Model):
name = models.CharField(max_length=50)
def __unicode__(self):
return self.name
class ObjectB(models.Model):
name = models.CharField(max_length=50)
objecta = models.ForeignKey(ObjectA)
num = models.PositiveSmallIntegerField()
def __unicode__(self):
return self.name
class ObjectC(models.Model):
name = models.CharField(max_length=50)
objecta = models.ForeignKey(ObjectA)
objectb = models.ForeignKey(ObjectB)
def __unicode__(self):
return self.name
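# --- Editor's note (illustrative, not part of the original test models):
# Ranking.Meta.ordering above spans two relations ('author__extra__note'),
# so even a plain Ranking.objects.all() forces joins through Author and,
# presumably, an ExtraInfo-style model holding the 'note' field. The
# LoopX/LoopY/LoopZ models exist so the ORM's ordering resolution can be
# exercised against such relation-spanning defaults without recursing forever.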
| bsd-3-clause |
scottdangelo/RemoveVolumeMangerLocks | cinder/tests/unit/test_quobyte.py | 11 | 38899 | # Copyright (c) 2014 Quobyte Inc.
# Copyright (c) 2013 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Unit tests for the Quobyte driver module."""
import errno
import os
import six
import traceback
import mock
from oslo_concurrency import processutils as putils
from oslo_config import cfg
from oslo_utils import units
from cinder import context
from cinder import exception
from cinder.image import image_utils
from cinder.openstack.common import imageutils
from cinder import test
from cinder.volume import configuration as conf
from cinder.volume.drivers import quobyte
CONF = cfg.CONF
class DumbVolume(object):
fields = {}
def __setitem__(self, key, value):
self.fields[key] = value
def __getitem__(self, item):
return self.fields[item]
class FakeDb(object):
msg = "Tests are broken: mock this out."
def volume_get(self, *a, **kw):
raise Exception(self.msg)
def snapshot_get_all_for_volume(self, *a, **kw):
"""Mock this if you want results from it."""
return []
class QuobyteDriverTestCase(test.TestCase):
"""Test case for Quobyte driver."""
TEST_QUOBYTE_VOLUME = 'quobyte://quobyte-host/openstack-volumes'
TEST_QUOBYTE_VOLUME_WITHOUT_PROTOCOL = 'quobyte-host/openstack-volumes'
TEST_SIZE_IN_GB = 1
TEST_MNT_POINT = '/mnt/quobyte'
TEST_MNT_POINT_BASE = '/mnt'
TEST_LOCAL_PATH = '/mnt/quobyte/volume-123'
TEST_FILE_NAME = 'test.txt'
TEST_SHARES_CONFIG_FILE = '/etc/cinder/test-shares.conf'
TEST_TMP_FILE = '/tmp/tempfile'
VOLUME_UUID = 'abcdefab-cdef-abcd-efab-cdefabcdefab'
SNAP_UUID = 'bacadaca-baca-daca-baca-dacadacadaca'
SNAP_UUID_2 = 'bebedede-bebe-dede-bebe-dedebebedede'
def setUp(self):
super(QuobyteDriverTestCase, self).setUp()
self._configuration = mock.Mock(conf.Configuration)
self._configuration.append_config_values(mock.ANY)
self._configuration.quobyte_volume_url = \
self.TEST_QUOBYTE_VOLUME
self._configuration.quobyte_client_cfg = None
self._configuration.quobyte_sparsed_volumes = True
self._configuration.quobyte_qcow2_volumes = False
self._configuration.quobyte_mount_point_base = \
self.TEST_MNT_POINT_BASE
self._driver =\
quobyte.QuobyteDriver(configuration=self._configuration,
db=FakeDb())
self._driver.shares = {}
self._driver.set_nas_security_options(is_new_cinder_install=False)
def assertRaisesAndMessageMatches(
self, excClass, msg, callableObj, *args, **kwargs):
"""Ensure that the specified exception was raised. """
caught = False
try:
callableObj(*args, **kwargs)
except Exception as exc:
caught = True
self.assertEqual(excClass, type(exc),
'Wrong exception caught: %s Stacktrace: %s' %
(exc, traceback.format_exc()))
self.assertIn(msg, six.text_type(exc))
if not caught:
self.fail('Expected raised exception but nothing caught.')
def test_local_path(self):
"""local_path common use case."""
drv = self._driver
volume = DumbVolume()
volume['provider_location'] = self.TEST_QUOBYTE_VOLUME
volume['name'] = 'volume-123'
self.assertEqual(
'/mnt/1331538734b757ed52d0e18c0a7210cd/volume-123',
drv.local_path(volume))
def test_mount_quobyte_should_mount_correctly(self):
with mock.patch.object(self._driver, '_execute') as mock_execute, \
mock.patch('cinder.volume.drivers.quobyte.QuobyteDriver'
'.read_proc_mount') as mock_open:
# Content of /proc/mount (not mounted yet).
mock_open.return_value = six.StringIO(
"/dev/sda5 / ext4 rw,relatime,data=ordered 0 0")
self._driver._mount_quobyte(self.TEST_QUOBYTE_VOLUME,
self.TEST_MNT_POINT)
mkdir_call = mock.call('mkdir', '-p', self.TEST_MNT_POINT)
mount_call = mock.call(
'mount.quobyte', self.TEST_QUOBYTE_VOLUME,
self.TEST_MNT_POINT, run_as_root=False)
getfattr_call = mock.call(
'getfattr', '-n', 'quobyte.info', self.TEST_MNT_POINT,
run_as_root=False)
mock_execute.assert_has_calls(
[mkdir_call, mount_call, getfattr_call], any_order=False)
def test_mount_quobyte_already_mounted_detected_seen_in_proc_mount(self):
with mock.patch.object(self._driver, '_execute') as mock_execute, \
mock.patch('cinder.volume.drivers.quobyte.QuobyteDriver'
'.read_proc_mount') as mock_open:
# Content of /proc/mount (already mounted).
mock_open.return_value = six.StringIO(
"quobyte@%s %s fuse rw,nosuid,nodev,noatime,user_id=1000"
",group_id=100,default_permissions,allow_other 0 0"
% (self.TEST_QUOBYTE_VOLUME, self.TEST_MNT_POINT))
self._driver._mount_quobyte(self.TEST_QUOBYTE_VOLUME,
self.TEST_MNT_POINT)
mock_execute.assert_called_once_with(
'getfattr', '-n', 'quobyte.info', self.TEST_MNT_POINT,
run_as_root=False)
def test_mount_quobyte_should_suppress_and_log_already_mounted_error(self):
"""test_mount_quobyte_should_suppress_and_log_already_mounted_error
Based on /proc/mount, the file system is not mounted yet. However,
mount.quobyte returns with an 'already mounted' error. This is
a last-resort safe-guard in case /proc/mount parsing was not
successful.
Because _mount_quobyte gets called with ensure=True, the error will
be suppressed and logged instead.
"""
with mock.patch.object(self._driver, '_execute') as mock_execute, \
mock.patch('cinder.volume.drivers.quobyte.QuobyteDriver'
'.read_proc_mount') as mock_open, \
mock.patch('cinder.volume.drivers.quobyte.LOG') as mock_LOG:
# Content of /proc/mount (empty).
mock_open.return_value = six.StringIO()
mock_execute.side_effect = [None, putils.ProcessExecutionError(
stderr='is busy or already mounted')]
self._driver._mount_quobyte(self.TEST_QUOBYTE_VOLUME,
self.TEST_MNT_POINT,
ensure=True)
mkdir_call = mock.call('mkdir', '-p', self.TEST_MNT_POINT)
mount_call = mock.call(
'mount.quobyte', self.TEST_QUOBYTE_VOLUME,
self.TEST_MNT_POINT, run_as_root=False)
mock_execute.assert_has_calls([mkdir_call, mount_call],
any_order=False)
mock_LOG.warning.assert_called_once_with('%s is already mounted',
self.TEST_QUOBYTE_VOLUME)
def test_mount_quobyte_should_reraise_already_mounted_error(self):
"""test_mount_quobyte_should_reraise_already_mounted_error
Like test_mount_quobyte_should_suppress_and_log_already_mounted_error
but with ensure=False.
"""
with mock.patch.object(self._driver, '_execute') as mock_execute, \
mock.patch('cinder.volume.drivers.quobyte.QuobyteDriver'
'.read_proc_mount') as mock_open:
mock_open.return_value = six.StringIO()
mock_execute.side_effect = [
None, # mkdir
putils.ProcessExecutionError( # mount
stderr='is busy or already mounted')]
self.assertRaises(putils.ProcessExecutionError,
self._driver._mount_quobyte,
self.TEST_QUOBYTE_VOLUME,
self.TEST_MNT_POINT,
ensure=False)
mkdir_call = mock.call('mkdir', '-p', self.TEST_MNT_POINT)
mount_call = mock.call(
'mount.quobyte', self.TEST_QUOBYTE_VOLUME,
self.TEST_MNT_POINT, run_as_root=False)
mock_execute.assert_has_calls([mkdir_call, mount_call],
any_order=False)
def test_get_hash_str(self):
"""_get_hash_str should calculation correct value."""
drv = self._driver
self.assertEqual('1331538734b757ed52d0e18c0a7210cd',
drv._get_hash_str(self.TEST_QUOBYTE_VOLUME))
def test_get_available_capacity_with_df(self):
"""_get_available_capacity should calculate correct value."""
drv = self._driver
df_total_size = 2620544
df_avail = 1490560
df_head = 'Filesystem 1K-blocks Used Available Use% Mounted on\n'
df_data = 'quobyte@%s %d 996864 %d 41%% %s' % \
(self.TEST_QUOBYTE_VOLUME, df_total_size, df_avail,
self.TEST_MNT_POINT)
df_output = df_head + df_data
drv._get_mount_point_for_share = mock.Mock(return_value=self.
TEST_MNT_POINT)
drv._execute = mock.Mock(return_value=(df_output, None))
self.assertEqual((df_avail, df_total_size),
drv._get_available_capacity(self.TEST_QUOBYTE_VOLUME))
(drv._get_mount_point_for_share.
assert_called_once_with(self.TEST_QUOBYTE_VOLUME))
(drv._execute.
assert_called_once_with('df',
'--portability',
'--block-size',
'1',
self.TEST_MNT_POINT,
run_as_root=self._driver._execute_as_root))
def test_get_capacity_info(self):
with mock.patch.object(self._driver, '_get_available_capacity') \
as mock_get_available_capacity:
drv = self._driver
df_size = 2620544
df_avail = 1490560
mock_get_available_capacity.return_value = (df_avail, df_size)
size, available, used = drv._get_capacity_info(mock.ANY)
mock_get_available_capacity.assert_called_once_with(mock.ANY)
self.assertEqual(df_size, size)
self.assertEqual(df_avail, available)
self.assertEqual(size - available, used)
def test_load_shares_config(self):
"""_load_shares_config takes the Volume URL and strips quobyte://."""
drv = self._driver
drv._load_shares_config()
self.assertIn(self.TEST_QUOBYTE_VOLUME_WITHOUT_PROTOCOL, drv.shares)
def test_load_shares_config_without_protocol(self):
"""Same as test_load_shares_config, but URL is without quobyte://."""
drv = self._driver
drv.configuration.quobyte_volume_url = \
self.TEST_QUOBYTE_VOLUME_WITHOUT_PROTOCOL
drv._load_shares_config()
self.assertIn(self.TEST_QUOBYTE_VOLUME_WITHOUT_PROTOCOL, drv.shares)
def test_ensure_share_mounted(self):
"""_ensure_share_mounted simple use case."""
with mock.patch.object(self._driver, '_get_mount_point_for_share') as \
mock_get_mount_point, \
mock.patch.object(self._driver, '_mount_quobyte') as \
mock_mount:
drv = self._driver
drv._ensure_share_mounted(self.TEST_QUOBYTE_VOLUME)
mock_get_mount_point.assert_called_once_with(
self.TEST_QUOBYTE_VOLUME)
mock_mount.assert_called_once_with(
self.TEST_QUOBYTE_VOLUME,
mock_get_mount_point.return_value,
ensure=True)
def test_ensure_shares_mounted_should_save_mounting_successfully(self):
"""_ensure_shares_mounted should save share if mounted with success."""
with mock.patch.object(self._driver, '_ensure_share_mounted') \
as mock_ensure_share_mounted:
drv = self._driver
drv._ensure_shares_mounted()
mock_ensure_share_mounted.assert_called_once_with(
self.TEST_QUOBYTE_VOLUME_WITHOUT_PROTOCOL)
self.assertIn(self.TEST_QUOBYTE_VOLUME_WITHOUT_PROTOCOL,
drv._mounted_shares)
def test_ensure_shares_mounted_should_not_save_mounting_with_error(self):
"""_ensure_shares_mounted should not save if mount raised an error."""
with mock.patch.object(self._driver, '_ensure_share_mounted') \
as mock_ensure_share_mounted:
drv = self._driver
mock_ensure_share_mounted.side_effect = Exception()
drv._ensure_shares_mounted()
mock_ensure_share_mounted.assert_called_once_with(
self.TEST_QUOBYTE_VOLUME_WITHOUT_PROTOCOL)
self.assertEqual(1, len(drv.shares))
self.assertEqual(0, len(drv._mounted_shares))
def test_do_setup(self):
"""do_setup runs successfully."""
drv = self._driver
drv.do_setup(mock.create_autospec(context.RequestContext))
def test_check_for_setup_error_throws_quobyte_volume_url_not_set(self):
"""check_for_setup_error throws if 'quobyte_volume_url' is not set."""
drv = self._driver
drv.configuration.quobyte_volume_url = None
self.assertRaisesAndMessageMatches(exception.VolumeDriverException,
'no Quobyte volume configured',
drv.check_for_setup_error)
def test_check_for_setup_error_throws_client_not_installed(self):
"""check_for_setup_error throws if client is not installed."""
drv = self._driver
drv._execute = mock.Mock(side_effect=OSError
(errno.ENOENT, 'No such file or directory'))
self.assertRaisesAndMessageMatches(exception.VolumeDriverException,
'mount.quobyte is not installed',
drv.check_for_setup_error)
drv._execute.assert_called_once_with('mount.quobyte',
check_exit_code=False,
run_as_root=False)
def test_check_for_setup_error_throws_client_not_executable(self):
"""check_for_setup_error throws if client cannot be executed."""
drv = self._driver
drv._execute = mock.Mock(side_effect=OSError
(errno.EPERM, 'Operation not permitted'))
self.assertRaisesAndMessageMatches(OSError,
'Operation not permitted',
drv.check_for_setup_error)
drv._execute.assert_called_once_with('mount.quobyte',
check_exit_code=False,
run_as_root=False)
def test_find_share_should_throw_error_if_there_is_no_mounted_shares(self):
"""_find_share should throw error if there is no mounted share."""
drv = self._driver
drv._mounted_shares = []
self.assertRaises(exception.NotFound,
drv._find_share,
self.TEST_SIZE_IN_GB)
def test_find_share(self):
"""_find_share simple use case."""
drv = self._driver
drv._mounted_shares = [self.TEST_QUOBYTE_VOLUME]
self.assertEqual(self.TEST_QUOBYTE_VOLUME,
drv._find_share(self.TEST_SIZE_IN_GB))
def test_find_share_does_not_throw_error_if_there_isnt_enough_space(self):
"""_find_share intentionally does not throw when no space is left."""
with mock.patch.object(self._driver, '_get_available_capacity') \
as mock_get_available_capacity:
drv = self._driver
df_size = 2620544
df_avail = 0
mock_get_available_capacity.return_value = (df_avail, df_size)
drv._mounted_shares = [self.TEST_QUOBYTE_VOLUME]
self.assertEqual(self.TEST_QUOBYTE_VOLUME,
drv._find_share(self.TEST_SIZE_IN_GB))
# The current implementation does not call _get_available_capacity.
# Future ones might do and therefore we mocked it.
self.assertGreaterEqual(mock_get_available_capacity.call_count, 0)
def _simple_volume(self, uuid=None):
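        """Build a minimal DumbVolume carrying the fields the driver touches."""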
volume = DumbVolume()
volume['provider_location'] = self.TEST_QUOBYTE_VOLUME
if uuid is None:
volume['id'] = self.VOLUME_UUID
else:
volume['id'] = uuid
# volume['name'] mirrors format from db/sqlalchemy/models.py
volume['name'] = 'volume-%s' % volume['id']
volume['size'] = 10
volume['status'] = 'available'
return volume
def test_create_sparsed_volume(self):
drv = self._driver
volume = self._simple_volume()
drv._create_sparsed_file = mock.Mock()
drv._set_rw_permissions_for_all = mock.Mock()
drv._do_create_volume(volume)
drv._create_sparsed_file.assert_called_once_with(mock.ANY, mock.ANY)
drv._set_rw_permissions_for_all.assert_called_once_with(mock.ANY)
def test_create_nonsparsed_volume(self):
drv = self._driver
volume = self._simple_volume()
old_value = self._configuration.quobyte_sparsed_volumes
self._configuration.quobyte_sparsed_volumes = False
drv._create_regular_file = mock.Mock()
drv._set_rw_permissions_for_all = mock.Mock()
drv._do_create_volume(volume)
drv._create_regular_file.assert_called_once_with(mock.ANY, mock.ANY)
drv._set_rw_permissions_for_all.assert_called_once_with(mock.ANY)
self._configuration.quobyte_sparsed_volumes = old_value
def test_create_qcow2_volume(self):
drv = self._driver
volume = self._simple_volume()
old_value = self._configuration.quobyte_qcow2_volumes
self._configuration.quobyte_qcow2_volumes = True
drv._execute = mock.Mock()
hashed = drv._get_hash_str(volume['provider_location'])
path = '%s/%s/volume-%s' % (self.TEST_MNT_POINT_BASE,
hashed,
self.VOLUME_UUID)
drv._do_create_volume(volume)
assert_calls = [mock.call('qemu-img', 'create', '-f', 'qcow2',
'-o', 'preallocation=metadata', path,
str(volume['size'] * units.Gi),
run_as_root=self._driver._execute_as_root),
mock.call('chmod', 'ugo+rw', path,
run_as_root=self._driver._execute_as_root)]
drv._execute.assert_has_calls(assert_calls)
self._configuration.quobyte_qcow2_volumes = old_value
def test_create_volume_should_ensure_quobyte_mounted(self):
"""create_volume ensures shares provided in config are mounted."""
drv = self._driver
drv.LOG = mock.Mock()
drv._find_share = mock.Mock()
drv._do_create_volume = mock.Mock()
drv._ensure_shares_mounted = mock.Mock()
volume = DumbVolume()
volume['size'] = self.TEST_SIZE_IN_GB
drv.create_volume(volume)
drv._find_share.assert_called_once_with(mock.ANY)
drv._do_create_volume.assert_called_once_with(volume)
drv._ensure_shares_mounted.assert_called_once_with()
def test_create_volume_should_return_provider_location(self):
"""create_volume should return provider_location with found share."""
drv = self._driver
drv.LOG = mock.Mock()
drv._ensure_shares_mounted = mock.Mock()
drv._do_create_volume = mock.Mock()
drv._find_share = mock.Mock(return_value=self.TEST_QUOBYTE_VOLUME)
volume = DumbVolume()
volume['size'] = self.TEST_SIZE_IN_GB
result = drv.create_volume(volume)
self.assertEqual(self.TEST_QUOBYTE_VOLUME, result['provider_location'])
drv._do_create_volume.assert_called_once_with(volume)
drv._ensure_shares_mounted.assert_called_once_with()
drv._find_share.assert_called_once_with(self.TEST_SIZE_IN_GB)
def test_create_cloned_volume(self):
drv = self._driver
drv._create_snapshot = mock.Mock()
drv._copy_volume_from_snapshot = mock.Mock()
drv._delete_snapshot = mock.Mock()
volume = self._simple_volume()
src_vref = self._simple_volume()
src_vref['id'] = '375e32b2-804a-49f2-b282-85d1d5a5b9e1'
src_vref['name'] = 'volume-%s' % src_vref['id']
volume_ref = {'id': volume['id'],
'name': volume['name'],
'status': volume['status'],
'provider_location': volume['provider_location'],
'size': volume['size']}
snap_ref = {'volume_name': src_vref['name'],
'name': 'clone-snap-%s' % src_vref['id'],
'size': src_vref['size'],
'volume_size': src_vref['size'],
'volume_id': src_vref['id'],
'id': 'tmp-snap-%s' % src_vref['id'],
'volume': src_vref}
drv.create_cloned_volume(volume, src_vref)
drv._create_snapshot.assert_called_once_with(snap_ref)
drv._copy_volume_from_snapshot.assert_called_once_with(snap_ref,
volume_ref,
volume['size'])
drv._delete_snapshot.assert_called_once_with(mock.ANY)
@mock.patch('oslo_utils.fileutils.delete_if_exists')
def test_delete_volume(self, mock_delete_if_exists):
volume = self._simple_volume()
volume_filename = 'volume-%s' % self.VOLUME_UUID
volume_path = '%s/%s' % (self.TEST_MNT_POINT, volume_filename)
info_file = volume_path + '.info'
with mock.patch.object(self._driver, '_ensure_share_mounted') as \
mock_ensure_share_mounted, \
mock.patch.object(self._driver, '_local_volume_dir') as \
mock_local_volume_dir, \
mock.patch.object(self._driver,
'get_active_image_from_info') as \
mock_active_image_from_info, \
mock.patch.object(self._driver, '_execute') as \
mock_execute, \
mock.patch.object(self._driver, '_local_path_volume') as \
mock_local_path_volume, \
mock.patch.object(self._driver, '_local_path_volume_info') as \
mock_local_path_volume_info:
mock_local_volume_dir.return_value = self.TEST_MNT_POINT
mock_active_image_from_info.return_value = volume_filename
mock_local_path_volume.return_value = volume_path
mock_local_path_volume_info.return_value = info_file
self._driver.delete_volume(volume)
mock_ensure_share_mounted.assert_called_once_with(
volume['provider_location'])
mock_local_volume_dir.assert_called_once_with(volume)
mock_active_image_from_info.assert_called_once_with(volume)
mock_execute.assert_called_once_with('rm', '-f', volume_path,
run_as_root=
self._driver._execute_as_root)
mock_local_path_volume_info.assert_called_once_with(volume)
mock_local_path_volume.assert_called_once_with(volume)
mock_delete_if_exists.assert_any_call(volume_path)
mock_delete_if_exists.assert_any_call(info_file)
def test_delete_should_ensure_share_mounted(self):
"""delete_volume should ensure that corresponding share is mounted."""
drv = self._driver
drv._execute = mock.Mock()
volume = DumbVolume()
volume['name'] = 'volume-123'
volume['provider_location'] = self.TEST_QUOBYTE_VOLUME
drv._ensure_share_mounted = mock.Mock()
drv.delete_volume(volume)
(drv._ensure_share_mounted.
assert_called_once_with(self.TEST_QUOBYTE_VOLUME))
drv._execute.assert_called_once_with('rm', '-f',
mock.ANY,
run_as_root=False)
def test_delete_should_not_delete_if_provider_location_not_provided(self):
"""delete_volume shouldn't delete if provider_location missed."""
drv = self._driver
drv._ensure_share_mounted = mock.Mock()
drv._execute = mock.Mock()
volume = DumbVolume()
volume['name'] = 'volume-123'
volume['provider_location'] = None
drv.delete_volume(volume)
assert not drv._ensure_share_mounted.called
assert not drv._execute.called
def test_extend_volume(self):
drv = self._driver
volume = self._simple_volume()
volume_path = '%s/%s/volume-%s' % (self.TEST_MNT_POINT_BASE,
drv._get_hash_str(
self.TEST_QUOBYTE_VOLUME),
self.VOLUME_UUID)
qemu_img_info_output = """image: volume-%s
file format: qcow2
virtual size: 1.0G (1073741824 bytes)
disk size: 473K
""" % self.VOLUME_UUID
img_info = imageutils.QemuImgInfo(qemu_img_info_output)
drv.get_active_image_from_info = mock.Mock(return_value=volume['name'])
image_utils.qemu_img_info = mock.Mock(return_value=img_info)
image_utils.resize_image = mock.Mock()
drv.extend_volume(volume, 3)
drv.get_active_image_from_info.assert_called_once_with(volume)
image_utils.qemu_img_info.assert_called_once_with(volume_path)
image_utils.resize_image.assert_called_once_with(volume_path, 3)
def test_copy_volume_from_snapshot(self):
drv = self._driver
        # Prepare the numerous test variables first.
dest_volume = self._simple_volume(
'c1073000-0000-0000-0000-0000000c1073')
src_volume = self._simple_volume()
vol_dir = os.path.join(self.TEST_MNT_POINT_BASE,
drv._get_hash_str(self.TEST_QUOBYTE_VOLUME))
src_vol_path = os.path.join(vol_dir, src_volume['name'])
dest_vol_path = os.path.join(vol_dir, dest_volume['name'])
info_path = os.path.join(vol_dir, src_volume['name']) + '.info'
snapshot = {'volume_name': src_volume['name'],
'name': 'clone-snap-%s' % src_volume['id'],
'size': src_volume['size'],
'volume_size': src_volume['size'],
'volume_id': src_volume['id'],
'id': 'tmp-snap-%s' % src_volume['id'],
'volume': src_volume}
snap_file = dest_volume['name'] + '.' + snapshot['id']
snap_path = os.path.join(vol_dir, snap_file)
size = dest_volume['size']
qemu_img_output = """image: %s
file format: raw
virtual size: 1.0G (1073741824 bytes)
disk size: 173K
backing file: %s
""" % (snap_file, src_volume['name'])
img_info = imageutils.QemuImgInfo(qemu_img_output)
# mocking and testing starts here
image_utils.convert_image = mock.Mock()
drv._read_info_file = mock.Mock(return_value=
{'active': snap_file,
snapshot['id']: snap_file})
image_utils.qemu_img_info = mock.Mock(return_value=img_info)
drv._set_rw_permissions_for_all = mock.Mock()
drv._copy_volume_from_snapshot(snapshot, dest_volume, size)
drv._read_info_file.assert_called_once_with(info_path)
image_utils.qemu_img_info.assert_called_once_with(snap_path)
(image_utils.convert_image.
assert_called_once_with(src_vol_path,
dest_vol_path,
'raw',
run_as_root=self._driver._execute_as_root))
drv._set_rw_permissions_for_all.assert_called_once_with(dest_vol_path)
def test_create_volume_from_snapshot_status_not_available(self):
"""Expect an error when the snapshot's status is not 'available'."""
drv = self._driver
src_volume = self._simple_volume()
snap_ref = {'volume_name': src_volume['name'],
'name': 'clone-snap-%s' % src_volume['id'],
'size': src_volume['size'],
'volume_size': src_volume['size'],
'volume_id': src_volume['id'],
'id': 'tmp-snap-%s' % src_volume['id'],
'volume': src_volume,
'status': 'error'}
new_volume = DumbVolume()
new_volume['size'] = snap_ref['size']
self.assertRaises(exception.InvalidSnapshot,
drv.create_volume_from_snapshot,
new_volume,
snap_ref)
def test_create_volume_from_snapshot(self):
drv = self._driver
src_volume = self._simple_volume()
snap_ref = {'volume_name': src_volume['name'],
'name': 'clone-snap-%s' % src_volume['id'],
'size': src_volume['size'],
'volume_size': src_volume['size'],
'volume_id': src_volume['id'],
'id': 'tmp-snap-%s' % src_volume['id'],
'volume': src_volume,
'status': 'available'}
new_volume = DumbVolume()
new_volume['size'] = snap_ref['size']
drv._ensure_shares_mounted = mock.Mock()
drv._find_share = mock.Mock(return_value=self.TEST_QUOBYTE_VOLUME)
drv._do_create_volume = mock.Mock()
drv._copy_volume_from_snapshot = mock.Mock()
drv.create_volume_from_snapshot(new_volume, snap_ref)
drv._ensure_shares_mounted.assert_called_once_with()
drv._find_share.assert_called_once_with(new_volume['size'])
drv._do_create_volume.assert_called_once_with(new_volume)
(drv._copy_volume_from_snapshot.
assert_called_once_with(snap_ref, new_volume, new_volume['size']))
def test_initialize_connection(self):
drv = self._driver
volume = self._simple_volume()
vol_dir = os.path.join(self.TEST_MNT_POINT_BASE,
drv._get_hash_str(self.TEST_QUOBYTE_VOLUME))
vol_path = os.path.join(vol_dir, volume['name'])
qemu_img_output = """image: %s
file format: raw
virtual size: 1.0G (1073741824 bytes)
disk size: 173K
""" % volume['name']
img_info = imageutils.QemuImgInfo(qemu_img_output)
drv.get_active_image_from_info = mock.Mock(return_value=volume['name'])
image_utils.qemu_img_info = mock.Mock(return_value=img_info)
conn_info = drv.initialize_connection(volume, None)
drv.get_active_image_from_info.assert_called_once_with(volume)
image_utils.qemu_img_info.assert_called_once_with(vol_path)
self.assertEqual('raw', conn_info['data']['format'])
self.assertEqual('quobyte', conn_info['driver_volume_type'])
self.assertEqual(volume['name'], conn_info['data']['name'])
self.assertEqual(self.TEST_MNT_POINT_BASE,
conn_info['mount_point_base'])
def test_copy_volume_to_image_raw_image(self):
drv = self._driver
volume = self._simple_volume()
volume_path = '%s/%s' % (self.TEST_MNT_POINT, volume['name'])
image_meta = {'id': '10958016-e196-42e3-9e7f-5d8927ae3099'}
with mock.patch.object(drv, 'get_active_image_from_info') as \
mock_get_active_image_from_info, \
mock.patch.object(drv, '_local_volume_dir') as \
mock_local_volume_dir, \
mock.patch.object(image_utils, 'qemu_img_info') as \
mock_qemu_img_info, \
mock.patch.object(image_utils, 'upload_volume') as \
mock_upload_volume, \
mock.patch.object(image_utils, 'create_temporary_file') as \
mock_create_temporary_file:
mock_get_active_image_from_info.return_value = volume['name']
mock_local_volume_dir.return_value = self.TEST_MNT_POINT
mock_create_temporary_file.return_value = self.TEST_TMP_FILE
qemu_img_output = """image: %s
file format: raw
virtual size: 1.0G (1073741824 bytes)
disk size: 173K
""" % volume['name']
img_info = imageutils.QemuImgInfo(qemu_img_output)
mock_qemu_img_info.return_value = img_info
upload_path = volume_path
drv.copy_volume_to_image(mock.ANY, volume, mock.ANY, image_meta)
mock_get_active_image_from_info.assert_called_once_with(volume)
mock_local_volume_dir.assert_called_once_with(volume)
mock_qemu_img_info.assert_called_once_with(volume_path)
mock_upload_volume.assert_called_once_with(
mock.ANY, mock.ANY, mock.ANY, upload_path)
self.assertTrue(mock_create_temporary_file.called)
def test_copy_volume_to_image_qcow2_image(self):
"""Upload a qcow2 image file which has to be converted to raw first."""
drv = self._driver
volume = self._simple_volume()
volume_path = '%s/%s' % (self.TEST_MNT_POINT, volume['name'])
image_meta = {'id': '10958016-e196-42e3-9e7f-5d8927ae3099'}
with mock.patch.object(drv, 'get_active_image_from_info') as \
mock_get_active_image_from_info, \
mock.patch.object(drv, '_local_volume_dir') as \
mock_local_volume_dir, \
mock.patch.object(image_utils, 'qemu_img_info') as \
mock_qemu_img_info, \
mock.patch.object(image_utils, 'convert_image') as \
mock_convert_image, \
mock.patch.object(image_utils, 'upload_volume') as \
mock_upload_volume, \
mock.patch.object(image_utils, 'create_temporary_file') as \
mock_create_temporary_file:
mock_get_active_image_from_info.return_value = volume['name']
mock_local_volume_dir.return_value = self.TEST_MNT_POINT
mock_create_temporary_file.return_value = self.TEST_TMP_FILE
qemu_img_output = """image: %s
file format: qcow2
virtual size: 1.0G (1073741824 bytes)
disk size: 173K
""" % volume['name']
img_info = imageutils.QemuImgInfo(qemu_img_output)
mock_qemu_img_info.return_value = img_info
upload_path = self.TEST_TMP_FILE
drv.copy_volume_to_image(mock.ANY, volume, mock.ANY, image_meta)
mock_get_active_image_from_info.assert_called_once_with(volume)
mock_local_volume_dir.assert_called_with(volume)
mock_qemu_img_info.assert_called_once_with(volume_path)
mock_convert_image.assert_called_once_with(
volume_path, upload_path, 'raw')
mock_upload_volume.assert_called_once_with(
mock.ANY, mock.ANY, mock.ANY, upload_path)
self.assertTrue(mock_create_temporary_file.called)
def test_copy_volume_to_image_snapshot_exists(self):
"""Upload an active snapshot which has to be converted to raw first."""
drv = self._driver
volume = self._simple_volume()
volume_path = '%s/volume-%s' % (self.TEST_MNT_POINT, self.VOLUME_UUID)
volume_filename = 'volume-%s' % self.VOLUME_UUID
image_meta = {'id': '10958016-e196-42e3-9e7f-5d8927ae3099'}
with mock.patch.object(drv, 'get_active_image_from_info') as \
mock_get_active_image_from_info, \
mock.patch.object(drv, '_local_volume_dir') as \
mock_local_volume_dir, \
mock.patch.object(image_utils, 'qemu_img_info') as \
mock_qemu_img_info, \
mock.patch.object(image_utils, 'convert_image') as \
mock_convert_image, \
mock.patch.object(image_utils, 'upload_volume') as \
mock_upload_volume, \
mock.patch.object(image_utils, 'create_temporary_file') as \
mock_create_temporary_file:
mock_get_active_image_from_info.return_value = volume['name']
mock_local_volume_dir.return_value = self.TEST_MNT_POINT
mock_create_temporary_file.return_value = self.TEST_TMP_FILE
qemu_img_output = """image: volume-%s.%s
file format: qcow2
virtual size: 1.0G (1073741824 bytes)
disk size: 173K
backing file: %s
""" % (self.VOLUME_UUID, self.SNAP_UUID, volume_filename)
img_info = imageutils.QemuImgInfo(qemu_img_output)
mock_qemu_img_info.return_value = img_info
upload_path = self.TEST_TMP_FILE
drv.copy_volume_to_image(mock.ANY, volume, mock.ANY, image_meta)
mock_get_active_image_from_info.assert_called_once_with(volume)
mock_local_volume_dir.assert_called_with(volume)
mock_qemu_img_info.assert_called_once_with(volume_path)
mock_convert_image.assert_called_once_with(
volume_path, upload_path, 'raw')
mock_upload_volume.assert_called_once_with(
mock.ANY, mock.ANY, mock.ANY, upload_path)
self.assertTrue(mock_create_temporary_file.called)
| apache-2.0 |
salamer/django | django/contrib/gis/db/backends/base/operations.py | 263 | 4865 | class BaseSpatialOperations(object):
"""
This module holds the base `BaseSpatialBackend` object, which is
instantiated by each spatial database backend with the features
it has.
"""
truncate_params = {}
# Quick booleans for the type of this spatial backend, and
# an attribute for the spatial database version tuple (if applicable)
postgis = False
spatialite = False
mysql = False
oracle = False
spatial_version = None
# How the geometry column should be selected.
select = None
# Does the spatial database have a geometry or geography type?
geography = False
geometry = False
area = False
bounding_circle = False
centroid = False
difference = False
distance = False
distance_sphere = False
distance_spheroid = False
envelope = False
force_rhr = False
mem_size = False
num_geom = False
num_points = False
perimeter = False
perimeter3d = False
point_on_surface = False
polygonize = False
reverse = False
scale = False
snap_to_grid = False
sym_difference = False
transform = False
translate = False
union = False
# Aggregates
disallowed_aggregates = ()
geom_func_prefix = ''
# Mapping between Django function names and backend names, when names do not
# match; used in spatial_function_name().
function_names = {}
# Blacklist/set of known unsupported functions of the backend
unsupported_functions = {
'Area', 'AsGeoHash', 'AsGeoJSON', 'AsGML', 'AsKML', 'AsSVG',
'BoundingCircle', 'Centroid', 'Difference', 'Distance', 'Envelope',
'ForceRHR', 'Intersection', 'Length', 'MemSize', 'NumGeometries',
'NumPoints', 'Perimeter', 'PointOnSurface', 'Reverse', 'Scale',
'SnapToGrid', 'SymDifference', 'Transform', 'Translate',
'Union',
}
# Serialization
geohash = False
geojson = False
gml = False
kml = False
svg = False
# Constructors
from_text = False
from_wkb = False
# Default conversion functions for aggregates; will be overridden if implemented
# for the spatial backend.
def convert_extent(self, box, srid):
raise NotImplementedError('Aggregate extent not implemented for this spatial backend.')
def convert_extent3d(self, box, srid):
raise NotImplementedError('Aggregate 3D extent not implemented for this spatial backend.')
def convert_geom(self, geom_val, geom_field):
raise NotImplementedError('Aggregate method not implemented for this spatial backend.')
# For quoting column values, rather than columns.
def geo_quote_name(self, name):
return "'%s'" % name
# GeometryField operations
def geo_db_type(self, f):
"""
Returns the database column type for the geometry field on
the spatial backend.
"""
raise NotImplementedError('subclasses of BaseSpatialOperations must provide a geo_db_type() method')
def get_distance(self, f, value, lookup_type):
"""
Returns the distance parameters for the given geometry field,
lookup value, and lookup type.
"""
raise NotImplementedError('Distance operations not available on this spatial backend.')
def get_geom_placeholder(self, f, value, compiler):
"""
Returns the placeholder for the given geometry field with the given
value. Depending on the spatial backend, the placeholder may contain a
stored procedure call to the transformation function of the spatial
backend.
"""
        raise NotImplementedError('subclasses of BaseSpatialOperations must provide a get_geom_placeholder() method')
def check_expression_support(self, expression):
if isinstance(expression, self.disallowed_aggregates):
raise NotImplementedError(
"%s spatial aggregation is not supported by this database backend." % expression.name
)
super(BaseSpatialOperations, self).check_expression_support(expression)
def spatial_aggregate_name(self, agg_name):
raise NotImplementedError('Aggregate support not implemented for this spatial backend.')
def spatial_function_name(self, func_name):
if func_name in self.unsupported_functions:
raise NotImplementedError("This backend doesn't support the %s function." % func_name)
return self.function_names.get(func_name, self.geom_func_prefix + func_name)
# Routines for getting the OGC-compliant models.
def geometry_columns(self):
raise NotImplementedError('Subclasses of BaseSpatialOperations must provide a geometry_columns() method.')
def spatial_ref_sys(self):
        raise NotImplementedError('subclasses of BaseSpatialOperations must provide a spatial_ref_sys() method')
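# --- Editor's sketch (not part of the original module): a minimal concrete
# subclass showing which hooks a real backend (e.g. PostGIS) overrides.
# The class name and the 'GEOMETRY' column type are hypothetical.
class SketchSpatialOperations(BaseSpatialOperations):
    def geo_db_type(self, f):
        # Map every geometry field to one hypothetical column type.
        return 'GEOMETRY'

    def get_distance(self, f, value, lookup_type):
        # Pass the lookup value through unchanged in this sketch.
        return [value]

    def get_geom_placeholder(self, f, value, compiler):
        # No transform support here; a plain parameter placeholder suffices.
        return '%s'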
| bsd-3-clause |
sljeff/JBlog | db.py | 1 | 10515 | # coding: utf-8
import sqlite3
import datetime
from functools import lru_cache
class DB:
def __init__(self, dbpath='blog.db'):
"""
:param str dbpath: database path
"""
self._dbpath = dbpath
@staticmethod
def _prepare_conditions(conditions):
"""
example:
conditions = [('id', '=', '3'), ('name', '>', 'jeff')]
cdt_str, cdt_value = self._prepare_conditions(conditions)
assert cdt_str == 'id = ? AND name > ?'
            assert cdt_value == ('3', 'jeff')
conditions = {'id': 1, 'name': 'jeff'}
cdt_str, cdt_value = self._prepare_conditions(conditions)
            assert cdt_str == 'id = ?,name = ?'
            assert cdt_value == (1, 'jeff')
:param list[tuple[str]]|dict conditions: list of 3-tuple
:rtype: tuple[str, tuple]
"""
cdt_arr = []
cdt_value_arr = []
get_symbol = lambda t: t[1]
inner = ' AND '
if isinstance(conditions, dict):
conditions = [x for x in conditions.items()]
get_symbol = lambda t: '='
inner = ','
for tp in conditions:
            if ' ' in tp[0]:
                # Reject column names containing spaces to block SQL injection.
                return None, None
cdt_arr.append('{0} {1} ?'.format(tp[0], get_symbol(tp)))
cdt_value_arr.append(tp[-1])
cdt_str = inner.join(cdt_arr)
return cdt_str, tuple(cdt_value_arr)
def connect(self):
"""
:rtype: sqlite3.Connection
"""
conn = sqlite3.connect(self._dbpath, check_same_thread=False)
conn.row_factory = sqlite3.Row
return conn
def insert(self, table, value_dict):
"""
INSERT INTO table (value_dict.keys()) VALUES (value_dict.values())
:param str table: table to insert
:param dict value_dict: dict to insert
:rtype: bool
"""
result = False
qs = ','.join(['?'] * len(value_dict))
k_tuple = ','.join(value_dict.keys())
v_tuple = tuple(value_dict.values())
sql = 'INSERT INTO {} ({}) VALUES ({})'.format(table, k_tuple, qs)
conn = self.connect()
try:
conn.execute(sql, v_tuple)
conn.commit()
result = True
except Exception as e:
print(e)
conn.rollback()
finally:
conn.close()
return result
def select(self, columns, table, conditions, order_by=None, desc=False, limit=None, offset=None):
"""
SELECT columns FROM table WHERE condition AND condition
note that: columns should not be a string!
:param tuple|list columns: if columns is (), [] or None, it will be translated to '*'.
:param str table: table to select
:param list[tuple[str]] conditions: 3-tuple list; ex: [('author', '=', 'jeff'), ('time', '<', datetime.now())]
:param str order_by: order by which column
:param bool desc: desc
:param int limit: limit number
:param int offset: offset number
:rtype: list[sqlite3.Row]
"""
result = None
sel_str = ','.join(columns) if columns else '*'
cdt_str, cdt_value = self._prepare_conditions(conditions)
if cdt_str is None or cdt_value is None:
return result
if len(cdt_value) == 0:
sql = 'SELECT {} FROM {}'.format(sel_str, table)
else:
sql = 'SELECT {} FROM {} WHERE {}'.format(sel_str, table, cdt_str)
if order_by is not None:
sql += ' ORDER BY {}'.format(order_by)
if desc:
sql += ' DESC'
if limit is not None:
sql += ' LIMIT {}'.format(limit)
if offset is not None:
sql += ' OFFSET {}'.format(offset)
conn = self.connect()
try:
cursor = conn.cursor()
cursor.execute(sql, cdt_value)
result = cursor.fetchall()
except Exception as e:
print(sql, '\n', e)
conn.rollback()
finally:
conn.close()
return result
def update(self, table, value_dict, conditions):
"""
UPDATE table SET value_dict.keys() = value_dict.values() WHERE condition AND condition
:param str table: table to update
:param dict value_dict: values to update
:param list[tuple[str]] conditions: list of 3-tuple; ex: [('author', '=', 'jeff'), ('time', '<', '20160501')]
:rtype: bool
"""
result = False
value_str, value_tuple = self._prepare_conditions(value_dict)
cdt_str, cdt_value = self._prepare_conditions(conditions)
if None in (value_str, value_tuple, cdt_str, cdt_value):
return result
if len(cdt_value) == 0:
sql = 'UPDATE {} SET {}'.format(table, value_str)
else:
sql = 'UPDATE {} SET {} WHERE {}'.format(table, value_str, cdt_str)
conn = self.connect()
try:
conn.execute(sql, value_tuple + cdt_value)
conn.commit()
result = True
except:
conn.rollback()
finally:
conn.close()
return result
def delete(self, table, conditions):
"""
DELETE FROM table WHERE conditions
:param str table: table to delete
:param list[tuple[str]] conditions: list of 3-tuple; ex: [('author', '=', 'jeff'), ('time', '<', '20160501')]
:rtype: bool
"""
result = False
cdt_str, cdt_value = self._prepare_conditions(conditions)
if cdt_str is None or cdt_value is None:
return result
if len(cdt_value) == 0:
sql = 'DELETE FROM {}'.format(table)
else:
sql = 'DELETE FROM {} WHERE {}'.format(table, cdt_str)
conn = self.connect()
try:
conn.execute(sql, cdt_value)
conn.commit()
result = True
except:
conn.rollback()
finally:
conn.close()
return result
class BlogDB(DB):
"""
:param dict table_name: tables in database. need 'articles', .. attributes
:param list selection: columns to select in self.select_article and self.select_articles
"""
def __init__(self, dbpath='blog.db'):
super(BlogDB, self).__init__(dbpath)
self.table_name = {'articles': 'articles', 'category': 'cats'}
self.selection = []
def add_article(self, slug, title, cat_slug, md_content, html_content, author, time=None):
"""
add an article into database
:param str slug: slug
:param str title: title
:param str cat_slug: category
:param str md_content: markdown content of article
:param str html_content: html content of article
:param str author: author
:param datetime.datetime time: time; if None, it will be datetime.datetime.now()
:rtype: bool
"""
cat_id = self.get_id_by_slug(cat_slug)
result = self.insert(self.table_name['articles'], {
'slug': slug,
'title': title,
'md_content': md_content,
'html_content': html_content,
'author': author,
'cat_id': cat_id,
'time': time or datetime.datetime.now()
})
return result
def delete_article(self, slug):
"""
delete an article
:param str slug: slug of the article
:rtype: bool
"""
result = self.delete(self.table_name['articles'], [('slug', '=', slug)])
return result
def delete_articles(self, conditions):
"""
delete articles
:param list[tuple[str]] conditions: list of 3-tuple; ex: [('author', '=', 'jeff'), ('time', '<', '20160501')]
:rtype: bool
"""
result = self.delete(self.table_name['articles'], conditions)
return result
def update_article(self, value_dict, slug):
"""
update an article
:param dict value_dict: values to update
:param str slug: slug of the article
:rtype: bool
"""
result = self.update(self.table_name['articles'], value_dict, [('slug', '=', slug)])
return result
@lru_cache()
def get_id_by_slug(self, slug):
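        """Look up the integer id of a category row by its slug (memoized via lru_cache)."""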
result = self.select([], self.table_name['category'], [('slug', '=', slug)])
return int(result[0]['id']) if result else None
@lru_cache()
def select_article(self, slug):
"""
:param str slug: slug of the article
:rtype: sqlite3.Row|None
"""
result = self.select(self.selection, self.table_name['articles'], [('slug', '=', slug)])
if len(result) != 0:
return result[0]
return None
def select_articles(self, conditions, limit=20, offset=None):
"""
:param list[tuple[str]] conditions: list of 3-tuple; ex: [('author', '=', 'jeff'), ('time', '<', '20160501')]
:param int limit: limit number
:param int offset:offset number
:rtype: list[sqlite3.Row]
"""
if offset == 0:
offset = None
result = self.select(self.selection, self.table_name['articles'], conditions,
order_by='time', desc=True, limit=limit, offset=offset)
return result
@lru_cache()
def select_articles_by_time(self, begin=None, end=None, limit=20, page_num=0):
"""
        :param datetime.datetime begin: select articles written after *begin*
        :param datetime.datetime end: select articles written before *end*
:param int limit: limit number
:param int page_num: page number
:rtype: list[sqlite3.Row]
"""
conditions = []
if begin is not None:
conditions.append(('time', '>', begin))
if end is not None:
conditions.append(('time', '<', end))
result = self.select_articles(conditions, limit, limit * page_num)
return result
@lru_cache()
def select_articles_by_cat(self, cat_slug, limit=20, page_num=0):
"""
:param str cat_slug: category name
:param int limit: limit number
:param int page_num: page number
:return: list[sqlite3.Row]
"""
cat_id = self.get_id_by_slug(cat_slug)
result = None
if cat_id:
result = self.select_articles([('cat_id', '=', cat_id)], limit, limit * page_num)
return result
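# --- Editor's usage sketch (not part of the original module). The helper
# below is pure string/tuple building, so it runs without a database; the
# BlogDB schema ('articles', 'cats' tables) is an assumption taken from the
# table_name mapping above.
if __name__ == '__main__':
    cdt_str, cdt_value = DB._prepare_conditions(
        [('author', '=', 'jeff'), ('time', '<', '20160501')])
    assert cdt_str == 'author = ? AND time < ?'
    assert cdt_value == ('jeff', '20160501')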
| mit |
moylop260/odoo-dev | addons/hw_scale/__init__.py | 1894 | 1075 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import controllers
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
twiest/openshift-tools | openshift/installer/vendored/openshift-ansible-3.8.36-1/roles/lib_openshift/src/class/oc_env.py | 66 | 4765 | # pylint: skip-file
# flake8: noqa
# pylint: disable=too-many-instance-attributes
class OCEnv(OpenShiftCLI):
''' Class to wrap the oc command line tools '''
container_path = {"pod": "spec.containers[0].env",
"dc": "spec.template.spec.containers[0].env",
"rc": "spec.template.spec.containers[0].env",
}
# pylint allows 5. we need 6
# pylint: disable=too-many-arguments
def __init__(self,
namespace,
kind,
env_vars,
resource_name=None,
kubeconfig='/etc/origin/master/admin.kubeconfig',
verbose=False):
        ''' Constructor for OCEnv '''
super(OCEnv, self).__init__(namespace, kubeconfig=kubeconfig, verbose=verbose)
self.kind = kind
self.name = resource_name
self.env_vars = env_vars
self._resource = None
@property
def resource(self):
''' property function for resource var'''
if not self._resource:
self.get()
return self._resource
@resource.setter
def resource(self, data):
''' setter function for resource var'''
self._resource = data
def key_value_exists(self, key, value):
''' return whether a key, value pair exists '''
return self.resource.exists_env_value(key, value)
def key_exists(self, key):
''' return whether a key exists '''
return self.resource.exists_env_key(key)
def get(self):
'''return environment variables '''
result = self._get(self.kind, self.name)
if result['returncode'] == 0:
if self.kind == 'dc':
self.resource = DeploymentConfig(content=result['results'][0])
result['results'] = self.resource.get(OCEnv.container_path[self.kind]) or []
return result
def delete(self):
''' delete environment variables '''
if self.resource.delete_env_var(self.env_vars.keys()):
return self._replace_content(self.kind, self.name, self.resource.yaml_dict)
return {'returncode': 0, 'changed': False}
def put(self):
'''place env vars into dc '''
for update_key, update_value in self.env_vars.items():
self.resource.update_env_var(update_key, update_value)
return self._replace_content(self.kind, self.name, self.resource.yaml_dict)
# pylint: disable=too-many-return-statements
@staticmethod
def run_ansible(params, check_mode):
'''run the idempotent ansible code'''
ocenv = OCEnv(params['namespace'],
params['kind'],
params['env_vars'],
resource_name=params['name'],
kubeconfig=params['kubeconfig'],
verbose=params['debug'])
state = params['state']
api_rval = ocenv.get()
#####
# Get
#####
if state == 'list':
return {'changed': False, 'results': api_rval['results'], 'state': "list"}
########
# Delete
########
if state == 'absent':
for key in params.get('env_vars', {}).keys():
if ocenv.resource.exists_env_key(key):
if check_mode:
return {'changed': False,
'msg': 'CHECK_MODE: Would have performed a delete.'}
api_rval = ocenv.delete()
return {'changed': True, 'state': 'absent'}
return {'changed': False, 'state': 'absent'}
if state == 'present':
########
# Create
########
for key, value in params.get('env_vars', {}).items():
if not ocenv.key_value_exists(key, value):
if check_mode:
return {'changed': False,
'msg': 'CHECK_MODE: Would have performed a create.'}
# Create it here
api_rval = ocenv.put()
if api_rval['returncode'] != 0:
return {'failed': True, 'msg': api_rval}
# return the created object
api_rval = ocenv.get()
if api_rval['returncode'] != 0:
return {'failed': True, 'msg': api_rval}
return {'changed': True, 'results': api_rval['results'], 'state': 'present'}
return {'changed': False, 'results': api_rval['results'], 'state': 'present'}
return {'failed': True,
'changed': False,
'msg': 'Unknown state passed. %s' % state}
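# --- Editor's sketch (not part of the original file): the params mapping
# run_ansible() reads, inferred from the key accesses above; the values
# below are illustrative only.
EXAMPLE_PARAMS = {
    'namespace': 'default',
    'kind': 'dc',
    'env_vars': {'DEBUG': 'true'},
    'name': 'router',
    'kubeconfig': '/etc/origin/master/admin.kubeconfig',
    'debug': False,
    'state': 'list',
}
# OCEnv.run_ansible(EXAMPLE_PARAMS, check_mode=True) returns a dict with
# 'changed' plus either 'results'/'state' on success or 'failed'/'msg'.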
| apache-2.0 |
yongshengwang/hue | desktop/core/ext-py/Django-1.6.10/tests/forms_tests/tests/__init__.py | 56 | 1155 | from __future__ import absolute_import
from .test_error_messages import (FormsErrorMessagesTestCase,
ModelChoiceFieldErrorMessagesTestCase)
from .test_extra import FormsExtraTestCase, FormsExtraL10NTestCase
from .test_fields import FieldsTests
from .test_forms import FormsTestCase
from .test_formsets import (FormsFormsetTestCase, FormsetAsFooTests,
TestIsBoundBehavior, TestEmptyFormSet)
from .test_input_formats import (LocalizedTimeTests, CustomTimeInputFormatsTests,
SimpleTimeFormatTests, LocalizedDateTests, CustomDateInputFormatsTests,
SimpleDateFormatTests, LocalizedDateTimeTests,
CustomDateTimeInputFormatsTests, SimpleDateTimeFormatTests)
from .test_media import FormsMediaTestCase, StaticFormsMediaTestCase
from .tests import (TestTicket12510, TestTicket14567, ModelFormCallableModelDefault,
FormsModelTestCase, RelatedModelFormTests)
from .test_regressions import FormsRegressionsTestCase
from .test_util import FormsUtilTestCase
from .test_validators import TestFieldWithValidators
from .test_widgets import (FormsWidgetTestCase, FormsI18NWidgetsTestCase,
WidgetTests, LiveWidgetTests, ClearableFileInputTests)
| apache-2.0 |
dataxu/ansible | lib/ansible/modules/cloud/amazon/ec2_eni.py | 8 | 23900 | #!/usr/bin/python
#
# This is a free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This Ansible library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this library. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ec2_eni
short_description: Create and optionally attach an Elastic Network Interface (ENI) to an instance
description:
- Create and optionally attach an Elastic Network Interface (ENI) to an instance. If an ENI ID or private_ip is
provided, the existing ENI (if any) will be modified. The 'attached' parameter controls the attachment status
of the network interface.
version_added: "2.0"
author: "Rob White (@wimnat)"
options:
eni_id:
description:
      - The ID of the ENI (to modify); if null and state is present, a new ENI will be created.
required: false
default: null
instance_id:
description:
- Instance ID that you wish to attach ENI to. Since version 2.2, use the 'attached' parameter to attach or
detach an ENI. Prior to 2.2, to detach an ENI from an instance, use 'None'.
required: false
default: null
private_ip_address:
description:
- Private IP address.
required: false
default: null
subnet_id:
description:
- ID of subnet in which to create the ENI.
required: false
description:
description:
- Optional description of the ENI.
required: false
default: null
security_groups:
description:
- List of security groups associated with the interface. Only used when state=present. Since version 2.2, you
can specify security groups by ID or by name or a combination of both. Prior to 2.2, you can specify only by ID.
required: false
default: null
state:
description:
- Create or delete ENI
required: false
default: present
choices: [ 'present', 'absent' ]
device_index:
description:
- The index of the device for the network interface attachment on the instance.
required: false
default: 0
attached:
description:
      - Specifies if the network interface should be attached to or detached from the instance. If omitted, the
        attachment status won't change.
required: false
default: yes
version_added: 2.2
force_detach:
description:
- Force detachment of the interface. This applies either when explicitly detaching the interface by setting instance_id
to None or when deleting an interface with state=absent.
required: false
default: no
delete_on_termination:
description:
- Delete the interface when the instance it is attached to is terminated. You can only specify this flag when the
interface is being modified, not on creation.
required: false
source_dest_check:
description:
- By default, interfaces perform source/destination checks. NAT instances however need this check to be disabled.
You can only specify this flag when the interface is being modified, not on creation.
required: false
secondary_private_ip_addresses:
description:
- A list of IP addresses to assign as secondary IP addresses to the network interface.
        This option is mutually exclusive with secondary_private_ip_address_count
required: false
version_added: 2.2
purge_secondary_private_ip_addresses:
description:
- To be used with I(secondary_private_ip_addresses) to determine whether or not to remove any secondary IP addresses other than those specified.
Set secondary_private_ip_addresses to an empty list to purge all secondary addresses.
required: false
default: False
version_added: 2.5
secondary_private_ip_address_count:
description:
      - The number of secondary IP addresses to assign to the network interface. This option is mutually exclusive with secondary_private_ip_addresses
required: false
version_added: 2.2
extends_documentation_fragment:
- aws
- ec2
notes:
  - This module identifies an ENI based on either the eni_id, a combination of private_ip_address and subnet_id,
    or a combination of instance_id and device_index. Any of these options will let you specify a particular ENI.
'''
EXAMPLES = '''
# Note: These examples do not set authentication details, see the AWS Guide for details.
# Create an ENI. As no security group is defined, ENI will be created in default security group
- ec2_eni:
private_ip_address: 172.31.0.20
subnet_id: subnet-xxxxxxxx
state: present
# Create an ENI and attach it to an instance
- ec2_eni:
instance_id: i-xxxxxxx
device_index: 1
private_ip_address: 172.31.0.20
subnet_id: subnet-xxxxxxxx
state: present
# Create an ENI with two secondary addresses
- ec2_eni:
subnet_id: subnet-xxxxxxxx
state: present
secondary_private_ip_address_count: 2
# Assign a secondary IP address to an existing ENI
# This will purge any existing IPs
- ec2_eni:
subnet_id: subnet-xxxxxxxx
eni_id: eni-yyyyyyyy
state: present
secondary_private_ip_addresses:
- 172.16.1.1
# Remove any secondary IP addresses from an existing ENI
- ec2_eni:
subnet_id: subnet-xxxxxxxx
eni_id: eni-yyyyyyyy
state: present
secondary_private_ip_addresses:
-
# Destroy an ENI, detaching it from any instance if necessary
- ec2_eni:
eni_id: eni-xxxxxxx
force_detach: yes
state: absent
# Update an ENI
- ec2_eni:
eni_id: eni-xxxxxxx
description: "My new description"
state: present
# Update an ENI identifying it by private_ip_address and subnet_id
- ec2_eni:
subnet_id: subnet-xxxxxxx
private_ip_address: 172.16.1.1
description: "My new description"
# Detach an ENI from an instance
- ec2_eni:
eni_id: eni-xxxxxxx
instance_id: None
state: present
### Delete an interface on termination
# First create the interface
- ec2_eni:
instance_id: i-xxxxxxx
device_index: 1
private_ip_address: 172.31.0.20
subnet_id: subnet-xxxxxxxx
state: present
register: eni
# Modify the interface to enable the delete_on_termination flag
- ec2_eni:
eni_id: "{{ eni.interface.id }}"
delete_on_termination: true
'''
RETURN = '''
interface:
description: Network interface attributes
returned: when state != absent
type: complex
contains:
description:
description: interface description
type: string
sample: Firewall network interface
groups:
description: list of security groups
type: list of dictionaries
sample: [ { "sg-f8a8a9da": "default" } ]
id:
description: network interface id
type: string
sample: "eni-1d889198"
mac_address:
description: interface's physical address
type: string
sample: "00:00:5E:00:53:23"
owner_id:
description: aws account id
type: string
sample: 812381371
private_ip_address:
description: primary ip address of this interface
type: string
sample: 10.20.30.40
private_ip_addresses:
description: list of all private ip addresses associated to this interface
type: list of dictionaries
sample: [ { "primary_address": true, "private_ip_address": "10.20.30.40" } ]
source_dest_check:
description: value of source/dest check flag
type: boolean
sample: True
status:
description: network interface status
type: string
sample: "pending"
subnet_id:
      description: which vpc subnet the interface is bound to
type: string
sample: subnet-b0a0393c
vpc_id:
      description: which vpc this network interface is bound to
type: string
sample: vpc-9a9a9da
'''
import time
import re
try:
import boto.ec2
import boto.vpc
from boto.exception import BotoServerError
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ec2 import (AnsibleAWSError, connect_to_aws,
ec2_argument_spec, get_aws_connection_info,
get_ec2_security_group_ids_from_names)
def get_eni_info(interface):
# Private addresses
private_addresses = []
for ip in interface.private_ip_addresses:
private_addresses.append({'private_ip_address': ip.private_ip_address, 'primary_address': ip.primary})
interface_info = {'id': interface.id,
'subnet_id': interface.subnet_id,
'vpc_id': interface.vpc_id,
'description': interface.description,
'owner_id': interface.owner_id,
'status': interface.status,
'mac_address': interface.mac_address,
'private_ip_address': interface.private_ip_address,
'source_dest_check': interface.source_dest_check,
'groups': dict((group.id, group.name) for group in interface.groups),
'private_ip_addresses': private_addresses
}
if interface.attachment is not None:
interface_info['attachment'] = {'attachment_id': interface.attachment.id,
'instance_id': interface.attachment.instance_id,
'device_index': interface.attachment.device_index,
'status': interface.attachment.status,
'attach_time': interface.attachment.attach_time,
'delete_on_termination': interface.attachment.delete_on_termination,
}
return interface_info
def wait_for_eni(eni, status):
while True:
time.sleep(3)
eni.update()
# If the status is detached we just need attachment to disappear
if eni.attachment is None:
if status == "detached":
break
else:
if status == "attached" and eni.attachment.status == "attached":
break
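# Illustrative sketch (not used by this module): wait_for_eni above polls
# indefinitely; a bounded variant could give up after a fixed number of polls.
def _wait_for_eni_bounded(eni, status, max_polls=100):
    for _ in range(max_polls):
        time.sleep(3)
        eni.update()
        if status == "detached" and eni.attachment is None:
            return True
        if (status == "attached" and eni.attachment is not None
                and eni.attachment.status == "attached"):
            return True
    return False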
def create_eni(connection, vpc_id, module):
instance_id = module.params.get("instance_id")
attached = module.params.get("attached")
if instance_id == 'None':
instance_id = None
device_index = module.params.get("device_index")
subnet_id = module.params.get('subnet_id')
private_ip_address = module.params.get('private_ip_address')
description = module.params.get('description')
security_groups = get_ec2_security_group_ids_from_names(module.params.get('security_groups'), connection, vpc_id=vpc_id, boto3=False)
secondary_private_ip_addresses = module.params.get("secondary_private_ip_addresses")
secondary_private_ip_address_count = module.params.get("secondary_private_ip_address_count")
changed = False
try:
eni = connection.create_network_interface(subnet_id, private_ip_address, description, security_groups)
if attached and instance_id is not None:
try:
eni.attach(instance_id, device_index)
except BotoServerError:
eni.delete()
raise
# Wait to allow creation / attachment to finish
wait_for_eni(eni, "attached")
eni.update()
if secondary_private_ip_address_count is not None:
try:
connection.assign_private_ip_addresses(network_interface_id=eni.id, secondary_private_ip_address_count=secondary_private_ip_address_count)
except BotoServerError:
eni.delete()
raise
if secondary_private_ip_addresses is not None:
try:
connection.assign_private_ip_addresses(network_interface_id=eni.id, private_ip_addresses=secondary_private_ip_addresses)
except BotoServerError:
eni.delete()
raise
changed = True
except BotoServerError as e:
module.fail_json(msg=e.message)
module.exit_json(changed=changed, interface=get_eni_info(eni))
def modify_eni(connection, vpc_id, module, eni):
instance_id = module.params.get("instance_id")
attached = module.params.get("attached")
do_detach = module.params.get('state') == 'detached'
device_index = module.params.get("device_index")
description = module.params.get('description')
security_groups = module.params.get('security_groups')
force_detach = module.params.get("force_detach")
source_dest_check = module.params.get("source_dest_check")
delete_on_termination = module.params.get("delete_on_termination")
secondary_private_ip_addresses = module.params.get("secondary_private_ip_addresses")
purge_secondary_private_ip_addresses = module.params.get("purge_secondary_private_ip_addresses")
secondary_private_ip_address_count = module.params.get("secondary_private_ip_address_count")
changed = False
try:
if description is not None:
if eni.description != description:
connection.modify_network_interface_attribute(eni.id, "description", description)
changed = True
if len(security_groups) > 0:
groups = get_ec2_security_group_ids_from_names(security_groups, connection, vpc_id=vpc_id, boto3=False)
if sorted(get_sec_group_list(eni.groups)) != sorted(groups):
connection.modify_network_interface_attribute(eni.id, "groupSet", groups)
changed = True
if source_dest_check is not None:
if eni.source_dest_check != source_dest_check:
connection.modify_network_interface_attribute(eni.id, "sourceDestCheck", source_dest_check)
changed = True
if delete_on_termination is not None and eni.attachment is not None:
if eni.attachment.delete_on_termination is not delete_on_termination:
connection.modify_network_interface_attribute(eni.id, "deleteOnTermination", delete_on_termination, eni.attachment.id)
changed = True
current_secondary_addresses = [i.private_ip_address for i in eni.private_ip_addresses if not i.primary]
if secondary_private_ip_addresses is not None:
secondary_addresses_to_remove = list(set(current_secondary_addresses) - set(secondary_private_ip_addresses))
if secondary_addresses_to_remove and purge_secondary_private_ip_addresses:
connection.unassign_private_ip_addresses(network_interface_id=eni.id,
private_ip_addresses=list(set(current_secondary_addresses) -
set(secondary_private_ip_addresses)),
dry_run=False)
changed = True
secondary_addresses_to_add = list(set(secondary_private_ip_addresses) - set(current_secondary_addresses))
if secondary_addresses_to_add:
connection.assign_private_ip_addresses(network_interface_id=eni.id,
private_ip_addresses=secondary_addresses_to_add,
secondary_private_ip_address_count=None,
allow_reassignment=False, dry_run=False)
changed = True
if secondary_private_ip_address_count is not None:
current_secondary_address_count = len(current_secondary_addresses)
if secondary_private_ip_address_count > current_secondary_address_count:
connection.assign_private_ip_addresses(network_interface_id=eni.id,
private_ip_addresses=None,
secondary_private_ip_address_count=(secondary_private_ip_address_count -
current_secondary_address_count),
allow_reassignment=False, dry_run=False)
changed = True
elif secondary_private_ip_address_count < current_secondary_address_count:
# How many of these addresses do we want to remove
secondary_addresses_to_remove_count = current_secondary_address_count - secondary_private_ip_address_count
connection.unassign_private_ip_addresses(network_interface_id=eni.id,
private_ip_addresses=current_secondary_addresses[:secondary_addresses_to_remove_count],
dry_run=False)
if attached is True:
if eni.attachment and eni.attachment.instance_id != instance_id:
detach_eni(eni, module)
eni.attach(instance_id, device_index)
wait_for_eni(eni, "attached")
changed = True
if eni.attachment is None:
eni.attach(instance_id, device_index)
wait_for_eni(eni, "attached")
changed = True
elif attached is False:
detach_eni(eni, module)
except BotoServerError as e:
module.fail_json(msg=e.message)
eni.update()
module.exit_json(changed=changed, interface=get_eni_info(eni))
def delete_eni(connection, module):
eni_id = module.params.get("eni_id")
force_detach = module.params.get("force_detach")
try:
eni_result_set = connection.get_all_network_interfaces(eni_id)
eni = eni_result_set[0]
if force_detach is True:
if eni.attachment is not None:
eni.detach(force_detach)
# Wait to allow detachment to finish
wait_for_eni(eni, "detached")
eni.update()
eni.delete()
changed = True
else:
eni.delete()
changed = True
module.exit_json(changed=changed)
except BotoServerError as e:
regex = re.compile('The networkInterface ID \'.*\' does not exist')
if regex.search(e.message) is not None:
module.exit_json(changed=False)
else:
module.fail_json(msg=e.message)
def detach_eni(eni, module):
attached = module.params.get("attached")
force_detach = module.params.get("force_detach")
if eni.attachment is not None:
eni.detach(force_detach)
wait_for_eni(eni, "detached")
if attached:
return
eni.update()
module.exit_json(changed=True, interface=get_eni_info(eni))
else:
module.exit_json(changed=False, interface=get_eni_info(eni))
def uniquely_find_eni(connection, module):
eni_id = module.params.get("eni_id")
private_ip_address = module.params.get('private_ip_address')
subnet_id = module.params.get('subnet_id')
instance_id = module.params.get('instance_id')
device_index = module.params.get('device_index')
attached = module.params.get('attached')
try:
filters = {}
        # proceed only if the parameters unambiguously identify a single ENI
if eni_id is None and private_ip_address is None and (instance_id is None and device_index is None):
return None
if private_ip_address and subnet_id:
filters['private-ip-address'] = private_ip_address
filters['subnet-id'] = subnet_id
if not attached and instance_id and device_index:
filters['attachment.instance-id'] = instance_id
filters['attachment.device-index'] = device_index
if eni_id is None and len(filters) == 0:
return None
eni_result = connection.get_all_network_interfaces(eni_id, filters=filters)
if len(eni_result) == 1:
return eni_result[0]
else:
return None
except BotoServerError as e:
module.fail_json(msg=e.message)
return None
def get_sec_group_list(groups):
# Build list of remote security groups
remote_security_groups = []
for group in groups:
remote_security_groups.append(group.id.encode())
return remote_security_groups
def _get_vpc_id(connection, module, subnet_id):
try:
return connection.get_all_subnets(subnet_ids=[subnet_id])[0].vpc_id
except BotoServerError as e:
module.fail_json(msg=e.message)
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(
dict(
eni_id=dict(default=None, type='str'),
instance_id=dict(default=None, type='str'),
private_ip_address=dict(type='str'),
subnet_id=dict(type='str'),
description=dict(type='str'),
security_groups=dict(default=[], type='list'),
device_index=dict(default=0, type='int'),
state=dict(default='present', choices=['present', 'absent']),
force_detach=dict(default='no', type='bool'),
source_dest_check=dict(default=None, type='bool'),
delete_on_termination=dict(default=None, type='bool'),
secondary_private_ip_addresses=dict(default=None, type='list'),
purge_secondary_private_ip_addresses=dict(default=False, type='bool'),
secondary_private_ip_address_count=dict(default=None, type='int'),
attached=dict(default=None, type='bool')
)
)
module = AnsibleModule(argument_spec=argument_spec,
mutually_exclusive=[
['secondary_private_ip_addresses', 'secondary_private_ip_address_count']
],
required_if=([
('state', 'absent', ['eni_id']),
('attached', True, ['instance_id']),
('purge_secondary_private_ip_addresses', True, ['secondary_private_ip_addresses'])
])
)
if not HAS_BOTO:
module.fail_json(msg='boto required for this module')
region, ec2_url, aws_connect_params = get_aws_connection_info(module)
if region:
try:
connection = connect_to_aws(boto.ec2, region, **aws_connect_params)
vpc_connection = connect_to_aws(boto.vpc, region, **aws_connect_params)
except (boto.exception.NoAuthHandlerFound, AnsibleAWSError) as e:
module.fail_json(msg=str(e))
else:
module.fail_json(msg="region must be specified")
state = module.params.get("state")
if state == 'present':
eni = uniquely_find_eni(connection, module)
if eni is None:
subnet_id = module.params.get("subnet_id")
if subnet_id is None:
module.fail_json(msg="subnet_id is required when creating a new ENI")
vpc_id = _get_vpc_id(vpc_connection, module, subnet_id)
create_eni(connection, vpc_id, module)
else:
vpc_id = eni.vpc_id
modify_eni(connection, vpc_id, module, eni)
elif state == 'absent':
delete_eni(connection, module)
if __name__ == '__main__':
main()
| gpl-3.0 |
diagramsoftware/account-financial-tools | currency_rate_update/model/company.py | 29 | 2418 | # -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (c) 2009 CamptoCamp. All rights reserved.
# @author Nicolas Bessi
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import models, fields, api
class res_company(models.Model):
"""override company to add currency update"""
@api.multi
def _compute_multi_curr_enable(self):
"check if multi company currency is enabled"
company_currency = self.env['res.currency'].search([('company_id',
'!=', False)])
for company in self:
company.multi_company_currency_enable = \
1 if company_currency else 0
@api.one
def button_refresh_currency(self):
"""Refresh the currencies rates !!for all companies now"""
self.services_to_use.refresh_currency()
_inherit = "res.company"
# Activate the currency update
auto_currency_up = fields.Boolean(
string='Automatic Update',
help="Automatic update of the currencies for this company")
# Function field that allows to know the
# multi company currency implementation
multi_company_currency_enable = fields.Boolean(
string='Multi company currency', translate=True,
compute="_compute_multi_curr_enable",
help="When this option is unchecked it will allow users "
"to set a distinct currency updates on each company."
)
# List of services to fetch rates
services_to_use = fields.One2many(
'currency.rate.update.service',
'company_id',
string='Currency update services')
| agpl-3.0 |
safwanrahman/mozillians | vendor-local/lib/python/markdown/extensions/toc.py | 11 | 9491 | """
Table of Contents Extension for Python-Markdown
* * *
(c) 2008 [Jack Miller](http://codezen.org)
Dependencies:
* [Markdown 2.1+](http://packages.python.org/Markdown/)
"""
from __future__ import absolute_import
from __future__ import unicode_literals
from . import Extension
from ..treeprocessors import Treeprocessor
from ..util import etree, parseBoolValue, AMP_SUBSTITUTE
from .headerid import slugify, unique, itertext, stashedHTML2text
import re
def order_toc_list(toc_list):
"""Given an unsorted list with errors and skips, return a nested one.
[{'level': 1}, {'level': 2}]
=>
[{'level': 1, 'children': [{'level': 2, 'children': []}]}]
A wrong list is also converted:
[{'level': 2}, {'level': 1}]
=>
[{'level': 2, 'children': []}, {'level': 1, 'children': []}]
"""
    def build_correct(remaining_list, prev_elements=None):
        # Use None instead of a mutable default: the stack is mutated below,
        # so each top-level call needs a fresh sentinel list.
        if prev_elements is None:
            prev_elements = [{'level': 1000}]
if not remaining_list:
return [], []
current = remaining_list.pop(0)
        if 'children' not in current:
current['children'] = []
if not prev_elements:
# This happens for instance with [8, 1, 1], ie. when some
# header level is outside a scope. We treat it as a
# top-level
next_elements, children = build_correct(remaining_list, [current])
current['children'].append(children)
return [current] + next_elements, []
prev_element = prev_elements.pop()
children = []
next_elements = []
# Is current part of the child list or next list?
if current['level'] > prev_element['level']:
#print "%d is a child of %d" % (current['level'], prev_element['level'])
prev_elements.append(prev_element)
prev_elements.append(current)
prev_element['children'].append(current)
next_elements2, children2 = build_correct(remaining_list, prev_elements)
children += children2
next_elements += next_elements2
else:
#print "%d is ancestor of %d" % (current['level'], prev_element['level'])
if not prev_elements:
#print "No previous elements, so appending to the next set"
next_elements.append(current)
prev_elements = [current]
next_elements2, children2 = build_correct(remaining_list, prev_elements)
current['children'].extend(children2)
else:
#print "Previous elements, comparing to those first"
remaining_list.insert(0, current)
next_elements2, children2 = build_correct(remaining_list, prev_elements)
children.extend(children2)
next_elements += next_elements2
return next_elements, children
ordered_list, __ = build_correct(toc_list)
return ordered_list
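# Illustrative usage (hypothetical values, not part of the original module):
# a flat list of header entries is nested by level, e.g.
#
#     order_toc_list([{'level': 1, 'id': 'a', 'name': 'A'},
#                     {'level': 2, 'id': 'b', 'name': 'B'}])
#
# returns the level-1 entry with the level-2 entry in its 'children' list.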
class TocTreeprocessor(Treeprocessor):
# Iterator wrapper to get parent and child all at once
def iterparent(self, root):
for parent in root.getiterator():
for child in parent:
yield parent, child
def add_anchor(self, c, elem_id): #@ReservedAssignment
anchor = etree.Element("a")
anchor.text = c.text
anchor.attrib["href"] = "#" + elem_id
anchor.attrib["class"] = "toclink"
c.text = ""
for elem in c.getchildren():
anchor.append(elem)
c.remove(elem)
c.append(anchor)
def add_permalink(self, c, elem_id):
permalink = etree.Element("a")
permalink.text = ("%spara;" % AMP_SUBSTITUTE
if self.use_permalinks is True else self.use_permalinks)
permalink.attrib["href"] = "#" + elem_id
permalink.attrib["class"] = "headerlink"
permalink.attrib["title"] = "Permanent link"
c.append(permalink)
def build_toc_etree(self, div, toc_list):
# Add title to the div
if self.config["title"]:
header = etree.SubElement(div, "span")
header.attrib["class"] = "toctitle"
header.text = self.config["title"]
def build_etree_ul(toc_list, parent):
ul = etree.SubElement(parent, "ul")
for item in toc_list:
# List item link, to be inserted into the toc div
li = etree.SubElement(ul, "li")
link = etree.SubElement(li, "a")
link.text = item.get('name', '')
link.attrib["href"] = '#' + item.get('id', '')
if item['children']:
build_etree_ul(item['children'], li)
return ul
return build_etree_ul(toc_list, div)
def run(self, doc):
div = etree.Element("div")
div.attrib["class"] = "toc"
header_rgx = re.compile("[Hh][123456]")
self.use_anchors = parseBoolValue(self.config["anchorlink"])
self.use_permalinks = parseBoolValue(self.config["permalink"], False)
if self.use_permalinks is None:
self.use_permalinks = self.config["permalink"]
# Get a list of id attributes
used_ids = set()
for c in doc.getiterator():
if "id" in c.attrib:
used_ids.add(c.attrib["id"])
toc_list = []
marker_found = False
for (p, c) in self.iterparent(doc):
text = ''.join(itertext(c)).strip()
if not text:
continue
# To keep the output from screwing up the
# validation by putting a <div> inside of a <p>
# we actually replace the <p> in its entirety.
# We do not allow the marker inside a header as that
            # would cause an endless loop of placing a new TOC
            # inside a previously generated TOC.
if c.text and c.text.strip() == self.config["marker"] and \
not header_rgx.match(c.tag) and c.tag not in ['pre', 'code']:
for i in range(len(p)):
if p[i] == c:
p[i] = div
break
marker_found = True
if header_rgx.match(c.tag):
# Do not override pre-existing ids
if not "id" in c.attrib:
elem_id = stashedHTML2text(text, self.markdown)
elem_id = unique(self.config["slugify"](elem_id, '-'), used_ids)
c.attrib["id"] = elem_id
else:
elem_id = c.attrib["id"]
tag_level = int(c.tag[-1])
toc_list.append({'level': tag_level,
'id': elem_id,
'name': text})
if self.use_anchors:
self.add_anchor(c, elem_id)
if self.use_permalinks:
self.add_permalink(c, elem_id)
toc_list_nested = order_toc_list(toc_list)
self.build_toc_etree(div, toc_list_nested)
prettify = self.markdown.treeprocessors.get('prettify')
if prettify: prettify.run(div)
if not marker_found:
# serialize and attach to markdown instance.
toc = self.markdown.serializer(div)
for pp in self.markdown.postprocessors.values():
toc = pp.run(toc)
self.markdown.toc = toc
class TocExtension(Extension):
TreeProcessorClass = TocTreeprocessor
def __init__(self, configs=[]):
self.config = { "marker" : ["[TOC]",
"Text to find and replace with Table of Contents -"
"Defaults to \"[TOC]\""],
"slugify" : [slugify,
"Function to generate anchors based on header text-"
"Defaults to the headerid ext's slugify function."],
"title" : [None,
"Title to insert into TOC <div> - "
"Defaults to None"],
"anchorlink" : [0,
"1 if header should be a self link"
"Defaults to 0"],
"permalink" : [0,
"1 or link text if a Sphinx-style permalink should be added",
"Defaults to 0"]
}
for key, value in configs:
self.setConfig(key, value)
def extendMarkdown(self, md, md_globals):
tocext = self.TreeProcessorClass(md)
tocext.config = self.getConfigs()
# Headerid ext is set to '>prettify'. With this set to '_end',
        # it should always come after headerid ext (and honor ids assigned
# by the header id extension) if both are used. Same goes for
# attr_list extension. This must come last because we don't want
# to redefine ids after toc is created. But we do want toc prettified.
md.treeprocessors.add("toc", tocext, "_end")
def makeExtension(configs={}):
return TocExtension(configs=configs)
| bsd-3-clause |
ZG-Tennis/django-cropduster | cropduster/utils.py | 2 | 2261 | from PIL import Image
from decimal import Decimal
def aspect_ratio(width, height):
""" Defines aspect ratio from two sizes with consistent rounding method """
if not height or not width:
return 1
else:
return Decimal(str(round(float(width)/float(height), 2)))
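# Usage sketch (illustrative values): aspect_ratio(1920, 1080) returns
# Decimal('1.78'); a zero width or height falls back to a ratio of 1 so
# callers never divide by zero.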
def rescale(img, width=0, height=0, auto_crop=True, **kwargs):
"""
Rescale the given image. If one size is not given, image is scaled down at current aspect ratio
img -- a PIL image object
Auto-crop option does a dumb crop that chops the image to the needed size
"""
if width <= 0:
width = float(img.size[0] * height) /float(img.size[1])
if height <= 0:
height = float(img.size[1] * width) /float(img.size[0])
max_width = width
max_height = height
src_width, src_height = img.size
src_ratio = float(src_width) / float(src_height)
dst_width, dst_height = max_width, max_height
dst_ratio = float(dst_width) / float(dst_height)
if auto_crop:
if dst_ratio < src_ratio:
crop_height = src_height
crop_width = crop_height * dst_ratio
x_offset = float(src_width - crop_width) / 2
y_offset = 0
else:
crop_width = src_width
crop_height = crop_width / dst_ratio
x_offset = 0
y_offset = float(src_height - crop_height) / 3
img = img.crop((
int(x_offset),
int(y_offset),
int(x_offset + crop_width),
int(y_offset + crop_height)
))
img = img.resize((int(dst_width), int(dst_height)), Image.ANTIALIAS)
# if not cropping, don't squish, use w/h as max values to resize on
else:
if (width / src_ratio) > height:
# height larger than intended
dst_width = width
dst_height = width / src_ratio
else:
# width larger than intended
dst_width = src_ratio * height
dst_height = height
img = img.resize((int(dst_width), int(dst_height)), Image.ANTIALIAS)
img = img.crop([0, 0, int(width), int(height)])
return img
def create_cropped_image(path=None, x=0, y=0, width=0, height=0):
"""
Crop image given a starting (x, y) position and a width and height of the cropped area
"""
if path is None:
raise ValueError("A path must be specified")
img = Image.open(path)
img.copy()
img.load()
img = img.crop((x, y, x + width, y + height))
img.load()
return img
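# Usage sketch (hypothetical path): crop a 100x100 box from the top-left
# corner of an image on disk.
#
#     img = create_cropped_image(path='/tmp/example.jpg', x=0, y=0,
#                                width=100, height=100)
#     img.save('/tmp/example-cropped.jpg')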
| bsd-2-clause |
bsmr-ansible/ansible-modules-core | cloud/amazon/ec2_metric_alarm.py | 53 | 10776 | #!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = """
module: ec2_metric_alarm
short_description: "Create/update or delete AWS Cloudwatch 'metric alarms'"
description:
- Can create or delete AWS metric alarms.
- Metrics you wish to alarm on must already exist.
version_added: "1.6"
author: "Zacharie Eakin (@zeekin)"
options:
state:
description:
- register or deregister the alarm
required: true
choices: ['present', 'absent']
name:
description:
- Unique name for the alarm
required: true
metric:
description:
- Name of the monitored metric (e.g. CPUUtilization)
- Metric must already exist
required: false
namespace:
description:
- Name of the appropriate namespace ('AWS/EC2', 'System/Linux', etc.), which determines the category it will appear under in cloudwatch
required: false
statistic:
description:
- Operation applied to the metric
- Works in conjunction with period and evaluation_periods to determine the comparison value
required: false
options: ['SampleCount','Average','Sum','Minimum','Maximum']
comparison:
description:
- Determines how the threshold value is compared
required: false
options: ['<=','<','>','>=']
threshold:
description:
- Sets the min/max bound for triggering the alarm
required: false
period:
description:
- The time (in seconds) between metric evaluations
required: false
evaluation_periods:
description:
      - The number of periods over which the metric is evaluated before the alarm state is determined
required: false
unit:
description:
- The threshold's unit of measurement
required: false
options: ['Seconds','Microseconds','Milliseconds','Bytes','Kilobytes','Megabytes','Gigabytes','Terabytes','Bits','Kilobits','Megabits','Gigabits','Terabits','Percent','Count','Bytes/Second','Kilobytes/Second','Megabytes/Second','Gigabytes/Second','Terabytes/Second','Bits/Second','Kilobits/Second','Megabits/Second','Gigabits/Second','Terabits/Second','Count/Second','None']
description:
description:
- A longer description of the alarm
required: false
dimensions:
description:
- Describes to what the alarm is applied
required: false
alarm_actions:
description:
      - A list of the names of action(s) to take when the alarm is in the 'alarm' status
required: false
insufficient_data_actions:
description:
- A list of the names of action(s) to take when the alarm is in the 'insufficient_data' status
required: false
ok_actions:
description:
- A list of the names of action(s) to take when the alarm is in the 'ok' status
required: false
extends_documentation_fragment:
- aws
- ec2
"""
EXAMPLES = '''
- name: create alarm
ec2_metric_alarm:
state: present
region: ap-southeast-2
name: "cpu-low"
metric: "CPUUtilization"
namespace: "AWS/EC2"
statistic: Average
comparison: "<="
threshold: 5.0
period: 300
evaluation_periods: 3
unit: "Percent"
description: "This will alarm when a bamboo slave's cpu usage average is lower than 5% for 15 minutes "
dimensions: {'InstanceId':'i-XXX'}
alarm_actions: ["action1","action2"]
'''
try:
import boto.ec2.cloudwatch
from boto.ec2.cloudwatch import CloudWatchConnection, MetricAlarm
from boto.exception import BotoServerError
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
def create_metric_alarm(connection, module):
name = module.params.get('name')
metric = module.params.get('metric')
namespace = module.params.get('namespace')
statistic = module.params.get('statistic')
comparison = module.params.get('comparison')
threshold = module.params.get('threshold')
period = module.params.get('period')
evaluation_periods = module.params.get('evaluation_periods')
unit = module.params.get('unit')
description = module.params.get('description')
dimensions = module.params.get('dimensions')
alarm_actions = module.params.get('alarm_actions')
insufficient_data_actions = module.params.get('insufficient_data_actions')
ok_actions = module.params.get('ok_actions')
alarms = connection.describe_alarms(alarm_names=[name])
if not alarms:
alm = MetricAlarm(
name=name,
metric=metric,
namespace=namespace,
statistic=statistic,
comparison=comparison,
threshold=threshold,
period=period,
evaluation_periods=evaluation_periods,
unit=unit,
description=description,
dimensions=dimensions,
alarm_actions=alarm_actions,
insufficient_data_actions=insufficient_data_actions,
ok_actions=ok_actions
)
try:
connection.create_alarm(alm)
changed = True
alarms = connection.describe_alarms(alarm_names=[name])
except BotoServerError as e:
module.fail_json(msg=str(e))
else:
alarm = alarms[0]
changed = False
for attr in ('comparison','metric','namespace','statistic','threshold','period','evaluation_periods','unit','description'):
if getattr(alarm, attr) != module.params.get(attr):
changed = True
setattr(alarm, attr, module.params.get(attr))
        # This works around a boto quirk: the symbolic comparators ('<=', '<',
        # '>', '>=') cannot be assigned when modifying an existing alarm, so
        # translate them to their long-form names first.
comparison = alarm.comparison
comparisons = {'<=' : 'LessThanOrEqualToThreshold', '<' : 'LessThanThreshold', '>=' : 'GreaterThanOrEqualToThreshold', '>' : 'GreaterThanThreshold'}
alarm.comparison = comparisons[comparison]
dim1 = module.params.get('dimensions')
dim2 = alarm.dimensions
for keys in dim1:
if not isinstance(dim1[keys], list):
dim1[keys] = [dim1[keys]]
if keys not in dim2 or dim1[keys] != dim2[keys]:
changed=True
setattr(alarm, 'dimensions', dim1)
for attr in ('alarm_actions','insufficient_data_actions','ok_actions'):
action = module.params.get(attr) or []
if getattr(alarm, attr) != action:
changed = True
setattr(alarm, attr, module.params.get(attr))
try:
if changed:
connection.create_alarm(alarm)
except BotoServerError as e:
module.fail_json(msg=str(e))
result = alarms[0]
module.exit_json(changed=changed, name=result.name,
actions_enabled=result.actions_enabled,
alarm_actions=result.alarm_actions,
alarm_arn=result.alarm_arn,
comparison=result.comparison,
description=result.description,
dimensions=result.dimensions,
evaluation_periods=result.evaluation_periods,
insufficient_data_actions=result.insufficient_data_actions,
last_updated=result.last_updated,
metric=result.metric,
namespace=result.namespace,
ok_actions=result.ok_actions,
period=result.period,
state_reason=result.state_reason,
state_value=result.state_value,
statistic=result.statistic,
threshold=result.threshold,
unit=result.unit)
def delete_metric_alarm(connection, module):
name = module.params.get('name')
alarms = connection.describe_alarms(alarm_names=[name])
if alarms:
try:
connection.delete_alarms([name])
module.exit_json(changed=True)
except BotoServerError as e:
module.fail_json(msg=str(e))
else:
module.exit_json(changed=False)
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(
dict(
name=dict(required=True, type='str'),
metric=dict(type='str'),
namespace=dict(type='str'),
statistic=dict(type='str', choices=['SampleCount', 'Average', 'Sum', 'Minimum', 'Maximum']),
comparison=dict(type='str', choices=['<=', '<', '>', '>=']),
threshold=dict(type='float'),
period=dict(type='int'),
unit=dict(type='str', choices=['Seconds', 'Microseconds', 'Milliseconds', 'Bytes', 'Kilobytes', 'Megabytes', 'Gigabytes', 'Terabytes', 'Bits', 'Kilobits', 'Megabits', 'Gigabits', 'Terabits', 'Percent', 'Count', 'Bytes/Second', 'Kilobytes/Second', 'Megabytes/Second', 'Gigabytes/Second', 'Terabytes/Second', 'Bits/Second', 'Kilobits/Second', 'Megabits/Second', 'Gigabits/Second', 'Terabits/Second', 'Count/Second', 'None']),
evaluation_periods=dict(type='int'),
description=dict(type='str'),
dimensions=dict(type='dict', default={}),
alarm_actions=dict(type='list'),
insufficient_data_actions=dict(type='list'),
ok_actions=dict(type='list'),
state=dict(default='present', choices=['present', 'absent']),
)
)
module = AnsibleModule(argument_spec=argument_spec)
if not HAS_BOTO:
module.fail_json(msg='boto required for this module')
state = module.params.get('state')
region, ec2_url, aws_connect_params = get_aws_connection_info(module)
if region:
try:
connection = connect_to_aws(boto.ec2.cloudwatch, region, **aws_connect_params)
except (boto.exception.NoAuthHandlerFound, AnsibleAWSError) as e:
module.fail_json(msg=str(e))
else:
module.fail_json(msg="region must be specified")
if state == 'present':
create_metric_alarm(connection, module)
elif state == 'absent':
delete_metric_alarm(connection, module)
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *
if __name__ == '__main__':
main()
| gpl-3.0 |
hsuchie4/TACTIC | 3rd_party/CherryPy/cherrypy/lib/sessions.py | 6 | 25932 | """Session implementation for CherryPy.
We use cherrypy.request to store some convenient variables as
well as data about the session for the current request. Instead of
polluting cherrypy.request we use a Session object bound to
cherrypy.session to store these variables.
"""
import datetime
import os
try:
import cPickle as pickle
except ImportError:
import pickle
import random
try:
# Python 2.5+
from hashlib import sha1 as sha
except ImportError:
from sha import new as sha
import time
import threading
import types
from warnings import warn
import cherrypy
from cherrypy.lib import httputil
missing = object()
class Session(object):
"""A CherryPy dict-like Session object (one per request)."""
__metaclass__ = cherrypy._AttributeDocstrings
_id = None
id_observers = None
id_observers__doc = "A list of callbacks to which to pass new id's."
id__doc = "The current session ID."
def _get_id(self):
return self._id
def _set_id(self, value):
self._id = value
for o in self.id_observers:
o(value)
id = property(_get_id, _set_id, doc=id__doc)
timeout = 60
timeout__doc = "Number of minutes after which to delete session data."
locked = False
locked__doc = """
If True, this session instance has exclusive read/write access
to session data."""
loaded = False
loaded__doc = """
If True, data has been retrieved from storage. This should happen
automatically on the first attempt to access session data."""
clean_thread = None
clean_thread__doc = "Class-level Monitor which calls self.clean_up."
clean_freq = 5
clean_freq__doc = "The poll rate for expired session cleanup in minutes."
originalid = None
originalid__doc = "The session id passed by the client. May be missing or unsafe."
missing = False
missing__doc = "True if the session requested by the client did not exist."
regenerated = False
regenerated__doc = """
True if the application called session.regenerate(). This is not set by
internal calls to regenerate the session id."""
    debug = False
def __init__(self, id=None, **kwargs):
self.id_observers = []
self._data = {}
for k, v in kwargs.items():
setattr(self, k, v)
self.originalid = id
self.missing = False
if id is None:
if self.debug:
cherrypy.log('No id given; making a new one', 'TOOLS.SESSIONS')
self._regenerate()
else:
self.id = id
if not self._exists():
if self.debug:
cherrypy.log('Expired or malicious session %r; '
'making a new one' % id, 'TOOLS.SESSIONS')
# Expired or malicious session. Make a new one.
# See http://www.cherrypy.org/ticket/709.
self.id = None
self.missing = True
self._regenerate()
def regenerate(self):
"""Replace the current session (with a new id)."""
self.regenerated = True
self._regenerate()
def _regenerate(self):
if self.id is not None:
self.delete()
old_session_was_locked = self.locked
if old_session_was_locked:
self.release_lock()
self.id = None
while self.id is None:
self.id = self.generate_id()
# Assert that the generated id is not already stored.
if self._exists():
self.id = None
if old_session_was_locked:
self.acquire_lock()
def clean_up(self):
"""Clean up expired sessions."""
pass
try:
os.urandom(20)
except (AttributeError, NotImplementedError):
# os.urandom not available until Python 2.4. Fall back to random.random.
def generate_id(self):
"""Return a new session id."""
return sha('%s' % random.random()).hexdigest()
else:
def generate_id(self):
"""Return a new session id."""
return os.urandom(20).encode('hex')
def save(self):
"""Save session data."""
try:
# If session data has never been loaded then it's never been
# accessed: no need to save it
if self.loaded:
t = datetime.timedelta(seconds = self.timeout * 60)
expiration_time = datetime.datetime.now() + t
if self.debug:
cherrypy.log('Saving with expiry %s' % expiration_time,
'TOOLS.SESSIONS')
self._save(expiration_time)
finally:
if self.locked:
# Always release the lock if the user didn't release it
self.release_lock()
def load(self):
"""Copy stored session data into this session instance."""
data = self._load()
# data is either None or a tuple (session_data, expiration_time)
if data is None or data[1] < datetime.datetime.now():
if self.debug:
cherrypy.log('Expired session, flushing data', 'TOOLS.SESSIONS')
self._data = {}
else:
self._data = data[0]
self.loaded = True
# Stick the clean_thread in the class, not the instance.
# The instances are created and destroyed per-request.
cls = self.__class__
if self.clean_freq and not cls.clean_thread:
# clean_up is in instancemethod and not a classmethod,
# so that tool config can be accessed inside the method.
t = cherrypy.process.plugins.Monitor(
cherrypy.engine, self.clean_up, self.clean_freq * 60,
name='Session cleanup')
t.subscribe()
cls.clean_thread = t
t.start()
def delete(self):
"""Delete stored session data."""
self._delete()
def __getitem__(self, key):
if not self.loaded: self.load()
return self._data[key]
def __setitem__(self, key, value):
if not self.loaded: self.load()
self._data[key] = value
def __delitem__(self, key):
if not self.loaded: self.load()
del self._data[key]
def pop(self, key, default=missing):
"""Remove the specified key and return the corresponding value.
If key is not found, default is returned if given,
otherwise KeyError is raised.
"""
if not self.loaded: self.load()
if default is missing:
return self._data.pop(key)
else:
return self._data.pop(key, default)
def __contains__(self, key):
if not self.loaded: self.load()
return key in self._data
def has_key(self, key):
"""D.has_key(k) -> True if D has a key k, else False."""
if not self.loaded: self.load()
return key in self._data
def get(self, key, default=None):
"""D.get(k[,d]) -> D[k] if k in D, else d. d defaults to None."""
if not self.loaded: self.load()
return self._data.get(key, default)
def update(self, d):
"""D.update(E) -> None. Update D from E: for k in E: D[k] = E[k]."""
if not self.loaded: self.load()
self._data.update(d)
def setdefault(self, key, default=None):
"""D.setdefault(k[,d]) -> D.get(k,d), also set D[k]=d if k not in D."""
if not self.loaded: self.load()
return self._data.setdefault(key, default)
def clear(self):
"""D.clear() -> None. Remove all items from D."""
if not self.loaded: self.load()
self._data.clear()
def keys(self):
"""D.keys() -> list of D's keys."""
if not self.loaded: self.load()
return self._data.keys()
def items(self):
"""D.items() -> list of D's (key, value) pairs, as 2-tuples."""
if not self.loaded: self.load()
return self._data.items()
def values(self):
"""D.values() -> list of D's values."""
if not self.loaded: self.load()
return self._data.values()
class RamSession(Session):
# Class-level objects. Don't rebind these!
cache = {}
locks = {}
def clean_up(self):
"""Clean up expired sessions."""
now = datetime.datetime.now()
for id, (data, expiration_time) in self.cache.items():
if expiration_time <= now:
try:
del self.cache[id]
except KeyError:
pass
try:
del self.locks[id]
except KeyError:
pass
def _exists(self):
return self.id in self.cache
def _load(self):
return self.cache.get(self.id)
def _save(self, expiration_time):
self.cache[self.id] = (self._data, expiration_time)
def _delete(self):
self.cache.pop(self.id, None)
def acquire_lock(self):
"""Acquire an exclusive lock on the currently-loaded session data."""
self.locked = True
self.locks.setdefault(self.id, threading.RLock()).acquire()
def release_lock(self):
"""Release the lock on the currently-loaded session data."""
self.locks[self.id].release()
self.locked = False
def __len__(self):
"""Return the number of active sessions."""
return len(self.cache)
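# Configuration sketch (illustrative): RAM-backed sessions are enabled with
# the standard session tool settings, assuming a typical application config.
#
#     cherrypy.config.update({
#         'tools.sessions.on': True,
#         'tools.sessions.storage_type': 'ram',
#         'tools.sessions.timeout': 60,
#     })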
class FileSession(Session):
"""Implementation of the File backend for sessions
storage_path: the folder where session data will be saved. Each session
    will be saved as a pickled (data, expiration_time) tuple in its own file;
the filename will be self.SESSION_PREFIX + self.id.
"""
SESSION_PREFIX = 'session-'
LOCK_SUFFIX = '.lock'
pickle_protocol = pickle.HIGHEST_PROTOCOL
def __init__(self, id=None, **kwargs):
# The 'storage_path' arg is required for file-based sessions.
kwargs['storage_path'] = os.path.abspath(kwargs['storage_path'])
Session.__init__(self, id=id, **kwargs)
def setup(cls, **kwargs):
"""Set up the storage system for file-based sessions.
This should only be called once per process; this will be done
automatically when using sessions.init (as the built-in Tool does).
"""
# The 'storage_path' arg is required for file-based sessions.
kwargs['storage_path'] = os.path.abspath(kwargs['storage_path'])
for k, v in kwargs.items():
setattr(cls, k, v)
# Warn if any lock files exist at startup.
lockfiles = [fname for fname in os.listdir(cls.storage_path)
if (fname.startswith(cls.SESSION_PREFIX)
and fname.endswith(cls.LOCK_SUFFIX))]
if lockfiles:
plural = ('', 's')[len(lockfiles) > 1]
warn("%s session lockfile%s found at startup. If you are "
"only running one process, then you may need to "
"manually delete the lockfiles found at %r."
% (len(lockfiles), plural, cls.storage_path))
setup = classmethod(setup)
def _get_file_path(self):
f = os.path.join(self.storage_path, self.SESSION_PREFIX + self.id)
if not os.path.abspath(f).startswith(self.storage_path):
raise cherrypy.HTTPError(400, "Invalid session id in cookie.")
return f
def _exists(self):
path = self._get_file_path()
return os.path.exists(path)
def _load(self, path=None):
if path is None:
path = self._get_file_path()
try:
f = open(path, "rb")
try:
return pickle.load(f)
finally:
f.close()
except (IOError, EOFError):
return None
def _save(self, expiration_time):
f = open(self._get_file_path(), "wb")
try:
pickle.dump((self._data, expiration_time), f, self.pickle_protocol)
finally:
f.close()
def _delete(self):
try:
os.unlink(self._get_file_path())
except OSError:
pass
def acquire_lock(self, path=None):
"""Acquire an exclusive lock on the currently-loaded session data."""
if path is None:
path = self._get_file_path()
path += self.LOCK_SUFFIX
while True:
try:
lockfd = os.open(path, os.O_CREAT|os.O_WRONLY|os.O_EXCL)
except OSError:
time.sleep(0.1)
else:
os.close(lockfd)
break
self.locked = True
def release_lock(self, path=None):
"""Release the lock on the currently-loaded session data."""
if path is None:
path = self._get_file_path()
os.unlink(path + self.LOCK_SUFFIX)
self.locked = False
def clean_up(self):
"""Clean up expired sessions."""
now = datetime.datetime.now()
# Iterate over all session files in self.storage_path
for fname in os.listdir(self.storage_path):
if (fname.startswith(self.SESSION_PREFIX)
and not fname.endswith(self.LOCK_SUFFIX)):
# We have a session file: lock and load it and check
                # if it's expired. If loading fails, never mind.
path = os.path.join(self.storage_path, fname)
self.acquire_lock(path)
try:
contents = self._load(path)
# _load returns None on IOError
if contents is not None:
data, expiration_time = contents
if expiration_time < now:
# Session expired: deleting it
os.unlink(path)
finally:
self.release_lock(path)
def __len__(self):
"""Return the number of active sessions."""
return len([fname for fname in os.listdir(self.storage_path)
if (fname.startswith(self.SESSION_PREFIX)
and not fname.endswith(self.LOCK_SUFFIX))])
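# Configuration sketch (illustrative): file-backed sessions additionally need
# a storage_path pointing at a writable directory (the path is hypothetical).
#
#     cherrypy.config.update({
#         'tools.sessions.on': True,
#         'tools.sessions.storage_type': 'file',
#         'tools.sessions.storage_path': '/tmp/sessions',
#     })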
class PostgresqlSession(Session):
""" Implementation of the PostgreSQL backend for sessions. It assumes
a table like this:
create table session (
id varchar(40),
data text,
expiration_time timestamp
)
You must provide your own get_db function.
"""
pickle_protocol = pickle.HIGHEST_PROTOCOL
def __init__(self, id=None, **kwargs):
Session.__init__(self, id, **kwargs)
self.cursor = self.db.cursor()
def setup(cls, **kwargs):
"""Set up the storage system for Postgres-based sessions.
This should only be called once per process; this will be done
automatically when using sessions.init (as the built-in Tool does).
"""
for k, v in kwargs.items():
setattr(cls, k, v)
        # Bind at class level; 'self' does not exist inside a classmethod.
        cls.db = cls.get_db()
setup = classmethod(setup)
def __del__(self):
if self.cursor:
self.cursor.close()
self.db.commit()
def _exists(self):
# Select session data from table
self.cursor.execute('select data, expiration_time from session '
'where id=%s', (self.id,))
rows = self.cursor.fetchall()
return bool(rows)
def _load(self):
# Select session data from table
self.cursor.execute('select data, expiration_time from session '
'where id=%s', (self.id,))
rows = self.cursor.fetchall()
if not rows:
return None
pickled_data, expiration_time = rows[0]
data = pickle.loads(pickled_data)
return data, expiration_time
def _save(self, expiration_time):
pickled_data = pickle.dumps(self._data, self.pickle_protocol)
self.cursor.execute('update session set data = %s, '
'expiration_time = %s where id = %s',
(pickled_data, expiration_time, self.id))
def _delete(self):
self.cursor.execute('delete from session where id=%s', (self.id,))
def acquire_lock(self):
"""Acquire an exclusive lock on the currently-loaded session data."""
# We use the "for update" clause to lock the row
self.locked = True
self.cursor.execute('select id from session where id=%s for update',
(self.id,))
def release_lock(self):
"""Release the lock on the currently-loaded session data."""
# We just close the cursor and that will remove the lock
# introduced by the "for update" clause
self.cursor.close()
self.locked = False
def clean_up(self):
"""Clean up expired sessions."""
self.cursor.execute('delete from session where expiration_time < %s',
(datetime.datetime.now(),))
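# Usage sketch (illustrative): this backend expects a get_db callable that
# returns a DB-API connection; the DSN below is hypothetical.
#
#     import psycopg2
#     def get_db():
#         return psycopg2.connect('dbname=sessions')
#
# It would then be passed via config, e.g. 'tools.sessions.get_db': get_db.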
class MemcachedSession(Session):
# The most popular memcached client for Python isn't thread-safe.
# Wrap all .get and .set operations in a single lock.
mc_lock = threading.RLock()
    # This is a separate set of locks per session id.
locks = {}
servers = ['127.0.0.1:11211']
def setup(cls, **kwargs):
"""Set up the storage system for memcached-based sessions.
This should only be called once per process; this will be done
automatically when using sessions.init (as the built-in Tool does).
"""
for k, v in kwargs.items():
setattr(cls, k, v)
import memcache
cls.cache = memcache.Client(cls.servers)
setup = classmethod(setup)
def _exists(self):
self.mc_lock.acquire()
try:
return bool(self.cache.get(self.id))
finally:
self.mc_lock.release()
def _load(self):
self.mc_lock.acquire()
try:
return self.cache.get(self.id)
finally:
self.mc_lock.release()
def _save(self, expiration_time):
# Send the expiration time as "Unix time" (seconds since 1/1/1970)
td = int(time.mktime(expiration_time.timetuple()))
self.mc_lock.acquire()
try:
if not self.cache.set(self.id, (self._data, expiration_time), td):
raise AssertionError("Session data for id %r not set." % self.id)
finally:
self.mc_lock.release()
def _delete(self):
self.cache.delete(self.id)
def acquire_lock(self):
"""Acquire an exclusive lock on the currently-loaded session data."""
self.locked = True
self.locks.setdefault(self.id, threading.RLock()).acquire()
def release_lock(self):
"""Release the lock on the currently-loaded session data."""
self.locks[self.id].release()
self.locked = False
def __len__(self):
"""Return the number of active sessions."""
raise NotImplementedError
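# Configuration sketch (illustrative): memcached-backed sessions, pointing at
# a hypothetical memcached host.
#
#     cherrypy.config.update({
#         'tools.sessions.on': True,
#         'tools.sessions.storage_type': 'memcached',
#         'tools.sessions.servers': ['127.0.0.1:11211'],
#     })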
# Hook functions (for CherryPy tools)
def save():
"""Save any changed session data."""
if not hasattr(cherrypy.serving, "session"):
return
request = cherrypy.serving.request
response = cherrypy.serving.response
# Guard against running twice
if hasattr(request, "_sessionsaved"):
return
request._sessionsaved = True
if response.stream:
# If the body is being streamed, we have to save the data
# *after* the response has been written out
request.hooks.attach('on_end_request', cherrypy.session.save)
else:
# If the body is not being streamed, we save the data now
# (so we can release the lock).
if isinstance(response.body, types.GeneratorType):
response.collapse_body()
cherrypy.session.save()
save.failsafe = True
def close():
"""Close the session object for this request."""
sess = getattr(cherrypy.serving, "session", None)
if getattr(sess, "locked", False):
# If the session is still locked we release the lock
sess.release_lock()
close.failsafe = True
close.priority = 90
def init(storage_type='ram', path=None, path_header=None, name='session_id',
timeout=60, domain=None, secure=False, clean_freq=5,
persistent=True, debug=False, **kwargs):
"""Initialize session object (using cookies).
    storage_type: one of 'ram', 'file', 'postgresql', 'memcached'. This will be used
to look up the corresponding class in cherrypy.lib.sessions
globals. For example, 'file' will use the FileSession class.
path: the 'path' value to stick in the response cookie metadata.
path_header: if 'path' is None (the default), then the response
cookie 'path' will be pulled from request.headers[path_header].
name: the name of the cookie.
timeout: the expiration timeout (in minutes) for the stored session data.
If 'persistent' is True (the default), this is also the timeout
for the cookie.
domain: the cookie domain.
secure: if False (the default) the cookie 'secure' value will not
be set. If True, the cookie 'secure' value will be set (to 1).
clean_freq (minutes): the poll rate for expired session cleanup.
persistent: if True (the default), the 'timeout' argument will be used
to expire the cookie. If False, the cookie will not have an expiry,
and the cookie will be a "session cookie" which expires when the
browser is closed.
Any additional kwargs will be bound to the new Session instance,
and may be specific to the storage type. See the subclass of Session
you're using for more information.
"""
request = cherrypy.serving.request
# Guard against running twice
if hasattr(request, "_session_init_flag"):
return
request._session_init_flag = True
# Check if request came with a session ID
id = None
if name in request.cookie:
id = request.cookie[name].value
if debug:
cherrypy.log('ID obtained from request.cookie: %r' % id,
'TOOLS.SESSIONS')
# Find the storage class and call setup (first time only).
storage_class = storage_type.title() + 'Session'
storage_class = globals()[storage_class]
if not hasattr(cherrypy, "session"):
if hasattr(storage_class, "setup"):
storage_class.setup(**kwargs)
# Create and attach a new Session instance to cherrypy.serving.
# It will possess a reference to (and lock, and lazily load)
# the requested session data.
kwargs['timeout'] = timeout
kwargs['clean_freq'] = clean_freq
cherrypy.serving.session = sess = storage_class(id, **kwargs)
sess.debug = debug
def update_cookie(id):
"""Update the cookie every time the session id changes."""
cherrypy.serving.response.cookie[name] = id
sess.id_observers.append(update_cookie)
# Create cherrypy.session which will proxy to cherrypy.serving.session
if not hasattr(cherrypy, "session"):
cherrypy.session = cherrypy._ThreadLocalProxy('session')
if persistent:
cookie_timeout = timeout
else:
# See http://support.microsoft.com/kb/223799/EN-US/
# and http://support.mozilla.com/en-US/kb/Cookies
cookie_timeout = None
set_response_cookie(path=path, path_header=path_header, name=name,
timeout=cookie_timeout, domain=domain, secure=secure)
def set_response_cookie(path=None, path_header=None, name='session_id',
timeout=60, domain=None, secure=False):
"""Set a response cookie for the client.
path: the 'path' value to stick in the response cookie metadata.
path_header: if 'path' is None (the default), then the response
cookie 'path' will be pulled from request.headers[path_header].
name: the name of the cookie.
timeout: the expiration timeout for the cookie. If 0 or other boolean
False, no 'expires' param will be set, and the cookie will be a
"session cookie" which expires when the browser is closed.
domain: the cookie domain.
secure: if False (the default) the cookie 'secure' value will not
be set. If True, the cookie 'secure' value will be set (to 1).
"""
# Set response cookie
cookie = cherrypy.serving.response.cookie
cookie[name] = cherrypy.serving.session.id
cookie[name]['path'] = (path or cherrypy.serving.request.headers.get(path_header)
or '/')
# We'd like to use the "max-age" param as indicated in
# http://www.faqs.org/rfcs/rfc2109.html but IE doesn't
# save it to disk and the session is lost if people close
# the browser. So we have to use the old "expires" ... sigh ...
## cookie[name]['max-age'] = timeout * 60
if timeout:
e = time.time() + (timeout * 60)
cookie[name]['expires'] = httputil.HTTPDate(e)
if domain is not None:
cookie[name]['domain'] = domain
if secure:
cookie[name]['secure'] = 1
def expire():
"""Expire the current session cookie."""
name = cherrypy.serving.request.config.get('tools.sessions.name', 'session_id')
one_year = 60 * 60 * 24 * 365
e = time.time() - one_year
cherrypy.serving.response.cookie[name]['expires'] = httputil.HTTPDate(e)
| epl-1.0 |
Axam/nsx-web | fuel_agent/fuel_agent/tests/test_utils.py | 2 | 6073 | # Copyright 2011 Justin Santa Barbara
# Copyright 2012 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import tempfile
import testtools
import mock
import stevedore
from fuel_agent import errors
from fuel_agent.openstack.common import processutils
from fuel_agent.utils import utils
class ExecuteTestCase(testtools.TestCase):
"""This class is partly based on the same class in openstack/ironic."""
def setUp(self):
super(ExecuteTestCase, self).setUp()
fake_driver = stevedore.extension.Extension('fake_driver', None, None,
'fake_obj')
self.drv_manager = stevedore.driver.DriverManager.make_test_instance(
fake_driver)
def test_parse_unit(self):
self.assertEqual(utils.parse_unit('1.00m', 'm', ceil=True), 1)
self.assertEqual(utils.parse_unit('1.00m', 'm', ceil=False), 1)
self.assertEqual(utils.parse_unit('1.49m', 'm', ceil=True), 2)
self.assertEqual(utils.parse_unit('1.49m', 'm', ceil=False), 1)
self.assertEqual(utils.parse_unit('1.51m', 'm', ceil=True), 2)
self.assertEqual(utils.parse_unit('1.51m', 'm', ceil=False), 1)
self.assertRaises(ValueError, utils.parse_unit, '1.00m', 'MiB')
self.assertRaises(ValueError, utils.parse_unit, '', 'MiB')
def test_B2MiB(self):
self.assertEqual(utils.B2MiB(1048575, ceil=False), 0)
self.assertEqual(utils.B2MiB(1048576, ceil=False), 1)
self.assertEqual(utils.B2MiB(1048575, ceil=True), 1)
self.assertEqual(utils.B2MiB(1048576, ceil=True), 1)
self.assertEqual(utils.B2MiB(1048577, ceil=True), 2)
def test_retry_on_failure(self):
fd, tmpfilename = tempfile.mkstemp()
_, tmpfilename2 = tempfile.mkstemp()
try:
fp = os.fdopen(fd, 'w+')
fp.write('''#!/bin/sh
# If stdin fails to get passed during one of the runs, make a note.
if ! grep -q foo
then
echo 'failure' > "$1"
fi
# If stdin has failed to get passed during this or a previous run, exit early.
if grep failure "$1"
then
exit 1
fi
runs="$(cat $1)"
if [ -z "$runs" ]
then
runs=0
fi
runs=$(($runs + 1))
echo $runs > "$1"
exit 1
''')
fp.close()
os.chmod(tmpfilename, 0o755)
self.assertRaises(processutils.ProcessExecutionError,
utils.execute,
tmpfilename, tmpfilename2, attempts=10,
process_input='foo',
delay_on_retry=False)
fp = open(tmpfilename2, 'r')
runs = fp.read()
fp.close()
self.assertNotEqual(runs.strip(), 'failure', 'stdin did not '
'always get passed '
'correctly')
runs = int(runs.strip())
self.assertEqual(10, runs,
'Ran %d times instead of 10.' % (runs,))
finally:
os.unlink(tmpfilename)
os.unlink(tmpfilename2)
def test_unknown_kwargs_raises_error(self):
self.assertRaises(processutils.UnknownArgumentError,
utils.execute,
'/usr/bin/env', 'true',
this_is_not_a_valid_kwarg=True)
def test_check_exit_code_boolean(self):
utils.execute('/usr/bin/env', 'false', check_exit_code=False)
self.assertRaises(processutils.ProcessExecutionError,
utils.execute,
'/usr/bin/env', 'false', check_exit_code=True)
def test_no_retry_on_success(self):
fd, tmpfilename = tempfile.mkstemp()
_, tmpfilename2 = tempfile.mkstemp()
try:
fp = os.fdopen(fd, 'w+')
fp.write('''#!/bin/sh
# If we've already run, bail out.
grep -q foo "$1" && exit 1
# Mark that we've run before.
echo foo > "$1"
# Check that stdin gets passed correctly.
grep foo
''')
fp.close()
os.chmod(tmpfilename, 0o755)
utils.execute(tmpfilename,
tmpfilename2,
process_input='foo',
attempts=2)
finally:
os.unlink(tmpfilename)
os.unlink(tmpfilename2)
@mock.patch('stevedore.driver.DriverManager')
def test_get_driver(self, mock_drv_manager):
mock_drv_manager.return_value = self.drv_manager
self.assertEqual('fake_obj', utils.get_driver('fake_driver'))
@mock.patch('jinja2.Environment')
@mock.patch('jinja2.FileSystemLoader')
@mock.patch('six.moves.builtins.open')
def test_render_and_save_fail(self, mock_open, mock_j_lo, mock_j_env):
mock_open.side_effect = Exception('foo')
self.assertRaises(errors.TemplateWriteError, utils.render_and_save,
'fake_dir', 'fake_tmpl_name', 'fake_data',
'fake_file_name')
@mock.patch('jinja2.Environment')
@mock.patch('jinja2.FileSystemLoader')
@mock.patch('six.moves.builtins.open')
def test_render_and_save_ok(self, mock_open, mock_j_lo, mock_j_env):
mock_render = mock.Mock()
mock_render.render.return_value = 'fake_data'
mock_j_env.get_template.return_value = mock_render
utils.render_and_save('fake_dir', 'fake_tmpl_name', 'fake_data',
'fake_file_name')
mock_open.assert_called_once_with('fake_file_name', 'w')
| apache-2.0 |
ryfeus/lambda-packs | Tensorflow/source/tensorflow/contrib/memory_stats/__init__.py | 16 | 1114 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Ops for memory statistics.
@@BytesInUse
@@BytesLimit
@@MaxBytesInUse
"""
from tensorflow.contrib.memory_stats.python.ops.memory_stats_ops import BytesInUse
from tensorflow.contrib.memory_stats.python.ops.memory_stats_ops import BytesLimit
from tensorflow.contrib.memory_stats.python.ops.memory_stats_ops import MaxBytesInUse
from tensorflow.python.util.all_util import remove_undocumented
remove_undocumented(__name__)
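# A minimal usage sketch (not part of the original module; the device and
# session setup below are illustrative). Each op reports allocator statistics
# for the device it is placed on:
#
#   import tensorflow as tf
#   from tensorflow.contrib.memory_stats import BytesInUse, MaxBytesInUse
#
#   with tf.device('/device:GPU:0'):
#       in_use = BytesInUse()
#       peak = MaxBytesInUse()
#   with tf.Session() as sess:
#       print(sess.run([in_use, peak]))  # bytes currently in use, peak bytes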
| mit |
DeBortoliWines/Bika-LIMS | bika/lims/content/artemplate.py | 2 | 8365 | """
AnalysisRequests often use the same configurations.
ARTemplate includes all AR fields, including preset AnalysisProfile
"""
from AccessControl import ClassSecurityInfo
from Products.Archetypes.public import *
from Products.Archetypes.references import HoldingReference
from Products.CMFCore.permissions import View, ModifyPortalContent
from Products.ATExtensions.field.records import RecordsField
from Products.CMFCore.utils import getToolByName
from bika.lims import PMF, bikaMessageFactory as _
from bika.lims.browser.widgets import RecordsWidget as BikaRecordsWidget
from bika.lims.browser.widgets import ARTemplatePartitionsWidget
from bika.lims.browser.widgets import ARTemplateAnalysesWidget
from bika.lims.browser.widgets import RecordsWidget
from bika.lims.browser.widgets import ReferenceWidget
from bika.lims.config import PROJECTNAME
from bika.lims.content.bikaschema import BikaSchema
from zope.interface import Interface, implements
import sys
schema = BikaSchema.copy() + Schema((
## SamplePoint and SampleType references are managed with
## accessors and mutators below to get/set a string value
## (the Title of the object), but still store a normal Reference.
## Form autocomplete widgets can then work with the Titles.
ReferenceField('SamplePoint',
vocabulary_display_path_bound = sys.maxint,
allowed_types = ('SamplePoint',),
relationship = 'ARTemplateSamplePoint',
referenceClass = HoldingReference,
accessor = 'getSamplePoint',
edit_accessor = 'getSamplePoint',
mutator = 'setSamplePoint',
widget=ReferenceWidget(
label = _("Sample Point"),
description = _("Location where sample was taken"),
visible={'edit': 'visible', 'view': 'visible', 'add': 'visible',
'secondary': 'invisible'},
catalog_name='bika_setup_catalog',
base_query={'inactive_state': 'active'},
showOn=True,
),
),
ComputedField(
"SamplePointUID",
expression="context.Schema()['SamplePoint'].get(context) and context.Schema()['SamplePoint'].get(context).UID() or ''",
widget=ComputedWidget(
visible=False,
),
),
ReferenceField('SampleType',
vocabulary_display_path_bound = sys.maxint,
allowed_types = ('SampleType',),
relationship = 'ARTemplateSampleType',
referenceClass = HoldingReference,
accessor = 'getSampleType',
edit_accessor = 'getSampleType',
mutator = 'setSampleType',
widget=ReferenceWidget(
label = _("Sample Type"),
description = _("Create a new sample of this type"),
visible={'edit': 'visible', 'view': 'visible', 'add': 'visible',
'secondary': 'invisible'},
catalog_name='bika_setup_catalog',
base_query={'inactive_state': 'active'},
showOn=True,
),
),
ComputedField(
"SampleTypeUID",
expression="context.Schema()['SampleType'].get(context) and context.Schema()['SampleType'].get(context).UID() or ''",
widget=ComputedWidget(
visible=False,
),
),
BooleanField('ReportDryMatter',
default = False,
widget = BooleanWidget(
label = _("Report as Dry Matter"),
description = _("These results can be reported as dry matter"),
),
),
TextField('Remarks',
searchable = True,
default_content_type = 'text/plain',
allowed_content_types= ('text/plain', ),
default_output_type="text/plain",
widget = TextAreaWidget(
macro = "bika_widgets/remarks",
label = _("Remarks"),
append_only = True,
),
),
RecordsField('Partitions',
schemata = 'Sample Partitions',
required = 0,
type = 'artemplate_parts',
subfields = ('part_id',
'Container',
'Preservation',
'container_uid',
'preservation_uid'),
subfield_labels = {'part_id': _('Partition'),
'Container': _('Container'),
'Preservation': _('Preservation')},
subfield_sizes = {'part_id': 15,
'Container': 35,
'Preservation': 35},
subfield_hidden = {'preservation_uid': True,
'container_uid': True},
default = [{'part_id':'part-1',
'Container':'',
'Preservation':'',
'container_uid':'',
'preservation_uid':''}],
widget=ARTemplatePartitionsWidget(
label = _("Sample Partitions"),
description = _("Configure the sample partitions and preservations " + \
"for this template. Assign analyses to the different " + \
"partitions on the template's Analyses tab"),
combogrid_options={
'Container': {
'colModel': [
{'columnName':'container_uid', 'hidden':True},
{'columnName':'Container', 'width':'30', 'label':_('Container')},
{'columnName':'Description', 'width':'70', 'label':_('Description')}],
'url': 'getcontainers',
'showOn': True,
'width': '550px'
},
'Preservation': {
'colModel': [
{'columnName':'preservation_uid', 'hidden':True},
{'columnName':'Preservation', 'width':'30', 'label':_('Preservation')},
{'columnName':'Description', 'width':'70', 'label':_('Description')}],
'url': 'getpreservations',
'showOn': True,
'width': '550px'
},
},
),
),
ReferenceField('AnalysisProfile',
schemata = 'Analyses',
required = 0,
multiValued = 0,
allowed_types = ('AnalysisProfile',),
relationship = 'ARTemplateAnalysisProfile',
widget=ReferenceWidget(
label = _("Analysis Profile"),
description =_("The Analysis Profile selection for this template"),
visible={'edit': 'visible', 'view': 'visible', 'add': 'visible',
'secondary': 'invisible'},
catalog_name='bika_setup_catalog',
base_query={'inactive_state': 'active'},
showOn=True,
),
),
RecordsField('Analyses',
schemata = 'Analyses',
required = 0,
type = 'artemplate_analyses',
subfields = ('service_uid', 'partition'),
subfield_labels = {'service_uid': _('Title'),
'partition': _('Partition')},
default = [],
widget = ARTemplateAnalysesWidget(
label = _("Analyses"),
description=_("Select analyses to include in this template"),
)
),
),
)
schema['description'].widget.visible = True
schema['title'].widget.visible = True
schema['title'].validators = ('uniquefieldvalidator',)
# Update the validation layer after change the validator in runtime
schema['title']._validationLayer()
class ARTemplate(BaseContent):
security = ClassSecurityInfo()
schema = schema
displayContentsTab = False
_at_rename_after_creation = True
def _renameAfterCreation(self, check_auto_id=False):
from bika.lims.idserver import renameAfterCreation
renameAfterCreation(self)
security.declarePublic('AnalysisProfiles')
def AnalysisProfiles(self, instance=None):
instance = instance or self
bsc = getToolByName(instance, 'bika_setup_catalog')
items = []
for p in bsc(portal_type='AnalysisProfile',
inactive_state='active',
sort_on = 'sortable_title'):
p = p.getObject()
title = p.Title()
items.append((p.UID(), title))
items = [['','']] + list(items)
return DisplayList(items)
def getClientUID(self):
        return self.aq_parent.UID()
registerType(ARTemplate, PROJECTNAME)
| agpl-3.0 |
appliedx/edx-platform | lms/djangoapps/verify_student/migrations/0003_auto__add_field_softwaresecurephotoverification_display.py | 114 | 6778 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'SoftwareSecurePhotoVerification.display'
db.add_column('verify_student_softwaresecurephotoverification', 'display',
self.gf('django.db.models.fields.BooleanField')(default=True, db_index=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'SoftwareSecurePhotoVerification.display'
db.delete_column('verify_student_softwaresecurephotoverification', 'display')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'reverification.midcoursereverificationwindow': {
'Meta': {'object_name': 'MidcourseReverificationWindow'},
'course_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'end_date': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'start_date': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True', 'blank': 'True'})
},
'verify_student.softwaresecurephotoverification': {
'Meta': {'ordering': "['-created_at']", 'object_name': 'SoftwareSecurePhotoVerification'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'db_index': 'True', 'blank': 'True'}),
'display': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'error_code': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'error_msg': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'face_image_url': ('django.db.models.fields.URLField', [], {'max_length': '255', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'photo_id_image_url': ('django.db.models.fields.URLField', [], {'max_length': '255', 'blank': 'True'}),
'photo_id_key': ('django.db.models.fields.TextField', [], {'max_length': '1024'}),
'receipt_id': ('django.db.models.fields.CharField', [], {'default': "'<function uuid4 at 0x3176410>'", 'max_length': '255', 'db_index': 'True'}),
'reviewing_service': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'reviewing_user': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'photo_verifications_reviewed'", 'null': 'True', 'to': "orm['auth.User']"}),
'status': ('model_utils.fields.StatusField', [], {'default': "'created'", 'max_length': '100', u'no_check_for_status': 'True'}),
'status_changed': ('model_utils.fields.MonitorField', [], {'default': 'datetime.datetime.now', u'monitor': "u'status'"}),
'submitted_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'db_index': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'window': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['reverification.MidcourseReverificationWindow']", 'null': 'True'})
}
}
complete_apps = ['verify_student']
| agpl-3.0 |
bspink/django | tests/transactions/tests.py | 239 | 19163 | from __future__ import unicode_literals
import sys
import threading
import time
from unittest import skipIf, skipUnless
from django.db import (
DatabaseError, Error, IntegrityError, OperationalError, connection,
transaction,
)
from django.test import (
TransactionTestCase, skipIfDBFeature, skipUnlessDBFeature,
)
from django.utils import six
from .models import Reporter
@skipUnless(connection.features.uses_savepoints,
"'atomic' requires transactions and savepoints.")
class AtomicTests(TransactionTestCase):
"""
Tests for the atomic decorator and context manager.
The tests make assertions on internal attributes because there isn't a
robust way to ask the database for its current transaction state.
Since the decorator syntax is converted into a context manager (see the
implementation), there are only a few basic tests with the decorator
syntax and the bulk of the tests use the context manager syntax.
"""
available_apps = ['transactions']
def test_decorator_syntax_commit(self):
@transaction.atomic
def make_reporter():
Reporter.objects.create(first_name="Tintin")
make_reporter()
self.assertQuerysetEqual(Reporter.objects.all(), ['<Reporter: Tintin>'])
def test_decorator_syntax_rollback(self):
@transaction.atomic
def make_reporter():
Reporter.objects.create(first_name="Haddock")
raise Exception("Oops, that's his last name")
with six.assertRaisesRegex(self, Exception, "Oops"):
make_reporter()
self.assertQuerysetEqual(Reporter.objects.all(), [])
def test_alternate_decorator_syntax_commit(self):
@transaction.atomic()
def make_reporter():
Reporter.objects.create(first_name="Tintin")
make_reporter()
self.assertQuerysetEqual(Reporter.objects.all(), ['<Reporter: Tintin>'])
def test_alternate_decorator_syntax_rollback(self):
@transaction.atomic()
def make_reporter():
Reporter.objects.create(first_name="Haddock")
raise Exception("Oops, that's his last name")
with six.assertRaisesRegex(self, Exception, "Oops"):
make_reporter()
self.assertQuerysetEqual(Reporter.objects.all(), [])
def test_commit(self):
with transaction.atomic():
Reporter.objects.create(first_name="Tintin")
self.assertQuerysetEqual(Reporter.objects.all(), ['<Reporter: Tintin>'])
def test_rollback(self):
with six.assertRaisesRegex(self, Exception, "Oops"):
with transaction.atomic():
Reporter.objects.create(first_name="Haddock")
raise Exception("Oops, that's his last name")
self.assertQuerysetEqual(Reporter.objects.all(), [])
def test_nested_commit_commit(self):
with transaction.atomic():
Reporter.objects.create(first_name="Tintin")
with transaction.atomic():
Reporter.objects.create(first_name="Archibald", last_name="Haddock")
self.assertQuerysetEqual(Reporter.objects.all(),
['<Reporter: Archibald Haddock>', '<Reporter: Tintin>'])
def test_nested_commit_rollback(self):
with transaction.atomic():
Reporter.objects.create(first_name="Tintin")
with six.assertRaisesRegex(self, Exception, "Oops"):
with transaction.atomic():
Reporter.objects.create(first_name="Haddock")
raise Exception("Oops, that's his last name")
self.assertQuerysetEqual(Reporter.objects.all(), ['<Reporter: Tintin>'])
def test_nested_rollback_commit(self):
with six.assertRaisesRegex(self, Exception, "Oops"):
with transaction.atomic():
Reporter.objects.create(last_name="Tintin")
with transaction.atomic():
Reporter.objects.create(last_name="Haddock")
raise Exception("Oops, that's his first name")
self.assertQuerysetEqual(Reporter.objects.all(), [])
def test_nested_rollback_rollback(self):
with six.assertRaisesRegex(self, Exception, "Oops"):
with transaction.atomic():
Reporter.objects.create(last_name="Tintin")
with six.assertRaisesRegex(self, Exception, "Oops"):
with transaction.atomic():
Reporter.objects.create(first_name="Haddock")
raise Exception("Oops, that's his last name")
raise Exception("Oops, that's his first name")
self.assertQuerysetEqual(Reporter.objects.all(), [])
def test_merged_commit_commit(self):
with transaction.atomic():
Reporter.objects.create(first_name="Tintin")
with transaction.atomic(savepoint=False):
Reporter.objects.create(first_name="Archibald", last_name="Haddock")
self.assertQuerysetEqual(Reporter.objects.all(),
['<Reporter: Archibald Haddock>', '<Reporter: Tintin>'])
def test_merged_commit_rollback(self):
with transaction.atomic():
Reporter.objects.create(first_name="Tintin")
with six.assertRaisesRegex(self, Exception, "Oops"):
with transaction.atomic(savepoint=False):
Reporter.objects.create(first_name="Haddock")
raise Exception("Oops, that's his last name")
# Writes in the outer block are rolled back too.
self.assertQuerysetEqual(Reporter.objects.all(), [])
def test_merged_rollback_commit(self):
with six.assertRaisesRegex(self, Exception, "Oops"):
with transaction.atomic():
Reporter.objects.create(last_name="Tintin")
with transaction.atomic(savepoint=False):
Reporter.objects.create(last_name="Haddock")
raise Exception("Oops, that's his first name")
self.assertQuerysetEqual(Reporter.objects.all(), [])
def test_merged_rollback_rollback(self):
with six.assertRaisesRegex(self, Exception, "Oops"):
with transaction.atomic():
Reporter.objects.create(last_name="Tintin")
with six.assertRaisesRegex(self, Exception, "Oops"):
with transaction.atomic(savepoint=False):
Reporter.objects.create(first_name="Haddock")
raise Exception("Oops, that's his last name")
raise Exception("Oops, that's his first name")
self.assertQuerysetEqual(Reporter.objects.all(), [])
def test_reuse_commit_commit(self):
atomic = transaction.atomic()
with atomic:
Reporter.objects.create(first_name="Tintin")
with atomic:
Reporter.objects.create(first_name="Archibald", last_name="Haddock")
self.assertQuerysetEqual(Reporter.objects.all(),
['<Reporter: Archibald Haddock>', '<Reporter: Tintin>'])
def test_reuse_commit_rollback(self):
atomic = transaction.atomic()
with atomic:
Reporter.objects.create(first_name="Tintin")
with six.assertRaisesRegex(self, Exception, "Oops"):
with atomic:
Reporter.objects.create(first_name="Haddock")
raise Exception("Oops, that's his last name")
self.assertQuerysetEqual(Reporter.objects.all(), ['<Reporter: Tintin>'])
def test_reuse_rollback_commit(self):
atomic = transaction.atomic()
with six.assertRaisesRegex(self, Exception, "Oops"):
with atomic:
Reporter.objects.create(last_name="Tintin")
with atomic:
Reporter.objects.create(last_name="Haddock")
raise Exception("Oops, that's his first name")
self.assertQuerysetEqual(Reporter.objects.all(), [])
def test_reuse_rollback_rollback(self):
atomic = transaction.atomic()
with six.assertRaisesRegex(self, Exception, "Oops"):
with atomic:
Reporter.objects.create(last_name="Tintin")
with six.assertRaisesRegex(self, Exception, "Oops"):
with atomic:
Reporter.objects.create(first_name="Haddock")
raise Exception("Oops, that's his last name")
raise Exception("Oops, that's his first name")
self.assertQuerysetEqual(Reporter.objects.all(), [])
def test_force_rollback(self):
with transaction.atomic():
Reporter.objects.create(first_name="Tintin")
            # atomic block shouldn't roll back, but force it.
self.assertFalse(transaction.get_rollback())
transaction.set_rollback(True)
self.assertQuerysetEqual(Reporter.objects.all(), [])
def test_prevent_rollback(self):
with transaction.atomic():
Reporter.objects.create(first_name="Tintin")
sid = transaction.savepoint()
# trigger a database error inside an inner atomic without savepoint
with self.assertRaises(DatabaseError):
with transaction.atomic(savepoint=False):
with connection.cursor() as cursor:
cursor.execute(
"SELECT no_such_col FROM transactions_reporter")
# prevent atomic from rolling back since we're recovering manually
self.assertTrue(transaction.get_rollback())
transaction.set_rollback(False)
transaction.savepoint_rollback(sid)
self.assertQuerysetEqual(Reporter.objects.all(), ['<Reporter: Tintin>'])
class AtomicInsideTransactionTests(AtomicTests):
"""All basic tests for atomic should also pass within an existing transaction."""
def setUp(self):
self.atomic = transaction.atomic()
self.atomic.__enter__()
def tearDown(self):
self.atomic.__exit__(*sys.exc_info())
@skipIf(connection.features.autocommits_when_autocommit_is_off,
"This test requires a non-autocommit mode that doesn't autocommit.")
class AtomicWithoutAutocommitTests(AtomicTests):
"""All basic tests for atomic should also pass when autocommit is turned off."""
def setUp(self):
transaction.set_autocommit(False)
def tearDown(self):
# The tests access the database after exercising 'atomic', initiating
        # a transaction; a rollback is required before restoring autocommit.
transaction.rollback()
transaction.set_autocommit(True)
@skipUnless(connection.features.uses_savepoints,
"'atomic' requires transactions and savepoints.")
class AtomicMergeTests(TransactionTestCase):
"""Test merging transactions with savepoint=False."""
available_apps = ['transactions']
def test_merged_outer_rollback(self):
with transaction.atomic():
Reporter.objects.create(first_name="Tintin")
with transaction.atomic(savepoint=False):
Reporter.objects.create(first_name="Archibald", last_name="Haddock")
with six.assertRaisesRegex(self, Exception, "Oops"):
with transaction.atomic(savepoint=False):
Reporter.objects.create(first_name="Calculus")
raise Exception("Oops, that's his last name")
                # The third insert couldn't be rolled back. Temporarily mark the
# connection as not needing rollback to check it.
self.assertTrue(transaction.get_rollback())
transaction.set_rollback(False)
self.assertEqual(Reporter.objects.count(), 3)
transaction.set_rollback(True)
            # The second insert couldn't be rolled back. Temporarily mark the
# connection as not needing rollback to check it.
self.assertTrue(transaction.get_rollback())
transaction.set_rollback(False)
self.assertEqual(Reporter.objects.count(), 3)
transaction.set_rollback(True)
# The first block has a savepoint and must roll back.
self.assertQuerysetEqual(Reporter.objects.all(), [])
def test_merged_inner_savepoint_rollback(self):
with transaction.atomic():
Reporter.objects.create(first_name="Tintin")
with transaction.atomic():
Reporter.objects.create(first_name="Archibald", last_name="Haddock")
with six.assertRaisesRegex(self, Exception, "Oops"):
with transaction.atomic(savepoint=False):
Reporter.objects.create(first_name="Calculus")
raise Exception("Oops, that's his last name")
                # The third insert couldn't be rolled back. Temporarily mark the
# connection as not needing rollback to check it.
self.assertTrue(transaction.get_rollback())
transaction.set_rollback(False)
self.assertEqual(Reporter.objects.count(), 3)
transaction.set_rollback(True)
# The second block has a savepoint and must roll back.
self.assertEqual(Reporter.objects.count(), 1)
self.assertQuerysetEqual(Reporter.objects.all(), ['<Reporter: Tintin>'])
@skipUnless(connection.features.uses_savepoints,
"'atomic' requires transactions and savepoints.")
class AtomicErrorsTests(TransactionTestCase):
available_apps = ['transactions']
def test_atomic_prevents_setting_autocommit(self):
autocommit = transaction.get_autocommit()
with transaction.atomic():
with self.assertRaises(transaction.TransactionManagementError):
transaction.set_autocommit(not autocommit)
# Make sure autocommit wasn't changed.
self.assertEqual(connection.autocommit, autocommit)
def test_atomic_prevents_calling_transaction_methods(self):
with transaction.atomic():
with self.assertRaises(transaction.TransactionManagementError):
transaction.commit()
with self.assertRaises(transaction.TransactionManagementError):
transaction.rollback()
def test_atomic_prevents_queries_in_broken_transaction(self):
r1 = Reporter.objects.create(first_name="Archibald", last_name="Haddock")
with transaction.atomic():
r2 = Reporter(first_name="Cuthbert", last_name="Calculus", id=r1.id)
with self.assertRaises(IntegrityError):
r2.save(force_insert=True)
# The transaction is marked as needing rollback.
with self.assertRaises(transaction.TransactionManagementError):
r2.save(force_update=True)
self.assertEqual(Reporter.objects.get(pk=r1.pk).last_name, "Haddock")
@skipIfDBFeature('atomic_transactions')
def test_atomic_allows_queries_after_fixing_transaction(self):
r1 = Reporter.objects.create(first_name="Archibald", last_name="Haddock")
with transaction.atomic():
r2 = Reporter(first_name="Cuthbert", last_name="Calculus", id=r1.id)
with self.assertRaises(IntegrityError):
r2.save(force_insert=True)
# Mark the transaction as no longer needing rollback.
transaction.set_rollback(False)
r2.save(force_update=True)
self.assertEqual(Reporter.objects.get(pk=r1.pk).last_name, "Calculus")
@skipUnlessDBFeature('test_db_allows_multiple_connections')
def test_atomic_prevents_queries_in_broken_transaction_after_client_close(self):
with transaction.atomic():
Reporter.objects.create(first_name="Archibald", last_name="Haddock")
connection.close()
# The connection is closed and the transaction is marked as
# needing rollback. This will raise an InterfaceError on databases
# that refuse to create cursors on closed connections (PostgreSQL)
# and a TransactionManagementError on other databases.
with self.assertRaises(Error):
Reporter.objects.create(first_name="Cuthbert", last_name="Calculus")
        # The connection is usable again.
self.assertEqual(Reporter.objects.count(), 0)
@skipUnless(connection.vendor == 'mysql', "MySQL-specific behaviors")
class AtomicMySQLTests(TransactionTestCase):
available_apps = ['transactions']
@skipIf(threading is None, "Test requires threading")
def test_implicit_savepoint_rollback(self):
"""MySQL implicitly rolls back savepoints when it deadlocks (#22291)."""
other_thread_ready = threading.Event()
def other_thread():
try:
with transaction.atomic():
Reporter.objects.create(id=1, first_name="Tintin")
other_thread_ready.set()
# We cannot synchronize the two threads with an event here
# because the main thread locks. Sleep for a little while.
time.sleep(1)
# 2) ... and this line deadlocks. (see below for 1)
Reporter.objects.exclude(id=1).update(id=2)
finally:
# This is the thread-local connection, not the main connection.
connection.close()
other_thread = threading.Thread(target=other_thread)
other_thread.start()
other_thread_ready.wait()
with six.assertRaisesRegex(self, OperationalError, 'Deadlock found'):
# Double atomic to enter a transaction and create a savepoint.
with transaction.atomic():
with transaction.atomic():
# 1) This line locks... (see above for 2)
Reporter.objects.create(id=1, first_name="Tintin")
other_thread.join()
class AtomicMiscTests(TransactionTestCase):
available_apps = []
def test_wrap_callable_instance(self):
# Regression test for #20028
class Callable(object):
def __call__(self):
pass
# Must not raise an exception
transaction.atomic(Callable())
@skipUnlessDBFeature('can_release_savepoints')
def test_atomic_does_not_leak_savepoints_on_failure(self):
# Regression test for #23074
# Expect an error when rolling back a savepoint that doesn't exist.
# Done outside of the transaction block to ensure proper recovery.
with self.assertRaises(Error):
# Start a plain transaction.
with transaction.atomic():
# Swallow the intentional error raised in the sub-transaction.
with six.assertRaisesRegex(self, Exception, "Oops"):
# Start a sub-transaction with a savepoint.
with transaction.atomic():
sid = connection.savepoint_ids[-1]
raise Exception("Oops")
# This is expected to fail because the savepoint no longer exists.
connection.savepoint_rollback(sid)
| bsd-3-clause |
achiku/jungle | jungle/ec2.py | 1 | 8325 | # -*- coding: utf-8 -*-
import subprocess
import sys
import botocore
import click
from jungle.session import create_session
def format_output(instances, flag):
"""return formatted string for instance"""
out = []
line_format = '{0}\t{1}\t{2}\t{3}\t{4}'
name_len = _get_max_name_len(instances) + 3
if flag:
line_format = '{0:<' + str(name_len) + '}{1:<16}{2:<21}{3:<16}{4:<16}'
for i in instances:
tag_name = get_tag_value(i.tags, 'Name')
out.append(line_format.format(
tag_name, i.state['Name'], i.id, i.private_ip_address, str(i.public_ip_address)))
return out
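# Example of the aligned output produced when the list-formatted flag is set
# (instance values below are illustrative):
#
#   web-server-1   running        i-0a1b2c3d4e5f6a7b8  10.0.0.12      54.210.1.2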
def _get_instance_ip_address(instance, use_private_ip=False):
if use_private_ip:
return instance.private_ip_address
elif instance.public_ip_address is not None:
return instance.public_ip_address
else:
click.echo("Public IP address not set. Attempting to use the private IP address.")
return instance.private_ip_address
def _get_max_name_len(instances):
"""get max length of Tag:Name"""
# FIXME: ec2.instanceCollection doesn't have __len__
for i in instances:
return max([len(get_tag_value(i.tags, 'Name')) for i in instances])
return 0
def get_tag_value(x, key):
"""Get a value from tag"""
if x is None:
return ''
result = [y['Value'] for y in x if y['Key'] == key]
if result:
return result[0]
return ''
@click.group()
@click.option('--profile-name', '-P', default=None, help='AWS profile name')
@click.pass_context
def cli(ctx, profile_name):
"""EC2 CLI group"""
ctx.obj = {'AWS_PROFILE_NAME': profile_name}
@cli.command(help='List EC2 instances')
@click.argument('name', default='*')
@click.option('--list-formatted', '-l', is_flag=True)
@click.pass_context
def ls(ctx, name, list_formatted):
"""List EC2 instances"""
session = create_session(ctx.obj['AWS_PROFILE_NAME'])
ec2 = session.resource('ec2')
if name == '*':
instances = ec2.instances.filter()
else:
condition = {'Name': 'tag:Name', 'Values': [name]}
instances = ec2.instances.filter(Filters=[condition])
out = format_output(instances, list_formatted)
click.echo('\n'.join(out))
@cli.command(help='Start EC2 instance')
@click.option('--instance-id', '-i', required=True, help='EC2 instance id')
@click.pass_context
def up(ctx, instance_id):
"""Start EC2 instance"""
session = create_session(ctx.obj['AWS_PROFILE_NAME'])
ec2 = session.resource('ec2')
try:
instance = ec2.Instance(instance_id)
instance.start()
except botocore.exceptions.ClientError as e:
click.echo("Invalid instance ID {0} ({1})".format(instance_id, e), err=True)
sys.exit(2)
@cli.command(help='Stop EC2 instance')
@click.option('--instance-id', '-i', required=True, help='EC2 instance id')
@click.pass_context
def down(ctx, instance_id):
"""Stop EC2 instance"""
session = create_session(ctx.obj['AWS_PROFILE_NAME'])
ec2 = session.resource('ec2')
try:
instance = ec2.Instance(instance_id)
instance.stop()
except botocore.exceptions.ClientError as e:
click.echo("Invalid instance ID {0} ({1})".format(instance_id, e), err=True)
sys.exit(2)
def create_ssh_command(session, instance_id, instance_name, username, key_file, port, ssh_options,
use_private_ip, gateway_instance_id, gateway_username):
"""Create SSH Login command string"""
ec2 = session.resource('ec2')
if instance_id is not None:
try:
instance = ec2.Instance(instance_id)
hostname = _get_instance_ip_address(instance, use_private_ip)
except botocore.exceptions.ClientError as e:
click.echo("Invalid instance ID {0} ({1})".format(instance_id, e), err=True)
sys.exit(2)
elif instance_name is not None:
try:
conditions = [
{'Name': 'tag:Name', 'Values': [instance_name]},
{'Name': 'instance-state-name', 'Values': ['running']},
]
instances = ec2.instances.filter(Filters=conditions)
target_instances = []
for idx, i in enumerate(instances):
target_instances.append(i)
if len(target_instances) == 1:
instance = target_instances[0]
hostname = _get_instance_ip_address(instance, use_private_ip)
else:
for idx, i in enumerate(instances):
tag_name = get_tag_value(i.tags, 'Name')
click.echo('[{0}]: {1}\t{2}\t{3}\t{4}\t{5}'.format(
idx, i.id, i.public_ip_address, i.state['Name'], tag_name, i.key_name))
selected_idx = click.prompt("Please enter a valid number", type=int, default=0)
if len(target_instances) - 1 < selected_idx or selected_idx < 0:
click.echo("selected number [{0}] is invalid".format(selected_idx), err=True)
sys.exit(2)
click.echo("{0} is selected.".format(selected_idx))
instance = target_instances[selected_idx]
hostname = _get_instance_ip_address(instance, use_private_ip)
except botocore.exceptions.ClientError as e:
click.echo("Invalid instance ID {0} ({1})".format(instance_id, e), err=True)
sys.exit(2)
# TODO: need to refactor and make it testable
if key_file is None:
key_file_option = ''
else:
key_file_option = ' -i {0}'.format(key_file)
gateway_username_option = build_option_username(gateway_username)
username_option = build_option_username(username)
if ssh_options is None:
ssh_options = ''
else:
ssh_options = ' {0}'.format(ssh_options)
if gateway_instance_id is not None:
gateway_instance = ec2.Instance(gateway_instance_id)
gateway_public_ip = gateway_instance.public_ip_address
hostname = instance.private_ip_address
cmd = 'ssh -tt{0} {1}{2} -p {3}{4} ssh{5} {6}'.format(
gateway_username_option, gateway_public_ip, key_file_option,
port, ssh_options, username_option, hostname)
else:
cmd = 'ssh{0} {1}{2} -p {3}{4}'.format(username_option, hostname, key_file_option, port, ssh_options)
return cmd
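# Illustrative commands produced by create_ssh_command (hosts, key paths and
# usernames are made up):
#
#   without a gateway:
#       ssh -l ubuntu 54.210.1.2 -i ~/.ssh/key.pem -p 22
#   with a gateway instance (note the nested ssh to the private address):
#       ssh -tt -l ec2-user 54.210.1.2 -i ~/.ssh/key.pem -p 22 ssh -l ubuntu 10.0.0.12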
def build_option_username(username):
if username is None:
return ''
else:
return ' -l {0}'.format(username)
@cli.command(help='SSH login to EC2 instance')
@click.option('--instance-id', '-i', default=None, help='EC2 instance id')
@click.option('--instance-name', '-n', default=None, help='EC2 instance Name Tag')
@click.option('--username', '-u', default=None, help='Login username')
@click.option('--key-file', '-k', help='SSH Key file path', type=click.Path())
@click.option('--port', '-p', help='SSH port', default=22)
@click.option('--private-ip', '-e', help='Use instance private ip', is_flag=True, default=False)
@click.option('--ssh-options', '-s', help='Additional SSH options', default=None)
@click.option('--gateway-instance-id', '-g', default=None, help='Gateway instance id')
@click.option('--gateway-username', '-x', default=None, help='Gateway username')
@click.option('--dry-run', is_flag=True, default=False, help='Print SSH login command and exit')
@click.pass_context
def ssh(ctx, instance_id, instance_name, username, key_file, port, ssh_options, private_ip,
gateway_instance_id, gateway_username, dry_run):
"""SSH to EC2 instance"""
session = create_session(ctx.obj['AWS_PROFILE_NAME'])
if instance_id is None and instance_name is None:
click.echo(
"One of --instance-id/-i or --instance-name/-n"
" has to be specified.", err=True)
sys.exit(1)
elif instance_id is not None and instance_name is not None:
click.echo(
"Both --instance-id/-i and --instance-name/-n "
"can't to be specified at the same time.", err=True)
sys.exit(1)
cmd = create_ssh_command(
session, instance_id, instance_name, username, key_file, port, ssh_options, private_ip,
gateway_instance_id, gateway_username)
if not dry_run:
subprocess.call(cmd, shell=True)
else:
click.echo(cmd)
| mit |
shabab12/edx-platform | openedx/core/djangoapps/programs/migrations/0001_initial.py | 86 | 1453 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='ProgramsApiConfig',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('change_date', models.DateTimeField(auto_now_add=True, verbose_name='Change date')),
('enabled', models.BooleanField(default=False, verbose_name='Enabled')),
('internal_service_url', models.URLField(verbose_name='Internal Service URL')),
('public_service_url', models.URLField(verbose_name='Public Service URL')),
('api_version_number', models.IntegerField(verbose_name='API Version')),
('enable_student_dashboard', models.NullBooleanField(verbose_name='Enable Student Dashboard Displays')),
('changed_by', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, editable=False, to=settings.AUTH_USER_MODEL, null=True, verbose_name='Changed by')),
],
options={
'ordering': ('-change_date',),
'abstract': False,
},
),
]
| agpl-3.0 |
dingocuster/scikit-learn | examples/ensemble/plot_adaboost_hastie_10_2.py | 355 | 3576 | """
=============================
Discrete versus Real AdaBoost
=============================
This example is based on Figure 10.2 from Hastie et al 2009 [1] and illustrates
the difference in performance between the discrete SAMME [2] boosting
algorithm and real SAMME.R boosting algorithm. Both algorithms are evaluated
on a binary classification task where the target Y is a non-linear function
of 10 input features.
Discrete SAMME AdaBoost adapts based on errors in predicted class labels
whereas real SAMME.R uses the predicted class probabilities.
.. [1] T. Hastie, R. Tibshirani and J. Friedman, "Elements of Statistical
Learning Ed. 2", Springer, 2009.
.. [2] J. Zhu, H. Zou, S. Rosset, T. Hastie, "Multi-class AdaBoost", 2009.
"""
print(__doc__)
# Author: Peter Prettenhofer <peter.prettenhofer@gmail.com>,
# Noel Dawe <noel.dawe@gmail.com>
#
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import zero_one_loss
from sklearn.ensemble import AdaBoostClassifier
n_estimators = 400
# A learning rate of 1. may not be optimal for both SAMME and SAMME.R
learning_rate = 1.
X, y = datasets.make_hastie_10_2(n_samples=12000, random_state=1)
X_test, y_test = X[2000:], y[2000:]
X_train, y_train = X[:2000], y[:2000]
dt_stump = DecisionTreeClassifier(max_depth=1, min_samples_leaf=1)
dt_stump.fit(X_train, y_train)
dt_stump_err = 1.0 - dt_stump.score(X_test, y_test)
dt = DecisionTreeClassifier(max_depth=9, min_samples_leaf=1)
dt.fit(X_train, y_train)
dt_err = 1.0 - dt.score(X_test, y_test)
ada_discrete = AdaBoostClassifier(
base_estimator=dt_stump,
learning_rate=learning_rate,
n_estimators=n_estimators,
algorithm="SAMME")
ada_discrete.fit(X_train, y_train)
ada_real = AdaBoostClassifier(
base_estimator=dt_stump,
learning_rate=learning_rate,
n_estimators=n_estimators,
algorithm="SAMME.R")
ada_real.fit(X_train, y_train)
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot([1, n_estimators], [dt_stump_err] * 2, 'k-',
label='Decision Stump Error')
ax.plot([1, n_estimators], [dt_err] * 2, 'k--',
label='Decision Tree Error')
ada_discrete_err = np.zeros((n_estimators,))
for i, y_pred in enumerate(ada_discrete.staged_predict(X_test)):
ada_discrete_err[i] = zero_one_loss(y_pred, y_test)
ada_discrete_err_train = np.zeros((n_estimators,))
for i, y_pred in enumerate(ada_discrete.staged_predict(X_train)):
ada_discrete_err_train[i] = zero_one_loss(y_pred, y_train)
ada_real_err = np.zeros((n_estimators,))
for i, y_pred in enumerate(ada_real.staged_predict(X_test)):
ada_real_err[i] = zero_one_loss(y_pred, y_test)
ada_real_err_train = np.zeros((n_estimators,))
for i, y_pred in enumerate(ada_real.staged_predict(X_train)):
ada_real_err_train[i] = zero_one_loss(y_pred, y_train)
ax.plot(np.arange(n_estimators) + 1, ada_discrete_err,
label='Discrete AdaBoost Test Error',
color='red')
ax.plot(np.arange(n_estimators) + 1, ada_discrete_err_train,
label='Discrete AdaBoost Train Error',
color='blue')
ax.plot(np.arange(n_estimators) + 1, ada_real_err,
label='Real AdaBoost Test Error',
color='orange')
ax.plot(np.arange(n_estimators) + 1, ada_real_err_train,
label='Real AdaBoost Train Error',
color='green')
ax.set_ylim((0.0, 0.5))
ax.set_xlabel('n_estimators')
ax.set_ylabel('error rate')
leg = ax.legend(loc='upper right', fancybox=True)
leg.get_frame().set_alpha(0.7)
plt.show()
| bsd-3-clause |
devs1991/test_edx_docmode | openedx/core/djangoapps/course_groups/management/commands/tests/test_post_cohort_membership_fix.py | 14 | 4245 | """
Test for the post-migration fix commands that are included with this djangoapp
"""
from django.core.management import call_command
from django.test.client import RequestFactory
from nose.plugins.attrib import attr
from openedx.core.djangoapps.course_groups.views import cohort_handler
from openedx.core.djangoapps.course_groups.cohorts import get_cohort_by_name
from openedx.core.djangoapps.course_groups.tests.helpers import config_course_cohorts
from openedx.core.djangoapps.course_groups.models import CohortMembership
from student.tests.factories import UserFactory
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory
@attr('shard_2')
class TestPostMigrationFix(ModuleStoreTestCase):
"""
Base class for testing post-migration fix commands
"""
def setUp(self):
"""
setup course, user and request for tests
"""
super(TestPostMigrationFix, self).setUp()
self.course1 = CourseFactory.create()
self.course2 = CourseFactory.create()
self.user1 = UserFactory(is_staff=True)
self.user2 = UserFactory(is_staff=True)
self.request = RequestFactory().get("dummy_url")
self.request.user = self.user1
def test_post_cohortmembership_fix(self):
"""
Test that changes made *after* migration, but *before* turning on new code are handled properly
"""
# First, we're going to simulate some problem states that can arise during this window
config_course_cohorts(self.course1, is_cohorted=True, auto_cohorts=["Course1AutoGroup1", "Course1AutoGroup2"])
# Get the cohorts from the courses, which will cause auto cohorts to be created
cohort_handler(self.request, unicode(self.course1.id))
course_1_auto_cohort_1 = get_cohort_by_name(self.course1.id, "Course1AutoGroup1")
course_1_auto_cohort_2 = get_cohort_by_name(self.course1.id, "Course1AutoGroup2")
# When migrations were first run, the users were assigned to CohortMemberships correctly
membership1 = CohortMembership(
course_id=course_1_auto_cohort_1.course_id,
user=self.user1,
course_user_group=course_1_auto_cohort_1
)
membership1.save()
membership2 = CohortMembership(
course_id=course_1_auto_cohort_1.course_id,
user=self.user2,
course_user_group=course_1_auto_cohort_1
)
membership2.save()
# But before CohortMembership code was turned on, some changes were made:
course_1_auto_cohort_2.users.add(self.user1) # user1 is now in 2 cohorts in the same course!
course_1_auto_cohort_2.users.add(self.user2)
course_1_auto_cohort_1.users.remove(self.user2) # and user2 was moved, but no one told CohortMembership!
# run the post-CohortMembership command, dry-run
call_command('post_cohort_membership_fix')
# Verify nothing was changed in dry-run mode.
self.assertEqual(self.user1.course_groups.count(), 2) # CourseUserGroup has 2 entries for user1
self.assertEqual(CohortMembership.objects.get(user=self.user2).course_user_group.name, 'Course1AutoGroup1')
user2_cohorts = list(self.user2.course_groups.values_list('name', flat=True))
self.assertEqual(user2_cohorts, ['Course1AutoGroup2']) # CourseUserGroup and CohortMembership disagree
# run the post-CohortMembership command, and commit it
call_command('post_cohort_membership_fix', commit='commit')
# verify that both databases agree about the (corrected) state of the memberships
self.assertEqual(self.user1.course_groups.count(), 1)
self.assertEqual(CohortMembership.objects.filter(user=self.user1).count(), 1)
self.assertEqual(self.user2.course_groups.count(), 1)
self.assertEqual(CohortMembership.objects.filter(user=self.user2).count(), 1)
self.assertEqual(CohortMembership.objects.get(user=self.user2).course_user_group.name, 'Course1AutoGroup2')
user2_cohorts = list(self.user2.course_groups.values_list('name', flat=True))
self.assertEqual(user2_cohorts, ['Course1AutoGroup2'])
| agpl-3.0 |
beni55/viewfinder | backend/op/register_user_op.py | 13 | 5924 | # Copyright 2013 Viewfinder Inc. All Rights Reserved.
"""Viewfinder RegisterUserOperation.
This operation registers an existing user by adding the REGISTERED label to it. It also can
register a new mobile device.
"""
__authors__ = ['andy@emailscrubbed.com (Andy Kimball)']
import json
from tornado import gen
from viewfinder.backend.db.analytics import Analytics
from viewfinder.backend.db.device import Device
from viewfinder.backend.db.identity import Identity
from viewfinder.backend.db.lock import Lock
from viewfinder.backend.db.operation import Operation
from viewfinder.backend.db.user import User
from viewfinder.backend.db.viewpoint import Viewpoint
from viewfinder.backend.op.notification_manager import NotificationManager
from viewfinder.backend.op.viewfinder_op import ViewfinderOperation
class RegisterUserOperation(ViewfinderOperation):
"""The RegisterUser operation follows the four phase pattern described in the header of
operation_map.py.
"user_dict" contains oauth-supplied user information which is either used to initially
populate the fields for a new user account, or is used to update missing fields. The
"REGISTERED" label is always added to the user object if is not yet present.
"ident_dict" contains the identity key, authority, and various auth-specific access and
refresh tokens that will be stored with the identity.
"device_dict" contains information about the device being used for this registration. If
access is via the web application, "device_dict" will be None. Otherwise, it will contain
either: a device-id for an already-registered device, or device information in order to
create a new device.
"""
def __init__(self, client, user_dict, ident_dict, device_dict):
super(RegisterUserOperation, self).__init__(client)
self._user_dict = user_dict
self._ident_dict = ident_dict
self._device_dict = device_dict
@classmethod
@gen.coroutine
def Execute(cls, client, user_dict, ident_dict, device_dict):
"""Entry point called by the operation framework."""
yield RegisterUserOperation(client, user_dict, ident_dict, device_dict)._RegisterUser()
@gen.coroutine
def _RegisterUser(self):
"""Orchestrates the register operation by executing each of the phases in turn."""
yield self._Check()
self._client.CheckDBNotModified()
yield self._Update()
yield Operation.TriggerFailpoint(self._client)
yield self._Notify()
@gen.coroutine
def _Check(self):
"""Gathers pre-mutation information:
1. Queries for the existing user and identity.
2. Checkpoints whether the user is prospective.
3. Checkpoints whether the identity is linked to the user.
4. Checkpoints whether the device is the first mobile device to be registered.
"""
    # Start populating the checkpoint if this is the first time the operation has been run.
if self._op.checkpoint is None:
# Remember whether the user was a prospective user at the start of the operation.
user = yield gen.Task(User.Query, self._client, self._user_dict['user_id'], None)
self._is_first_register = not user.IsRegistered()
# Remember whether the identity was bound to the user at the start of the operation.
identity = yield gen.Task(Identity.Query, self._client, self._ident_dict['key'], None)
self._is_linking = identity.user_id is None
# Remember if this is the first mobile device to be registered for this user.
existing_devices = yield gen.Task(Device.RangeQuery,
self._client,
user.user_id,
None,
limit=1,
col_names=None)
self._is_first_device = len(existing_devices) == 0
checkpoint = {'is_first_reg': self._is_first_register,
'linked': self._is_linking,
'is_first_dev': self._is_first_device}
yield self._op.SetCheckpoint(self._client, checkpoint)
else:
# Restore state from checkpoint.
self._is_first_register = self._op.checkpoint['is_first_reg']
self._is_first_device = self._op.checkpoint['is_first_dev']
self._is_linking = self._op.checkpoint['linked']
@gen.coroutine
def _Update(self):
"""Updates the database:
1. Registers the user and identity.
2. Registers the device.
"""
yield User.Register(self._client,
self._user_dict,
self._ident_dict,
self._op.timestamp,
rewrite_contacts=self._is_first_register or self._is_linking)
if self._device_dict is not None:
yield Device.Register(self._client,
self._user_dict['user_id'],
self._device_dict,
is_first=self._is_first_device)
# Update analytics if prospective user was registered.
if self._is_first_register:
analytics = Analytics.Create(entity='us:%d' % self._user_dict['user_id'],
type=Analytics.USER_REGISTER,
timestamp=self._op.timestamp)
yield gen.Task(analytics.Update, self._client)
@gen.coroutine
def _Notify(self):
"""Creates notifications:
1. Notifies the user and his friends and contacts of any changes to the user or its
identities.
"""
yield NotificationManager.NotifyRegisterUser(self._client,
self._user_dict,
self._ident_dict,
self._op.timestamp,
self._is_first_register,
self._is_linking)
| apache-2.0 |
dcosson/parallel-ci-runner | parallel_ci_runner/docker_commands.py | 1 | 4006 | from functools import partial
import random
import string
class DockerBuildCommand(object):
""" Generates a `docker build` command to run in a subprocess.
"""
def __init__(self, docker_repo, tag, dockerfile='Dockerfile', build_args=None):
self.docker_repo = docker_repo
self.tag = tag
self.dockerfile = dockerfile
self.build_args = build_args or {}
def build(self):
def docker_build_command(process_num):
build_arg_flags = " ".join(
"--build-arg {}='{}'".format(k, v) for k, v in self.build_args.items())
return "docker build -f {0} -t {1}:{2} {3} .".format(
self.dockerfile, self.docker_repo, self.tag, build_arg_flags)
return docker_build_command
def full_image_name(self):
return "{0}:{1}".format(self.docker_repo, self.tag)
class DockerCommand(object):
def __init__(self, docker_command, container_name_prefix):
self.docker_command = docker_command
self.container_name_prefix = container_name_prefix
def build(self, cmd):
def docker_command(process_num):
cmd_string = cmd(process_num) if hasattr(cmd, '__call__') else cmd
return "docker {0} {1}{2} {3}".format(
self.docker_command, self.container_name_prefix, process_num, cmd_string)
return docker_command
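# Usage sketch (container prefix and command are illustrative). The process
# number is appended to the container name, so parallel runners target
# distinct containers:
#   exec_cmd = DockerCommand('exec', 'ci_runner_')
#   exec_cmd.build('py.test tests/')(2)
#   # -> "docker exec ci_runner_2 py.test tests/"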
class DockerComposeCommand(object):
""" Generates docker or docker-compose commands to run in a subprocess.
"""
def __init__(self, docker_compose_file='docker-compose.yml',
project_name_base=None, env_vars=None):
self.docker_compose_file = docker_compose_file
self.env_vars = env_vars or {}
self.project_name_base = project_name_base or self._random_project_name()
def _random_project_name(self, length=12):
chars = string.ascii_lowercase + string.digits
return 'cirunner' + ''.join(random.choice(chars) for i in range(length))
def _default_env_vars(self, process_num):
return {
'PROJECT_NAME': self._project_name(process_num),
'CI_COMMAND_NUMBER': process_num,
}
def _project_name(self, command_num):
if self.project_name_base is None:
return None
return self.project_name_base + str(command_num)
def _build_cmd(self, app, cmd_string, docker_compose_command, process_num):
""" Builds the docker-compose command running cmd_string
process_num gets appended to the project name which lets you run
in parallel on separate docker-compose clusters of containers.
"""
output = self._env_vars_prefix(process_num)
output += self._compose_with_file_and_project_name(process_num)
output += " {0}".format(docker_compose_command)
if app:
output += " {0}".format(app)
if cmd_string:
output += " {0}".format(cmd_string)
return output
def _cleanup_cmd(self, process_num):
tmp = self._env_vars_prefix(process_num)
tmp += self._compose_with_file_and_project_name(process_num)
return "{0} stop && {0} rm --force".format(tmp)
def _compose_with_file_and_project_name(self, process_num):
output = "docker-compose"
output += " -f {0}".format(self.docker_compose_file)
if self._project_name(process_num):
output += " -p {0}".format(self._project_name(process_num))
return output
def _env_vars_prefix(self, process_num):
output = ""
env_vars = self._default_env_vars(process_num)
env_vars.update(self.env_vars)
if env_vars:
output += ' '.join("{0}={1}".format(k, v) for k, v in env_vars.items())
output += " "
return output
def build(self, app, docker_compose_command, cmd_string=None):
return partial(self._build_cmd, app, cmd_string, docker_compose_command)
def cleanup(self):
return self._cleanup_cmd
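# Usage sketch for DockerComposeCommand (service and command names are
# illustrative; the ordering of the environment-variable prefix may vary,
# since it is built from a dict):
#   compose = DockerComposeCommand(project_name_base='ci')
#   run_tests = compose.build('web', 'run --rm', cmd_string='py.test')
#   run_tests(1)
#   # -> "PROJECT_NAME=ci1 CI_COMMAND_NUMBER=1 docker-compose
#   #     -f docker-compose.yml -p ci1 run --rm web py.test"
#   compose.cleanup()(1)  # stops and force-removes that cluster's containers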
| mit |
fbagirov/scikit-learn | doc/tutorial/machine_learning_map/pyparsing.py | 258 | 137838 | # module pyparsing.py
#
# Copyright (c) 2003-2008 Paul T. McGuire
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
#from __future__ import generators
__doc__ = \
"""
pyparsing module - Classes and methods to define and execute parsing grammars
The pyparsing module is an alternative approach to creating and executing simple grammars,
vs. the traditional lex/yacc approach, or the use of regular expressions. With pyparsing, you
don't need to learn a new syntax for defining grammars or matching expressions - the parsing module
provides a library of classes that you use to construct the grammar directly in Python.
Here is a program to parse "Hello, World!" (or any greeting of the form "<salutation>, <addressee>!")::
from pyparsing import Word, alphas
# define grammar of a greeting
greet = Word( alphas ) + "," + Word( alphas ) + "!"
hello = "Hello, World!"
print hello, "->", greet.parseString( hello )
The program outputs the following::
Hello, World! -> ['Hello', ',', 'World', '!']
The Python representation of the grammar is quite readable, owing to the self-explanatory
class names, and the use of '+', '|' and '^' operators.
The parsed results returned from parseString() can be accessed as a nested list, a dictionary, or an
object with named attributes.
The pyparsing module handles some of the problems that are typically vexing when writing text parsers:
- extra or missing whitespace (the above program will also handle "Hello,World!", "Hello , World !", etc.)
- quoted strings
- embedded comments
"""
__version__ = "1.4.11"
__versionTime__ = "10 February 2008 17:28"
__author__ = "Paul McGuire <ptmcg@users.sourceforge.net>"
import string
from weakref import ref as wkref
import copy,sys
import warnings
import re
import sre_constants
import xml.sax.saxutils
#~ sys.stderr.write( "testing pyparsing module, version %s, %s\n" % (__version__,__versionTime__ ) )
"""
Detect if we are running version 3.X and make appropriate changes
Robert A. Clark
"""
if sys.version_info[0] > 2:
__MAX_INT__ = sys.maxsize
__BASE_STRING__ = str
else:
__MAX_INT__ = sys.maxint
__BASE_STRING__ = basestring
def _ustr(obj):
"""Drop-in replacement for str(obj) that tries to be Unicode friendly. It first tries
str(obj). If that fails with a UnicodeEncodeError, then it tries unicode(obj). It
then < returns the unicode object | encodes it with the default encoding | ... >.
"""
try:
# If this works, then _ustr(obj) has the same behaviour as str(obj), so
# it won't break any existing code.
return str(obj)
except UnicodeEncodeError:
# The Python docs (http://docs.python.org/ref/customization.html#l2h-182)
# state that "The return value must be a string object". However, does a
# unicode object (being a subclass of basestring) count as a "string
# object"?
# If so, then return a unicode object:
return unicode(obj)
# Else encode it... but how? There are many choices... :)
# Replace unprintables with escape codes?
#return unicode(obj).encode(sys.getdefaultencoding(), 'backslashreplace_errors')
# Replace unprintables with question marks?
#return unicode(obj).encode(sys.getdefaultencoding(), 'replace')
# ...
def _str2dict(strg):
return dict( [(c,0) for c in strg] )
#~ return set( [c for c in strg] )
class _Constants(object):
pass
alphas = string.ascii_lowercase + string.ascii_uppercase # ascii_* variants are locale-independent
nums = string.digits
hexnums = nums + "ABCDEFabcdef"
alphanums = alphas + nums
_bslash = "\\"
printables = "".join( [ c for c in string.printable if c not in string.whitespace ] )
class ParseBaseException(Exception):
"""base exception class for all parsing runtime exceptions"""
__slots__ = ( "loc","msg","pstr","parserElement" )
# Performance tuning: we construct a *lot* of these, so keep this
# constructor as small and fast as possible
def __init__( self, pstr, loc=0, msg=None, elem=None ):
self.loc = loc
if msg is None:
self.msg = pstr
self.pstr = ""
else:
self.msg = msg
self.pstr = pstr
self.parserElement = elem
def __getattr__( self, aname ):
"""supported attributes by name are:
- lineno - returns the line number of the exception text
- col - returns the column number of the exception text
- line - returns the line containing the exception text
"""
if( aname == "lineno" ):
return lineno( self.loc, self.pstr )
elif( aname in ("col", "column") ):
return col( self.loc, self.pstr )
elif( aname == "line" ):
return line( self.loc, self.pstr )
else:
raise AttributeError, aname
def __str__( self ):
return "%s (at char %d), (line:%d, col:%d)" % \
( self.msg, self.loc, self.lineno, self.column )
def __repr__( self ):
return _ustr(self)
def markInputline( self, markerString = ">!<" ):
"""Extracts the exception line from the input string, and marks
the location of the exception with a special symbol.
"""
line_str = self.line
line_column = self.column - 1
if markerString:
line_str = "".join( [line_str[:line_column],
markerString, line_str[line_column:]])
return line_str.strip()
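# sketch: markInputline() re-renders the offending line with the marker string
# spliced in at the error column, e.g. a failure at '*' in "1 + * 2" would
# render as "1 + >!<* 2"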
class ParseException(ParseBaseException):
"""exception thrown when parse expressions don't match class;
supported attributes by name are:
- lineno - returns the line number of the exception text
- col - returns the column number of the exception text
- line - returns the line containing the exception text
"""
pass
class ParseFatalException(ParseBaseException):
"""user-throwable exception thrown when inconsistent parse content
is found; stops all parsing immediately"""
pass
#~ class ReparseException(ParseBaseException):
#~ """Experimental class - parse actions can raise this exception to cause
#~ pyparsing to reparse the input string:
#~ - with a modified input string, and/or
#~ - with a modified start location
#~ Set the values of the ReparseException in the constructor, and raise the
#~ exception in a parse action to cause pyparsing to use the new string/location.
#~ Setting the values as None causes no change to be made.
#~ """
#~ def __init_( self, newstring, restartLoc ):
#~ self.newParseText = newstring
#~ self.reparseLoc = restartLoc
class RecursiveGrammarException(Exception):
"""exception thrown by validate() if the grammar could be improperly recursive"""
def __init__( self, parseElementList ):
self.parseElementTrace = parseElementList
def __str__( self ):
return "RecursiveGrammarException: %s" % self.parseElementTrace
class _ParseResultsWithOffset(object):
def __init__(self,p1,p2):
self.tup = (p1,p2)
def __getitem__(self,i):
return self.tup[i]
def __repr__(self):
return repr(self.tup)
class ParseResults(object):
"""Structured parse results, to provide multiple means of access to the parsed data:
- as a list (len(results))
- by list index (results[0], results[1], etc.)
- by attribute (results.<resultsName>)
"""
__slots__ = ( "__toklist", "__tokdict", "__doinit", "__name", "__parent", "__accumNames", "__weakref__" )
def __new__(cls, toklist, name=None, asList=True, modal=True ):
if isinstance(toklist, cls):
return toklist
retobj = object.__new__(cls)
retobj.__doinit = True
return retobj
# Performance tuning: we construct a *lot* of these, so keep this
# constructor as small and fast as possible
def __init__( self, toklist, name=None, asList=True, modal=True ):
if self.__doinit:
self.__doinit = False
self.__name = None
self.__parent = None
self.__accumNames = {}
if isinstance(toklist, list):
self.__toklist = toklist[:]
else:
self.__toklist = [toklist]
self.__tokdict = dict()
# this line is related to debugging the asXML bug
#~ asList = False
if name:
if not modal:
self.__accumNames[name] = 0
if isinstance(name,int):
name = _ustr(name) # will always return a str, but use _ustr for consistency
self.__name = name
if toklist not in (None,'',[]):
if isinstance(toklist,__BASE_STRING__):
toklist = [ toklist ]
if asList:
if isinstance(toklist,ParseResults):
self[name] = _ParseResultsWithOffset(toklist.copy(),-1)
else:
self[name] = _ParseResultsWithOffset(ParseResults(toklist[0]),-1)
self[name].__name = name
else:
try:
self[name] = toklist[0]
except (KeyError,TypeError):
self[name] = toklist
def __getitem__( self, i ):
if isinstance( i, (int,slice) ):
return self.__toklist[i]
else:
if i not in self.__accumNames:
return self.__tokdict[i][-1][0]
else:
return ParseResults([ v[0] for v in self.__tokdict[i] ])
def __setitem__( self, k, v ):
if isinstance(v,_ParseResultsWithOffset):
self.__tokdict[k] = self.__tokdict.get(k,list()) + [v]
sub = v[0]
elif isinstance(k,int):
self.__toklist[k] = v
sub = v
else:
self.__tokdict[k] = self.__tokdict.get(k,list()) + [_ParseResultsWithOffset(v,0)]
sub = v
if isinstance(sub,ParseResults):
sub.__parent = wkref(self)
def __delitem__( self, i ):
if isinstance(i,(int,slice)):
mylen = len( self.__toklist )
del self.__toklist[i]
# convert int to slice
if isinstance(i, int):
if i < 0:
i += mylen
i = slice(i, i+1)
# get removed indices
removed = range(*i.indices(mylen))
removed.reverse()
# fixup indices in token dictionary
for name in self.__tokdict.keys():
occurrences = self.__tokdict[name]
for j in removed:
for k, (value, position) in enumerate(occurrences):
occurrences[k] = _ParseResultsWithOffset(value, position - (position > j))
else:
del self.__tokdict[i]
def __contains__( self, k ):
return self.__tokdict.has_key(k)
def __len__( self ): return len( self.__toklist )
def __bool__(self): return len( self.__toklist ) > 0
def __nonzero__( self ): return self.__bool__()
def __iter__( self ): return iter( self.__toklist )
def __reversed__( self ): return iter( reversed(self.__toklist) )
def keys( self ):
"""Returns all named result keys."""
return self.__tokdict.keys()
def pop( self, index=-1 ):
"""Removes and returns item at specified index (default=last).
Will work with either numeric indices or dict-key indices."""
ret = self[index]
del self[index]
return ret
def get(self, key, defaultValue=None):
"""Returns named result matching the given key, or if there is no
such name, then returns the given defaultValue or None if no
defaultValue is specified."""
if key in self:
return self[key]
else:
return defaultValue
def insert( self, index, insStr ):
self.__toklist.insert(index, insStr)
# fixup indices in token dictionary
for name in self.__tokdict.keys():
occurrences = self.__tokdict[name]
for k, (value, position) in enumerate(occurrences):
occurrences[k] = _ParseResultsWithOffset(value, position + (position > index)) # fix: 'index' is the insertion point ('j' was undefined here)
def items( self ):
"""Returns all named result keys and values as a list of tuples."""
return [(k,self[k]) for k in self.__tokdict.keys()]
def values( self ):
"""Returns all named result values."""
return [ v[-1][0] for v in self.__tokdict.values() ]
def __getattr__( self, name ):
if name not in self.__slots__:
if self.__tokdict.has_key( name ):
if name not in self.__accumNames:
return self.__tokdict[name][-1][0]
else:
return ParseResults([ v[0] for v in self.__tokdict[name] ])
else:
return ""
return None
def __add__( self, other ):
ret = self.copy()
ret += other
return ret
def __iadd__( self, other ):
if other.__tokdict:
offset = len(self.__toklist)
addoffset = ( lambda a: (a<0 and offset) or (a+offset) )
otheritems = other.__tokdict.items()
otherdictitems = [(k, _ParseResultsWithOffset(v[0],addoffset(v[1])) )
for (k,vlist) in otheritems for v in vlist]
for k,v in otherdictitems:
self[k] = v
if isinstance(v[0],ParseResults):
v[0].__parent = wkref(self)
self.__toklist += other.__toklist
self.__accumNames.update( other.__accumNames )
del other
return self
def __repr__( self ):
return "(%s, %s)" % ( repr( self.__toklist ), repr( self.__tokdict ) )
def __str__( self ):
out = "["
sep = ""
for i in self.__toklist:
if isinstance(i, ParseResults):
out += sep + _ustr(i)
else:
out += sep + repr(i)
sep = ", "
out += "]"
return out
def _asStringList( self, sep='' ):
out = []
for item in self.__toklist:
if out and sep:
out.append(sep)
if isinstance( item, ParseResults ):
out += item._asStringList()
else:
out.append( _ustr(item) )
return out
def asList( self ):
"""Returns the parse results as a nested list of matching tokens, all converted to strings."""
out = []
for res in self.__toklist:
if isinstance(res,ParseResults):
out.append( res.asList() )
else:
out.append( res )
return out
def asDict( self ):
"""Returns the named parse results as dictionary."""
return dict( self.items() )
def copy( self ):
"""Returns a new copy of a ParseResults object."""
ret = ParseResults( self.__toklist )
ret.__tokdict = self.__tokdict.copy()
ret.__parent = self.__parent
ret.__accumNames.update( self.__accumNames )
ret.__name = self.__name
return ret
def asXML( self, doctag=None, namedItemsOnly=False, indent="", formatted=True ):
"""Returns the parse results as XML. Tags are created for tokens and lists that have defined results names."""
nl = "\n"
out = []
namedItems = dict( [ (v[1],k) for (k,vlist) in self.__tokdict.items()
for v in vlist ] )
nextLevelIndent = indent + " "
# collapse out indents if formatting is not desired
if not formatted:
indent = ""
nextLevelIndent = ""
nl = ""
selfTag = None
if doctag is not None:
selfTag = doctag
else:
if self.__name:
selfTag = self.__name
if not selfTag:
if namedItemsOnly:
return ""
else:
selfTag = "ITEM"
out += [ nl, indent, "<", selfTag, ">" ]
worklist = self.__toklist
for i,res in enumerate(worklist):
if isinstance(res,ParseResults):
if i in namedItems:
out += [ res.asXML(namedItems[i],
namedItemsOnly and doctag is None,
nextLevelIndent,
formatted)]
else:
out += [ res.asXML(None,
namedItemsOnly and doctag is None,
nextLevelIndent,
formatted)]
else:
# individual token, see if there is a name for it
resTag = None
if i in namedItems:
resTag = namedItems[i]
if not resTag:
if namedItemsOnly:
continue
else:
resTag = "ITEM"
xmlBodyText = xml.sax.saxutils.escape(_ustr(res))
out += [ nl, nextLevelIndent, "<", resTag, ">",
xmlBodyText,
"</", resTag, ">" ]
out += [ nl, indent, "</", selfTag, ">" ]
return "".join(out)
def __lookup(self,sub):
for k,vlist in self.__tokdict.items():
for v,loc in vlist:
if sub is v:
return k
return None
def getName(self):
"""Returns the results name for this token expression."""
if self.__name:
return self.__name
elif self.__parent:
par = self.__parent()
if par:
return par.__lookup(self)
else:
return None
elif (len(self) == 1 and
len(self.__tokdict) == 1 and
self.__tokdict.values()[0][0][1] in (0,-1)):
return self.__tokdict.keys()[0]
else:
return None
def dump(self,indent='',depth=0):
"""Diagnostic method for listing out the contents of a ParseResults.
Accepts an optional indent argument so that this string can be embedded
in a nested display of other data."""
out = []
out.append( indent+_ustr(self.asList()) )
items = self.items()
items.sort()
for k,v in items:
if out:
out.append('\n')
out.append( "%s%s- %s: " % (indent,(' '*depth), k) )
if isinstance(v,ParseResults):
if v.keys():
#~ out.append('\n')
out.append( v.dump(indent,depth+1) )
#~ out.append('\n')
else:
out.append(_ustr(v))
else:
out.append(_ustr(v))
#~ out.append('\n')
return "".join(out)
# add support for pickle protocol
def __getstate__(self):
return ( self.__toklist,
( self.__tokdict.copy(),
self.__parent is not None and self.__parent() or None,
self.__accumNames,
self.__name ) )
def __setstate__(self,state):
self.__toklist = state[0]
self.__tokdict, \
par, \
inAccumNames, \
self.__name = state[1]
self.__accumNames = {}
self.__accumNames.update(inAccumNames)
if par is not None:
self.__parent = wkref(par)
else:
self.__parent = None
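# access sketch for ParseResults (illustrative; assumes a grammar that sets a
# results name "name"):
#   r = (Word(alphas)("name") + Word(nums)).parseString("abc 123")
#   r[0]        -> 'abc'           # list-style indexing
#   r.name      -> 'abc'           # attribute access via the results name
#   r.asList()  -> ['abc', '123']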
def col (loc,strg):
"""Returns current column within a string, counting newlines as line separators.
The first column is number 1.
Note: the default parsing behavior is to expand tabs in the input string
before starting the parsing process. See L{I{ParserElement.parseString}<ParserElement.parseString>} for more information
on parsing strings containing <TAB>s, and suggested methods to maintain a
consistent view of the parsed string, the parse location, and line and column
positions within the parsed string.
"""
return (loc<len(strg) and strg[loc] == '\n') and 1 or loc - strg.rfind("\n", 0, loc)
def lineno(loc,strg):
"""Returns current line number within a string, counting newlines as line separators.
The first line is number 1.
Note: the default parsing behavior is to expand tabs in the input string
before starting the parsing process. See L{I{ParserElement.parseString}<ParserElement.parseString>} for more information
on parsing strings containing <TAB>s, and suggested methods to maintain a
consistent view of the parsed string, the parse location, and line and column
positions within the parsed string.
"""
return strg.count("\n",0,loc) + 1
def line( loc, strg ):
"""Returns the line of text containing loc within a string, counting newlines as line separators.
"""
lastCR = strg.rfind("\n", 0, loc)
nextCR = strg.find("\n", loc)
if nextCR > 0:
return strg[lastCR+1:nextCR]
else:
return strg[lastCR+1:]
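# worked example (sketch) for the three helpers above, with strg = "ab\ncd":
#   col(4, strg)    -> 2     ('d' is in the 2nd column of its line)
#   lineno(4, strg) -> 2     (loc 4 falls on the 2nd line)
#   line(4, strg)   -> "cd"  (the line of text containing loc 4)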
def _defaultStartDebugAction( instring, loc, expr ):
print ("Match",_ustr(expr),"at loc",loc,"(%d,%d)" % ( lineno(loc,instring), col(loc,instring) ))
def _defaultSuccessDebugAction( instring, startloc, endloc, expr, toks ):
print ("Matched",_ustr(expr),"->",toks.asList())
def _defaultExceptionDebugAction( instring, loc, expr, exc ):
print ("Exception raised:", _ustr(exc))
def nullDebugAction(*args):
"""'Do-nothing' debug action, to suppress debugging output during parsing."""
pass
class ParserElement(object):
"""Abstract base level parser element class."""
DEFAULT_WHITE_CHARS = " \n\t\r"
def setDefaultWhitespaceChars( chars ):
"""Overrides the default whitespace chars
"""
ParserElement.DEFAULT_WHITE_CHARS = chars
setDefaultWhitespaceChars = staticmethod(setDefaultWhitespaceChars)
def __init__( self, savelist=False ):
self.parseAction = list()
self.failAction = None
#~ self.name = "<unknown>" # don't define self.name, let subclasses try/except upcall
self.strRepr = None
self.resultsName = None
self.saveAsList = savelist
self.skipWhitespace = True
self.whiteChars = ParserElement.DEFAULT_WHITE_CHARS
self.copyDefaultWhiteChars = True
self.mayReturnEmpty = False # used when checking for left-recursion
self.keepTabs = False
self.ignoreExprs = list()
self.debug = False
self.streamlined = False
self.mayIndexError = True # used to optimize exception handling for subclasses that don't advance parse index
self.errmsg = ""
self.modalResults = True # used to mark results names as modal (report only last) or cumulative (list all)
self.debugActions = ( None, None, None ) #custom debug actions
self.re = None
self.callPreparse = True # used to avoid redundant calls to preParse
self.callDuringTry = False
def copy( self ):
"""Make a copy of this ParserElement. Useful for defining different parse actions
for the same parsing pattern, using copies of the original parse element."""
cpy = copy.copy( self )
cpy.parseAction = self.parseAction[:]
cpy.ignoreExprs = self.ignoreExprs[:]
if self.copyDefaultWhiteChars:
cpy.whiteChars = ParserElement.DEFAULT_WHITE_CHARS
return cpy
def setName( self, name ):
"""Define name for this expression, for use in debugging."""
self.name = name
self.errmsg = "Expected " + self.name
if hasattr(self,"exception"):
self.exception.msg = self.errmsg
return self
def setResultsName( self, name, listAllMatches=False ):
"""Define name for referencing matching tokens as a nested attribute
of the returned parse results.
NOTE: this returns a *copy* of the original ParserElement object;
this is so that the client can define a basic element, such as an
integer, and reference it in multiple places with different names.
"""
newself = self.copy()
newself.resultsName = name
newself.modalResults = not listAllMatches
return newself
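# usage sketch (names are illustrative):
#   integer = Word(nums)
#   date = integer.setResultsName("year") + "/" + integer.setResultsName("month")
#   date.parseString("1999/12").year   -> '1999'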
def setBreak(self,breakFlag = True):
"""Method to invoke the Python pdb debugger when this element is
about to be parsed. Set breakFlag to True to enable, False to
disable.
"""
if breakFlag:
_parseMethod = self._parse
def breaker(instring, loc, doActions=True, callPreParse=True):
import pdb
pdb.set_trace()
return _parseMethod( instring, loc, doActions, callPreParse ) # must return (loc, tokens) like the method it wraps
breaker._originalParseMethod = _parseMethod
self._parse = breaker
else:
if hasattr(self._parse,"_originalParseMethod"):
self._parse = self._parse._originalParseMethod
return self
def _normalizeParseActionArgs( f ):
"""Internal method used to decorate parse actions that take fewer than 3 arguments,
so that all parse actions can be called as f(s,l,t)."""
STAR_ARGS = 4
try:
restore = None
if isinstance(f,type):
restore = f
f = f.__init__
if f.func_code.co_flags & STAR_ARGS:
return f
numargs = f.func_code.co_argcount
if hasattr(f,"im_self"):
numargs -= 1
if restore:
f = restore
except AttributeError:
try:
# not a function, must be a callable object, get info from the
# im_func binding of its bound __call__ method
if f.__call__.im_func.func_code.co_flags & STAR_ARGS:
return f
numargs = f.__call__.im_func.func_code.co_argcount
if hasattr(f.__call__,"im_self"):
numargs -= 1
except AttributeError:
# not a bound method, get info directly from __call__ method
if f.__call__.func_code.co_flags & STAR_ARGS:
return f
numargs = f.__call__.func_code.co_argcount
if hasattr(f.__call__,"im_self"):
numargs -= 1
#~ print ("adding function %s with %d args" % (f.func_name,numargs))
if numargs == 3:
return f
else:
if numargs == 2:
def tmp(s,l,t):
return f(l,t)
elif numargs == 1:
def tmp(s,l,t):
return f(t)
else: #~ numargs == 0:
def tmp(s,l,t):
return f()
try:
tmp.__name__ = f.__name__
except AttributeError:
# no need for special handling if attribute doesn't exist
pass
try:
tmp.__doc__ = f.__doc__
except AttributeError:
# no need for special handling if attribute doesn't exist
pass
try:
tmp.__dict__.update(f.__dict__)
except AttributeError:
# no need for special handling if attribute doesn't exist
pass
return tmp
_normalizeParseActionArgs = staticmethod(_normalizeParseActionArgs)
def setParseAction( self, *fns, **kwargs ):
"""Define action to perform when successfully matching parse element definition.
Parse action fn is a callable method with 0-3 arguments, called as fn(s,loc,toks),
fn(loc,toks), fn(toks), or just fn(), where:
- s = the original string being parsed (see note below)
- loc = the location of the matching substring
- toks = a list of the matched tokens, packaged as a ParseResults object
If the functions in fns modify the tokens, they can return them as the return
value from fn, and the modified list of tokens will replace the original.
Otherwise, fn does not need to return any value.
Note: the default parsing behavior is to expand tabs in the input string
before starting the parsing process. See L{I{parseString}<parseString>} for more information
on parsing strings containing <TAB>s, and suggested methods to maintain a
consistent view of the parsed string, the parse location, and line and column
positions within the parsed string.
"""
self.parseAction = map(self._normalizeParseActionArgs, list(fns))
self.callDuringTry = ("callDuringTry" in kwargs and kwargs["callDuringTry"])
return self
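# sketch of a parse action that converts matched tokens (illustrative only):
#   def to_int(s,l,t): return [ int(t[0]) ]
#   integer = Word(nums).setParseAction(to_int)
#   integer.parseString("42")[0]   -> 42   (an int, not the string '42')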
def addParseAction( self, *fns, **kwargs ):
"""Add parse action to expression's list of parse actions. See L{I{setParseAction}<setParseAction>}."""
self.parseAction += map(self._normalizeParseActionArgs, list(fns))
self.callDuringTry = self.callDuringTry or ("callDuringTry" in kwargs and kwargs["callDuringTry"])
return self
def setFailAction( self, fn ):
"""Define action to perform if parsing fails at this expression.
Fail action fn is a callable function that takes the arguments
fn(s,loc,expr,err) where:
- s = string being parsed
- loc = location where expression match was attempted and failed
- expr = the parse expression that failed
- err = the exception thrown
The function returns no value. It may throw ParseFatalException
if it is desired to stop parsing immediately."""
self.failAction = fn
return self
def _skipIgnorables( self, instring, loc ):
exprsFound = True
while exprsFound:
exprsFound = False
for e in self.ignoreExprs:
try:
while 1:
loc,dummy = e._parse( instring, loc )
exprsFound = True
except ParseException:
pass
return loc
def preParse( self, instring, loc ):
if self.ignoreExprs:
loc = self._skipIgnorables( instring, loc )
if self.skipWhitespace:
wt = self.whiteChars
instrlen = len(instring)
while loc < instrlen and instring[loc] in wt:
loc += 1
return loc
def parseImpl( self, instring, loc, doActions=True ):
return loc, []
def postParse( self, instring, loc, tokenlist ):
return tokenlist
#~ @profile
def _parseNoCache( self, instring, loc, doActions=True, callPreParse=True ):
debugging = ( self.debug ) #and doActions )
if debugging or self.failAction:
#~ print ("Match",self,"at loc",loc,"(%d,%d)" % ( lineno(loc,instring), col(loc,instring) ))
if (self.debugActions[0] ):
self.debugActions[0]( instring, loc, self )
if callPreParse and self.callPreparse:
preloc = self.preParse( instring, loc )
else:
preloc = loc
tokensStart = loc
try:
try:
loc,tokens = self.parseImpl( instring, preloc, doActions )
except IndexError:
raise ParseException( instring, len(instring), self.errmsg, self )
except ParseException, err:
#~ print ("Exception raised:", err)
if self.debugActions[2]:
self.debugActions[2]( instring, tokensStart, self, err )
if self.failAction:
self.failAction( instring, tokensStart, self, err )
raise
else:
if callPreParse and self.callPreparse:
preloc = self.preParse( instring, loc )
else:
preloc = loc
tokensStart = loc
if self.mayIndexError or loc >= len(instring):
try:
loc,tokens = self.parseImpl( instring, preloc, doActions )
except IndexError:
raise ParseException( instring, len(instring), self.errmsg, self )
else:
loc,tokens = self.parseImpl( instring, preloc, doActions )
tokens = self.postParse( instring, loc, tokens )
retTokens = ParseResults( tokens, self.resultsName, asList=self.saveAsList, modal=self.modalResults )
if self.parseAction and (doActions or self.callDuringTry):
if debugging:
try:
for fn in self.parseAction:
tokens = fn( instring, tokensStart, retTokens )
if tokens is not None:
retTokens = ParseResults( tokens,
self.resultsName,
asList=self.saveAsList and isinstance(tokens,(ParseResults,list)),
modal=self.modalResults )
except ParseException, err:
#~ print "Exception raised in user parse action:", err
if (self.debugActions[2] ):
self.debugActions[2]( instring, tokensStart, self, err )
raise
else:
for fn in self.parseAction:
tokens = fn( instring, tokensStart, retTokens )
if tokens is not None:
retTokens = ParseResults( tokens,
self.resultsName,
asList=self.saveAsList and isinstance(tokens,(ParseResults,list)),
modal=self.modalResults )
if debugging:
#~ print ("Matched",self,"->",retTokens.asList())
if (self.debugActions[1] ):
self.debugActions[1]( instring, tokensStart, loc, self, retTokens )
return loc, retTokens
def tryParse( self, instring, loc ):
return self._parse( instring, loc, doActions=False )[0]
# this method gets repeatedly called during backtracking with the same arguments -
# we can cache these arguments and save ourselves the trouble of re-parsing the contained expression
def _parseCache( self, instring, loc, doActions=True, callPreParse=True ):
lookup = (self,instring,loc,callPreParse,doActions)
if lookup in ParserElement._exprArgCache:
value = ParserElement._exprArgCache[ lookup ]
if isinstance(value,Exception):
if isinstance(value,ParseBaseException):
value.loc = loc
raise value
return (value[0],value[1].copy())
else:
try:
value = self._parseNoCache( instring, loc, doActions, callPreParse )
ParserElement._exprArgCache[ lookup ] = (value[0],value[1].copy())
return value
except ParseBaseException, pe:
ParserElement._exprArgCache[ lookup ] = pe
raise
_parse = _parseNoCache
# argument cache for optimizing repeated calls when backtracking through recursive expressions
_exprArgCache = {}
def resetCache():
ParserElement._exprArgCache.clear()
resetCache = staticmethod(resetCache)
_packratEnabled = False
def enablePackrat():
"""Enables "packrat" parsing, which adds memoizing to the parsing logic.
Repeated parse attempts at the same string location (which happens
often in many complex grammars) can immediately return a cached value,
instead of re-executing parsing/validating code. Memoizing is done of
both valid results and parsing exceptions.
This speedup may break existing programs that use parse actions that
have side-effects. For this reason, packrat parsing is disabled when
you first import pyparsing. To activate the packrat feature, your
program must call the class method ParserElement.enablePackrat(). If
your program uses psyco to "compile as you go", you must call
enablePackrat before calling psyco.full(). If you do not do this,
Python will crash. For best results, call enablePackrat() immediately
after importing pyparsing.
"""
if not ParserElement._packratEnabled:
ParserElement._packratEnabled = True
ParserElement._parse = ParserElement._parseCache
enablePackrat = staticmethod(enablePackrat)
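# usage sketch, per the guidance in the docstring above:
#   import pyparsing
#   pyparsing.ParserElement.enablePackrat()   # once, immediately after import
#   ... define and run grammars as usual ...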
def parseString( self, instring ):
"""Execute the parse expression with the given string.
This is the main interface to the client code, once the complete
expression has been built.
Note: parseString implicitly calls expandtabs() on the input string,
in order to report proper column numbers in parse actions.
If the input string contains tabs and
the grammar uses parse actions that use the loc argument to index into the
string being parsed, you can ensure you have a consistent view of the input
string by:
- calling parseWithTabs on your grammar before calling parseString
(see L{I{parseWithTabs}<parseWithTabs>})
- define your parse action using the full (s,loc,toks) signature, and
reference the input string using the parse action's s argument
- explicitly expand the tabs in your input string before calling
parseString
"""
ParserElement.resetCache()
if not self.streamlined:
self.streamline()
#~ self.saveAsList = True
for e in self.ignoreExprs:
e.streamline()
if self.keepTabs:
loc, tokens = self._parse( instring, 0 )
else:
loc, tokens = self._parse( instring.expandtabs(), 0 )
return tokens
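# minimal sketch of this entry point (greeting grammar from the module docstring):
#   greet = Word(alphas) + "," + Word(alphas) + "!"
#   greet.parseString("Hello, World!")   -> ['Hello', ',', 'World', '!']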
def scanString( self, instring, maxMatches=__MAX_INT__ ):
"""Scan the input string for expression matches. Each match will return the
matching tokens, start location, and end location. May be called with optional
maxMatches argument, to clip scanning after 'n' matches are found.
Note that the start and end locations are reported relative to the string
being parsed. See L{I{parseString}<parseString>} for more information on parsing
strings with embedded tabs."""
if not self.streamlined:
self.streamline()
for e in self.ignoreExprs:
e.streamline()
if not self.keepTabs:
instring = _ustr(instring).expandtabs()
instrlen = len(instring)
loc = 0
preparseFn = self.preParse
parseFn = self._parse
ParserElement.resetCache()
matches = 0
while loc <= instrlen and matches < maxMatches:
try:
preloc = preparseFn( instring, loc )
nextLoc,tokens = parseFn( instring, preloc, callPreParse=False )
except ParseException:
loc = preloc+1
else:
matches += 1
yield tokens, preloc, nextLoc
loc = nextLoc
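# sketch: scanString is a generator of (tokens, startloc, endloc) triples, e.g.
#   for tokens, start, end in Word(nums).scanString("a1b22c"):
#       print tokens.asList(), start, end   # ['1'] 1 2, then ['22'] 3 5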
def transformString( self, instring ):
"""Extension to scanString, to modify matching text with modified tokens that may
be returned from a parse action. To use transformString, define a grammar and
attach a parse action to it that modifies the returned token list.
Invoking transformString() on a target string will then scan for matches,
and replace the matched text patterns according to the logic in the parse
action. transformString() returns the resulting transformed string."""
out = []
lastE = 0
# force preservation of <TAB>s, to minimize unwanted transformation of string, and to
# keep string locs straight between transformString and scanString
self.keepTabs = True
for t,s,e in self.scanString( instring ):
out.append( instring[lastE:s] )
if t:
if isinstance(t,ParseResults):
out += t.asList()
elif isinstance(t,list):
out += t
else:
out.append(t)
lastE = e
out.append(instring[lastE:])
return "".join(map(_ustr,out))
def searchString( self, instring, maxMatches=__MAX_INT__ ):
"""Another extension to scanString, simplifying the access to the tokens found
to match the given parse expression. May be called with optional
maxMatches argument, to clip searching after 'n' matches are found.
"""
return ParseResults([ t for t,s,e in self.scanString( instring, maxMatches ) ])
def __add__(self, other ):
"""Implementation of + operator - returns And"""
if isinstance( other, __BASE_STRING__ ):
other = Literal( other )
if not isinstance( other, ParserElement ):
warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning, stacklevel=2)
return None
return And( [ self, other ] )
def __radd__(self, other ):
"""Implementation of + operator when left operand is not a ParserElement"""
if isinstance( other, __BASE_STRING__ ):
other = Literal( other )
if not isinstance( other, ParserElement ):
warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning, stacklevel=2)
return None
return other + self
def __mul__(self,other):
if isinstance(other,int):
minElements, optElements = other,0
elif isinstance(other,tuple):
if len(other)==2:
if isinstance(other[0],int) and isinstance(other[1],int):
minElements, optElements = other
optElements -= minElements
else:
raise TypeError("cannot multiply 'ParserElement' and ('%s','%s') objects", type(other[0]),type(other[1]))
else:
raise TypeError("can only multiply 'ParserElement' and int or (int,int) objects")
else:
raise TypeError("cannot multiply 'ParserElement' and '%s' objects", type(other))
if minElements < 0:
raise ValueError("cannot multiply ParserElement by negative value")
if optElements < 0:
raise ValueError("second tuple value must be greater or equal to first tuple value")
if minElements == optElements == 0:
raise ValueError("cannot multiply ParserElement by 0 or (0,0)")
if (optElements):
def makeOptionalList(n):
if n>1:
return Optional(self + makeOptionalList(n-1))
else:
return Optional(self)
if minElements:
ret = And([self]*minElements)+ makeOptionalList(optElements)
else:
ret = makeOptionalList(optElements)
else:
ret = And([self]*minElements)
return ret
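# sketch of the repetition semantics implemented above:
#   expr*3      is equivalent to  expr + expr + expr
#   expr*(2,4)  matches 2, 3, or 4 occurrences of expr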
def __rmul__(self, other):
return self.__mul__(other)
def __or__(self, other ):
"""Implementation of | operator - returns MatchFirst"""
if isinstance( other, __BASE_STRING__ ):
other = Literal( other )
if not isinstance( other, ParserElement ):
warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning, stacklevel=2)
return None
return MatchFirst( [ self, other ] )
def __ror__(self, other ):
"""Implementation of | operator when left operand is not a ParserElement"""
if isinstance( other, __BASE_STRING__ ):
other = Literal( other )
if not isinstance( other, ParserElement ):
warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning, stacklevel=2)
return None
return other | self
def __xor__(self, other ):
"""Implementation of ^ operator - returns Or"""
if isinstance( other, __BASE_STRING__ ):
other = Literal( other )
if not isinstance( other, ParserElement ):
warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning, stacklevel=2)
return None
return Or( [ self, other ] )
def __rxor__(self, other ):
"""Implementation of ^ operator when left operand is not a ParserElement"""
if isinstance( other, __BASE_STRING__ ):
other = Literal( other )
if not isinstance( other, ParserElement ):
warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning, stacklevel=2)
return None
return other ^ self
def __and__(self, other ):
"""Implementation of & operator - returns Each"""
if isinstance( other, __BASE_STRING__ ):
other = Literal( other )
if not isinstance( other, ParserElement ):
warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning, stacklevel=2)
return None
return Each( [ self, other ] )
def __rand__(self, other ):
"""Implementation of & operator when left operand is not a ParserElement"""
if isinstance( other, __BASE_STRING__ ):
other = Literal( other )
if not isinstance( other, ParserElement ):
warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning, stacklevel=2)
return None
return other & self
def __invert__( self ):
"""Implementation of ~ operator - returns NotAny"""
return NotAny( self )
def __call__(self, name):
"""Shortcut for setResultsName, with listAllMatches=default::
userdata = Word(alphas).setResultsName("name") + Word(nums+"-").setResultsName("socsecno")
could be written as::
userdata = Word(alphas)("name") + Word(nums+"-")("socsecno")
"""
return self.setResultsName(name)
def suppress( self ):
"""Suppresses the output of this ParserElement; useful to keep punctuation from
cluttering up returned output.
"""
return Suppress( self )
def leaveWhitespace( self ):
"""Disables the skipping of whitespace before matching the characters in the
ParserElement's defined pattern. This is normally only used internally by
the pyparsing module, but may be needed in some whitespace-sensitive grammars.
"""
self.skipWhitespace = False
return self
def setWhitespaceChars( self, chars ):
"""Overrides the default whitespace chars
"""
self.skipWhitespace = True
self.whiteChars = chars
self.copyDefaultWhiteChars = False
return self
def parseWithTabs( self ):
"""Overrides default behavior to expand <TAB>s to spaces before parsing the input string.
Must be called before parseString when the input grammar contains elements that
match <TAB> characters."""
self.keepTabs = True
return self
def ignore( self, other ):
"""Define expression to be ignored (e.g., comments) while doing pattern
matching; may be called repeatedly, to define multiple comment or other
ignorable patterns.
"""
if isinstance( other, Suppress ):
if other not in self.ignoreExprs:
self.ignoreExprs.append( other )
else:
self.ignoreExprs.append( Suppress( other ) )
return self
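# sketch (cStyleComment is one of the helper expressions defined later in
# this module):
#   expr.ignore( cStyleComment )   # skip C-style comments wherever they occur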
def setDebugActions( self, startAction, successAction, exceptionAction ):
"""Enable display of debugging messages while doing pattern matching."""
self.debugActions = (startAction or _defaultStartDebugAction,
successAction or _defaultSuccessDebugAction,
exceptionAction or _defaultExceptionDebugAction)
self.debug = True
return self
def setDebug( self, flag=True ):
"""Enable display of debugging messages while doing pattern matching.
Set flag to True to enable, False to disable."""
if flag:
self.setDebugActions( _defaultStartDebugAction, _defaultSuccessDebugAction, _defaultExceptionDebugAction )
else:
self.debug = False
return self
def __str__( self ):
return self.name
def __repr__( self ):
return _ustr(self)
def streamline( self ):
self.streamlined = True
self.strRepr = None
return self
def checkRecursion( self, parseElementList ):
pass
def validate( self, validateTrace=[] ):
"""Check defined expressions for valid structure, check for infinite recursive definitions."""
self.checkRecursion( [] )
def parseFile( self, file_or_filename ):
"""Execute the parse expression on the given file or filename.
If a filename is specified (instead of a file object),
the entire file is opened, read, and closed before parsing.
"""
try:
file_contents = file_or_filename.read()
except AttributeError:
f = open(file_or_filename, "rb")
file_contents = f.read()
f.close()
return self.parseString(file_contents)
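# sketch ("data.txt" is a hypothetical input file):
#   results = grammar.parseFile("data.txt")          # filename form
#   results = grammar.parseFile(open("data.txt"))    # file-object form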
def getException(self):
return ParseException("",0,self.errmsg,self)
def __getattr__(self,aname):
if aname == "myException":
self.myException = ret = self.getException()
return ret
else:
raise AttributeError, "no such attribute " + aname
def __eq__(self,other):
if isinstance(other, __BASE_STRING__):
try:
(self + StringEnd()).parseString(_ustr(other))
return True
except ParseException:
return False
else:
return super(ParserElement,self)==other
def __req__(self,other):
return self == other
class Token(ParserElement):
"""Abstract ParserElement subclass, for defining atomic matching patterns."""
def __init__( self ):
super(Token,self).__init__( savelist=False )
#self.myException = ParseException("",0,"",self)
def setName(self, name):
s = super(Token,self).setName(name)
self.errmsg = "Expected " + self.name
#s.myException.msg = self.errmsg
return s
class Empty(Token):
"""An empty token, will always match."""
def __init__( self ):
super(Empty,self).__init__()
self.name = "Empty"
self.mayReturnEmpty = True
self.mayIndexError = False
class NoMatch(Token):
"""A token that will never match."""
def __init__( self ):
super(NoMatch,self).__init__()
self.name = "NoMatch"
self.mayReturnEmpty = True
self.mayIndexError = False
self.errmsg = "Unmatchable token"
#self.myException.msg = self.errmsg
def parseImpl( self, instring, loc, doActions=True ):
exc = self.myException
exc.loc = loc
exc.pstr = instring
raise exc
class Literal(Token):
"""Token to exactly match a specified string."""
def __init__( self, matchString ):
super(Literal,self).__init__()
self.match = matchString
self.matchLen = len(matchString)
try:
self.firstMatchChar = matchString[0]
except IndexError:
warnings.warn("null string passed to Literal; use Empty() instead",
SyntaxWarning, stacklevel=2)
self.__class__ = Empty
self.name = '"%s"' % _ustr(self.match)
self.errmsg = "Expected " + self.name
self.mayReturnEmpty = False
#self.myException.msg = self.errmsg
self.mayIndexError = False
# Performance tuning: this routine gets called a *lot*
# if this is a single character match string and the first character matches,
# short-circuit as quickly as possible, and avoid calling startswith
#~ @profile
def parseImpl( self, instring, loc, doActions=True ):
if (instring[loc] == self.firstMatchChar and
(self.matchLen==1 or instring.startswith(self.match,loc)) ):
return loc+self.matchLen, self.match
#~ raise ParseException( instring, loc, self.errmsg )
exc = self.myException
exc.loc = loc
exc.pstr = instring
raise exc
_L = Literal
class Keyword(Token):
"""Token to exactly match a specified string as a keyword, that is, it must be
immediately followed by a non-keyword character. Compare with Literal::
Literal("if") will match the leading 'if' in 'ifAndOnlyIf'.
Keyword("if") will not; it will only match the leading 'if in 'if x=1', or 'if(y==2)'
Accepts two optional constructor arguments in addition to the keyword string:
identChars is a string of characters that would be valid identifier characters,
defaulting to all alphanumerics + "_" and "$"; caseless allows case-insensitive
matching, default is False.
"""
DEFAULT_KEYWORD_CHARS = alphanums+"_$"
def __init__( self, matchString, identChars=DEFAULT_KEYWORD_CHARS, caseless=False ):
super(Keyword,self).__init__()
self.match = matchString
self.matchLen = len(matchString)
try:
self.firstMatchChar = matchString[0]
except IndexError:
warnings.warn("null string passed to Keyword; use Empty() instead",
SyntaxWarning, stacklevel=2)
self.name = '"%s"' % self.match
self.errmsg = "Expected " + self.name
self.mayReturnEmpty = False
#self.myException.msg = self.errmsg
self.mayIndexError = False
self.caseless = caseless
if caseless:
self.caselessmatch = matchString.upper()
identChars = identChars.upper()
self.identChars = _str2dict(identChars)
def parseImpl( self, instring, loc, doActions=True ):
if self.caseless:
if ( (instring[ loc:loc+self.matchLen ].upper() == self.caselessmatch) and
(loc >= len(instring)-self.matchLen or instring[loc+self.matchLen].upper() not in self.identChars) and
(loc == 0 or instring[loc-1].upper() not in self.identChars) ):
return loc+self.matchLen, self.match
else:
if (instring[loc] == self.firstMatchChar and
(self.matchLen==1 or instring.startswith(self.match,loc)) and
(loc >= len(instring)-self.matchLen or instring[loc+self.matchLen] not in self.identChars) and
(loc == 0 or instring[loc-1] not in self.identChars) ):
return loc+self.matchLen, self.match
#~ raise ParseException( instring, loc, self.errmsg )
exc = self.myException
exc.loc = loc
exc.pstr = instring
raise exc
def copy(self):
c = super(Keyword,self).copy()
c.identChars = Keyword.DEFAULT_KEYWORD_CHARS
return c
def setDefaultKeywordChars( chars ):
"""Overrides the default Keyword chars
"""
Keyword.DEFAULT_KEYWORD_CHARS = chars
setDefaultKeywordChars = staticmethod(setDefaultKeywordChars)
class CaselessLiteral(Literal):
"""Token to match a specified string, ignoring case of letters.
Note: the matched results will always be in the case of the given
match string, NOT the case of the input text.
"""
def __init__( self, matchString ):
super(CaselessLiteral,self).__init__( matchString.upper() )
# Preserve the defining literal.
self.returnString = matchString
self.name = "'%s'" % self.returnString
self.errmsg = "Expected " + self.name
#self.myException.msg = self.errmsg
def parseImpl( self, instring, loc, doActions=True ):
if instring[ loc:loc+self.matchLen ].upper() == self.match:
return loc+self.matchLen, self.returnString
#~ raise ParseException( instring, loc, self.errmsg )
exc = self.myException
exc.loc = loc
exc.pstr = instring
raise exc
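# sketch: results keep the case of the match string, not of the input:
#   CaselessLiteral("Cmd").parseString("CMD")   -> ['Cmd']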
class CaselessKeyword(Keyword):
def __init__( self, matchString, identChars=Keyword.DEFAULT_KEYWORD_CHARS ):
super(CaselessKeyword,self).__init__( matchString, identChars, caseless=True )
def parseImpl( self, instring, loc, doActions=True ):
if ( (instring[ loc:loc+self.matchLen ].upper() == self.caselessmatch) and
(loc >= len(instring)-self.matchLen or instring[loc+self.matchLen].upper() not in self.identChars) ):
return loc+self.matchLen, self.match
#~ raise ParseException( instring, loc, self.errmsg )
exc = self.myException
exc.loc = loc
exc.pstr = instring
raise exc
class Word(Token):
"""Token for matching words composed of allowed character sets.
Defined with string containing all allowed initial characters,
an optional string containing allowed body characters (if omitted,
defaults to the initial character set), and an optional minimum,
maximum, and/or exact length. The default value for min is 1 (a
minimum value < 1 is not valid); the default values for max and exact
are 0, meaning no maximum or exact length restriction.
"""
def __init__( self, initChars, bodyChars=None, min=1, max=0, exact=0, asKeyword=False ):
super(Word,self).__init__()
self.initCharsOrig = initChars
self.initChars = _str2dict(initChars)
if bodyChars :
self.bodyCharsOrig = bodyChars
self.bodyChars = _str2dict(bodyChars)
else:
self.bodyCharsOrig = initChars
self.bodyChars = _str2dict(initChars)
self.maxSpecified = max > 0
if min < 1:
raise ValueError, "cannot specify a minimum length < 1; use Optional(Word()) if zero-length word is permitted"
self.minLen = min
if max > 0:
self.maxLen = max
else:
self.maxLen = __MAX_INT__
if exact > 0:
self.maxLen = exact
self.minLen = exact
self.name = _ustr(self)
self.errmsg = "Expected " + self.name
#self.myException.msg = self.errmsg
self.mayIndexError = False
self.asKeyword = asKeyword
if ' ' not in self.initCharsOrig+self.bodyCharsOrig and (min==1 and max==0 and exact==0):
if self.bodyCharsOrig == self.initCharsOrig:
self.reString = "[%s]+" % _escapeRegexRangeChars(self.initCharsOrig)
elif len(self.bodyCharsOrig) == 1:
self.reString = "%s[%s]*" % \
(re.escape(self.initCharsOrig),
_escapeRegexRangeChars(self.bodyCharsOrig),)
else:
self.reString = "[%s][%s]*" % \
(_escapeRegexRangeChars(self.initCharsOrig),
_escapeRegexRangeChars(self.bodyCharsOrig),)
if self.asKeyword:
self.reString = r"\b"+self.reString+r"\b"
try:
self.re = re.compile( self.reString )
except:
self.re = None
def parseImpl( self, instring, loc, doActions=True ):
if self.re:
result = self.re.match(instring,loc)
if not result:
exc = self.myException
exc.loc = loc
exc.pstr = instring
raise exc
loc = result.end()
return loc,result.group()
if not(instring[ loc ] in self.initChars):
#~ raise ParseException( instring, loc, self.errmsg )
exc = self.myException
exc.loc = loc
exc.pstr = instring
raise exc
start = loc
loc += 1
instrlen = len(instring)
bodychars = self.bodyChars
maxloc = start + self.maxLen
maxloc = min( maxloc, instrlen )
while loc < maxloc and instring[loc] in bodychars:
loc += 1
throwException = False
if loc - start < self.minLen:
throwException = True
if self.maxSpecified and loc < instrlen and instring[loc] in bodychars:
throwException = True
if self.asKeyword:
if (start>0 and instring[start-1] in bodychars) or (loc<instrlen and instring[loc] in bodychars):
throwException = True
if throwException:
#~ raise ParseException( instring, loc, self.errmsg )
exc = self.myException
exc.loc = loc
exc.pstr = instring
raise exc
return loc, instring[start:loc]
def __str__( self ):
try:
return super(Word,self).__str__()
except:
pass
if self.strRepr is None:
def charsAsStr(s):
if len(s)>4:
return s[:4]+"..."
else:
return s
if ( self.initCharsOrig != self.bodyCharsOrig ):
self.strRepr = "W:(%s,%s)" % ( charsAsStr(self.initCharsOrig), charsAsStr(self.bodyCharsOrig) )
else:
self.strRepr = "W:(%s)" % charsAsStr(self.initCharsOrig)
return self.strRepr
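# usage sketch for Word (a typical identifier pattern):
#   identifier = Word(alphas+"_", alphanums+"_")
#   identifier.parseString("foo_bar1")   -> ['foo_bar1']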
class Regex(Token):
"""Token for matching strings that match a given regular expression.
Defined with string specifying the regular expression in a form recognized by the inbuilt Python re module.
"""
def __init__( self, pattern, flags=0):
"""The parameters pattern and flags are passed to the re.compile() function as-is. See the Python re module for an explanation of the acceptable patterns and flags."""
super(Regex,self).__init__()
if len(pattern) == 0:
warnings.warn("null string passed to Regex; use Empty() instead",
SyntaxWarning, stacklevel=2)
self.pattern = pattern
self.flags = flags
try:
self.re = re.compile(self.pattern, self.flags)
self.reString = self.pattern
except sre_constants.error,e:
warnings.warn("invalid pattern (%s) passed to Regex" % pattern,
SyntaxWarning, stacklevel=2)
raise
self.name = _ustr(self)
self.errmsg = "Expected " + self.name
#self.myException.msg = self.errmsg
self.mayIndexError = False
self.mayReturnEmpty = True
def parseImpl( self, instring, loc, doActions=True ):
result = self.re.match(instring,loc)
if not result:
exc = self.myException
exc.loc = loc
exc.pstr = instring
raise exc
loc = result.end()
d = result.groupdict()
ret = ParseResults(result.group())
if d:
for k in d.keys():
ret[k] = d[k]
return loc,ret
def __str__( self ):
try:
return super(Regex,self).__str__()
except:
pass
if self.strRepr is None:
self.strRepr = "Re:(%s)" % repr(self.pattern)
return self.strRepr
class QuotedString(Token):
"""Token for matching strings that are delimited by quoting characters.
"""
def __init__( self, quoteChar, escChar=None, escQuote=None, multiline=False, unquoteResults=True, endQuoteChar=None):
"""
Defined with the following parameters:
- quoteChar - string of one or more characters defining the quote delimiting string
- escChar - character to escape quotes, typically backslash (default=None)
- escQuote - special quote sequence to escape an embedded quote string (such as SQL's "" to escape an embedded ") (default=None)
- multiline - boolean indicating whether quotes can span multiple lines (default=False)
- unquoteResults - boolean indicating whether the matched text should be unquoted (default=True)
- endQuoteChar - string of one or more characters defining the end of the quote delimited string (default=None => same as quoteChar)
"""
super(QuotedString,self).__init__()
# remove white space from quote chars - won't work anyway
quoteChar = quoteChar.strip()
if len(quoteChar) == 0:
warnings.warn("quoteChar cannot be the empty string",SyntaxWarning,stacklevel=2)
raise SyntaxError()
if endQuoteChar is None:
endQuoteChar = quoteChar
else:
endQuoteChar = endQuoteChar.strip()
if len(endQuoteChar) == 0:
warnings.warn("endQuoteChar cannot be the empty string",SyntaxWarning,stacklevel=2)
raise SyntaxError()
self.quoteChar = quoteChar
self.quoteCharLen = len(quoteChar)
self.firstQuoteChar = quoteChar[0]
self.endQuoteChar = endQuoteChar
self.endQuoteCharLen = len(endQuoteChar)
self.escChar = escChar
self.escQuote = escQuote
self.unquoteResults = unquoteResults
if multiline:
self.flags = re.MULTILINE | re.DOTALL
self.pattern = r'%s(?:[^%s%s]' % \
( re.escape(self.quoteChar),
_escapeRegexRangeChars(self.endQuoteChar[0]),
(escChar is not None and _escapeRegexRangeChars(escChar) or '') )
else:
self.flags = 0
self.pattern = r'%s(?:[^%s\n\r%s]' % \
( re.escape(self.quoteChar),
_escapeRegexRangeChars(self.endQuoteChar[0]),
(escChar is not None and _escapeRegexRangeChars(escChar) or '') )
if len(self.endQuoteChar) > 1:
self.pattern += (
'|(?:' + ')|(?:'.join(["%s[^%s]" % (re.escape(self.endQuoteChar[:i]),
_escapeRegexRangeChars(self.endQuoteChar[i]))
for i in range(len(self.endQuoteChar)-1,0,-1)]) + ')'
)
if escQuote:
self.pattern += (r'|(?:%s)' % re.escape(escQuote))
if escChar:
self.pattern += (r'|(?:%s.)' % re.escape(escChar))
self.escCharReplacePattern = re.escape(self.escChar)+"(.)"
self.pattern += (r')*%s' % re.escape(self.endQuoteChar))
try:
self.re = re.compile(self.pattern, self.flags)
self.reString = self.pattern
except sre_constants.error,e:
warnings.warn("invalid pattern (%s) passed to Regex" % self.pattern,
SyntaxWarning, stacklevel=2)
raise
self.name = _ustr(self)
self.errmsg = "Expected " + self.name
#self.myException.msg = self.errmsg
self.mayIndexError = False
self.mayReturnEmpty = True
def parseImpl( self, instring, loc, doActions=True ):
result = instring[loc] == self.firstQuoteChar and self.re.match(instring,loc) or None
if not result:
exc = self.myException
exc.loc = loc
exc.pstr = instring
raise exc
loc = result.end()
ret = result.group()
if self.unquoteResults:
# strip off quotes
ret = ret[self.quoteCharLen:-self.endQuoteCharLen]
if isinstance(ret,__BASE_STRING__):
# replace escaped characters
if self.escChar:
ret = re.sub(self.escCharReplacePattern,"\g<1>",ret)
# replace escaped quotes
if self.escQuote:
ret = ret.replace(self.escQuote, self.endQuoteChar)
return loc, ret
def __str__( self ):
try:
return super(QuotedString,self).__str__()
except:
pass
if self.strRepr is None:
self.strRepr = "quoted string, starting with %s ending with %s" % (self.quoteChar, self.endQuoteChar)
return self.strRepr
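# usage sketch (results are unquoted and unescaped by default):
#   qs = QuotedString('"', escChar='\\')
#   qs.parseString('"say \\"hi\\""')   -> ['say "hi"']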
class CharsNotIn(Token):
"""Token for matching words composed of characters *not* in a given set.
Defined with string containing all disallowed characters, and an optional
minimum, maximum, and/or exact length. The default value for min is 1 (a
minimum value < 1 is not valid); the default values for max and exact
are 0, meaning no maximum or exact length restriction.
"""
def __init__( self, notChars, min=1, max=0, exact=0 ):
super(CharsNotIn,self).__init__()
self.skipWhitespace = False
self.notChars = notChars
if min < 1:
raise ValueError, "cannot specify a minimum length < 1; use Optional(CharsNotIn()) if zero-length char group is permitted"
self.minLen = min
if max > 0:
self.maxLen = max
else:
self.maxLen = __MAX_INT__
if exact > 0:
self.maxLen = exact
self.minLen = exact
self.name = _ustr(self)
self.errmsg = "Expected " + self.name
self.mayReturnEmpty = ( self.minLen == 0 )
#self.myException.msg = self.errmsg
self.mayIndexError = False
def parseImpl( self, instring, loc, doActions=True ):
if instring[loc] in self.notChars:
#~ raise ParseException( instring, loc, self.errmsg )
exc = self.myException
exc.loc = loc
exc.pstr = instring
raise exc
start = loc
loc += 1
notchars = self.notChars
maxlen = min( start+self.maxLen, len(instring) )
while loc < maxlen and \
(instring[loc] not in notchars):
loc += 1
if loc - start < self.minLen:
#~ raise ParseException( instring, loc, self.errmsg )
exc = self.myException
exc.loc = loc
exc.pstr = instring
raise exc
return loc, instring[start:loc]
def __str__( self ):
try:
return super(CharsNotIn, self).__str__()
except:
pass
if self.strRepr is None:
if len(self.notChars) > 4:
self.strRepr = "!W:(%s...)" % self.notChars[:4]
else:
self.strRepr = "!W:(%s)" % self.notChars
return self.strRepr
class White(Token):
"""Special matching class for matching whitespace. Normally, whitespace is ignored
by pyparsing grammars. This class is included when some whitespace structures
are significant. Define with a string containing the whitespace characters to be
matched; default is " \\t\\n". Also takes optional min, max, and exact arguments,
as defined for the Word class."""
whiteStrs = {
" " : "<SPC>",
"\t": "<TAB>",
"\n": "<LF>",
"\r": "<CR>",
"\f": "<FF>",
}
def __init__(self, ws=" \t\r\n", min=1, max=0, exact=0):
super(White,self).__init__()
self.matchWhite = ws
self.setWhitespaceChars( "".join([c for c in self.whiteChars if c not in self.matchWhite]) )
#~ self.leaveWhitespace()
self.name = ("".join([White.whiteStrs[c] for c in self.matchWhite]))
self.mayReturnEmpty = True
self.errmsg = "Expected " + self.name
#self.myException.msg = self.errmsg
self.minLen = min
if max > 0:
self.maxLen = max
else:
self.maxLen = __MAX_INT__
if exact > 0:
self.maxLen = exact
self.minLen = exact
def parseImpl( self, instring, loc, doActions=True ):
if not(instring[ loc ] in self.matchWhite):
#~ raise ParseException( instring, loc, self.errmsg )
exc = self.myException
exc.loc = loc
exc.pstr = instring
raise exc
start = loc
loc += 1
maxloc = start + self.maxLen
maxloc = min( maxloc, len(instring) )
while loc < maxloc and instring[loc] in self.matchWhite:
loc += 1
if loc - start < self.minLen:
#~ raise ParseException( instring, loc, self.errmsg )
exc = self.myException
exc.loc = loc
exc.pstr = instring
raise exc
return loc, instring[start:loc]
class _PositionToken(Token):
def __init__( self ):
super(_PositionToken,self).__init__()
self.name=self.__class__.__name__
self.mayReturnEmpty = True
self.mayIndexError = False
class GoToColumn(_PositionToken):
"""Token to advance to a specific column of input text; useful for tabular report scraping."""
def __init__( self, colno ):
super(GoToColumn,self).__init__()
self.col = colno
def preParse( self, instring, loc ):
if col(loc,instring) != self.col:
instrlen = len(instring)
if self.ignoreExprs:
loc = self._skipIgnorables( instring, loc )
while loc < instrlen and instring[loc].isspace() and col( loc, instring ) != self.col :
loc += 1
return loc
def parseImpl( self, instring, loc, doActions=True ):
thiscol = col( loc, instring )
if thiscol > self.col:
raise ParseException( instring, loc, "Text not in expected column", self )
newloc = loc + self.col - thiscol
ret = instring[ loc: newloc ]
return newloc, ret
class LineStart(_PositionToken):
"""Matches if current position is at the beginning of a line within the parse string"""
def __init__( self ):
super(LineStart,self).__init__()
self.setWhitespaceChars( " \t" )
self.errmsg = "Expected start of line"
#self.myException.msg = self.errmsg
def preParse( self, instring, loc ):
preloc = super(LineStart,self).preParse(instring,loc)
if instring[preloc] == "\n":
loc += 1
return loc
def parseImpl( self, instring, loc, doActions=True ):
if not( loc==0 or
(loc == self.preParse( instring, 0 )) or
(instring[loc-1] == "\n") ): #col(loc, instring) != 1:
#~ raise ParseException( instring, loc, "Expected start of line" )
exc = self.myException
exc.loc = loc
exc.pstr = instring
raise exc
return loc, []
class LineEnd(_PositionToken):
"""Matches if current position is at the end of a line within the parse string"""
def __init__( self ):
super(LineEnd,self).__init__()
self.setWhitespaceChars( " \t" )
self.errmsg = "Expected end of line"
#self.myException.msg = self.errmsg
def parseImpl( self, instring, loc, doActions=True ):
if loc<len(instring):
if instring[loc] == "\n":
return loc+1, "\n"
else:
#~ raise ParseException( instring, loc, "Expected end of line" )
exc = self.myException
exc.loc = loc
exc.pstr = instring
raise exc
elif loc == len(instring):
return loc+1, []
else:
exc = self.myException
exc.loc = loc
exc.pstr = instring
raise exc
class StringStart(_PositionToken):
"""Matches if current position is at the beginning of the parse string"""
def __init__( self ):
super(StringStart,self).__init__()
self.errmsg = "Expected start of text"
#self.myException.msg = self.errmsg
def parseImpl( self, instring, loc, doActions=True ):
if loc != 0:
# see if entire string up to here is just whitespace and ignoreables
if loc != self.preParse( instring, 0 ):
#~ raise ParseException( instring, loc, "Expected start of text" )
exc = self.myException
exc.loc = loc
exc.pstr = instring
raise exc
return loc, []
class StringEnd(_PositionToken):
"""Matches if current position is at the end of the parse string"""
def __init__( self ):
super(StringEnd,self).__init__()
self.errmsg = "Expected end of text"
#self.myException.msg = self.errmsg
def parseImpl( self, instring, loc, doActions=True ):
if loc < len(instring):
#~ raise ParseException( instring, loc, "Expected end of text" )
exc = self.myException
exc.loc = loc
exc.pstr = instring
raise exc
elif loc == len(instring):
return loc+1, []
elif loc > len(instring):
return loc, []
else:
exc = self.myException
exc.loc = loc
exc.pstr = instring
raise exc
class WordStart(_PositionToken):
"""Matches if the current position is at the beginning of a Word, and
is not preceded by any character in a given set of wordChars
    (default=printables). To emulate the \\b behavior of regular expressions,
use WordStart(alphanums). WordStart will also match at the beginning of
the string being parsed, or at the beginning of a line.
"""
def __init__(self, wordChars = printables):
super(WordStart,self).__init__()
self.wordChars = _str2dict(wordChars)
self.errmsg = "Not at the start of a word"
def parseImpl(self, instring, loc, doActions=True ):
if loc != 0:
if (instring[loc-1] in self.wordChars or
instring[loc] not in self.wordChars):
exc = self.myException
exc.loc = loc
exc.pstr = instring
raise exc
return loc, []
class WordEnd(_PositionToken):
"""Matches if the current position is at the end of a Word, and
is not followed by any character in a given set of wordChars
    (default=printables). To emulate the \\b behavior of regular expressions,
use WordEnd(alphanums). WordEnd will also match at the end of
the string being parsed, or at the end of a line.
"""
def __init__(self, wordChars = printables):
super(WordEnd,self).__init__()
self.wordChars = _str2dict(wordChars)
self.skipWhitespace = False
self.errmsg = "Not at the end of a word"
def parseImpl(self, instring, loc, doActions=True ):
instrlen = len(instring)
if instrlen>0 and loc<instrlen:
if (instring[loc] in self.wordChars or
instring[loc-1] not in self.wordChars):
#~ raise ParseException( instring, loc, "Expected end of word" )
exc = self.myException
exc.loc = loc
exc.pstr = instring
raise exc
return loc, []
class ParseExpression(ParserElement):
"""Abstract subclass of ParserElement, for combining and post-processing parsed tokens."""
def __init__( self, exprs, savelist = False ):
super(ParseExpression,self).__init__(savelist)
if isinstance( exprs, list ):
self.exprs = exprs
elif isinstance( exprs, __BASE_STRING__ ):
self.exprs = [ Literal( exprs ) ]
else:
self.exprs = [ exprs ]
self.callPreparse = False
def __getitem__( self, i ):
return self.exprs[i]
def append( self, other ):
self.exprs.append( other )
self.strRepr = None
return self
def leaveWhitespace( self ):
"""Extends leaveWhitespace defined in base class, and also invokes leaveWhitespace on
all contained expressions."""
self.skipWhitespace = False
self.exprs = [ e.copy() for e in self.exprs ]
for e in self.exprs:
e.leaveWhitespace()
return self
def ignore( self, other ):
if isinstance( other, Suppress ):
if other not in self.ignoreExprs:
super( ParseExpression, self).ignore( other )
for e in self.exprs:
e.ignore( self.ignoreExprs[-1] )
else:
super( ParseExpression, self).ignore( other )
for e in self.exprs:
e.ignore( self.ignoreExprs[-1] )
return self
def __str__( self ):
try:
return super(ParseExpression,self).__str__()
except:
pass
if self.strRepr is None:
self.strRepr = "%s:(%s)" % ( self.__class__.__name__, _ustr(self.exprs) )
return self.strRepr
def streamline( self ):
super(ParseExpression,self).streamline()
for e in self.exprs:
e.streamline()
# collapse nested And's of the form And( And( And( a,b), c), d) to And( a,b,c,d )
# but only if there are no parse actions or resultsNames on the nested And's
# (likewise for Or's and MatchFirst's)
if ( len(self.exprs) == 2 ):
other = self.exprs[0]
if ( isinstance( other, self.__class__ ) and
not(other.parseAction) and
other.resultsName is None and
not other.debug ):
self.exprs = other.exprs[:] + [ self.exprs[1] ]
self.strRepr = None
self.mayReturnEmpty |= other.mayReturnEmpty
self.mayIndexError |= other.mayIndexError
other = self.exprs[-1]
if ( isinstance( other, self.__class__ ) and
not(other.parseAction) and
other.resultsName is None and
not other.debug ):
self.exprs = self.exprs[:-1] + other.exprs[:]
self.strRepr = None
self.mayReturnEmpty |= other.mayReturnEmpty
self.mayIndexError |= other.mayIndexError
return self
def setResultsName( self, name, listAllMatches=False ):
ret = super(ParseExpression,self).setResultsName(name,listAllMatches)
return ret
def validate( self, validateTrace=[] ):
tmp = validateTrace[:]+[self]
for e in self.exprs:
e.validate(tmp)
self.checkRecursion( [] )
class And(ParseExpression):
"""Requires all given ParseExpressions to be found in the given order.
Expressions may be separated by whitespace.
May be constructed using the '+' operator.
"""
def __init__( self, exprs, savelist = True ):
super(And,self).__init__(exprs, savelist)
self.mayReturnEmpty = True
for e in self.exprs:
if not e.mayReturnEmpty:
self.mayReturnEmpty = False
break
        self.setWhitespaceChars( self.exprs[0].whiteChars )
        self.skipWhitespace = self.exprs[0].skipWhitespace
self.callPreparse = True
def parseImpl( self, instring, loc, doActions=True ):
# pass False as last arg to _parse for first element, since we already
# pre-parsed the string as part of our And pre-parsing
loc, resultlist = self.exprs[0]._parse( instring, loc, doActions, callPreParse=False )
for e in self.exprs[1:]:
loc, exprtokens = e._parse( instring, loc, doActions )
if exprtokens or exprtokens.keys():
resultlist += exprtokens
return loc, resultlist
def __iadd__(self, other ):
if isinstance( other, __BASE_STRING__ ):
other = Literal( other )
return self.append( other ) #And( [ self, other ] )
def checkRecursion( self, parseElementList ):
subRecCheckList = parseElementList[:] + [ self ]
for e in self.exprs:
e.checkRecursion( subRecCheckList )
if not e.mayReturnEmpty:
break
def __str__( self ):
if hasattr(self,"name"):
return self.name
if self.strRepr is None:
self.strRepr = "{" + " ".join( [ _ustr(e) for e in self.exprs ] ) + "}"
return self.strRepr
class Or(ParseExpression):
"""Requires that at least one ParseExpression is found.
If two expressions match, the expression that matches the longest string will be used.
May be constructed using the '^' operator.
"""
def __init__( self, exprs, savelist = False ):
super(Or,self).__init__(exprs, savelist)
self.mayReturnEmpty = False
for e in self.exprs:
if e.mayReturnEmpty:
self.mayReturnEmpty = True
break
def parseImpl( self, instring, loc, doActions=True ):
maxExcLoc = -1
maxMatchLoc = -1
for e in self.exprs:
try:
loc2 = e.tryParse( instring, loc )
except ParseException, err:
if err.loc > maxExcLoc:
maxException = err
maxExcLoc = err.loc
except IndexError:
if len(instring) > maxExcLoc:
maxException = ParseException(instring,len(instring),e.errmsg,self)
maxExcLoc = len(instring)
else:
if loc2 > maxMatchLoc:
maxMatchLoc = loc2
maxMatchExp = e
if maxMatchLoc < 0:
if self.exprs:
raise maxException
else:
raise ParseException(instring, loc, "no defined alternatives to match", self)
return maxMatchExp._parse( instring, loc, doActions )
def __ixor__(self, other ):
if isinstance( other, __BASE_STRING__ ):
other = Literal( other )
return self.append( other ) #Or( [ self, other ] )
def __str__( self ):
if hasattr(self,"name"):
return self.name
if self.strRepr is None:
self.strRepr = "{" + " ^ ".join( [ _ustr(e) for e in self.exprs ] ) + "}"
return self.strRepr
def checkRecursion( self, parseElementList ):
subRecCheckList = parseElementList[:] + [ self ]
for e in self.exprs:
e.checkRecursion( subRecCheckList )
class MatchFirst(ParseExpression):
"""Requires that at least one ParseExpression is found.
If two expressions match, the first one listed is the one that will match.
May be constructed using the '|' operator.
"""
def __init__( self, exprs, savelist = False ):
super(MatchFirst,self).__init__(exprs, savelist)
if exprs:
self.mayReturnEmpty = False
for e in self.exprs:
if e.mayReturnEmpty:
self.mayReturnEmpty = True
break
else:
self.mayReturnEmpty = True
def parseImpl( self, instring, loc, doActions=True ):
maxExcLoc = -1
for e in self.exprs:
try:
ret = e._parse( instring, loc, doActions )
return ret
except ParseException, err:
if err.loc > maxExcLoc:
maxException = err
maxExcLoc = err.loc
except IndexError:
if len(instring) > maxExcLoc:
maxException = ParseException(instring,len(instring),e.errmsg,self)
maxExcLoc = len(instring)
# only got here if no expression matched, raise exception for match that made it the furthest
else:
if self.exprs:
raise maxException
else:
raise ParseException(instring, loc, "no defined alternatives to match", self)
def __ior__(self, other ):
if isinstance( other, __BASE_STRING__ ):
other = Literal( other )
return self.append( other ) #MatchFirst( [ self, other ] )
def __str__( self ):
if hasattr(self,"name"):
return self.name
if self.strRepr is None:
self.strRepr = "{" + " | ".join( [ _ustr(e) for e in self.exprs ] ) + "}"
return self.strRepr
def checkRecursion( self, parseElementList ):
subRecCheckList = parseElementList[:] + [ self ]
for e in self.exprs:
e.checkRecursion( subRecCheckList )
class Each(ParseExpression):
"""Requires all given ParseExpressions to be found, but in any order.
Expressions may be separated by whitespace.
May be constructed using the '&' operator.
"""
def __init__( self, exprs, savelist = True ):
super(Each,self).__init__(exprs, savelist)
self.mayReturnEmpty = True
for e in self.exprs:
if not e.mayReturnEmpty:
self.mayReturnEmpty = False
break
self.skipWhitespace = True
self.optionals = [ e.expr for e in exprs if isinstance(e,Optional) ]
self.multioptionals = [ e.expr for e in exprs if isinstance(e,ZeroOrMore) ]
self.multirequired = [ e.expr for e in exprs if isinstance(e,OneOrMore) ]
self.required = [ e for e in exprs if not isinstance(e,(Optional,ZeroOrMore,OneOrMore)) ]
self.required += self.multirequired
def parseImpl( self, instring, loc, doActions=True ):
tmpLoc = loc
tmpReqd = self.required[:]
tmpOpt = self.optionals[:]
matchOrder = []
keepMatching = True
while keepMatching:
tmpExprs = tmpReqd + tmpOpt + self.multioptionals + self.multirequired
failed = []
for e in tmpExprs:
try:
tmpLoc = e.tryParse( instring, tmpLoc )
except ParseException:
failed.append(e)
else:
matchOrder.append(e)
if e in tmpReqd:
tmpReqd.remove(e)
elif e in tmpOpt:
tmpOpt.remove(e)
if len(failed) == len(tmpExprs):
keepMatching = False
if tmpReqd:
missing = ", ".join( [ _ustr(e) for e in tmpReqd ] )
raise ParseException(instring,loc,"Missing one or more required elements (%s)" % missing )
resultlist = []
for e in matchOrder:
loc,results = e._parse(instring,loc,doActions)
resultlist.append(results)
finalResults = ParseResults([])
for r in resultlist:
dups = {}
for k in r.keys():
if k in finalResults.keys():
tmp = ParseResults(finalResults[k])
tmp += ParseResults(r[k])
dups[k] = tmp
finalResults += ParseResults(r)
for k,v in dups.items():
finalResults[k] = v
return loc, finalResults
def __str__( self ):
if hasattr(self,"name"):
return self.name
if self.strRepr is None:
self.strRepr = "{" + " & ".join( [ _ustr(e) for e in self.exprs ] ) + "}"
return self.strRepr
def checkRecursion( self, parseElementList ):
subRecCheckList = parseElementList[:] + [ self ]
for e in self.exprs:
e.checkRecursion( subRecCheckList )
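# Illustrative sketch of Each (hypothetical grammar): the '&' operator builds
# an Each, so the two attributes below may appear in either order.
#   color = Keyword("color") + Word(alphas)
#   size = Keyword("size") + Word(nums)
#   attrs = color & size
#   attrs.parseString("size 10 color red")   # matches despite the order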
class ParseElementEnhance(ParserElement):
"""Abstract subclass of ParserElement, for combining and post-processing parsed tokens."""
def __init__( self, expr, savelist=False ):
super(ParseElementEnhance,self).__init__(savelist)
if isinstance( expr, __BASE_STRING__ ):
expr = Literal(expr)
self.expr = expr
self.strRepr = None
if expr is not None:
self.mayIndexError = expr.mayIndexError
self.mayReturnEmpty = expr.mayReturnEmpty
self.setWhitespaceChars( expr.whiteChars )
self.skipWhitespace = expr.skipWhitespace
self.saveAsList = expr.saveAsList
self.callPreparse = expr.callPreparse
self.ignoreExprs.extend(expr.ignoreExprs)
def parseImpl( self, instring, loc, doActions=True ):
if self.expr is not None:
return self.expr._parse( instring, loc, doActions, callPreParse=False )
else:
raise ParseException("",loc,self.errmsg,self)
def leaveWhitespace( self ):
self.skipWhitespace = False
self.expr = self.expr.copy()
if self.expr is not None:
self.expr.leaveWhitespace()
return self
def ignore( self, other ):
if isinstance( other, Suppress ):
if other not in self.ignoreExprs:
super( ParseElementEnhance, self).ignore( other )
if self.expr is not None:
self.expr.ignore( self.ignoreExprs[-1] )
else:
super( ParseElementEnhance, self).ignore( other )
if self.expr is not None:
self.expr.ignore( self.ignoreExprs[-1] )
return self
def streamline( self ):
super(ParseElementEnhance,self).streamline()
if self.expr is not None:
self.expr.streamline()
return self
def checkRecursion( self, parseElementList ):
if self in parseElementList:
raise RecursiveGrammarException( parseElementList+[self] )
subRecCheckList = parseElementList[:] + [ self ]
if self.expr is not None:
self.expr.checkRecursion( subRecCheckList )
def validate( self, validateTrace=[] ):
tmp = validateTrace[:]+[self]
if self.expr is not None:
self.expr.validate(tmp)
self.checkRecursion( [] )
def __str__( self ):
try:
return super(ParseElementEnhance,self).__str__()
except:
pass
if self.strRepr is None and self.expr is not None:
self.strRepr = "%s:(%s)" % ( self.__class__.__name__, _ustr(self.expr) )
return self.strRepr
class FollowedBy(ParseElementEnhance):
"""Lookahead matching of the given parse expression. FollowedBy
does *not* advance the parsing position within the input string, it only
verifies that the specified parse expression matches at the current
position. FollowedBy always returns a null token list."""
def __init__( self, expr ):
super(FollowedBy,self).__init__(expr)
self.mayReturnEmpty = True
def parseImpl( self, instring, loc, doActions=True ):
self.expr.tryParse( instring, loc )
return loc, []
class NotAny(ParseElementEnhance):
"""Lookahead to disallow matching with the given parse expression. NotAny
does *not* advance the parsing position within the input string, it only
verifies that the specified parse expression does *not* match at the current
position. Also, NotAny does *not* skip over leading whitespace. NotAny
always returns a null token list. May be constructed using the '~' operator."""
def __init__( self, expr ):
super(NotAny,self).__init__(expr)
#~ self.leaveWhitespace()
self.skipWhitespace = False # do NOT use self.leaveWhitespace(), don't want to propagate to exprs
self.mayReturnEmpty = True
self.errmsg = "Found unwanted token, "+_ustr(self.expr)
#self.myException = ParseException("",0,self.errmsg,self)
def parseImpl( self, instring, loc, doActions=True ):
try:
self.expr.tryParse( instring, loc )
except (ParseException,IndexError):
pass
else:
#~ raise ParseException(instring, loc, self.errmsg )
exc = self.myException
exc.loc = loc
exc.pstr = instring
raise exc
return loc, []
def __str__( self ):
if hasattr(self,"name"):
return self.name
if self.strRepr is None:
self.strRepr = "~{" + _ustr(self.expr) + "}"
return self.strRepr
class ZeroOrMore(ParseElementEnhance):
"""Optional repetition of zero or more of the given expression."""
def __init__( self, expr ):
super(ZeroOrMore,self).__init__(expr)
self.mayReturnEmpty = True
def parseImpl( self, instring, loc, doActions=True ):
tokens = []
try:
loc, tokens = self.expr._parse( instring, loc, doActions, callPreParse=False )
hasIgnoreExprs = ( len(self.ignoreExprs) > 0 )
while 1:
if hasIgnoreExprs:
preloc = self._skipIgnorables( instring, loc )
else:
preloc = loc
loc, tmptokens = self.expr._parse( instring, preloc, doActions )
if tmptokens or tmptokens.keys():
tokens += tmptokens
except (ParseException,IndexError):
pass
return loc, tokens
def __str__( self ):
if hasattr(self,"name"):
return self.name
if self.strRepr is None:
self.strRepr = "[" + _ustr(self.expr) + "]..."
return self.strRepr
def setResultsName( self, name, listAllMatches=False ):
ret = super(ZeroOrMore,self).setResultsName(name,listAllMatches)
ret.saveAsList = True
return ret
class OneOrMore(ParseElementEnhance):
"""Repetition of one or more of the given expression."""
def parseImpl( self, instring, loc, doActions=True ):
# must be at least one
loc, tokens = self.expr._parse( instring, loc, doActions, callPreParse=False )
try:
hasIgnoreExprs = ( len(self.ignoreExprs) > 0 )
while 1:
if hasIgnoreExprs:
preloc = self._skipIgnorables( instring, loc )
else:
preloc = loc
loc, tmptokens = self.expr._parse( instring, preloc, doActions )
if tmptokens or tmptokens.keys():
tokens += tmptokens
except (ParseException,IndexError):
pass
return loc, tokens
def __str__( self ):
if hasattr(self,"name"):
return self.name
if self.strRepr is None:
self.strRepr = "{" + _ustr(self.expr) + "}..."
return self.strRepr
def setResultsName( self, name, listAllMatches=False ):
ret = super(OneOrMore,self).setResultsName(name,listAllMatches)
ret.saveAsList = True
return ret
class _NullToken(object):
    def __bool__(self):
        return False
    __nonzero__ = __bool__  # Python 2 uses __nonzero__ for truth testing
    def __str__(self):
        return ""
_optionalNotMatched = _NullToken()
class Optional(ParseElementEnhance):
"""Optional matching of the given expression.
A default return string can also be specified, if the optional expression
is not found.
"""
def __init__( self, exprs, default=_optionalNotMatched ):
super(Optional,self).__init__( exprs, savelist=False )
self.defaultValue = default
self.mayReturnEmpty = True
def parseImpl( self, instring, loc, doActions=True ):
try:
loc, tokens = self.expr._parse( instring, loc, doActions, callPreParse=False )
except (ParseException,IndexError):
if self.defaultValue is not _optionalNotMatched:
tokens = [ self.defaultValue ]
else:
tokens = []
return loc, tokens
def __str__( self ):
if hasattr(self,"name"):
return self.name
if self.strRepr is None:
self.strRepr = "[" + _ustr(self.expr) + "]"
return self.strRepr
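# Illustrative sketch of Optional with a default value (hypothetical grammar):
#   port = Optional(Word(nums), default="8080")
#   (Word(alphas) + port).parseString("localhost")   # -> ['localhost', '8080']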
class SkipTo(ParseElementEnhance):
"""Token for skipping over all undefined text until the matched expression is found.
If include is set to true, the matched expression is also consumed. The ignore
argument is used to define grammars (typically quoted strings and comments) that
might contain false matches.
"""
def __init__( self, other, include=False, ignore=None ):
super( SkipTo, self ).__init__( other )
if ignore is not None:
self.expr = self.expr.copy()
self.expr.ignore(ignore)
self.mayReturnEmpty = True
self.mayIndexError = False
self.includeMatch = include
self.asList = False
self.errmsg = "No match found for "+_ustr(self.expr)
#self.myException = ParseException("",0,self.errmsg,self)
def parseImpl( self, instring, loc, doActions=True ):
startLoc = loc
instrlen = len(instring)
expr = self.expr
while loc <= instrlen:
try:
loc = expr._skipIgnorables( instring, loc )
expr._parse( instring, loc, doActions=False, callPreParse=False )
if self.includeMatch:
skipText = instring[startLoc:loc]
loc,mat = expr._parse(instring,loc,doActions,callPreParse=False)
if mat:
skipRes = ParseResults( skipText )
skipRes += mat
return loc, [ skipRes ]
else:
return loc, [ skipText ]
else:
return loc, [ instring[startLoc:loc] ]
except (ParseException,IndexError):
loc += 1
exc = self.myException
exc.loc = loc
exc.pstr = instring
raise exc
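# Illustrative sketch of SkipTo (hypothetical grammar): consume arbitrary text
# up to a terminator; with include=True the terminator itself is consumed too.
#   stmt = SkipTo(";", include=True)
#   stmt.parseString("anything at all;")   # skipped text plus the ';'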
class Forward(ParseElementEnhance):
"""Forward declaration of an expression to be defined later -
used for recursive grammars, such as algebraic infix notation.
When the expression is known, it is assigned to the Forward variable using the '<<' operator.
Note: take care when assigning to Forward not to overlook precedence of operators.
Specifically, '|' has a lower precedence than '<<', so that::
fwdExpr << a | b | c
will actually be evaluated as::
(fwdExpr << a) | b | c
thereby leaving b and c out as parseable alternatives. It is recommended that you
explicitly group the values inserted into the Forward::
fwdExpr << (a | b | c)
"""
def __init__( self, other=None ):
super(Forward,self).__init__( other, savelist=False )
def __lshift__( self, other ):
if isinstance( other, __BASE_STRING__ ):
other = Literal(other)
self.expr = other
self.mayReturnEmpty = other.mayReturnEmpty
self.strRepr = None
self.mayIndexError = self.expr.mayIndexError
self.mayReturnEmpty = self.expr.mayReturnEmpty
self.setWhitespaceChars( self.expr.whiteChars )
self.skipWhitespace = self.expr.skipWhitespace
self.saveAsList = self.expr.saveAsList
self.ignoreExprs.extend(self.expr.ignoreExprs)
return None
def leaveWhitespace( self ):
self.skipWhitespace = False
return self
def streamline( self ):
if not self.streamlined:
self.streamlined = True
if self.expr is not None:
self.expr.streamline()
return self
def validate( self, validateTrace=[] ):
if self not in validateTrace:
tmp = validateTrace[:]+[self]
if self.expr is not None:
self.expr.validate(tmp)
self.checkRecursion([])
def __str__( self ):
if hasattr(self,"name"):
return self.name
self.__class__ = _ForwardNoRecurse
try:
if self.expr is not None:
retString = _ustr(self.expr)
else:
retString = "None"
finally:
self.__class__ = Forward
return "Forward: "+retString
def copy(self):
if self.expr is not None:
return super(Forward,self).copy()
else:
ret = Forward()
ret << self
return ret
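# Illustrative sketch of Forward for a recursive grammar (hypothetical usage);
# note the parenthesized right-hand side, as recommended in the docstring above.
#   expr = Forward()
#   atom = Word(nums) | Suppress("(") + expr + Suppress(")")
#   expr << ( atom + ZeroOrMore( oneOf("+ -") + atom ) )
#   expr.parseString("(1+2)+3")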
class _ForwardNoRecurse(Forward):
def __str__( self ):
return "..."
class TokenConverter(ParseElementEnhance):
"""Abstract subclass of ParseExpression, for converting parsed results."""
def __init__( self, expr, savelist=False ):
super(TokenConverter,self).__init__( expr )#, savelist )
self.saveAsList = False
class Upcase(TokenConverter):
"""Converter to upper case all matching tokens."""
def __init__(self, *args):
super(Upcase,self).__init__(*args)
warnings.warn("Upcase class is deprecated, use upcaseTokens parse action instead",
DeprecationWarning,stacklevel=2)
def postParse( self, instring, loc, tokenlist ):
return map( string.upper, tokenlist )
class Combine(TokenConverter):
"""Converter to concatenate all matching tokens to a single string.
By default, the matching patterns must also be contiguous in the input string;
this can be disabled by specifying 'adjacent=False' in the constructor.
"""
def __init__( self, expr, joinString="", adjacent=True ):
super(Combine,self).__init__( expr )
# suppress whitespace-stripping in contained parse expressions, but re-enable it on the Combine itself
if adjacent:
self.leaveWhitespace()
self.adjacent = adjacent
self.skipWhitespace = True
self.joinString = joinString
def ignore( self, other ):
if self.adjacent:
ParserElement.ignore(self, other)
else:
super( Combine, self).ignore( other )
return self
def postParse( self, instring, loc, tokenlist ):
retToks = tokenlist.copy()
del retToks[:]
retToks += ParseResults([ "".join(tokenlist._asStringList(self.joinString)) ], modal=self.modalResults)
if self.resultsName and len(retToks.keys())>0:
return [ retToks ]
else:
return retToks
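# Illustrative sketch of Combine (hypothetical grammar): adjacent tokens are
# joined into a single string token.
#   realnum = Combine( Word(nums) + "." + Word(nums) )
#   realnum.parseString("3.14")   # -> ['3.14'], not ['3', '.', '14']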
class Group(TokenConverter):
"""Converter to return the matched tokens as a list - useful for returning tokens of ZeroOrMore and OneOrMore expressions."""
def __init__( self, expr ):
super(Group,self).__init__( expr )
self.saveAsList = True
def postParse( self, instring, loc, tokenlist ):
return [ tokenlist ]
class Dict(TokenConverter):
"""Converter to return a repetitive expression as a list, but also as a dictionary.
Each element can also be referenced using the first token in the expression as its key.
    Useful for tabular report scraping when the first column can be used as an item key.
"""
def __init__( self, exprs ):
super(Dict,self).__init__( exprs )
self.saveAsList = True
def postParse( self, instring, loc, tokenlist ):
for i,tok in enumerate(tokenlist):
if len(tok) == 0:
continue
ikey = tok[0]
if isinstance(ikey,int):
ikey = _ustr(tok[0]).strip()
if len(tok)==1:
tokenlist[ikey] = _ParseResultsWithOffset("",i)
elif len(tok)==2 and not isinstance(tok[1],ParseResults):
tokenlist[ikey] = _ParseResultsWithOffset(tok[1],i)
else:
dictvalue = tok.copy() #ParseResults(i)
del dictvalue[0]
if len(dictvalue)!= 1 or (isinstance(dictvalue,ParseResults) and dictvalue.keys()):
tokenlist[ikey] = _ParseResultsWithOffset(dictvalue,i)
else:
tokenlist[ikey] = _ParseResultsWithOffset(dictvalue[0],i)
if self.resultsName:
return [ tokenlist ]
else:
return tokenlist
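# Illustrative sketch of Dict (hypothetical grammar): each group's first token
# becomes a results key.
#   entry = Group( Word(alphas) + Suppress("=") + Word(nums) )
#   table = Dict( OneOrMore(entry) )
#   table.parseString("width=100 height=50")["width"]   # -> '100'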
class Suppress(TokenConverter):
"""Converter for ignoring the results of a parsed expression."""
def postParse( self, instring, loc, tokenlist ):
return []
def suppress( self ):
return self
class OnlyOnce(object):
"""Wrapper for parse actions, to ensure they are only called once."""
def __init__(self, methodCall):
self.callable = ParserElement._normalizeParseActionArgs(methodCall)
self.called = False
def __call__(self,s,l,t):
if not self.called:
results = self.callable(s,l,t)
self.called = True
return results
raise ParseException(s,l,"")
def reset(self):
self.called = False
def traceParseAction(f):
"""Decorator for debugging parse actions."""
f = ParserElement._normalizeParseActionArgs(f)
def z(*paArgs):
thisFunc = f.func_name
s,l,t = paArgs[-3:]
if len(paArgs)>3:
thisFunc = paArgs[0].__class__.__name__ + '.' + thisFunc
sys.stderr.write( ">>entering %s(line: '%s', %d, %s)\n" % (thisFunc,line(l,s),l,t) )
try:
ret = f(*paArgs)
except Exception, exc:
sys.stderr.write( "<<leaving %s (exception: %s)\n" % (thisFunc,exc) )
raise
sys.stderr.write( "<<leaving %s (ret: %s)\n" % (thisFunc,ret) )
return ret
try:
z.__name__ = f.__name__
except AttributeError:
pass
return z
#
# global helpers
#
def delimitedList( expr, delim=",", combine=False ):
"""Helper to define a delimited list of expressions - the delimiter defaults to ','.
    By default, the list elements and delimiters can have intervening whitespace and
    comments, but this can be overridden by passing 'combine=True'.
If combine is set to True, the matching tokens are returned as a single token
string, with the delimiters included; otherwise, the matching tokens are returned
as a list of tokens, with the delimiters suppressed.
"""
dlName = _ustr(expr)+" ["+_ustr(delim)+" "+_ustr(expr)+"]..."
if combine:
return Combine( expr + ZeroOrMore( delim + expr ) ).setName(dlName)
else:
return ( expr + ZeroOrMore( Suppress( delim ) + expr ) ).setName(dlName)
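# Illustrative sketch of delimitedList (hypothetical usage):
#   names = delimitedList( Word(alphas) )
#   names.parseString("a, b, c")   # -> ['a', 'b', 'c'], delimiters suppressed
#   delimitedList( Word(alphas), combine=True ).parseString("a,b,c")   # -> ['a,b,c']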
def countedArray( expr ):
"""Helper to define a counted list of expressions.
This helper defines a pattern of the form::
integer expr expr expr...
where the leading integer tells how many expr expressions follow.
    The matched tokens are returned as a list of expr tokens - the leading count token is suppressed.
"""
arrayExpr = Forward()
def countFieldParseAction(s,l,t):
n = int(t[0])
arrayExpr << (n and Group(And([expr]*n)) or Group(empty))
return []
return ( Word(nums).setName("arrayLen").setParseAction(countFieldParseAction, callDuringTry=True) + arrayExpr )
def _flatten(L):
if type(L) is not list: return [L]
if L == []: return L
return _flatten(L[0]) + _flatten(L[1:])
def matchPreviousLiteral(expr):
"""Helper to define an expression that is indirectly defined from
the tokens matched in a previous expression, that is, it looks
for a 'repeat' of a previous expression. For example::
first = Word(nums)
second = matchPreviousLiteral(first)
matchExpr = first + ":" + second
will match "1:1", but not "1:2". Because this matches a
previous literal, will also match the leading "1:1" in "1:10".
If this is not desired, use matchPreviousExpr.
Do *not* use with packrat parsing enabled.
"""
rep = Forward()
def copyTokenToRepeater(s,l,t):
if t:
if len(t) == 1:
rep << t[0]
else:
# flatten t tokens
tflat = _flatten(t.asList())
rep << And( [ Literal(tt) for tt in tflat ] )
else:
rep << Empty()
expr.addParseAction(copyTokenToRepeater, callDuringTry=True)
return rep
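# Illustrative sketch of matchPreviousLiteral, mirroring the docstring example:
#   first = Word(nums)
#   pair = first + ":" + matchPreviousLiteral(first)
#   pair.parseString("12:12")   # matches; "12:34" would raise ParseException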
def matchPreviousExpr(expr):
"""Helper to define an expression that is indirectly defined from
the tokens matched in a previous expression, that is, it looks
for a 'repeat' of a previous expression. For example::
first = Word(nums)
second = matchPreviousExpr(first)
matchExpr = first + ":" + second
will match "1:1", but not "1:2". Because this matches by
expressions, will *not* match the leading "1:1" in "1:10";
the expressions are evaluated first, and then compared, so
"1" is compared with "10".
Do *not* use with packrat parsing enabled.
"""
rep = Forward()
e2 = expr.copy()
rep << e2
def copyTokenToRepeater(s,l,t):
matchTokens = _flatten(t.asList())
def mustMatchTheseTokens(s,l,t):
theseTokens = _flatten(t.asList())
if theseTokens != matchTokens:
raise ParseException("",0,"")
rep.setParseAction( mustMatchTheseTokens, callDuringTry=True )
expr.addParseAction(copyTokenToRepeater, callDuringTry=True)
return rep
def _escapeRegexRangeChars(s):
#~ escape these chars: ^-]
for c in r"\^-]":
s = s.replace(c,"\\"+c)
s = s.replace("\n",r"\n")
s = s.replace("\t",r"\t")
return _ustr(s)
def oneOf( strs, caseless=False, useRegex=True ):
"""Helper to quickly define a set of alternative Literals, and makes sure to do
longest-first testing when there is a conflict, regardless of the input order,
but returns a MatchFirst for best performance.
Parameters:
- strs - a string of space-delimited literals, or a list of string literals
- caseless - (default=False) - treat all literals as caseless
- useRegex - (default=True) - as an optimization, will generate a Regex
object; otherwise, will generate a MatchFirst object (if caseless=True, or
if creating a Regex raises an exception)
"""
if caseless:
isequal = ( lambda a,b: a.upper() == b.upper() )
masks = ( lambda a,b: b.upper().startswith(a.upper()) )
parseElementClass = CaselessLiteral
else:
isequal = ( lambda a,b: a == b )
masks = ( lambda a,b: b.startswith(a) )
parseElementClass = Literal
if isinstance(strs,(list,tuple)):
symbols = strs[:]
elif isinstance(strs,__BASE_STRING__):
symbols = strs.split()
    else:
        warnings.warn("Invalid argument to oneOf, expected string or list",
                SyntaxWarning, stacklevel=2)
        symbols = []  # degrade gracefully instead of failing with a NameError below
i = 0
while i < len(symbols)-1:
cur = symbols[i]
for j,other in enumerate(symbols[i+1:]):
if ( isequal(other, cur) ):
del symbols[i+j+1]
break
elif ( masks(cur, other) ):
del symbols[i+j+1]
symbols.insert(i,other)
cur = other
break
else:
i += 1
if not caseless and useRegex:
#~ print (strs,"->", "|".join( [ _escapeRegexChars(sym) for sym in symbols] ))
try:
if len(symbols)==len("".join(symbols)):
return Regex( "[%s]" % "".join( [ _escapeRegexRangeChars(sym) for sym in symbols] ) )
else:
return Regex( "|".join( [ re.escape(sym) for sym in symbols] ) )
except:
warnings.warn("Exception creating Regex for oneOf, building MatchFirst",
SyntaxWarning, stacklevel=2)
# last resort, just use MatchFirst
return MatchFirst( [ parseElementClass(sym) for sym in symbols ] )
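# Illustrative sketch of oneOf (hypothetical usage): matching is longest-first,
# so "<=" is tried before "<" no matter how the literals are ordered.
#   comparison_op = oneOf("< <= > >= = !=")
#   comparison_op.parseString("<=")   # -> ['<=']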
def dictOf( key, value ):
"""Helper to easily and clearly define a dictionary by specifying the respective patterns
for the key and value. Takes care of defining the Dict, ZeroOrMore, and Group tokens
in the proper order. The key pattern can include delimiting markers or punctuation,
as long as they are suppressed, thereby leaving the significant key text. The value
pattern can include named results, so that the Dict results can include named token
fields.
"""
return Dict( ZeroOrMore( Group ( key + value ) ) )
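# Illustrative sketch of dictOf (hypothetical grammar):
#   attrs = dictOf( Word(alphas) + Suppress(":"), Word(nums) )
#   attrs.parseString("x: 1 y: 2")["y"]   # -> '2'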
# convenience constants for positional expressions
empty = Empty().setName("empty")
lineStart = LineStart().setName("lineStart")
lineEnd = LineEnd().setName("lineEnd")
stringStart = StringStart().setName("stringStart")
stringEnd = StringEnd().setName("stringEnd")
_escapedPunc = Word( _bslash, r"\[]-*.$+^?()~ ", exact=2 ).setParseAction(lambda s,l,t:t[0][1])
_printables_less_backslash = "".join([ c for c in printables if c not in r"\]" ])
_escapedHexChar = Combine( Suppress(_bslash + "0x") + Word(hexnums) ).setParseAction(lambda s,l,t:unichr(int(t[0],16)))
_escapedOctChar = Combine( Suppress(_bslash) + Word("0","01234567") ).setParseAction(lambda s,l,t:unichr(int(t[0],8)))
_singleChar = _escapedPunc | _escapedHexChar | _escapedOctChar | Word(_printables_less_backslash,exact=1)
_charRange = Group(_singleChar + Suppress("-") + _singleChar)
_reBracketExpr = Literal("[") + Optional("^").setResultsName("negate") + Group( OneOrMore( _charRange | _singleChar ) ).setResultsName("body") + "]"
_expanded = lambda p: (isinstance(p,ParseResults) and ''.join([ unichr(c) for c in range(ord(p[0]),ord(p[1])+1) ]) or p)
def srange(s):
r"""Helper to easily define string ranges for use in Word construction. Borrows
syntax from regexp '[]' string range definitions::
srange("[0-9]") -> "0123456789"
srange("[a-z]") -> "abcdefghijklmnopqrstuvwxyz"
srange("[a-z$_]") -> "abcdefghijklmnopqrstuvwxyz$_"
The input string must be enclosed in []'s, and the returned string is the expanded
character set joined into a single string.
The values enclosed in the []'s may be::
a single character
an escaped character with a leading backslash (such as \- or \])
an escaped hex character with a leading '\0x' (\0x21, which is a '!' character)
an escaped octal character with a leading '\0' (\041, which is a '!' character)
a range of any of the above, separated by a dash ('a-z', etc.)
any combination of the above ('aeiouy', 'a-zA-Z0-9_$', etc.)
"""
try:
return "".join([_expanded(part) for part in _reBracketExpr.parseString(s).body])
except:
return ""
def matchOnlyAtCol(n):
"""Helper method for defining parse actions that require matching at a specific
column in the input text.
"""
def verifyCol(strg,locn,toks):
if col(locn,strg) != n:
raise ParseException(strg,locn,"matched token not at column %d" % n)
return verifyCol
def replaceWith(replStr):
"""Helper method for common parse actions that simply return a literal value. Especially
useful when used with transformString().
"""
def _replFunc(*args):
return [replStr]
return _replFunc
def removeQuotes(s,l,t):
"""Helper parse action for removing quotation marks from parsed quoted strings.
To use, add this parse action to quoted string using::
quotedString.setParseAction( removeQuotes )
"""
return t[0][1:-1]
def upcaseTokens(s,l,t):
"""Helper parse action to convert tokens to upper case."""
return [ tt.upper() for tt in map(_ustr,t) ]
def downcaseTokens(s,l,t):
"""Helper parse action to convert tokens to lower case."""
return [ tt.lower() for tt in map(_ustr,t) ]
def keepOriginalText(s,startLoc,t):
"""Helper parse action to preserve original parsed text,
overriding any nested parse actions."""
try:
endloc = getTokensEndLoc()
except ParseException:
raise ParseFatalException, "incorrect usage of keepOriginalText - may only be called as a parse action"
del t[:]
t += ParseResults(s[startLoc:endloc])
return t
def getTokensEndLoc():
"""Method to be called from within a parse action to determine the end
location of the parsed tokens."""
import inspect
fstack = inspect.stack()
try:
# search up the stack (through intervening argument normalizers) for correct calling routine
for f in fstack[2:]:
if f[3] == "_parseNoCache":
endloc = f[0].f_locals["loc"]
return endloc
else:
raise ParseFatalException, "incorrect usage of getTokensEndLoc - may only be called from within a parse action"
finally:
del fstack
def _makeTags(tagStr, xml):
"""Internal helper to construct opening and closing tag expressions, given a tag name"""
if isinstance(tagStr,__BASE_STRING__):
resname = tagStr
tagStr = Keyword(tagStr, caseless=not xml)
else:
resname = tagStr.name
tagAttrName = Word(alphas,alphanums+"_-:")
if (xml):
tagAttrValue = dblQuotedString.copy().setParseAction( removeQuotes )
openTag = Suppress("<") + tagStr + \
Dict(ZeroOrMore(Group( tagAttrName + Suppress("=") + tagAttrValue ))) + \
Optional("/",default=[False]).setResultsName("empty").setParseAction(lambda s,l,t:t[0]=='/') + Suppress(">")
else:
printablesLessRAbrack = "".join( [ c for c in printables if c not in ">" ] )
tagAttrValue = quotedString.copy().setParseAction( removeQuotes ) | Word(printablesLessRAbrack)
openTag = Suppress("<") + tagStr + \
Dict(ZeroOrMore(Group( tagAttrName.setParseAction(downcaseTokens) + \
Optional( Suppress("=") + tagAttrValue ) ))) + \
Optional("/",default=[False]).setResultsName("empty").setParseAction(lambda s,l,t:t[0]=='/') + Suppress(">")
closeTag = Combine(_L("</") + tagStr + ">")
openTag = openTag.setResultsName("start"+"".join(resname.replace(":"," ").title().split())).setName("<%s>" % tagStr)
closeTag = closeTag.setResultsName("end"+"".join(resname.replace(":"," ").title().split())).setName("</%s>" % tagStr)
return openTag, closeTag
def makeHTMLTags(tagStr):
"""Helper to construct opening and closing tag expressions for HTML, given a tag name"""
return _makeTags( tagStr, False )
def makeXMLTags(tagStr):
"""Helper to construct opening and closing tag expressions for XML, given a tag name"""
return _makeTags( tagStr, True )
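# Illustrative sketch of makeHTMLTags (hypothetical usage): attributes of the
# opening tag become named results.
#   aStart, aEnd = makeHTMLTags("a")
#   link = aStart + SkipTo(aEnd).setResultsName("text") + aEnd
#   r = link.parseString('<a href="x.html">click</a>')
#   r.href, r.text   # attribute value and skipped text, by name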
def withAttribute(*args,**attrDict):
"""Helper to create a validating parse action to be used with start tags created
with makeXMLTags or makeHTMLTags. Use withAttribute to qualify a starting tag
with a required attribute value, to avoid false matches on common tags such as
<TD> or <DIV>.
Call withAttribute with a series of attribute names and values. Specify the list
of filter attributes names and values as:
- keyword arguments, as in (class="Customer",align="right"), or
- a list of name-value tuples, as in ( ("ns1:class", "Customer"), ("ns2:align","right") )
For attribute names with a namespace prefix, you must use the second form. Attribute
names are matched insensitive to upper/lower case.
To verify that the attribute exists, but without specifying a value, pass
withAttribute.ANY_VALUE as the value.
"""
if args:
attrs = args[:]
else:
attrs = attrDict.items()
attrs = [(k,v) for k,v in attrs]
def pa(s,l,tokens):
for attrName,attrValue in attrs:
if attrName not in tokens:
raise ParseException(s,l,"no matching attribute " + attrName)
if attrValue != withAttribute.ANY_VALUE and tokens[attrName] != attrValue:
raise ParseException(s,l,"attribute '%s' has value '%s', must be '%s'" %
(attrName, tokens[attrName], attrValue))
return pa
withAttribute.ANY_VALUE = object()
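# Illustrative sketch of withAttribute (hypothetical usage): only accept <td>
# start tags whose align attribute equals "right".
#   td, tdEnd = makeHTMLTags("td")
#   td.setParseAction( withAttribute(align="right") )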
opAssoc = _Constants()
opAssoc.LEFT = object()
opAssoc.RIGHT = object()
def operatorPrecedence( baseExpr, opList ):
"""Helper method for constructing grammars of expressions made up of
operators working in a precedence hierarchy. Operators may be unary or
binary, left- or right-associative. Parse actions can also be attached
to operator expressions.
Parameters:
- baseExpr - expression representing the most basic element for the nested
- opList - list of tuples, one for each operator precedence level in the
expression grammar; each tuple is of the form
(opExpr, numTerms, rightLeftAssoc, parseAction), where:
- opExpr is the pyparsing expression for the operator;
may also be a string, which will be converted to a Literal
- numTerms is the number of terms for this operator (must
be 1 or 2)
- rightLeftAssoc is the indicator whether the operator is
right or left associative, using the pyparsing-defined
constants opAssoc.RIGHT and opAssoc.LEFT.
- parseAction is the parse action to be associated with
expressions matching this operator expression (the
parse action tuple member may be omitted)
"""
ret = Forward()
lastExpr = baseExpr | ( Suppress('(') + ret + Suppress(')') )
for i,operDef in enumerate(opList):
opExpr,arity,rightLeftAssoc,pa = (operDef + (None,))[:4]
thisExpr = Forward()#.setName("expr%d" % i)
if rightLeftAssoc == opAssoc.LEFT:
if arity == 1:
matchExpr = Group( FollowedBy(lastExpr + opExpr) + lastExpr + OneOrMore( opExpr ) )
elif arity == 2:
matchExpr = Group( FollowedBy(lastExpr + opExpr + lastExpr) + lastExpr + OneOrMore( opExpr + lastExpr ) )
else:
raise ValueError, "operator must be unary (1) or binary (2)"
elif rightLeftAssoc == opAssoc.RIGHT:
if arity == 1:
# try to avoid LR with this extra test
if not isinstance(opExpr, Optional):
opExpr = Optional(opExpr)
matchExpr = FollowedBy(opExpr.expr + thisExpr) + Group( opExpr + thisExpr )
elif arity == 2:
matchExpr = Group( FollowedBy(lastExpr + opExpr + thisExpr) + lastExpr + OneOrMore( opExpr + thisExpr ) )
else:
raise ValueError, "operator must be unary (1) or binary (2)"
else:
raise ValueError, "operator must indicate right or left associativity"
if pa:
matchExpr.setParseAction( pa )
thisExpr << ( matchExpr | lastExpr )
lastExpr = thisExpr
ret << lastExpr
return ret
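# Illustrative sketch of operatorPrecedence for simple arithmetic (hypothetical
# grammar; each precedence level is returned as a nested group):
#   integer = Word(nums)
#   arith = operatorPrecedence( integer, [
#       (oneOf("* /"), 2, opAssoc.LEFT),
#       (oneOf("+ -"), 2, opAssoc.LEFT),
#       ] )
#   arith.parseString("1+2*3").asList()   # -> [['1', '+', ['2', '*', '3']]]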
dblQuotedString = Regex(r'"(?:[^"\n\r\\]|(?:"")|(?:\\x[0-9a-fA-F]+)|(?:\\.))*"').setName("string enclosed in double quotes")
sglQuotedString = Regex(r"'(?:[^'\n\r\\]|(?:'')|(?:\\x[0-9a-fA-F]+)|(?:\\.))*'").setName("string enclosed in single quotes")
quotedString = Regex(r'''(?:"(?:[^"\n\r\\]|(?:"")|(?:\\x[0-9a-fA-F]+)|(?:\\.))*")|(?:'(?:[^'\n\r\\]|(?:'')|(?:\\x[0-9a-fA-F]+)|(?:\\.))*')''').setName("quotedString using single or double quotes")
unicodeString = Combine(_L('u') + quotedString.copy())
def nestedExpr(opener="(", closer=")", content=None, ignoreExpr=quotedString):
"""Helper method for defining nested lists enclosed in opening and closing
delimiters ("(" and ")" are the default).
Parameters:
- opener - opening character for a nested list (default="("); can also be a pyparsing expression
- closer - closing character for a nested list (default=")"); can also be a pyparsing expression
- content - expression for items within the nested lists (default=None)
- ignoreExpr - expression for ignoring opening and closing delimiters (default=quotedString)
If an expression is not provided for the content argument, the nested
expression will capture all whitespace-delimited content between delimiters
as a list of separate values.
Use the ignoreExpr argument to define expressions that may contain
opening or closing characters that should not be treated as opening
or closing characters for nesting, such as quotedString or a comment
expression. Specify multiple expressions using an Or or MatchFirst.
The default is quotedString, but if no expressions are to be ignored,
then pass None for this argument.
"""
if opener == closer:
raise ValueError("opening and closing strings cannot be the same")
if content is None:
if isinstance(opener,__BASE_STRING__) and isinstance(closer,__BASE_STRING__):
content = (empty+CharsNotIn(opener+closer+ParserElement.DEFAULT_WHITE_CHARS).setParseAction(lambda t:t[0].strip()))
else:
raise ValueError("opening and closing arguments must be strings if no content expression is given")
ret = Forward()
if ignoreExpr is not None:
ret << Group( Suppress(opener) + ZeroOrMore( ignoreExpr | ret | content ) + Suppress(closer) )
else:
ret << Group( Suppress(opener) + ZeroOrMore( ret | content ) + Suppress(closer) )
return ret
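# Illustrative sketch of nestedExpr (hypothetical usage): parenthesized content
# comes back as nested lists.
#   nestedExpr().parseString("(a (b c) d)").asList()   # -> [['a', ['b', 'c'], 'd']]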
alphas8bit = srange(r"[\0xc0-\0xd6\0xd8-\0xf6\0xf8-\0xff]")
punc8bit = srange(r"[\0xa1-\0xbf\0xd7\0xf7]")
anyOpenTag,anyCloseTag = makeHTMLTags(Word(alphas,alphanums+"_:"))
commonHTMLEntity = Combine(_L("&") + oneOf("gt lt amp nbsp quot").setResultsName("entity") +";")
_htmlEntityMap = dict(zip("gt lt amp nbsp quot".split(),"><& '"))
replaceHTMLEntity = lambda t : t.entity in _htmlEntityMap and _htmlEntityMap[t.entity] or None
# it's easy to get these comment structures wrong - they're very common, so may as well make them available
cStyleComment = Regex(r"/\*(?:[^*]*\*+)+?/").setName("C style comment")
htmlComment = Regex(r"<!--[\s\S]*?-->")
restOfLine = Regex(r".*").leaveWhitespace()
dblSlashComment = Regex(r"\/\/(\\\n|.)*").setName("// comment")
cppStyleComment = Regex(r"/(?:\*(?:[^*]*\*+)+?/|/[^\n]*(?:\n[^\n]*)*?(?:(?<!\\)|\Z))").setName("C++ style comment")
javaStyleComment = cppStyleComment
pythonStyleComment = Regex(r"#.*").setName("Python style comment")
_noncomma = "".join( [ c for c in printables if c != "," ] )
_commasepitem = Combine(OneOrMore(Word(_noncomma) +
Optional( Word(" \t") +
~Literal(",") + ~LineEnd() ) ) ).streamline().setName("commaItem")
commaSeparatedList = delimitedList( Optional( quotedString | _commasepitem, default="") ).setName("commaSeparatedList")
if __name__ == "__main__":
def test( teststring ):
print (teststring,"->",)
try:
tokens = simpleSQL.parseString( teststring )
tokenlist = tokens.asList()
print (tokenlist)
print ("tokens = ", tokens)
print ("tokens.columns =", tokens.columns)
print ("tokens.tables =", tokens.tables)
print (tokens.asXML("SQL",True))
except ParseException,err:
print (err.line)
print (" "*(err.column-1) + "^")
print (err)
print()
selectToken = CaselessLiteral( "select" )
fromToken = CaselessLiteral( "from" )
ident = Word( alphas, alphanums + "_$" )
columnName = delimitedList( ident, ".", combine=True ).setParseAction( upcaseTokens )
columnNameList = Group( delimitedList( columnName ) )#.setName("columns")
tableName = delimitedList( ident, ".", combine=True ).setParseAction( upcaseTokens )
tableNameList = Group( delimitedList( tableName ) )#.setName("tables")
simpleSQL = ( selectToken + \
( '*' | columnNameList ).setResultsName( "columns" ) + \
fromToken + \
tableNameList.setResultsName( "tables" ) )
test( "SELECT * from XYZZY, ABC" )
test( "select * from SYS.XYZZY" )
test( "Select A from Sys.dual" )
test( "Select AA,BB,CC from Sys.dual" )
test( "Select A, B, C from Sys.dual" )
test( "Select A, B, C from Sys.dual" )
test( "Xelect A, B, C from Sys.dual" )
test( "Select A, B, C frox Sys.dual" )
test( "Select" )
test( "Select ^^^ frox Sys.dual" )
test( "Select A, B, C from Sys.dual, Table2 " )
| bsd-3-clause |
macks22/gensim | gensim/test/test_keras_integration.py | 1 | 6627 | import unittest
import os
import numpy as np
from gensim.models import word2vec
try:
from sklearn.datasets import fetch_20newsgroups
except ImportError:
raise unittest.SkipTest("Test requires sklearn to be installed, which is not available")
try:
import keras
from keras.engine import Input
from keras.models import Model
from keras.layers.merge import dot
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.utils.np_utils import to_categorical
from keras.layers import Dense, Flatten
from keras.layers import Conv1D, MaxPooling1D
except ImportError:
raise unittest.SkipTest("Test requires Keras to be installed, which is not available")
sentences = [
['human', 'interface', 'computer'],
['survey', 'user', 'computer', 'system', 'response', 'time'],
['eps', 'user', 'interface', 'system'],
['system', 'human', 'system', 'eps'],
['user', 'response', 'time'],
['trees'],
['graph', 'trees'],
['graph', 'minors', 'trees'],
['graph', 'minors', 'survey']
]
module_path = os.path.dirname(__file__) # needed because sample data files are located in the same folder
datapath = lambda fname: os.path.join(module_path, 'test_data', fname)
class TestKerasWord2VecWrapper(unittest.TestCase):
def setUp(self):
self.model_cos_sim = word2vec.Word2Vec(sentences, size=100, min_count=1, hs=1)
# self.model_twenty_ng = word2vec.Word2Vec(word2vec.LineSentence(datapath('20_newsgroup_keras_w2v_data.txt')), min_count=1)
self.model_twenty_ng = word2vec.Word2Vec(min_count=1)
def testWord2VecTraining(self):
"""
Test word2vec training.
"""
model = self.model_cos_sim
self.assertTrue(model.wv.syn0.shape == (len(model.wv.vocab), 100))
self.assertTrue(model.syn1.shape == (len(model.wv.vocab), 100))
sims = model.most_similar('graph', topn=10)
# self.assertTrue(sims[0][0] == 'trees', sims) # most similar
# test querying for "most similar" by vector
graph_vector = model.wv.syn0norm[model.wv.vocab['graph'].index]
sims2 = model.most_similar(positive=[graph_vector], topn=11)
sims2 = [(w, sim) for w, sim in sims2 if w != 'graph'] # ignore 'graph' itself
self.assertEqual(sims, sims2)
def testEmbeddingLayerCosineSim(self):
"""
Test Keras 'Embedding' layer returned by 'get_embedding_layer' function for a simple word similarity task.
"""
keras_w2v_model = self.model_cos_sim
keras_w2v_model_wv = keras_w2v_model.wv
embedding_layer = keras_w2v_model_wv.get_embedding_layer()
input_a = Input(shape=(1,), dtype='int32', name='input_a')
input_b = Input(shape=(1,), dtype='int32', name='input_b')
embedding_a = embedding_layer(input_a)
embedding_b = embedding_layer(input_b)
similarity = dot([embedding_a, embedding_b], axes=2, normalize=True)
model = Model(input=[input_a, input_b], output=similarity)
model.compile(optimizer='sgd', loss='mse')
word_a = 'graph'
word_b = 'trees'
output = model.predict([
np.asarray([keras_w2v_model.wv.vocab[word_a].index]),
np.asarray([keras_w2v_model.wv.vocab[word_b].index])
])
# output is the cosine distance between the two words (as a similarity measure)
self.assertTrue(type(output[0][0][0]) == np.float32) # verify that a float is returned
def testEmbeddingLayer20NewsGroup(self):
"""
Test Keras 'Embedding' layer returned by 'get_embedding_layer' function for a smaller version of the 20NewsGroup classification problem.
"""
MAX_SEQUENCE_LENGTH = 1000
# Prepare text samples and their labels
# Processing text dataset
texts = [] # list of text samples
texts_w2v = [] # used to train the word embeddings
labels = [] # list of label ids
data = fetch_20newsgroups(subset='train', categories=['alt.atheism', 'comp.graphics', 'sci.space'])
for index in range(len(data)):
label_id = data.target[index]
file_data = data.data[index]
i = file_data.find('\n\n') # skip header
if i > 0:
file_data = file_data[i:]
try:
curr_str = str(file_data)
sentence_list = curr_str.split('\n')
for sentence in sentence_list:
sentence = (sentence.strip()).lower()
texts.append(sentence)
texts_w2v.append(sentence.split(' '))
labels.append(label_id)
except Exception:
pass
# Vectorize the text samples into a 2D integer tensor
tokenizer = Tokenizer()
tokenizer.fit_on_texts(texts)
sequences = tokenizer.texts_to_sequences(texts)
# word_index = tokenizer.word_index
data = pad_sequences(sequences, maxlen=MAX_SEQUENCE_LENGTH)
labels = to_categorical(np.asarray(labels))
x_train = data
y_train = labels
# prepare the embedding layer using the wrapper
keras_w2v = self.model_twenty_ng
keras_w2v.build_vocab(texts_w2v)
keras_w2v.train(texts, total_examples=keras_w2v.corpus_count, epochs=keras_w2v.iter)
keras_w2v_wv = keras_w2v.wv
embedding_layer = keras_w2v_wv.get_embedding_layer()
# create a 1D convnet to solve our classification task
sequence_input = Input(shape=(MAX_SEQUENCE_LENGTH,), dtype='int32')
embedded_sequences = embedding_layer(sequence_input)
x = Conv1D(128, 5, activation='relu')(embedded_sequences)
x = MaxPooling1D(5)(x)
x = Conv1D(128, 5, activation='relu')(x)
x = MaxPooling1D(5)(x)
x = Conv1D(128, 5, activation='relu')(x)
x = MaxPooling1D(35)(x) # global max pooling
x = Flatten()(x)
x = Dense(128, activation='relu')(x)
preds = Dense(y_train.shape[1], activation='softmax')(x)
model = Model(sequence_input, preds)
model.compile(loss='categorical_crossentropy', optimizer='rmsprop', metrics=['acc'])
fit_ret_val = model.fit(x_train, y_train, epochs=1)
# verify the type of the object returned after training
self.assertTrue(type(fit_ret_val) == keras.callbacks.History) # value returned is a `History` instance. Its `history` attribute contains all information collected during training.
if __name__ == '__main__':
unittest.main()
| lgpl-2.1 |
hfp/tensorflow-xsmm | tensorflow/contrib/ignite/python/ops/igfs_ops.py | 15 | 1920 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Ignite File System for checkpointing and communication with TensorBoard.
Apache Ignite is a memory-centric distributed database, caching, and
processing platform for transactional, analytical, and streaming workloads,
delivering in-memory speeds at petabyte scale. In addition to database
functionality Apache Ignite provides a distributed file system called
IGFS (https://ignite.apache.org/features/igfs.html). IGFS delivers a similar
functionality to Hadoop HDFS, but only in-memory. In fact, in addition to
its own APIs, IGFS implements Hadoop FileSystem API and can be transparently
plugged into Hadoop or Spark deployments. This contrib package contains an
integration between IGFS and TensorFlow.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from tensorflow.contrib.ignite.python.ops import ignite_op_loader # pylint: disable=unused-import
from tensorflow.python.framework import load_library
from tensorflow.python.platform import resource_loader
file_system_library = os.path.join(resource_loader.get_data_files_path(),
"../../_ignite_ops.so")
load_library.load_file_system_library(file_system_library)
| apache-2.0 |
Innovahn/odoo.old | addons/mail/mail_group.py | 247 | 12877 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2010-today OpenERP SA (<http://www.openerp.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
##############################################################################
import openerp
import openerp.tools as tools
from openerp.osv import osv
from openerp.osv import fields
from openerp.tools.safe_eval import safe_eval as eval
from openerp import SUPERUSER_ID
from openerp.tools.translate import _
class mail_group(osv.Model):
""" A mail_group is a collection of users sharing messages in a discussion
group. The group mechanics are based on the followers. """
_description = 'Discussion group'
_name = 'mail.group'
_mail_flat_thread = False
_inherit = ['mail.thread']
_inherits = {'mail.alias': 'alias_id'}
def _get_image(self, cr, uid, ids, name, args, context=None):
result = {}
for obj in self.browse(cr, uid, ids, context=context):
result[obj.id] = tools.image_get_resized_images(obj.image)
return result
def _set_image(self, cr, uid, id, name, value, args, context=None):
return self.write(cr, uid, [id], {'image': tools.image_resize_image_big(value)}, context=context)
_columns = {
'name': fields.char('Name', required=True, translate=True),
'description': fields.text('Description'),
'menu_id': fields.many2one('ir.ui.menu', string='Related Menu', required=True, ondelete="cascade"),
'public': fields.selection([('public', 'Public'), ('private', 'Private'), ('groups', 'Selected Group Only')], 'Privacy', required=True,
                help='This group is visible to non-members. \
Invisible groups can add members through the invite button.'),
'group_public_id': fields.many2one('res.groups', string='Authorized Group'),
'group_ids': fields.many2many('res.groups', rel='mail_group_res_group_rel',
id1='mail_group_id', id2='groups_id', string='Auto Subscription',
help="Members of those groups will automatically be added as followers. "\
"Note that they will be able to manage their subscription manually "\
"if necessary."),
# image: all image fields are base64 encoded and PIL-supported
'image': fields.binary("Photo",
help="This field holds the image used as photo for the group, limited to 1024x1024px."),
'image_medium': fields.function(_get_image, fnct_inv=_set_image,
string="Medium-sized photo", type="binary", multi="_get_image",
store={
'mail.group': (lambda self, cr, uid, ids, c={}: ids, ['image'], 10),
},
help="Medium-sized photo of the group. It is automatically "\
"resized as a 128x128px image, with aspect ratio preserved. "\
"Use this field in form views or some kanban views."),
'image_small': fields.function(_get_image, fnct_inv=_set_image,
string="Small-sized photo", type="binary", multi="_get_image",
store={
'mail.group': (lambda self, cr, uid, ids, c={}: ids, ['image'], 10),
},
help="Small-sized photo of the group. It is automatically "\
"resized as a 64x64px image, with aspect ratio preserved. "\
"Use this field anywhere a small image is required."),
'alias_id': fields.many2one('mail.alias', 'Alias', ondelete="restrict", required=True,
help="The email address associated with this group. New emails received will automatically "
"create new topics."),
}
def _get_default_employee_group(self, cr, uid, context=None):
ref = self.pool.get('ir.model.data').get_object_reference(cr, uid, 'base', 'group_user')
return ref and ref[1] or False
def _get_default_image(self, cr, uid, context=None):
image_path = openerp.modules.get_module_resource('mail', 'static/src/img', 'groupdefault.png')
with open(image_path, 'rb') as image_file:
return tools.image_resize_image_big(image_file.read().encode('base64'))
_defaults = {
'public': 'groups',
'group_public_id': _get_default_employee_group,
'image': _get_default_image,
}
def _generate_header_description(self, cr, uid, group, context=None):
header = ''
if group.description:
header = '%s' % group.description
if group.alias_id and group.alias_name and group.alias_domain:
if header:
header = '%s<br/>' % header
return '%sGroup email gateway: %s@%s' % (header, group.alias_name, group.alias_domain)
return header
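# Illustrative example (assumption): for a group with description "Sales",
# alias name "sales" and alias domain "example.com", the method above returns
# 'Sales<br/>Group email gateway: sales@example.com'.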
def _subscribe_users(self, cr, uid, ids, context=None):
for mail_group in self.browse(cr, uid, ids, context=context):
partner_ids = []
for group in mail_group.group_ids:
partner_ids += [user.partner_id.id for user in group.users]
self.message_subscribe(cr, uid, ids, partner_ids, context=context)
def create(self, cr, uid, vals, context=None):
if context is None:
context = {}
# get parent menu
menu_parent = self.pool.get('ir.model.data').get_object_reference(cr, uid, 'mail', 'mail_group_root')
menu_parent = menu_parent and menu_parent[1] or False
# Create menu id
mobj = self.pool.get('ir.ui.menu')
menu_id = mobj.create(cr, SUPERUSER_ID, {'name': vals['name'], 'parent_id': menu_parent}, context=context)
vals['menu_id'] = menu_id
# Create group and alias
create_context = dict(context, alias_model_name=self._name, alias_parent_model_name=self._name, mail_create_nolog=True)
mail_group_id = super(mail_group, self).create(cr, uid, vals, context=create_context)
group = self.browse(cr, uid, mail_group_id, context=context)
self.pool.get('mail.alias').write(cr, uid, [group.alias_id.id], {"alias_force_thread_id": mail_group_id, 'alias_parent_thread_id': mail_group_id}, context)
group = self.browse(cr, uid, mail_group_id, context=context)
# Create client action for this group and link the menu to it
ref = self.pool.get('ir.model.data').get_object_reference(cr, uid, 'mail', 'action_mail_group_feeds')
if ref:
search_ref = self.pool.get('ir.model.data').get_object_reference(cr, uid, 'mail', 'view_message_search')
params = {
'search_view_id': search_ref and search_ref[1] or False,
'domain': [
('model', '=', 'mail.group'),
('res_id', '=', mail_group_id),
],
'context': {
'default_model': 'mail.group',
'default_res_id': mail_group_id,
},
'res_model': 'mail.message',
'thread_level': 1,
'header_description': self._generate_header_description(cr, uid, group, context=context),
'view_mailbox': True,
'compose_placeholder': 'Send a message to the group',
}
cobj = self.pool.get('ir.actions.client')
newref = cobj.copy(cr, SUPERUSER_ID, ref[1], default={'params': str(params), 'name': vals['name']}, context=context)
mobj.write(cr, SUPERUSER_ID, menu_id, {'action': 'ir.actions.client,' + str(newref), 'mail_group_id': mail_group_id}, context=context)
if vals.get('group_ids'):
self._subscribe_users(cr, uid, [mail_group_id], context=context)
return mail_group_id
def unlink(self, cr, uid, ids, context=None):
groups = self.browse(cr, uid, ids, context=context)
alias_ids = [group.alias_id.id for group in groups if group.alias_id]
menu_ids = [group.menu_id.id for group in groups if group.menu_id]
# Delete mail_group
try:
all_emp_group = self.pool['ir.model.data'].get_object_reference(cr, uid, 'mail', 'group_all_employees')[1]
except ValueError:
all_emp_group = None
if all_emp_group and all_emp_group in ids:
raise osv.except_osv(_('Warning!'), _('You cannot delete those groups, as the Whole Company group is required by other modules.'))
res = super(mail_group, self).unlink(cr, uid, ids, context=context)
# Cascade-delete mail aliases as well, as they should not exist without the mail group.
self.pool.get('mail.alias').unlink(cr, SUPERUSER_ID, alias_ids, context=context)
# Cascade-delete menu entries as well
self.pool.get('ir.ui.menu').unlink(cr, SUPERUSER_ID, menu_ids, context=context)
return res
def write(self, cr, uid, ids, vals, context=None):
result = super(mail_group, self).write(cr, uid, ids, vals, context=context)
if vals.get('group_ids'):
self._subscribe_users(cr, uid, ids, context=context)
# if description, name or alias is changed: update client action
if vals.get('description') or vals.get('name') or vals.get('alias_id') or vals.get('alias_name'):
cobj = self.pool.get('ir.actions.client')
for group in self.browse(cr, uid, ids, context=context):
action = group.menu_id.action
new_params = action.params
new_params['header_description'] = self._generate_header_description(cr, uid, group, context=context)
cobj.write(cr, SUPERUSER_ID, [action.id], {'params': str(new_params)}, context=context)
# if name is changed: update menu
if vals.get('name'):
mobj = self.pool.get('ir.ui.menu')
mobj.write(cr, SUPERUSER_ID,
[group.menu_id.id for group in self.browse(cr, uid, ids, context=context)],
{'name': vals.get('name')}, context=context)
return result
def action_follow(self, cr, uid, ids, context=None):
""" Wrapper because message_subscribe_users take a user_ids=None
that receive the context without the wrapper. """
return self.message_subscribe_users(cr, uid, ids, context=context)
def action_unfollow(self, cr, uid, ids, context=None):
""" Wrapper because message_unsubscribe_users take a user_ids=None
that receive the context without the wrapper. """
return self.message_unsubscribe_users(cr, uid, ids, context=context)
def get_suggested_thread(self, cr, uid, removed_suggested_threads=None, context=None):
"""Show the suggested groups if the display_groups_suggestions
user preference allows it."""
user = self.pool.get('res.users').browse(cr, uid, uid, context)
if not user.display_groups_suggestions:
return []
else:
return super(mail_group, self).get_suggested_thread(cr, uid, removed_suggested_threads, context)
def message_get_email_values(self, cr, uid, id, notif_mail=None, context=None):
res = super(mail_group, self).message_get_email_values(cr, uid, id, notif_mail=notif_mail, context=context)
group = self.browse(cr, uid, id, context=context)
headers = {}
if res.get('headers'):
try:
headers.update(eval(res['headers']))
except Exception:
pass
headers['Precedence'] = 'list'
# avoid out-of-office replies from MS Exchange
# http://blogs.technet.com/b/exchange/archive/2006/10/06/3395024.aspx
headers['X-Auto-Response-Suppress'] = 'OOF'
if group.alias_domain and group.alias_name:
headers['List-Id'] = '%s.%s' % (group.alias_name, group.alias_domain)
headers['List-Post'] = '<mailto:%s@%s>' % (group.alias_name, group.alias_domain)
# Avoid users thinking it was a personal message
# X-Forge-To: will replace To: after SMTP envelope is determined by ir.mail.server
list_to = '"%s" <%s@%s>' % (group.name, group.alias_name, group.alias_domain)
headers['X-Forge-To'] = list_to
res['headers'] = repr(headers)
return res
| agpl-3.0 |
paran0ids0ul/infernal-twin | build/reportlab/src/reportlab/rl_config.py | 31 | 4186 | '''module that aggregates config information'''
__all__=('_reset','register_reset')
def _defaults_init():
'''
create & return defaults for all reportlab settings from
reportlab.rl_settings.py
reportlab.local_rl_settings.py
reportlab_settings.py or ~/.reportlab_settings
latter values override earlier
'''
from reportlab.lib.utils import rl_exec
import os
_DEFAULTS={}
rl_exec('from reportlab.rl_settings import *',_DEFAULTS)
_overrides=_DEFAULTS.copy()
try:
rl_exec('from reportlab.local_rl_settings import *',_overrides)
_DEFAULTS.update(_overrides)
except ImportError:
pass
_overrides=_DEFAULTS.copy()
try:
rl_exec('from reportlab_settings import *',_overrides)
_DEFAULTS.update(_overrides)
except ImportError:
_overrides=_DEFAULTS.copy()
try:
try:
fn = os.path.expanduser(os.path.join('~','.reportlab_settings')) #appengine fails with KeyError/ImportError (dev/live)
except (KeyError, ImportError):
fn = None
if fn:
with open(fn,'rb') as f:
rl_exec(f.read(),_overrides)
_DEFAULTS.update(_overrides)
except:
pass
return _DEFAULTS
_DEFAULTS=_defaults_init()
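# Illustrative sketch (assumption): any name defined in reportlab.rl_settings
# can be overridden by a reportlab_settings.py module on the import path or by
# a ~/.reportlab_settings file, e.g.
#
#     shapeChecking = 0
#     defaultPageSize = 'A4'
#
# Each value can also be overridden at startup through an RL_<name>
# environment variable (see _setOpt below).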
_SAVED = {}
sys_version=None
#this is used to set the options from the environment (RL_<name>) or from the defaults
def _setOpt(name, value, conv=None):
'''set a module level value from environ/default'''
from os import environ
ename = 'RL_'+name
if ename in environ:
value = environ[ename]
if conv: value = conv(value)
globals()[name] = value
def _startUp():
'''This function allows easy resetting to the global defaults
If the environment contains 'RL_xxx' then we use the value
else we use the given default'''
import os, sys
global sys_version, _unset_
sys_version = sys.version.split()[0] #strip off the other garbage
from reportlab.lib import pagesizes
from reportlab.lib.utils import rl_isdir
if _SAVED=={}:
_unset_ = getattr(sys,'_rl_config__unset_',None)
if _unset_ is None:
class _unset_: pass
sys._rl_config__unset_ = _unset_ = _unset_()
global __all__
A = list(__all__)
for k,v in _DEFAULTS.items():
_SAVED[k] = globals()[k] = v
if k not in __all__:
A.append(k)
__all__ = tuple(A)
#places to search for Type 1 Font files
import reportlab
D = {'REPORTLAB_DIR': os.path.abspath(os.path.dirname(reportlab.__file__)),
'CWD': os.getcwd(),
'disk': os.getcwd().split(':')[0],
'sys_version': sys_version,
'XDG_DATA_HOME': os.environ.get('XDG_DATA_HOME','~/.local/share'),
}
for k in _SAVED:
if k.endswith('SearchPath'):
P=[]
for p in _SAVED[k]:
d = (p % D).replace('/',os.sep)
if '~' in d:
try:
d = os.path.expanduser(d) #appengine fails with KeyError/ImportError (dev/live)
except (KeyError, ImportError):
continue
if rl_isdir(d): P.append(d)
_setOpt(k,os.pathsep.join(P),lambda x:x.split(os.pathsep))
globals()[k] = list(filter(rl_isdir,globals()[k]))
else:
v = _SAVED[k]
if isinstance(v,(int,float)): conv = type(v)
elif k=='defaultPageSize': conv = lambda v,M=pagesizes: getattr(M,v)
else: conv = None
_setOpt(k,v,conv)
_registered_resets=[]
def register_reset(func):
'''register a function to be called by rl_config._reset'''
_registered_resets[:] = [x for x in _registered_resets if x()]
L = [x for x in _registered_resets if x() is func]
if L: return
from weakref import ref
_registered_resets.append(ref(func))
def _reset():
'''attempt to reset reportlab and friends'''
_startUp() #our reset
for f in _registered_resets[:]:
c = f()
if c:
c()
else:
_registered_resets.remove(f)
_startUp()
| gpl-3.0 |
mhils/mitmproxy | mitmproxy/proxy/config.py | 1 | 3378 | import os
import re
import typing
from OpenSSL import crypto
from mitmproxy import certs
from mitmproxy import exceptions
from mitmproxy import options as moptions
from mitmproxy.net import server_spec
class HostMatcher:
def __init__(self, handle, patterns=tuple()):
self.handle = handle
self.patterns = list(patterns)
self.regexes = [re.compile(p, re.IGNORECASE) for p in self.patterns]
def __call__(self, address):
if not address:
return False
host = "%s:%s" % address
if self.handle in ["ignore", "tcp"]:
return any(rex.search(host) for rex in self.regexes)
else: # self.handle == "allow"
return not any(rex.search(host) for rex in self.regexes)
def __bool__(self):
return bool(self.patterns)
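# Illustrative sketch (assumption): with handle "ignore" (or "tcp") the matcher
# answers True for addresses matching any pattern; with handle "allow" the
# logic is inverted.
#
#     m = HostMatcher("ignore", [r"^example\.com:443$"])
#     m(("example.com", 443))    # True: matches an ignore pattern
#     m(("mitmproxy.org", 443))  # False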
class ProxyConfig:
def __init__(self, options: moptions.Options) -> None:
self.options = options
self.certstore: certs.CertStore
self.check_filter: typing.Optional[HostMatcher] = None
self.check_tcp: typing.Optional[HostMatcher] = None
self.upstream_server: typing.Optional[server_spec.ServerSpec] = None
self.configure(options, set(options.keys()))
options.changed.connect(self.configure)
def configure(self, options: moptions.Options, updated: typing.Any) -> None:
if options.allow_hosts and options.ignore_hosts:
raise exceptions.OptionsError("--ignore-hosts and --allow-hosts are mutually "
"exclusive; please choose one.")
if options.ignore_hosts:
self.check_filter = HostMatcher("ignore", options.ignore_hosts)
elif options.allow_hosts:
self.check_filter = HostMatcher("allow", options.allow_hosts)
else:
self.check_filter = HostMatcher(False)
if "tcp_hosts" in updated:
self.check_tcp = HostMatcher("tcp", options.tcp_hosts)
certstore_path = os.path.expanduser(options.confdir)
if not os.path.exists(os.path.dirname(certstore_path)):
raise exceptions.OptionsError(
"Certificate Authority parent directory does not exist: %s" %
os.path.dirname(certstore_path)
)
key_size = options.key_size
passphrase = options.cert_passphrase.encode("utf-8") if options.cert_passphrase else None
self.certstore = certs.CertStore.from_store(
certstore_path,
moptions.CONF_BASENAME,
key_size,
passphrase
)
for c in options.certs:
parts = c.split("=", 1)
if len(parts) == 1:
parts = ["*", parts[0]]
cert = os.path.expanduser(parts[1])
if not os.path.exists(cert):
raise exceptions.OptionsError(
"Certificate file does not exist: %s" % cert
)
try:
self.certstore.add_cert_file(parts[0], cert, passphrase)
except crypto.Error:
raise exceptions.OptionsError(
"Invalid certificate format: %s" % cert
)
m = options.mode
if m.startswith("upstream:") or m.startswith("reverse:"):
_, spec = server_spec.parse_with_mode(options.mode)
self.upstream_server = spec
| mit |
lucafavatella/intellij-community | python/testData/MockSdk3.4/python_stubs/sys.py | 24 | 18084 | # encoding: utf-8
# module sys
# from (built-in)
# by generator 1.138
"""
This module provides access to some objects used or maintained by the
interpreter and to functions that interact strongly with the interpreter.
Dynamic objects:
argv -- command line arguments; argv[0] is the script pathname if known
path -- module search path; path[0] is the script directory, else ''
modules -- dictionary of loaded modules
displayhook -- called to show results in an interactive session
excepthook -- called to handle any uncaught exception other than SystemExit
To customize printing in an interactive session or to install a custom
top-level exception handler, assign other functions to replace these.
stdin -- standard input file object; used by input()
stdout -- standard output file object; used by print()
stderr -- standard error object; used for error messages
By assigning other file objects (or objects that behave like files)
to these, it is possible to redirect all of the interpreter's I/O.
last_type -- type of last uncaught exception
last_value -- value of last uncaught exception
last_traceback -- traceback of last uncaught exception
These three are only available in an interactive session after a
traceback has been printed.
Static objects:
builtin_module_names -- tuple of module names built into this interpreter
copyright -- copyright notice pertaining to this interpreter
exec_prefix -- prefix used to find the machine-specific Python library
executable -- absolute path of the executable binary of the Python interpreter
float_info -- a struct sequence with information about the float implementation.
float_repr_style -- string indicating the style of repr() output for floats
hash_info -- a struct sequence with information about the hash algorithm.
hexversion -- version information encoded as a single integer
implementation -- Python implementation information.
int_info -- a struct sequence with information about the int implementation.
maxsize -- the largest supported length of containers.
maxunicode -- the value of the largest Unicode codepoint
platform -- platform identifier
prefix -- prefix used to find the Python library
thread_info -- a struct sequence with information about the thread implementation.
version -- the version of this interpreter as a string
version_info -- version information as a named tuple
__stdin__ -- the original stdin; don't touch!
__stdout__ -- the original stdout; don't touch!
__stderr__ -- the original stderr; don't touch!
__displayhook__ -- the original displayhook; don't touch!
__excepthook__ -- the original excepthook; don't touch!
Functions:
displayhook() -- print an object to the screen, and save it in builtins._
excepthook() -- print an exception and its traceback to sys.stderr
exc_info() -- return thread-safe information about the current exception
exit() -- exit the interpreter by raising SystemExit
getdlopenflags() -- returns flags to be used for dlopen() calls
getprofile() -- get the global profiling function
getrefcount() -- return the reference count for an object (plus one :-)
getrecursionlimit() -- return the max recursion depth for the interpreter
getsizeof() -- return the size of an object in bytes
gettrace() -- get the global debug tracing function
setcheckinterval() -- control how often the interpreter checks for events
setdlopenflags() -- set the flags to be used for dlopen() calls
setprofile() -- set the global profiling function
setrecursionlimit() -- set the max recursion depth for the interpreter
settrace() -- set the global debug tracing function
"""
# no imports
# Variables with simple values
abiflags = 'm'
api_version = 1013
base_exec_prefix = '/Library/Frameworks/Python.framework/Versions/3.4'
base_prefix = '/Library/Frameworks/Python.framework/Versions/3.4'
byteorder = 'little'
copyright = 'Copyright (c) 2001-2014 Python Software Foundation.\nAll Rights Reserved.\n\nCopyright (c) 2000 BeOpen.com.\nAll Rights Reserved.\n\nCopyright (c) 1995-2001 Corporation for National Research Initiatives.\nAll Rights Reserved.\n\nCopyright (c) 1991-1995 Stichting Mathematisch Centrum, Amsterdam.\nAll Rights Reserved.'
dont_write_bytecode = True
executable = '/Users/vlan/.virtualenvs/obraz-py3.4/bin/python'
exec_prefix = '/Users/vlan/.virtualenvs/obraz-py3.4'
float_repr_style = 'short'
hexversion = 50594544
maxsize = 9223372036854775807
maxunicode = 1114111
platform = 'darwin'
prefix = '/Users/vlan/.virtualenvs/obraz-py3.4'
version = '3.4.2 (v3.4.2:ab2c023a9432, Oct 5 2014, 20:42:22) \n[GCC 4.2.1 (Apple Inc. build 5666) (dot 3)]'
_home = '/Library/Frameworks/Python.framework/Versions/3.4/bin'
__egginsert = 0
__plen = 5
# functions
def callstats(): # real signature unknown; restored from __doc__
"""
callstats() -> tuple of integers
Return a tuple of function call statistics, if CALL_PROFILE was defined
when Python was built. Otherwise, return None.
When enabled, this function returns detailed, implementation-specific
details about the number of function calls executed. The return value is
a 11-tuple where the entries in the tuple are counts of:
0. all function calls
1. calls to PyFunction_Type objects
2. PyFunction calls that do not create an argument tuple
3. PyFunction calls that do not create an argument tuple
and bypass PyEval_EvalCodeEx()
4. PyMethod calls
5. PyMethod calls on bound methods
6. PyType calls
7. PyCFunction calls
8. generator calls
9. All other calls
10. Number of stack pops performed by call_function()
"""
return ()
def call_tracing(func, args): # real signature unknown; restored from __doc__
"""
call_tracing(func, args) -> object
Call func(*args), while tracing is enabled. The tracing state is
saved, and restored afterwards. This is intended to be called from
a debugger from a checkpoint, to recursively debug some other code.
"""
return object()
def displayhook(p_object): # real signature unknown; restored from __doc__
"""
displayhook(object) -> None
Print an object to sys.stdout and also save it in builtins._
"""
pass
def excepthook(exctype, value, traceback): # real signature unknown; restored from __doc__
"""
excepthook(exctype, value, traceback) -> None
Handle an exception by displaying it with a traceback on sys.stderr.
"""
pass
def exc_info(): # real signature unknown; restored from __doc__
"""
exc_info() -> (type, value, traceback)
Return information about the most recent exception caught by an except
clause in the current stack frame or in an older stack frame.
"""
pass
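# Illustrative sketch (assumption): typical use of exc_info() inside an
# except clause.
#
#     try:
#         1 / 0
#     except ZeroDivisionError:
#         etype, value, tb = exc_info()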
def exit(status=None): # real signature unknown; restored from __doc__
"""
exit([status])
Exit the interpreter by raising SystemExit(status).
If the status is omitted or None, it defaults to zero (i.e., success).
If the status is an integer, it will be used as the system exit status.
If it is another kind of object, it will be printed and the system
exit status will be one (i.e., failure).
"""
pass
def getallocatedblocks(): # real signature unknown; restored from __doc__
"""
getallocatedblocks() -> integer
Return the number of memory blocks currently allocated, regardless of their
size.
"""
return 0
def getcheckinterval(): # real signature unknown; restored from __doc__
""" getcheckinterval() -> current check interval; see setcheckinterval(). """
pass
def getdefaultencoding(): # real signature unknown; restored from __doc__
"""
getdefaultencoding() -> string
Return the current default string encoding used by the Unicode
implementation.
"""
return ""
def getdlopenflags(): # real signature unknown; restored from __doc__
"""
getdlopenflags() -> int
Return the current value of the flags that are used for dlopen calls.
The flag constants are defined in the os module.
"""
return 0
def getfilesystemencoding(): # real signature unknown; restored from __doc__
"""
getfilesystemencoding() -> string
Return the encoding used to convert Unicode filenames in
operating system filenames.
"""
return ""
def getprofile(): # real signature unknown; restored from __doc__
"""
getprofile()
Return the profiling function set with sys.setprofile.
See the profiler chapter in the library manual.
"""
pass
def getrecursionlimit(): # real signature unknown; restored from __doc__
"""
getrecursionlimit()
Return the current value of the recursion limit, the maximum depth
of the Python interpreter stack. This limit prevents infinite
recursion from causing an overflow of the C stack and crashing Python.
"""
pass
def getrefcount(p_object): # real signature unknown; restored from __doc__
"""
getrefcount(object) -> integer
Return the reference count of object. The count returned is generally
one higher than you might expect, because it includes the (temporary)
reference as an argument to getrefcount().
"""
return 0
def getsizeof(p_object, default): # real signature unknown; restored from __doc__
"""
getsizeof(object, default) -> int
Return the size of object in bytes.
"""
return 0
def getswitchinterval(): # real signature unknown; restored from __doc__
""" getswitchinterval() -> current thread switch interval; see setswitchinterval(). """
pass
def gettrace(): # real signature unknown; restored from __doc__
"""
gettrace()
Return the global debug tracing function set with sys.settrace.
See the debugger chapter in the library manual.
"""
pass
def intern(string): # real signature unknown; restored from __doc__
"""
intern(string) -> string
``Intern'' the given string. This enters the string in the (global)
table of interned strings whose purpose is to speed up dictionary lookups.
Return the string itself or the previously interned string object with the
same value.
"""
return ""
def setcheckinterval(n): # real signature unknown; restored from __doc__
"""
setcheckinterval(n)
Tell the Python interpreter to check for asynchronous events every
n instructions. This also affects how often thread switches occur.
"""
pass
def setdlopenflags(n): # real signature unknown; restored from __doc__
"""
setdlopenflags(n) -> None
Set the flags used by the interpreter for dlopen calls, such as when the
interpreter loads extension modules. Among other things, this will enable
a lazy resolving of symbols when importing a module, if called as
sys.setdlopenflags(0). To share symbols across extension modules, call as
sys.setdlopenflags(os.RTLD_GLOBAL). Symbolic names for the flag modules
can be found in the os module (RTLD_xxx constants, e.g. os.RTLD_LAZY).
"""
pass
def setprofile(function): # real signature unknown; restored from __doc__
"""
setprofile(function)
Set the profiling function. It will be called on each function call
and return. See the profiler chapter in the library manual.
"""
pass
def setrecursionlimit(n): # real signature unknown; restored from __doc__
"""
setrecursionlimit(n)
Set the maximum depth of the Python interpreter stack to n. This
limit prevents infinite recursion from causing an overflow of the C
stack and crashing Python. The highest possible limit is platform-
dependent.
"""
pass
def setswitchinterval(n): # real signature unknown; restored from __doc__
"""
setswitchinterval(n)
Set the ideal thread switching delay inside the Python interpreter
The actual frequency of switching threads can be lower if the
interpreter executes long sequences of uninterruptible code
(this is implementation-specific and workload-dependent).
The parameter must represent the desired switching delay in seconds
A typical value is 0.005 (5 milliseconds).
"""
pass
def settrace(function): # real signature unknown; restored from __doc__
"""
settrace(function)
Set the global debug tracing function. It will be called on each
function call. See the debugger chapter in the library manual.
"""
pass
def _clear_type_cache(): # real signature unknown; restored from __doc__
"""
_clear_type_cache() -> None
Clear the internal type lookup cache.
"""
pass
def _current_frames(): # real signature unknown; restored from __doc__
"""
_current_frames() -> dictionary
Return a dictionary mapping each current thread T's thread id to T's
current stack frame.
This function should be used for specialized purposes only.
"""
return {}
def _debugmallocstats(): # real signature unknown; restored from __doc__
"""
_debugmallocstats()
Print summary info to stderr about the state of
pymalloc's structures.
In Py_DEBUG mode, also perform some expensive internal consistency
checks.
"""
pass
def _getframe(depth=None): # real signature unknown; restored from __doc__
"""
_getframe([depth]) -> frameobject
Return a frame object from the call stack. If optional integer depth is
given, return the frame object that many calls below the top of the stack.
If that is deeper than the call stack, ValueError is raised. The default
for depth is zero, returning the frame at the top of the call stack.
This function should be used for internal and specialized
purposes only.
"""
pass
def __displayhook__(*args, **kwargs): # real signature unknown
"""
displayhook(object) -> None
Print an object to sys.stdout and also save it in builtins._
"""
pass
def __excepthook__(*args, **kwargs): # real signature unknown
"""
excepthook(exctype, value, traceback) -> None
Handle an exception by displaying it with a traceback on sys.stderr.
"""
pass
def __interactivehook__(): # reliably restored by inspect
# no doc
pass
# classes
class __loader__(object):
"""
Meta path import for built-in modules.
All methods are either class or static methods to avoid the need to
instantiate the class.
"""
@classmethod
def find_module(cls, *args, **kwargs): # real signature unknown
"""
Find the built-in module.
If 'path' is ever specified then the search is considered a failure.
This method is deprecated. Use find_spec() instead.
"""
pass
@classmethod
def find_spec(cls, *args, **kwargs): # real signature unknown
pass
@classmethod
def get_code(cls, *args, **kwargs): # real signature unknown
""" Return None as built-in modules do not have code objects. """
pass
@classmethod
def get_source(cls, *args, **kwargs): # real signature unknown
""" Return None as built-in modules do not have source code. """
pass
@classmethod
def is_package(cls, *args, **kwargs): # real signature unknown
""" Return False as built-in modules are never packages. """
pass
@classmethod
def load_module(cls, *args, **kwargs): # real signature unknown
""" Load a built-in module. """
pass
def module_repr(module): # reliably restored by inspect
"""
Return repr for the module.
The method is deprecated. The import machinery does the job itself.
"""
pass
def __init__(self, *args, **kwargs): # real signature unknown
pass
__weakref__ = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""list of weak references to the object (if defined)"""
__dict__ = None # (!) real value is ''
# variables with complex values
argv = [] # real value of type <class 'list'> skipped
builtin_module_names = () # real value of type <class 'tuple'> skipped
flags = (
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
1,
0,
)
float_info = (
1.7976931348623157e+308,
1024,
308,
2.2250738585072014e-308,
-1021,
-307,
15,
53,
2.220446049250313e-16,
2,
1,
)
hash_info = (
64,
2305843009213693951,
314159,
0,
1000003,
'siphash24',
64,
128,
0,
)
implementation = None # (!) real value is ''
int_info = (
30,
4,
)
meta_path = [
__loader__,
None, # (!) real value is ''
None, # (!) real value is ''
]
modules = {} # real value of type <class 'dict'> skipped
path = [
'/Users/vlan/src/idea/out/classes/production/python-helpers',
'/Library/Frameworks/Python.framework/Versions/3.4/lib/python34.zip',
'/Library/Frameworks/Python.framework/Versions/3.4/lib/python3.4',
'/Library/Frameworks/Python.framework/Versions/3.4/lib/python3.4/plat-darwin',
'/Library/Frameworks/Python.framework/Versions/3.4/lib/python3.4/lib-dynload',
'/Users/vlan/.virtualenvs/obraz-py3.4/lib/python3.4/site-packages',
]
path_hooks = [
None, # (!) real value is ''
None, # (!) real value is ''
]
path_importer_cache = {} # real value of type <class 'dict'> skipped
stderr = None # (!) forward: __stderr__, real value is ''
stdin = None # (!) forward: __stdin__, real value is ''
stdout = None # (!) forward: __stdout__, real value is ''
thread_info = (
'pthread',
'mutex+cond',
None,
)
version_info = (
3,
4,
2,
'final',
0,
)
warnoptions = []
_mercurial = (
'CPython',
'v3.4.2',
'ab2c023a9432',
)
_xoptions = {}
__spec__ = None # (!) real value is ''
__stderr__ = None # (!) real value is ''
__stdin__ = None # (!) real value is ''
__stdout__ = None # (!) real value is ''
# intermittent names
exc_value = Exception()
exc_traceback=None
| apache-2.0 |
nikhilprathapani/python-for-android | python3-alpha/python3-src/Lib/test/test_capi.py | 47 | 7438 | # Run the _testcapi module tests (tests for the Python/C API): by defn,
# these are all functions _testcapi exports whose name begins with 'test_'.
from __future__ import with_statement
import os
import random
import subprocess
import sys
import time
import unittest
from test import support
try:
import threading
except ImportError:
threading = None
import _testcapi
def testfunction(self):
"""some doc"""
return self
class InstanceMethod:
id = _testcapi.instancemethod(id)
testfunction = _testcapi.instancemethod(testfunction)
class CAPITest(unittest.TestCase):
def test_instancemethod(self):
inst = InstanceMethod()
self.assertEqual(id(inst), inst.id())
self.assertTrue(inst.testfunction() is inst)
self.assertEqual(inst.testfunction.__doc__, testfunction.__doc__)
self.assertEqual(InstanceMethod.testfunction.__doc__, testfunction.__doc__)
InstanceMethod.testfunction.attribute = "test"
self.assertEqual(testfunction.attribute, "test")
self.assertRaises(AttributeError, setattr, inst.testfunction, "attribute", "test")
@unittest.skipUnless(threading, 'Threading required for this test.')
def test_no_FatalError_infinite_loop(self):
p = subprocess.Popen([sys.executable, "-c",
'import _testcapi;'
'_testcapi.crash_no_current_thread()'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
(out, err) = p.communicate()
self.assertEqual(out, b'')
# This used to cause an infinite loop.
self.assertEqual(err.rstrip(),
b'Fatal Python error:'
b' PyThreadState_Get: no current thread')
def test_memoryview_from_NULL_pointer(self):
self.assertRaises(ValueError, _testcapi.make_memoryview_from_NULL_pointer)
@unittest.skipUnless(threading, 'Threading required for this test.')
class TestPendingCalls(unittest.TestCase):
def pendingcalls_submit(self, l, n):
def callback():
#this function can be interrupted by thread switching so let's
#use an atomic operation
l.append(None)
for i in range(n):
time.sleep(random.random()*0.02) #0.01 secs on average
#try submitting callback until successful.
#rely on regular interrupt to flush queue if we are
#unsuccessful.
while True:
if _testcapi._pending_threadfunc(callback):
break
def pendingcalls_wait(self, l, n, context = None):
#now, stick around until l has grown to n elements
count = 0
while len(l) != n:
#this busy loop is where we expect to be interrupted to
#run our callbacks. Note that callbacks are only run on the
#main thread
if False and support.verbose:
print("(%i)"%(len(l),),)
for i in range(1000):
a = i*i
if context and not context.event.is_set():
continue
count += 1
self.assertTrue(count < 10000,
"timeout waiting for %i callbacks, got %i"%(n, len(l)))
if False and support.verbose:
print("(%i)"%(len(l),))
def test_pendingcalls_threaded(self):
#do every callback on a separate thread
n = 32 #total callbacks
threads = []
class foo(object):pass
context = foo()
context.l = []
context.n = 2 #submits per thread
context.nThreads = n // context.n
context.nFinished = 0
context.lock = threading.Lock()
context.event = threading.Event()
for i in range(context.nThreads):
t = threading.Thread(target=self.pendingcalls_thread, args = (context,))
t.start()
threads.append(t)
self.pendingcalls_wait(context.l, n, context)
for t in threads:
t.join()
def pendingcalls_thread(self, context):
try:
self.pendingcalls_submit(context.l, context.n)
finally:
with context.lock:
context.nFinished += 1
nFinished = context.nFinished
if False and support.verbose:
print("finished threads: ", nFinished)
if nFinished == context.nThreads:
context.event.set()
def test_pendingcalls_non_threaded(self):
#again, just using the main thread, likely they will all be dispatched at
#once. It is ok to ask for too many, because we loop until we find a slot.
#the loop can be interrupted to dispatch.
#there are only 32 dispatch slots, so we go for twice that!
l = []
n = 64
self.pendingcalls_submit(l, n)
self.pendingcalls_wait(l, n)
# Bug #6012
class Test6012(unittest.TestCase):
def test(self):
self.assertEqual(_testcapi.argparsing("Hello", "World"), 1)
class EmbeddingTest(unittest.TestCase):
@unittest.skipIf(
sys.platform.startswith('win'),
"test doesn't work under Windows")
def test_subinterps(self):
# XXX only tested under Unix checkouts
basepath = os.path.dirname(os.path.dirname(os.path.dirname(__file__)))
oldcwd = os.getcwd()
# This is needed otherwise we get a fatal error:
# "Py_Initialize: Unable to get the locale encoding
# LookupError: no codec search functions registered: can't find encoding"
os.chdir(basepath)
try:
exe = os.path.join(basepath, "Modules", "_testembed")
if not os.path.exists(exe):
self.skipTest("%r doesn't exist" % exe)
p = subprocess.Popen([exe],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
(out, err) = p.communicate()
self.assertEqual(p.returncode, 0,
"bad returncode %d, stderr is %r" %
(p.returncode, err))
if support.verbose:
print()
print(out.decode('latin1'))
print(err.decode('latin1'))
finally:
os.chdir(oldcwd)
def test_main():
support.run_unittest(CAPITest, TestPendingCalls, Test6012, EmbeddingTest)
for name in dir(_testcapi):
if name.startswith('test_'):
test = getattr(_testcapi, name)
if support.verbose:
print("internal", name)
test()
# some extra thread-state tests driven via _testcapi
def TestThreadState():
if support.verbose:
print("auto-thread-state")
idents = []
def callback():
idents.append(_thread.get_ident())
_testcapi._test_thread_state(callback)
a = b = callback
time.sleep(1)
# Check our main thread is in the list exactly 3 times.
if idents.count(_thread.get_ident()) != 3:
raise support.TestFailed(
"Couldn't find main thread correctly in the list")
if threading:
import _thread
import time
TestThreadState()
t = threading.Thread(target=TestThreadState)
t.start()
t.join()
if __name__ == "__main__":
test_main()
| apache-2.0 |
marcvinyals/cnfgen | cnfformula/cmdline.py | 1 | 29676 | #!/usr/bin/env python
# -*- coding:utf-8 -*-
"""Components for command line interface
CNFgen has many command line entry points to its functionality, and
several of them expose the same functionality. This module collects the
common components they share.
Copyright (C) 2012, 2013, 2014, 2015, 2016 Massimo Lauria <lauria@kth.se>
https://github.com/MassimoLauria/cnfgen.git
"""
from __future__ import print_function
import sys
import argparse
import networkx
import random
from itertools import combinations, product
from .graphs import supported_formats as graph_formats
from .graphs import readGraph,writeGraph
from .graphs import bipartite_random_left_regular,bipartite_random_regular,bipartite_shift
from .graphs import bipartite_sets
from .graphs import dag_complete_binary_tree,dag_pyramid
from .graphs import sample_missing_edges
try: # NetworkX >= 1.10
complete_bipartite_graph = networkx.bipartite.complete_bipartite_graph
bipartite_random_graph = networkx.bipartite.random_graph
bipartite_gnmk_random_graph = networkx.bipartite.gnmk_random_graph
except AttributeError: # Networkx < 1.10
from networkx import complete_bipartite_graph
from networkx import bipartite_random_graph
from networkx import bipartite_gnmk_random_graph
__all__ = [ "register_cnfgen_subcommand","is_cnfgen_subcommand",
"DirectedAcyclicGraphHelper", "SimpleGraphHelper", "BipartiteGraphHelper"]
__cnfgen_subcommand_mark = "_is_cnfgen_subcommand"
def register_cnfgen_subcommand(cls):
"""Register the class as a formula subcommand
CNFgen command line tool invokes subcommands to generate formula
families. This class decorator is used to declare that a class is
indeed the implementation of a formula generator subcommand.
In this way CNFgen setup code will automatically find it and
integrate it into the CNFgen command line interface.
The class argument is tested to check whether it is a suitable
implementation of a CNFgen subcommand.
In particular the class must have four attributes
+ ``name`` the name of the CNF formula
+ ``description`` a short description of the formulas
+ ``setup_command_line`` a method that takes a command line parser
object and populates it with appropriate options.
+ ``build_cnf`` a method that takes the arguments and produce the CNF.
The parser expected by ``setup_command_line(parser)`` is one like that produced by
``argparse.ArgumentParser``.
The argument for ``build_cnf(args)`` is the dictionary of flags and
options parsed from the command line as produced by ``args=parser.parse_args()``
Parameters
----------
class : any
the class to test
Returns
-------
None
Raises
------
AssertionError
when the class is not formula subcommand
"""
assert \
hasattr(cls,'build_cnf') and \
hasattr(cls,'setup_command_line') and \
hasattr(cls,'name') and \
hasattr(cls,'description')
setattr(cls,__cnfgen_subcommand_mark,True)
return cls
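# Illustrative sketch (assumption, not part of the module): a minimal formula
# subcommand carrying the four required attributes. The empty CNF below is
# only a placeholder construction.
#
#     @register_cnfgen_subcommand
#     class EmptyCmdHelper(object):
#         name = 'empty'
#         description = 'empty formula'
#
#         @staticmethod
#         def setup_command_line(parser):
#             parser.add_argument('-n', type=int, default=0)
#
#         @staticmethod
#         def build_cnf(args):
#             from cnfformula.cnf import CNF
#             return CNF()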
def is_cnfgen_subcommand(cls):
"""Test whether the object is a registered CNFgen subcommand
Parameters
----------
class : any
the class to test
Returns
-------
bool
"""
return hasattr(cls,__cnfgen_subcommand_mark)
__cnf_transformation_subcommand_mark = "_is_cnf_transformation_subcommand"
def register_cnf_transformation_subcommand(cls):
"""Register the class as a transformation subcommand
CNFgen command line tool invokes subcommands to apply
transformations to formula families. This class decorator is used
to declare that a class is indeed the implementation of a formula
transformation subcommand. In this way CNFgen setup code will
automatically find it and integrate it into the CNFgen command
line interface.
The class argument is tested to check whether it is a suitable
implementation of a CNFgen subcommand.
In particular the class must have four attributes
+ ``name`` the name of the CNF transformation
+ ``description`` a short description of the transformation
+ ``setup_command_line`` a method that takes a command line parser object and populates it with appropriate options.
+ ``transform_cnf`` a method that takes a CNF, the arguments and produce a new CNF.
The parser expected by ``setup_command_line(parser)`` is one like that produced by
``argparse.ArgumentParser``.
The arguments for ``transform_cnf(F,args)`` are a CNF, and the dictionary of flags and
options parsed from the command line as produced by ``args=parser.parse_args()``
Parameters
----------
class : any
the class to test
Returns
-------
None
Raises
------
AssertionError
when the class is not a transformation subcommand
"""
assert \
hasattr(cls,'transform_cnf') and \
hasattr(cls,'setup_command_line') and \
hasattr(cls,'name') and \
hasattr(cls,'description')
setattr(cls,__cnf_transformation_subcommand_mark,True)
return cls
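# Illustrative sketch (assumption, not part of the module): a minimal
# transformation subcommand; the identity transformation is a placeholder.
#
#     @register_cnf_transformation_subcommand
#     class IdentityCmdHelper(object):
#         name = 'identity'
#         description = 'leave the formula unchanged'
#
#         @staticmethod
#         def setup_command_line(parser):
#             pass
#
#         @staticmethod
#         def transform_cnf(F, args):
#             return F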
def is_cnf_transformation_subcommand(cls):
"""Test whether the object is a registered CNFgen transformation
Parameters
----------
class : any
the class to test
Returns
-------
bool
"""
return hasattr(cls,__cnf_transformation_subcommand_mark)
def find_methods_in_package(package,test, sortkey=None):
"""Explore a package for functions and methods that implement a specific test"""
import pkgutil
result = []
if sortkey is None:
sortkey = str
for loader, module_name, _ in pkgutil.walk_packages(package.__path__):
module_name = package.__name__+"."+module_name
if module_name in sys.modules:
module = sys.modules[module_name]
else:
module = loader.find_module(module_name).load_module(module_name)
for objname in dir(module):
obj = getattr(module, objname)
if test(obj):
result.append(obj)
result.sort(key=sortkey)
return result
### Graph readers/generators
def positive_int(string):
"""Type checker for positive integers
"""
value = int(string)
if (value<=0) : raise ValueError('integer is not positive: {}'.format(value))
return value
class GraphHelper(object):
"""Command Line helper for reading graphs
"""
@staticmethod
def setup_command_line(parser):
"""Setup command line options for getting graphs"""
raise NotImplementedError("Graph Input helper must be subclassed")
@staticmethod
def obtain_graph(args):
"""Read/Generate the graph according to the command line options"""
raise NotImplementedError("Graph Input helper must be subclassed")
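# Illustrative usage sketch (assumption): a formula subcommand typically wires
# one of the helpers below into its argparse parser and then rebuilds the
# graph from the parsed options.
#
#     parser = argparse.ArgumentParser()
#     SimpleGraphHelper.setup_command_line(parser)
#     args = parser.parse_args(['--gnp', '10', '0.5'])
#     G = SimpleGraphHelper.obtain_graph(args)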
class DirectedAcyclicGraphHelper(GraphHelper):
@staticmethod
def setup_command_line(parser, suffix="", required=False):
"""Setup command line options for reading a DAG
Parameters
----------
parser : ArgParse parser object
it is populated with options for input graphs
suffix: string, optional
add a suffix to all input options. Useful if you need to input
multiple graphs in the same command line (default: empty)
required : bool, optional
enforce that at least one input specification is given.
Otherwise the standard input is used as the default input,
which is not a good idea when reading multiple graphs.
"""
gr=parser.add_argument_group(title="Input directed acyclic graph (DAG) " + suffix,
description="""
You can either read the input DAG from file according to one of
the formats, or generate it using one of the constructions included.""")
gr=gr.add_mutually_exclusive_group(required=required)
gr.add_argument('--input'+suffix,'-i'+suffix,
type=argparse.FileType('r',0),
metavar="<input>",
default='-',
help="""Read the DAG from <input>. Setting '<input>' to '-' is another way
to read from standard input. (default: -) """)
gr.add_argument('--tree'+suffix,type=positive_int,action='store',metavar="<height>",
help="rooted tree digraph")
gr.add_argument('--pyramid'+suffix,type=positive_int,action='store',metavar="<height>",
help="pyramid digraph")
gr=parser.add_argument_group("I/O options")
gr.add_argument('--savegraph'+suffix,'-sg'+suffix,
type=argparse.FileType('wb',0),
metavar="<graph_file>",
default=None,
help="""Save the DAG to <graph_file>.
Setting '<graph_file>' to '-' is
another way to send the DAG to
standard output. (default: -)
""")
gr.add_argument('--graphformat'+suffix,'-gf'+suffix,
choices=graph_formats()['dag'],
default='autodetect',
help="Format of the DAG file. (default: autodetect)")
@staticmethod
def obtain_graph(args,suffix=""):
"""Produce a DAG from either input or library
"""
if getattr(args,'tree'+suffix) is not None:
assert getattr(args,'tree'+suffix) > 0
D = dag_complete_binary_tree( getattr(args,'tree'+suffix) )
elif getattr(args,'pyramid'+suffix) is not None:
assert getattr(args,'pyramid'+suffix) > 0
D = dag_pyramid( getattr(args,'pyramid'+suffix))
elif getattr(args,'graphformat'+suffix) is not None:
try:
print("INFO: reading directed acyclic graph {} from '{}'".format(suffix,getattr(args,"input"+suffix).name),
file=sys.stderr)
D=readGraph(getattr(args,'input'+suffix),
"dag",
getattr(args,'graphformat'+suffix))
except ValueError as e:
print("ERROR ON '{}'. {}".format(getattr(args,'input'+suffix).name,e),file=sys.stderr)
exit(-1)
else:
raise RuntimeError("Command line does not specify a directed acyclic graph")
# Output the graph if requested
if getattr(args,'savegraph'+suffix) is not None:
writeGraph(D,
getattr(args,'savegraph'+suffix),
"dag",
getattr(args,'graphformat'+suffix))
return D
class SimpleGraphHelper(GraphHelper):
@staticmethod
def setup_command_line(parser,suffix="",required=False):
"""Setup input options for command lines
Parameters
----------
parser : ArgParse parser object
it is populated with options for input graphs
suffix: string, optional
add a suffix to all input options. Useful if you need to input
multiple graphs in the same command line (default: empty)
required : bool, optional
enforce that at least one input specification is given.
Otherwise the standard input is used as the default input,
which is not a good idea when reading multiple graphs.
"""
gr=parser.add_argument_group(title="Input graph "+suffix,
description="""
You can either read the input graph from file according to one of
the formats, or generate it using one of the included constructions.""")
class IntFloat(argparse.Action):
def __call__(self, parser, args, values, option_string = None):
try:
n, p = positive_int(values[0]),float(values[1])
if p>1.0 or p<0: raise ValueError('p must be a float between 0 and 1')
except ValueError as e:
raise argparse.ArgumentError(self,e.message)
setattr(args, self.dest, (n,p))
gr=gr.add_mutually_exclusive_group(required=required)
gr.add_argument('--input'+suffix,'-i'+suffix,
type=argparse.FileType('r',0),
metavar="<input>",
default='-',
help="""Read the graph from <input>.
Setting '<input>' to '-' reads the graph from standard
input. (default: -)
""")
gr.add_argument('--gnp'+suffix,nargs=2,action=IntFloat,metavar=('n','p'),
help="random graph according to G(n,p) model (i.e. independent edges)")
gr.add_argument('--gnm'+suffix,type=positive_int,nargs=2,action='store',metavar=('n','m'),
help="random graph according to G(n,m) model (i.e. m random edges)")
gr.add_argument('--gnd'+suffix,type=positive_int,nargs=2,action='store',metavar=('n','d'),
help="random d-regular graph according to G(n,d) model (i.e. d random edges per vertex)")
gr.add_argument('--grid'+suffix,type=positive_int,nargs='+',action='store',
metavar=('d1','d2'),
help="n-dimensional grid of dimension d1 x d2 x ... ")
gr.add_argument('--torus'+suffix,type=positive_int,nargs='+',action='store',
metavar=('d1','d2'),
help="n-dimensional torus grid of dimensions d1 x d2 x ... x dn")
gr.add_argument('--complete'+suffix,type=positive_int,action='store',metavar="<N>",
help="complete graph on N vertices")
gr.add_argument('--empty'+suffix,type=positive_int,action='store',metavar="<N>",
help="empty graph on N vertices")
gr=parser.add_argument_group("Modifications for input graph "+suffix)
gr.add_argument('--plantclique'+suffix,type=positive_int,action='store',metavar="<k>",
help="choose k vertices at random and add all edges among them")
gr.add_argument('--addedges'+suffix,type=positive_int,action='store',metavar="<k>",
help="add k NEW random edges to the graph (applied last)")
gr.add_argument('--splitedge'+suffix,action='store_true',
help="split an edge of the graph by adding a vertex in the middle")
gr=parser.add_argument_group("I/O options for graph "+suffix)
gr.add_argument('--savegraph'+suffix,'-sg'+suffix,
type=argparse.FileType('wb',0),
metavar="<graph_file>",
default=None,
help="""Save the graph to <graph_file>.
Setting '<graph_file>' to '-' is
another way to send the graph to
standard output. (default: -)
""")
gr.add_argument('--graphformat'+suffix,'-gf'+suffix,
choices=graph_formats()['simple'],
default='autodetect',
help="Format of the graph file. (default: autodetect)")
@staticmethod
def obtain_graph(args,suffix=""):
"""Build a Graph according to command line arguments
Arguments:
- `args`: command line options
"""
if getattr(args,'gnd'+suffix) is not None:
n,d = getattr(args,'gnd'+suffix)
if (n*d)%2 == 1:
raise ValueError("n * d must be even")
G=networkx.random_regular_graph(d,n)
elif getattr(args,'gnp'+suffix) is not None:
n,p = getattr(args,'gnp'+suffix)
G=networkx.gnp_random_graph(n,p)
elif getattr(args,'gnm'+suffix) is not None:
n,m = getattr(args,'gnm'+suffix)
G=networkx.gnm_random_graph(n,m)
elif getattr(args,'grid'+suffix) is not None:
G=networkx.grid_graph(getattr(args,'grid'+suffix))
elif getattr(args,'torus'+suffix) is not None:
G=networkx.grid_graph(getattr(args,'torus'+suffix),periodic=True)
elif getattr(args,'complete'+suffix) is not None:
G=networkx.complete_graph(getattr(args,'complete'+suffix))
elif getattr(args,'empty'+suffix) is not None:
G=networkx.empty_graph(getattr(args,'empty'+suffix))
elif getattr(args,'graphformat'+suffix) is not None:
try:
print("INFO: reading simple graph {} from '{}'".format(suffix,getattr(args,"input"+suffix).name),
file=sys.stderr)
G=readGraph(getattr(args,'input'+suffix),
"simple",
getattr(args,'graphformat'+suffix))
except ValueError as e:
print("ERROR ON '{}'. {}".format(
getattr(args,'input'+suffix).name,e),
file=sys.stderr)
exit(-1)
else:
raise RuntimeError("Command line does not specify a graph")
# Graph modifications
if getattr(args,'plantclique'+suffix) is not None and getattr(args,'plantclique'+suffix)>1:
cliquesize = getattr(args,'plantclique'+suffix)
if cliquesize > G.order() :
raise ValueError("Clique cannot be larger than graph")
clique=random.sample(G.nodes(),cliquesize)
for v,w in combinations(clique,2):
G.add_edge(v,w)
if getattr(args,'addedges'+suffix) is not None and getattr(args,'addedges'+suffix)>0:
k = getattr(args,'addedges'+suffix)
G.add_edges_from(sample_missing_edges(G,k))
if hasattr(G, 'name'):
G.name = "{} with {} new random edges".format(G.name,k)
if getattr(args,'splitedge'+suffix):
(u,v) = next(iter(G.edges()))
G.remove_edge(u,v)
for i in range(G.order()+1):
if (i,i) not in G:
new_node = (i,i)
break
G.add_edge(u,new_node)
G.add_edge(v,new_node)
if hasattr(G, 'name'):
G.name = "{} with a split edge".format(G.name)
# Output the graph if requested
if getattr(args,'savegraph'+suffix) is not None:
writeGraph(G,
getattr(args,'savegraph'+suffix),
'simple',
getattr(args,'graphformat'+suffix))
return G
class BipartiteGraphHelper(GraphHelper):
@staticmethod
def setup_command_line(parser,suffix="",required=False):
"""Setup input options for reading bipartites on command lines
Parameters
----------
parser : ArgParse parser object
it is populated with options for input graphs
suffix: string, optional
add a suffix to all input options. Useful if you need to input
multiple graphs in the same command line (default: empty)
required : bool, optional
enforce that at least one input specification is given.
Otherwise the standard input is used as the default input,
which is not a good idea when reading multiple graphs.
"""
class IntIntFloat(argparse.Action):
def __call__(self, parser, args, values, option_string = None):
try:
l,r,p = positive_int(values[0]),positive_int(values[1]),float(values[2])
if not 0.0 <= p <= 1.0:
raise ValueError('p must be a float between 0 and 1')
except ValueError as e:
raise argparse.ArgumentError(self,e.message)
setattr(args, self.dest, (l,r,p))
class BipartiteRegular(argparse.Action):
def __call__(self, parser, args, values, option_string = None):
try:
l,r,d = positive_int(values[0]),positive_int(values[1]),positive_int(values[2])
if d > r :
raise ValueError('In a regular bipartite graph, left degree d is at most r.')
if (d*l % r) != 0 :
raise ValueError('In a regular bipartite graph, r must divide d*l.')
except ValueError as e:
raise argparse.ArgumentError(self,e.message)
setattr(args, self.dest, (l,r,d))
class BipartiteEdge(argparse.Action):
def __call__(self, parser, args, values, option_string = None):
try:
l,r,m = positive_int(values[0]),positive_int(values[1]),positive_int(values[2])
if m > r*l :
raise ValueError('In a bipartite graph, #edges is at most l*r.')
except ValueError as e:
raise argparse.ArgumentError(self,e.message)
setattr(args, self.dest, (l,r,m))
class BipartiteShift(argparse.Action):
def __call__(self, parser, args, values, option_string = None):
try:
if len(values)<2:
raise ValueError("'bshift' requires two positive int parameters or more.")
N,M,pattern= values[0],values[1],sorted(values[2:])
for i in range(len(pattern)-1):
if pattern[i] == pattern[i+1]:
raise ValueError("repetitions are not allowed in the edge pattern.")
if N<1 or M<1:
raise ValueError("matrix dimensions N and M must be positive.")
if any([ x < 1 or x > M for x in pattern]):
raise ValueError("in v(1),v(2)... we need 1 <= v(i) <= M.")
except ValueError as e:
raise argparse.ArgumentError(self,e.message)
setattr(args, self.dest, (N,M,pattern))
class BipartiteLeft(argparse.Action):
def __call__(self, parser, args, values, option_string = None):
try:
l,r,d = positive_int(values[0]),positive_int(values[1]),positive_int(values[2])
if d > r :
raise ValueError('In a bipartite graph, left degree d is at most r.')
except ValueError as e:
raise argparse.ArgumentError(self,e.message)
setattr(args, self.dest, (l,r,d))
gr=parser.add_argument_group("Bipartite graph structure "+suffix,
description="""
The structure of this CNF formula depends on a bipartite graph, which
can be read from file (in one of the supported format), or generated
using one of the included constructions.""")
gr=gr.add_mutually_exclusive_group(required=False)
gr.add_argument('--input'+suffix,'-i'+suffix,
type=argparse.FileType('r',0),
metavar="<input>",
default='-',
help="""Read the graph from file. Setting '<input>' to '-' is
another way to read from standard input. (default: -)
""")
gr.add_argument('--bp'+suffix,nargs=3,action=IntIntFloat,metavar=('l','r','p'),
help="Random bipartite graph with independent edges")
gr.add_argument('--bm'+suffix,type=positive_int,nargs=3,action=BipartiteEdge,metavar=('l','r','m'),
help="Bipartite graph with m random edges")
gr.add_argument('--bd'+suffix,type=positive_int,nargs=3,action=BipartiteLeft,metavar=('l','r','d'),
help="Bipartite graph with d random edges per left vertex")
gr.add_argument('--bregular'+suffix,nargs=3,action=BipartiteRegular,metavar=('l','r','d'),
help="Bipartite regular graph, with d random edges per left vertex.")
gr.add_argument('--bshift'+suffix,type=positive_int,nargs='*',action=BipartiteShift,metavar=('N'),
help="Args: <N> <M> <v1> <v2> ... builds an NxM bipartite graph where left vertex i is connected to i+v1, i+v2, ... (mod M)")
gr.add_argument('--bcomplete'+suffix,type=positive_int,nargs=2,action='store',metavar=('l','r'),
help="Complete bipartite graph")
gr=parser.add_argument_group("Modify the graph structure")
gr.add_argument('--plantbiclique'+suffix,type=positive_int,nargs=2,action='store',metavar=('l','r'),
help="Plant a random (l,r)-bipartite clique")
gr.add_argument('--addedges'+suffix,type=positive_int,action='store',metavar="<k>",
help="Add k NEW random edges to the graph (applied in the end)")
gr=parser.add_argument_group("File I/O options",
description="""
Additional option regarding the input and output of the files
containing the graph structure.
""")
gr.add_argument('--savegraph'+suffix,'-sg'+suffix,
type=argparse.FileType('wb',0),
metavar="<graph_file>",
default=None,
help="""Save the graph to <graph_file>. Setting '<graph_file>' to '-'sends
the graph to standard output. (default: -) """)
gr.add_argument('--graphformat'+suffix,'-gf'+suffix,
choices=graph_formats()['bipartite'],
default='autodetect',
help="Format of the graph file. (default: autodetect)")
@staticmethod
def obtain_graph(args,suffix=""):
"""Build a Bipartite graph according to command line arguments
Arguments:
- `args`: command line options
"""
if getattr(args,"bp"+suffix) is not None:
l,r,p = getattr(args,"bp"+suffix)
G=bipartite_random_graph(l,r,p)
elif getattr(args,"bm"+suffix) is not None:
l,r,m = getattr(args,"bm"+suffix)
G=bipartite_gnmk_random_graph(l,r,m)
elif getattr(args,"bd"+suffix) is not None:
l,r,d = getattr(args,"bd"+suffix)
G=bipartite_random_left_regular(l,r,d)
elif getattr(args,"bregular"+suffix) is not None:
l,r,d = getattr(args,"bregular"+suffix)
G=bipartite_random_regular(l,r,d)
elif getattr(args,"bshift"+suffix) is not None:
N,M,pattern = getattr(args,"bshift"+suffix)
G=bipartite_shift(N,M,pattern)
elif getattr(args,"bcomplete"+suffix) is not None:
l,r = getattr(args,"bcomplete"+suffix)
G=complete_bipartite_graph(l,r)
# Workaround: the bipartite labels are missing in old version of networkx
for i in range(0,l):
G.add_node(i,bipartite=0)
for i in range(l,l+r):
G.add_node(i,bipartite=1)
elif getattr(args,"graphformat"+suffix) is not None:
try:
print("INFO: reading bipartite graph {} from '{}'".format(suffix,getattr(args,"input"+suffix).name),
file=sys.stderr)
G=readGraph(getattr(args,"input"+suffix),
"bipartite",
getattr(args,"graphformat"+suffix))
except ValueError as e:
print("ERROR ON '{}'. {}".format(getattr(args,"input"+suffix).name,e),file=sys.stderr)
exit(-1)
else:
raise RuntimeError("Command line does not specify a bipartite graph")
# Graph modifications
if getattr(args,"plantbiclique"+suffix) is not None:
l,r = getattr(args,"plantbiclique"+suffix)
left,right = bipartite_sets(G)
if l > len(left) or r > len(right) :
raise ValueError("Clique cannot be larger than graph")
left = random.sample(left, l)
right = random.sample(right, r)
for v,w in product(left,right):
G.add_edge(v,w)
if getattr(args,"addedges"+suffix) is not None:
k = getattr(args,"addedges"+suffix)
G.add_edges_from(sample_missing_edges(G,k))
if hasattr(G, 'name'):
G.name = "{} with {} new random edges".format(G.name,k)
# Output the graph if requested
if getattr(args,"savegraph"+suffix) is not None:
writeGraph(G,
getattr(args,"savegraph"+suffix),
'bipartite',
getattr(args,"graphformat"+suffix))
return G
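# Usage sketch (hypothetical, for illustration only): the argparse actions
# defined above are meant to be registered on a parser and then consumed by
# obtain_graph(). A minimal stand-in for the package's positive_int helper is
# included so the snippet is self-contained; BipartiteLeft is the action
# class defined above.
#
#   import argparse
#   def positive_int(value):  # stand-in for the package helper
#       i = int(value)
#       if i <= 0:
#           raise ValueError("'{}' is not a positive integer".format(value))
#       return i
#   parser = argparse.ArgumentParser()
#   parser.add_argument('--bd', type=positive_int, nargs=3,
#                       action=BipartiteLeft, metavar=('l', 'r', 'd'))
#   args = parser.parse_args(['--bd', '10', '8', '3'])
#   assert args.bd == (10, 8, 3)   # d=3 <= r=8, so validation passes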
| gpl-3.0 |
BackupGGCode/python-for-android | python3-alpha/python3-src/Lib/lib2to3/fixes/fix_except.py | 203 | 3344 | """Fixer for except statements with named exceptions.
The following cases will be converted:
- "except E, T:" where T is a name:
except E as T:
- "except E, T:" where T is not a name, tuple or list:
except E as t:
T = t
This is done because the target of an "except" clause must be a
name.
- "except E, T:" where T is a tuple or list literal:
except E as t:
T = t.args
"""
# Author: Collin Winter
# Local imports
from .. import pytree
from ..pgen2 import token
from .. import fixer_base
from ..fixer_util import Assign, Attr, Name, is_tuple, is_list, syms
def find_excepts(nodes):
for i, n in enumerate(nodes):
if n.type == syms.except_clause:
if n.children[0].value == 'except':
yield (n, nodes[i+2])
class FixExcept(fixer_base.BaseFix):
BM_compatible = True
PATTERN = """
try_stmt< 'try' ':' (simple_stmt | suite)
cleanup=(except_clause ':' (simple_stmt | suite))+
tail=(['except' ':' (simple_stmt | suite)]
['else' ':' (simple_stmt | suite)]
['finally' ':' (simple_stmt | suite)]) >
"""
def transform(self, node, results):
syms = self.syms
tail = [n.clone() for n in results["tail"]]
try_cleanup = [ch.clone() for ch in results["cleanup"]]
for except_clause, e_suite in find_excepts(try_cleanup):
if len(except_clause.children) == 4:
(E, comma, N) = except_clause.children[1:4]
comma.replace(Name("as", prefix=" "))
if N.type != token.NAME:
# Generate a new N for the except clause
new_N = Name(self.new_name(), prefix=" ")
target = N.clone()
target.prefix = ""
N.replace(new_N)
new_N = new_N.clone()
# Insert "old_N = new_N" as the first statement in
# the except body. This loop skips leading whitespace
# and indents
#TODO(cwinter) suite-cleanup
suite_stmts = e_suite.children
for i, stmt in enumerate(suite_stmts):
if isinstance(stmt, pytree.Node):
break
# The assignment is different if old_N is a tuple or list
# In that case, the assignment is old_N = new_N.args
if is_tuple(N) or is_list(N):
assign = Assign(target, Attr(new_N, Name('args')))
else:
assign = Assign(target, new_N)
#TODO(cwinter) stopgap until children becomes a smart list
for child in reversed(suite_stmts[:i]):
e_suite.insert_child(0, child)
e_suite.insert_child(i, assign)
elif N.prefix == "":
# No space after a comma is legal; no space after "as",
# not so much.
N.prefix = " "
#TODO(cwinter) fix this when children becomes a smart list
children = [c.clone() for c in node.children[:3]] + try_cleanup + tail
return pytree.Node(node.type, children)
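# Minimal usage sketch (illustrative; RefactoringTool and refactor_string are
# the standard lib2to3 driver API): running just this fixer over Python 2
# source shows the rewrite described in the module docstring.
#
#   from lib2to3.refactor import RefactoringTool
#   tool = RefactoringTool(['lib2to3.fixes.fix_except'])
#   src = "try:\n    pass\nexcept Exception, err:\n    pass\n"
#   print(tool.refactor_string(src, '<example>'))
#   # -> the clause becomes: except Exception as err: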
| apache-2.0 |
jt6562/XX-Net | python27/1.0/lib/dummy_thread.py | 73 | 4563 | """Drop-in replacement for the thread module.
Meant to be used as a brain-dead substitute so that threaded code does
not need to be rewritten for when the thread module is not present.
Suggested usage is::
try:
import thread
except ImportError:
import dummy_thread as thread
"""
# Exports only things specified by thread documentation;
# skipping obsolete synonyms allocate(), start_new(), exit_thread().
__all__ = ['error', 'start_new_thread', 'exit', 'get_ident', 'allocate_lock',
'interrupt_main', 'LockType']
import traceback as _traceback
class error(Exception):
"""Dummy implementation of thread.error."""
def __init__(self, *args):
self.args = args
def start_new_thread(function, args, kwargs={}):
"""Dummy implementation of thread.start_new_thread().
Compatibility is maintained by making sure that ``args`` is a
tuple and ``kwargs`` is a dictionary. If an exception is raised
and it is SystemExit (which can be done by thread.exit()) it is
caught and nothing is done; all other exceptions are printed out
by using traceback.print_exc().
If the executed function calls interrupt_main the KeyboardInterrupt will be
raised when the function returns.
"""
if type(args) != type(tuple()):
raise TypeError("2nd arg must be a tuple")
if type(kwargs) != type(dict()):
raise TypeError("3rd arg must be a dict")
global _main
_main = False
try:
function(*args, **kwargs)
except SystemExit:
pass
except:
_traceback.print_exc()
_main = True
global _interrupt
if _interrupt:
_interrupt = False
raise KeyboardInterrupt
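# Illustrative behavior (a minimal sketch, not part of the documented API):
# the "thread" runs synchronously in the caller, so its side effects are
# visible immediately after start_new_thread() returns.
#
#   results = []
#   start_new_thread(results.append, (42,))
#   assert results == [42]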
def exit():
"""Dummy implementation of thread.exit()."""
raise SystemExit
def get_ident():
"""Dummy implementation of thread.get_ident().
Since this module should only be used when threadmodule is not
available, it is safe to assume that the current process is the
only thread. Thus a constant can be safely returned.
"""
return -1
def allocate_lock():
"""Dummy implementation of thread.allocate_lock()."""
return LockType()
def stack_size(size=None):
"""Dummy implementation of thread.stack_size()."""
if size is not None:
raise error("setting thread stack size not supported")
return 0
class LockType(object):
"""Class implementing dummy implementation of thread.LockType.
Compatibility is maintained by maintaining self.locked_status
which is a boolean that stores the state of the lock. Pickling of
the lock, though, should not be done since if the thread module is
then used with an unpickled ``lock()`` from here problems could
occur from this class not having atomic methods.
"""
def __init__(self):
self.locked_status = False
def acquire(self, waitflag=None):
"""Dummy implementation of acquire().
For blocking calls, self.locked_status is automatically set to
True and returned appropriately based on value of
``waitflag``. If it is non-blocking, then the value is
actually checked and not set if it is already acquired. This
is all done so that threading.Condition's assert statements
aren't triggered and throw a little fit.
"""
if waitflag is None or waitflag:
self.locked_status = True
return True
else:
if not self.locked_status:
self.locked_status = True
return True
else:
return False
__enter__ = acquire
def __exit__(self, typ, val, tb):
self.release()
def release(self):
"""Release the dummy lock."""
# XXX Perhaps shouldn't actually bother to test? Could lead
# to problems for complex, threaded code.
if not self.locked_status:
raise error
self.locked_status = False
return True
def locked(self):
return self.locked_status
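# Illustrative behavior (minimal sketch): the dummy lock supports the same
# protocol as a real thread lock, including use as a context manager.
#
#   lock = allocate_lock()
#   with lock:               # __enter__ is acquire()
#       assert lock.locked()
#   assert not lock.locked()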
# Used to signal that interrupt_main was called in a "thread"
_interrupt = False
# True when not executing in a "thread"
_main = True
def interrupt_main():
"""Set _interrupt flag to True to have start_new_thread raise
KeyboardInterrupt upon exiting."""
if _main:
raise KeyboardInterrupt
else:
global _interrupt
_interrupt = True
| bsd-2-clause |
gengue/django | tests/queries/models.py | 33 | 16621 | """
Various complex queries that have been problematic in the past.
"""
from __future__ import unicode_literals
import threading
from django.db import models
from django.utils import six
from django.utils.encoding import python_2_unicode_compatible
class DumbCategory(models.Model):
pass
class ProxyCategory(DumbCategory):
class Meta:
proxy = True
@python_2_unicode_compatible
class NamedCategory(DumbCategory):
name = models.CharField(max_length=10)
def __str__(self):
return self.name
@python_2_unicode_compatible
class Tag(models.Model):
name = models.CharField(max_length=10)
parent = models.ForeignKey('self', blank=True, null=True,
related_name='children')
category = models.ForeignKey(NamedCategory, null=True, default=None)
class Meta:
ordering = ['name']
def __str__(self):
return self.name
@python_2_unicode_compatible
class Note(models.Model):
note = models.CharField(max_length=100)
misc = models.CharField(max_length=10)
class Meta:
ordering = ['note']
def __str__(self):
return self.note
def __init__(self, *args, **kwargs):
super(Note, self).__init__(*args, **kwargs)
# Regression for #13227 -- having an attribute that
# is unpickleable doesn't stop you from cloning queries
# that use objects of that type as an argument.
self.lock = threading.Lock()
@python_2_unicode_compatible
class Annotation(models.Model):
name = models.CharField(max_length=10)
tag = models.ForeignKey(Tag)
notes = models.ManyToManyField(Note)
def __str__(self):
return self.name
@python_2_unicode_compatible
class ExtraInfo(models.Model):
info = models.CharField(max_length=100)
note = models.ForeignKey(Note)
value = models.IntegerField(null=True)
class Meta:
ordering = ['info']
def __str__(self):
return self.info
@python_2_unicode_compatible
class Author(models.Model):
name = models.CharField(max_length=10)
num = models.IntegerField(unique=True)
extra = models.ForeignKey(ExtraInfo)
class Meta:
ordering = ['name']
def __str__(self):
return self.name
@python_2_unicode_compatible
class Item(models.Model):
name = models.CharField(max_length=10)
created = models.DateTimeField()
modified = models.DateTimeField(blank=True, null=True)
tags = models.ManyToManyField(Tag, blank=True)
creator = models.ForeignKey(Author)
note = models.ForeignKey(Note)
class Meta:
ordering = ['-note', 'name']
def __str__(self):
return self.name
@python_2_unicode_compatible
class Report(models.Model):
name = models.CharField(max_length=10)
creator = models.ForeignKey(Author, to_field='num', null=True)
def __str__(self):
return self.name
@python_2_unicode_compatible
class Ranking(models.Model):
rank = models.IntegerField()
author = models.ForeignKey(Author)
class Meta:
# A complex ordering specification. Should stress the system a bit.
ordering = ('author__extra__note', 'author__name', 'rank')
def __str__(self):
return '%d: %s' % (self.rank, self.author.name)
@python_2_unicode_compatible
class Cover(models.Model):
title = models.CharField(max_length=50)
item = models.ForeignKey(Item)
class Meta:
ordering = ['item']
def __str__(self):
return self.title
@python_2_unicode_compatible
class Number(models.Model):
num = models.IntegerField()
def __str__(self):
return six.text_type(self.num)
# Symmetrical m2m field with a normal field using the reverse accessor name
# ("valid").
class Valid(models.Model):
valid = models.CharField(max_length=10)
parent = models.ManyToManyField('self')
class Meta:
ordering = ['valid']
# Some funky cross-linked models for testing a couple of infinite recursion
# cases.
class X(models.Model):
y = models.ForeignKey('Y')
class Y(models.Model):
x1 = models.ForeignKey(X, related_name='y1')
# Some models with a cycle in the default ordering. This would be bad if we
# didn't catch the infinite loop.
class LoopX(models.Model):
y = models.ForeignKey('LoopY')
class Meta:
ordering = ['y']
class LoopY(models.Model):
x = models.ForeignKey(LoopX)
class Meta:
ordering = ['x']
class LoopZ(models.Model):
z = models.ForeignKey('self')
class Meta:
ordering = ['z']
# A model and custom default manager combination.
class CustomManager(models.Manager):
def get_queryset(self):
qs = super(CustomManager, self).get_queryset()
return qs.filter(public=True, tag__name='t1')
@python_2_unicode_compatible
class ManagedModel(models.Model):
data = models.CharField(max_length=10)
tag = models.ForeignKey(Tag)
public = models.BooleanField(default=True)
objects = CustomManager()
normal_manager = models.Manager()
def __str__(self):
return self.data
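# Illustrative queryset behavior (hypothetical shell session): the custom
# default manager filters, while the plain manager does not.
#
#   ManagedModel.objects.all()          # only public=True rows tagged 't1'
#   ManagedModel.normal_manager.all()   # every row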
# An inter-related setup with multiple paths from Child to Detail.
class Detail(models.Model):
data = models.CharField(max_length=10)
class MemberManager(models.Manager):
def get_queryset(self):
return super(MemberManager, self).get_queryset().select_related("details")
class Member(models.Model):
name = models.CharField(max_length=10)
details = models.OneToOneField(Detail, primary_key=True)
objects = MemberManager()
class Child(models.Model):
person = models.OneToOneField(Member, primary_key=True)
parent = models.ForeignKey(Member, related_name="children")
# Custom primary keys interfered with ordering in the past.
class CustomPk(models.Model):
name = models.CharField(max_length=10, primary_key=True)
extra = models.CharField(max_length=10)
class Meta:
ordering = ['name', 'extra']
class Related(models.Model):
custom = models.ForeignKey(CustomPk)
class CustomPkTag(models.Model):
id = models.CharField(max_length=20, primary_key=True)
custom_pk = models.ManyToManyField(CustomPk)
tag = models.CharField(max_length=20)
# An inter-related setup with a model subclass that has a nullable
# path to another model, and a return path from that model.
@python_2_unicode_compatible
class Celebrity(models.Model):
name = models.CharField("Name", max_length=20)
greatest_fan = models.ForeignKey("Fan", null=True, unique=True)
def __str__(self):
return self.name
class TvChef(Celebrity):
pass
class Fan(models.Model):
fan_of = models.ForeignKey(Celebrity)
# Multiple foreign keys
@python_2_unicode_compatible
class LeafA(models.Model):
data = models.CharField(max_length=10)
def __str__(self):
return self.data
class LeafB(models.Model):
data = models.CharField(max_length=10)
class Join(models.Model):
a = models.ForeignKey(LeafA)
b = models.ForeignKey(LeafB)
@python_2_unicode_compatible
class ReservedName(models.Model):
name = models.CharField(max_length=20)
order = models.IntegerField()
def __str__(self):
return self.name
# A simpler shared-foreign-key setup that can expose some problems.
@python_2_unicode_compatible
class SharedConnection(models.Model):
data = models.CharField(max_length=10)
def __str__(self):
return self.data
class PointerA(models.Model):
connection = models.ForeignKey(SharedConnection)
class PointerB(models.Model):
connection = models.ForeignKey(SharedConnection)
# Multi-layer ordering
@python_2_unicode_compatible
class SingleObject(models.Model):
name = models.CharField(max_length=10)
class Meta:
ordering = ['name']
def __str__(self):
return self.name
class RelatedObject(models.Model):
single = models.ForeignKey(SingleObject, null=True)
f = models.IntegerField(null=True)
class Meta:
ordering = ['single']
@python_2_unicode_compatible
class Plaything(models.Model):
name = models.CharField(max_length=10)
others = models.ForeignKey(RelatedObject, null=True)
class Meta:
ordering = ['others']
def __str__(self):
return self.name
@python_2_unicode_compatible
class Article(models.Model):
name = models.CharField(max_length=20)
created = models.DateTimeField()
def __str__(self):
return self.name
@python_2_unicode_compatible
class Food(models.Model):
name = models.CharField(max_length=20, unique=True)
def __str__(self):
return self.name
@python_2_unicode_compatible
class Eaten(models.Model):
food = models.ForeignKey(Food, to_field="name", null=True)
meal = models.CharField(max_length=20)
def __str__(self):
return "%s at %s" % (self.food, self.meal)
@python_2_unicode_compatible
class Node(models.Model):
num = models.IntegerField(unique=True)
parent = models.ForeignKey("self", to_field="num", null=True)
def __str__(self):
return "%s" % self.num
# Bug #12252
@python_2_unicode_compatible
class ObjectA(models.Model):
name = models.CharField(max_length=50)
def __str__(self):
return self.name
def __iter__(self):
# Ticket #23721
assert False, 'type checking should happen without calling model __iter__'
class ProxyObjectA(ObjectA):
class Meta:
proxy = True
class ChildObjectA(ObjectA):
pass
@python_2_unicode_compatible
class ObjectB(models.Model):
name = models.CharField(max_length=50)
objecta = models.ForeignKey(ObjectA)
num = models.PositiveSmallIntegerField()
def __str__(self):
return self.name
class ProxyObjectB(ObjectB):
class Meta:
proxy = True
@python_2_unicode_compatible
class ObjectC(models.Model):
name = models.CharField(max_length=50)
objecta = models.ForeignKey(ObjectA, null=True)
objectb = models.ForeignKey(ObjectB, null=True)
childobjecta = models.ForeignKey(ChildObjectA, null=True, related_name='ca_pk')
def __str__(self):
return self.name
@python_2_unicode_compatible
class SimpleCategory(models.Model):
name = models.CharField(max_length=15)
def __str__(self):
return self.name
@python_2_unicode_compatible
class SpecialCategory(SimpleCategory):
special_name = models.CharField(max_length=15)
def __str__(self):
return self.name + " " + self.special_name
@python_2_unicode_compatible
class CategoryItem(models.Model):
category = models.ForeignKey(SimpleCategory)
def __str__(self):
return "category item: " + str(self.category)
@python_2_unicode_compatible
class OneToOneCategory(models.Model):
new_name = models.CharField(max_length=15)
category = models.OneToOneField(SimpleCategory)
def __str__(self):
return "one2one " + self.new_name
class CategoryRelationship(models.Model):
first = models.ForeignKey(SimpleCategory, related_name='first_rel')
second = models.ForeignKey(SimpleCategory, related_name='second_rel')
class NullableName(models.Model):
name = models.CharField(max_length=20, null=True)
class Meta:
ordering = ['id']
class ModelD(models.Model):
name = models.TextField()
class ModelC(models.Model):
name = models.TextField()
class ModelB(models.Model):
name = models.TextField()
c = models.ForeignKey(ModelC)
class ModelA(models.Model):
name = models.TextField()
b = models.ForeignKey(ModelB, null=True)
d = models.ForeignKey(ModelD)
@python_2_unicode_compatible
class Job(models.Model):
name = models.CharField(max_length=20, unique=True)
def __str__(self):
return self.name
class JobResponsibilities(models.Model):
job = models.ForeignKey(Job, to_field='name')
responsibility = models.ForeignKey('Responsibility', to_field='description')
@python_2_unicode_compatible
class Responsibility(models.Model):
description = models.CharField(max_length=20, unique=True)
jobs = models.ManyToManyField(Job, through=JobResponsibilities,
related_name='responsibilities')
def __str__(self):
return self.description
# Models for disjunction join promotion low level testing.
class FK1(models.Model):
f1 = models.TextField()
f2 = models.TextField()
class FK2(models.Model):
f1 = models.TextField()
f2 = models.TextField()
class FK3(models.Model):
f1 = models.TextField()
f2 = models.TextField()
class BaseA(models.Model):
a = models.ForeignKey(FK1, null=True)
b = models.ForeignKey(FK2, null=True)
c = models.ForeignKey(FK3, null=True)
@python_2_unicode_compatible
class Identifier(models.Model):
name = models.CharField(max_length=100)
def __str__(self):
return self.name
class Program(models.Model):
identifier = models.OneToOneField(Identifier)
class Channel(models.Model):
programs = models.ManyToManyField(Program)
identifier = models.OneToOneField(Identifier)
class Book(models.Model):
title = models.TextField()
chapter = models.ForeignKey('Chapter')
class Chapter(models.Model):
title = models.TextField()
paragraph = models.ForeignKey('Paragraph')
class Paragraph(models.Model):
text = models.TextField()
page = models.ManyToManyField('Page')
class Page(models.Model):
text = models.TextField()
class MyObject(models.Model):
parent = models.ForeignKey('self', null=True, blank=True, related_name='children')
data = models.CharField(max_length=100)
created_at = models.DateTimeField(auto_now_add=True)
# Models for #17600 regressions
@python_2_unicode_compatible
class Order(models.Model):
id = models.IntegerField(primary_key=True)
class Meta:
ordering = ('pk', )
def __str__(self):
return '%s' % self.pk
@python_2_unicode_compatible
class OrderItem(models.Model):
order = models.ForeignKey(Order, related_name='items')
status = models.IntegerField()
class Meta:
ordering = ('pk', )
def __str__(self):
return '%s' % self.pk
class BaseUser(models.Model):
pass
@python_2_unicode_compatible
class Task(models.Model):
title = models.CharField(max_length=10)
owner = models.ForeignKey(BaseUser, related_name='owner')
creator = models.ForeignKey(BaseUser, related_name='creator')
def __str__(self):
return self.title
@python_2_unicode_compatible
class Staff(models.Model):
name = models.CharField(max_length=10)
def __str__(self):
return self.name
@python_2_unicode_compatible
class StaffUser(BaseUser):
staff = models.OneToOneField(Staff, related_name='user')
def __str__(self):
return str(self.staff)
class Ticket21203Parent(models.Model):
parentid = models.AutoField(primary_key=True)
parent_bool = models.BooleanField(default=True)
created = models.DateTimeField(auto_now=True)
class Ticket21203Child(models.Model):
childid = models.AutoField(primary_key=True)
parent = models.ForeignKey(Ticket21203Parent)
class Person(models.Model):
name = models.CharField(max_length=128)
@python_2_unicode_compatible
class Company(models.Model):
name = models.CharField(max_length=128)
employees = models.ManyToManyField(Person, related_name='employers', through='Employment')
def __str__(self):
return self.name
class Employment(models.Model):
employer = models.ForeignKey(Company)
employee = models.ForeignKey(Person)
title = models.CharField(max_length=128)
# Bug #22429
class School(models.Model):
pass
class Student(models.Model):
school = models.ForeignKey(School)
class Classroom(models.Model):
school = models.ForeignKey(School)
students = models.ManyToManyField(Student, related_name='classroom')
class Ticket23605AParent(models.Model):
pass
class Ticket23605A(Ticket23605AParent):
pass
class Ticket23605B(models.Model):
modela_fk = models.ForeignKey(Ticket23605A)
modelc_fk = models.ForeignKey("Ticket23605C")
field_b0 = models.IntegerField(null=True)
field_b1 = models.BooleanField(default=False)
class Ticket23605C(models.Model):
field_c0 = models.FloatField()
# db_table names have capital letters to ensure they are quoted in queries.
class Individual(models.Model):
alive = models.BooleanField()
class Meta:
db_table = 'Individual'
class RelatedIndividual(models.Model):
related = models.ForeignKey(Individual, related_name='related_individual')
class Meta:
db_table = 'RelatedIndividual'
| bsd-3-clause |
nerzhul/ansible | contrib/inventory/cobbler.py | 93 | 10625 | #!/usr/bin/env python
"""
Cobbler external inventory script
=================================
Ansible has a feature where instead of reading from /etc/ansible/hosts
as a text file, it can query external programs to obtain the list
of hosts, groups the hosts are in, and even variables to assign to each host.
To use this, copy this file over /etc/ansible/hosts and chmod +x the file.
This, more or less, allows you to keep one central database containing
info about all of your managed instances.
This script is an example of sourcing that data from Cobbler
(http://cobbler.github.com). With cobbler each --mgmt-class in cobbler
will correspond to a group in Ansible, and --ks-meta variables will be
passed down for use in templates or even in argument lines.
NOTE: The cobbler system names will not be used. Make sure a
cobbler --dns-name is set for each cobbler system. If a system
appears with two DNS names we do not add it twice because we don't want
ansible talking to it twice. The first one found will be used. If no
--dns-name is set the system will NOT be visible to ansible. We do
not add cobbler system names because there is no requirement in cobbler
that those correspond to addresses.
See http://ansible.github.com/api.html for more info
Tested with Cobbler 2.0.11.
Changelog:
- 2015-06-21 dmccue: Modified to support run-once _meta retrieval, results in
higher performance at ansible startup. Groups are determined by owner rather than
default mgmt_classes. DNS name determined from hostname. cobbler values are written
to a 'cobbler' fact namespace
- 2013-09-01 pgehres: Refactored implementation to make use of caching and to
limit the number of connections to external cobbler server for performance.
Added use of cobbler.ini file to configure settings. Tested with Cobbler 2.4.0
"""
# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible,
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
######################################################################
import argparse
import ConfigParser
import os
import re
from time import time
import xmlrpclib
try:
import json
except ImportError:
import simplejson as json
from six import iteritems
# NOTE -- this file assumes Ansible is being accessed FROM the cobbler
# server, so it does not attempt to login with a username and password.
# this will be addressed in a future version of this script.
orderby_keyname = 'owners' # alternatively 'mgmt_classes'
class CobblerInventory(object):
def __init__(self):
""" Main execution path """
self.conn = None
self.inventory = dict() # A list of groups and the hosts in that group
self.cache = dict() # Details about hosts in the inventory
# Read settings and parse CLI arguments
self.read_settings()
self.parse_cli_args()
# Cache
if self.args.refresh_cache:
self.update_cache()
elif not self.is_cache_valid():
self.update_cache()
else:
self.load_inventory_from_cache()
self.load_cache_from_cache()
data_to_print = ""
# Data to print
if self.args.host:
data_to_print += self.get_host_info()
else:
self.inventory['_meta'] = { 'hostvars': {} }
for hostname in self.cache:
self.inventory['_meta']['hostvars'][hostname] = {'cobbler': self.cache[hostname] }
data_to_print += self.json_format_dict(self.inventory, True)
print(data_to_print)
def _connect(self):
if not self.conn:
self.conn = xmlrpclib.Server(self.cobbler_host, allow_none=True)
self.token = None
if self.cobbler_username is not None:
self.token = self.conn.login(self.cobbler_username, self.cobbler_password)
def is_cache_valid(self):
""" Determines if the cache files have expired, or if it is still valid """
if os.path.isfile(self.cache_path_cache):
mod_time = os.path.getmtime(self.cache_path_cache)
current_time = time()
if (mod_time + self.cache_max_age) > current_time:
if os.path.isfile(self.cache_path_inventory):
return True
return False
def read_settings(self):
""" Reads the settings from the cobbler.ini file """
config = ConfigParser.SafeConfigParser()
config.read(os.path.dirname(os.path.realpath(__file__)) + '/cobbler.ini')
self.cobbler_host = config.get('cobbler', 'host')
self.cobbler_username = None
self.cobbler_password = None
if config.has_option('cobbler', 'username'):
self.cobbler_username = config.get('cobbler', 'username')
if config.has_option('cobbler', 'password'):
self.cobbler_password = config.get('cobbler', 'password')
# Cache related
cache_path = config.get('cobbler', 'cache_path')
self.cache_path_cache = cache_path + "/ansible-cobbler.cache"
self.cache_path_inventory = cache_path + "/ansible-cobbler.index"
self.cache_max_age = config.getint('cobbler', 'cache_max_age')
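# Illustrative cobbler.ini (hypothetical values) matching the keys read
# above; username and password are optional:
#
#   [cobbler]
#   host = http://127.0.0.1/cobbler_api
#   username = cobbler
#   password = secret
#   cache_path = /tmp
#   cache_max_age = 900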
def parse_cli_args(self):
""" Command line argument processing """
parser = argparse.ArgumentParser(description='Produce an Ansible Inventory file based on Cobbler')
parser.add_argument('--list', action='store_true', default=True, help='List instances (default: True)')
parser.add_argument('--host', action='store', help='Get all the variables about a specific instance')
parser.add_argument('--refresh-cache', action='store_true', default=False,
help='Force refresh of cache by making API requests to cobbler (default: False - use cache files)')
self.args = parser.parse_args()
def update_cache(self):
""" Make calls to cobbler and save the output in a cache """
self._connect()
self.groups = dict()
self.hosts = dict()
if self.token is not None:
data = self.conn.get_systems(self.token)
else:
data = self.conn.get_systems()
for host in data:
# Get the FQDN for the host and add it to the right groups
dns_name = host['hostname']
ksmeta = None
interfaces = host['interfaces']
# hostname is often empty for non-static IP hosts
if dns_name == '':
for (iname, ivalue) in iteritems(interfaces):
if ivalue['management'] or not ivalue['static']:
this_dns_name = ivalue.get('dns_name', None)
if this_dns_name is not None and this_dns_name != "":
dns_name = this_dns_name
if dns_name == '':
continue
status = host['status']
profile = host['profile']
classes = host[orderby_keyname]
if status not in self.inventory:
self.inventory[status] = []
self.inventory[status].append(dns_name)
if profile not in self.inventory:
self.inventory[profile] = []
self.inventory[profile].append(dns_name)
for cls in classes:
if cls not in self.inventory:
self.inventory[cls] = []
self.inventory[cls].append(dns_name)
# Since we already have all of the data for the host, update the host details as well
# The old way was ksmeta only -- provide backwards compatibility
self.cache[dns_name] = host
if "ks_meta" in host:
for key, value in iteritems(host["ks_meta"]):
self.cache[dns_name][key] = value
self.write_to_cache(self.cache, self.cache_path_cache)
self.write_to_cache(self.inventory, self.cache_path_inventory)
def get_host_info(self):
""" Get variables about a specific host """
if not self.cache or len(self.cache) == 0:
# Need to load index from cache
self.load_cache_from_cache()
if self.args.host not in self.cache:
# try updating the cache
self.update_cache()
if self.args.host not in self.cache:
# host might not exist anymore
return self.json_format_dict({}, True)
return self.json_format_dict(self.cache[self.args.host], True)
def push(self, my_dict, key, element):
""" Pushed an element onto an array that may not have been defined in the dict """
if key in my_dict:
my_dict[key].append(element)
else:
my_dict[key] = [element]
def load_inventory_from_cache(self):
""" Reads the index from the cache file sets self.index """
cache = open(self.cache_path_inventory, 'r')
json_inventory = cache.read()
self.inventory = json.loads(json_inventory)
def load_cache_from_cache(self):
""" Reads the cache from the cache file sets self.cache """
cache = open(self.cache_path_cache, 'r')
json_cache = cache.read()
self.cache = json.loads(json_cache)
def write_to_cache(self, data, filename):
""" Writes data in JSON format to a file """
json_data = self.json_format_dict(data, True)
cache = open(filename, 'w')
cache.write(json_data)
cache.close()
def to_safe(self, word):
""" Converts 'bad' characters in a string to underscores so they can be used as Ansible groups """
return re.sub(r"[^A-Za-z0-9\-]", "_", word)
def json_format_dict(self, data, pretty=False):
""" Converts a dict to a JSON object and dumps it as a formatted string """
if pretty:
return json.dumps(data, sort_keys=True, indent=2)
else:
return json.dumps(data)
CobblerInventory()
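# Illustrative invocations (hypothetical hostname):
#   ./cobbler.py --list                      # full inventory with _meta hostvars
#   ./cobbler.py --host web01.example.com    # variables for one system
#   ./cobbler.py --refresh-cache --list      # bypass the cache files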
| gpl-3.0 |
yufengg/tensorflow | tensorflow/compiler/tests/adagrad_test.py | 27 | 5263 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Adagrad."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.compiler.tests.xla_test import XLATestCase
from tensorflow.python.framework import constant_op
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import adagrad
class AdagradOptimizerTest(XLATestCase):
def testBasic(self):
for dtype in self.float_types:
with self.test_session(), self.test_scope():
var0 = resource_variable_ops.ResourceVariable([1.0, 2.0], dtype=dtype)
var1 = resource_variable_ops.ResourceVariable([3.0, 4.0], dtype=dtype)
grads0 = constant_op.constant([0.1, 0.1], dtype=dtype)
grads1 = constant_op.constant([0.01, 0.01], dtype=dtype)
ada_opt = adagrad.AdagradOptimizer(3.0, initial_accumulator_value=0.1)
ada_update = ada_opt.apply_gradients(
zip([grads0, grads1], [var0, var1]))
variables.global_variables_initializer().run()
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], var0.eval())
self.assertAllClose([3.0, 4.0], var1.eval())
# Run 3 steps of adagrad
for _ in range(3):
ada_update.run()
# Validate updated params
self.assertAllCloseAccordingToType(
np.array([-1.6026098728179932, -0.6026098728179932]), var0.eval())
self.assertAllCloseAccordingToType(
np.array([2.715679168701172, 3.715679168701172]), var1.eval())
def testTensorLearningRate(self):
for dtype in self.float_types:
with self.test_session(), self.test_scope():
var0 = resource_variable_ops.ResourceVariable([1.0, 2.0], dtype=dtype)
var1 = resource_variable_ops.ResourceVariable([3.0, 4.0], dtype=dtype)
grads0 = constant_op.constant([0.1, 0.1], dtype=dtype)
grads1 = constant_op.constant([0.01, 0.01], dtype=dtype)
ada_opt = adagrad.AdagradOptimizer(
constant_op.constant(3.0), initial_accumulator_value=0.1)
ada_update = ada_opt.apply_gradients(
zip([grads0, grads1], [var0, var1]))
variables.global_variables_initializer().run()
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], var0.eval())
self.assertAllClose([3.0, 4.0], var1.eval())
# Run 3 steps of adagrad
for _ in range(3):
ada_update.run()
# Validate updated params
self.assertAllCloseAccordingToType(
np.array([-1.6026098728179932, -0.6026098728179932]), var0.eval())
self.assertAllCloseAccordingToType(
np.array([2.715679168701172, 3.715679168701172]), var1.eval())
def testSharing(self):
for dtype in self.float_types:
with self.test_session(), self.test_scope():
var0 = resource_variable_ops.ResourceVariable([1.0, 2.0], dtype=dtype)
var1 = resource_variable_ops.ResourceVariable([3.0, 4.0], dtype=dtype)
grads0 = constant_op.constant([0.1, 0.1], dtype=dtype)
grads1 = constant_op.constant([0.01, 0.01], dtype=dtype)
ada_opt = adagrad.AdagradOptimizer(3.0)
# Apply the optimizer twice. Both applications will use
# the same accums.
ada_update1 = ada_opt.apply_gradients(
zip([grads0, grads1], [var0, var1]))
ada_update2 = ada_opt.apply_gradients(
zip([grads0, grads1], [var0, var1]))
self.assertEqual(["accumulator"], ada_opt.get_slot_names())
slot0 = ada_opt.get_slot(var0, "accumulator")
self.assertEqual(slot0.get_shape(), var0.get_shape())
slot1 = ada_opt.get_slot(var1, "accumulator")
self.assertEqual(slot1.get_shape(), var1.get_shape())
variables.global_variables_initializer().run()
# Fetch params to validate initial values.
self.assertAllClose([1.0, 2.0], var0.eval())
self.assertAllClose([3.0, 4.0], var1.eval())
# Mix the first and the second adagrad for 3 steps.
ada_update1.run()
ada_update2.run()
ada_update1.run()
# Validate updated params (the same as with only 1 Adagrad).
self.assertAllCloseAccordingToType(
np.array([-1.6026098728179932, -0.6026098728179932]), var0.eval())
self.assertAllCloseAccordingToType(
np.array([2.715679168701172, 3.715679168701172]), var1.eval())
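# Reference computation (a minimal numpy sketch, not part of the test suite):
# dense Adagrad keeps a per-variable accumulator and applies
#     accum += grad**2;  var -= lr * grad / sqrt(accum)
# Three steps with lr=3.0, initial accumulator 0.1 and grad 0.1 reproduce the
# -1.6026098728179932 constant asserted above.
def _adagrad_reference(var, grad, lr=3.0, accum=0.1, steps=3):
  var = np.asarray(var, np.float64)
  for _ in range(steps):
    accum = accum + grad * grad
    var = var - lr * grad / np.sqrt(accum)
  return var  # _adagrad_reference([1.0, 2.0], 0.1) ~= [-1.60261, -0.60261]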
if __name__ == "__main__":
test.main()
| apache-2.0 |
inovtec-solutions/OpenERP | openerp/addons/account/report/account_report.py | 54 | 13022 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from datetime import datetime
from dateutil.relativedelta import relativedelta
from openerp import pooler
from openerp import tools
from openerp.osv import fields,osv
def _code_get(self, cr, uid, context=None):
acc_type_obj = self.pool.get('account.account.type')
ids = acc_type_obj.search(cr, uid, [])
res = acc_type_obj.read(cr, uid, ids, ['code', 'name'], context)
return [(r['code'], r['name']) for r in res]
class report_account_receivable(osv.osv):
_name = "report.account.receivable"
_description = "Receivable accounts"
_auto = False
_columns = {
'name': fields.char('Week of Year', size=7, readonly=True),
'type': fields.selection(_code_get, 'Account Type', required=True),
'balance':fields.float('Balance', readonly=True),
'debit':fields.float('Debit', readonly=True),
'credit':fields.float('Credit', readonly=True),
}
_order = 'name desc'
def init(self, cr):
tools.drop_view_if_exists(cr, 'report_account_receivable')
cr.execute("""
create or replace view report_account_receivable as (
select
min(l.id) as id,
to_char(date,'YYYY:IW') as name,
sum(l.debit-l.credit) as balance,
sum(l.debit) as debit,
sum(l.credit) as credit,
a.type
from
account_move_line l
left join
account_account a on (l.account_id=a.id)
where
l.state <> 'draft'
group by
to_char(date,'YYYY:IW'), a.type
)""")
report_account_receivable()
#a.type in ('receivable','payable')
class temp_range(osv.osv):
_name = 'temp.range'
_description = 'A Temporary table used for Dashboard view'
_columns = {
'name': fields.char('Range',size=64)
}
temp_range()
class report_aged_receivable(osv.osv):
_name = "report.aged.receivable"
_description = "Aged Receivable Till Today"
_auto = False
def __init__(self, pool, cr):
super(report_aged_receivable, self).__init__(pool, cr)
self.called = False
def fields_view_get(self, cr, user, view_id=None, view_type='form', context=None, toolbar=False, submenu=False):
""" To call the init() method timely
"""
if context is None:context = {}
if not self.called:
self.init(cr, user)
self.called = True # To make sure that init doesn't get called multiple times
res = super(report_aged_receivable, self).fields_view_get(cr, user, view_id, view_type, context, toolbar=toolbar, submenu=submenu)
return res
def _calc_bal(self, cr, uid, ids, name, args, context=None):
res = {}
for period in self.read(cr, uid, ids, ['name'], context=context):
date1,date2 = period['name'].split(' to ')
cr.execute("SELECT SUM(credit-debit) FROM account_move_line AS line, account_account as ac \
WHERE (line.account_id=ac.id) AND ac.type='receivable' \
AND (COALESCE(line.date,date) BETWEEN %s AND %s) \
AND (reconcile_id IS NULL) AND ac.active",(str(date2),str(date1),))
amount = cr.fetchone()
amount = amount[0] or 0.00
res[period['id']] = amount
return res
_columns = {
'name': fields.char('Month Range', size=7, readonly=True),
'balance': fields.function(_calc_bal, string='Balance', readonly=True),
}
def init(self, cr, uid=1):
""" This view will be used in dashboard
The reason writing this code here is, we need to check date range from today to first date of fiscal year.
"""
pool_obj_fy = pooler.get_pool(cr.dbname).get('account.fiscalyear')
today = time.strftime('%Y-%m-%d')
fy_id = pool_obj_fy.find(cr, uid, exception=False)
LIST_RANGES = []
if fy_id:
fy_start_date = pool_obj_fy.read(cr, uid, fy_id, ['date_start'])['date_start']
fy_start_date = datetime.strptime(fy_start_date, '%Y-%m-%d')
last_month_date = datetime.strptime(today, '%Y-%m-%d') - relativedelta(months=1)
while (last_month_date > fy_start_date):
LIST_RANGES.append(today + " to " + last_month_date.strftime('%Y-%m-%d'))
today = (last_month_date- relativedelta(days=1)).strftime('%Y-%m-%d')
last_month_date = datetime.strptime(today, '%Y-%m-%d') - relativedelta(months=1)
LIST_RANGES.append(today +" to " + fy_start_date.strftime('%Y-%m-%d'))
cr.execute('delete from temp_range')
for range_name in LIST_RANGES:
pooler.get_pool(cr.dbname).get('temp.range').create(cr, uid, {'name':range_name})
cr.execute("""
create or replace view report_aged_receivable as (
select id,name from temp_range
)""")
report_aged_receivable()
class report_invoice_created(osv.osv):
_name = "report.invoice.created"
_description = "Report of Invoices Created within Last 15 days"
_auto = False
_columns = {
'name': fields.char('Description', size=64, readonly=True),
'type': fields.selection([
('out_invoice','Customer Invoice'),
('in_invoice','Supplier Invoice'),
('out_refund','Customer Refund'),
('in_refund','Supplier Refund'),
],'Type', readonly=True),
'number': fields.char('Invoice Number', size=32, readonly=True),
'partner_id': fields.many2one('res.partner', 'Partner', readonly=True),
'amount_untaxed': fields.float('Untaxed', readonly=True),
'amount_total': fields.float('Total', readonly=True),
'currency_id': fields.many2one('res.currency', 'Currency', readonly=True),
'date_invoice': fields.date('Invoice Date', readonly=True),
'date_due': fields.date('Due Date', readonly=True),
'residual': fields.float('Residual', readonly=True),
'state': fields.selection([
('draft','Draft'),
('proforma','Pro-forma'),
('proforma2','Pro-forma'),
('open','Open'),
('paid','Done'),
('cancel','Cancelled')
],'Status', readonly=True),
'origin': fields.char('Source Document', size=64, readonly=True, help="Reference of the document that generated this invoice report."),
'create_date': fields.datetime('Create Date', readonly=True)
}
_order = 'create_date'
def init(self, cr):
tools.drop_view_if_exists(cr, 'report_invoice_created')
cr.execute("""create or replace view report_invoice_created as (
select
inv.id as id, inv.name as name, inv.type as type,
inv.number as number, inv.partner_id as partner_id,
inv.amount_untaxed as amount_untaxed,
inv.amount_total as amount_total, inv.currency_id as currency_id,
inv.date_invoice as date_invoice, inv.date_due as date_due,
inv.residual as residual, inv.state as state,
inv.origin as origin, inv.create_date as create_date
from
account_invoice inv
where
(to_date(to_char(inv.create_date, 'YYYY-MM-dd'),'YYYY-MM-dd') <= CURRENT_DATE)
AND
(to_date(to_char(inv.create_date, 'YYYY-MM-dd'),'YYYY-MM-dd') > (CURRENT_DATE-15))
)""")
report_invoice_created()
class report_account_type_sales(osv.osv):
_name = "report.account_type.sales"
_description = "Report of the Sales by Account Type"
_auto = False
_columns = {
'name': fields.char('Year', size=64, required=False, readonly=True),
'period_id': fields.many2one('account.period', 'Force Period', readonly=True),
'product_id': fields.many2one('product.product', 'Product', readonly=True),
'quantity': fields.float('Quantity', readonly=True),
'user_type': fields.many2one('account.account.type', 'Account Type', readonly=True),
'amount_total': fields.float('Total', readonly=True),
'currency_id': fields.many2one('res.currency', 'Currency', readonly=True),
'month':fields.selection([('01','January'), ('02','February'), ('03','March'), ('04','April'), ('05','May'), ('06','June'),
('07','July'), ('08','August'), ('09','September'), ('10','October'), ('11','November'), ('12','December')], 'Month', readonly=True),
}
_order = 'name desc,amount_total desc'
def init(self, cr):
tools.drop_view_if_exists(cr, 'report_account_type_sales')
cr.execute("""create or replace view report_account_type_sales as (
select
min(inv_line.id) as id,
to_char(inv.date_invoice, 'YYYY') as name,
to_char(inv.date_invoice,'MM') as month,
sum(inv_line.price_subtotal) as amount_total,
inv.currency_id as currency_id,
inv.period_id,
inv_line.product_id,
sum(inv_line.quantity) as quantity,
account.user_type
from
account_invoice_line inv_line
inner join account_invoice inv on inv.id = inv_line.invoice_id
inner join account_account account on account.id = inv_line.account_id
where
inv.state in ('open','paid')
group by
to_char(inv.date_invoice, 'YYYY'),to_char(inv.date_invoice,'MM'),inv.currency_id, inv.period_id, inv_line.product_id, account.user_type
)""")
report_account_type_sales()
class report_account_sales(osv.osv):
_name = "report.account.sales"
_description = "Report of the Sales by Account"
_auto = False
_columns = {
'name': fields.char('Year', size=64, required=False, readonly=True, select=True),
'period_id': fields.many2one('account.period', 'Force Period', readonly=True),
'product_id': fields.many2one('product.product', 'Product', readonly=True),
'quantity': fields.float('Quantity', readonly=True),
'account_id': fields.many2one('account.account', 'Account', readonly=True),
'amount_total': fields.float('Total', readonly=True),
'currency_id': fields.many2one('res.currency', 'Currency', readonly=True),
'month':fields.selection([('01','January'), ('02','February'), ('03','March'), ('04','April'), ('05','May'), ('06','June'),
('07','July'), ('08','August'), ('09','September'), ('10','October'), ('11','November'), ('12','December')], 'Month', readonly=True),
}
_order = 'name desc'
def init(self, cr):
tools.drop_view_if_exists(cr, 'report_account_sales')
cr.execute("""create or replace view report_account_sales as (
select
min(inv_line.id) as id,
to_char(inv.date_invoice, 'YYYY') as name,
to_char(inv.date_invoice,'MM') as month,
sum(inv_line.price_subtotal) as amount_total,
inv.currency_id as currency_id,
inv.period_id,
inv_line.product_id,
sum(inv_line.quantity) as quantity,
account.id as account_id
from
account_invoice_line inv_line
inner join account_invoice inv on inv.id = inv_line.invoice_id
inner join account_account account on account.id = inv_line.account_id
where
inv.state in ('open','paid')
group by
to_char(inv.date_invoice, 'YYYY'),to_char(inv.date_invoice,'MM'),inv.currency_id, inv.period_id, inv_line.product_id, account.id
)""")
report_account_sales()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
chainer/chainer | tests/chainer_tests/backends_tests/test_cpu.py | 6 | 2524 | import unittest
import numpy
from chainer import backend
from chainer import testing
class TestCpuDevice(unittest.TestCase):
def test_hashable(self):
assert isinstance(hash(backend.CpuDevice()), int)
class TestCpuDeviceFromArray(unittest.TestCase):
def check_device(self, device):
assert device.xp is numpy
assert device.supported_array_types == (numpy.ndarray,)
assert device.name == '@numpy'
assert str(device) == '@numpy'
def test_init(self):
device = backend.CpuDevice()
self.check_device(device)
def test_from_array(self):
arr = numpy.ndarray((2,), numpy.float32)
expected_device = backend.CpuDevice()
device = backend.CpuDevice.from_array(arr)
self.check_device(device)
assert device == expected_device
device = backend.get_device_from_array(arr)
self.check_device(device)
assert device == expected_device
@testing.backend.inject_backend_tests(
None,
[
{'use_cuda': True},
{'use_ideep': 'always'},
{'use_chainerx': True, 'chainerx_device': 'native:0'},
])
class TestCpuDeviceFromArrayInvalidArray(unittest.TestCase):
def test_from_array(self, backend_config):
arr = backend_config.get_array(numpy.ndarray((2,), numpy.float32))
device = backend.CpuDevice.from_array(arr)
assert device is None
@testing.parameterize(*testing.product(
{
'value': [None, 1, (), numpy.float32(1)],
}))
class TestCpuDeviceFromArrayInvalidValue(unittest.TestCase):
def test_from_array(self):
device = backend.CpuDevice.from_array(self.value)
assert device is None
@testing.backend.inject_backend_tests( # backend_config2
None,
[
{},
{'use_cuda': True},
{'use_ideep': 'always'},
{'use_chainerx': True, 'chainerx_device': 'native:0'},
])
@testing.backend.inject_backend_tests( # backend_config1
None,
[
{},
])
class TestCpuIsArraySupported(unittest.TestCase):
def test_is_array_supported(self, backend_config1, backend_config2):
target = backend_config1.device # backend.CpuDevice
arr = backend_config2.get_array(numpy.ndarray((2,), numpy.float32))
device = backend_config2.device
if isinstance(device, backend.CpuDevice):
assert target.is_array_supported(arr)
else:
assert not target.is_array_supported(arr)
testing.run_module(__name__, __file__)
| mit |
atilag/hammerhead-nexus5-kernel | tools/perf/util/setup.py | 4998 | 1330 | #!/usr/bin/python2
from distutils.core import setup, Extension
from os import getenv
from distutils.command.build_ext import build_ext as _build_ext
from distutils.command.install_lib import install_lib as _install_lib
class build_ext(_build_ext):
def finalize_options(self):
_build_ext.finalize_options(self)
self.build_lib = build_lib
self.build_temp = build_tmp
class install_lib(_install_lib):
def finalize_options(self):
_install_lib.finalize_options(self)
self.build_dir = build_lib
cflags = ['-fno-strict-aliasing', '-Wno-write-strings']
cflags += getenv('CFLAGS', '').split()
build_lib = getenv('PYTHON_EXTBUILD_LIB')
build_tmp = getenv('PYTHON_EXTBUILD_TMP')
ext_sources = [f.strip() for f in file('util/python-ext-sources')
if len(f.strip()) > 0 and f[0] != '#']
perf = Extension('perf',
sources = ext_sources,
include_dirs = ['util/include'],
extra_compile_args = cflags,
)
setup(name='perf',
version='0.1',
description='Interface with the Linux profiling infrastructure',
author='Arnaldo Carvalho de Melo',
author_email='acme@redhat.com',
license='GPLv2',
url='http://perf.wiki.kernel.org',
ext_modules=[perf],
cmdclass={'build_ext': build_ext, 'install_lib': install_lib})
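# Illustrative invocation (hypothetical paths; in practice the perf Makefile
# exports these variables before calling setup.py):
#   CFLAGS='-O2' PYTHON_EXTBUILD_LIB=/tmp/ext/lib PYTHON_EXTBUILD_TMP=/tmp/ext/tmp \
#       python2 util/setup.py --quiet build_ext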
| gpl-2.0 |
KellyChan/python-examples | python/django/elf/elf/src/elf/urls.py | 3 | 3517 | from django.conf.urls import patterns, include, url
from django.contrib import admin
urlpatterns = patterns('',
# Master: Index
url(r'^$', 'master.views.IndexView', name='index'),
url(r'^signup$', 'master.views.SignUpView', name='signup'),
url(r'^login$', 'master.views.LoginView', name='login'),
url(r'^logout$', 'master.views.LogoutView', name='logout'),
# Master: Dashboard
url(r'^dashboard/(?P<username>\w+)$', 'master.views.DashboardView', name='dashboard'),
# Master: Event
url(r'^dashboard/(?P<username>\w+)/event-manage/add/$', 'master.views.EventManageAddView', name='master-event-manage-add'),
url(r'^dashboard/(?P<username>\w+)/event-manage/delete/(?P<eventid>\d+)$', 'master.views.EventManageDeleteView', name='master-event-manage-delete'),
url(r'^dashboard/(?P<username>\w+)/event-manage/update/(?P<eventid>\d+)$', 'master.views.EventManageUpdateView', name='master-event-manage-update'),
# Master: Todo
url(r'^dashboard/(?P<username>\w+)/todo-manage/add/$', 'master.views.TodoManageAddView', name='master-todo-manage-add'),
url(r'^dashboard/(?P<username>\w+)/todo-manage/done/(?P<todoid>\d+)$', 'master.views.TodoManageDoneView', name='master-todo-manage-done'),
url(r'^dashboard/(?P<username>\w+)/todo-manage/delete/(?P<todoid>\d+)$', 'master.views.TodoManageDeleteView', name='master-todo-manage-delete'),
url(r'^dashboard/(?P<username>\w+)/todo-manage/update/(?P<todoid>\d+)$', 'master.views.TodoManageUpdateView', name='master-todo-manage-update'),
# Master: 10K Hours
url(r'^dashboard/(?P<username>\w+)/10khours-manage/add/$', 'master.views.HoursManageAddView', name='master-hours-manage-add'),
url(r'^dashboard/(?P<username>\w+)/10khours-manage/delete/(?P<trackerid>\d+)$', 'master.views.HoursManageDeleteView', name='master-hours-manage-delete'),
url(r'^dashboard/(?P<username>\w+)/10khours-manage/update/(?P<trackerid>\d+)$', 'master.views.HoursManageUpdateView', name='master-hours-manage-update'),
# Master: Mood
url(r'^dashboard/(?P<username>\w+)/mood-manage/add/$', 'master.views.MoodManageAddView', name='master-mood-manage-add'),
url(r'^dashboard/(?P<username>\w+)/mood-manage/delete/(?P<moodid>\d+)$', 'master.views.MoodManageDeleteView', name='master-mood-manage-delete'),
url(r'^dashboard/(?P<username>\w+)/mood-manage/update/(?P<moodid>\d+)$', 'master.views.MoodManageUpdateView', name='master-mood-manage-update'),
# Master: Dream
url(r'^dashboard/(?P<username>\w+)/dream-manage/add/$', 'master.views.DreamManageAddView', name='master-dream-manage-add'),
url(r'^dashboard/(?P<username>\w+)/dream-manage/delete/(?P<dreamid>\d+)$', 'master.views.DreamManageDeleteView', name='master-dream-manage-delete'),
url(r'^dashboard/(?P<username>\w+)/dream-manage/update/(?P<dreamid>\d+)$', 'master.views.DreamManageUpdateView', name='master-dream-manage-update'),
# Master: Diary
url(r'^dashboard/(?P<username>\w+)/diary-manage/add/$', 'master.views.DiaryManageAddView', name='master-diary-manage-add'),
url(r'^dashboard/(?P<username>\w+)/diary-manage/delete/(?P<diaryid>\d+)$', 'master.views.DiaryManageDeleteView', name='master-diary-manage-delete'),
url(r'^dashboard/(?P<username>\w+)/diary-manage/update/(?P<diaryid>\d+)$', 'master.views.DiaryManageUpdateView', name='master-diary-manage-update'),
# Apps
url(r'^dashboard/(?P<username>\w+)/todo/$', include('todo.urls')),
# Admin
url(r'^admin/', include(admin.site.urls)),
)
| mit |
rahushen/ansible | lib/ansible/modules/network/avi/avi_wafpolicy.py | 26 | 5675 | #!/usr/bin/python
#
# @author: Gaurav Rastogi (grastogi@avinetworks.com)
# Eric Anderson (eanderson@avinetworks.com)
# module_check: supported
#
# Copyright: (c) 2017 Gaurav Rastogi, <grastogi@avinetworks.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: avi_wafpolicy
author: Gaurav Rastogi (grastogi@avinetworks.com)
short_description: Module for setup of WafPolicy Avi RESTful Object
description:
- This module is used to configure WafPolicy object
- more examples at U(https://github.com/avinetworks/devops)
requirements: [ avisdk ]
version_added: "2.5"
options:
state:
description:
- The state that should be applied on the entity.
default: present
choices: ["absent", "present"]
avi_api_update_method:
description:
- Default method for object update is HTTP PUT.
- Setting to patch will override that behavior to use HTTP PATCH.
version_added: "2.5"
default: put
choices: ["put", "patch"]
avi_api_patch_op:
description:
- Patch operation to use when using avi_api_update_method as patch.
version_added: "2.5"
choices: ["add", "replace", "delete"]
created_by:
description:
- Creator name.
- Field introduced in 17.2.4.
crs_groups:
description:
- Waf rules are categorized in to groups based on their characterization.
- These groups are system created with crs groups.
- Field introduced in 17.2.1.
description:
description:
- Field introduced in 17.2.1.
mode:
description:
- Waf policy mode.
- This can be detection or enforcement.
- Enum options - WAF_MODE_DETECTION_ONLY, WAF_MODE_ENFORCEMENT.
- Field introduced in 17.2.1.
- Default value when not specified in API or module is interpreted by Avi Controller as WAF_MODE_DETECTION_ONLY.
required: true
name:
description:
- Field introduced in 17.2.1.
required: true
paranoia_level:
description:
- Waf ruleset paranoia mode.
- This is used to select rules based on the paranoia-level tag.
- Enum options - WAF_PARANOIA_LEVEL_LOW, WAF_PARANOIA_LEVEL_MEDIUM, WAF_PARANOIA_LEVEL_HIGH, WAF_PARANOIA_LEVEL_EXTREME.
- Field introduced in 17.2.1.
- Default value when not specified in API or module is interpreted by Avi Controller as WAF_PARANOIA_LEVEL_LOW.
post_crs_groups:
description:
            - Waf rules are categorized into groups based on their characterization.
- These groups are created by the user and will be enforced after the crs groups.
- Field introduced in 17.2.1.
pre_crs_groups:
description:
            - Waf rules are categorized into groups based on their characterization.
- These groups are created by the user and will be enforced before the crs groups.
- Field introduced in 17.2.1.
tenant_ref:
description:
- It is a reference to an object of type tenant.
- Field introduced in 17.2.1.
url:
description:
- Avi controller URL of the object.
uuid:
description:
- Field introduced in 17.2.1.
waf_profile_ref:
description:
- Waf profile for waf policy.
- It is a reference to an object of type wafprofile.
- Field introduced in 17.2.1.
required: true
extends_documentation_fragment:
- avi
'''
EXAMPLES = """
- name: Example to create WafPolicy object
avi_wafpolicy:
controller: 10.10.25.42
username: admin
password: something
state: present
    name: sample_wafpolicy
    # mode and waf_profile_ref are declared required above; the values below
    # are illustrative.
    mode: WAF_MODE_DETECTION_ONLY
    waf_profile_ref: /api/wafprofile?name=System-WAF-Profile
"""
RETURN = '''
obj:
description: WafPolicy (api/wafpolicy) object
returned: success, changed
type: dict
'''
from ansible.module_utils.basic import AnsibleModule
try:
from ansible.module_utils.network.avi.avi import (
avi_common_argument_spec, HAS_AVI, avi_ansible_api)
except ImportError:
HAS_AVI = False
def main():
argument_specs = dict(
state=dict(default='present',
choices=['absent', 'present']),
avi_api_update_method=dict(default='put',
choices=['put', 'patch']),
avi_api_patch_op=dict(choices=['add', 'replace', 'delete']),
created_by=dict(type='str',),
crs_groups=dict(type='list',),
description=dict(type='str',),
mode=dict(type='str', required=True),
name=dict(type='str', required=True),
paranoia_level=dict(type='str',),
post_crs_groups=dict(type='list',),
pre_crs_groups=dict(type='list',),
tenant_ref=dict(type='str',),
url=dict(type='str',),
uuid=dict(type='str',),
waf_profile_ref=dict(type='str', required=True),
)
argument_specs.update(avi_common_argument_spec())
module = AnsibleModule(
argument_spec=argument_specs, supports_check_mode=True)
if not HAS_AVI:
return module.fail_json(msg=(
'Avi python API SDK (avisdk>=17.1) is not installed. '
'For more details visit https://github.com/avinetworks/sdk.'))
return avi_ansible_api(module, 'wafpolicy',
set([]))
if __name__ == '__main__':
main()
| gpl-3.0 |
propublica/facebook-political-ads | backend/classifier/classifier/commands/entities.py | 1 | 2373 | """
Extract entities
"""
import json
from bs4 import BeautifulSoup
import click
import spacy
import en_core_web_sm
from classifier.utilities import DB, entities_confs
# from https://spacy.io/usage/linguistic-features#section-named-entities
LABELS = {
'PERSON': 'Person',
'NORP': 'Group',
'ORG': 'Organization',
'GPE': 'Region',
'LOC': 'Location',
'EVENT': 'Event',
'FAC': 'Facility',
'LAW': 'Law'
}
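# Only entity types listed in LABELS are kept; spaCy labels outside this map
# (DATE, MONEY, QUANTITY, ...) are dropped by the filter at the bottom of
# entities() below.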
@click.command("entities")
@click.pass_context
def entities(ctx):
"""
    Extract likely entities from political ads and write them back to the ads table
"""
for (directory, conf) in entities_confs(ctx.obj["base"]):
if conf:
lang = directory.split('/')[1]
print("running entity extraction for %s" % lang)
nlp = en_core_web_sm.load()
ads = DB.query("select * from ads where political_probability > 0.70 and lang = '%s' and entities = '[]'::jsonb" % lang)
query = "update ads set entities=:entities where id=:id"
updates = []
for advert in ads:
doc = BeautifulSoup(advert["html"], "html.parser")
text = ' '.join([graf.get_text() for graf in doc.select("p")])
update = {"id": advert["id"], "entities": set()}
for ent in nlp(text).ents:
if ent.text in conf["exclude"] or ent.text.isspace():
continue
has_parent = False
for parent, children in conf["parents"].items():
if ent.text in children["entities"]:
has_parent = True
update["entities"].add((parent, children["label"]))
if not has_parent:
update["entities"].add((ent.text, ent.label_))
update["entities"] = json.dumps([{"entity": e[0],
"entity_type": LABELS[e[1]]}
for e in update["entities"]
if e[1] in LABELS.keys()])
updates.append(update)
if len(updates) >= 100:
DB.bulk_query(query, updates)
updates = []
if updates:
DB.bulk_query(query, updates)
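# Hedged usage sketch: assuming the click group in the classifier package
# mounts this command, a run looks like
#
#   $ classifier entities
#
# which, for each language with an entities config, tags ads whose
# political_probability exceeds 0.70 and whose entities column is still
# empty, flushing updates in batches of 100.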
| mit |
davidwilson826/empty-app | HeadSoccer.py | 1 | 14692 | '''
Head Soccer
Author: David Wilson
Credit: http://stackoverflow.com/questions/7370801/measure-time-elapsed-in-python,
https://www.mathsisfun.com/hexadecimal-decimal-colors.html, http://brythonserver.github.io/ggame/
'''
from ggame import App, Sprite, CircleAsset, RectangleAsset, Color, LineStyle, TextAsset
from time import time
SCREEN_WIDTH = 1200
SCREEN_HEIGHT = 700
#SCREEN_WIDTH = 1000
#SCREEN_HEIGHT = 600
black = Color(0x000000, 1.0)
white = Color(0xffffff, 1.0)
blue = Color(0x0000ff, 1.0)
green = Color(0x00ff00, 1.0)
red = Color(0xff0000, 1.0)
yellow = Color(0xffff00, 1.0)
cyan = Color(0x00ffff, 1.0)
magenta = Color(0xff00ff, 1.0)
orange = Color(0xFFA500, 1.0)
purple = Color(0x800080, 1.0)
gray = Color(0xBEBEBE, 1.0)
noline = LineStyle(0.0, black)
thinline = LineStyle(1.0, black)
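# Destroys every live sprite of the given class; the outer while re-queries
# because Sprite.destroy() mutates the list that getSpritesbyClass returns.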
def classDestroy(sclass):
while len(HeadSoccer.getSpritesbyClass(sclass)) > 0:
for x in HeadSoccer.getSpritesbyClass(sclass):
x.destroy()
GRAVITY = 25
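# Note: GRAVITY is added to the vertical velocity once per frame (see
# Player.step and Ball's bounce/wall branches) rather than scaled by
# deltaTime, so the fall acceleration is frame-rate dependent.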
class Button(Sprite):
def __init__(self, asset, position):
super().__init__(asset, position)
self.fxcenter = self.fycenter = 0.5
class Border(Sprite):
def __init__(self, asset, position):
super().__init__(asset, position)
class Goal(Sprite):
asset = RectangleAsset(50, 300, noline, black)
def __init__(self, position):
super().__init__(Goal.asset, position)
self.ident = len(HeadSoccer.getSpritesbyClass(Goal))-1
class PhysicsObject(Sprite):
def __init__(self, asset, position):
super().__init__(asset, position)
self.fxcenter = self.fycenter = 0.5
self.velocity = [0,0]
self.circularCollisionModel()
def step(self):
self.x += self.velocity[0]*deltaTime
self.y += self.velocity[1]*deltaTime
class Player(PhysicsObject):
def __init__(self, asset, position):
super().__init__(asset, position)
self.mag = 50
self.speed = 200
self.jumpForce = 500
self.mass = 2
PlayerCover((0,0))
def right(self, event):
self.velocity[0] = self.speed
def left(self, event):
self.velocity[0] = -self.speed
def stop(self, event):
self.velocity[0] = 0
def jump(self, event):
if self.y == SCREEN_HEIGHT:
self.velocity[1] = -self.jumpForce
def step(self):
if self.x <= 25 and self.velocity[0] < 0:
self.velocity[0] = 0
if self.x >= SCREEN_WIDTH-25 and self.velocity[0] > 0:
self.velocity[0] = 0
super().step()
if self.y < SCREEN_HEIGHT:
self.velocity[1] += GRAVITY
elif self.y >= SCREEN_HEIGHT:
self.velocity[1] = 0
self.y = SCREEN_HEIGHT
class Player1(Player):
def __init__(self, asset, position):
super().__init__(asset, position)
HeadSoccer.listenKeyEvent('keydown', 'a', self.left)
HeadSoccer.listenKeyEvent('keydown', 'd', self.right)
HeadSoccer.listenKeyEvent('keyup', 'a', self.stop)
HeadSoccer.listenKeyEvent('keyup', 'd', self.stop)
HeadSoccer.listenKeyEvent('keydown', 'w', self.jump)
class Player2(Player):
def __init__(self, asset, position):
super().__init__(asset, position)
HeadSoccer.listenKeyEvent('keydown', 'left arrow', self.left)
HeadSoccer.listenKeyEvent('keydown', 'right arrow', self.right)
HeadSoccer.listenKeyEvent('keyup', 'left arrow', self.stop)
HeadSoccer.listenKeyEvent('keyup', 'right arrow', self.stop)
HeadSoccer.listenKeyEvent('keydown', 'up arrow', self.jump)
class PlayerCover(Sprite):
asset = RectangleAsset(102, 52, noline, white)
def __init__(self, position):
super().__init__(PlayerCover.asset, position)
self.follow = Player1
def step(self):
for x in HeadSoccer.getSpritesbyClass(self.follow):
self.x = x.x-51
self.y = x.y
class Ball(PhysicsObject):
asset = CircleAsset(30, noline, black)
def __init__(self, position):
super().__init__(Ball.asset, position)
self.mag = 42
self.mass = 1
HeadSoccer.listenKeyEvent('keydown', 'p', self.right)
HeadSoccer.listenKeyEvent('keydown', 'i', self.left)
self.scored = False
self.velCollision = [0,0]
self.scoreTime = 0
def right(self, event):
self.velocity[0] += self.mag
def left(self, event):
self.velocity[0] -= self.mag
def bounce(self):
self.velocity[1] *= -1
self.velocity[1] -= GRAVITY
# self.velocity[1] += 50
def step(self):
super().step()
if self.y >= SCREEN_HEIGHT-30 or self.y <= 30:
self.bounce()
if self.x <= 30 or self.x >= SCREEN_WIDTH-30:
self.velocity[0] *= -1
self.velocity[1] += GRAVITY
for x in [Player1, Player2]:
if len(self.collidingWithSprites(x)) > 0 and self.y <= HeadSoccer.getSpritesbyClass(x)[0].y+30:
colliding = self.collidingWithSprites(x)[0]
self.velCollision = self.velocity[:]
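                # 1-D elastic collision applied per axis: with u the ball
                # velocity relative to the player, m_b/m_p the masses,
                #   v_ball'   = (m_b - m_p)/(m_b + m_p) * u + v_player
                #   v_player' =      2*m_b/(m_b + m_p) * u + v_player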
for x in range(2):
self.velocity[x] = (self.mass-colliding.mass)/(self.mass+colliding.mass)*(self.velCollision[x]-colliding.velocity[x])+colliding.velocity[x]
colliding.velocity[x] = (2*self.mass)/(self.mass+colliding.mass)*(self.velCollision[x]-colliding.velocity[x])+colliding.velocity[x]
if len(self.collidingWithSprites(Goal)) > 0:
if self.y <= SCREEN_HEIGHT-230:
if self.x <= 80 or self.x >= SCREEN_WIDTH-80:
self.bounce()
                    print('ball bounced off goal frame')
elif self.scored == False:
for x in self.collidingWithSprites(Goal):
HeadSoccer.getSpritesbyClass(ScoreText)[0].goal(x)
self.scored = True
self.scoreTime = time()
HeadSoccer.getSpritesbyClass(ScoreText)[0].visible = True
if self.scored == True and time()-self.scoreTime >= 2:
self.velocity = [0,0]
self.x = SCREEN_WIDTH/2
self.y = SCREEN_HEIGHT/2
for x in [Player1, Player2]:
player = HeadSoccer.getSpritesbyClass(x)[0]
player.x = SCREEN_WIDTH/4
player.y = SCREEN_HEIGHT
player.velocity = [0,0]
player.x *= 3
HeadSoccer.getSpritesbyClass(ScoreText)[0].visible = False
self.scored = False
class ScoreText(Sprite):
asset = TextAsset('Goal!')
def __init__(self, position):
super().__init__(ScoreText.asset, position)
self.fxcenter = self.fycenter = 0.5
self.visible = False
global score
score = [0,0]
self.placeScore()
def goal(self, Goal):
score[Goal.ident] += 1
self.placeScore()
def placeScore(self):
classDestroy(ScoreNum)
        ScoreNum(TextAsset(str(score[0])), (SCREEN_WIDTH/8,SCREEN_HEIGHT/2))
        ScoreNum(TextAsset(str(score[1])), (SCREEN_WIDTH*(7/8),SCREEN_HEIGHT/2))
class TextSprite(Sprite):
def __init__(self, asset, position):
super().__init__(asset, position)
self.fxcenter = self.fycenter = 0.5
class TitleText(TextSprite):
pass
class FlashingText(TextSprite):
pass
class PlayerColor(TextSprite):
pass
class Instructions(TextSprite):
pass
class ScoreNum(TextSprite):
pass
class TimeText(TextSprite):
pass
class TimeUpText(TextSprite):
pass
class HeadSoccer(App):
def __init__(self):
super().__init__()
self.width = 0.15*SCREEN_WIDTH
self.height = 0.15*SCREEN_HEIGHT
self.buttoncolors = [blue, red, green, yellow, cyan, magenta, orange, purple, gray]
self.buttons = [((x%3-1)/5*SCREEN_WIDTH+SCREEN_WIDTH/2-self.width/2,
(x//3-1)/5*SCREEN_HEIGHT+SCREEN_HEIGHT/2-self.height/2, self.buttoncolors[x]) for x in range(9)]
self.start = 0
#self.go = False
self.frameTime = 0
self.deltaTime = 0
self.gameTime = 90
TitleText(TextAsset('Head Soccer!', width=SCREEN_WIDTH, style='50pt Helvetica'),
(SCREEN_WIDTH/2, SCREEN_HEIGHT/4))
self.listenMouseEvent('mousedown', self.placeButtonsEvent)
#self.intro = True
#self.restart = False
self.transparency = 1
self.direction = 0
self.playercolors = []
self.stage = 'intro'
def placeButtonsEvent(self, event):
self.unlistenMouseEvent('mousedown', self.placeButtonsEvent)
#self.intro = False
self.getSpritesbyClass(TitleText)[0].destroy()
self.getSpritesbyClass(FlashingText)[0].destroy()
self.placeButtons()
def placeButtons(self):
self.stage = 'buttons'
for x in self.buttons:
Button(RectangleAsset(self.width, self.height, thinline, x[2]), (x[0],x[1]))
self.listenMouseEvent('mousedown', self.buttonClick)
for x in [('1',0.15*SCREEN_WIDTH,0.5*SCREEN_HEIGHT), ('2',0.85*SCREEN_WIDTH,0.5*SCREEN_HEIGHT)]:
PlayerColor(TextAsset('Player '+x[0]+' color:', width=128), (x[1],x[2]))
def buttonClick(self, event):
for x in self.buttons:
if x[0] <= event.x <= x[0]+self.width and x[1] <= event.y <= x[1]+self.height:
self.playercolors.append(x[2])
if len(self.playercolors) == 1:
pos = 0.15
else:
pos = 0.85
PlayerColor(RectangleAsset(0.05*SCREEN_WIDTH, 0.05*SCREEN_HEIGHT, thinline, x[2]),
(pos*SCREEN_WIDTH-64,0.5*SCREEN_HEIGHT+15))
if len(self.playercolors) == 1:
Instructions(TextAsset('Press "q" to change colors', width=SCREEN_WIDTH), (SCREEN_WIDTH/2, 50))
self.listenKeyEvent('keydown', 'q', self.changeColors)
else:
self.stage = 'ready'
self.listenKeyEvent('keydown', 'space', self.begin)
#self.prepGame(self.playercolors)
def changeColors(self, event):
self.playercolors = []
for x in self.getSpritesbyClass(PlayerColor)[2:]:
x.destroy()
self.stage = 'buttons'
if len(self.getSpritesbyClass(Instructions)) > 0:
self.getSpritesbyClass(Instructions)[0].destroy()
self.unlistenKeyEvent('keydown', 'space', self.begin)
self.unlistenKeyEvent('keydown', 'q', self.changeColors)
classDestroy(FlashingText)
def begin(self, event):
self.unlistenKeyEvent('keydown', 'space', self.begin)
self.prepGame(self.playercolors)
def prepGame(self, colors):
self.unlistenMouseEvent('mousedown', self.buttonClick)
classDestroy(Button)
classDestroy(PlayerColor)
classDestroy(FlashingText)
classDestroy(Instructions)
Player1(CircleAsset(50, thinline, colors[0]), (SCREEN_WIDTH/4,SCREEN_HEIGHT))
Player2(CircleAsset(50, thinline, colors[1]), (SCREEN_WIDTH*3/4,SCREEN_HEIGHT))
self.getSpritesbyClass(PlayerCover)[1].follow = Player2
Ball((SCREEN_WIDTH/2,SCREEN_HEIGHT/2))
for x in [(0,0,10,SCREEN_HEIGHT), (SCREEN_WIDTH-5,0,10,SCREEN_HEIGHT),
(0,SCREEN_HEIGHT-5,SCREEN_WIDTH+5,10), (0,0,SCREEN_WIDTH+5,10)]:
Border(RectangleAsset(x[2], x[3], noline, black), (x[0],x[1]))
Goal((SCREEN_WIDTH-50,SCREEN_HEIGHT-300))
Goal((0,SCREEN_HEIGHT-300))
ScoreText((SCREEN_WIDTH/2,SCREEN_HEIGHT/2))
self.start = time()
self.timeGame()
self.frameTime = time()
#self.go = True
self.stage = 'play'
def timeGame(self):
remaining = self.gameTime-time()+self.start
if remaining < 0:
remaining = 0
if score[0] > score[1]:
winner = 'Player 1 wins!'
elif score[1] > score[0]:
winner = 'Player 2 wins!'
else:
winner = "It's a draw!"
TimeUpText(TextAsset("Time's up! "+winner, width=SCREEN_WIDTH), (SCREEN_WIDTH/2,SCREEN_HEIGHT/6))
self.getSpritesbyClass(ScoreText)[0].destroy()
#self.go = False
self.transparency = 1
self.direction = 0
#self.restart = True
self.stage = 'restart'
self.listenKeyEvent('keydown', 'space', self.restartGame)
seconds = remaining%60
if seconds < 10:
placeholder = ':0'
else:
placeholder = ':'
TimeText(TextAsset(str(int(remaining//60))+placeholder+str(int(seconds))),
(SCREEN_WIDTH/2,SCREEN_HEIGHT/4))
def restartGame(self, event):
self.unlistenKeyEvent('keydown', 'space', self.restartGame)
#self.restart = False
for x in [Ball, Player1, Player2, PlayerCover, Goal, Border, TimeUpText, TimeText, ScoreNum, FlashingText]:
classDestroy(x)
self.playercolors = []
self.placeButtons()
def flashText(self, text, ypos):
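        # Redraws the prompt each frame with an alpha that ping-pongs between
        # 0 and 1 in steps of 0.01, i.e. a triangle-wave fade.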
classDestroy(FlashingText)
FlashingText(TextAsset(text, width=SCREEN_WIDTH, style='20pt Helvetica',
fill=Color(0x000000, self.transparency)), (SCREEN_WIDTH/2,ypos))
if self.transparency == 1:
self.direction = -0.01
elif self.transparency == 0:
self.direction = 0.01
self.transparency += self.direction
self.transparency = round(self.transparency, 2)
def step(self):
#if self.intro == True:
if self.stage == 'intro':
self.flashText('Click to Continue',SCREEN_HEIGHT/2)
#elif self.restart == True:
elif self.stage == 'restart':
self.flashText('Press Space to Restart',SCREEN_HEIGHT/2)
elif self.stage == 'ready':
self.flashText('Press Space to Begin',SCREEN_HEIGHT*0.85)
#if self.go == True:
if self.stage == 'play':
self.getSpritesbyClass(TimeText)[0].destroy()
self.timeGame()
global deltaTime
deltaTime = time()-self.frameTime
self.frameTime = time()
for x in [Ball, Player1, Player2, PlayerCover]:
for y in self.getSpritesbyClass(x):
y.step()
HeadSoccer().run()
| mit |
ssgeejr/mitropm | browser-ext/third_party/firefox-addon-sdk/python-lib/cuddlefish/tests/test_version.py | 37 | 1044 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import os
import unittest
import shutil
from cuddlefish._version import get_versions
class Version(unittest.TestCase):
def get_basedir(self):
return os.path.join(".test_tmp", self.id())
def make_basedir(self):
basedir = self.get_basedir()
if os.path.isdir(basedir):
here = os.path.abspath(os.getcwd())
assert os.path.abspath(basedir).startswith(here) # safety
shutil.rmtree(basedir)
os.makedirs(basedir)
return basedir
def test_current_version(self):
# the SDK should be able to determine its own version. We don't care
# what it is, merely that it can be computed.
version = get_versions()["version"]
self.failUnless(isinstance(version, str), (version, type(version)))
self.failUnless(len(version) > 0, version)
| gpl-3.0 |
celiafish/VisTrails | vistrails/gui/extras/core/__init__.py | 58 | 1885 | ###############################################################################
##
## Copyright (C) 2011-2014, NYU-Poly.
## Copyright (C) 2006-2011, University of Utah.
## All rights reserved.
## Contact: contact@vistrails.org
##
## This file is part of VisTrails.
##
## "Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are met:
##
## - Redistributions of source code must retain the above copyright notice,
## this list of conditions and the following disclaimer.
## - Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in the
## documentation and/or other materials provided with the distribution.
## - Neither the name of the University of Utah nor the names of its
## contributors may be used to endorse or promote products derived from
## this software without specific prior written permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
## AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
## THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
## PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
## CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
## EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
## PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
## OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
## WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
## OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
## ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
##
###############################################################################
pass | bsd-3-clause |
tedsunnyday/SE-Server | server/lib/mongoengine/base/datastructures.py | 9 | 5007 | import weakref
from mongoengine.common import _import_class
__all__ = ("BaseDict", "BaseList")
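# Both containers keep a weakref.proxy back to the owning (Embedded)Document
# so that every mutating operation can flag its field dirty via
# _mark_as_changed(name); the proxy avoids a document<->field reference cycle.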
class BaseDict(dict):
"""A special dict so we can watch any changes
"""
_dereferenced = False
_instance = None
_name = None
def __init__(self, dict_items, instance, name):
Document = _import_class('Document')
EmbeddedDocument = _import_class('EmbeddedDocument')
if isinstance(instance, (Document, EmbeddedDocument)):
self._instance = weakref.proxy(instance)
self._name = name
return super(BaseDict, self).__init__(dict_items)
def __getitem__(self, *args, **kwargs):
value = super(BaseDict, self).__getitem__(*args, **kwargs)
EmbeddedDocument = _import_class('EmbeddedDocument')
if isinstance(value, EmbeddedDocument) and value._instance is None:
value._instance = self._instance
return value
def __setitem__(self, *args, **kwargs):
self._mark_as_changed()
return super(BaseDict, self).__setitem__(*args, **kwargs)
def __delete__(self, *args, **kwargs):
self._mark_as_changed()
return super(BaseDict, self).__delete__(*args, **kwargs)
def __delitem__(self, *args, **kwargs):
self._mark_as_changed()
return super(BaseDict, self).__delitem__(*args, **kwargs)
def __delattr__(self, *args, **kwargs):
self._mark_as_changed()
return super(BaseDict, self).__delattr__(*args, **kwargs)
def __getstate__(self):
self.instance = None
self._dereferenced = False
return self
def __setstate__(self, state):
self = state
return self
def clear(self, *args, **kwargs):
self._mark_as_changed()
return super(BaseDict, self).clear(*args, **kwargs)
def pop(self, *args, **kwargs):
self._mark_as_changed()
return super(BaseDict, self).pop(*args, **kwargs)
def popitem(self, *args, **kwargs):
self._mark_as_changed()
return super(BaseDict, self).popitem(*args, **kwargs)
def update(self, *args, **kwargs):
self._mark_as_changed()
return super(BaseDict, self).update(*args, **kwargs)
def _mark_as_changed(self):
if hasattr(self._instance, '_mark_as_changed'):
self._instance._mark_as_changed(self._name)
class BaseList(list):
"""A special list so we can watch any changes
"""
_dereferenced = False
_instance = None
_name = None
def __init__(self, list_items, instance, name):
Document = _import_class('Document')
EmbeddedDocument = _import_class('EmbeddedDocument')
if isinstance(instance, (Document, EmbeddedDocument)):
self._instance = weakref.proxy(instance)
self._name = name
return super(BaseList, self).__init__(list_items)
def __getitem__(self, *args, **kwargs):
value = super(BaseList, self).__getitem__(*args, **kwargs)
EmbeddedDocument = _import_class('EmbeddedDocument')
if isinstance(value, EmbeddedDocument) and value._instance is None:
value._instance = self._instance
return value
def __setitem__(self, *args, **kwargs):
self._mark_as_changed()
return super(BaseList, self).__setitem__(*args, **kwargs)
def __delitem__(self, *args, **kwargs):
self._mark_as_changed()
return super(BaseList, self).__delitem__(*args, **kwargs)
def __setslice__(self, *args, **kwargs):
self._mark_as_changed()
return super(BaseList, self).__setslice__(*args, **kwargs)
def __delslice__(self, *args, **kwargs):
self._mark_as_changed()
return super(BaseList, self).__delslice__(*args, **kwargs)
def __getstate__(self):
self.instance = None
self._dereferenced = False
return self
def __setstate__(self, state):
self = state
return self
def append(self, *args, **kwargs):
self._mark_as_changed()
return super(BaseList, self).append(*args, **kwargs)
def extend(self, *args, **kwargs):
self._mark_as_changed()
return super(BaseList, self).extend(*args, **kwargs)
def insert(self, *args, **kwargs):
self._mark_as_changed()
return super(BaseList, self).insert(*args, **kwargs)
def pop(self, *args, **kwargs):
self._mark_as_changed()
return super(BaseList, self).pop(*args, **kwargs)
def remove(self, *args, **kwargs):
self._mark_as_changed()
return super(BaseList, self).remove(*args, **kwargs)
def reverse(self, *args, **kwargs):
self._mark_as_changed()
return super(BaseList, self).reverse(*args, **kwargs)
def sort(self, *args, **kwargs):
self._mark_as_changed()
return super(BaseList, self).sort(*args, **kwargs)
def _mark_as_changed(self):
if hasattr(self._instance, '_mark_as_changed'):
self._instance._mark_as_changed(self._name)
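# Hedged usage sketch: these containers are created by mongoengine field
# descriptors rather than by hand.  Accessing e.g. a ListField returns a
# BaseList bound to its owner document, so that
#
#   doc.tags.append('new')   # BaseList.append -> _mark_as_changed('tags')
#
# flags the field as dirty and a later doc.save() can restrict its update
# to the changed fields.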
| apache-2.0 |
hundeboll/core | lib/oelite/arch.py | 2 | 22674 | import oebakery
from oebakery import die, err, warn, info, debug
import os
import operator
import bb
# Handle all the arhicture related variables.
# To be able to reuse definitions for both build, machine and sdk
# architectures, the usual bitbake variables are not used, but a more
# hierarchical setup using a number of Python dictionaries.
gccspecs = {}
cpuspecs = {
'm68k' : {
'DEFAULT' : {
'wordsize' : '32',
'endian' : 'b',
'elf' : 'ELF 32-bit MSB .*, foobar',
},
'mcf51' : {
'mcpu' : '51',
},
'mcf51ac' : {
'mcpu' : '51ac',
},
'mcf51cn' : {
'mcpu' : '51cn',
},
'mcf51em' : {
'mcpu' : '51em',
},
'mcf51qe' : {
'mcpu' : '51qe',
},
'mcf5206' : {
'mcpu' : '5206',
},
'mcf5206e' : {
'mcpu' : '5206e',
},
'mcf5208' : {
'mcpu' : '5208',
},
'mcf52277' : {
'mcpu' : '52277',
},
},
'powerpc' : {
'DEFAULT' : {
'wordsize' : '32',
'endian' : 'b',
'elf' : 'ELF 32-bit MSB .*, PowerPC or cisco 4500',
},
'603e' : {
'mcpu' : '603e',
'float' : 'hard',
},
'e300c1' : {
'mcpu' : 'e300c1',
'float' : 'hard',
},
'e300c2' : {
'mcpu' : 'e300c2',
},
'e300c3' : {
'mcpu' : 'e300c3',
'float' : 'hard',
},
'e300c4' : {
'mcpu' : 'e300c4',
'float' : 'hard',
},
},
'powerpc64' : {
'DEFAULT' : {
'wordsize' : '64',
'endian' : 'b',
},
},
'arm' : {
'DEFAULT' : {
'wordsize' : '32',
'endian' : 'l',
'elf' : 'ELF 32-bit LSB .*, ARM',
'abi flags' : [
['arm abi', 'eabi', {
'eabi' : {
'os' : 'eabi',
},
# Currently, OE-lite does only support EABI for
# ARM. When/if OABI is added, os should be kept as
# linux-gnu for OABI
}
],
]
},
'920t' : {
'mcpu' : 'arm920t',
'mtune' : 'arm920t',
},
'926ejs' : {
'march' : 'armv5te',
'mcpu' : 'arm926ej-s',
'mtune' : 'arm926ej-s',
},
'1176jzfs' : {
'march' : 'armv6',
'mcpu' : 'arm1176jzf-s',
'mtune' : 'arm1176jzf-s',
'abi flags' : [
['float abi', 'hard', {
'hard' : {
'float' : 'hard',
'fpu' : 'vfp',
},
'softfp' : {
'float' : 'softfp',
'fpu' : 'vfp',
},
'soft' : {
'float' : 'soft',
},
}
]
]
},
'cortexa7' : {
'mcpu' : 'cortex-a7',
'mtune' : 'cortex-a7',
'abi flags' : [
['float abi', 'softfp', {
'hard' : {
'float' : 'hard',
'fpu' : 'neon-vfpv4',
'vendor' : 'hf',
},
'softfp' : {
'float' : 'softfp',
'fpu' : 'neon-vfpv4',
'vendor' : '',
},
'soft' : {
'float' : 'soft',
'vendor' : 'soft',
},
}
],
['instruction set', 'thumb', {
'arm' : { },
'thumb' : {
'thumb' : '1',
'vendor' : 't',
},
}
],
]
},
'cortexa8' : {
'mcpu' : 'cortex-a8',
'mtune' : 'cortex-a8',
'abi flags' : [
['float abi', 'hard', {
'hard' : {
'float' : 'hard',
'fpu' : 'neon',
'vendor' : 'neon',
},
'softfp' : {
'float' : 'softfp',
'fpu' : 'neon',
'vendor' : 'neonsfp',
},
'soft' : {
'float' : 'soft',
'vendor' : 'sfp',
},
}
],
['instruction set', 'thumb', {
'arm' : {
'mode' : 'arm',
},
'thumb' : {
'mode' : 'thumb',
'vendor' : 't',
},
}
],
]
},
'cortexa9' : {
'mcpu' : 'cortex-a9',
'mtune' : 'cortex-a9',
'abi flags' : [
['float abi', 'hard', {
'hard' : {
'float' : 'hard',
'fpu' : 'neon',
'vendor' : 'neon',
},
'softfp' : {
'float' : 'softfp',
'fpu' : 'neon',
'vendor' : 'neonsfp',
},
'soft' : {
'float' : 'soft',
'vendor' : 'sfp',
},
}
],
['instruction set', 'thumb', {
'arm' : {
'mode' : 'arm',
},
'thumb' : {
'mode' : 'thumb',
'vendor' : 't',
},
}
],
]
},
},
'armeb' : {
'DEFAULT' : {
'wordsize' : '32',
'endian' : 'b',
},
},
'avr32' : {
'DEFAULT' : {
'wordsize' : '32',
'endian' : 'b',
},
},
'mips' : {
'DEFAULT' : {
'wordsize' : '32',
'endian' : 'b',
},
},
'mipsel' : {
'DEFAULT' : {
'wordsize' : '32',
'endian' : 'l',
},
},
'sparc' : {
'DEFAULT' : {
'wordsize' : '32',
'endian' : 'b',
},
},
'bfin' : {
'DEFAULT' : {
'wordsize' : '32',
'endian' : 'l',
},
},
'sh3' : {
'DEFAULT' : {
'wordsize' : '32',
'endian' : 'l',
},
},
'sh4' : {
'DEFAULT' : {
'wordsize' : '32',
'endian' : 'l',
},
},
'i386' : {
'DEFAULT' : {
'wordsize' : '32',
'endian' : 'l',
'elf' : 'ELF 32-bit LSB .*, Intel 80386',
'march' : 'i386',
'fpu' : '387',
'float' : 'hard',
},
},
'i486' : {
'DEFAULT' : {
'wordsize' : '32',
'endian' : 'l',
'elf' : 'ELF 32-bit LSB .*, Intel 80386',
'march' : 'i486',
'fpu' : '387',
'float' : 'hard',
},
'winchipc6' : {
'march' : 'winchip-c6',
},
'winchip2' : {
'march' : 'winchip2',
},
},
'i586' : {
'DEFAULT' : {
'wordsize' : '32',
'endian' : 'l',
'elf' : 'ELF 32-bit LSB .*, Intel 80386',
'march' : 'i586',
'fpu' : '387',
'float' : 'hard',
},
'mmx' : {
'march' : 'pentium-mmx',
},
'k6' : {
'march' : 'k6',
},
'k62' : {
'march' : 'k6-2',
},
'geode' : {
'march' : 'geode',
},
'c3' : {
'march' : 'c3',
},
'c32' : {
'march' : 'c3-2',
},
},
'i686' : {
'DEFAULT' : {
'wordsize' : '32',
'endian' : 'l',
'elf' : 'ELF 32-bit LSB .*, Intel 80386',
'march' : 'i686',
'fpu' : '387',
'float' : 'hard',
},
'mmx' : {
'march' : 'pentium2',
},
'sse' : {
'march' : 'pentium3',
'fpu' : 'sse',
},
'sse2' : {
'march' : 'pentium-m',
'fpu' : 'sse',
},
'athlon' : {
'march' : 'athlon',
},
'athlon4' : {
'march' : 'athlon-4',
'fpu' : 'sse',
},
},
'i786' : {
'DEFAULT' : {
'wordsize' : '32',
'endian' : 'l',
'elf' : 'ELF 32-bit LSB .*, Intel 80386',
'march' : 'pentium4',
'fpu' : 'sse',
'float' : 'hard',
},
'sse3' : {
'march' : 'prescott',
},
},
'x86_64' : {
'DEFAULT' : {
'wordsize' : '64',
'endian' : 'l',
'elf' : 'ELF 64-bit LSB .*, x86-64',
'march' : 'opteron',
'fpu' : 'sse',
'float' : 'hard',
},
'sse3' : {
'march' : 'k8-sse3',
},
'nocona' : {
'march' : 'nocona',
},
'core2' : {
'march' : 'core2',
},
'atom' : {
'march' : 'atom',
},
'amdfam10' : {
'march' : 'amdfam10',
},
},
'ia64' : {
'DEFAULT' : {
'wordsize' : '64',
'endian' : 'l',
},
},
}
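# cpumap maps SoC/part names to canonical cpuspecs entries.  A value is either
# a vendor alias string, or a tuple whose first element is a vendor (or a
# (cpu, vendor) pair), optionally followed by a CPU family name or a tuple of
# family names recorded in *_CPU_FAMILIES (see arch_fixup below).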
cpumap = {
'powerpc' : {
'mpc5121e' : 'e300c4',
'mpc5125' : 'e300c4',
'mpc8313' : 'e300c3',
'mpc8313e' : 'e300c3',
'mpc8360' : 'e300c1',
'mpc8270' : 'g2le',
},
'arm' : {
'at91rm9200' : '920t',
'at91sam9260' : '926ejs',
'omap3520' : ('cortexa8', ('omap3', 'omap')),
'omap3530' : ('cortexa8', ('omap3', 'omap')),
'omap4430' : ('cortexa9neon', ('omap4', 'omap')),
'omap4440' : ('cortexa9neon', ('omap4', 'omap')),
'imx21' : ('926ejs', 'imx'),
'imx23' : ('926ejs', 'mxs'),
'imx25' : ('926ejs', 'imx'),
'imx27' : ('926ejs', 'imx'),
'imx28' : ('926ejs', 'mxs'),
'imx280' : ('926ejs', ('imx28', 'mxs')),
'imx281' : ('926ejs', ('imx28', 'mxs')),
'imx283' : ('926ejs', ('imx28', 'mxs')),
'imx285' : ('926ejs', ('imx28', 'mxs')),
'imx286' : ('926ejs', ('imx28', 'mxs')),
'imx287' : ('926ejs', ('imx28', 'mxs')),
'imx31' : ('1136jfs', 'imx'),
'imx35' : ('1136jfs', 'imx'),
'imx51' : ('cortexa8', 'imx'),
'imx512' : ('cortexa8', ('imx51', 'imx')),
'imx513' : ('cortexa8', ('imx51', 'imx')),
'imx514' : ('cortexa8', ('imx51', 'imx')),
'imx515' : ('cortexa8', ('imx51', 'imx')),
'imx516' : ('cortexa8', ('imx51', 'imx')),
'imx53' : ('cortexa8', 'imx'),
'imx534' : ('cortexa8', ('imx53', 'imx')),
'imx535' : ('cortexa8', ('imx53', 'imx')),
'imx536' : ('cortexa8', ('imx53', 'imx')),
'imx537' : ('cortexa8', ('imx53', 'imx')),
'imx538' : ('cortexa8', ('imx53', 'imx')),
'imx6' : ('cortexa9', 'imx'),
'ls1021a' : ('cortexa7', ('ls102x', 'ls1', 'layerscape')),
'imx6sl' : ('cortexa9', ('imx6', 'imx')),
'imx6dl' : ('cortexa9', ('imx6', 'imx')),
'imx6q' : ('cortexa9', ('imx6', 'imx')),
},
'x86' : {
'celeronm575' : (('i686', 'sse2'),),
},
}
osspecs = {
'mingw32' : {
'exeext' : '.exe',
'elf' : 'PE32 .* for MS Windows .* Intel 80386 32-bit',
},
}
def init(d):
sanity(d)
gcc_version = d.get('GCC_VERSION')
arch_set_build_arch(d, gcc_version)
arch_set_cross_arch(d, 'MACHINE', gcc_version)
arch_set_cross_arch(d, 'SDK', gcc_version)
return
def sanity(d):
import bb
fail = False
sdk_cpu = d.get("SDK_CPU")
if not sdk_cpu:
bb.error("SDK_CPU not set")
fail = True
sdk_os = d.get("SDK_OS")
if not sdk_os:
bb.error("SDK_OS not set")
fail = True
machine = d.get("MACHINE")
machine_cpu = d.get("MACHINE_CPU")
machine_os = d.get("MACHINE_OS")
if machine:
pass
elif machine_cpu and machine_os:
pass
elif machine_cpu:
bb.error("MACHINE_CPU set, but not MACHINE_OS")
fail = True
elif machine_os:
bb.error("MACHINE_OS set, but not MACHINE_CPU")
fail = True
else:
bb.error("MACHINE or MACHINE_CPU and MACHINE_OS must be set")
fail = True
if fail:
bb.fatal("Invalid MACHINE and/or SDK specification\n"
"Check your conf/local.conf file and/or machine and distro config files.")
return
def update(d):
gcc_version = d.get('GCC_VERSION')
arch_update(d, 'BUILD', gcc_version)
arch_update(d, 'HOST', gcc_version)
arch_update(d, 'TARGET', gcc_version)
return
def arch_set_build_arch(d, gcc_version):
try:
guess = globals()['config_guess_cache']
except KeyError:
#bb.debug("config.guess")
script = arch_find_script(d, 'config.guess')
try:
guess = arch_split(os.popen(script).readline().strip())
except OSError, e:
#bb.fatal('config.guess failed: '+e)
return None
config_guess_cache = guess
globals()['config_guess_cache'] = config_guess_cache
# Replace the silly 'pc' vendor with 'unknown' to yield a result
# comparable with arch_cross().
if guess[1] == 'pc':
guess[1] = 'unknown'
guess[1] = "build_" + guess[1]
d.set('BUILD_ARCH', '-'.join(guess))
return
def arch_set_cross_arch(d, prefix, gcc_version):
cross_arch = '%s-%s'%(d.get(prefix+'_CPU', True),
d.get(prefix+'_OS', True))
cross_arch = arch_config_sub(d, cross_arch)
abis = (d.get(prefix+'_ABI', True) or "").split()
if prefix == "MACHINE":
vendor_prefix = None
else:
vendor_prefix = prefix.lower() + "_"
cross_arch = arch_fixup(cross_arch, gcc_version, abis, vendor_prefix)
d[prefix+'_ARCH'] = cross_arch[0]
if cross_arch[1]:
d[prefix+'_CPU_FAMILIES'] = " ".join(cross_arch[1])
return
def arch_update(d, prefix, gcc_version):
arch = d.get(prefix+'_ARCH', True)
gccspec = arch_gccspec(arch, gcc_version)
(cpu, vendor, os) = arch_split(arch)
d[prefix+'_CPU'] = cpu
d[prefix+'_VENDOR'] = vendor
d[prefix+'_OS'] = os
ost = os.split('-',1)
if len(ost) > 1:
d[prefix+'_BASEOS'] = ost[0]
else:
d[prefix+'_BASEOS'] = ""
for spec in gccspec:
if spec in ("abi flags"):
continue
d[prefix+'_'+spec.upper()] = gccspec[spec]
return
def arch_fixup(arch, gcc, abis, vendor_prefix=None):
import re
gccv=re.search('(\d+)[.](\d+)[.]?',gcc).groups()
(cpu, vendor, os) = arch_split(arch)
if vendor == 'pc':
vendor = 'unknown'
families = []
if cpu in cpumap and vendor in cpumap[cpu]:
mapto = cpumap[cpu][vendor]
families = [vendor]
if isinstance(mapto, basestring):
vendor = mapto
else:
assert isinstance(mapto, tuple) and len(mapto) in (1, 2)
if isinstance(mapto[0], basestring):
vendor = mapto[0]
else:
assert isinstance(mapto[0], tuple) and len(mapto[0]) == 2
cpu = mapto[0][0]
vendor = mapto[0][1]
if len(mapto) > 1:
if isinstance(mapto[1], basestring):
families.append(mapto[1])
else:
assert isinstance(mapto[1], tuple)
families.extend(mapto[1])
families.append(vendor)
if cpu == "powerpc":
if vendor in ('e300c1', 'e300c4'):
vendor = '603e'
if vendor in ('e300c2', 'e300c3'):
if gccv[0] < 4 or (gccv[0] == 4 and gccv[1] < 4):
vendor = '603e'
if cpu in cpuspecs and vendor in cpuspecs[cpu]:
pass
elif vendor == 'unknown':
pass
else:
bb.fatal("unknown cpu vendor: %s"%vendor)
vendor = 'unknown'
# Merge DEFAULT and vendor abi_flags, keeping DEFAULT flags first
abi_flags = []
if "DEFAULT" in cpuspecs[cpu] and 'abi flags' in cpuspecs[cpu]["DEFAULT"]:
abi_flags += cpuspecs[cpu]["DEFAULT"]["abi flags"]
if vendor in cpuspecs[cpu] and 'abi flags' in cpuspecs[cpu][vendor]:
for abi_flag in cpuspecs[cpu][vendor]['abi flags']:
try:
flag_index = map(operator.itemgetter(0), abi_flags).index(
abi_flag)
abi_flags[flag_index][1] = abi_flag[1]
for flag_value in abi_flag[2].items():
abi_flags[flag_index][2][flag_value[0]] = flag_value[1]
except ValueError:
abi_flags.append(abi_flag)
if abi_flags:
cpuspec = cpuspecs[cpu][vendor]
extra_vendor = []
extra_os = []
for abi_flag in abi_flags:
diff = set(abis).intersection(set(abi_flag[2]))
if len(diff) > 1:
bb.fatal("ABI with %s is invalid, only one of %s should be given"
% (', '.join(diff), ', '.join(abi_flag[2].keys())))
if len(diff) == 1:
abi_select = diff.pop()
abis.remove(abi_select)
else:
abi_select = abi_flag[1]
if 'vendor' in abi_flag[2][abi_select]:
extra_vendor.append(abi_flag[2][abi_select].pop('vendor'))
if 'os' in abi_flag[2][abi_select]:
extra_os.append(abi_flag[2][abi_select].pop('os'))
cpuspec.update(abi_flag[2][abi_select])
vendor = vendor + ''.join(extra_vendor)
os = os + ''.join(extra_os)
cpuspecs[cpu].update({vendor : cpuspec})
if len(abis) > 0:
bb.fatal("ABI %s not valid for arch %s-%s-%s" %(', '.join(abis), cpu,vendor,os))
if vendor_prefix:
vendor = vendor_prefix + vendor
return ('-'.join((cpu, vendor, os)), families)
def arch_gccspec(arch, gcc):
import re
if gcc in gccspecs:
if arch in gccspecs[gcc]:
return gccspecs[gcc][arch]
else:
gccspecs[gcc] = {}
gccv=re.search('(\d+)[.](\d+)[.]?',gcc).groups()
(cpu, vendor, os) = arch_split(arch)
gccspec = {}
if cpu in cpuspecs:
gccspec.update(cpuspecs[cpu]['DEFAULT'])
if cpu in cpuspecs and vendor in cpuspecs[cpu]:
gccspec.update(cpuspecs[cpu][vendor])
if os in osspecs:
gccspec.update(osspecs[os])
try:
if gccspec['mcpu'] in ('e300c1', 'e300c4'):
gccspec['mcpu'] = '603e'
if gccspec['mtune'] in ('e300c1', 'e300c4'):
gccspec['mtune'] = '603e'
if gccspec['mcpu'] in ('e300c2', 'e300c3'):
if gccv[0] < 4 or (gccv[0] == 4 and gccv[1] < 4):
gccspec['mcpu'] = '603e'
if gccspec['mtune'] in ('e300c2', 'e300c3'):
if gccv[0] < 4 or (gccv[0] == 4 and gccv[1] < 4):
gccspec['mtune'] = '603e'
except KeyError, e:
#bb.debug("KeyError in arch_gccspec: ")
pass
gccspecs[gcc][arch] = gccspec
return gccspec
def arch_config_sub(d, arch):
try:
config_sub_cache = globals()['config_sub_cache']
except KeyError:
config_sub_cache = {}
globals()['config_sub_cache'] = config_sub_cache
try:
canonical_arch = config_sub_cache[arch]
except KeyError:
script = arch_find_script(d, 'config.sub')
try:
bb.debug("%s %s"%(script, arch))
canonical_arch = os.popen("%s %s"%(script, arch)).readline().strip()
config_sub_cache[arch] = canonical_arch
except OSError, e:
bb.error("config.sub(%s) failed: %s"%(arch, e))
return arch
return canonical_arch
def arch_split(arch):
archtuple = arch.split('-', 2)
if len(archtuple) == 3:
return archtuple
else:
bb.error('invalid arch string: '+arch)
return None
def arch_find_script(d, filename):
try:
scripts = globals()['arch_scripts']
except KeyError:
scripts = {}
globals()['arch_scripts'] = scripts
if not filename in scripts:
for oepath in d.get('OEPATH', 1).split(':'):
filepath = os.path.join(oepath, 'scripts', filename)
if os.path.isfile(filepath):
#bb.debug("found %s: %s"%(filename, filepath))
scripts[filename] = filepath
break
if not filename in scripts:
bb.error('could not find script: %s'%filename)
return scripts[filename]
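# Hedged worked example of the canonicalisation above (values traced from the
# cpumap/cpuspecs tables; exact output depends on those tables and on
# config.sub):
#
#   arch_fixup('arm-imx6-linux-gnu', '4.9.2', ['hard', 'thumb'])
#   # -> ('arm-cortexa9neont-linux-gnueabi', ['imx6', 'imx', 'cortexa9'])
#
# 'imx6' maps to the cortexa9 cpuspec, the 'hard'/'thumb' ABI flags append
# the 'neon' and 't' vendor suffixes, and the default ARM EABI flag appends
# 'eabi' to the OS.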
| mit |
gsbullmer/restaurant-menu-directory | menus/final_project.py | 1 | 16044 | from flask import Flask, render_template, url_for, request, redirect, flash, jsonify
from sqlalchemy.sql import func
app = Flask(__name__)
# imports for CRUD Operations
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from database_setup import Restaurant, Base, MenuItem, User
# imports for OAuth
from flask import session as login_session
import random, string
from oauth2client.client import flow_from_clientsecrets, FlowExchangeError
import httplib2, json, requests
from flask import make_response
CLIENT_ID = json.loads(
open('client_secrets.json', 'r').read())['web']['client_id']
APPLICATION_NAME = "Restaurant Menu Application"
# Create session and connect to DB
engine = create_engine('sqlite:///restaurantmenuwithusers.db')
Base.metadata.bind = engine
DBSession = sessionmaker(bind = engine)
session = DBSession()
@app.route('/')
@app.route('/restaurant/')
def showRestaurants():
restaurants = session.query(Restaurant).order_by('name').all()
if 'user_id' not in login_session:
current_user = None
else:
current_user = getUserInfo(login_session['user_id'])
return render_template("restaurants.html", restaurants = restaurants, current_user = current_user, getUserInfo = getUserInfo)
@app.route('/restaurant/new/', methods = ['GET', 'POST'])
def newRestaurant():
if 'username' not in login_session:
return redirect(url_for('showLogin'))
if request.method == 'POST':
newRestaurant = Restaurant(name = request.form['name'], user_id = login_session['user_id'])
session.add(newRestaurant)
session.commit()
flash("New restaurant created")
return redirect(url_for('showRestaurants'))
else:
return render_template("newRestaurant.html")
@app.route('/restaurant/<int:restaurant_id>/edit/', methods = ['GET', 'POST'])
def editRestaurant(restaurant_id):
if 'username' not in login_session:
return redirect(url_for('showLogin'))
restaurant = session.query(Restaurant).filter_by(id = restaurant_id).one()
if login_session['user_id'] != restaurant.user_id:
flash("You don't have permission to edit that restaurant.")
return redirect(url_for('showRestaurants'))
if request.method == 'POST':
restaurant.name = request.form['name']
session.add(restaurant)
session.commit()
flash("Restaurant updated")
return redirect(url_for('showRestaurants'))
else:
return render_template("editRestaurant.html", restaurant = restaurant)
@app.route('/restaurant/<int:restaurant_id>/delete/', methods = ['GET', 'POST'])
def deleteRestaurant(restaurant_id):
if 'username' not in login_session:
return redirect(url_for('showLogin'))
restaurant = session.query(Restaurant).filter_by(id = restaurant_id).one()
if login_session['user_id'] != restaurant.user_id:
flash("You don't have permission to delete that restaurant.")
return redirect(url_for('showRestaurants'))
if request.method == 'POST':
session.delete(restaurant)
session.commit()
flash("Restaurant deleted")
return redirect(url_for('showRestaurants'))
else:
return render_template("deleteRestaurant.html", restaurant = restaurant)
@app.route('/restaurant/<int:restaurant_id>/')
@app.route('/restaurant/<int:restaurant_id>/menu/')
def showMenu(restaurant_id):
restaurant = session.query(Restaurant).filter_by(id = restaurant_id).one()
items = session.query(MenuItem).filter_by(restaurant_id = restaurant.id).order_by('name').all()
courses = []
for i in items:
courses.append(i.course)
courses = list(set(courses))
courses.sort()
if 'user_id' not in login_session:
current_user = None
else:
current_user = getUserInfo(login_session['user_id'])
return render_template("menu.html", restaurant = restaurant, items = items, courses = courses, current_user = current_user, getUserInfo = getUserInfo)
@app.route('/restaurant/<int:restaurant_id>/menu/new/', methods = ['GET', 'POST'])
def newMenuItem(restaurant_id):
if 'username' not in login_session:
return redirect(url_for('showLogin'))
    restaurant = session.query(Restaurant).filter_by(id = restaurant_id).one()
    if request.method == 'POST':
        newItem = MenuItem(name = request.form['name'], price = request.form["price"], description = request.form["description"], course = request.form["course"], restaurant_id = restaurant_id, user_id = restaurant.user_id)
session.add(newItem)
session.commit()
flash("New menu item created")
return redirect(url_for('showMenu', restaurant_id = restaurant_id))
else:
return render_template("newMenuItem.html", restaurant_id = restaurant_id)
@app.route('/restaurant/<int:restaurant_id>/menu/<int:menu_id>/edit/', methods = ['GET', 'POST'])
def editMenuItem(restaurant_id, menu_id):
if 'username' not in login_session:
return redirect(url_for('showLogin'))
item = session.query(MenuItem).filter_by(id = menu_id).one()
if login_session['user_id'] != item.user_id :
flash("You don't have permission to edit that menu item.")
return redirect(url_for('showMenu', restaurant_id = restaurant_id))
if request.method == "POST":
if request.form["name"]:
item.name = request.form["name"]
if request.form["price"]:
item.price = request.form["price"]
if request.form["description"]:
item.description = request.form["description"]
if request.form["course"]:
item.course = request.form["course"]
session.add(item)
session.commit()
flash("Menu item updated")
return redirect(url_for("showMenu", restaurant_id = restaurant_id))
else:
return render_template("editMenuItem.html", restaurant_id = restaurant_id, menu_id = menu_id, item = item)
@app.route('/restaurant/<int:restaurant_id>/menu/<int:menu_id>/delete/', methods = ['GET', 'POST'])
def deleteMenuItem(restaurant_id, menu_id):
if 'username' not in login_session:
return redirect(url_for('showLogin'))
item = session.query(MenuItem).filter_by(id = menu_id).one()
if login_session['user_id'] != item.user_id :
flash("You don't have permission to delete that menu item.")
return redirect(url_for('showMenu', restaurant_id = restaurant_id))
if request.method == "POST":
session.delete(item)
session.commit()
flash("Menu item deleted")
return redirect(url_for("showMenu", restaurant_id = restaurant_id))
else:
return render_template("deleteMenuItem.html", restaurant_id = restaurant_id, item = item)
# Making an API Endpoint (GET Request)
@app.route('/restaurant/JSON')
def restaurantsJSON():
restaurants = session.query(Restaurant).all()
return jsonify(Restaurants = [r.serialize for r in restaurants])
@app.route('/restaurant/<int:restaurant_id>/menu/JSON')
def restaurantMenuJSON(restaurant_id):
restaurant = session.query(Restaurant).filter_by(id = restaurant_id).one()
items = session.query(MenuItem).filter_by(restaurant_id = restaurant.id).all()
return jsonify(MenuItems = [i.serialize for i in items])
@app.route('/restaurant/<int:restaurant_id>/menu/<int:menu_id>/JSON')
def menuItemJSON(restaurant_id, menu_id):
item = session.query(MenuItem).filter_by(id = menu_id).one()
return jsonify(MenuItem = item.serialize)
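# Hedged usage sketch: with the dev server running (see the __main__ block
# below), the read-only JSON endpoints can be exercised with e.g.
#
#   curl http://localhost:5000/restaurant/JSON
#   curl http://localhost:5000/restaurant/1/menu/JSON
#   curl http://localhost:5000/restaurant/1/menu/2/JSON
#
# the restaurant and menu ids are illustrative.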
@app.route('/login')
def showLogin():
state = "".join(random.choice(string.ascii_uppercase + string.digits) for x in xrange(32))
login_session['state'] = state
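    # 'state' is an anti-forgery nonce: gconnect/fbconnect reject any request
    # whose state argument does not match the value stored in the session.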
return render_template("login.html", STATE = state)
@app.route('/gconnect', methods = ["POST"])
def gconnect():
# Validate state token
if request.args.get('state') != login_session['state']:
response = make_response(json.dumps('Invalid state token'), 401)
response.headers['Content-Type'] = 'application/json'
return response
# Obtain authorization code
code = request.data
try:
# Upgrade the authorization code into a credentials object
oauth_flow = flow_from_clientsecrets('client_secrets.json',
scope = '')
oauth_flow.redirect_uri = 'postmessage'
credentials = oauth_flow.step2_exchange(code)
except FlowExchangeError:
response = make_response(json.dumps('Failed to upgrade the authorization code.'), 401)
response.headers['Content-Type'] = 'application/json'
return response
# Check that the access token is valid
access_token = credentials.access_token
url = ('https://www.googleapis.com/oauth2/v1/tokeninfo?access_token=%s' % access_token)
h = httplib2.Http()
result = json.loads(h.request(url, "GET")[1])
# If there was an error in the access token info, abort
if result.get('error') is not None:
response = make_response(json.dumps(result.get('error')), 500)
response.headers['Content-Type'] = 'application/json'
return response
    # Verify that the access token is used for the intended user
gplus_id = credentials.id_token['sub']
if result['user_id'] != gplus_id:
response = make_response(json.dumps("Token's user ID doesn't match given user ID."), 401)
response.headers['Content-Type'] = 'application/json'
return response
# Verify that the access token is valid for this app
if result['issued_to'] != CLIENT_ID:
response = make_response(json.dumps("Token's client ID doesn't match app's ID."), 401)
print "Token's client ID doesn't match app's ID."
response.headers['Content-Type'] = 'application/json'
return response
# Check to see if user is already logged in
stored_credentials = login_session.get('credentials')
stored_gplus_id = login_session.get('gplus_id')
if stored_credentials is not None and gplus_id == stored_gplus_id:
response = make_response(json.dumps("Current user is already connected"), 200)
response.headers['Content-Type'] = 'application/json'
# return response
# Store the access token in the session for later use
login_session['provider'] = 'google'
login_session['credentials'] = credentials
login_session['gplus_id'] = gplus_id
# Get user info
userinfo_url = "https://www.googleapis.com/oauth2/v1/userinfo"
params = {'access_token': credentials.access_token, 'alt':'json'}
answer = requests.get(userinfo_url, params = params)
data = json.loads(answer.text)
login_session['username'] = data["name"]
login_session['picture'] = data["picture"]
login_session['email'] = data["email"]
user_id = getUserID(login_session['email'])
if not user_id:
user_id = createUser(login_session)
login_session['user_id'] = user_id
output = ''
output += '<h1>Welcome, '
output += login_session['username']
output += '!</h1>'
output += '<img src="'
output += login_session['picture']
output += ' " style = "width: 300px; height: 300px;border-radius: 150px;-webkit-border-radius: 150px;-moz-border-radius: 150px;"> '
flash("you are now logged in as %s" % login_session['username'])
print "done!"
return output
# DISCONNECT - Revoke a current user's token and reset their login_session
@app.route('/gdisconnect')
def gdisconnect():
# Only disconnect a connected user
credentials = login_session.get('credentials')
if credentials is None:
response = make_response(
json.dumps('Current user not connected.'), 401)
response.headers['Content-Type'] = 'application/json'
return response
access_token = credentials.access_token
url = 'https://accounts.google.com/o/oauth2/revoke?token=%s' % access_token
h = httplib2.Http()
result = h.request(url, 'GET')[0]
if result['status'] == '200':
response = make_response(json.dumps('Successfully disconnected.'), 200)
response.headers['Content-Type'] = 'application/json'
return response
else:
# For whatever reason, the given token was invalid
response = make_response(
json.dumps('Failed to revoke token for given user.', 400))
response.headers['Content-Type'] = 'application/json'
return response
@app.route('/fbconnect', methods=["POST"])
def fbconnect():
if request.args.get('state') != login_session['state']:
response = make_response(json.dumps("Invalid state parameter."), 401)
response.headers['Content-Type'] = 'application/json'
return response
access_token = request.data
app_id = json.loads(open('fb_client_secrets.json', 'r').read())['web']['app_id']
app_secret = json.loads(open('fb_client_secrets.json', 'r').read())['web']['app_secret']
    url = 'https://graph.facebook.com/oauth/access_token?grant_type=fb_exchange_token&client_id=%s&client_secret=%s&fb_exchange_token=%s' % (app_id, app_secret, access_token)
h = httplib2.Http()
result = h.request(url, "GET")[1]
userinfo_url = "https://graph.facebook.com/v2.4/me"
token = result.split("&")[0]
url = 'https://graph.facebook.com/v2.4/me?%s&fields=name,id,email' % token
h = httplib2.Http()
result = h.request(url, "GET")[1]
data = json.loads(result)
login_session['provider'] = 'facebook'
login_session['username'] = data["name"]
login_session['email'] = data["email"]
login_session['facebook_id'] = data["id"]
url = 'https://graph.facebook.com/v2.4/me/picture?%s&redirect=0&height=200&width=200' % token
h = httplib2.Http()
result = h.request(url, "GET")[1]
data = json.loads(result)
login_session['picture'] = data["data"]["url"]
user_id = getUserID(login_session['email'])
if not user_id:
user_id = createUser(login_session)
login_session['user_id'] = user_id
output = ''
output += '<h1>Welcome, '
output += login_session['username']
output += '!</h1>'
output += '<img src="'
output += login_session['picture']
output += ' " style = "width: 300; height: 300px;border-radius: 150px;-webkit-border-radius: 150px;-moz-border-radius: 150px;"> '
flash("you are now logged in as %s" % login_session['username'])
print "done!"
return output
@app.route('/fbdisconnect')
def fbdisconnect():
facebook_id = login_session['facebook_id']
url = 'https://graph.facebook.com/%s/permissions' % facebook_id
h = httplib2.Http()
result = h.request(url, 'DELETE')[0]
return "You have been logged out."
@app.route('/disconnect')
def disconnect():
if 'provider' in login_session:
if login_session['provider'] == 'google':
gdisconnect()
del login_session['gplus_id']
del login_session['credentials']
if login_session['provider'] == 'facebook':
fbdisconnect()
del login_session['facebook_id']
del login_session['username']
del login_session['email']
del login_session['picture']
del login_session['user_id']
del login_session['provider']
flash("You have successfully been logged out.")
return redirect(url_for('showRestaurants'))
else:
flash("You were not logged in to begin with!")
return redirect(url_for('showRestaurants'))
# User helper functions
def getUserID(email):
try:
user = session.query(User).filter_by(email = email).one()
return user.id
except:
return None
def getUserInfo(user_id):
user = session.query(User).filter_by(id = user_id).one()
return user
def createUser(login_session):
newUser = User(name = login_session['username'], email = login_session['email'], picture = login_session['picture'])
session.add(newUser)
session.commit()
user = session.query(User).filter_by(email = login_session['email']).one()
return user.id
if __name__ == '__main__':
app.secret_key = 'super secret key'
app.debug = True
app.run(host = '0.0.0.0', port = 5000)
| mit |