text
stringlengths 6
947k
| repo_name
stringlengths 5
100
| path
stringlengths 4
231
| language
stringclasses 1
value | license
stringclasses 15
values | size
int64 6
947k
| score
float64 0
0.34
|
|---|---|---|---|---|---|---|
#! /usr/bin/python
# Python ctypes bindings for VLC
#
# Copyright (C) 2009-2012 the VideoLAN team
# $Id: $
#
# Authors: Olivier Aubert <contact at olivieraubert.net>
# Jean Brouwers <MrJean1 at gmail.com>
# Geoff Salmon <geoff.salmon at gmail.com>
#
# This library is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation; either version 2.1 of the
# License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301 USA
"""This module provides bindings for the LibVLC public API, see
U{http://wiki.videolan.org/LibVLC}.
You can find the documentation and a README file with some examples
at U{http://www.advene.org/download/python-ctypes/}.
Basically, the most important class is L{Instance}, which is used
to create a libvlc instance. From this instance, you then create
L{MediaPlayer} and L{MediaListPlayer} instances.
Alternatively, you may create instances of the L{MediaPlayer} and
L{MediaListPlayer} class directly and an instance of L{Instance}
will be implicitly created. The latter can be obtained using the
C{get_instance} method of L{MediaPlayer} and L{MediaListPlayer}.
"""
import ctypes
from ctypes.util import find_library
import os
import sys
import functools
# Used by EventManager in override.py
from inspect import getargspec
__version__ = "N/A"
build_date = "Wed Apr 1 21:28:00 2015"

# Python 2/3 compatibility layer: normalize the text/bytes type names so
# the rest of the module can use str/unicode/bytes/basestring uniformly,
# and provide converters between text and bytes using the filesystem
# encoding.
PYTHON3 = sys.version_info[0] > 2
if PYTHON3:
    str = str
    unicode = str
    bytes = bytes
    basestring = (str, bytes)

    def str_to_bytes(s):
        """Translate string or bytes to bytes.
        """
        return bytes(s, sys.getfilesystemencoding()) if isinstance(s, str) else s

    def bytes_to_str(b):
        """Translate bytes to string.
        """
        return b.decode(sys.getfilesystemencoding()) if isinstance(b, bytes) else b
else:
    str = str
    unicode = unicode
    bytes = str
    basestring = basestring

    def str_to_bytes(s):
        """Translate string or bytes to bytes.
        """
        return s.encode(sys.getfilesystemencoding()) if isinstance(s, unicode) else s

    def bytes_to_str(b):
        """Translate bytes to unicode string.
        """
        return unicode(b, sys.getfilesystemencoding()) if isinstance(b, str) else b
# Internal guard (sentinel) to prevent internal classes from being
# directly instantiated by user code; see _Constructor below.
_internal_guard = object()
def find_lib():
    """(INTERNAL) Locate and load the libvlc shared library.

    Returns a 2-tuple (dll, plugin_path):
      - dll: the loaded ctypes.CDLL handle for libvlc.
      - plugin_path: the VLC installation directory when it could be
        determined (win32 / MacOS), else None.

    Raises NotImplementedError on unsupported platforms and OSError
    when no loadable libvlc library can be found.
    """
    dll = None
    plugin_path = None
    if sys.platform.startswith('linux'):
        p = find_library('vlc')
        if p is None:
            # Do not pass None to CDLL: CDLL(None) loads the main
            # program, not libvlc.  Fall back to the versioned soname.
            dll = ctypes.CDLL('libvlc.so.5')
        else:
            try:
                dll = ctypes.CDLL(p)
            except OSError:  # may fail
                dll = ctypes.CDLL('libvlc.so.5')
    elif sys.platform.startswith('win'):
        p = find_library('libvlc.dll')
        if p is None:
            try:  # some registry settings
                # leaner than win32api, win32con
                if PYTHON3:
                    import winreg as w
                else:
                    import _winreg as w
                for r in w.HKEY_LOCAL_MACHINE, w.HKEY_CURRENT_USER:
                    try:
                        r = w.OpenKey(r, 'Software\\VideoLAN\\VLC')
                        plugin_path, _ = w.QueryValueEx(r, 'InstallDir')
                        w.CloseKey(r)
                        break
                    except w.error:
                        pass
            except ImportError:  # no PyWin32
                pass
            if plugin_path is None:
                # Try some standard locations.
                for p in ('Program Files\\VideoLan\\', 'VideoLan\\',
                          'Program Files\\', ''):
                    p = 'C:\\' + p + 'VLC\\libvlc.dll'
                    if os.path.exists(p):
                        plugin_path = os.path.dirname(p)
                        break
            if plugin_path is not None:  # try loading from the install dir
                p = os.getcwd()
                # if chdir failed, this will raise an exception
                os.chdir(plugin_path)
                try:
                    dll = ctypes.CDLL('libvlc.dll')
                finally:
                    # Restore the cwd even when loading the dll fails, so
                    # a failed import does not leak a chdir on the caller.
                    os.chdir(p)
            else:  # may fail
                dll = ctypes.CDLL('libvlc.dll')
        else:
            plugin_path = os.path.dirname(p)
            dll = ctypes.CDLL(p)
    elif sys.platform.startswith('darwin'):
        # FIXME: should find a means to configure path
        d = '/Applications/VLC.app/Contents/MacOS/'
        p = d + 'lib/libvlc.dylib'
        if os.path.exists(p):
            dll = ctypes.CDLL(p)
            d += 'modules'
            if os.path.isdir(d):
                plugin_path = d
        else:  # hope, some PATH is set...
            dll = ctypes.CDLL('libvlc.dylib')
    else:
        raise NotImplementedError('%s: %s not supported' % (sys.argv[0], sys.platform))
    return (dll, plugin_path)
# Load libvlc at module import time; plugin_path is used on win32 and
# MacOS in override.py.
dll, plugin_path = find_lib()
class VLCException(Exception):
    """Exception raised when a libvlc method fails."""
try:
_Ints = (int, long)
except NameError: # no long in Python 3+
_Ints = int
_Seqs = (list, tuple)
# Used for handling *event_manager() methods.
# Used for handling *event_manager() methods.
class memoize_parameterless(object):
    """Decorator. Caches a parameterless method's return value each time it is called.

    The first call for a given instance evaluates the wrapped method and
    stores the result keyed on the instance; later calls return the
    cached value without re-evaluation.
    Adapted from https://wiki.python.org/moin/PythonDecoratorLibrary
    """
    def __init__(self, func):
        self.func = func
        self._cache = {}

    def __call__(self, obj):
        # EAFP: after the first call a cache hit is the common case.
        try:
            return self._cache[obj]
        except KeyError:
            v = self._cache[obj] = self.func(obj)
            return v

    def __repr__(self):
        """Return the function's docstring (or its repr as a fallback).
        """
        # __repr__ must return a str: func.__doc__ is None for an
        # undocumented function, which would make repr() raise TypeError.
        return self.func.__doc__ or repr(self.func)

    def __get__(self, obj, objtype):
        """Support instance methods.
        """
        return functools.partial(self.__call__, obj)
# Default instance, lazily created by get_default_instance() and used to
# instantiate classes directly in the OO-wrapper.
_default_instance = None

def get_default_instance():
    """Return the default VLC.Instance, creating it on first use.
    """
    global _default_instance
    inst = _default_instance
    if inst is None:
        inst = _default_instance = Instance()
    return inst
_Cfunctions = {}  # from LibVLC __version__
_Globals = globals()  # sys.modules[__name__].__dict__

def _Cfunction(name, flags, errcheck, *types):
    """(INTERNAL) New ctypes function binding.
    """
    # Bind only names that exist both in the dll and in this module.
    if not (hasattr(dll, name) and name in _Globals):
        raise NameError('no function %r' % (name,))
    prototype = ctypes.CFUNCTYPE(*types)
    func = prototype((name, dll), flags)
    if errcheck is not None:
        func.errcheck = errcheck
    # The ctypes binding replaces the Python function in this module's
    # namespace only when running as python -O or -OO (__debug__ False);
    # otherwise it is kept aside in _Cfunctions.
    if __debug__:
        _Cfunctions[name] = func
    else:
        _Globals[name] = func
    return func
def _Cobject(cls, ctype):
"""(INTERNAL) New instance from ctypes.
"""
o = object.__new__(cls)
o._as_parameter_ = ctype
return o
def _Constructor(cls, ptr=_internal_guard):
    """(INTERNAL) New wrapper from ctypes.

    Returns None for NULL pointers; refuses direct instantiation when
    no pointer is supplied.
    """
    if ptr == _internal_guard:
        raise VLCException("(INTERNAL) ctypes class. You should get references for this class through methods of the LibVLC API.")
    return None if (ptr is None or ptr == 0) else _Cobject(cls, ctypes.c_void_p(ptr))
class _Cstruct(ctypes.Structure):
"""(INTERNAL) Base class for ctypes structures.
"""
_fields_ = [] # list of 2-tuples ('name', ctyptes.<type>)
def __str__(self):
l = [' %s:\t%s' % (n, getattr(self, n)) for n, _ in self._fields_]
return '\n'.join([self.__class__.__name__] + l)
def __repr__(self):
return '%s.%s' % (self.__class__.__module__, self)
class _Ctype(object):
"""(INTERNAL) Base class for ctypes.
"""
@staticmethod
def from_param(this): # not self
"""(INTERNAL) ctypes parameter conversion method.
"""
if this is None:
return None
return this._as_parameter_
class ListPOINTER(object):
    """Just like a POINTER but accept a list of ctype as an argument.
    """
    def __init__(self, etype):
        self.etype = etype

    def from_param(self, param):
        # Convert an accepted sequence into a ctypes array; any other
        # value falls through to None, as in the original behavior.
        if not isinstance(param, _Seqs):
            return None
        array_type = self.etype * len(param)
        return array_type(*param)
# errcheck functions for some native functions.
def string_result(result, func, arguments):
"""Errcheck function. Returns a string and frees the original pointer.
It assumes the result is a char *.
"""
if result:
# make a python string copy
s = bytes_to_str(ctypes.string_at(result))
# free original string ptr
libvlc_free(result)
return s
return None
def class_result(classname):
    """Errcheck function. Returns a function that creates the specified class.
    """
    def wrap_errcheck(result, func, arguments):
        return None if result is None else classname(result)
    return wrap_errcheck
# Wrapper for the opaque struct libvlc_log_t.
class Log(ctypes.Structure):
    """(Opaque) message context passed to log handlers; never dereferenced."""
Log_ptr = ctypes.POINTER(Log)
# FILE* ctypes wrapper, copied from
# http://svn.python.org/projects/ctypes/trunk/ctypeslib/ctypeslib/contrib/pythonhdr.py
class FILE(ctypes.Structure):
    # Opaque stand-in for the C stdio FILE struct; only used via FILE_ptr.
    pass
FILE_ptr = ctypes.POINTER(FILE)

if PYTHON3:
    # Python 3 removed PyFile_FromFile/PyFile_AsFile from the C API;
    # bind the fd-based equivalents instead.
    PyFile_FromFd = ctypes.pythonapi.PyFile_FromFd
    PyFile_FromFd.restype = ctypes.py_object
    PyFile_FromFd.argtypes = [ctypes.c_int,
                              ctypes.c_char_p,
                              ctypes.c_char_p,
                              ctypes.c_int,
                              ctypes.c_char_p,
                              ctypes.c_char_p,
                              ctypes.c_char_p,
                              ctypes.c_int]
    PyFile_AsFd = ctypes.pythonapi.PyObject_AsFileDescriptor
    PyFile_AsFd.restype = ctypes.c_int
    PyFile_AsFd.argtypes = [ctypes.py_object]
else:
    # Python 2: bind the FILE*-based C API directly.
    PyFile_FromFile = ctypes.pythonapi.PyFile_FromFile
    PyFile_FromFile.restype = ctypes.py_object
    PyFile_FromFile.argtypes = [FILE_ptr,
                                ctypes.c_char_p,
                                ctypes.c_char_p,
                                ctypes.CFUNCTYPE(ctypes.c_int, FILE_ptr)]
    PyFile_AsFile = ctypes.pythonapi.PyFile_AsFile
    PyFile_AsFile.restype = FILE_ptr
    PyFile_AsFile.argtypes = [ctypes.py_object]
# Generated enum types #
class _Enum(ctypes.c_uint):
'''(INTERNAL) Base class
'''
_enum_names_ = {}
def __str__(self):
n = self._enum_names_.get(self.value, '') or ('FIXME_(%r)' % (self.value,))
return '.'.join((self.__class__.__name__, n))
def __hash__(self):
return self.value
def __repr__(self):
return '.'.join((self.__class__.__module__, self.__str__()))
def __eq__(self, other):
return ( (isinstance(other, _Enum) and self.value == other.value)
or (isinstance(other, _Ints) and self.value == other) )
def __ne__(self, other):
return not self.__eq__(other)
class LogLevel(_Enum):
    '''Logging messages level.
    \note future libvlc versions may define new levels.
    '''
    _enum_names_ = {
        0: 'DEBUG',
        2: 'NOTICE',
        3: 'WARNING',
        4: 'ERROR',
    }

# Attach one LogLevel instance per named value (LogLevel.DEBUG, ...).
for _value, _name in LogLevel._enum_names_.items():
    setattr(LogLevel, _name, LogLevel(_value))
del _value, _name
class EventType(_Enum):
    '''Event types.
    '''
    _enum_names_ = {
        0: 'MediaMetaChanged',
        1: 'MediaSubItemAdded',
        2: 'MediaDurationChanged',
        3: 'MediaParsedChanged',
        4: 'MediaFreed',
        5: 'MediaStateChanged',
        6: 'MediaSubItemTreeAdded',
        0x100: 'MediaPlayerMediaChanged',
        257: 'MediaPlayerNothingSpecial',
        258: 'MediaPlayerOpening',
        259: 'MediaPlayerBuffering',
        260: 'MediaPlayerPlaying',
        261: 'MediaPlayerPaused',
        262: 'MediaPlayerStopped',
        263: 'MediaPlayerForward',
        264: 'MediaPlayerBackward',
        265: 'MediaPlayerEndReached',
        266: 'MediaPlayerEncounteredError',
        267: 'MediaPlayerTimeChanged',
        268: 'MediaPlayerPositionChanged',
        269: 'MediaPlayerSeekableChanged',
        270: 'MediaPlayerPausableChanged',
        271: 'MediaPlayerTitleChanged',
        272: 'MediaPlayerSnapshotTaken',
        273: 'MediaPlayerLengthChanged',
        274: 'MediaPlayerVout',
        275: 'MediaPlayerScrambledChanged',
        276: 'MediaPlayerESAdded',
        277: 'MediaPlayerESDeleted',
        278: 'MediaPlayerESSelected',
        0x200: 'MediaListItemAdded',
        513: 'MediaListWillAddItem',
        514: 'MediaListItemDeleted',
        515: 'MediaListWillDeleteItem',
        516: 'MediaListEndReached',
        0x300: 'MediaListViewItemAdded',
        769: 'MediaListViewWillAddItem',
        770: 'MediaListViewItemDeleted',
        771: 'MediaListViewWillDeleteItem',
        0x400: 'MediaListPlayerPlayed',
        1025: 'MediaListPlayerNextItemSet',
        1026: 'MediaListPlayerStopped',
        0x500: 'MediaDiscovererStarted',
        1281: 'MediaDiscovererEnded',
        0x600: 'VlmMediaAdded',
        1537: 'VlmMediaRemoved',
        1538: 'VlmMediaChanged',
        1539: 'VlmMediaInstanceStarted',
        1540: 'VlmMediaInstanceStopped',
        1541: 'VlmMediaInstanceStatusInit',
        1542: 'VlmMediaInstanceStatusOpening',
        1543: 'VlmMediaInstanceStatusPlaying',
        1544: 'VlmMediaInstanceStatusPause',
        1545: 'VlmMediaInstanceStatusEnd',
        1546: 'VlmMediaInstanceStatusError',
    }

# Attach one EventType instance per named value
# (EventType.MediaMetaChanged, ...).
for _value, _name in EventType._enum_names_.items():
    setattr(EventType, _name, EventType(_value))
del _value, _name
class Meta(_Enum):
    '''Meta data types.
    '''
    _enum_names_ = {
        0: 'Title',
        1: 'Artist',
        2: 'Genre',
        3: 'Copyright',
        4: 'Album',
        5: 'TrackNumber',
        6: 'Description',
        7: 'Rating',
        8: 'Date',
        9: 'Setting',
        10: 'URL',
        11: 'Language',
        12: 'NowPlaying',
        13: 'Publisher',
        14: 'EncodedBy',
        15: 'ArtworkURL',
        16: 'TrackID',
        17: 'TrackTotal',
        18: 'Director',
        19: 'Season',
        20: 'Episode',
        21: 'ShowName',
        22: 'Actors',
        23: 'AlbumArtist',
        24: 'DiscNumber',
    }

# Attach one Meta instance per named value (Meta.Title, ...).
for _value, _name in Meta._enum_names_.items():
    setattr(Meta, _name, Meta(_value))
del _value, _name
class State(_Enum):
    '''Note the order of libvlc_state_t enum must match exactly the order of
    See mediacontrol_playerstatus, See input_state_e enums,
    and videolan.libvlc.state (at bindings/cil/src/media.cs).
    expected states by web plugins are:
    idle/close=0, opening=1, buffering=2, playing=3, paused=4,
    stopping=5, ended=6, error=7.
    '''
    _enum_names_ = {
        0: 'NothingSpecial',
        1: 'Opening',
        2: 'Buffering',
        3: 'Playing',
        4: 'Paused',
        5: 'Stopped',
        6: 'Ended',
        7: 'Error',
    }

# Attach one State instance per named value (State.Playing, ...).
for _value, _name in State._enum_names_.items():
    setattr(State, _name, State(_value))
del _value, _name
class TrackType(_Enum):
    '''Media track types (audio, video, text/subtitle).
    '''
    _enum_names_ = {
        -1: 'unknown',
        0: 'audio',
        1: 'video',
        2: 'text',
    }

# Attach one TrackType instance per named value (TrackType.audio, ...).
for _value, _name in TrackType._enum_names_.items():
    setattr(TrackType, _name, TrackType(_value))
del _value, _name
class MediaType(_Enum):
    '''Media type
    See libvlc_media_get_type.
    '''
    _enum_names_ = {
        0: 'unknown',
        1: 'file',
        2: 'directory',
        3: 'disc',
        4: 'stream',
        5: 'playlist',
    }

# Attach one MediaType instance per named value (MediaType.file, ...).
for _value, _name in MediaType._enum_names_.items():
    setattr(MediaType, _name, MediaType(_value))
del _value, _name
class MediaParseFlag(_Enum):
    '''Parse flags used by libvlc_media_parse_with_options()
    See libvlc_media_parse_with_options.
    '''
    # NOTE(review): the generated table mapped 0x02/0x04 to duplicate
    # 'local'/'network' names, which silently rebound the 0x00/0x01
    # attributes.  Per libvlc_media_parse_flag_t, 0x02/0x04 are the
    # artwork-fetch flags, named fetch_local / fetch_network here.
    _enum_names_ = {
        0x00: 'local',
        0x01: 'network',
        0x02: 'fetch_local',
        0x04: 'fetch_network',
    }
MediaParseFlag.local = MediaParseFlag(0x00)
MediaParseFlag.network = MediaParseFlag(0x01)
MediaParseFlag.fetch_local = MediaParseFlag(0x02)
MediaParseFlag.fetch_network = MediaParseFlag(0x04)
class PlaybackMode(_Enum):
    '''Defines playback modes for playlist.
    '''
    _enum_names_ = {
        0: 'default',
        1: 'loop',
        2: 'repeat',
    }

# Attach one PlaybackMode instance per named value (PlaybackMode.loop, ...).
for _value, _name in PlaybackMode._enum_names_.items():
    setattr(PlaybackMode, _name, PlaybackMode(_value))
del _value, _name
class VideoMarqueeOption(_Enum):
    '''Marq options definition.
    '''
    _enum_names_ = {
        0: 'Enable',
        1: 'Text',
        2: 'Color',
        3: 'Opacity',
        4: 'Position',
        5: 'Refresh',
        6: 'Size',
        7: 'Timeout',
        8: 'marquee_X',
        9: 'marquee_Y',
    }

# Attach one VideoMarqueeOption instance per named value.
for _value, _name in VideoMarqueeOption._enum_names_.items():
    setattr(VideoMarqueeOption, _name, VideoMarqueeOption(_value))
del _value, _name
class NavigateMode(_Enum):
    '''Navigation mode.
    '''
    _enum_names_ = {
        0: 'activate',
        1: 'up',
        2: 'down',
        3: 'left',
        4: 'right',
    }

# Attach one NavigateMode instance per named value (NavigateMode.up, ...).
for _value, _name in NavigateMode._enum_names_.items():
    setattr(NavigateMode, _name, NavigateMode(_value))
del _value, _name
class Position(_Enum):
    '''Enumeration of values used to set position (e.g. of video title).
    '''
    # NOTE(review): the generated table repeated 'left'/'right' for the
    # corner values 4/5/7/8, so Position.left and Position.right ended up
    # rebound to 7 and 8.  Named per libvlc_position_t:
    # top_left, top_right, bottom_left, bottom_right.
    _enum_names_ = {
        -1: 'disable',
        0: 'center',
        1: 'left',
        2: 'right',
        3: 'top',
        4: 'top_left',
        5: 'top_right',
        6: 'bottom',
        7: 'bottom_left',
        8: 'bottom_right',
    }
Position.disable = Position(-1)
Position.center = Position(0)
Position.left = Position(1)
Position.right = Position(2)
Position.top = Position(3)
Position.top_left = Position(4)
Position.top_right = Position(5)
Position.bottom = Position(6)
Position.bottom_left = Position(7)
Position.bottom_right = Position(8)
class VideoLogoOption(_Enum):
    '''Option values for libvlc_video_{get,set}_logo_{int,string}.
    '''
    _enum_names_ = {
        0: 'enable',
        1: 'file',
        2: 'logo_x',
        3: 'logo_y',
        4: 'delay',
        5: 'repeat',
        6: 'opacity',
        7: 'position',
    }

# Attach one VideoLogoOption instance per named value.
for _value, _name in VideoLogoOption._enum_names_.items():
    setattr(VideoLogoOption, _name, VideoLogoOption(_value))
del _value, _name
class VideoAdjustOption(_Enum):
    '''Option values for libvlc_video_{get,set}_adjust_{int,float,bool}.
    '''
    _enum_names_ = {
        0: 'Enable',
        1: 'Contrast',
        2: 'Brightness',
        3: 'Hue',
        4: 'Saturation',
        5: 'Gamma',
    }

# Attach one VideoAdjustOption instance per named value.
for _value, _name in VideoAdjustOption._enum_names_.items():
    setattr(VideoAdjustOption, _name, VideoAdjustOption(_value))
del _value, _name
class AudioOutputDeviceTypes(_Enum):
    '''Audio device types.
    '''
    _enum_names_ = {
        -1: 'Error',
        1: 'Mono',
        2: 'Stereo',
        4: '_2F2R',
        5: '_3F2R',
        6: '_5_1',
        7: '_6_1',
        8: '_7_1',
        10: 'SPDIF',
    }

# Attach one AudioOutputDeviceTypes instance per named value.
for _value, _name in AudioOutputDeviceTypes._enum_names_.items():
    setattr(AudioOutputDeviceTypes, _name, AudioOutputDeviceTypes(_value))
del _value, _name
class AudioOutputChannel(_Enum):
    '''Audio channels.
    '''
    _enum_names_ = {
        -1: 'Error',
        1: 'Stereo',
        2: 'RStereo',
        3: 'Left',
        4: 'Right',
        5: 'Dolbys',
    }

# Attach one AudioOutputChannel instance per named value.
for _value, _name in AudioOutputChannel._enum_names_.items():
    setattr(AudioOutputChannel, _name, AudioOutputChannel(_value))
del _value, _name
class Callback(ctypes.c_void_p):
    """Callback function notification.
    \param p_event the event triggering the callback
    """
class LogCb(ctypes.c_void_p):
    """Callback prototype for LibVLC log message handler.
    \param data data pointer as given to L{libvlc_log_set}()
    \param level message level (@ref enum libvlc_log_level)
    \param ctx message context (meta-information about the message)
    \param fmt printf() format string (as defined by ISO C11)
    \param args variable argument list for the format
    \note Log message handlers <b>must</b> be thread-safe.
    \warning The message context pointer, the format string parameters and
    the variable arguments are only valid until the callback returns.
    """
class MediaOpenCb(ctypes.c_void_p):
    """Callback prototype to open a custom bitstream input media.
    Invoked once per open of the same media item; it should allocate any
    instance-specific resources and store them in *datap, to be freed in
    the @ref libvlc_close_cb callback.
    \param opaque private pointer as passed to L{libvlc_media_new_callbacks}()
    \param datap storage space for a private data pointer [OUT]
    \param sizep byte length of the bitstream or 0 if unknown [OUT]
    \note For convenience, *datap is initially NULL and *sizep is initially 0.
    \return 0 on success, non-zero on error; on failure the other callbacks
    are not invoked and any value stored in *datap and *sizep is discarded.
    """
class MediaReadCb(ctypes.c_void_p):
    """Callback prototype to read data from a custom bitstream input media.
    \param opaque private pointer as set by the @ref libvlc_media_open_cb callback
    \param buf start address of the buffer to read data into
    \param len bytes length of the buffer
    \return strictly positive number of bytes read, 0 on end-of-stream,
    or -1 on non-recoverable error
    \note If no data is immediately available, the callback should sleep.
    \warning The application is responsible for avoiding deadlock: the
    callback should return an error if playback is stopped, otherwise
    L{libvlc_media_player_stop}() will never return.
    """
class MediaSeekCb(ctypes.c_void_p):
    """Callback prototype to seek a custom bitstream input media.
    \param opaque private pointer as set by the @ref libvlc_media_open_cb callback
    \param offset absolute byte offset to seek to
    \return 0 on success, -1 on error.
    """
class MediaCloseCb(ctypes.c_void_p):
    """Callback prototype to close a custom bitstream input media.
    \param opaque private pointer as set by the @ref libvlc_media_open_cb callback
    """
class VideoLockCb(ctypes.c_void_p):
    """Callback prototype to allocate and lock a picture buffer.
    Invoked whenever a new video frame needs to be decoded; depending on
    the video chroma, one or three pixel planes of adequate dimensions
    must be returned via the second parameter, aligned on 32-byte
    boundaries.
    \param opaque private pointer as passed to L{libvlc_video_set_callbacks}() [IN]
    \param planes start address of the pixel planes (LibVLC allocates the
    array of void pointers, this callback must initialize the array) [OUT]
    \return a private pointer for the display and unlock callbacks to
    identify the picture buffers
    """
class VideoUnlockCb(ctypes.c_void_p):
    """Callback prototype to unlock a picture buffer.
    Invoked when the video frame decoding is complete; may not be needed
    at all, it merely signals that the application can now read the pixel
    values if it needs to.
    \warning A picture buffer is unlocked after the picture is decoded,
    but before the picture is displayed.
    \param opaque private pointer as passed to L{libvlc_video_set_callbacks}() [IN]
    \param picture private pointer returned from the @ref libvlc_video_lock_cb callback [IN]
    \param planes pixel planes as defined by the @ref libvlc_video_lock_cb
    callback (this parameter is only for convenience) [IN]
    """
class VideoDisplayCb(ctypes.c_void_p):
    """Callback prototype to display a picture.
    Invoked when the video frame needs to be shown, as determined by the
    media playback clock.
    \param opaque private pointer as passed to L{libvlc_video_set_callbacks}() [IN]
    \param picture private pointer returned from the @ref libvlc_video_lock_cb callback [IN]
    """
class VideoFormatCb(ctypes.c_void_p):
    """Callback prototype to configure picture buffers format.
    Receives the format of the video as output by the video decoder and
    the chain of video filters (if any), and may change any parameter;
    LibVLC will then attempt to convert the video format (rescaling and
    chroma conversion), which can be CPU intensive.
    \param opaque pointer to the private pointer passed to
    L{libvlc_video_set_callbacks}() [IN/OUT]
    \param chroma pointer to the 4 bytes video format identifier [IN/OUT]
    \param width pointer to the pixel width [IN/OUT]
    \param height pointer to the pixel height [IN/OUT]
    \param pitches table of scanline pitches in bytes for each pixel plane
    (the table is allocated by LibVLC) [OUT]
    \param lines table of scanlines count for each plane [OUT]
    \return the number of picture buffers allocated, 0 indicates failure
    \note For each pixel plane the scanline pitch must be >= bytes per
    pixel times the pixel width, and the number of scanlines must be >=
    the pixel height.  Pitches and lines should preferably be multiples
    of 32 so as not to break assumptions of optimized code in the video
    decoders, filters and converters.
    """
class VideoCleanupCb(ctypes.c_void_p):
    """Callback prototype to release resources of the picture buffers format.
    \param opaque private pointer as passed to L{libvlc_video_set_callbacks}()
    (and possibly modified by @ref libvlc_video_format_cb) [IN]
    """
class AudioPlayCb(ctypes.c_void_p):
    """Callback prototype for audio playback.
    \param data data pointer as passed to L{libvlc_audio_set_callbacks}() [IN]
    \param samples pointer to the first audio sample to play back [IN]
    \param count number of audio samples to play back
    \param pts expected play time stamp (see libvlc_delay())
    """
class AudioPauseCb(ctypes.c_void_p):
    """Callback prototype for audio pause.
    \note The pause callback is never called if the audio is already paused.
    \param data data pointer as passed to L{libvlc_audio_set_callbacks}() [IN]
    \param pts time stamp of the pause request (should be elapsed already)
    """
class AudioResumeCb(ctypes.c_void_p):
    """Callback prototype for audio resumption (i.e. restart from pause).
    \note The resume callback is never called if the audio is not paused.
    \param data data pointer as passed to L{libvlc_audio_set_callbacks}() [IN]
    \param pts time stamp of the resumption request (should be elapsed already)
    """
class AudioFlushCb(ctypes.c_void_p):
    """Callback prototype for audio buffer flush
    (i.e. discard all pending buffers and stop playback as soon as possible).
    \param data data pointer as passed to L{libvlc_audio_set_callbacks}() [IN]
    """
class AudioDrainCb(ctypes.c_void_p):
    """Callback prototype for audio buffer drain
    (i.e. wait for pending buffers to be played).
    \param data data pointer as passed to L{libvlc_audio_set_callbacks}() [IN]
    """
class AudioSetVolumeCb(ctypes.c_void_p):
    """Callback prototype for audio volume change.
    \param data data pointer as passed to L{libvlc_audio_set_callbacks}() [IN]
    \param volume software volume (1. = nominal, 0. = mute)
    \param mute muted flag
    """
class AudioSetupCb(ctypes.c_void_p):
    """Callback prototype to setup the audio playback.
    Called when the media player needs to create a new audio output.
    \param opaque pointer to the data pointer passed to
    L{libvlc_audio_set_callbacks}() [IN/OUT]
    \param format 4 bytes sample format [IN/OUT]
    \param rate sample rate [IN/OUT]
    \param channels channels count [IN/OUT]
    \return 0 on success, anything else to skip audio playback
    """
class AudioCleanupCb(ctypes.c_void_p):
    """Callback prototype for audio playback cleanup.
    Called when the media player no longer needs an audio output.
    \param opaque data pointer as passed to L{libvlc_audio_set_callbacks}() [IN]
    """
class CallbackDecorators(object):
    "Class holding various method decorators for callback functions."
    # Each attribute is a ctypes.CFUNCTYPE prototype used to wrap Python
    # callables before handing them to the corresponding libvlc C API.
    # NOTE(review): the __doc__ strings below are non-raw literals, so any
    # Doxygen sequence that is also a valid Python escape (e.g. '\note'
    # starting with '\n') is mangled at runtime; harmless for the bindings
    # themselves, but worth converting to raw strings upstream.
    Callback = ctypes.CFUNCTYPE(ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p)
    Callback.__doc__ = '''Callback function notification
    \param p_event the event triggering the callback
    '''
    LogCb = ctypes.CFUNCTYPE(ctypes.c_void_p, ctypes.c_void_p, ctypes.c_int, Log_ptr, ctypes.c_char_p, ctypes.c_void_p)
    LogCb.__doc__ = '''Callback prototype for LibVLC log message handler.
    \param data data pointer as given to L{libvlc_log_set}()
    \param level message level (@ref enum libvlc_log_level)
    \param ctx message context (meta-information about the message)
    \param fmt printf() format string (as defined by ISO C11)
    \param args variable argument list for the format
    \note Log message handlers <b>must</b> be thread-safe.
    \warning The message context pointer, the format string parameters and the
    variable arguments are only valid until the callback returns.
    '''
    MediaOpenCb = ctypes.CFUNCTYPE(ctypes.POINTER(ctypes.c_int), ctypes.c_void_p, ListPOINTER(ctypes.c_void_p), ctypes.POINTER(ctypes.c_uint64))
    MediaOpenCb.__doc__ = '''Callback prototype to open a custom bitstream input media.
    The same media item can be opened multiple times. Each time, this callback
    is invoked. It should allocate and initialize any instance-specific
    resources, then store them in *datap. The instance resources can be freed
    in the @ref libvlc_close_cb callback.
    \param opaque private pointer as passed to L{libvlc_media_new_callbacks}()
    \param datap storage space for a private data pointer [OUT]
    \param sizep byte length of the bitstream or 0 if unknown [OUT]
    \note For convenience, *datap is initially NULL and *sizep is initially 0.
    \return 0 on success, non-zero on error. In case of failure, the other
    callbacks will not be invoked and any value stored in *datap and *sizep is
    discarded.
    '''
    MediaReadCb = ctypes.CFUNCTYPE(ctypes.POINTER(ctypes.c_ssize_t), ctypes.c_void_p, ctypes.c_char_p, ctypes.c_size_t)
    MediaReadCb.__doc__ = '''Callback prototype to read data from a custom bitstream input media.
    \param opaque private pointer as set by the @ref libvlc_media_open_cb
    callback
    \param buf start address of the buffer to read data into
    \param len bytes length of the buffer
    \return strictly positive number of bytes read, 0 on end-of-stream,
    or -1 on non-recoverable error
    \note If no data is immediately available, then the callback should sleep.
    \warning The application is responsible for avoiding deadlock situations.
    In particular, the callback should return an error if playback is stopped;
    if it does not return, then L{libvlc_media_player_stop}() will never return.
    '''
    MediaSeekCb = ctypes.CFUNCTYPE(ctypes.POINTER(ctypes.c_int), ctypes.c_void_p, ctypes.c_uint64)
    MediaSeekCb.__doc__ = '''Callback prototype to seek a custom bitstream input media.
    \param opaque private pointer as set by the @ref libvlc_media_open_cb
    callback
    \param offset absolute byte offset to seek to
    \return 0 on success, -1 on error.
    '''
    MediaCloseCb = ctypes.CFUNCTYPE(ctypes.c_void_p, ctypes.c_void_p)
    MediaCloseCb.__doc__ = '''Callback prototype to close a custom bitstream input media.
    \param opaque private pointer as set by the @ref libvlc_media_open_cb
    callback
    '''
    VideoLockCb = ctypes.CFUNCTYPE(ctypes.c_void_p, ctypes.c_void_p, ListPOINTER(ctypes.c_void_p))
    VideoLockCb.__doc__ = '''Callback prototype to allocate and lock a picture buffer.
    Whenever a new video frame needs to be decoded, the lock callback is
    invoked. Depending on the video chroma, one or three pixel planes of
    adequate dimensions must be returned via the second parameter. Those
    planes must be aligned on 32-bytes boundaries.
    \param opaque private pointer as passed to L{libvlc_video_set_callbacks}() [IN]
    \param planes start address of the pixel planes (LibVLC allocates the array
    of void pointers, this callback must initialize the array) [OUT]
    \return a private pointer for the display and unlock callbacks to identify
    the picture buffers
    '''
    VideoUnlockCb = ctypes.CFUNCTYPE(ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ListPOINTER(ctypes.c_void_p))
    VideoUnlockCb.__doc__ = '''Callback prototype to unlock a picture buffer.
    When the video frame decoding is complete, the unlock callback is invoked.
    This callback might not be needed at all. It is only an indication that the
    application can now read the pixel values if it needs to.
    \warning A picture buffer is unlocked after the picture is decoded,
    but before the picture is displayed.
    \param opaque private pointer as passed to L{libvlc_video_set_callbacks}() [IN]
    \param picture private pointer returned from the @ref libvlc_video_lock_cb
    callback [IN]
    \param planes pixel planes as defined by the @ref libvlc_video_lock_cb
    callback (this parameter is only for convenience) [IN]
    '''
    VideoDisplayCb = ctypes.CFUNCTYPE(ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p)
    VideoDisplayCb.__doc__ = '''Callback prototype to display a picture.
    When the video frame needs to be shown, as determined by the media playback
    clock, the display callback is invoked.
    \param opaque private pointer as passed to L{libvlc_video_set_callbacks}() [IN]
    \param picture private pointer returned from the @ref libvlc_video_lock_cb
    callback [IN]
    '''
    VideoFormatCb = ctypes.CFUNCTYPE(ctypes.POINTER(ctypes.c_uint), ListPOINTER(ctypes.c_void_p), ctypes.c_char_p, ctypes.POINTER(ctypes.c_uint), ctypes.POINTER(ctypes.c_uint), ctypes.POINTER(ctypes.c_uint), ctypes.POINTER(ctypes.c_uint))
    VideoFormatCb.__doc__ = '''Callback prototype to configure picture buffers format.
    This callback gets the format of the video as output by the video decoder
    and the chain of video filters (if any). It can opt to change any parameter
    as it needs. In that case, LibVLC will attempt to convert the video format
    (rescaling and chroma conversion) but these operations can be CPU intensive.
    \param opaque pointer to the private pointer passed to
    L{libvlc_video_set_callbacks}() [IN/OUT]
    \param chroma pointer to the 4 bytes video format identifier [IN/OUT]
    \param width pointer to the pixel width [IN/OUT]
    \param height pointer to the pixel height [IN/OUT]
    \param pitches table of scanline pitches in bytes for each pixel plane
    (the table is allocated by LibVLC) [OUT]
    \param lines table of scanlines count for each plane [OUT]
    \return the number of picture buffers allocated, 0 indicates failure
    \note
    For each pixels plane, the scanline pitch must be bigger than or equal to
    the number of bytes per pixel multiplied by the pixel width.
    Similarly, the number of scanlines must be bigger than of equal to
    the pixel height.
    Furthermore, we recommend that pitches and lines be multiple of 32
    to not break assumptions that might be held by optimized code
    in the video decoders, video filters and/or video converters.
    '''
    VideoCleanupCb = ctypes.CFUNCTYPE(ctypes.c_void_p, ctypes.c_void_p)
    VideoCleanupCb.__doc__ = '''Callback prototype to configure picture buffers format.
    \param opaque private pointer as passed to L{libvlc_video_set_callbacks}()
    (and possibly modified by @ref libvlc_video_format_cb) [IN]
    '''
    AudioPlayCb = ctypes.CFUNCTYPE(ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_uint, ctypes.c_int64)
    AudioPlayCb.__doc__ = '''Callback prototype for audio playback.
    \param data data pointer as passed to L{libvlc_audio_set_callbacks}() [IN]
    \param samples pointer to the first audio sample to play back [IN]
    \param count number of audio samples to play back
    \param pts expected play time stamp (see libvlc_delay())
    '''
    AudioPauseCb = ctypes.CFUNCTYPE(ctypes.c_void_p, ctypes.c_void_p, ctypes.c_int64)
    AudioPauseCb.__doc__ = '''Callback prototype for audio pause.
    \note The pause callback is never called if the audio is already paused.
    \param data data pointer as passed to L{libvlc_audio_set_callbacks}() [IN]
    \param pts time stamp of the pause request (should be elapsed already)
    '''
    AudioResumeCb = ctypes.CFUNCTYPE(ctypes.c_void_p, ctypes.c_void_p, ctypes.c_int64)
    AudioResumeCb.__doc__ = '''Callback prototype for audio resumption (i.e. restart from pause).
    \note The resume callback is never called if the audio is not paused.
    \param data data pointer as passed to L{libvlc_audio_set_callbacks}() [IN]
    \param pts time stamp of the resumption request (should be elapsed already)
    '''
    AudioFlushCb = ctypes.CFUNCTYPE(ctypes.c_void_p, ctypes.c_void_p, ctypes.c_int64)
    AudioFlushCb.__doc__ = '''Callback prototype for audio buffer flush
    (i.e. discard all pending buffers and stop playback as soon as possible).
    \param data data pointer as passed to L{libvlc_audio_set_callbacks}() [IN]
    '''
    AudioDrainCb = ctypes.CFUNCTYPE(ctypes.c_void_p, ctypes.c_void_p)
    AudioDrainCb.__doc__ = '''Callback prototype for audio buffer drain
    (i.e. wait for pending buffers to be played).
    \param data data pointer as passed to L{libvlc_audio_set_callbacks}() [IN]
    '''
    AudioSetVolumeCb = ctypes.CFUNCTYPE(ctypes.c_void_p, ctypes.c_void_p, ctypes.c_float, ctypes.c_bool)
    AudioSetVolumeCb.__doc__ = '''Callback prototype for audio volume change.
    \param data data pointer as passed to L{libvlc_audio_set_callbacks}() [IN]
    \param volume software volume (1. = nominal, 0. = mute)
    \param mute muted flag
    '''
    AudioSetupCb = ctypes.CFUNCTYPE(ctypes.POINTER(ctypes.c_int), ListPOINTER(ctypes.c_void_p), ctypes.c_char_p, ctypes.POINTER(ctypes.c_uint), ctypes.POINTER(ctypes.c_uint))
    AudioSetupCb.__doc__ = '''Callback prototype to setup the audio playback.
    This is called when the media player needs to create a new audio output.
    \param opaque pointer to the data pointer passed to
    L{libvlc_audio_set_callbacks}() [IN/OUT]
    \param format 4 bytes sample format [IN/OUT]
    \param rate sample rate [IN/OUT]
    \param channels channels count [IN/OUT]
    \return 0 on success, anything else to skip audio playback
    '''
    AudioCleanupCb = ctypes.CFUNCTYPE(ctypes.c_void_p, ctypes.c_void_p)
    AudioCleanupCb.__doc__ = '''Callback prototype for audio playback cleanup.
    This is called when the media player no longer needs an audio output.
    \param opaque data pointer as passed to L{libvlc_audio_set_callbacks}() [IN]
    '''
cb = CallbackDecorators  # short module-level alias used by the generated wrappers
# End of generated enum types #
# From libvlc_structures.h
class AudioOutput(_Cstruct):
    """One node of the linked list of audio output modules."""

    def __str__(self):
        return '{0}({1}:{2})'.format(type(self).__name__, self.name, self.description)

# 'next' points back at AudioOutput itself, so the fields must be attached
# after the class statement (no forward reference inside the class body).
AudioOutput._fields_ = [
    ('name',        ctypes.c_char_p),
    ('description', ctypes.c_char_p),
    ('next',        ctypes.POINTER(AudioOutput)),
]
class LogMessage(_Cstruct):
    """Legacy log message structure passed to the old logging API."""

    _fields_ = [
        ('size',     ctypes.c_uint  ),
        ('severity', ctypes.c_int   ),
        ('type',     ctypes.c_char_p),
        ('name',     ctypes.c_char_p),
        ('header',   ctypes.c_char_p),
        ('message',  ctypes.c_char_p),
    ]

    def __init__(self):
        super(LogMessage, self).__init__()
        # The C side expects the caller to pre-fill the size member.
        self.size = ctypes.sizeof(self)

    def __str__(self):
        return '{0}({1}:{2}): {3}'.format(type(self).__name__, self.severity, self.type, self.message)
class MediaEvent(_Cstruct):
    """C structure carrying the media and instance names for media events."""
    _fields_ = [
        ('media_name', ctypes.c_char_p),
        ('instance_name', ctypes.c_char_p),
    ]
class MediaStats(_Cstruct):
    """C structure of per-media playback statistics (input, demux,
    decoding, output and stream-output counters)."""
    _fields_ = [
        ('read_bytes', ctypes.c_int ),
        ('input_bitrate', ctypes.c_float),
        ('demux_read_bytes', ctypes.c_int ),
        ('demux_bitrate', ctypes.c_float),
        ('demux_corrupted', ctypes.c_int ),
        ('demux_discontinuity', ctypes.c_int ),
        ('decoded_video', ctypes.c_int ),
        ('decoded_audio', ctypes.c_int ),
        ('displayed_pictures', ctypes.c_int ),
        ('lost_pictures', ctypes.c_int ),
        ('played_abuffers', ctypes.c_int ),
        ('lost_abuffers', ctypes.c_int ),
        ('sent_packets', ctypes.c_int ),
        ('sent_bytes', ctypes.c_int ),
        ('send_bitrate', ctypes.c_float),
    ]
class MediaTrackInfo(_Cstruct):
    """C structure describing one elementary stream of a media.

    The last two members are overloaded: channels/rate for audio tracks,
    height/width for video tracks.
    """
    _fields_ = [
        ('codec', ctypes.c_uint32),
        ('id', ctypes.c_int ),
        ('type', TrackType ),
        ('profile', ctypes.c_int ),
        ('level', ctypes.c_int ),
        ('channels_or_height', ctypes.c_uint ),
        ('rate_or_width', ctypes.c_uint ),
    ]
class AudioTrack(_Cstruct):
    """Audio-specific part of a MediaTrack (channel count and sample rate)."""
    _fields_ = [
        ('channels', ctypes.c_uint),
        ('rate', ctypes.c_uint),
    ]
class VideoTrack(_Cstruct):
    """Video-specific part of a MediaTrack (dimensions, sample aspect
    ratio and frame rate, each as a num/den pair)."""
    _fields_ = [
        ('height', ctypes.c_uint),
        ('width', ctypes.c_uint),
        ('sar_num', ctypes.c_uint),
        ('sar_den', ctypes.c_uint),
        ('frame_rate_num', ctypes.c_uint),
        ('frame_rate_den', ctypes.c_uint),
    ]
class SubtitleTrack(_Cstruct):
    """Subtitle-specific part of a MediaTrack (text encoding name)."""
    _fields_ = [
        ('encoding', ctypes.c_char_p),
    ]
class MediaTrackTracks(ctypes.Union):
    """Union of pointers to the per-type track structures; which member is
    valid depends on the enclosing MediaTrack's 'type' field."""
    _fields_ = [
        ('audio', ctypes.POINTER(AudioTrack)),
        ('video', ctypes.POINTER(VideoTrack)),
        ('subtitle', ctypes.POINTER(SubtitleTrack)),
    ]
class MediaTrack(_Cstruct):
    """C structure describing a media track.

    The union 'u' is anonymous, so the audio/video/subtitle pointers are
    reachable directly as attributes of a MediaTrack instance.
    """
    # _anonymous_ must be set before _fields_ is assigned (ctypes rule).
    _anonymous_ = ("u",)
    _fields_ = [
        ('codec', ctypes.c_uint32),
        ('original_fourcc', ctypes.c_uint32),
        ('id', ctypes.c_int ),
        ('type', TrackType ),
        ('profile', ctypes.c_int ),
        ('level', ctypes.c_int ),
        ('u', MediaTrackTracks),
        ('bitrate', ctypes.c_uint),
        ('language', ctypes.c_char_p),
        ('description', ctypes.c_char_p),
    ]
class PlaylistItem(_Cstruct):
    """A single playlist entry: numeric id, URI and display name."""

    _fields_ = [
        ('id',   ctypes.c_int   ),
        ('uri',  ctypes.c_char_p),
        ('name', ctypes.c_char_p),
    ]

    def __str__(self):
        return '{0} #{1} {2} (uri {3})'.format(type(self).__name__, self.id, self.name, self.uri)
class Position(object):
    """Immutable namespace of window-position constants.

    Used e.g. as values for VideoMarqueeOption.Position.  Instantiation
    and attribute assignment are both disabled so the values stay fixed.
    """
    Center       = 0
    Left         = 1
    CenterLeft   = 1
    Right        = 2
    CenterRight  = 2
    Top          = 4
    TopCenter    = 4
    TopLeft      = 5
    TopRight     = 6
    Bottom       = 8
    BottomCenter = 8
    BottomLeft   = 9
    BottomRight  = 10

    def __init__(self, *_ignored):
        raise TypeError('constants only')

    def __setattr__(self, *_ignored):  #PYCHOK expected
        raise TypeError('immutable constants')
class Rectangle(_Cstruct):
    """Rectangle in pixels, given by its top/left and bottom/right edges."""
    _fields_ = [
        ('top', ctypes.c_int),
        ('left', ctypes.c_int),
        ('bottom', ctypes.c_int),
        ('right', ctypes.c_int),
    ]
class TrackDescription(_Cstruct):
    """One node of a linked list of track descriptions (id and name)."""

    def __str__(self):
        return '{0}({1}:{2})'.format(type(self).__name__, self.id, self.name)

# 'next' refers to TrackDescription itself, hence the late field binding.
TrackDescription._fields_ = [
    ('id',   ctypes.c_int   ),
    ('name', ctypes.c_char_p),
    ('next', ctypes.POINTER(TrackDescription)),
]
def track_description_list(head):
    """Convert a TrackDescription linked list to a Python list (and release the former).

    @param head: pointer to the first TrackDescription node (may be NULL).
    @return: list of (id, name) tuples, in list order.
    """
    r = []
    if head:
        item = head
        while item:
            item = item.contents
            r.append((item.id, item.name))
            item = item.next
        # The release function was renamed between libvlc versions; fall
        # back to the older name if the newer one is not bound here.
        try:
            libvlc_track_description_release(head)
        except NameError:
            libvlc_track_description_list_release(head)
    return r
class EventUnion(ctypes.Union):
    """Union of the possible payloads carried by an Event; which member is
    valid depends on the Event's 'type' field."""
    _fields_ = [
        ('meta_type', ctypes.c_uint ),
        ('new_child', ctypes.c_uint ),
        ('new_duration', ctypes.c_longlong),
        ('new_status', ctypes.c_int ),
        ('media', ctypes.c_void_p ),
        ('new_state', ctypes.c_uint ),
        # Media instance
        ('new_position', ctypes.c_float ),
        ('new_time', ctypes.c_longlong),
        ('new_title', ctypes.c_int ),
        ('new_seekable', ctypes.c_longlong),
        ('new_pausable', ctypes.c_longlong),
        # FIXME: Skipped MediaList and MediaListView...
        ('filename', ctypes.c_char_p ),
        ('new_length', ctypes.c_longlong),
        ('media_event', MediaEvent ),
    ]
class Event(_Cstruct):
    """A libvlc event: its type, the emitting object and the payload 'u'."""
    _fields_ = [
        ('type', EventType ),
        ('object', ctypes.c_void_p),
        ('u', EventUnion ),
    ]
class ModuleDescription(_Cstruct):
    """One node of the linked list returned by the module enumeration calls."""

    def __str__(self):
        return '{0} {1} ({2})'.format(type(self).__name__, self.shortname, self.name)

# 'next' refers to ModuleDescription itself, hence the late field binding.
ModuleDescription._fields_ = [
    ('name',      ctypes.c_char_p),
    ('shortname', ctypes.c_char_p),
    ('longname',  ctypes.c_char_p),
    ('help',      ctypes.c_char_p),
    ('next',      ctypes.POINTER(ModuleDescription)),
]
def module_description_list(head):
    """Flatten a ModuleDescription linked list into Python tuples.

    The C-side list is released before returning.
    @param head: pointer to the first ModuleDescription node (may be NULL).
    @return: list of (name, shortname, longname, help) tuples.
    """
    descriptions = []
    if head:
        node = head
        while node:
            node = node.contents
            descriptions.append((node.name, node.shortname, node.longname, node.help))
            node = node.next
        libvlc_module_description_list_release(head)
    return descriptions
class AudioOutputDevice(_Cstruct):
    """One node of the linked list of audio output devices."""
    def __str__(self):
        # Fixed: the original formatted self.id and self.name, but this
        # struct only declares 'next', 'device' and 'description' fields,
        # so str() always raised AttributeError.
        return '%s(%s:%s)' % (self.__class__.__name__, self.device, self.description)
AudioOutputDevice._fields_ = [ # recursive struct
    ('next', ctypes.POINTER(AudioOutputDevice)),
    ('device', ctypes.c_char_p ),
    ('description', ctypes.c_char_p),
    ]
# End of header.py #
class EventManager(_Ctype):
    '''Create an event manager with callback handler.
    This class interposes the registration and handling of
    event notifications in order to (a) remove the need for
    decorating each callback functions with the decorator
    '@callbackmethod', (b) allow any number of positional
    and/or keyword arguments to the callback (in addition
    to the Event instance) and (c) to preserve the Python
    objects such that the callback and argument objects
    remain alive (i.e. are not garbage collected) until
    B{after} the notification has been unregistered.
    @note: Only a single notification can be registered
    for each event type in an EventManager instance.
    '''
    # Class-level defaults; event_attach() installs per-instance
    # replacements the first time a callback is registered.
    _callback_handler = None
    _callbacks = {}
    def __new__(cls, ptr=_internal_guard):
        if ptr == _internal_guard:
            raise VLCException("(INTERNAL) ctypes class.\nYou should get a reference to EventManager through the MediaPlayer.event_manager() method.")
        return _Constructor(cls, ptr)
    def event_attach(self, eventtype, callback, *args, **kwds):
        """Register an event notification.
        @param eventtype: the desired event type to be notified about.
        @param callback: the function to call when the event occurs.
        @param args: optional positional arguments for the callback.
        @param kwds: optional keyword arguments for the callback.
        @return: 0 on success, ENOMEM on error.
        @note: The callback function must have at least one argument,
        an Event instance. Any other, optional positional and keyword
        arguments are in B{addition} to the first one.
        """
        if not isinstance(eventtype, EventType):
            raise VLCException("%s required: %r" % ('EventType', eventtype))
        if not hasattr(callback, '__call__'): # callable()
            raise VLCException("%s required: %r" % ('callable', callback))
        # check that the callback expects arguments
        # NOTE(review): inspect.getargspec was removed in Python 3.11; the
        # module-level 'from inspect import getargspec' needs a
        # getfullargspec fallback upstream -- confirm target Python range.
        if not any(getargspec(callback)[:2]): # list(...)
            raise VLCException("%s required: %r" % ('argument', callback))
        if self._callback_handler is None:
            _called_from_ctypes = ctypes.CFUNCTYPE(None, ctypes.POINTER(Event), ctypes.c_void_p)
            @_called_from_ctypes
            def _callback_handler(event, k):
                """(INTERNAL) handle callback call from ctypes.
                @note: We cannot simply make this an EventManager
                method since ctypes does not prepend self as the
                first parameter, hence this closure.
                """
                try: # retrieve Python callback and arguments
                    call, args, kwds = self._callbacks[k]
                    # deref event.contents to simplify callback code
                    call(event.contents, *args, **kwds)
                except KeyError: # detached?
                    pass
            # Keeping references on the instance prevents the ctypes thunk
            # and the registered callbacks from being garbage collected
            # while libvlc may still invoke them.
            self._callback_handler = _callback_handler
            self._callbacks = {}
        k = eventtype.value
        # The event type value doubles as the opaque user data, so the
        # handler can look the Python callback up in _callbacks.
        r = libvlc_event_attach(self, k, self._callback_handler, k)
        if not r:
            self._callbacks[k] = (callback, args, kwds)
        return r
    def event_detach(self, eventtype):
        """Unregister an event notification.
        @param eventtype: the event type notification to be removed.
        """
        if not isinstance(eventtype, EventType):
            raise VLCException("%s required: %r" % ('EventType', eventtype))
        k = eventtype.value
        if k in self._callbacks:
            del self._callbacks[k] # remove, regardless of libvlc return value
        libvlc_event_detach(self, k, self._callback_handler, k)
class Instance(_Ctype):
'''Create a new Instance instance.
It may take as parameter either:
- a string
- a list of strings as first parameters
- the parameters given as the constructor parameters (must be strings)
'''
    def __new__(cls, *args):
        """(INTERNAL) Build a libvlc Instance.

        Accepts either an existing C pointer, a single option string, a
        sequence of option strings, or the options as separate arguments.
        """
        if len(args) == 1:
            # Only 1 arg. It is either a C pointer, or an arg string,
            # or a tuple.
            i = args[0]
            if isinstance(i, _Ints):
                return _Constructor(cls, i)
            elif isinstance(i, basestring):
                # A single string is split on whitespace into options.
                args = i.strip().split()
            elif isinstance(i, _Seqs):
                args = i
            else:
                raise VLCException('Instance %r' % (args,))
        if not args and plugin_path is not None:
            # no parameters passed, for win32 and MacOS,
            # specify the plugin_path if detected earlier
            args = ['vlc', '--plugin-path=' + plugin_path]
        if PYTHON3:
            # libvlc expects byte strings under Python 3.
            args = [ str_to_bytes(a) for a in args ]
        return libvlc_new(len(args), args)
def media_player_new(self, uri=None):
"""Create a new MediaPlayer instance.
@param uri: an optional URI to play in the player.
"""
p = libvlc_media_player_new(self)
if uri:
p.set_media(self.media_new(uri))
p._instance = self
return p
def media_list_player_new(self):
"""Create a new MediaListPlayer instance.
"""
p = libvlc_media_list_player_new(self)
p._instance = self
return p
    def media_new(self, mrl, *options):
        """Create a new Media instance.
        If mrl contains a colon (:) preceded by more than 1 letter, it
        will be treated as a URL. Else, it will be considered as a
        local path. If you need more control, directly use
        media_new_location/media_new_path methods.
        Options can be specified as supplementary string parameters,
        but note that many options cannot be set at the media level,
        and rather at the Instance level. For instance, the marquee
        filter must be specified when creating the vlc.Instance or
        vlc.MediaPlayer.
        Alternatively, options can be added to the media using the
        Media.add_options method (with the same limitation).
        @param mrl: media resource locator (URL or local path).
        @param options: optional media option=value strings
        """
        # A colon at index >= 2 implies a URL scheme (e.g. 'http:'),
        # whereas a Windows drive path like 'C:\\x' keeps it at index 1.
        if ':' in mrl and mrl.index(':') > 1:
            # Assume it is a URL
            m = libvlc_media_new_location(self, str_to_bytes(mrl))
        else:
            # Else it should be a local path.
            m = libvlc_media_new_path(self, str_to_bytes(os.path.normpath(mrl)))
        for o in options:
            libvlc_media_add_option(m, str_to_bytes(o))
        m._instance = self
        return m
def media_list_new(self, mrls=None):
"""Create a new MediaList instance.
@param mrls: optional list of MRL strings
"""
l = libvlc_media_list_new(self)
# We should take the lock, but since we did not leak the
# reference, nobody else can access it.
if mrls:
for m in mrls:
l.add_media(m)
l._instance = self
return l
    def audio_output_enumerate_devices(self):
        """Enumerate the defined audio output devices.
        @return: list of dicts {name:, description:, devices:}
        """
        r = []
        head = libvlc_audio_output_list_get(self)
        if head:
            i = head
            while i:
                i = i.contents
                # One dict per device index of this output module; 'd' is
                # the comprehension's loop variable (the device index).
                d = [{'id': libvlc_audio_output_device_id (self, i.name, d),
                      'longname': libvlc_audio_output_device_longname(self, i.name, d)}
                     for d in range(libvlc_audio_output_device_count (self, i.name))]
                r.append({'name': i.name, 'description': i.description, 'devices': d})
                i = i.next
            libvlc_audio_output_list_release(head)
        return r
def audio_filter_list_get(self):
"""Returns a list of available audio filters.
"""
return module_description_list(libvlc_audio_filter_list_get(self))
def video_filter_list_get(self):
"""Returns a list of available video filters.
"""
return module_description_list(libvlc_video_filter_list_get(self))
    def release(self):
        '''Decrement the reference count of a libvlc instance, and destroy it
        if it reaches zero.
        '''
        return libvlc_release(self)
    def retain(self):
        '''Increments the reference count of a libvlc instance.
        The initial reference count is 1 after L{new}() returns.
        '''
        return libvlc_retain(self)
    def add_intf(self, name):
        '''Try to start a user interface for the libvlc instance.
        @param name: interface name, or NULL for default.
        @return: 0 on success, -1 on error.
        '''
        return libvlc_add_intf(self, str_to_bytes(name))
    def set_user_agent(self, name, http):
        '''Sets the application name. LibVLC passes this as the user agent string
        when a protocol requires it.
        @param name: human-readable application name, e.g. "FooBar player 1.2.3".
        @param http: HTTP User Agent, e.g. "FooBar/1.2.3 Python/2.6.0".
        @version: LibVLC 1.1.1 or later.
        '''
        return libvlc_set_user_agent(self, str_to_bytes(name), str_to_bytes(http))
    def set_app_id(self, id, version, icon):
        '''Sets some meta-information about the application.
        See also L{set_user_agent}().
        @param id: Java-style application identifier, e.g. "com.acme.foobar".
        @param version: application version numbers, e.g. "1.2.3".
        @param icon: application icon name, e.g. "foobar".
        @version: LibVLC 2.1.0 or later.
        '''
        return libvlc_set_app_id(self, str_to_bytes(id), str_to_bytes(version), str_to_bytes(icon))
    def log_unset(self):
        '''Unsets the logging callback for a LibVLC instance. This is rarely needed:
        the callback is implicitly unset when the instance is destroyed.
        This function will wait for any pending callbacks invocation to complete
        (causing a deadlock if called from within the callback).
        @version: LibVLC 2.1.0 or later.
        '''
        return libvlc_log_unset(self)
    def log_set(self, data, p_instance):
        '''Sets the logging callback for a LibVLC instance.
        This function is thread-safe: it will wait for any pending callbacks
        invocation to complete.
        @param data: opaque data pointer for the callback function @note Some log messages (especially debug) are emitted by LibVLC while is being initialized. These messages cannot be captured with this interface. @warning A deadlock may occur if this function is called from the callback.
        @param p_instance: libvlc instance.
        @version: LibVLC 2.1.0 or later.
        '''
        # NOTE(review): the generated parameter names look shifted relative
        # to the native libvlc_log_set(instance, cb, data) signature --
        # confirm the argument mapping against the libvlc_log_set binding.
        return libvlc_log_set(self, data, p_instance)
    def log_set_file(self, stream):
        '''Sets up logging to a file.
        @param stream: FILE pointer opened for writing (the FILE pointer must remain valid until L{log_unset}()).
        @version: LibVLC 2.1.0 or later.
        '''
        return libvlc_log_set_file(self, stream)
    def media_new_location(self, psz_mrl):
        '''Create a media with a certain given media resource location,
        for instance a valid URL.
        @note: To refer to a local file with this function,
        the file://... URI syntax B{must} be used (see IETF RFC3986).
        We recommend using L{media_new_path}() instead when dealing with
        local files.
        See L{media_release}.
        @param psz_mrl: the media location.
        @return: the newly created media or NULL on error.
        '''
        return libvlc_media_new_location(self, str_to_bytes(psz_mrl))
    def media_new_path(self, path):
        '''Create a media for a certain file path.
        See L{media_release}.
        @param path: local filesystem path.
        @return: the newly created media or NULL on error.
        '''
        return libvlc_media_new_path(self, str_to_bytes(path))
    def media_new_fd(self, fd):
        '''Create a media for an already open file descriptor.
        The file descriptor shall be open for reading (or reading and writing).
        Regular file descriptors, pipe read descriptors and character device
        descriptors (including TTYs) are supported on all platforms.
        Block device descriptors are supported where available.
        Directory descriptors are supported on systems that provide fdopendir().
        Sockets are supported on all platforms where they are file descriptors,
        i.e. all except Windows.
        @note: This library will B{not} automatically close the file descriptor
        under any circumstance. Nevertheless, a file descriptor can usually only be
        rendered once in a media player. To render it a second time, the file
        descriptor should probably be rewound to the beginning with lseek().
        See L{media_release}.
        @param fd: open file descriptor.
        @return: the newly created media or NULL on error.
        @version: LibVLC 1.1.5 and later.
        '''
        return libvlc_media_new_fd(self, fd)
    def media_new_callbacks(self, open_cb, read_cb, seek_cb, close_cb, opaque):
        '''Create a media with custom callbacks to read the data from.
        @param open_cb: callback to open the custom bitstream input media.
        @param read_cb: callback to read data (must not be NULL).
        @param seek_cb: callback to seek, or NULL if seeking is not supported.
        @param close_cb: callback to close the media, or NULL if unnecessary.
        @param opaque: data pointer for the open callback.
        @return: the newly created media or NULL on error @note If open_cb is NULL, the opaque pointer will be passed to read_cb, seek_cb and close_cb, and the stream size will be treated as unknown. @note The callbacks may be called asynchronously (from another thread). A single stream instance need not be reentrant. However the open_cb needs to be reentrant if the media is used by multiple player instances. @warning The callbacks may be used until all or any player instances that were supplied the media item are stopped. See L{media_release}.
        @version: LibVLC 3.0.0 and later.
        '''
        return libvlc_media_new_callbacks(self, open_cb, read_cb, seek_cb, close_cb, opaque)
    def media_new_as_node(self, psz_name):
        '''Create a media as an empty node with a given name.
        See L{media_release}.
        @param psz_name: the name of the node.
        @return: the new empty media or NULL on error.
        '''
        return libvlc_media_new_as_node(self, str_to_bytes(psz_name))
    def media_discoverer_new(self, psz_name):
        '''Create a media discoverer object by name.
        After this object is created, you should attach to events in order to be
        notified of the discoverer state.
        You should also attach to media_list events in order to be notified of new
        items discovered.
        You need to call L{media_discoverer_start}() in order to start the
        discovery.
        See L{media_discoverer_media_list}
        See L{media_discoverer_event_manager}
        See L{media_discoverer_start}.
        @param psz_name: service name.
        @return: media discover object or NULL in case of error.
        @version: LibVLC 3.0.0 or later.
        '''
        return libvlc_media_discoverer_new(self, str_to_bytes(psz_name))
    def media_library_new(self):
        '''Create an new Media Library object.
        @return: a new object or NULL on error.
        '''
        return libvlc_media_library_new(self)
    def audio_output_list_get(self):
        '''Gets the list of available audio output modules.
        @return: list of available audio outputs. It must be freed with
        L{audio_output_list_release}(). In case of error, NULL is returned.
        '''
        return libvlc_audio_output_list_get(self)
    def audio_output_device_list_get(self, aout):
        '''Gets a list of audio output devices for a given audio output module,
        See L{audio_output_device_set}().
        @note: Not all audio outputs support this. In particular, an empty (NULL)
        list of devices does B{not} imply that the specified audio output does
        not work.
        @note: The list might not be exhaustive.
        @warning: Some audio output devices in the list might not actually work in
        some circumstances. By default, it is recommended to not specify any
        explicit audio device.
        @param aout: audio output name (as returned by L{audio_output_list_get}()).
        @return: A NULL-terminated linked list of potential audio output devices. It must be freed with L{audio_output_device_list_release}().
        @version: LibVLC 2.1.0 or later.
        '''
        return libvlc_audio_output_device_list_get(self, str_to_bytes(aout))
    # --- VLM (VideoLAN Manager) wrappers: thin pass-throughs to the
    # libvlc_vlm_* functions; all names are converted to bytes first. ---
    def vlm_release(self):
        '''Release the vlm instance related to the given L{Instance}.
        '''
        return libvlc_vlm_release(self)
    def vlm_add_broadcast(self, psz_name, psz_input, psz_output, i_options, ppsz_options, b_enabled, b_loop):
        '''Add a broadcast, with one input.
        @param psz_name: the name of the new broadcast.
        @param psz_input: the input MRL.
        @param psz_output: the output MRL (the parameter to the "sout" variable).
        @param i_options: number of additional options.
        @param ppsz_options: additional options.
        @param b_enabled: boolean for enabling the new broadcast.
        @param b_loop: Should this broadcast be played in loop ?
        @return: 0 on success, -1 on error.
        '''
        return libvlc_vlm_add_broadcast(self, str_to_bytes(psz_name), str_to_bytes(psz_input), str_to_bytes(psz_output), i_options, ppsz_options, b_enabled, b_loop)
    def vlm_add_vod(self, psz_name, psz_input, i_options, ppsz_options, b_enabled, psz_mux):
        '''Add a vod, with one input.
        @param psz_name: the name of the new vod media.
        @param psz_input: the input MRL.
        @param i_options: number of additional options.
        @param ppsz_options: additional options.
        @param b_enabled: boolean for enabling the new vod.
        @param psz_mux: the muxer of the vod media.
        @return: 0 on success, -1 on error.
        '''
        return libvlc_vlm_add_vod(self, str_to_bytes(psz_name), str_to_bytes(psz_input), i_options, ppsz_options, b_enabled, str_to_bytes(psz_mux))
    def vlm_del_media(self, psz_name):
        '''Delete a media (VOD or broadcast).
        @param psz_name: the media to delete.
        @return: 0 on success, -1 on error.
        '''
        return libvlc_vlm_del_media(self, str_to_bytes(psz_name))
    def vlm_set_enabled(self, psz_name, b_enabled):
        '''Enable or disable a media (VOD or broadcast).
        @param psz_name: the media to work on.
        @param b_enabled: the new status.
        @return: 0 on success, -1 on error.
        '''
        return libvlc_vlm_set_enabled(self, str_to_bytes(psz_name), b_enabled)
    def vlm_set_output(self, psz_name, psz_output):
        '''Set the output for a media.
        @param psz_name: the media to work on.
        @param psz_output: the output MRL (the parameter to the "sout" variable).
        @return: 0 on success, -1 on error.
        '''
        return libvlc_vlm_set_output(self, str_to_bytes(psz_name), str_to_bytes(psz_output))
    def vlm_set_input(self, psz_name, psz_input):
        '''Set a media's input MRL. This will delete all existing inputs and
        add the specified one.
        @param psz_name: the media to work on.
        @param psz_input: the input MRL.
        @return: 0 on success, -1 on error.
        '''
        return libvlc_vlm_set_input(self, str_to_bytes(psz_name), str_to_bytes(psz_input))
    def vlm_add_input(self, psz_name, psz_input):
        '''Add a media's input MRL. This will add the specified one.
        @param psz_name: the media to work on.
        @param psz_input: the input MRL.
        @return: 0 on success, -1 on error.
        '''
        return libvlc_vlm_add_input(self, str_to_bytes(psz_name), str_to_bytes(psz_input))
    def vlm_set_loop(self, psz_name, b_loop):
        '''Set a media's loop status.
        @param psz_name: the media to work on.
        @param b_loop: the new status.
        @return: 0 on success, -1 on error.
        '''
        return libvlc_vlm_set_loop(self, str_to_bytes(psz_name), b_loop)
    def vlm_set_mux(self, psz_name, psz_mux):
        '''Set a media's vod muxer.
        @param psz_name: the media to work on.
        @param psz_mux: the new muxer.
        @return: 0 on success, -1 on error.
        '''
        return libvlc_vlm_set_mux(self, str_to_bytes(psz_name), str_to_bytes(psz_mux))
    def vlm_change_media(self, psz_name, psz_input, psz_output, i_options, ppsz_options, b_enabled, b_loop):
        '''Edit the parameters of a media. This will delete all existing inputs and
        add the specified one.
        @param psz_name: the name of the new broadcast.
        @param psz_input: the input MRL.
        @param psz_output: the output MRL (the parameter to the "sout" variable).
        @param i_options: number of additional options.
        @param ppsz_options: additional options.
        @param b_enabled: boolean for enabling the new broadcast.
        @param b_loop: Should this broadcast be played in loop ?
        @return: 0 on success, -1 on error.
        '''
        return libvlc_vlm_change_media(self, str_to_bytes(psz_name), str_to_bytes(psz_input), str_to_bytes(psz_output), i_options, ppsz_options, b_enabled, b_loop)
    def vlm_play_media(self, psz_name):
        '''Play the named broadcast.
        @param psz_name: the name of the broadcast.
        @return: 0 on success, -1 on error.
        '''
        return libvlc_vlm_play_media(self, str_to_bytes(psz_name))
def vlm_stop_media(self, psz_name):
'''Stop the named broadcast.
@param psz_name: the name of the broadcast.
@return: 0 on success, -1 on error.
'''
return libvlc_vlm_stop_media(self, str_to_bytes(psz_name))
def vlm_pause_media(self, psz_name):
'''Pause the named broadcast.
@param psz_name: the name of the broadcast.
@return: 0 on success, -1 on error.
'''
return libvlc_vlm_pause_media(self, str_to_bytes(psz_name))
def vlm_seek_media(self, psz_name, f_percentage):
'''Seek in the named broadcast.
@param psz_name: the name of the broadcast.
@param f_percentage: the percentage to seek to.
@return: 0 on success, -1 on error.
'''
return libvlc_vlm_seek_media(self, str_to_bytes(psz_name), f_percentage)
def vlm_show_media(self, psz_name):
'''Return information about the named media as a JSON
string representation.
This function is mainly intended for debugging use,
if you want programmatic access to the state of
a vlm_media_instance_t, please use the corresponding
libvlc_vlm_get_media_instance_xxx -functions.
Currently there are no such functions available for
vlm_media_t though.
@param psz_name: the name of the media, if the name is an empty string, all media is described.
@return: string with information about named media, or NULL on error.
'''
return libvlc_vlm_show_media(self, str_to_bytes(psz_name))
def vlm_get_media_instance_position(self, psz_name, i_instance):
'''Get vlm_media instance position by name or instance id.
@param psz_name: name of vlm media instance.
@param i_instance: instance id.
@return: position as float or -1. on error.
'''
return libvlc_vlm_get_media_instance_position(self, str_to_bytes(psz_name), i_instance)
def vlm_get_media_instance_time(self, psz_name, i_instance):
'''Get vlm_media instance time by name or instance id.
@param psz_name: name of vlm media instance.
@param i_instance: instance id.
@return: time as integer or -1 on error.
'''
return libvlc_vlm_get_media_instance_time(self, str_to_bytes(psz_name), i_instance)
def vlm_get_media_instance_length(self, psz_name, i_instance):
'''Get vlm_media instance length by name or instance id.
@param psz_name: name of vlm media instance.
@param i_instance: instance id.
@return: length of media item or -1 on error.
'''
return libvlc_vlm_get_media_instance_length(self, str_to_bytes(psz_name), i_instance)
def vlm_get_media_instance_rate(self, psz_name, i_instance):
'''Get vlm_media instance playback rate by name or instance id.
@param psz_name: name of vlm media instance.
@param i_instance: instance id.
@return: playback rate or -1 on error.
'''
return libvlc_vlm_get_media_instance_rate(self, str_to_bytes(psz_name), i_instance)
def vlm_get_media_instance_title(self, psz_name, i_instance):
'''Get vlm_media instance title number by name or instance id.
@param psz_name: name of vlm media instance.
@param i_instance: instance id.
@return: title as number or -1 on error.
@bug: will always return 0.
'''
return libvlc_vlm_get_media_instance_title(self, str_to_bytes(psz_name), i_instance)
def vlm_get_media_instance_chapter(self, psz_name, i_instance):
'''Get vlm_media instance chapter number by name or instance id.
@param psz_name: name of vlm media instance.
@param i_instance: instance id.
@return: chapter as number or -1 on error.
@bug: will always return 0.
'''
return libvlc_vlm_get_media_instance_chapter(self, str_to_bytes(psz_name), i_instance)
def vlm_get_media_instance_seekable(self, psz_name, i_instance):
'''Is libvlc instance seekable ?
@param psz_name: name of vlm media instance.
@param i_instance: instance id.
@return: 1 if seekable, 0 if not, -1 if media does not exist.
@bug: will always return 0.
'''
return libvlc_vlm_get_media_instance_seekable(self, str_to_bytes(psz_name), i_instance)
    @memoize_parameterless
    def vlm_get_event_manager(self):
        '''Get libvlc_event_manager from a vlm media.
        The p_event_manager is immutable, so you don't have to hold the lock.
        The result is memoized by the decorator, so repeated calls return the
        same wrapper object.
        @return: libvlc_event_manager.
        '''
        return libvlc_vlm_get_event_manager(self)
class Media(_Ctype):
    '''Create a new Media instance.

    Usage: Media(MRL, *options)

    See vlc.Instance.media_new documentation for details.
    '''

    def __new__(cls, *args):
        if args:
            i = args[0]
            if isinstance(i, _Ints):
                return _Constructor(cls, i)
            if isinstance(i, Instance):
                return i.media_new(*args[1:])

        o = get_default_instance().media_new(*args)
        return o

    def get_instance(self):
        return getattr(self, '_instance', None)

    def add_options(self, *options):
        """Add a list of options to the media.

        Options must be written without the double-dash. Warning: most
        audio and video options, such as text renderer, have no
        effects on an individual media. These options must be set at
        the vlc.Instance or vlc.MediaPlayer instanciation.

        @param options: optional media option=value strings
        """
        for o in options:
            self.add_option(o)

    def tracks_get(self):
        """Get media descriptor's elementary streams description
        Note, you need to call L{parse}() or play the media at least once
        before calling this function.
        Not doing this will result in an empty array.
        The result must be freed with L{tracks_release}.
        @version: LibVLC 2.1.0 and later.
        """
        mediaTrack_pp = ctypes.POINTER(MediaTrack)()
        n = libvlc_media_tracks_get(self, ctypes.byref(mediaTrack_pp))
        # Fix: the original read ``cast(ctypes.mediaTrack_pp, ...)`` which
        # raises AttributeError at runtime (``mediaTrack_pp`` is a local, not
        # a ctypes attribute).  Also use the qualified ``ctypes.byref`` /
        # ``ctypes.cast`` since only ``import ctypes`` is visible at module
        # level.
        info = ctypes.cast(mediaTrack_pp, ctypes.POINTER(ctypes.POINTER(MediaTrack) * n))
        return info

    def add_option(self, psz_options):
        '''Add an option to the media.
        This option will be used to determine how the media_player will
        read the media. This allows to use VLC's advanced
        reading/streaming options on a per-media basis.
        @note: The options are listed in 'vlc --long-help' from the command line,
        e.g. "-sout-all". Keep in mind that available options and their semantics
        vary across LibVLC versions and builds.
        @warning: Not all options affects L{Media} objects:
        Specifically, due to architectural issues most audio and video options,
        such as text renderer options, have no effects on an individual media.
        These options must be set through L{new}() instead.
        @param psz_options: the options (as a string).
        '''
        return libvlc_media_add_option(self, str_to_bytes(psz_options))

    def add_option_flag(self, psz_options, i_flags):
        '''Add an option to the media with configurable flags.
        This option will be used to determine how the media_player will
        read the media. This allows to use VLC's advanced
        reading/streaming options on a per-media basis.
        The options are detailed in vlc --long-help, for instance
        "--sout-all". Note that all options are not usable on medias:
        specifically, due to architectural issues, video-related options
        such as text renderer options cannot be set on a single media. They
        must be set on the whole libvlc instance instead.
        @param psz_options: the options (as a string).
        @param i_flags: the flags for this option.
        '''
        return libvlc_media_add_option_flag(self, str_to_bytes(psz_options), i_flags)

    def retain(self):
        '''Retain a reference to a media descriptor object (libvlc_media_t). Use
        L{release}() to decrement the reference count of a
        media descriptor object.
        '''
        return libvlc_media_retain(self)

    def release(self):
        '''Decrement the reference count of a media descriptor object. If the
        reference count is 0, then L{release}() will release the
        media descriptor object. It will send out an libvlc_MediaFreed event
        to all listeners. If the media descriptor object has been released it
        should not be used again.
        '''
        return libvlc_media_release(self)

    def get_mrl(self):
        '''Get the media resource locator (mrl) from a media descriptor object.
        @return: string with mrl of media descriptor object.
        '''
        return libvlc_media_get_mrl(self)

    def duplicate(self):
        '''Duplicate a media descriptor object.
        '''
        return libvlc_media_duplicate(self)

    def get_meta(self, e_meta):
        '''Read the meta of the media.
        If the media has not yet been parsed this will return NULL.
        This methods automatically calls L{parse_async}(), so after calling
        it you may receive a libvlc_MediaMetaChanged event. If you prefer a synchronous
        version ensure that you call L{parse}() before get_meta().
        See L{parse}
        See L{parse_async}
        See libvlc_MediaMetaChanged.
        @param e_meta: the meta to read.
        @return: the media's meta.
        '''
        return libvlc_media_get_meta(self, e_meta)

    def set_meta(self, e_meta, psz_value):
        '''Set the meta of the media (this function will not save the meta, call
        L{save_meta} in order to save the meta).
        @param e_meta: the meta to write.
        @param psz_value: the media's meta.
        '''
        return libvlc_media_set_meta(self, e_meta, str_to_bytes(psz_value))

    def save_meta(self):
        '''Save the meta previously set.
        @return: true if the write operation was successful.
        '''
        return libvlc_media_save_meta(self)

    def get_state(self):
        '''Get current state of media descriptor object. Possible media states
        are defined in libvlc_structures.c ( libvlc_NothingSpecial=0,
        libvlc_Opening, libvlc_Buffering, libvlc_Playing, libvlc_Paused,
        libvlc_Stopped, libvlc_Ended,
        libvlc_Error).
        See libvlc_state_t.
        @return: state of media descriptor object.
        '''
        return libvlc_media_get_state(self)

    def get_stats(self, p_stats):
        '''Get the current statistics about the media.
        @param p_stats:: structure that contain the statistics about the media (this structure must be allocated by the caller).
        @return: true if the statistics are available, false otherwise \libvlc_return_bool.
        '''
        return libvlc_media_get_stats(self, p_stats)

    def subitems(self):
        '''Get subitems of media descriptor object. This will increment
        the reference count of supplied media descriptor object. Use
        L{list_release}() to decrement the reference counting.
        @return: list of media descriptor subitems or NULL.
        '''
        return libvlc_media_subitems(self)

    @memoize_parameterless
    def event_manager(self):
        '''Get event manager from media descriptor object.
        NOTE: this function doesn't increment reference counting.
        @return: event manager object.
        '''
        return libvlc_media_event_manager(self)

    def get_duration(self):
        '''Get duration (in ms) of media descriptor object item.
        @return: duration of media item or -1 on error.
        '''
        return libvlc_media_get_duration(self)

    def parse(self):
        '''Parse a media.
        This fetches (local) art, meta data and tracks information.
        The method is synchronous.
        See L{parse_async}
        See L{get_meta}
        See libvlc_media_get_tracks_info.
        '''
        return libvlc_media_parse(self)

    def parse_async(self):
        '''Parse a media.
        This fetches (local) art, meta data and tracks information.
        The method is the asynchronous of L{parse}().
        To track when this is over you can listen to libvlc_MediaParsedChanged
        event. However if the media was already parsed you will not receive this
        event.
        See L{parse}
        See libvlc_MediaParsedChanged
        See L{get_meta}
        See libvlc_media_get_tracks_info.
        '''
        return libvlc_media_parse_async(self)

    def parse_with_options(self, parse_flag):
        '''Parse the media asynchronously with options.
        This fetches (local or network) art, meta data and/or tracks information.
        This method is the extended version of L{parse_async}().
        To track when this is over you can listen to libvlc_MediaParsedChanged
        event. However if this functions returns an error, you will not receive this
        event.
        It uses a flag to specify parse options (see libvlc_media_parse_flag_t). All
        these flags can be combined. By default, media is parsed if it's a local
        file.
        See libvlc_MediaParsedChanged
        See L{get_meta}
        See L{tracks_get}
        See libvlc_media_parse_flag_t.
        @param parse_flag: parse options:
        @return: -1 in case of error, 0 otherwise.
        @version: LibVLC 3.0.0 or later.
        '''
        return libvlc_media_parse_with_options(self, parse_flag)

    def is_parsed(self):
        '''Get Parsed status for media descriptor object.
        See libvlc_MediaParsedChanged.
        @return: true if media object has been parsed otherwise it returns false \libvlc_return_bool.
        '''
        return libvlc_media_is_parsed(self)

    def set_user_data(self, p_new_user_data):
        '''Sets media descriptor's user_data. user_data is specialized data
        accessed by the host application, VLC.framework uses it as a pointer to
        an native object that references a L{Media} pointer.
        @param p_new_user_data: pointer to user data.
        '''
        return libvlc_media_set_user_data(self, p_new_user_data)

    def get_user_data(self):
        '''Get media descriptor's user_data. user_data is specialized data
        accessed by the host application, VLC.framework uses it as a pointer to
        an native object that references a L{Media} pointer.
        '''
        return libvlc_media_get_user_data(self)

    def get_type(self):
        '''Get the media type of the media descriptor object.
        @return: media type.
        @version: LibVLC 3.0.0 and later. See libvlc_media_type_t.
        '''
        return libvlc_media_get_type(self)

    def player_new_from_media(self):
        '''Create a Media Player object from a Media.
        @return: a new media player object, or NULL on error.
        '''
        return libvlc_media_player_new_from_media(self)
class MediaDiscoverer(_Ctype):
    '''N/A
    '''
    # Thin ctypes wrapper around libvlc_media_discoverer_t; every method
    # simply delegates to the corresponding libvlc C function.
    def __new__(cls, ptr=_internal_guard):
        '''(INTERNAL) ctypes wrapper constructor.
        '''
        return _Constructor(cls, ptr)
    def start(self):
        '''Start media discovery.
        To stop it, call L{stop}() or
        L{release}() directly.
        See L{stop}.
        @return: -1 in case of error, 0 otherwise.
        @version: LibVLC 3.0.0 or later.
        '''
        return libvlc_media_discoverer_start(self)
    def stop(self):
        '''Stop media discovery.
        See L{start}.
        @version: LibVLC 3.0.0 or later.
        '''
        return libvlc_media_discoverer_stop(self)
    def release(self):
        '''Release media discover object. If the reference count reaches 0, then
        the object will be released.
        '''
        return libvlc_media_discoverer_release(self)
    def localized_name(self):
        '''Get media service discover object its localized name.
        @return: localized name.
        '''
        return libvlc_media_discoverer_localized_name(self)
    def media_list(self):
        '''Get media service discover media list.
        @return: list of media items.
        '''
        return libvlc_media_discoverer_media_list(self)
    @memoize_parameterless
    def event_manager(self):
        '''Get event manager from media service discover object.
        The result is memoized, so repeated calls return the same object.
        @return: event manager object.
        '''
        return libvlc_media_discoverer_event_manager(self)
    def is_running(self):
        '''Query if media service discover object is running.
        @return: true if running, false if not \libvlc_return_bool.
        '''
        return libvlc_media_discoverer_is_running(self)
class MediaLibrary(_Ctype):
    '''N/A
    '''
    # Thin ctypes wrapper around libvlc_media_library_t; every method simply
    # delegates to the corresponding libvlc C function.
    def __new__(cls, ptr=_internal_guard):
        '''(INTERNAL) ctypes wrapper constructor.
        '''
        return _Constructor(cls, ptr)
    def release(self):
        '''Release media library object. This functions decrements the
        reference count of the media library object. If it reaches 0,
        then the object will be released.
        '''
        return libvlc_media_library_release(self)
    def retain(self):
        '''Retain a reference to a media library object. This function will
        increment the reference counting for this object. Use
        L{release}() to decrement the reference count.
        '''
        return libvlc_media_library_retain(self)
    def load(self):
        '''Load media library.
        @return: 0 on success, -1 on error.
        '''
        return libvlc_media_library_load(self)
    def media_list(self):
        '''Get media library subitems.
        @return: media list subitems.
        '''
        return libvlc_media_library_media_list(self)
class MediaList(_Ctype):
    '''Create a new MediaList instance.
    Usage: MediaList(list_of_MRLs)
    See vlc.Instance.media_list_new documentation for details.
    '''
    def __new__(cls, *args):
        # Dispatch on the first argument: an int is a raw libvlc pointer,
        # an Instance delegates to its factory, otherwise use the default
        # module-level Instance.
        if args:
            i = args[0]
            if isinstance(i, _Ints):
                return _Constructor(cls, i)
            if isinstance(i, Instance):
                return i.media_list_new(*args[1:])
        o = get_default_instance().media_list_new(*args)
        return o
    def get_instance(self):
        # Returns the owning Instance, or None if this list was wrapped from
        # a raw pointer and no instance was attached.
        return getattr(self, '_instance', None)
    def add_media(self, mrl):
        """Add media instance to media list.
        The L{lock} should be held upon entering this function.
        @param mrl: a media instance or a MRL.
        @return: 0 on success, -1 if the media list is read-only.
        """
        # NOTE: 'basestring' is presumably provided by the module's
        # Python 2/3 compatibility shim (not visible in this chunk) — confirm.
        if isinstance(mrl, basestring):
            mrl = (self.get_instance() or get_default_instance()).media_new(mrl)
        return libvlc_media_list_add_media(self, mrl)
    def release(self):
        '''Release media list created with L{new}().
        '''
        return libvlc_media_list_release(self)
    def retain(self):
        '''Retain reference to a media list.
        '''
        return libvlc_media_list_retain(self)
    def set_media(self, p_md):
        '''Associate media instance with this media list instance.
        If another media instance was present it will be released.
        The L{lock} should NOT be held upon entering this function.
        @param p_md: media instance to add.
        '''
        return libvlc_media_list_set_media(self, p_md)
    def media(self):
        '''Get media instance from this media list instance. This action will increase
        the refcount on the media instance.
        The L{lock} should NOT be held upon entering this function.
        @return: media instance.
        '''
        return libvlc_media_list_media(self)
    def insert_media(self, p_md, i_pos):
        '''Insert media instance in media list on a position
        The L{lock} should be held upon entering this function.
        @param p_md: a media instance.
        @param i_pos: position in array where to insert.
        @return: 0 on success, -1 if the media list is read-only.
        '''
        return libvlc_media_list_insert_media(self, p_md, i_pos)
    def remove_index(self, i_pos):
        '''Remove media instance from media list on a position
        The L{lock} should be held upon entering this function.
        @param i_pos: position in array where to insert.
        @return: 0 on success, -1 if the list is read-only or the item was not found.
        '''
        return libvlc_media_list_remove_index(self, i_pos)
    def count(self):
        '''Get count on media list items
        The L{lock} should be held upon entering this function.
        @return: number of items in media list.
        '''
        return libvlc_media_list_count(self)
    def __len__(self):
        # Enables len(media_list); same C call as count().
        return libvlc_media_list_count(self)
    def item_at_index(self, i_pos):
        '''List media instance in media list at a position
        The L{lock} should be held upon entering this function.
        @param i_pos: position in array where to insert.
        @return: media instance at position i_pos, or NULL if not found. In case of success, L{media_retain}() is called to increase the refcount on the media.
        '''
        return libvlc_media_list_item_at_index(self, i_pos)
    def __getitem__(self, i):
        # Enables media_list[i]; same C call as item_at_index().
        return libvlc_media_list_item_at_index(self, i)
    def __iter__(self):
        # Sequential index-based iteration over the list items.
        for i in range(len(self)):
            yield self[i]
    def index_of_item(self, p_md):
        '''Find index position of List media instance in media list.
        Warning: the function will return the first matched position.
        The L{lock} should be held upon entering this function.
        @param p_md: media instance.
        @return: position of media instance or -1 if media not found.
        '''
        return libvlc_media_list_index_of_item(self, p_md)
    def is_readonly(self):
        '''This indicates if this media list is read-only from a user point of view.
        @return: 1 on readonly, 0 on readwrite \libvlc_return_bool.
        '''
        return libvlc_media_list_is_readonly(self)
    def lock(self):
        '''Get lock on media list items.
        '''
        return libvlc_media_list_lock(self)
    def unlock(self):
        '''Release lock on media list items
        The L{lock} should be held upon entering this function.
        '''
        return libvlc_media_list_unlock(self)
    @memoize_parameterless
    def event_manager(self):
        '''Get libvlc_event_manager from this media list instance.
        The p_event_manager is immutable, so you don't have to hold the lock.
        @return: libvlc_event_manager.
        '''
        return libvlc_media_list_event_manager(self)
class MediaListPlayer(_Ctype):
    '''Create a new MediaListPlayer instance.
    It may take as parameter either:
    - a vlc.Instance
    - nothing
    '''
    def __new__(cls, arg=None):
        # Dispatch on the argument: None builds on the default Instance, an
        # Instance delegates to its factory, an int wraps a raw pointer.
        if arg is None:
            i = get_default_instance()
        elif isinstance(arg, Instance):
            i = arg
        elif isinstance(arg, _Ints):
            return _Constructor(cls, arg)
        else:
            raise TypeError('MediaListPlayer %r' % (arg,))
        return i.media_list_player_new()
    def get_instance(self):
        """Return the associated Instance.
        """
        return self._instance #PYCHOK expected
    def release(self):
        '''Release a media_list_player after use
        Decrement the reference count of a media player object. If the
        reference count is 0, then L{release}() will
        release the media player object. If the media player object
        has been released, then it should not be used again.
        '''
        return libvlc_media_list_player_release(self)
    def retain(self):
        '''Retain a reference to a media player list object. Use
        L{release}() to decrement reference count.
        '''
        return libvlc_media_list_player_retain(self)
    @memoize_parameterless
    def event_manager(self):
        '''Return the event manager of this media_list_player.
        @return: the event manager.
        '''
        return libvlc_media_list_player_event_manager(self)
    def set_media_player(self, p_mi):
        '''Replace media player in media_list_player with this instance.
        @param p_mi: media player instance.
        '''
        return libvlc_media_list_player_set_media_player(self, p_mi)
    def set_media_list(self, p_mlist):
        '''Set the media list associated with the player.
        @param p_mlist: list of media.
        '''
        return libvlc_media_list_player_set_media_list(self, p_mlist)
    def play(self):
        '''Play media list.
        '''
        return libvlc_media_list_player_play(self)
    def pause(self):
        '''Toggle pause (or resume) media list.
        '''
        return libvlc_media_list_player_pause(self)
    def is_playing(self):
        '''Is media list playing?
        @return: true for playing and false for not playing \libvlc_return_bool.
        '''
        return libvlc_media_list_player_is_playing(self)
    def get_state(self):
        '''Get current libvlc_state of media list player.
        @return: libvlc_state_t for media list player.
        '''
        return libvlc_media_list_player_get_state(self)
    def play_item_at_index(self, i_index):
        '''Play media list item at position index.
        @param i_index: index in media list to play.
        @return: 0 upon success -1 if the item wasn't found.
        '''
        return libvlc_media_list_player_play_item_at_index(self, i_index)
    def __getitem__(self, i):
        # Indexing plays the item; it returns the 0/-1 status code, not an item.
        return libvlc_media_list_player_play_item_at_index(self, i)
    def __iter__(self):
        # NOTE(review): this relies on len(self), but MediaListPlayer defines
        # no __len__ in this chunk — iterating would raise TypeError unless
        # __len__ is provided elsewhere. Confirm before relying on iteration.
        for i in range(len(self)):
            yield self[i]
    def play_item(self, p_md):
        '''Play the given media item.
        @param p_md: the media instance.
        @return: 0 upon success, -1 if the media is not part of the media list.
        '''
        return libvlc_media_list_player_play_item(self, p_md)
    def stop(self):
        '''Stop playing media list.
        '''
        return libvlc_media_list_player_stop(self)
    def next(self):
        '''Play next item from media list.
        @return: 0 upon success -1 if there is no next item.
        '''
        return libvlc_media_list_player_next(self)
    def previous(self):
        '''Play previous item from media list.
        @return: 0 upon success -1 if there is no previous item.
        '''
        return libvlc_media_list_player_previous(self)
    def set_playback_mode(self, e_mode):
        '''Sets the playback mode for the playlist.
        @param e_mode: playback mode specification.
        '''
        return libvlc_media_list_player_set_playback_mode(self, e_mode)
class MediaPlayer(_Ctype):
'''Create a new MediaPlayer instance.
It may take as parameter either:
- a string (media URI), options... In this case, a vlc.Instance will be created.
- a vlc.Instance, a string (media URI), options...
'''
    def __new__(cls, *args):
        # Dispatch on the first argument: an int wraps a raw libvlc pointer;
        # an Instance is used as the factory; otherwise a default Instance is
        # created implicitly. Remaining args (MRL, options) become the media.
        if len(args) == 1 and isinstance(args[0], _Ints):
            return _Constructor(cls, args[0])
        if args and isinstance(args[0], Instance):
            instance = args[0]
            args = args[1:]
        else:
            instance = get_default_instance()
        o = instance.media_player_new()
        if args:
            o.set_media(instance.media_new(*args))
        return o
    def get_instance(self):
        """Return the associated Instance.
        """
        # Attribute is attached when the player is created via Instance.
        return self._instance #PYCHOK expected
def set_mrl(self, mrl, *options):
"""Set the MRL to play.
Warning: most audio and video options, such as text renderer,
have no effects on an individual media. These options must be
set at the vlc.Instance or vlc.MediaPlayer instanciation.
@param mrl: The MRL
@param options: optional media option=value strings
@return: the Media object
"""
m = self.get_instance().media_new(mrl, *options)
self.set_media(m)
return m
def video_get_spu_description(self):
"""Get the description of available video subtitles.
"""
return track_description_list(libvlc_video_get_spu_description(self))
def video_get_title_description(self):
"""Get the description of available titles.
"""
return track_description_list(libvlc_video_get_title_description(self))
def video_get_chapter_description(self, title):
"""Get the description of available chapters for specific title.
@param title: selected title (int)
"""
return track_description_list(libvlc_video_get_chapter_description(self, title))
def video_get_track_description(self):
"""Get the description of available video tracks.
"""
return track_description_list(libvlc_video_get_track_description(self))
def audio_get_track_description(self):
"""Get the description of available audio tracks.
"""
return track_description_list(libvlc_audio_get_track_description(self))
def video_get_size(self, num=0):
"""Get the video size in pixels as 2-tuple (width, height).
@param num: video number (default 0).
"""
r = libvlc_video_get_size(self, num)
if isinstance(r, tuple) and len(r) == 2:
return r
else:
raise VLCException('invalid video number (%s)' % (num,))
def set_hwnd(self, drawable):
"""Set a Win32/Win64 API window handle (HWND).
Specify where the media player should render its video
output. If LibVLC was built without Win32/Win64 API output
support, then this has no effects.
@param drawable: windows handle of the drawable.
"""
if not isinstance(drawable, ctypes.c_void_p):
drawable = ctypes.c_void_p(int(drawable))
libvlc_media_player_set_hwnd(self, drawable)
def video_get_width(self, num=0):
"""Get the width of a video in pixels.
@param num: video number (default 0).
"""
return self.video_get_size(num)[0]
def video_get_height(self, num=0):
"""Get the height of a video in pixels.
@param num: video number (default 0).
"""
return self.video_get_size(num)[1]
def video_get_cursor(self, num=0):
"""Get the mouse pointer coordinates over a video as 2-tuple (x, y).
Coordinates are expressed in terms of the decoded video resolution,
B{not} in terms of pixels on the screen/viewport. To get the
latter, you must query your windowing system directly.
Either coordinate may be negative or larger than the corresponding
size of the video, if the cursor is outside the rendering area.
@warning: The coordinates may be out-of-date if the pointer is not
located on the video rendering area. LibVLC does not track the
mouse pointer if the latter is outside the video widget.
@note: LibVLC does not support multiple mouse pointers (but does
support multiple input devices sharing the same pointer).
@param num: video number (default 0).
"""
r = libvlc_video_get_cursor(self, num)
if isinstance(r, tuple) and len(r) == 2:
return r
raise VLCException('invalid video number (%s)' % (num,))
    def release(self):
        '''Release a media_player after use
        Decrement the reference count of a media player object. If the
        reference count is 0, then L{release}() will
        release the media player object. If the media player object
        has been released, then it should not be used again.
        '''
        # Direct delegation to the libvlc C API.
        return libvlc_media_player_release(self)
    def retain(self):
        '''Retain a reference to a media player object. Use
        L{release}() to decrement reference count.
        '''
        # Direct delegation to the libvlc C API.
        return libvlc_media_player_retain(self)
    def set_media(self, p_md):
        '''Set the media that will be used by the media_player. If any,
        previous md will be released.
        @param p_md: the Media. Afterwards the p_md can be safely destroyed.
        '''
        # Direct delegation to the libvlc C API.
        return libvlc_media_player_set_media(self, p_md)
    def get_media(self):
        '''Get the media used by the media_player.
        @return: the media associated with p_mi, or NULL if no media is associated.
        '''
        # Direct delegation to the libvlc C API.
        return libvlc_media_player_get_media(self)
    @memoize_parameterless
    def event_manager(self):
        '''Get the Event Manager from which the media player send event.
        The result is memoized by the decorator, so repeated calls return the
        same wrapper object.
        @return: the event manager associated with p_mi.
        '''
        return libvlc_media_player_event_manager(self)
    def is_playing(self):
        '''is_playing.
        @return: 1 if the media player is playing, 0 otherwise \libvlc_return_bool.
        '''
        # Direct delegation to the libvlc C API.
        return libvlc_media_player_is_playing(self)
    def play(self):
        '''Play.
        @return: 0 if playback started (and was already started), or -1 on error.
        '''
        # Direct delegation to the libvlc C API.
        return libvlc_media_player_play(self)
    def set_pause(self, do_pause):
        '''Pause or resume (no effect if there is no media).
        @param do_pause: play/resume if zero, pause if non-zero.
        @version: LibVLC 1.1.1 or later.
        '''
        # Direct delegation to the libvlc C API.
        return libvlc_media_player_set_pause(self, do_pause)
    def pause(self):
        '''Toggle pause (no effect if there is no media).
        '''
        # Direct delegation to the libvlc C API.
        return libvlc_media_player_pause(self)
    def stop(self):
        '''Stop (no effect if there is no media).
        '''
        # Direct delegation to the libvlc C API.
        return libvlc_media_player_stop(self)
    def video_set_callbacks(self, lock, unlock, display, opaque):
        '''Set callbacks and private data to render decoded video to a custom area
        in memory.
        Use L{video_set_format}() or L{video_set_format_callbacks}()
        to configure the decoded format.
        @param lock: callback to lock video memory (must not be NULL).
        @param unlock: callback to unlock video memory (or NULL if not needed).
        @param display: callback to display video (or NULL if not needed).
        @param opaque: private pointer for the three callbacks (as first parameter).
        @version: LibVLC 1.1.1 or later.
        '''
        # Direct delegation; callbacks must be the ctypes callback types
        # declared elsewhere in this module.
        return libvlc_video_set_callbacks(self, lock, unlock, display, opaque)
def video_set_format(self, chroma, width, height, pitch):
'''Set decoded video chroma and dimensions.
This only works in combination with L{video_set_callbacks}(),
and is mutually exclusive with L{video_set_format_callbacks}().
@param chroma: a four-characters string identifying the chroma (e.g. "RV32" or "YUYV").
@param width: pixel width.
@param height: pixel height.
@param pitch: line pitch (in bytes).
@version: LibVLC 1.1.1 or later.
@bug: All pixel planes are expected to have the same pitch. To use the YCbCr color space with chrominance subsampling, consider using L{video_set_format_callbacks}() instead.
'''
return libvlc_video_set_format(self, str_to_bytes(chroma), width, height, pitch)
    def video_set_format_callbacks(self, setup, cleanup):
        '''Set decoded video chroma and dimensions. This only works in combination with
        L{video_set_callbacks}().
        @param setup: callback to select the video format (cannot be NULL).
        @param cleanup: callback to release any allocated resources (or NULL).
        @version: LibVLC 2.0.0 or later.
        '''
        # Direct delegation to the libvlc C API.
        return libvlc_video_set_format_callbacks(self, setup, cleanup)
    def set_nsobject(self, drawable):
        '''Set the NSView handler where the media player should render its video output.
        Use the vout called "macosx".
        The drawable is an NSObject that follow the VLCOpenGLVideoViewEmbedding
        protocol:
        @begincode
        \@protocol VLCOpenGLVideoViewEmbedding <NSObject>
        - (void)addVoutSubview:(NSView *)view;
        - (void)removeVoutSubview:(NSView *)view;
        \@end
        @endcode
        Or it can be an NSView object.
        If you want to use it along with Qt4 see the QMacCocoaViewContainer. Then
        the following code should work:
        @begincode
        NSView *video = [[NSView alloc] init];
        QMacCocoaViewContainer *container = new QMacCocoaViewContainer(video, parent);
        L{set_nsobject}(mp, video);
        [video release];
        @endcode
        You can find a live example in VLCVideoView in VLCKit.framework.
        @param drawable: the drawable that is either an NSView or an object following the VLCOpenGLVideoViewEmbedding protocol.
        '''
        # Direct delegation to the libvlc C API (macOS only).
        return libvlc_media_player_set_nsobject(self, drawable)
    def get_nsobject(self):
        '''Get the NSView handler previously set with L{set_nsobject}().
        @return: the NSView handler or 0 if none were set.
        '''
        return libvlc_media_player_get_nsobject(self)
def set_agl(self, drawable):
'''Set the agl handler where the media player should render its video output.
@param drawable: the agl handler.
'''
return libvlc_media_player_set_agl(self, drawable)
    def get_agl(self):
        '''Get the agl handler previously set with L{set_agl}().
        @return: the agl handler or 0 if none were set.
        '''
        return libvlc_media_player_get_agl(self)
def set_xwindow(self, drawable):
'''Set an X Window System drawable where the media player should render its
video output. If LibVLC was built without X11 output support, then this has
no effects.
The specified identifier must correspond to an existing Input/Output class
X11 window. Pixmaps are B{not} supported. The caller shall ensure that
the X11 server is the same as the one the VLC instance has been configured
with. This function must be called before video playback is started;
otherwise it will only take effect after playback stop and restart.
@param drawable: the ID of the X window.
'''
return libvlc_media_player_set_xwindow(self, drawable)
    def get_xwindow(self):
        '''Get the X Window System window identifier previously set with
        L{set_xwindow}(). Note that this will return the identifier
        even if VLC is not currently using it (for instance if it is playing an
        audio-only input).
        @return: an X window ID, or 0 if none were set.
        '''
        return libvlc_media_player_get_xwindow(self)
def get_hwnd(self):
'''Get the Windows API window handle (HWND) previously set with
L{set_hwnd}(). The handle will be returned even if LibVLC
is not currently outputting any video to it.
@return: a window handle or NULL if there are none.
'''
return libvlc_media_player_get_hwnd(self)
def audio_set_callbacks(self, play, pause, resume, flush, drain, opaque):
'''Set callbacks and private data for decoded audio.
Use L{audio_set_format}() or L{audio_set_format_callbacks}()
to configure the decoded audio format.
@param play: callback to play audio samples (must not be NULL).
@param pause: callback to pause playback (or NULL to ignore).
@param resume: callback to resume playback (or NULL to ignore).
@param flush: callback to flush audio buffers (or NULL to ignore).
@param drain: callback to drain audio buffers (or NULL to ignore).
@param opaque: private pointer for the audio callbacks (as first parameter).
@version: LibVLC 2.0.0 or later.
'''
return libvlc_audio_set_callbacks(self, play, pause, resume, flush, drain, opaque)
def audio_set_volume_callback(self, set_volume):
'''Set callbacks and private data for decoded audio. This only works in
combination with L{audio_set_callbacks}().
Use L{audio_set_format}() or L{audio_set_format_callbacks}()
to configure the decoded audio format.
@param set_volume: callback to apply audio volume, or NULL to apply volume in software.
@version: LibVLC 2.0.0 or later.
'''
return libvlc_audio_set_volume_callback(self, set_volume)
def audio_set_format_callbacks(self, setup, cleanup):
'''Set decoded audio format. This only works in combination with
L{audio_set_callbacks}().
@param setup: callback to select the audio format (cannot be NULL).
@param cleanup: callback to release any allocated resources (or NULL).
@version: LibVLC 2.0.0 or later.
'''
return libvlc_audio_set_format_callbacks(self, setup, cleanup)
def audio_set_format(self, format, rate, channels):
'''Set decoded audio format.
This only works in combination with L{audio_set_callbacks}(),
and is mutually exclusive with L{audio_set_format_callbacks}().
@param format: a four-characters string identifying the sample format (e.g. "S16N" or "FL32").
@param rate: sample rate (expressed in Hz).
@param channels: channels count.
@version: LibVLC 2.0.0 or later.
'''
return libvlc_audio_set_format(self, str_to_bytes(format), rate, channels)
def get_length(self):
'''Get the current movie length (in ms).
@return: the movie length (in ms), or -1 if there is no media.
'''
return libvlc_media_player_get_length(self)
def get_time(self):
'''Get the current movie time (in ms).
@return: the movie time (in ms), or -1 if there is no media.
'''
return libvlc_media_player_get_time(self)
def set_time(self, i_time):
'''Set the movie time (in ms). This has no effect if no media is being played.
Not all formats and protocols support this.
@param i_time: the movie time (in ms).
'''
return libvlc_media_player_set_time(self, i_time)
def get_position(self):
'''Get movie position as percentage between 0.0 and 1.0.
@return: movie position, or -1. in case of error.
'''
return libvlc_media_player_get_position(self)
def set_position(self, f_pos):
'''Set movie position as percentage between 0.0 and 1.0.
This has no effect if playback is not enabled.
This might not work depending on the underlying input format and protocol.
@param f_pos: the position.
'''
return libvlc_media_player_set_position(self, f_pos)
def set_chapter(self, i_chapter):
'''Set movie chapter (if applicable).
@param i_chapter: chapter number to play.
'''
return libvlc_media_player_set_chapter(self, i_chapter)
def get_chapter(self):
'''Get movie chapter.
@return: chapter number currently playing, or -1 if there is no media.
'''
return libvlc_media_player_get_chapter(self)
def get_chapter_count(self):
'''Get movie chapter count.
@return: number of chapters in movie, or -1.
'''
return libvlc_media_player_get_chapter_count(self)
def will_play(self):
'''Is the player able to play.
@return: boolean \libvlc_return_bool.
'''
return libvlc_media_player_will_play(self)
def get_chapter_count_for_title(self, i_title):
'''Get title chapter count.
@param i_title: title.
@return: number of chapters in title, or -1.
'''
return libvlc_media_player_get_chapter_count_for_title(self, i_title)
def set_title(self, i_title):
'''Set movie title.
@param i_title: title number to play.
'''
return libvlc_media_player_set_title(self, i_title)
def get_title(self):
'''Get movie title.
@return: title number currently playing, or -1.
'''
return libvlc_media_player_get_title(self)
def get_title_count(self):
'''Get movie title count.
@return: title number count, or -1.
'''
return libvlc_media_player_get_title_count(self)
def previous_chapter(self):
'''Set previous chapter (if applicable).
'''
return libvlc_media_player_previous_chapter(self)
def next_chapter(self):
'''Set next chapter (if applicable).
'''
return libvlc_media_player_next_chapter(self)
def get_rate(self):
'''Get the requested movie play rate.
@warning: Depending on the underlying media, the requested rate may be
different from the real playback rate.
@return: movie play rate.
'''
return libvlc_media_player_get_rate(self)
def set_rate(self, rate):
'''Set movie play rate.
@param rate: movie play rate to set.
@return: -1 if an error was detected, 0 otherwise (but even then, it might not actually work depending on the underlying media protocol).
'''
return libvlc_media_player_set_rate(self, rate)
def get_state(self):
'''Get current movie state.
@return: the current state of the media player (playing, paused, ...) See libvlc_state_t.
'''
return libvlc_media_player_get_state(self)
def get_fps(self):
'''Get movie fps rate.
@return: frames per second (fps) for this playing movie, or 0 if unspecified.
'''
return libvlc_media_player_get_fps(self)
def has_vout(self):
'''How many video outputs does this media player have?
@return: the number of video outputs.
'''
return libvlc_media_player_has_vout(self)
def is_seekable(self):
'''Is this media player seekable?
@return: true if the media player can seek \libvlc_return_bool.
'''
return libvlc_media_player_is_seekable(self)
def can_pause(self):
'''Can this media player be paused?
@return: true if the media player can pause \libvlc_return_bool.
'''
return libvlc_media_player_can_pause(self)
def program_scrambled(self):
'''Check if the current program is scrambled.
@return: true if the current program is scrambled \libvlc_return_bool.
@version: LibVLC 2.2.0 or later.
'''
return libvlc_media_player_program_scrambled(self)
def next_frame(self):
'''Display the next frame (if supported).
'''
return libvlc_media_player_next_frame(self)
def navigate(self, navigate):
'''Navigate through DVD Menu.
@param navigate: the Navigation mode.
@version: libVLC 2.0.0 or later.
'''
return libvlc_media_player_navigate(self, navigate)
def set_video_title_display(self, position, timeout):
'''Set if, and how, the video title will be shown when media is played.
@param position: position at which to display the title, or libvlc_position_disable to prevent the title from being displayed.
@param timeout: title display timeout in milliseconds (ignored if libvlc_position_disable).
@version: libVLC 2.1.0 or later.
'''
return libvlc_media_player_set_video_title_display(self, position, timeout)
def toggle_fullscreen(self):
'''Toggle fullscreen status on non-embedded video outputs.
@warning: The same limitations applies to this function
as to L{set_fullscreen}().
'''
return libvlc_toggle_fullscreen(self)
    def set_fullscreen(self, b_fullscreen):
        '''Enable or disable fullscreen.
        @warning: With most window managers, only a top-level window can be in
        full-screen mode. Hence, this function will not operate properly if
        L{set_xwindow}() was used to embed the video in a
        non-top-level window. In that case, the embedding window must be reparented
        to the root window B{before} fullscreen mode is enabled. You will want
        to reparent it back to its normal parent when disabling fullscreen.
        @param b_fullscreen: boolean for fullscreen status.
        '''
        return libvlc_set_fullscreen(self, b_fullscreen)
def get_fullscreen(self):
'''Get current fullscreen status.
@return: the fullscreen status (boolean) \libvlc_return_bool.
'''
return libvlc_get_fullscreen(self)
def video_set_key_input(self, on):
'''Enable or disable key press events handling, according to the LibVLC hotkeys
configuration. By default and for historical reasons, keyboard events are
handled by the LibVLC video widget.
@note: On X11, there can be only one subscriber for key press and mouse
click events per window. If your application has subscribed to those events
for the X window ID of the video widget, then LibVLC will not be able to
handle key presses and mouse clicks in any case.
@warning: This function is only implemented for X11 and Win32 at the moment.
@param on: true to handle key press events, false to ignore them.
'''
return libvlc_video_set_key_input(self, on)
def video_set_mouse_input(self, on):
'''Enable or disable mouse click events handling. By default, those events are
handled. This is needed for DVD menus to work, as well as a few video
filters such as "puzzle".
See L{video_set_key_input}().
@warning: This function is only implemented for X11 and Win32 at the moment.
@param on: true to handle mouse click events, false to ignore them.
'''
return libvlc_video_set_mouse_input(self, on)
def video_get_scale(self):
'''Get the current video scaling factor.
See also L{video_set_scale}().
@return: the currently configured zoom factor, or 0. if the video is set to fit to the output window/drawable automatically.
'''
return libvlc_video_get_scale(self)
def video_set_scale(self, f_factor):
'''Set the video scaling factor. That is the ratio of the number of pixels on
screen to the number of pixels in the original decoded video in each
dimension. Zero is a special value; it will adjust the video to the output
window/drawable (in windowed mode) or the entire screen.
Note that not all video outputs support scaling.
@param f_factor: the scaling factor, or zero.
'''
return libvlc_video_set_scale(self, f_factor)
def video_get_aspect_ratio(self):
'''Get current video aspect ratio.
@return: the video aspect ratio or NULL if unspecified (the result must be released with free() or L{free}()).
'''
return libvlc_video_get_aspect_ratio(self)
def video_set_aspect_ratio(self, psz_aspect):
'''Set new video aspect ratio.
@param psz_aspect: new video aspect-ratio or NULL to reset to default @note Invalid aspect ratios are ignored.
'''
return libvlc_video_set_aspect_ratio(self, str_to_bytes(psz_aspect))
def video_get_spu(self):
'''Get current video subtitle.
@return: the video subtitle selected, or -1 if none.
'''
return libvlc_video_get_spu(self)
def video_get_spu_count(self):
'''Get the number of available video subtitles.
@return: the number of available video subtitles.
'''
return libvlc_video_get_spu_count(self)
def video_set_spu(self, i_spu):
'''Set new video subtitle.
@param i_spu: video subtitle track to select (i_id from track description).
@return: 0 on success, -1 if out of range.
'''
return libvlc_video_set_spu(self, i_spu)
def video_set_subtitle_file(self, psz_subtitle):
'''Set new video subtitle file.
@param psz_subtitle: new video subtitle file.
@return: the success status (boolean).
'''
return libvlc_video_set_subtitle_file(self, str_to_bytes(psz_subtitle))
def video_get_spu_delay(self):
'''Get the current subtitle delay. Positive values means subtitles are being
displayed later, negative values earlier.
@return: time (in microseconds) the display of subtitles is being delayed.
@version: LibVLC 2.0.0 or later.
'''
return libvlc_video_get_spu_delay(self)
def video_set_spu_delay(self, i_delay):
'''Set the subtitle delay. This affects the timing of when the subtitle will
be displayed. Positive values result in subtitles being displayed later,
while negative values will result in subtitles being displayed earlier.
The subtitle delay will be reset to zero each time the media changes.
@param i_delay: time (in microseconds) the display of subtitles should be delayed.
@return: 0 on success, -1 on error.
@version: LibVLC 2.0.0 or later.
'''
return libvlc_video_set_spu_delay(self, i_delay)
def video_get_crop_geometry(self):
'''Get current crop filter geometry.
@return: the crop filter geometry or NULL if unset.
'''
return libvlc_video_get_crop_geometry(self)
def video_set_crop_geometry(self, psz_geometry):
'''Set new crop filter geometry.
@param psz_geometry: new crop filter geometry (NULL to unset).
'''
return libvlc_video_set_crop_geometry(self, str_to_bytes(psz_geometry))
def video_get_teletext(self):
'''Get current teletext page requested.
@return: the current teletext page requested.
'''
return libvlc_video_get_teletext(self)
    def video_set_teletext(self, i_page):
        '''Set new teletext page to retrieve.
        @param i_page: teletext page number requested.
        '''
        return libvlc_video_set_teletext(self, i_page)
def toggle_teletext(self):
'''Toggle teletext transparent status on video output.
'''
return libvlc_toggle_teletext(self)
def video_get_track_count(self):
'''Get number of available video tracks.
@return: the number of available video tracks (int).
'''
return libvlc_video_get_track_count(self)
def video_get_track(self):
'''Get current video track.
@return: the video track ID (int) or -1 if no active input.
'''
return libvlc_video_get_track(self)
def video_set_track(self, i_track):
'''Set video track.
@param i_track: the track ID (i_id field from track description).
@return: 0 on success, -1 if out of range.
'''
return libvlc_video_set_track(self, i_track)
def video_take_snapshot(self, num, psz_filepath, i_width, i_height):
'''Take a snapshot of the current video window.
If i_width AND i_height is 0, original size is used.
If i_width XOR i_height is 0, original aspect-ratio is preserved.
@param num: number of video output (typically 0 for the first/only one).
@param psz_filepath: the path where to save the screenshot to.
@param i_width: the snapshot's width.
@param i_height: the snapshot's height.
@return: 0 on success, -1 if the video was not found.
'''
return libvlc_video_take_snapshot(self, num, str_to_bytes(psz_filepath), i_width, i_height)
def video_set_deinterlace(self, psz_mode):
'''Enable or disable deinterlace filter.
@param psz_mode: type of deinterlace filter, NULL to disable.
'''
return libvlc_video_set_deinterlace(self, str_to_bytes(psz_mode))
def video_get_marquee_int(self, option):
'''Get an integer marquee option value.
@param option: marq option to get See libvlc_video_marquee_int_option_t.
'''
return libvlc_video_get_marquee_int(self, option)
def video_get_marquee_string(self, option):
'''Get a string marquee option value.
@param option: marq option to get See libvlc_video_marquee_string_option_t.
'''
return libvlc_video_get_marquee_string(self, option)
def video_set_marquee_int(self, option, i_val):
'''Enable, disable or set an integer marquee option
Setting libvlc_marquee_Enable has the side effect of enabling (arg !0)
or disabling (arg 0) the marq filter.
@param option: marq option to set See libvlc_video_marquee_int_option_t.
@param i_val: marq option value.
'''
return libvlc_video_set_marquee_int(self, option, i_val)
def video_set_marquee_string(self, option, psz_text):
'''Set a marquee string option.
@param option: marq option to set See libvlc_video_marquee_string_option_t.
@param psz_text: marq option value.
'''
return libvlc_video_set_marquee_string(self, option, str_to_bytes(psz_text))
def video_get_logo_int(self, option):
'''Get integer logo option.
@param option: logo option to get, values of libvlc_video_logo_option_t.
'''
return libvlc_video_get_logo_int(self, option)
def video_set_logo_int(self, option, value):
'''Set logo option as integer. Options that take a different type value
are ignored.
Passing libvlc_logo_enable as option value has the side effect of
starting (arg !0) or stopping (arg 0) the logo filter.
@param option: logo option to set, values of libvlc_video_logo_option_t.
@param value: logo option value.
'''
return libvlc_video_set_logo_int(self, option, value)
def video_set_logo_string(self, option, psz_value):
'''Set logo option as string. Options that take a different type value
are ignored.
@param option: logo option to set, values of libvlc_video_logo_option_t.
@param psz_value: logo option value.
'''
return libvlc_video_set_logo_string(self, option, str_to_bytes(psz_value))
def video_get_adjust_int(self, option):
'''Get integer adjust option.
@param option: adjust option to get, values of libvlc_video_adjust_option_t.
@version: LibVLC 1.1.1 and later.
'''
return libvlc_video_get_adjust_int(self, option)
    def video_set_adjust_int(self, option, value):
        '''Set adjust option as integer. Options that take a different type value
        are ignored.
        Passing libvlc_adjust_enable as option value has the side effect of
        starting (arg !0) or stopping (arg 0) the adjust filter.
        @param option: adjust option to set, values of libvlc_video_adjust_option_t.
        @param value: adjust option value.
        @version: LibVLC 1.1.1 and later.
        '''
        return libvlc_video_set_adjust_int(self, option, value)
def video_get_adjust_float(self, option):
'''Get float adjust option.
@param option: adjust option to get, values of libvlc_video_adjust_option_t.
@version: LibVLC 1.1.1 and later.
'''
return libvlc_video_get_adjust_float(self, option)
    def video_set_adjust_float(self, option, value):
        '''Set adjust option as float. Options that take a different type value
        are ignored.
        @param option: adjust option to set, values of libvlc_video_adjust_option_t.
        @param value: adjust option value.
        @version: LibVLC 1.1.1 and later.
        '''
        return libvlc_video_set_adjust_float(self, option, value)
    def audio_output_set(self, psz_name):
        '''Selects an audio output module.
        @note: Any change will take effect only after playback is stopped and
        restarted. Audio output cannot be changed while playing.
        @param psz_name: name of audio output, use psz_name of See L{AudioOutput}.
        @return: 0 if function succeeded, -1 on error.
        '''
        return libvlc_audio_output_set(self, str_to_bytes(psz_name))
def audio_output_device_enum(self):
'''Gets a list of potential audio output devices,
See L{audio_output_device_set}().
@note: Not all audio outputs support enumerating devices.
The audio output may be functional even if the list is empty (NULL).
@note: The list may not be exhaustive.
@warning: Some audio output devices in the list might not actually work in
some circumstances. By default, it is recommended to not specify any
explicit audio device.
@return: A NULL-terminated linked list of potential audio output devices. It must be freed it with L{audio_output_device_list_release}().
@version: LibVLC 2.2.0 or later.
'''
return libvlc_audio_output_device_enum(self)
    def audio_output_device_set(self, module, device_id):
        '''Configures an explicit audio output device.
        If the module parameter is NULL, audio output will be moved to the device
        specified by the device identifier string immediately. This is the
        recommended usage.
        A list of adequate potential device strings can be obtained with
        L{audio_output_device_enum}().
        However passing NULL is supported in LibVLC version 2.2.0 and later only;
        in earlier versions, this function would have no effects when the module
        parameter was NULL.
        If the module parameter is not NULL, the device parameter of the
        corresponding audio output, if it exists, will be set to the specified
        string. Note that some audio output modules do not have such a parameter
        (notably MMDevice and PulseAudio).
        A list of adequate potential device strings can be obtained with
        L{audio_output_device_list_get}().
        @note: This function does not select the specified audio output plugin.
        L{audio_output_set}() is used for that purpose.
        @warning: The syntax for the device parameter depends on the audio output.
        Some audio output modules require further parameters (e.g. a channels map
        in the case of ALSA).
        @param module: If NULL, current audio output module. If non-NULL, name of audio output module.
        @param device_id: device identifier string.
        @return: Nothing. Errors are ignored (this is a design bug).
        '''
        return libvlc_audio_output_device_set(self, str_to_bytes(module), str_to_bytes(device_id))
def audio_output_device_get(self):
'''Get the current audio output device identifier.
This complements L{audio_output_device_set}().
@warning: The initial value for the current audio output device identifier
may not be set or may be some unknown value. A LibVLC application should
compare this value against the known device identifiers (e.g. those that
were previously retrieved by a call to L{audio_output_device_enum} or
L{audio_output_device_list_get}) to find the current audio output device.
It is possible that the selected audio output device changes (an external
change) without a call to L{audio_output_device_set}. That may make this
method unsuitable to use if a LibVLC application is attempting to track
dynamic audio device changes as they happen.
@return: the current audio output device identifier NULL if no device is selected or in case of error (the result must be released with free() or L{free}()).
@version: LibVLC 3.0.0 or later.
'''
return libvlc_audio_output_device_get(self)
def audio_toggle_mute(self):
'''Toggle mute status.
'''
return libvlc_audio_toggle_mute(self)
def audio_get_mute(self):
'''Get current mute status.
@return: the mute status (boolean) if defined, -1 if undefined/unapplicable.
'''
return libvlc_audio_get_mute(self)
def audio_set_mute(self, status):
'''Set mute status.
@param status: If status is true then mute, otherwise unmute @warning This function does not always work. If there are no active audio playback stream, the mute status might not be available. If digital pass-through (S/PDIF, HDMI...) is in use, muting may be unapplicable. Also some audio output plugins do not support muting at all. @note To force silent playback, disable all audio tracks. This is more efficient and reliable than mute.
'''
return libvlc_audio_set_mute(self, status)
def audio_get_volume(self):
'''Get current software audio volume.
@return: the software volume in percents (0 = mute, 100 = nominal / 0dB).
'''
return libvlc_audio_get_volume(self)
def audio_set_volume(self, i_volume):
'''Set current software audio volume.
@param i_volume: the volume in percents (0 = mute, 100 = 0dB).
@return: 0 if the volume was set, -1 if it was out of range.
'''
return libvlc_audio_set_volume(self, i_volume)
def audio_get_track_count(self):
'''Get number of available audio tracks.
@return: the number of available audio tracks (int), or -1 if unavailable.
'''
return libvlc_audio_get_track_count(self)
def audio_get_track(self):
'''Get current audio track.
@return: the audio track ID or -1 if no active input.
'''
return libvlc_audio_get_track(self)
def audio_set_track(self, i_track):
'''Set current audio track.
@param i_track: the track ID (i_id field from track description).
@return: 0 on success, -1 on error.
'''
return libvlc_audio_set_track(self, i_track)
def audio_get_channel(self):
'''Get current audio channel.
@return: the audio channel See libvlc_audio_output_channel_t.
'''
return libvlc_audio_get_channel(self)
def audio_set_channel(self, channel):
'''Set current audio channel.
@param channel: the audio channel, See libvlc_audio_output_channel_t.
@return: 0 on success, -1 on error.
'''
return libvlc_audio_set_channel(self, channel)
def audio_get_delay(self):
'''Get current audio delay.
@return: the audio delay (microseconds).
@version: LibVLC 1.1.1 or later.
'''
return libvlc_audio_get_delay(self)
def audio_set_delay(self, i_delay):
'''Set current audio delay. The audio delay will be reset to zero each time the media changes.
@param i_delay: the audio delay (microseconds).
@return: 0 on success, -1 on error.
@version: LibVLC 1.1.1 or later.
'''
return libvlc_audio_set_delay(self, i_delay)
def set_equalizer(self, p_equalizer):
'''Apply new equalizer settings to a media player.
The equalizer is first created by invoking L{audio_equalizer_new}() or
L{audio_equalizer_new_from_preset}().
It is possible to apply new equalizer settings to a media player whether the media
player is currently playing media or not.
Invoking this method will immediately apply the new equalizer settings to the audio
output of the currently playing media if there is any.
If there is no currently playing media, the new equalizer settings will be applied
later if and when new media is played.
Equalizer settings will automatically be applied to subsequently played media.
To disable the equalizer for a media player invoke this method passing NULL for the
p_equalizer parameter.
The media player does not keep a reference to the supplied equalizer so it is safe
for an application to release the equalizer reference any time after this method
returns.
@param p_equalizer: opaque equalizer handle, or NULL to disable the equalizer for this media player.
@return: zero on success, -1 on error.
@version: LibVLC 2.2.0 or later.
'''
return libvlc_media_player_set_equalizer(self, p_equalizer)
# LibVLC module-level functions (note: the generator's __version__ placeholder was not substituted here) #
def libvlc_errmsg():
    '''A human-readable error message for the last LibVLC error in the calling
    thread. The resulting string is valid until another error occurs (at least
    until the next LibVLC call).
    @warning
    This will be NULL if there was no error.
    '''
    # Reuse the cached ctypes prototype; bind it on first use.
    try:
        f = _Cfunctions['libvlc_errmsg']
    except KeyError:
        f = _Cfunction('libvlc_errmsg', (), None, ctypes.c_char_p)
    return f()
def libvlc_clearerr():
    '''Clears the LibVLC error status for the current thread. This is optional.
    By default, the error status is automatically overridden when a new error
    occurs, and destroyed when the thread exits.
    '''
    # Reuse the cached ctypes prototype; bind it on first use.
    try:
        f = _Cfunctions['libvlc_clearerr']
    except KeyError:
        f = _Cfunction('libvlc_clearerr', (), None, None)
    return f()
def libvlc_vprinterr(fmt, ap):
    '''Sets the LibVLC error status and message for the current thread.
    Any previous error is overridden.
    @param fmt: the format string.
    @param ap: the arguments.
    @return: a nul terminated string in any case.
    '''
    # Reuse the cached ctypes prototype; bind it on first use.
    try:
        f = _Cfunctions['libvlc_vprinterr']
    except KeyError:
        f = _Cfunction('libvlc_vprinterr', ((1,), (1,),), None,
                       ctypes.c_char_p, ctypes.c_char_p, ctypes.c_void_p)
    return f(fmt, ap)
def libvlc_new(argc, argv):
    '''Create and initialize a libvlc instance.
    This functions accept a list of "command line" arguments similar to the
    main(). These arguments affect the LibVLC instance default configuration.
    @param argc: the number of arguments (should be 0).
    @param argv: list of arguments (should be NULL).
    @return: the libvlc instance or NULL in case of error.
    @version Arguments are meant to be passed from the command line to LibVLC, just like VLC media player does. The list of valid arguments depends on the LibVLC version, the operating system and platform, and set of available LibVLC plugins. Invalid or unsupported arguments will cause the function to fail (i.e. return NULL). Also, some arguments may alter the behaviour or otherwise interfere with other LibVLC functions. @warning There is absolutely no warranty or promise of forward, backward and cross-platform compatibility with regards to L{libvlc_new}() arguments. We recommend that you do not use them, other than when debugging.
    '''
    # Reuse the cached ctypes prototype; bind it on first use.  The result
    # is wrapped into an Instance object by class_result.
    try:
        f = _Cfunctions['libvlc_new']
    except KeyError:
        f = _Cfunction('libvlc_new', ((1,), (1,),), class_result(Instance),
                       ctypes.c_void_p, ctypes.c_int, ListPOINTER(ctypes.c_char_p))
    return f(argc, argv)
def libvlc_release(p_instance):
    '''Decrement the reference count of a libvlc instance, and destroy it
    if it reaches zero.
    @param p_instance: the instance to destroy.
    '''
    # Reuse the cached ctypes prototype; bind it on first use.
    try:
        f = _Cfunctions['libvlc_release']
    except KeyError:
        f = _Cfunction('libvlc_release', ((1,),), None, None, Instance)
    return f(p_instance)
def libvlc_retain(p_instance):
    '''Increments the reference count of a libvlc instance.
    The initial reference count is 1 after L{libvlc_new}() returns.
    @param p_instance: the instance to reference.
    '''
    # Reuse the cached ctypes prototype; bind it on first use.
    try:
        f = _Cfunctions['libvlc_retain']
    except KeyError:
        f = _Cfunction('libvlc_retain', ((1,),), None, None, Instance)
    return f(p_instance)
def libvlc_add_intf(p_instance, name):
    '''Try to start a user interface for the libvlc instance.
    @param p_instance: the instance.
    @param name: interface name, or NULL for default.
    @return: 0 on success, -1 on error.
    '''
    # Reuse the cached ctypes prototype; bind it on first use.
    try:
        f = _Cfunctions['libvlc_add_intf']
    except KeyError:
        f = _Cfunction('libvlc_add_intf', ((1,), (1,),), None,
                       ctypes.c_int, Instance, ctypes.c_char_p)
    return f(p_instance, name)
def libvlc_set_user_agent(p_instance, name, http):
    '''Sets the application name. LibVLC passes this as the user agent string
    when a protocol requires it.
    @param p_instance: LibVLC instance.
    @param name: human-readable application name, e.g. "FooBar player 1.2.3".
    @param http: HTTP User Agent, e.g. "FooBar/1.2.3 Python/2.6.0".
    @version: LibVLC 1.1.1 or later.
    '''
    # Reuse the cached ctypes prototype; bind it on first use.
    try:
        f = _Cfunctions['libvlc_set_user_agent']
    except KeyError:
        f = _Cfunction('libvlc_set_user_agent', ((1,), (1,), (1,),), None,
                       None, Instance, ctypes.c_char_p, ctypes.c_char_p)
    return f(p_instance, name, http)
def libvlc_set_app_id(p_instance, id, version, icon):
    '''Sets some meta-information about the application.
    See also L{libvlc_set_user_agent}().
    @param p_instance: LibVLC instance.
    @param id: Java-style application identifier, e.g. "com.acme.foobar".
    @param version: application version numbers, e.g. "1.2.3".
    @param icon: application icon name, e.g. "foobar".
    @version: LibVLC 2.1.0 or later.
    '''
    # Reuse the cached ctypes prototype; bind it on first use.
    try:
        f = _Cfunctions['libvlc_set_app_id']
    except KeyError:
        f = _Cfunction('libvlc_set_app_id', ((1,), (1,), (1,), (1,),), None,
                       None, Instance, ctypes.c_char_p, ctypes.c_char_p, ctypes.c_char_p)
    return f(p_instance, id, version, icon)
def libvlc_get_version():
    '''Retrieve libvlc version.
    Example: "1.1.0-git The Luggage".
    @return: a string containing the libvlc version.
    '''
    # Reuse the cached ctypes prototype; bind it on first use.
    try:
        f = _Cfunctions['libvlc_get_version']
    except KeyError:
        f = _Cfunction('libvlc_get_version', (), None, ctypes.c_char_p)
    return f()
def libvlc_get_compiler():
    '''Retrieve libvlc compiler version.
    Example: "gcc version 4.2.3 (Ubuntu 4.2.3-2ubuntu6)".
    @return: a string containing the libvlc compiler version.
    '''
    # Reuse the cached ctypes prototype; bind it on first use.
    try:
        f = _Cfunctions['libvlc_get_compiler']
    except KeyError:
        f = _Cfunction('libvlc_get_compiler', (), None, ctypes.c_char_p)
    return f()
def libvlc_get_changeset():
    '''Retrieve libvlc changeset.
    Example: "aa9bce0bc4".
    @return: a string containing the libvlc changeset.
    '''
    # Reuse the memoized ctypes prototype; build and cache it on first use.
    f = _Cfunctions.get('libvlc_get_changeset', None)
    if f is None:
        f = _Cfunction('libvlc_get_changeset', (), None,
                       ctypes.c_char_p)
    return f()
def libvlc_free(ptr):
    '''Frees an heap allocation returned by a LibVLC function.
    If you know you're using the same underlying C run-time as the LibVLC
    implementation, then you can call ANSI C free() directly instead.
    @param ptr: the pointer.
    '''
    # Reuse the memoized ctypes prototype; build and cache it on first use.
    f = _Cfunctions.get('libvlc_free', None)
    if f is None:
        f = _Cfunction('libvlc_free', ((1,),), None,
                       None, ctypes.c_void_p)
    return f(ptr)
def libvlc_event_attach(p_event_manager, i_event_type, f_callback, user_data):
    '''Register for an event notification.
    @param p_event_manager: the event manager to which you want to attach to. Generally it is obtained by vlc_my_object_event_manager() where my_object is the object you want to listen to.
    @param i_event_type: the desired event to which we want to listen.
    @param f_callback: the function to call when i_event_type occurs.
    @param user_data: user provided data to carry with the event.
    @return: 0 on success, ENOMEM on error.
    '''
    # Reuse the memoized ctypes prototype; build and cache it on first use.
    f = _Cfunctions.get('libvlc_event_attach', None)
    if f is None:
        f = _Cfunction('libvlc_event_attach', ((1,), (1,), (1,), (1,),), None,
                       ctypes.c_int, EventManager, ctypes.c_uint, Callback, ctypes.c_void_p)
    return f(p_event_manager, i_event_type, f_callback, user_data)
def libvlc_event_detach(p_event_manager, i_event_type, f_callback, p_user_data):
    '''Unregister an event notification.
    @param p_event_manager: the event manager.
    @param i_event_type: the desired event to which we want to unregister.
    @param f_callback: the function to call when i_event_type occurs.
    @param p_user_data: user provided data to carry with the event.
    '''
    # Reuse the memoized ctypes prototype; build and cache it on first use.
    f = _Cfunctions.get('libvlc_event_detach', None)
    if f is None:
        f = _Cfunction('libvlc_event_detach', ((1,), (1,), (1,), (1,),), None,
                       None, EventManager, ctypes.c_uint, Callback, ctypes.c_void_p)
    return f(p_event_manager, i_event_type, f_callback, p_user_data)
def libvlc_event_type_name(event_type):
    '''Get an event's type name.
    @param event_type: the desired event.
    '''
    # Reuse the memoized ctypes prototype; build and cache it on first use.
    f = _Cfunctions.get('libvlc_event_type_name', None)
    if f is None:
        f = _Cfunction('libvlc_event_type_name', ((1,),), None,
                       ctypes.c_char_p, ctypes.c_uint)
    return f(event_type)
def libvlc_log_get_context(ctx):
    '''Gets debugging information about a log message: the name of the VLC module
    emitting the message and the message location within the source code.
    The returned module name and file name will be NULL if unknown.
    The returned line number will similarly be zero if unknown.
    @param ctx: message context (as passed to the @ref libvlc_log_cb callback).
    @return: module module name storage (or NULL), file source code file name storage (or NULL), line source code file line number storage (or NULL).
    @version: LibVLC 2.1.0 or later.
    '''
    # Reuse the memoized ctypes prototype; build and cache it on first use.
    # Flags (2,) mark output parameters filled in by LibVLC and returned to Python.
    f = _Cfunctions.get('libvlc_log_get_context', None)
    if f is None:
        f = _Cfunction('libvlc_log_get_context', ((1,), (2,), (2,), (2,),), None,
                       None, Log_ptr, ListPOINTER(ctypes.c_char_p), ListPOINTER(ctypes.c_char_p), ctypes.POINTER(ctypes.c_uint))
    return f(ctx)
def libvlc_log_get_object(ctx, id):
    '''Gets VLC object information about a log message: the type name of the VLC
    object emitting the message, the object header if any and a temporaly-unique
    object identifier. This information is mainly meant for B{manual}
    troubleshooting.
    The returned type name may be "generic" if unknown, but it cannot be NULL.
    The returned header will be NULL if unset; in current versions, the header
    is used to distinguish for VLM inputs.
    The returned object ID will be zero if the message is not associated with
    any VLC object.
    @param ctx: message context (as passed to the @ref libvlc_log_cb callback).
    @return: name object name storage (or NULL), header object header (or NULL), line source code file line number storage (or NULL).
    @version: LibVLC 2.1.0 or later.
    '''
    # Reuse the memoized ctypes prototype; build and cache it on first use.
    # Flags (2,) mark output parameters filled in by LibVLC and returned to Python.
    f = _Cfunctions.get('libvlc_log_get_object', None)
    if f is None:
        f = _Cfunction('libvlc_log_get_object', ((1,), (2,), (2,), (1,),), None,
                       None, Log_ptr, ListPOINTER(ctypes.c_char_p), ListPOINTER(ctypes.c_char_p), ctypes.POINTER(ctypes.c_uint))
    return f(ctx, id)
def libvlc_log_unset(p_instance):
    '''Unsets the logging callback for a LibVLC instance. This is rarely needed:
    the callback is implicitly unset when the instance is destroyed.
    This function will wait for any pending callbacks invocation to complete
    (causing a deadlock if called from within the callback).
    @param p_instance: libvlc instance.
    @version: LibVLC 2.1.0 or later.
    '''
    # Reuse the memoized ctypes prototype; build and cache it on first use.
    f = _Cfunctions.get('libvlc_log_unset', None)
    if f is None:
        f = _Cfunction('libvlc_log_unset', ((1,),), None,
                       None, Instance)
    return f(p_instance)
def libvlc_log_set(p_instance, cb, data):
    '''Sets the logging callback for a LibVLC instance.
    This function is thread-safe: it will wait for any pending callbacks
    invocation to complete.
    @param p_instance: libvlc instance.
    @param cb: callback function pointer.
    @param data: opaque data pointer for the callback function @note Some log messages (especially debug) are emitted by LibVLC while is being initialized. These messages cannot be captured with this interface. @warning A deadlock may occur if this function is called from the callback.
    @version: LibVLC 2.1.0 or later.
    '''
    # BUG FIX: the generated signature listed the parameters as
    # (cb, data, p_instance), contradicting the ctypes argtypes below
    # (Instance, LogCb, c_void_p) — a keyword call could never succeed, and
    # the only positional call that worked already passed the instance first.
    # The parameters are now named in the real C order, matching the
    # native prototype libvlc_log_set(instance, cb, data).
    f = _Cfunctions.get('libvlc_log_set', None) or \
        _Cfunction('libvlc_log_set', ((1,), (1,), (1,),), None,
                    None, Instance, LogCb, ctypes.c_void_p)
    return f(p_instance, cb, data)
def libvlc_log_set_file(p_instance, stream):
    '''Sets up logging to a file.
    @param p_instance: libvlc instance.
    @param stream: FILE pointer opened for writing (the FILE pointer must remain valid until L{libvlc_log_unset}()).
    @version: LibVLC 2.1.0 or later.
    '''
    # Reuse the memoized ctypes prototype; build and cache it on first use.
    f = _Cfunctions.get('libvlc_log_set_file', None)
    if f is None:
        f = _Cfunction('libvlc_log_set_file', ((1,), (1,),), None,
                       None, Instance, FILE_ptr)
    return f(p_instance, stream)
def libvlc_module_description_list_release(p_list):
    '''Release a list of module descriptions.
    @param p_list: the list to be released.
    '''
    # Reuse the memoized ctypes prototype; build and cache it on first use.
    f = _Cfunctions.get('libvlc_module_description_list_release', None)
    if f is None:
        f = _Cfunction('libvlc_module_description_list_release', ((1,),), None,
                       None, ctypes.POINTER(ModuleDescription))
    return f(p_list)
def libvlc_audio_filter_list_get(p_instance):
    '''Returns a list of audio filters that are available.
    @param p_instance: libvlc instance.
    @return: a list of module descriptions. It should be freed with L{libvlc_module_description_list_release}(). In case of an error, NULL is returned. See L{ModuleDescription} See L{libvlc_module_description_list_release}.
    '''
    # Reuse the memoized ctypes prototype; build and cache it on first use.
    f = _Cfunctions.get('libvlc_audio_filter_list_get', None)
    if f is None:
        f = _Cfunction('libvlc_audio_filter_list_get', ((1,),), None,
                       ctypes.POINTER(ModuleDescription), Instance)
    return f(p_instance)
def libvlc_video_filter_list_get(p_instance):
    '''Returns a list of video filters that are available.
    @param p_instance: libvlc instance.
    @return: a list of module descriptions. It should be freed with L{libvlc_module_description_list_release}(). In case of an error, NULL is returned. See L{ModuleDescription} See L{libvlc_module_description_list_release}.
    '''
    # Reuse the memoized ctypes prototype; build and cache it on first use.
    f = _Cfunctions.get('libvlc_video_filter_list_get', None)
    if f is None:
        f = _Cfunction('libvlc_video_filter_list_get', ((1,),), None,
                       ctypes.POINTER(ModuleDescription), Instance)
    return f(p_instance)
def libvlc_clock():
    '''Return the current time as defined by LibVLC. The unit is the microsecond.
    Time increases monotonically (regardless of time zone changes and RTC
    adjustements).
    The origin is arbitrary but consistent across the whole system
    (e.g. the system uptim, the time since the system was booted).
    @note: On systems that support it, the POSIX monotonic clock is used.
    '''
    # Reuse the memoized ctypes prototype; build and cache it on first use.
    f = _Cfunctions.get('libvlc_clock', None)
    if f is None:
        f = _Cfunction('libvlc_clock', (), None,
                       ctypes.c_int64)
    return f()
def libvlc_media_new_location(p_instance, psz_mrl):
    '''Create a media with a certain given media resource location,
    for instance a valid URL.
    @note: To refer to a local file with this function,
    the file://... URI syntax B{must} be used (see IETF RFC3986).
    We recommend using L{libvlc_media_new_path}() instead when dealing with
    local files.
    See L{libvlc_media_release}.
    @param p_instance: the instance.
    @param psz_mrl: the media location.
    @return: the newly created media or NULL on error.
    '''
    # Reuse the memoized ctypes prototype; build and cache it on first use.
    f = _Cfunctions.get('libvlc_media_new_location', None)
    if f is None:
        f = _Cfunction('libvlc_media_new_location', ((1,), (1,),), class_result(Media),
                       ctypes.c_void_p, Instance, ctypes.c_char_p)
    return f(p_instance, psz_mrl)
def libvlc_media_new_path(p_instance, path):
    '''Create a media for a certain file path.
    See L{libvlc_media_release}.
    @param p_instance: the instance.
    @param path: local filesystem path.
    @return: the newly created media or NULL on error.
    '''
    # Reuse the memoized ctypes prototype; build and cache it on first use.
    f = _Cfunctions.get('libvlc_media_new_path', None)
    if f is None:
        f = _Cfunction('libvlc_media_new_path', ((1,), (1,),), class_result(Media),
                       ctypes.c_void_p, Instance, ctypes.c_char_p)
    return f(p_instance, path)
def libvlc_media_new_fd(p_instance, fd):
    '''Create a media for an already open file descriptor.
    The file descriptor shall be open for reading (or reading and writing).
    Regular file descriptors, pipe read descriptors and character device
    descriptors (including TTYs) are supported on all platforms.
    Block device descriptors are supported where available.
    Directory descriptors are supported on systems that provide fdopendir().
    Sockets are supported on all platforms where they are file descriptors,
    i.e. all except Windows.
    @note: This library will B{not} automatically close the file descriptor
    under any circumstance. Nevertheless, a file descriptor can usually only be
    rendered once in a media player. To render it a second time, the file
    descriptor should probably be rewound to the beginning with lseek().
    See L{libvlc_media_release}.
    @param p_instance: the instance.
    @param fd: open file descriptor.
    @return: the newly created media or NULL on error.
    @version: LibVLC 1.1.5 and later.
    '''
    # Reuse the memoized ctypes prototype; build and cache it on first use.
    f = _Cfunctions.get('libvlc_media_new_fd', None)
    if f is None:
        f = _Cfunction('libvlc_media_new_fd', ((1,), (1,),), class_result(Media),
                       ctypes.c_void_p, Instance, ctypes.c_int)
    return f(p_instance, fd)
def libvlc_media_new_callbacks(instance, open_cb, read_cb, seek_cb, close_cb, opaque):
    '''Create a media with custom callbacks to read the data from.
    @param instance: LibVLC instance.
    @param open_cb: callback to open the custom bitstream input media.
    @param read_cb: callback to read data (must not be NULL).
    @param seek_cb: callback to seek, or NULL if seeking is not supported.
    @param close_cb: callback to close the media, or NULL if unnecessary.
    @param opaque: data pointer for the open callback.
    @return: the newly created media or NULL on error @note If open_cb is NULL, the opaque pointer will be passed to read_cb, seek_cb and close_cb, and the stream size will be treated as unknown. @note The callbacks may be called asynchronously (from another thread). A single stream instance need not be reentrant. However the open_cb needs to be reentrant if the media is used by multiple player instances. @warning The callbacks may be used until all or any player instances that were supplied the media item are stopped. See L{libvlc_media_release}.
    @version: LibVLC 3.0.0 and later.
    '''
    # Reuse the memoized ctypes prototype; build and cache it on first use.
    f = _Cfunctions.get('libvlc_media_new_callbacks', None)
    if f is None:
        f = _Cfunction('libvlc_media_new_callbacks', ((1,), (1,), (1,), (1,), (1,), (1,),), class_result(Media),
                       ctypes.c_void_p, Instance, MediaOpenCb, MediaReadCb, MediaSeekCb, MediaCloseCb, ctypes.c_void_p)
    return f(instance, open_cb, read_cb, seek_cb, close_cb, opaque)
def libvlc_media_new_as_node(p_instance, psz_name):
    '''Create a media as an empty node with a given name.
    See L{libvlc_media_release}.
    @param p_instance: the instance.
    @param psz_name: the name of the node.
    @return: the new empty media or NULL on error.
    '''
    # Reuse the memoized ctypes prototype; build and cache it on first use.
    f = _Cfunctions.get('libvlc_media_new_as_node', None)
    if f is None:
        f = _Cfunction('libvlc_media_new_as_node', ((1,), (1,),), class_result(Media),
                       ctypes.c_void_p, Instance, ctypes.c_char_p)
    return f(p_instance, psz_name)
def libvlc_media_add_option(p_md, psz_options):
    '''Add an option to the media.
    This option will be used to determine how the media_player will
    read the media. This allows to use VLC's advanced
    reading/streaming options on a per-media basis.
    @note: The options are listed in 'vlc --long-help' from the command line,
    e.g. "-sout-all". Keep in mind that available options and their semantics
    vary across LibVLC versions and builds.
    @warning: Not all options affects L{Media} objects:
    Specifically, due to architectural issues most audio and video options,
    such as text renderer options, have no effects on an individual media.
    These options must be set through L{libvlc_new}() instead.
    @param p_md: the media descriptor.
    @param psz_options: the options (as a string).
    '''
    # Reuse the memoized ctypes prototype; build and cache it on first use.
    f = _Cfunctions.get('libvlc_media_add_option', None)
    if f is None:
        f = _Cfunction('libvlc_media_add_option', ((1,), (1,),), None,
                       None, Media, ctypes.c_char_p)
    return f(p_md, psz_options)
def libvlc_media_add_option_flag(p_md, psz_options, i_flags):
    '''Add an option to the media with configurable flags.
    This option will be used to determine how the media_player will
    read the media. This allows to use VLC's advanced
    reading/streaming options on a per-media basis.
    The options are detailed in vlc --long-help, for instance
    "--sout-all". Note that all options are not usable on medias:
    specifically, due to architectural issues, video-related options
    such as text renderer options cannot be set on a single media. They
    must be set on the whole libvlc instance instead.
    @param p_md: the media descriptor.
    @param psz_options: the options (as a string).
    @param i_flags: the flags for this option.
    '''
    # Reuse the memoized ctypes prototype; build and cache it on first use.
    f = _Cfunctions.get('libvlc_media_add_option_flag', None)
    if f is None:
        f = _Cfunction('libvlc_media_add_option_flag', ((1,), (1,), (1,),), None,
                       None, Media, ctypes.c_char_p, ctypes.c_uint)
    return f(p_md, psz_options, i_flags)
def libvlc_media_retain(p_md):
    '''Retain a reference to a media descriptor object (libvlc_media_t). Use
    L{libvlc_media_release}() to decrement the reference count of a
    media descriptor object.
    @param p_md: the media descriptor.
    '''
    # Reuse the memoized ctypes prototype; build and cache it on first use.
    f = _Cfunctions.get('libvlc_media_retain', None)
    if f is None:
        f = _Cfunction('libvlc_media_retain', ((1,),), None,
                       None, Media)
    return f(p_md)
def libvlc_media_release(p_md):
    '''Decrement the reference count of a media descriptor object. If the
    reference count is 0, then L{libvlc_media_release}() will release the
    media descriptor object. It will send out an libvlc_MediaFreed event
    to all listeners. If the media descriptor object has been released it
    should not be used again.
    @param p_md: the media descriptor.
    '''
    # Reuse the memoized ctypes prototype; build and cache it on first use.
    f = _Cfunctions.get('libvlc_media_release', None)
    if f is None:
        f = _Cfunction('libvlc_media_release', ((1,),), None,
                       None, Media)
    return f(p_md)
def libvlc_media_get_mrl(p_md):
    '''Get the media resource locator (mrl) from a media descriptor object.
    @param p_md: a media descriptor object.
    @return: string with mrl of media descriptor object.
    '''
    # Reuse the memoized ctypes prototype; build and cache it on first use.
    f = _Cfunctions.get('libvlc_media_get_mrl', None)
    if f is None:
        f = _Cfunction('libvlc_media_get_mrl', ((1,),), string_result,
                       ctypes.c_void_p, Media)
    return f(p_md)
def libvlc_media_duplicate(p_md):
    '''Duplicate a media descriptor object.
    @param p_md: a media descriptor object.
    '''
    # Reuse the memoized ctypes prototype; build and cache it on first use.
    f = _Cfunctions.get('libvlc_media_duplicate', None)
    if f is None:
        f = _Cfunction('libvlc_media_duplicate', ((1,),), class_result(Media),
                       ctypes.c_void_p, Media)
    return f(p_md)
def libvlc_media_get_meta(p_md, e_meta):
    '''Read the meta of the media.
    If the media has not yet been parsed this will return NULL.
    This methods automatically calls L{libvlc_media_parse_async}(), so after calling
    it you may receive a libvlc_MediaMetaChanged event. If you prefer a synchronous
    version ensure that you call L{libvlc_media_parse}() before get_meta().
    See L{libvlc_media_parse}
    See L{libvlc_media_parse_async}
    See libvlc_MediaMetaChanged.
    @param p_md: the media descriptor.
    @param e_meta: the meta to read.
    @return: the media's meta.
    '''
    # Reuse the memoized ctypes prototype; build and cache it on first use.
    f = _Cfunctions.get('libvlc_media_get_meta', None)
    if f is None:
        f = _Cfunction('libvlc_media_get_meta', ((1,), (1,),), string_result,
                       ctypes.c_void_p, Media, Meta)
    return f(p_md, e_meta)
def libvlc_media_set_meta(p_md, e_meta, psz_value):
    '''Set the meta of the media (this function will not save the meta, call
    L{libvlc_media_save_meta} in order to save the meta).
    @param p_md: the media descriptor.
    @param e_meta: the meta to write.
    @param psz_value: the media's meta.
    '''
    # Reuse the memoized ctypes prototype; build and cache it on first use.
    f = _Cfunctions.get('libvlc_media_set_meta', None)
    if f is None:
        f = _Cfunction('libvlc_media_set_meta', ((1,), (1,), (1,),), None,
                       None, Media, Meta, ctypes.c_char_p)
    return f(p_md, e_meta, psz_value)
def libvlc_media_save_meta(p_md):
    '''Save the meta previously set.
    @param p_md: the media desriptor.
    @return: true if the write operation was successful.
    '''
    # Reuse the memoized ctypes prototype; build and cache it on first use.
    f = _Cfunctions.get('libvlc_media_save_meta', None)
    if f is None:
        f = _Cfunction('libvlc_media_save_meta', ((1,),), None,
                       ctypes.c_int, Media)
    return f(p_md)
def libvlc_media_get_state(p_md):
    '''Get current state of media descriptor object. Possible media states
    are defined in libvlc_structures.c ( libvlc_NothingSpecial=0,
    libvlc_Opening, libvlc_Buffering, libvlc_Playing, libvlc_Paused,
    libvlc_Stopped, libvlc_Ended,
    libvlc_Error).
    See libvlc_state_t.
    @param p_md: a media descriptor object.
    @return: state of media descriptor object.
    '''
    # Reuse the memoized ctypes prototype; build and cache it on first use.
    f = _Cfunctions.get('libvlc_media_get_state', None)
    if f is None:
        f = _Cfunction('libvlc_media_get_state', ((1,),), None,
                       State, Media)
    return f(p_md)
def libvlc_media_get_stats(p_md, p_stats):
    '''Get the current statistics about the media.
    @param p_md:: media descriptor object.
    @param p_stats:: structure that contain the statistics about the media (this structure must be allocated by the caller).
    @return: true if the statistics are available, false otherwise \libvlc_return_bool.
    '''
    # Reuse the memoized ctypes prototype; build and cache it on first use.
    f = _Cfunctions.get('libvlc_media_get_stats', None)
    if f is None:
        f = _Cfunction('libvlc_media_get_stats', ((1,), (1,),), None,
                       ctypes.c_int, Media, ctypes.POINTER(MediaStats))
    return f(p_md, p_stats)
def libvlc_media_subitems(p_md):
    '''Get subitems of media descriptor object. This will increment
    the reference count of supplied media descriptor object. Use
    L{libvlc_media_list_release}() to decrement the reference counting.
    @param p_md: media descriptor object.
    @return: list of media descriptor subitems or NULL.
    '''
    # Reuse the memoized ctypes prototype; build and cache it on first use.
    f = _Cfunctions.get('libvlc_media_subitems', None)
    if f is None:
        f = _Cfunction('libvlc_media_subitems', ((1,),), class_result(MediaList),
                       ctypes.c_void_p, Media)
    return f(p_md)
def libvlc_media_event_manager(p_md):
    '''Get event manager from media descriptor object.
    NOTE: this function doesn't increment reference counting.
    @param p_md: a media descriptor object.
    @return: event manager object.
    '''
    # Reuse the memoized ctypes prototype; build and cache it on first use.
    f = _Cfunctions.get('libvlc_media_event_manager', None)
    if f is None:
        f = _Cfunction('libvlc_media_event_manager', ((1,),), class_result(EventManager),
                       ctypes.c_void_p, Media)
    return f(p_md)
def libvlc_media_get_duration(p_md):
    '''Get duration (in ms) of media descriptor object item.
    @param p_md: media descriptor object.
    @return: duration of media item or -1 on error.
    '''
    # Reuse the memoized ctypes prototype; build and cache it on first use.
    f = _Cfunctions.get('libvlc_media_get_duration', None)
    if f is None:
        f = _Cfunction('libvlc_media_get_duration', ((1,),), None,
                       ctypes.c_longlong, Media)
    return f(p_md)
def libvlc_media_parse(p_md):
    '''Parse a media.
    This fetches (local) art, meta data and tracks information.
    The method is synchronous.
    See L{libvlc_media_parse_async}
    See L{libvlc_media_get_meta}
    See libvlc_media_get_tracks_info.
    @param p_md: media descriptor object.
    '''
    # Reuse the memoized ctypes prototype; build and cache it on first use.
    f = _Cfunctions.get('libvlc_media_parse', None)
    if f is None:
        f = _Cfunction('libvlc_media_parse', ((1,),), None,
                       None, Media)
    return f(p_md)
def libvlc_media_parse_async(p_md):
    '''Parse a media.
    This fetches (local) art, meta data and tracks information.
    The method is the asynchronous of L{libvlc_media_parse}().
    To track when this is over you can listen to libvlc_MediaParsedChanged
    event. However if the media was already parsed you will not receive this
    event.
    See L{libvlc_media_parse}
    See libvlc_MediaParsedChanged
    See L{libvlc_media_get_meta}
    See libvlc_media_get_tracks_info.
    @param p_md: media descriptor object.
    '''
    # Reuse the memoized ctypes prototype; build and cache it on first use.
    f = _Cfunctions.get('libvlc_media_parse_async', None)
    if f is None:
        f = _Cfunction('libvlc_media_parse_async', ((1,),), None,
                       None, Media)
    return f(p_md)
def libvlc_media_parse_with_options(p_md, parse_flag):
    '''Parse the media asynchronously with options.
    This fetches (local or network) art, meta data and/or tracks information.
    This method is the extended version of L{libvlc_media_parse_async}().
    To track when this is over you can listen to libvlc_MediaParsedChanged
    event. However if this functions returns an error, you will not receive this
    event.
    It uses a flag to specify parse options (see libvlc_media_parse_flag_t). All
    these flags can be combined. By default, media is parsed if it's a local
    file.
    See libvlc_MediaParsedChanged
    See L{libvlc_media_get_meta}
    See L{libvlc_media_tracks_get}
    See libvlc_media_parse_flag_t.
    @param p_md: media descriptor object.
    @param parse_flag: parse options:
    @return: -1 in case of error, 0 otherwise.
    @version: LibVLC 3.0.0 or later.
    '''
    # Reuse the memoized ctypes prototype; build and cache it on first use.
    f = _Cfunctions.get('libvlc_media_parse_with_options', None)
    if f is None:
        f = _Cfunction('libvlc_media_parse_with_options', ((1,), (1,),), None,
                       ctypes.c_int, Media, MediaParseFlag)
    return f(p_md, parse_flag)
def libvlc_media_is_parsed(p_md):
    '''Get Parsed status for media descriptor object.
    See libvlc_MediaParsedChanged.
    @param p_md: media descriptor object.
    @return: true if media object has been parsed otherwise it returns false \libvlc_return_bool.
    '''
    # Reuse the memoized ctypes prototype; build and cache it on first use.
    f = _Cfunctions.get('libvlc_media_is_parsed', None)
    if f is None:
        f = _Cfunction('libvlc_media_is_parsed', ((1,),), None,
                       ctypes.c_int, Media)
    return f(p_md)
def libvlc_media_set_user_data(p_md, p_new_user_data):
    '''Sets media descriptor's user_data. user_data is specialized data
    accessed by the host application, VLC.framework uses it as a pointer to
    an native object that references a L{Media} pointer.
    @param p_md: media descriptor object.
    @param p_new_user_data: pointer to user data.
    '''
    # Reuse the memoized ctypes prototype; build and cache it on first use.
    f = _Cfunctions.get('libvlc_media_set_user_data', None)
    if f is None:
        f = _Cfunction('libvlc_media_set_user_data', ((1,), (1,),), None,
                       None, Media, ctypes.c_void_p)
    return f(p_md, p_new_user_data)
def libvlc_media_get_user_data(p_md):
    '''Get media descriptor's user_data. user_data is specialized data
    accessed by the host application, VLC.framework uses it as a pointer to
    an native object that references a L{Media} pointer.
    @param p_md: media descriptor object.
    '''
    # Reuse the memoized ctypes prototype; build and cache it on first use.
    f = _Cfunctions.get('libvlc_media_get_user_data', None)
    if f is None:
        f = _Cfunction('libvlc_media_get_user_data', ((1,),), None,
                       ctypes.c_void_p, Media)
    return f(p_md)
def libvlc_media_tracks_get(p_md, tracks):
    '''Get media descriptor's elementary streams description
    Note, you need to call L{libvlc_media_parse}() or play the media at least once
    before calling this function.
    Not doing this will result in an empty array.
    @param p_md: media descriptor object.
    @param tracks: address to store an allocated array of Elementary Streams descriptions (must be freed with L{libvlc_media_tracks_release}.
    @return: the number of Elementary Streams (zero on error).
    @version: LibVLC 2.1.0 and later.
    '''
    # Reuse the memoized ctypes prototype; build and cache it on first use.
    f = _Cfunctions.get('libvlc_media_tracks_get', None)
    if f is None:
        f = _Cfunction('libvlc_media_tracks_get', ((1,), (1,),), None,
                       ctypes.c_uint, Media, ctypes.POINTER(ctypes.POINTER(MediaTrack)))
    return f(p_md, tracks)
def libvlc_media_get_codec_description(i_type, i_codec):
    '''Get codec description from media elementary stream.
    @param i_type: i_type from L{MediaTrack}.
    @param i_codec: i_codec or i_original_fourcc from L{MediaTrack}.
    @return: codec description.
    @version: LibVLC 3.0.0 and later. See L{MediaTrack}.
    '''
    # Reuse the memoized ctypes prototype; build and cache it on first use.
    f = _Cfunctions.get('libvlc_media_get_codec_description', None)
    if f is None:
        f = _Cfunction('libvlc_media_get_codec_description', ((1,), (1,),), None,
                       ctypes.c_char_p, TrackType, ctypes.c_uint32)
    return f(i_type, i_codec)
def libvlc_media_tracks_release(p_tracks, i_count):
    '''Release media descriptor's elementary streams description array.
    @param p_tracks: tracks info array to release.
    @param i_count: number of elements in the array.
    @version: LibVLC 2.1.0 and later.
    '''
    # Reuse the memoized ctypes prototype; build and cache it on first use.
    f = _Cfunctions.get('libvlc_media_tracks_release', None)
    if f is None:
        f = _Cfunction('libvlc_media_tracks_release', ((1,), (1,),), None,
                       None, ctypes.POINTER(MediaTrack), ctypes.c_uint)
    return f(p_tracks, i_count)
def libvlc_media_get_type(p_md):
    '''Get the media type of the media descriptor object.
    @param p_md: media descriptor object.
    @return: media type.
    @version: LibVLC 3.0.0 and later. See libvlc_media_type_t.
    '''
    # Reuse the memoized ctypes prototype; build and cache it on first use.
    f = _Cfunctions.get('libvlc_media_get_type', None)
    if f is None:
        f = _Cfunction('libvlc_media_get_type', ((1,),), None,
                       MediaType, Media)
    return f(p_md)
def libvlc_media_discoverer_new(p_inst, psz_name):
    '''Create a media discoverer object by name.
    After this object is created, you should attach to events in order to be
    notified of the discoverer state.
    You should also attach to media_list events in order to be notified of new
    items discovered.
    You need to call L{libvlc_media_discoverer_start}() in order to start the
    discovery.
    See L{libvlc_media_discoverer_media_list}
    See L{libvlc_media_discoverer_event_manager}
    See L{libvlc_media_discoverer_start}.
    @param p_inst: libvlc instance.
    @param psz_name: service name.
    @return: media discover object or NULL in case of error.
    @version: LibVLC 3.0.0 or later.
    '''
    # Reuse the memoized ctypes prototype; build and cache it on first use.
    f = _Cfunctions.get('libvlc_media_discoverer_new', None)
    if f is None:
        f = _Cfunction('libvlc_media_discoverer_new', ((1,), (1,),), class_result(MediaDiscoverer),
                       ctypes.c_void_p, Instance, ctypes.c_char_p)
    return f(p_inst, psz_name)
def libvlc_media_discoverer_start(p_mdis):
    '''Start media discovery.
    To stop it, call L{libvlc_media_discoverer_stop}() or
    L{libvlc_media_discoverer_release}() directly.
    See L{libvlc_media_discoverer_stop}.
    @param p_mdis: media discover object.
    @return: -1 in case of error, 0 otherwise.
    @version: LibVLC 3.0.0 or later.
    '''
    # Reuse the memoized ctypes prototype; build and cache it on first use.
    f = _Cfunctions.get('libvlc_media_discoverer_start', None)
    if f is None:
        f = _Cfunction('libvlc_media_discoverer_start', ((1,),), None,
                       ctypes.c_int, MediaDiscoverer)
    return f(p_mdis)
def libvlc_media_discoverer_stop(p_mdis):
    '''Stop media discovery.
    See L{libvlc_media_discoverer_start}.
    @param p_mdis: media discover object.
    @version: LibVLC 3.0.0 or later.
    '''
    # Reuse the memoized ctypes prototype; build and cache it on first use.
    f = _Cfunctions.get('libvlc_media_discoverer_stop', None)
    if f is None:
        f = _Cfunction('libvlc_media_discoverer_stop', ((1,),), None,
                       None, MediaDiscoverer)
    return f(p_mdis)
def libvlc_media_discoverer_release(p_mdis):
    '''Release media discover object. If the reference count reaches 0, then
    the object will be released.
    @param p_mdis: media service discover object.
    '''
    # Reuse the memoized ctypes prototype; build and cache it on first use.
    f = _Cfunctions.get('libvlc_media_discoverer_release', None)
    if f is None:
        f = _Cfunction('libvlc_media_discoverer_release', ((1,),), None,
                       None, MediaDiscoverer)
    return f(p_mdis)
def libvlc_media_discoverer_localized_name(p_mdis):
    '''Get media service discover object its localized name.
    @param p_mdis: media discover object.
    @return: localized name.
    '''
    # Reuse the memoized ctypes prototype; build and cache it on first use.
    f = _Cfunctions.get('libvlc_media_discoverer_localized_name', None)
    if f is None:
        f = _Cfunction('libvlc_media_discoverer_localized_name', ((1,),), string_result,
                       ctypes.c_void_p, MediaDiscoverer)
    return f(p_mdis)
def libvlc_media_discoverer_media_list(p_mdis):
    '''Get media service discover media list.
    @param p_mdis: media service discover object.
    @return: list of media items.
    '''
    # Reuse the memoized ctypes prototype; build and cache it on first use.
    f = _Cfunctions.get('libvlc_media_discoverer_media_list', None)
    if f is None:
        f = _Cfunction('libvlc_media_discoverer_media_list', ((1,),), class_result(MediaList),
                       ctypes.c_void_p, MediaDiscoverer)
    return f(p_mdis)
def libvlc_media_discoverer_event_manager(p_mdis):
    '''Get event manager from media service discover object.
    @param p_mdis: media service discover object.
    @return: event manager object.
    '''
    # Reuse the memoized ctypes prototype; build and cache it on first use.
    f = _Cfunctions.get('libvlc_media_discoverer_event_manager', None)
    if f is None:
        f = _Cfunction('libvlc_media_discoverer_event_manager', ((1,),), class_result(EventManager),
                       ctypes.c_void_p, MediaDiscoverer)
    return f(p_mdis)
def libvlc_media_discoverer_is_running(p_mdis):
    '''Query if media service discover object is running.
    @param p_mdis: media service discover object.
    @return: true if running, false if not \libvlc_return_bool.
    '''
    # Reuse the memoized ctypes prototype; build and cache it on first use.
    f = _Cfunctions.get('libvlc_media_discoverer_is_running', None)
    if f is None:
        f = _Cfunction('libvlc_media_discoverer_is_running', ((1,),), None,
                       ctypes.c_int, MediaDiscoverer)
    return f(p_mdis)
def libvlc_media_library_new(p_instance):
    '''Create an new Media Library object.
    @param p_instance: the libvlc instance.
    @return: a new object or NULL on error.
    '''
    # Reuse the memoized ctypes prototype; build and cache it on first use.
    f = _Cfunctions.get('libvlc_media_library_new', None)
    if f is None:
        f = _Cfunction('libvlc_media_library_new', ((1,),), class_result(MediaLibrary),
                       ctypes.c_void_p, Instance)
    return f(p_instance)
def libvlc_media_library_release(p_mlib):
    '''Release media library object. This functions decrements the
    reference count of the media library object. If it reaches 0,
    then the object will be released.
    @param p_mlib: media library object.
    '''
    # Reuse the memoized ctypes prototype; build and cache it on first use.
    f = _Cfunctions.get('libvlc_media_library_release', None)
    if f is None:
        f = _Cfunction('libvlc_media_library_release', ((1,),), None,
                       None, MediaLibrary)
    return f(p_mlib)
def libvlc_media_library_retain(p_mlib):
    '''Retain a reference to a media library object. This function will
    increment the reference counting for this object. Use
    L{libvlc_media_library_release}() to decrement the reference count.
    @param p_mlib: media library object.
    '''
    # Reuse the memoized ctypes prototype; build and cache it on first use.
    f = _Cfunctions.get('libvlc_media_library_retain', None)
    if f is None:
        f = _Cfunction('libvlc_media_library_retain', ((1,),), None,
                       None, MediaLibrary)
    return f(p_mlib)
def libvlc_media_library_load(p_mlib):
    '''Load media library.
    @param p_mlib: media library object.
    @return: 0 on success, -1 on error.
    '''
    # Reuse the memoized ctypes prototype; build and cache it on first use.
    f = _Cfunctions.get('libvlc_media_library_load', None)
    if f is None:
        f = _Cfunction('libvlc_media_library_load', ((1,),), None,
                       ctypes.c_int, MediaLibrary)
    return f(p_mlib)
def libvlc_media_library_media_list(p_mlib):
    '''Get media library subitems.
    @param p_mlib: media library object.
    @return: media list subitems.
    '''
    # Reuse the memoized ctypes prototype; build and cache it on first use.
    f = _Cfunctions.get('libvlc_media_library_media_list', None)
    if f is None:
        f = _Cfunction('libvlc_media_library_media_list', ((1,),), class_result(MediaList),
                       ctypes.c_void_p, MediaLibrary)
    return f(p_mlib)
def libvlc_media_list_new(p_instance):
    '''Create an empty media list.
    @param p_instance: libvlc instance.
    @return: empty media list, or NULL on error.
    '''
    # Reuse the memoized ctypes prototype; build and cache it on first use.
    f = _Cfunctions.get('libvlc_media_list_new', None)
    if f is None:
        f = _Cfunction('libvlc_media_list_new', ((1,),), class_result(MediaList),
                       ctypes.c_void_p, Instance)
    return f(p_instance)
def libvlc_media_list_release(p_ml):
    '''Release media list created with L{libvlc_media_list_new}().
    @param p_ml: a media list created with L{libvlc_media_list_new}().
    '''
    # Reuse the memoized ctypes prototype; build and cache it on first use.
    f = _Cfunctions.get('libvlc_media_list_release', None)
    if f is None:
        f = _Cfunction('libvlc_media_list_release', ((1,),), None,
                       None, MediaList)
    return f(p_ml)
def libvlc_media_list_retain(p_ml):
    '''Retain reference to a media list.
    @param p_ml: a media list created with L{libvlc_media_list_new}().
    '''
    # Reuse the memoized ctypes prototype; build and cache it on first use.
    f = _Cfunctions.get('libvlc_media_list_retain', None)
    if f is None:
        f = _Cfunction('libvlc_media_list_retain', ((1,),), None,
                       None, MediaList)
    return f(p_ml)
def libvlc_media_list_set_media(p_ml, p_md):
    '''Associate media instance with this media list instance.
    If another media instance was present it will be released.
    The L{libvlc_media_list_lock} should NOT be held upon entering this function.
    @param p_ml: a media list instance.
    @param p_md: media instance to add.
    '''
    # Reuse the memoized ctypes prototype; build and cache it on first use.
    f = _Cfunctions.get('libvlc_media_list_set_media', None)
    if f is None:
        f = _Cfunction('libvlc_media_list_set_media', ((1,), (1,),), None,
                       None, MediaList, Media)
    return f(p_ml, p_md)
def libvlc_media_list_media(p_ml):
    '''Get media instance from this media list instance. This action will increase
    the refcount on the media instance.
    The L{libvlc_media_list_lock} should NOT be held upon entering this function.
    @param p_ml: a media list instance.
    @return: media instance.
    '''
    # Reuse the memoized ctypes prototype; build and cache it on first use.
    f = _Cfunctions.get('libvlc_media_list_media', None)
    if f is None:
        f = _Cfunction('libvlc_media_list_media', ((1,),), class_result(Media),
                       ctypes.c_void_p, MediaList)
    return f(p_ml)
def libvlc_media_list_add_media(p_ml, p_md):
    '''Add media instance to media list
    The L{libvlc_media_list_lock} should be held upon entering this function.
    @param p_ml: a media list instance.
    @param p_md: a media instance.
    @return: 0 on success, -1 if the media list is read-only.
    '''
    # Resolve the C entry point once and cache it for later calls.
    func = _Cfunctions.get('libvlc_media_list_add_media', None)
    if func is None:
        func = _Cfunction('libvlc_media_list_add_media', ((1,), (1,),), None,
                          ctypes.c_int, MediaList, Media)
    return func(p_ml, p_md)
def libvlc_media_list_insert_media(p_ml, p_md, i_pos):
    '''Insert media instance in media list on a position
    The L{libvlc_media_list_lock} should be held upon entering this function.
    @param p_ml: a media list instance.
    @param p_md: a media instance.
    @param i_pos: position in array where to insert.
    @return: 0 on success, -1 if the media list is read-only.
    '''
    # Resolve the C entry point once and cache it for later calls.
    func = _Cfunctions.get('libvlc_media_list_insert_media', None)
    if func is None:
        func = _Cfunction('libvlc_media_list_insert_media', ((1,), (1,), (1,),), None,
                          ctypes.c_int, MediaList, Media, ctypes.c_int)
    return func(p_ml, p_md, i_pos)
def libvlc_media_list_remove_index(p_ml, i_pos):
    '''Remove media instance from media list on a position
    The L{libvlc_media_list_lock} should be held upon entering this function.
    @param p_ml: a media list instance.
    @param i_pos: position in array where to insert.
    @return: 0 on success, -1 if the list is read-only or the item was not found.
    '''
    # Resolve the C entry point once and cache it for later calls.
    func = _Cfunctions.get('libvlc_media_list_remove_index', None)
    if func is None:
        func = _Cfunction('libvlc_media_list_remove_index', ((1,), (1,),), None,
                          ctypes.c_int, MediaList, ctypes.c_int)
    return func(p_ml, i_pos)
def libvlc_media_list_count(p_ml):
    '''Get count on media list items
    The L{libvlc_media_list_lock} should be held upon entering this function.
    @param p_ml: a media list instance.
    @return: number of items in media list.
    '''
    # Resolve the C entry point once and cache it for later calls.
    func = _Cfunctions.get('libvlc_media_list_count', None)
    if func is None:
        func = _Cfunction('libvlc_media_list_count', ((1,),), None,
                          ctypes.c_int, MediaList)
    return func(p_ml)
def libvlc_media_list_item_at_index(p_ml, i_pos):
    '''List media instance in media list at a position
    The L{libvlc_media_list_lock} should be held upon entering this function.
    @param p_ml: a media list instance.
    @param i_pos: position in array where to insert.
    @return: media instance at position i_pos, or NULL if not found. In case of success, L{libvlc_media_retain}() is called to increase the refcount on the media.
    '''
    # Resolve the C entry point once and cache it for later calls.
    func = _Cfunctions.get('libvlc_media_list_item_at_index', None)
    if func is None:
        func = _Cfunction('libvlc_media_list_item_at_index', ((1,), (1,),),
                          class_result(Media), ctypes.c_void_p, MediaList, ctypes.c_int)
    return func(p_ml, i_pos)
def libvlc_media_list_index_of_item(p_ml, p_md):
    '''Find index position of List media instance in media list.
    Warning: the function will return the first matched position.
    The L{libvlc_media_list_lock} should be held upon entering this function.
    @param p_ml: a media list instance.
    @param p_md: media instance.
    @return: position of media instance or -1 if media not found.
    '''
    # Resolve the C entry point once and cache it for later calls.
    func = _Cfunctions.get('libvlc_media_list_index_of_item', None)
    if func is None:
        func = _Cfunction('libvlc_media_list_index_of_item', ((1,), (1,),), None,
                          ctypes.c_int, MediaList, Media)
    return func(p_ml, p_md)
def libvlc_media_list_is_readonly(p_ml):
    '''This indicates if this media list is read-only from a user point of view.
    @param p_ml: media list instance.
    @return: 1 on readonly, 0 on readwrite \libvlc_return_bool.
    '''
    # Resolve the C entry point once and cache it for later calls.
    func = _Cfunctions.get('libvlc_media_list_is_readonly', None)
    if func is None:
        func = _Cfunction('libvlc_media_list_is_readonly', ((1,),), None,
                          ctypes.c_int, MediaList)
    return func(p_ml)
def libvlc_media_list_lock(p_ml):
    '''Get lock on media list items.
    @param p_ml: a media list instance.
    '''
    # Resolve the C entry point once and cache it for later calls.
    func = _Cfunctions.get('libvlc_media_list_lock', None)
    if func is None:
        func = _Cfunction('libvlc_media_list_lock', ((1,),), None,
                          None, MediaList)
    return func(p_ml)
def libvlc_media_list_unlock(p_ml):
    '''Release lock on media list items
    The L{libvlc_media_list_lock} should be held upon entering this function.
    @param p_ml: a media list instance.
    '''
    # Resolve the C entry point once and cache it for later calls.
    func = _Cfunctions.get('libvlc_media_list_unlock', None)
    if func is None:
        func = _Cfunction('libvlc_media_list_unlock', ((1,),), None,
                          None, MediaList)
    return func(p_ml)
def libvlc_media_list_event_manager(p_ml):
    '''Get libvlc_event_manager from this media list instance.
    The p_event_manager is immutable, so you don't have to hold the lock.
    @param p_ml: a media list instance.
    @return: libvlc_event_manager.
    '''
    # Resolve the C entry point once and cache it for later calls.
    func = _Cfunctions.get('libvlc_media_list_event_manager', None)
    if func is None:
        func = _Cfunction('libvlc_media_list_event_manager', ((1,),),
                          class_result(EventManager), ctypes.c_void_p, MediaList)
    return func(p_ml)
def libvlc_media_list_player_new(p_instance):
    '''Create new media_list_player.
    @param p_instance: libvlc instance.
    @return: media list player instance or NULL on error.
    '''
    # Resolve the C entry point once and cache it for later calls.
    func = _Cfunctions.get('libvlc_media_list_player_new', None)
    if func is None:
        func = _Cfunction('libvlc_media_list_player_new', ((1,),),
                          class_result(MediaListPlayer), ctypes.c_void_p, Instance)
    return func(p_instance)
def libvlc_media_list_player_release(p_mlp):
    '''Release a media_list_player after use
    Decrement the reference count of a media player object. If the
    reference count is 0, then L{libvlc_media_list_player_release}() will
    release the media player object. If the media player object
    has been released, then it should not be used again.
    @param p_mlp: media list player instance.
    '''
    # Resolve the C entry point once and cache it for later calls.
    func = _Cfunctions.get('libvlc_media_list_player_release', None)
    if func is None:
        func = _Cfunction('libvlc_media_list_player_release', ((1,),), None,
                          None, MediaListPlayer)
    return func(p_mlp)
def libvlc_media_list_player_retain(p_mlp):
    '''Retain a reference to a media player list object. Use
    L{libvlc_media_list_player_release}() to decrement reference count.
    @param p_mlp: media player list object.
    '''
    # Resolve the C entry point once and cache it for later calls.
    func = _Cfunctions.get('libvlc_media_list_player_retain', None)
    if func is None:
        func = _Cfunction('libvlc_media_list_player_retain', ((1,),), None,
                          None, MediaListPlayer)
    return func(p_mlp)
def libvlc_media_list_player_event_manager(p_mlp):
    '''Return the event manager of this media_list_player.
    @param p_mlp: media list player instance.
    @return: the event manager.
    '''
    # Resolve the C entry point once and cache it for later calls.
    func = _Cfunctions.get('libvlc_media_list_player_event_manager', None)
    if func is None:
        func = _Cfunction('libvlc_media_list_player_event_manager', ((1,),),
                          class_result(EventManager), ctypes.c_void_p, MediaListPlayer)
    return func(p_mlp)
def libvlc_media_list_player_set_media_player(p_mlp, p_mi):
    '''Replace media player in media_list_player with this instance.
    @param p_mlp: media list player instance.
    @param p_mi: media player instance.
    '''
    # Resolve the C entry point once and cache it for later calls.
    func = _Cfunctions.get('libvlc_media_list_player_set_media_player', None)
    if func is None:
        func = _Cfunction('libvlc_media_list_player_set_media_player', ((1,), (1,),), None,
                          None, MediaListPlayer, MediaPlayer)
    return func(p_mlp, p_mi)
def libvlc_media_list_player_set_media_list(p_mlp, p_mlist):
    '''Set the media list associated with the player.
    @param p_mlp: media list player instance.
    @param p_mlist: list of media.
    '''
    # Resolve the C entry point once and cache it for later calls.
    func = _Cfunctions.get('libvlc_media_list_player_set_media_list', None)
    if func is None:
        func = _Cfunction('libvlc_media_list_player_set_media_list', ((1,), (1,),), None,
                          None, MediaListPlayer, MediaList)
    return func(p_mlp, p_mlist)
def libvlc_media_list_player_play(p_mlp):
    '''Play media list.
    @param p_mlp: media list player instance.
    '''
    # Resolve the C entry point once and cache it for later calls.
    func = _Cfunctions.get('libvlc_media_list_player_play', None)
    if func is None:
        func = _Cfunction('libvlc_media_list_player_play', ((1,),), None,
                          None, MediaListPlayer)
    return func(p_mlp)
def libvlc_media_list_player_pause(p_mlp):
    '''Toggle pause (or resume) media list.
    @param p_mlp: media list player instance.
    '''
    # Resolve the C entry point once and cache it for later calls.
    func = _Cfunctions.get('libvlc_media_list_player_pause', None)
    if func is None:
        func = _Cfunction('libvlc_media_list_player_pause', ((1,),), None,
                          None, MediaListPlayer)
    return func(p_mlp)
def libvlc_media_list_player_is_playing(p_mlp):
    '''Is media list playing?
    @param p_mlp: media list player instance.
    @return: true for playing and false for not playing \libvlc_return_bool.
    '''
    # Resolve the C entry point once and cache it for later calls.
    func = _Cfunctions.get('libvlc_media_list_player_is_playing', None)
    if func is None:
        func = _Cfunction('libvlc_media_list_player_is_playing', ((1,),), None,
                          ctypes.c_int, MediaListPlayer)
    return func(p_mlp)
def libvlc_media_list_player_get_state(p_mlp):
    '''Get current libvlc_state of media list player.
    @param p_mlp: media list player instance.
    @return: libvlc_state_t for media list player.
    '''
    # Resolve the C entry point once and cache it for later calls.
    func = _Cfunctions.get('libvlc_media_list_player_get_state', None)
    if func is None:
        func = _Cfunction('libvlc_media_list_player_get_state', ((1,),), None,
                          State, MediaListPlayer)
    return func(p_mlp)
def libvlc_media_list_player_play_item_at_index(p_mlp, i_index):
    '''Play media list item at position index.
    @param p_mlp: media list player instance.
    @param i_index: index in media list to play.
    @return: 0 upon success -1 if the item wasn't found.
    '''
    # Resolve the C entry point once and cache it for later calls.
    func = _Cfunctions.get('libvlc_media_list_player_play_item_at_index', None)
    if func is None:
        func = _Cfunction('libvlc_media_list_player_play_item_at_index', ((1,), (1,),), None,
                          ctypes.c_int, MediaListPlayer, ctypes.c_int)
    return func(p_mlp, i_index)
def libvlc_media_list_player_play_item(p_mlp, p_md):
    '''Play the given media item.
    @param p_mlp: media list player instance.
    @param p_md: the media instance.
    @return: 0 upon success, -1 if the media is not part of the media list.
    '''
    # Resolve the C entry point once and cache it for later calls.
    func = _Cfunctions.get('libvlc_media_list_player_play_item', None)
    if func is None:
        func = _Cfunction('libvlc_media_list_player_play_item', ((1,), (1,),), None,
                          ctypes.c_int, MediaListPlayer, Media)
    return func(p_mlp, p_md)
def libvlc_media_list_player_stop(p_mlp):
    '''Stop playing media list.
    @param p_mlp: media list player instance.
    '''
    # Resolve the C entry point once and cache it for later calls.
    func = _Cfunctions.get('libvlc_media_list_player_stop', None)
    if func is None:
        func = _Cfunction('libvlc_media_list_player_stop', ((1,),), None,
                          None, MediaListPlayer)
    return func(p_mlp)
def libvlc_media_list_player_next(p_mlp):
    '''Play next item from media list.
    @param p_mlp: media list player instance.
    @return: 0 upon success -1 if there is no next item.
    '''
    # Resolve the C entry point once and cache it for later calls.
    func = _Cfunctions.get('libvlc_media_list_player_next', None)
    if func is None:
        func = _Cfunction('libvlc_media_list_player_next', ((1,),), None,
                          ctypes.c_int, MediaListPlayer)
    return func(p_mlp)
def libvlc_media_list_player_previous(p_mlp):
    '''Play previous item from media list.
    @param p_mlp: media list player instance.
    @return: 0 upon success -1 if there is no previous item.
    '''
    # Resolve the C entry point once and cache it for later calls.
    func = _Cfunctions.get('libvlc_media_list_player_previous', None)
    if func is None:
        func = _Cfunction('libvlc_media_list_player_previous', ((1,),), None,
                          ctypes.c_int, MediaListPlayer)
    return func(p_mlp)
def libvlc_media_list_player_set_playback_mode(p_mlp, e_mode):
    '''Sets the playback mode for the playlist.
    @param p_mlp: media list player instance.
    @param e_mode: playback mode specification.
    '''
    # Resolve the C entry point once and cache it for later calls.
    func = _Cfunctions.get('libvlc_media_list_player_set_playback_mode', None)
    if func is None:
        func = _Cfunction('libvlc_media_list_player_set_playback_mode', ((1,), (1,),), None,
                          None, MediaListPlayer, PlaybackMode)
    return func(p_mlp, e_mode)
def libvlc_media_player_new(p_libvlc_instance):
    '''Create an empty Media Player object.
    @param p_libvlc_instance: the libvlc instance in which the Media Player should be created.
    @return: a new media player object, or NULL on error.
    '''
    # Resolve the C entry point once and cache it for later calls.
    func = _Cfunctions.get('libvlc_media_player_new', None)
    if func is None:
        func = _Cfunction('libvlc_media_player_new', ((1,),),
                          class_result(MediaPlayer), ctypes.c_void_p, Instance)
    return func(p_libvlc_instance)
def libvlc_media_player_new_from_media(p_md):
    '''Create a Media Player object from a Media.
    @param p_md: the media. Afterwards the p_md can be safely destroyed.
    @return: a new media player object, or NULL on error.
    '''
    # Resolve the C entry point once and cache it for later calls.
    func = _Cfunctions.get('libvlc_media_player_new_from_media', None)
    if func is None:
        func = _Cfunction('libvlc_media_player_new_from_media', ((1,),),
                          class_result(MediaPlayer), ctypes.c_void_p, Media)
    return func(p_md)
def libvlc_media_player_release(p_mi):
    '''Release a media_player after use
    Decrement the reference count of a media player object. If the
    reference count is 0, then L{libvlc_media_player_release}() will
    release the media player object. If the media player object
    has been released, then it should not be used again.
    @param p_mi: the Media Player to free.
    '''
    # Resolve the C entry point once and cache it for later calls.
    func = _Cfunctions.get('libvlc_media_player_release', None)
    if func is None:
        func = _Cfunction('libvlc_media_player_release', ((1,),), None,
                          None, MediaPlayer)
    return func(p_mi)
def libvlc_media_player_retain(p_mi):
    '''Retain a reference to a media player object. Use
    L{libvlc_media_player_release}() to decrement reference count.
    @param p_mi: media player object.
    '''
    # Resolve the C entry point once and cache it for later calls.
    func = _Cfunctions.get('libvlc_media_player_retain', None)
    if func is None:
        func = _Cfunction('libvlc_media_player_retain', ((1,),), None,
                          None, MediaPlayer)
    return func(p_mi)
def libvlc_media_player_set_media(p_mi, p_md):
    '''Set the media that will be used by the media_player. If any,
    previous md will be released.
    @param p_mi: the Media Player.
    @param p_md: the Media. Afterwards the p_md can be safely destroyed.
    '''
    # Resolve the C entry point once and cache it for later calls.
    func = _Cfunctions.get('libvlc_media_player_set_media', None)
    if func is None:
        func = _Cfunction('libvlc_media_player_set_media', ((1,), (1,),), None,
                          None, MediaPlayer, Media)
    return func(p_mi, p_md)
def libvlc_media_player_get_media(p_mi):
    '''Get the media used by the media_player.
    @param p_mi: the Media Player.
    @return: the media associated with p_mi, or NULL if no media is associated.
    '''
    # Resolve the C entry point once and cache it for later calls.
    func = _Cfunctions.get('libvlc_media_player_get_media', None)
    if func is None:
        func = _Cfunction('libvlc_media_player_get_media', ((1,),),
                          class_result(Media), ctypes.c_void_p, MediaPlayer)
    return func(p_mi)
def libvlc_media_player_event_manager(p_mi):
    '''Get the Event Manager from which the media player send event.
    @param p_mi: the Media Player.
    @return: the event manager associated with p_mi.
    '''
    # Resolve the C entry point once and cache it for later calls.
    func = _Cfunctions.get('libvlc_media_player_event_manager', None)
    if func is None:
        func = _Cfunction('libvlc_media_player_event_manager', ((1,),),
                          class_result(EventManager), ctypes.c_void_p, MediaPlayer)
    return func(p_mi)
def libvlc_media_player_is_playing(p_mi):
    '''is_playing.
    @param p_mi: the Media Player.
    @return: 1 if the media player is playing, 0 otherwise \libvlc_return_bool.
    '''
    # Resolve the C entry point once and cache it for later calls.
    func = _Cfunctions.get('libvlc_media_player_is_playing', None)
    if func is None:
        func = _Cfunction('libvlc_media_player_is_playing', ((1,),), None,
                          ctypes.c_int, MediaPlayer)
    return func(p_mi)
def libvlc_media_player_play(p_mi):
    '''Play.
    @param p_mi: the Media Player.
    @return: 0 if playback started (and was already started), or -1 on error.
    '''
    # Resolve the C entry point once and cache it for later calls.
    func = _Cfunctions.get('libvlc_media_player_play', None)
    if func is None:
        func = _Cfunction('libvlc_media_player_play', ((1,),), None,
                          ctypes.c_int, MediaPlayer)
    return func(p_mi)
def libvlc_media_player_set_pause(mp, do_pause):
    '''Pause or resume (no effect if there is no media).
    @param mp: the Media Player.
    @param do_pause: play/resume if zero, pause if non-zero.
    @version: LibVLC 1.1.1 or later.
    '''
    # Resolve the C entry point once and cache it for later calls.
    func = _Cfunctions.get('libvlc_media_player_set_pause', None)
    if func is None:
        func = _Cfunction('libvlc_media_player_set_pause', ((1,), (1,),), None,
                          None, MediaPlayer, ctypes.c_int)
    return func(mp, do_pause)
def libvlc_media_player_pause(p_mi):
    '''Toggle pause (no effect if there is no media).
    @param p_mi: the Media Player.
    '''
    # Resolve the C entry point once and cache it for later calls.
    func = _Cfunctions.get('libvlc_media_player_pause', None)
    if func is None:
        func = _Cfunction('libvlc_media_player_pause', ((1,),), None,
                          None, MediaPlayer)
    return func(p_mi)
def libvlc_media_player_stop(p_mi):
    '''Stop (no effect if there is no media).
    @param p_mi: the Media Player.
    '''
    # Resolve the C entry point once and cache it for later calls.
    func = _Cfunctions.get('libvlc_media_player_stop', None)
    if func is None:
        func = _Cfunction('libvlc_media_player_stop', ((1,),), None,
                          None, MediaPlayer)
    return func(p_mi)
def libvlc_video_set_callbacks(mp, lock, unlock, display, opaque):
    '''Set callbacks and private data to render decoded video to a custom area
    in memory.
    Use L{libvlc_video_set_format}() or L{libvlc_video_set_format_callbacks}()
    to configure the decoded format.
    @param mp: the media player.
    @param lock: callback to lock video memory (must not be NULL).
    @param unlock: callback to unlock video memory (or NULL if not needed).
    @param display: callback to display video (or NULL if not needed).
    @param opaque: private pointer for the three callbacks (as first parameter).
    @version: LibVLC 1.1.1 or later.
    '''
    # Resolve the C entry point once and cache it for later calls.
    func = _Cfunctions.get('libvlc_video_set_callbacks', None)
    if func is None:
        func = _Cfunction('libvlc_video_set_callbacks', ((1,), (1,), (1,), (1,), (1,),), None,
                          None, MediaPlayer, VideoLockCb, VideoUnlockCb, VideoDisplayCb, ctypes.c_void_p)
    return func(mp, lock, unlock, display, opaque)
def libvlc_video_set_format(mp, chroma, width, height, pitch):
    '''Set decoded video chroma and dimensions.
    This only works in combination with L{libvlc_video_set_callbacks}(),
    and is mutually exclusive with L{libvlc_video_set_format_callbacks}().
    @param mp: the media player.
    @param chroma: a four-characters string identifying the chroma (e.g. "RV32" or "YUYV").
    @param width: pixel width.
    @param height: pixel height.
    @param pitch: line pitch (in bytes).
    @version: LibVLC 1.1.1 or later.
    @bug: All pixel planes are expected to have the same pitch. To use the YCbCr color space with chrominance subsampling, consider using L{libvlc_video_set_format_callbacks}() instead.
    '''
    # Resolve the C entry point once and cache it for later calls.
    func = _Cfunctions.get('libvlc_video_set_format', None)
    if func is None:
        func = _Cfunction('libvlc_video_set_format', ((1,), (1,), (1,), (1,), (1,),), None,
                          None, MediaPlayer, ctypes.c_char_p, ctypes.c_uint, ctypes.c_uint, ctypes.c_uint)
    return func(mp, chroma, width, height, pitch)
def libvlc_video_set_format_callbacks(mp, setup, cleanup):
    '''Set decoded video chroma and dimensions. This only works in combination with
    L{libvlc_video_set_callbacks}().
    @param mp: the media player.
    @param setup: callback to select the video format (cannot be NULL).
    @param cleanup: callback to release any allocated resources (or NULL).
    @version: LibVLC 2.0.0 or later.
    '''
    # Resolve the C entry point once and cache it for later calls.
    func = _Cfunctions.get('libvlc_video_set_format_callbacks', None)
    if func is None:
        func = _Cfunction('libvlc_video_set_format_callbacks', ((1,), (1,), (1,),), None,
                          None, MediaPlayer, VideoFormatCb, VideoCleanupCb)
    return func(mp, setup, cleanup)
def libvlc_media_player_set_nsobject(p_mi, drawable):
    '''Set the NSView handler where the media player should render its video output.
    Use the vout called "macosx".
    The drawable is an NSObject that follow the VLCOpenGLVideoViewEmbedding
    protocol:
    @begincode
    \@protocol VLCOpenGLVideoViewEmbedding <NSObject>
    - (void)addVoutSubview:(NSView *)view;
    - (void)removeVoutSubview:(NSView *)view;
    \@end
    @endcode
    Or it can be an NSView object.
    If you want to use it along with Qt4 see the QMacCocoaViewContainer. Then
    the following code should work:
    @begincode
    NSView *video = [[NSView alloc] init];
    QMacCocoaViewContainer *container = new QMacCocoaViewContainer(video, parent);
    L{libvlc_media_player_set_nsobject}(mp, video);
    [video release];
    @endcode
    You can find a live example in VLCVideoView in VLCKit.framework.
    @param p_mi: the Media Player.
    @param drawable: the drawable that is either an NSView or an object following the VLCOpenGLVideoViewEmbedding protocol.
    '''
    # Resolve the C entry point once and cache it for later calls.
    func = _Cfunctions.get('libvlc_media_player_set_nsobject', None)
    if func is None:
        func = _Cfunction('libvlc_media_player_set_nsobject', ((1,), (1,),), None,
                          None, MediaPlayer, ctypes.c_void_p)
    return func(p_mi, drawable)
def libvlc_media_player_get_nsobject(p_mi):
    '''Get the NSView handler previously set with L{libvlc_media_player_set_nsobject}().
    @param p_mi: the Media Player.
    @return: the NSView handler or 0 if none where set.
    '''
    # Resolve the C entry point once and cache it for later calls.
    func = _Cfunctions.get('libvlc_media_player_get_nsobject', None)
    if func is None:
        func = _Cfunction('libvlc_media_player_get_nsobject', ((1,),), None,
                          ctypes.c_void_p, MediaPlayer)
    return func(p_mi)
def libvlc_media_player_set_agl(p_mi, drawable):
    '''Set the agl handler where the media player should render its video output.
    @param p_mi: the Media Player.
    @param drawable: the agl handler.
    '''
    # Resolve the C entry point once and cache it for later calls.
    func = _Cfunctions.get('libvlc_media_player_set_agl', None)
    if func is None:
        func = _Cfunction('libvlc_media_player_set_agl', ((1,), (1,),), None,
                          None, MediaPlayer, ctypes.c_uint32)
    return func(p_mi, drawable)
def libvlc_media_player_get_agl(p_mi):
    '''Get the agl handler previously set with L{libvlc_media_player_set_agl}().
    @param p_mi: the Media Player.
    @return: the agl handler or 0 if none where set.
    '''
    # Resolve the C entry point once and cache it for later calls.
    func = _Cfunctions.get('libvlc_media_player_get_agl', None)
    if func is None:
        func = _Cfunction('libvlc_media_player_get_agl', ((1,),), None,
                          ctypes.c_uint32, MediaPlayer)
    return func(p_mi)
def libvlc_media_player_set_xwindow(p_mi, drawable):
    '''Set an X Window System drawable where the media player should render its
    video output. If LibVLC was built without X11 output support, then this has
    no effects.
    The specified identifier must correspond to an existing Input/Output class
    X11 window. Pixmaps are B{not} supported. The caller shall ensure that
    the X11 server is the same as the one the VLC instance has been configured
    with. This function must be called before video playback is started;
    otherwise it will only take effect after playback stop and restart.
    @param p_mi: the Media Player.
    @param drawable: the ID of the X window.
    '''
    # Resolve the C entry point once and cache it for later calls.
    func = _Cfunctions.get('libvlc_media_player_set_xwindow', None)
    if func is None:
        func = _Cfunction('libvlc_media_player_set_xwindow', ((1,), (1,),), None,
                          None, MediaPlayer, ctypes.c_uint32)
    return func(p_mi, drawable)
def libvlc_media_player_get_xwindow(p_mi):
    '''Get the X Window System window identifier previously set with
    L{libvlc_media_player_set_xwindow}(). Note that this will return the identifier
    even if VLC is not currently using it (for instance if it is playing an
    audio-only input).
    @param p_mi: the Media Player.
    @return: an X window ID, or 0 if none where set.
    '''
    # Resolve the C entry point once and cache it for later calls.
    func = _Cfunctions.get('libvlc_media_player_get_xwindow', None)
    if func is None:
        func = _Cfunction('libvlc_media_player_get_xwindow', ((1,),), None,
                          ctypes.c_uint32, MediaPlayer)
    return func(p_mi)
def libvlc_media_player_set_hwnd(p_mi, drawable):
    '''Set a Win32/Win64 API window handle (HWND) where the media player should
    render its video output. If LibVLC was built without Win32/Win64 API output
    support, then this has no effects.
    @param p_mi: the Media Player.
    @param drawable: windows handle of the drawable.
    '''
    # Resolve the C entry point once and cache it for later calls.
    func = _Cfunctions.get('libvlc_media_player_set_hwnd', None)
    if func is None:
        func = _Cfunction('libvlc_media_player_set_hwnd', ((1,), (1,),), None,
                          None, MediaPlayer, ctypes.c_void_p)
    return func(p_mi, drawable)
def libvlc_media_player_get_hwnd(p_mi):
    '''Get the Windows API window handle (HWND) previously set with
    L{libvlc_media_player_set_hwnd}(). The handle will be returned even if LibVLC
    is not currently outputting any video to it.
    @param p_mi: the Media Player.
    @return: a window handle or NULL if there are none.
    '''
    # Resolve the C entry point once and cache it for later calls.
    func = _Cfunctions.get('libvlc_media_player_get_hwnd', None)
    if func is None:
        func = _Cfunction('libvlc_media_player_get_hwnd', ((1,),), None,
                          ctypes.c_void_p, MediaPlayer)
    return func(p_mi)
def libvlc_audio_set_callbacks(mp, play, pause, resume, flush, drain, opaque):
    '''Set callbacks and private data for decoded audio.
    Use L{libvlc_audio_set_format}() or L{libvlc_audio_set_format_callbacks}()
    to configure the decoded audio format.
    @param mp: the media player.
    @param play: callback to play audio samples (must not be NULL).
    @param pause: callback to pause playback (or NULL to ignore).
    @param resume: callback to resume playback (or NULL to ignore).
    @param flush: callback to flush audio buffers (or NULL to ignore).
    @param drain: callback to drain audio buffers (or NULL to ignore).
    @param opaque: private pointer for the audio callbacks (as first parameter).
    @version: LibVLC 2.0.0 or later.
    '''
    # Resolve the C entry point once and cache it for later calls.
    func = _Cfunctions.get('libvlc_audio_set_callbacks', None)
    if func is None:
        func = _Cfunction('libvlc_audio_set_callbacks', ((1,), (1,), (1,), (1,), (1,), (1,), (1,),), None,
                          None, MediaPlayer, AudioPlayCb, AudioPauseCb, AudioResumeCb, AudioFlushCb, AudioDrainCb, ctypes.c_void_p)
    return func(mp, play, pause, resume, flush, drain, opaque)
def libvlc_audio_set_volume_callback(mp, set_volume):
    '''Set callbacks and private data for decoded audio. This only works in
    combination with L{libvlc_audio_set_callbacks}().
    Use L{libvlc_audio_set_format}() or L{libvlc_audio_set_format_callbacks}()
    to configure the decoded audio format.
    @param mp: the media player.
    @param set_volume: callback to apply audio volume, or NULL to apply volume in software.
    @version: LibVLC 2.0.0 or later.
    '''
    # Resolve the C entry point once and cache it for later calls.
    func = _Cfunctions.get('libvlc_audio_set_volume_callback', None)
    if func is None:
        func = _Cfunction('libvlc_audio_set_volume_callback', ((1,), (1,),), None,
                          None, MediaPlayer, AudioSetVolumeCb)
    return func(mp, set_volume)
def libvlc_audio_set_format_callbacks(mp, setup, cleanup):
    '''Set decoded audio format. This only works in combination with
    L{libvlc_audio_set_callbacks}().
    @param mp: the media player.
    @param setup: callback to select the audio format (cannot be NULL).
    @param cleanup: callback to release any allocated resources (or NULL).
    @version: LibVLC 2.0.0 or later.
    '''
    # Resolve the C entry point once and cache it for later calls.
    func = _Cfunctions.get('libvlc_audio_set_format_callbacks', None)
    if func is None:
        func = _Cfunction('libvlc_audio_set_format_callbacks', ((1,), (1,), (1,),), None,
                          None, MediaPlayer, AudioSetupCb, AudioCleanupCb)
    return func(mp, setup, cleanup)
def libvlc_audio_set_format(mp, format, rate, channels):
    '''Set decoded audio format.
    This only works in combination with L{libvlc_audio_set_callbacks}(),
    and is mutually exclusive with L{libvlc_audio_set_format_callbacks}().
    @param mp: the media player.
    @param format: a four-characters string identifying the sample format (e.g. "S16N" or "FL32").
    @param rate: sample rate (expressed in Hz).
    @param channels: channels count.
    @version: LibVLC 2.0.0 or later.
    '''
    # Resolve the C entry point once and cache it for later calls.
    func = _Cfunctions.get('libvlc_audio_set_format', None)
    if func is None:
        func = _Cfunction('libvlc_audio_set_format', ((1,), (1,), (1,), (1,),), None,
                          None, MediaPlayer, ctypes.c_char_p, ctypes.c_uint, ctypes.c_uint)
    return func(mp, format, rate, channels)
def libvlc_media_player_get_length(p_mi):
    '''Get the current movie length (in ms).
    @param p_mi: the Media Player.
    @return: the movie length (in ms), or -1 if there is no media.
    '''
    # Resolve the C entry point once and cache it for later calls.
    func = _Cfunctions.get('libvlc_media_player_get_length', None)
    if func is None:
        func = _Cfunction('libvlc_media_player_get_length', ((1,),), None,
                          ctypes.c_longlong, MediaPlayer)
    return func(p_mi)
def libvlc_media_player_get_time(p_mi):
    '''Get the current movie time (in ms).
    @param p_mi: the Media Player.
    @return: the movie time (in ms), or -1 if there is no media.
    '''
    # Resolve the C entry point once and cache it for later calls.
    func = _Cfunctions.get('libvlc_media_player_get_time', None)
    if func is None:
        func = _Cfunction('libvlc_media_player_get_time', ((1,),), None,
                          ctypes.c_longlong, MediaPlayer)
    return func(p_mi)
def libvlc_media_player_set_time(p_mi, i_time):
    '''Set the movie time (in ms). This has no effect if no media is being played.
    Not all formats and protocols support this.
    @param p_mi: the Media Player.
    @param i_time: the movie time (in ms).
    '''
    # Resolve the C entry point once and cache it for later calls.
    func = _Cfunctions.get('libvlc_media_player_set_time', None)
    if func is None:
        func = _Cfunction('libvlc_media_player_set_time', ((1,), (1,),), None,
                          None, MediaPlayer, ctypes.c_longlong)
    return func(p_mi, i_time)
def libvlc_media_player_get_position(p_mi):
    '''Get movie position as percentage between 0.0 and 1.0.
    @param p_mi: the Media Player.
    @return: movie position, or -1. in case of error.
    '''
    # Resolve the C entry point once and cache it for later calls.
    func = _Cfunctions.get('libvlc_media_player_get_position', None)
    if func is None:
        func = _Cfunction('libvlc_media_player_get_position', ((1,),), None,
                          ctypes.c_float, MediaPlayer)
    return func(p_mi)
def libvlc_media_player_set_position(p_mi, f_pos):
    '''Set movie position as percentage between 0.0 and 1.0.
    This has no effect if playback is not enabled.
    This might not work depending on the underlying input format and protocol.
    @param p_mi: the Media Player.
    @param f_pos: the position.
    '''
    # Resolve the C entry point once and cache it for later calls.
    func = _Cfunctions.get('libvlc_media_player_set_position', None)
    if func is None:
        func = _Cfunction('libvlc_media_player_set_position', ((1,), (1,),), None,
                          None, MediaPlayer, ctypes.c_float)
    return func(p_mi, f_pos)
def libvlc_media_player_set_chapter(p_mi, i_chapter):
    '''Set movie chapter (if applicable).
    @param p_mi: the Media Player.
    @param i_chapter: chapter number to play.
    '''
    # Resolve the C entry point once and cache it for later calls.
    func = _Cfunctions.get('libvlc_media_player_set_chapter', None)
    if func is None:
        func = _Cfunction('libvlc_media_player_set_chapter', ((1,), (1,),), None,
                          None, MediaPlayer, ctypes.c_int)
    return func(p_mi, i_chapter)
def libvlc_media_player_get_chapter(p_mi):
    '''Get movie chapter.
    @param p_mi: the Media Player.
    @return: chapter number currently playing, or -1 if there is no media.
    '''
    # Resolve the C entry point once and cache it for later calls.
    func = _Cfunctions.get('libvlc_media_player_get_chapter', None)
    if func is None:
        func = _Cfunction('libvlc_media_player_get_chapter', ((1,),), None,
                          ctypes.c_int, MediaPlayer)
    return func(p_mi)
def libvlc_media_player_get_chapter_count(p_mi):
    '''Get movie chapter count.
    @param p_mi: the Media Player.
    @return: number of chapters in movie, or -1.
    '''
    # Resolve the C entry point once and cache it for later calls.
    func = _Cfunctions.get('libvlc_media_player_get_chapter_count', None)
    if func is None:
        func = _Cfunction('libvlc_media_player_get_chapter_count', ((1,),), None,
                          ctypes.c_int, MediaPlayer)
    return func(p_mi)
def libvlc_media_player_will_play(p_mi):
    '''Is the player able to play.
    @param p_mi: the Media Player.
    @return: boolean \libvlc_return_bool.
    '''
    # Resolve the C entry point once and cache it for later calls.
    func = _Cfunctions.get('libvlc_media_player_will_play', None)
    if func is None:
        func = _Cfunction('libvlc_media_player_will_play', ((1,),), None,
                          ctypes.c_int, MediaPlayer)
    return func(p_mi)
def libvlc_media_player_get_chapter_count_for_title(p_mi, i_title):
    '''Get title chapter count.
    @param p_mi: the Media Player.
    @param i_title: title.
    @return: number of chapters in title, or -1.
    '''
    # Resolve the C entry point once and cache it for later calls.
    func = _Cfunctions.get('libvlc_media_player_get_chapter_count_for_title', None)
    if func is None:
        func = _Cfunction('libvlc_media_player_get_chapter_count_for_title', ((1,), (1,),), None,
                          ctypes.c_int, MediaPlayer, ctypes.c_int)
    return func(p_mi, i_title)
def libvlc_media_player_set_title(p_mi, i_title):
    '''Set movie title.
    @param p_mi: the Media Player.
    @param i_title: title number to play.
    '''
    # Resolve the C entry point once and cache it for later calls.
    func = _Cfunctions.get('libvlc_media_player_set_title', None)
    if func is None:
        func = _Cfunction('libvlc_media_player_set_title', ((1,), (1,),), None,
                          None, MediaPlayer, ctypes.c_int)
    return func(p_mi, i_title)
def libvlc_media_player_get_title(p_mi):
    '''Get movie title.
    @param p_mi: the Media Player.
    @return: title number currently playing, or -1.
    '''
    # Resolve the C entry point once and cache it for later calls.
    func = _Cfunctions.get('libvlc_media_player_get_title', None)
    if func is None:
        func = _Cfunction('libvlc_media_player_get_title', ((1,),), None,
                          ctypes.c_int, MediaPlayer)
    return func(p_mi)
def libvlc_media_player_get_title_count(p_mi):
    '''Get movie title count.
    @param p_mi: the Media Player.
    @return: title number count, or -1.
    '''
    # Resolve the C entry point once and cache it for later calls.
    func = _Cfunctions.get('libvlc_media_player_get_title_count', None)
    if func is None:
        func = _Cfunction('libvlc_media_player_get_title_count', ((1,),), None,
                          ctypes.c_int, MediaPlayer)
    return func(p_mi)
def libvlc_media_player_previous_chapter(p_mi):
    '''Set previous chapter (if applicable).
    @param p_mi: the Media Player.
    '''
    # Resolve the C entry point once and cache it for later calls.
    func = _Cfunctions.get('libvlc_media_player_previous_chapter', None)
    if func is None:
        func = _Cfunction('libvlc_media_player_previous_chapter', ((1,),), None,
                          None, MediaPlayer)
    return func(p_mi)
def libvlc_media_player_next_chapter(p_mi):
    '''Set next chapter (if applicable).
    @param p_mi: the Media Player.
    '''
    # Resolve the C entry point once and cache it for later calls.
    func = _Cfunctions.get('libvlc_media_player_next_chapter', None)
    if func is None:
        func = _Cfunction('libvlc_media_player_next_chapter', ((1,),), None,
                          None, MediaPlayer)
    return func(p_mi)
def libvlc_media_player_get_rate(p_mi):
    '''Get the requested movie play rate.
    @warning: Depending on the underlying media, the requested rate may be
    different from the real playback rate.
    @param p_mi: the Media Player.
    @return: movie play rate.
    '''
    # Resolve the C entry point once and cache it for later calls.
    func = _Cfunctions.get('libvlc_media_player_get_rate', None)
    if func is None:
        func = _Cfunction('libvlc_media_player_get_rate', ((1,),), None,
                          ctypes.c_float, MediaPlayer)
    return func(p_mi)
def libvlc_media_player_set_rate(p_mi, rate):
    '''Set movie play rate.
    @param p_mi: the Media Player.
    @param rate: movie play rate to set.
    @return: -1 if an error was detected, 0 otherwise (but even then, it might not actually work depending on the underlying media protocol).
    '''
    # Resolve the C entry point once and cache it for later calls.
    func = _Cfunctions.get('libvlc_media_player_set_rate', None)
    if func is None:
        func = _Cfunction('libvlc_media_player_set_rate', ((1,), (1,),), None,
                          ctypes.c_int, MediaPlayer, ctypes.c_float)
    return func(p_mi, rate)
def libvlc_media_player_get_state(p_mi):
    '''Get current movie state.
    @param p_mi: the Media Player.
    @return: the current state of the media player (playing, paused, ...) See libvlc_state_t.
    '''
    # Resolve the C entry point once and cache it for later calls.
    func = _Cfunctions.get('libvlc_media_player_get_state', None)
    if func is None:
        func = _Cfunction('libvlc_media_player_get_state', ((1,),), None,
                          State, MediaPlayer)
    return func(p_mi)
def libvlc_media_player_get_fps(p_mi):
    '''Get movie fps rate.
    @param p_mi: the Media Player.
    @return: frames per second (fps) for this playing movie, or 0 if unspecified.
    '''
    # Resolve the C entry point once and cache it for later calls.
    func = _Cfunctions.get('libvlc_media_player_get_fps', None)
    if func is None:
        func = _Cfunction('libvlc_media_player_get_fps', ((1,),), None,
                          ctypes.c_float, MediaPlayer)
    return func(p_mi)
def libvlc_media_player_has_vout(p_mi):
    '''How many video outputs does this media player have?
    @param p_mi: the media player.
    @return: the number of video outputs.
    '''
    # Look up the cached C binding, creating it on first use.
    f = _Cfunctions.get('libvlc_media_player_has_vout', None) or \
        _Cfunction('libvlc_media_player_has_vout', ((1,),), None,
                    ctypes.c_uint, MediaPlayer)
    return f(p_mi)
def libvlc_media_player_is_seekable(p_mi):
    '''Is this media player seekable?
    @param p_mi: the media player.
    @return: true if the media player can seek \libvlc_return_bool.
    '''
    # Look up the cached C binding, creating it on first use.
    f = _Cfunctions.get('libvlc_media_player_is_seekable', None) or \
        _Cfunction('libvlc_media_player_is_seekable', ((1,),), None,
                    ctypes.c_int, MediaPlayer)
    return f(p_mi)
def libvlc_media_player_can_pause(p_mi):
    '''Can this media player be paused?
    @param p_mi: the media player.
    @return: true if the media player can pause \libvlc_return_bool.
    '''
    # Look up the cached C binding, creating it on first use.
    f = _Cfunctions.get('libvlc_media_player_can_pause', None) or \
        _Cfunction('libvlc_media_player_can_pause', ((1,),), None,
                    ctypes.c_int, MediaPlayer)
    return f(p_mi)
def libvlc_media_player_program_scrambled(p_mi):
    '''Check if the current program is scrambled.
    @param p_mi: the media player.
    @return: true if the current program is scrambled \libvlc_return_bool.
    @version: LibVLC 2.2.0 or later.
    '''
    # Look up the cached C binding, creating it on first use.
    f = _Cfunctions.get('libvlc_media_player_program_scrambled', None) or \
        _Cfunction('libvlc_media_player_program_scrambled', ((1,),), None,
                    ctypes.c_int, MediaPlayer)
    return f(p_mi)
def libvlc_media_player_next_frame(p_mi):
    '''Display the next frame (if supported).
    @param p_mi: the media player.
    '''
    # Look up the cached C binding, creating it on first use.
    f = _Cfunctions.get('libvlc_media_player_next_frame', None) or \
        _Cfunction('libvlc_media_player_next_frame', ((1,),), None,
                    None, MediaPlayer)
    return f(p_mi)
def libvlc_media_player_navigate(p_mi, navigate):
    '''Navigate through DVD Menu.
    @param p_mi: the Media Player.
    @param navigate: the Navigation mode.
    @version: libVLC 2.0.0 or later.
    '''
    # Look up the cached C binding, creating it on first use.
    f = _Cfunctions.get('libvlc_media_player_navigate', None) or \
        _Cfunction('libvlc_media_player_navigate', ((1,), (1,),), None,
                    None, MediaPlayer, ctypes.c_uint)
    return f(p_mi, navigate)
def libvlc_media_player_set_video_title_display(p_mi, position, timeout):
    '''Set if, and how, the video title will be shown when media is played.
    @param p_mi: the media player.
    @param position: position at which to display the title, or libvlc_position_disable to prevent the title from being displayed.
    @param timeout: title display timeout in milliseconds (ignored if libvlc_position_disable).
    @version: libVLC 2.1.0 or later.
    '''
    # Look up the cached C binding, creating it on first use.
    # The position argument is marshalled through the Position enum wrapper.
    f = _Cfunctions.get('libvlc_media_player_set_video_title_display', None) or \
        _Cfunction('libvlc_media_player_set_video_title_display', ((1,), (1,), (1,),), None,
                    None, MediaPlayer, Position, ctypes.c_int)
    return f(p_mi, position, timeout)
def libvlc_track_description_list_release(p_track_description):
    '''Release (free) L{TrackDescription}.
    @param p_track_description: the structure to release.
    '''
    # Look up the cached C binding, creating it on first use.
    f = _Cfunctions.get('libvlc_track_description_list_release', None) or \
        _Cfunction('libvlc_track_description_list_release', ((1,),), None,
                    None, ctypes.POINTER(TrackDescription))
    return f(p_track_description)
def libvlc_toggle_fullscreen(p_mi):
    '''Toggle fullscreen status on non-embedded video outputs.
    @warning: The same limitations applies to this function
    as to L{libvlc_set_fullscreen}().
    @param p_mi: the media player.
    '''
    # Look up the cached C binding, creating it on first use.
    f = _Cfunctions.get('libvlc_toggle_fullscreen', None) or \
        _Cfunction('libvlc_toggle_fullscreen', ((1,),), None,
                    None, MediaPlayer)
    return f(p_mi)
def libvlc_set_fullscreen(p_mi, b_fullscreen):
    '''Enable or disable fullscreen.
    @warning: With most window managers, only a top-level windows can be in
    full-screen mode. Hence, this function will not operate properly if
    L{libvlc_media_player_set_xwindow}() was used to embed the video in a
    non-top-level window. In that case, the embedding window must be reparented
    to the root window B{before} fullscreen mode is enabled. You will want
    to reparent it back to its normal parent when disabling fullscreen.
    @param p_mi: the media player.
    @param b_fullscreen: boolean for fullscreen status.
    '''
    # Look up the cached C binding, creating it on first use.
    f = _Cfunctions.get('libvlc_set_fullscreen', None) or \
        _Cfunction('libvlc_set_fullscreen', ((1,), (1,),), None,
                    None, MediaPlayer, ctypes.c_int)
    return f(p_mi, b_fullscreen)
def libvlc_get_fullscreen(p_mi):
    '''Get current fullscreen status.
    @param p_mi: the media player.
    @return: the fullscreen status (boolean) \libvlc_return_bool.
    '''
    # Look up the cached C binding, creating it on first use.
    f = _Cfunctions.get('libvlc_get_fullscreen', None) or \
        _Cfunction('libvlc_get_fullscreen', ((1,),), None,
                    ctypes.c_int, MediaPlayer)
    return f(p_mi)
def libvlc_video_set_key_input(p_mi, on):
    '''Enable or disable key press events handling, according to the LibVLC hotkeys
    configuration. By default and for historical reasons, keyboard events are
    handled by the LibVLC video widget.
    @note: On X11, there can be only one subscriber for key press and mouse
    click events per window. If your application has subscribed to those events
    for the X window ID of the video widget, then LibVLC will not be able to
    handle key presses and mouse clicks in any case.
    @warning: This function is only implemented for X11 and Win32 at the moment.
    @param p_mi: the media player.
    @param on: true to handle key press events, false to ignore them.
    '''
    # Look up the cached C binding, creating it on first use.
    f = _Cfunctions.get('libvlc_video_set_key_input', None) or \
        _Cfunction('libvlc_video_set_key_input', ((1,), (1,),), None,
                    None, MediaPlayer, ctypes.c_uint)
    return f(p_mi, on)
def libvlc_video_set_mouse_input(p_mi, on):
    '''Enable or disable mouse click events handling. By default, those events are
    handled. This is needed for DVD menus to work, as well as a few video
    filters such as "puzzle".
    See L{libvlc_video_set_key_input}().
    @warning: This function is only implemented for X11 and Win32 at the moment.
    @param p_mi: the media player.
    @param on: true to handle mouse click events, false to ignore them.
    '''
    # Look up the cached C binding, creating it on first use.
    f = _Cfunctions.get('libvlc_video_set_mouse_input', None) or \
        _Cfunction('libvlc_video_set_mouse_input', ((1,), (1,),), None,
                    None, MediaPlayer, ctypes.c_uint)
    return f(p_mi, on)
def libvlc_video_get_size(p_mi, num):
    '''Get the pixel dimensions of a video.
    @param p_mi: media player.
    @param num: number of the video (starting from, and most commonly 0).
    @return: px pixel width, py pixel height.
    '''
    # Look up the cached C binding, creating it on first use.
    # The two (2,) flags mark C output parameters: the wrapper allocates the
    # c_uint storage itself and returns the (width, height) values.
    f = _Cfunctions.get('libvlc_video_get_size', None) or \
        _Cfunction('libvlc_video_get_size', ((1,), (1,), (2,), (2,),), None,
                    ctypes.c_int, MediaPlayer, ctypes.c_uint, ctypes.POINTER(ctypes.c_uint), ctypes.POINTER(ctypes.c_uint))
    return f(p_mi, num)
def libvlc_video_get_cursor(p_mi, num):
    '''Get the mouse pointer coordinates over a video.
    Coordinates are expressed in terms of the decoded video resolution,
    B{not} in terms of pixels on the screen/viewport (to get the latter,
    you can query your windowing system directly).
    Either of the coordinates may be negative or larger than the corresponding
    dimension of the video, if the cursor is outside the rendering area.
    @warning: The coordinates may be out-of-date if the pointer is not located
    on the video rendering area. LibVLC does not track the pointer if it is
    outside of the video widget.
    @note: LibVLC does not support multiple pointers (it does of course support
    multiple input devices sharing the same pointer) at the moment.
    @param p_mi: media player.
    @param num: number of the video (starting from, and most commonly 0).
    @return: px abscissa, py ordinate.
    '''
    # Look up the cached C binding, creating it on first use.
    # The two (2,) flags mark C output parameters: the wrapper allocates the
    # c_int storage itself and returns the (x, y) values.
    f = _Cfunctions.get('libvlc_video_get_cursor', None) or \
        _Cfunction('libvlc_video_get_cursor', ((1,), (1,), (2,), (2,),), None,
                    ctypes.c_int, MediaPlayer, ctypes.c_uint, ctypes.POINTER(ctypes.c_int), ctypes.POINTER(ctypes.c_int))
    return f(p_mi, num)
def libvlc_video_get_scale(p_mi):
    '''Get the current video scaling factor.
    See also L{libvlc_video_set_scale}().
    @param p_mi: the media player.
    @return: the currently configured zoom factor, or 0. if the video is set to fit to the output window/drawable automatically.
    '''
    # Look up the cached C binding, creating it on first use.
    f = _Cfunctions.get('libvlc_video_get_scale', None) or \
        _Cfunction('libvlc_video_get_scale', ((1,),), None,
                    ctypes.c_float, MediaPlayer)
    return f(p_mi)
def libvlc_video_set_scale(p_mi, f_factor):
    '''Set the video scaling factor. That is the ratio of the number of pixels on
    screen to the number of pixels in the original decoded video in each
    dimension. Zero is a special value; it will adjust the video to the output
    window/drawable (in windowed mode) or the entire screen.
    Note that not all video outputs support scaling.
    @param p_mi: the media player.
    @param f_factor: the scaling factor, or zero.
    '''
    # Look up the cached C binding, creating it on first use.
    f = _Cfunctions.get('libvlc_video_set_scale', None) or \
        _Cfunction('libvlc_video_set_scale', ((1,), (1,),), None,
                    None, MediaPlayer, ctypes.c_float)
    return f(p_mi, f_factor)
def libvlc_video_get_aspect_ratio(p_mi):
    '''Get current video aspect ratio.
    @param p_mi: the media player.
    @return: the video aspect ratio or NULL if unspecified (the result must be released with free() or L{libvlc_free}()).
    '''
    # Look up the cached C binding, creating it on first use.
    # c_void_p + string_result: the converter copies the C string into a
    # Python string and frees the LibVLC-allocated buffer.
    f = _Cfunctions.get('libvlc_video_get_aspect_ratio', None) or \
        _Cfunction('libvlc_video_get_aspect_ratio', ((1,),), string_result,
                    ctypes.c_void_p, MediaPlayer)
    return f(p_mi)
def libvlc_video_set_aspect_ratio(p_mi, psz_aspect):
    '''Set new video aspect ratio.
    @param p_mi: the media player.
    @param psz_aspect: new video aspect-ratio or NULL to reset to default @note Invalid aspect ratios are ignored.
    '''
    # Look up the cached C binding, creating it on first use.
    f = _Cfunctions.get('libvlc_video_set_aspect_ratio', None) or \
        _Cfunction('libvlc_video_set_aspect_ratio', ((1,), (1,),), None,
                    None, MediaPlayer, ctypes.c_char_p)
    return f(p_mi, psz_aspect)
def libvlc_video_get_spu(p_mi):
    '''Get current video subtitle.
    @param p_mi: the media player.
    @return: the video subtitle selected, or -1 if none.
    '''
    # Look up the cached C binding, creating it on first use.
    f = _Cfunctions.get('libvlc_video_get_spu', None) or \
        _Cfunction('libvlc_video_get_spu', ((1,),), None,
                    ctypes.c_int, MediaPlayer)
    return f(p_mi)
def libvlc_video_get_spu_count(p_mi):
    '''Get the number of available video subtitles.
    @param p_mi: the media player.
    @return: the number of available video subtitles.
    '''
    # Look up the cached C binding, creating it on first use.
    f = _Cfunctions.get('libvlc_video_get_spu_count', None) or \
        _Cfunction('libvlc_video_get_spu_count', ((1,),), None,
                    ctypes.c_int, MediaPlayer)
    return f(p_mi)
def libvlc_video_get_spu_description(p_mi):
    '''Get the description of available video subtitles.
    @param p_mi: the media player.
    @return: list containing description of available video subtitles.
    '''
    # Look up the cached C binding, creating it on first use.
    f = _Cfunctions.get('libvlc_video_get_spu_description', None) or \
        _Cfunction('libvlc_video_get_spu_description', ((1,),), None,
                    ctypes.POINTER(TrackDescription), MediaPlayer)
    return f(p_mi)
def libvlc_video_set_spu(p_mi, i_spu):
    '''Set new video subtitle.
    @param p_mi: the media player.
    @param i_spu: video subtitle track to select (i_id from track description).
    @return: 0 on success, -1 if out of range.
    '''
    # Look up the cached C binding, creating it on first use.
    f = _Cfunctions.get('libvlc_video_set_spu', None) or \
        _Cfunction('libvlc_video_set_spu', ((1,), (1,),), None,
                    ctypes.c_int, MediaPlayer, ctypes.c_int)
    return f(p_mi, i_spu)
def libvlc_video_set_subtitle_file(p_mi, psz_subtitle):
    '''Set new video subtitle file.
    @param p_mi: the media player.
    @param psz_subtitle: new video subtitle file.
    @return: the success status (boolean).
    '''
    # Look up the cached C binding, creating it on first use.
    f = _Cfunctions.get('libvlc_video_set_subtitle_file', None) or \
        _Cfunction('libvlc_video_set_subtitle_file', ((1,), (1,),), None,
                    ctypes.c_int, MediaPlayer, ctypes.c_char_p)
    return f(p_mi, psz_subtitle)
def libvlc_video_get_spu_delay(p_mi):
    '''Get the current subtitle delay. Positive values means subtitles are being
    displayed later, negative values earlier.
    @param p_mi: media player.
    @return: time (in microseconds) the display of subtitles is being delayed.
    @version: LibVLC 2.0.0 or later.
    '''
    # Look up the cached C binding, creating it on first use.
    f = _Cfunctions.get('libvlc_video_get_spu_delay', None) or \
        _Cfunction('libvlc_video_get_spu_delay', ((1,),), None,
                    ctypes.c_int64, MediaPlayer)
    return f(p_mi)
def libvlc_video_set_spu_delay(p_mi, i_delay):
    '''Set the subtitle delay. This affects the timing of when the subtitle will
    be displayed. Positive values result in subtitles being displayed later,
    while negative values will result in subtitles being displayed earlier.
    The subtitle delay will be reset to zero each time the media changes.
    @param p_mi: media player.
    @param i_delay: time (in microseconds) the display of subtitles should be delayed.
    @return: 0 on success, -1 on error.
    @version: LibVLC 2.0.0 or later.
    '''
    # Look up the cached C binding, creating it on first use.
    f = _Cfunctions.get('libvlc_video_set_spu_delay', None) or \
        _Cfunction('libvlc_video_set_spu_delay', ((1,), (1,),), None,
                    ctypes.c_int, MediaPlayer, ctypes.c_int64)
    return f(p_mi, i_delay)
def libvlc_video_get_title_description(p_mi):
    '''Get the description of available titles.
    @param p_mi: the media player.
    @return: list containing description of available titles.
    '''
    # Look up the cached C binding, creating it on first use.
    f = _Cfunctions.get('libvlc_video_get_title_description', None) or \
        _Cfunction('libvlc_video_get_title_description', ((1,),), None,
                    ctypes.POINTER(TrackDescription), MediaPlayer)
    return f(p_mi)
def libvlc_video_get_chapter_description(p_mi, i_title):
    '''Get the description of available chapters for specific title.
    @param p_mi: the media player.
    @param i_title: selected title.
    @return: list containing description of available chapter for title i_title.
    '''
    # Look up the cached C binding, creating it on first use.
    f = _Cfunctions.get('libvlc_video_get_chapter_description', None) or \
        _Cfunction('libvlc_video_get_chapter_description', ((1,), (1,),), None,
                    ctypes.POINTER(TrackDescription), MediaPlayer, ctypes.c_int)
    return f(p_mi, i_title)
def libvlc_video_get_crop_geometry(p_mi):
    '''Get current crop filter geometry.
    @param p_mi: the media player.
    @return: the crop filter geometry or NULL if unset.
    '''
    # Look up the cached C binding, creating it on first use.
    # c_void_p + string_result: the converter copies the C string into a
    # Python string and frees the LibVLC-allocated buffer.
    f = _Cfunctions.get('libvlc_video_get_crop_geometry', None) or \
        _Cfunction('libvlc_video_get_crop_geometry', ((1,),), string_result,
                    ctypes.c_void_p, MediaPlayer)
    return f(p_mi)
def libvlc_video_set_crop_geometry(p_mi, psz_geometry):
    '''Set new crop filter geometry.
    @param p_mi: the media player.
    @param psz_geometry: new crop filter geometry (NULL to unset).
    '''
    # Look up the cached C binding, creating it on first use.
    f = _Cfunctions.get('libvlc_video_set_crop_geometry', None) or \
        _Cfunction('libvlc_video_set_crop_geometry', ((1,), (1,),), None,
                    None, MediaPlayer, ctypes.c_char_p)
    return f(p_mi, psz_geometry)
def libvlc_video_get_teletext(p_mi):
    '''Get current teletext page requested.
    @param p_mi: the media player.
    @return: the current teletext page requested.
    '''
    # Look up the cached C binding, creating it on first use.
    f = _Cfunctions.get('libvlc_video_get_teletext', None) or \
        _Cfunction('libvlc_video_get_teletext', ((1,),), None,
                    ctypes.c_int, MediaPlayer)
    return f(p_mi)
def libvlc_video_set_teletext(p_mi, i_page):
    '''Set new teletext page to retrieve.
    @param p_mi: the media player.
    @param i_page: teletext page number requested.
    '''
    # Look up the cached C binding, creating it on first use.
    f = _Cfunctions.get('libvlc_video_set_teletext', None) or \
        _Cfunction('libvlc_video_set_teletext', ((1,), (1,),), None,
                    None, MediaPlayer, ctypes.c_int)
    return f(p_mi, i_page)
def libvlc_toggle_teletext(p_mi):
    '''Toggle teletext transparent status on video output.
    @param p_mi: the media player.
    '''
    # Look up the cached C binding, creating it on first use.
    f = _Cfunctions.get('libvlc_toggle_teletext', None) or \
        _Cfunction('libvlc_toggle_teletext', ((1,),), None,
                    None, MediaPlayer)
    return f(p_mi)
def libvlc_video_get_track_count(p_mi):
    '''Get number of available video tracks.
    @param p_mi: media player.
    @return: the number of available video tracks (int).
    '''
    # Look up the cached C binding, creating it on first use.
    f = _Cfunctions.get('libvlc_video_get_track_count', None) or \
        _Cfunction('libvlc_video_get_track_count', ((1,),), None,
                    ctypes.c_int, MediaPlayer)
    return f(p_mi)
def libvlc_video_get_track_description(p_mi):
    '''Get the description of available video tracks.
    @param p_mi: media player.
    @return: list with description of available video tracks, or NULL on error.
    '''
    # Look up the cached C binding, creating it on first use.
    f = _Cfunctions.get('libvlc_video_get_track_description', None) or \
        _Cfunction('libvlc_video_get_track_description', ((1,),), None,
                    ctypes.POINTER(TrackDescription), MediaPlayer)
    return f(p_mi)
def libvlc_video_get_track(p_mi):
    '''Get current video track.
    @param p_mi: media player.
    @return: the video track ID (int) or -1 if no active input.
    '''
    # Look up the cached C binding, creating it on first use.
    f = _Cfunctions.get('libvlc_video_get_track', None) or \
        _Cfunction('libvlc_video_get_track', ((1,),), None,
                    ctypes.c_int, MediaPlayer)
    return f(p_mi)
def libvlc_video_set_track(p_mi, i_track):
    '''Set video track.
    @param p_mi: media player.
    @param i_track: the track ID (i_id field from track description).
    @return: 0 on success, -1 if out of range.
    '''
    # Look up the cached C binding, creating it on first use.
    f = _Cfunctions.get('libvlc_video_set_track', None) or \
        _Cfunction('libvlc_video_set_track', ((1,), (1,),), None,
                    ctypes.c_int, MediaPlayer, ctypes.c_int)
    return f(p_mi, i_track)
def libvlc_video_take_snapshot(p_mi, num, psz_filepath, i_width, i_height):
    '''Take a snapshot of the current video window.
    If i_width AND i_height is 0, original size is used.
    If i_width XOR i_height is 0, original aspect-ratio is preserved.
    @param p_mi: media player instance.
    @param num: number of video output (typically 0 for the first/only one).
    @param psz_filepath: the path where to save the screenshot to.
    @param i_width: the snapshot's width.
    @param i_height: the snapshot's height.
    @return: 0 on success, -1 if the video was not found.
    '''
    # Look up the cached C binding, creating it on first use.
    f = _Cfunctions.get('libvlc_video_take_snapshot', None) or \
        _Cfunction('libvlc_video_take_snapshot', ((1,), (1,), (1,), (1,), (1,),), None,
                    ctypes.c_int, MediaPlayer, ctypes.c_uint, ctypes.c_char_p, ctypes.c_int, ctypes.c_int)
    return f(p_mi, num, psz_filepath, i_width, i_height)
def libvlc_video_set_deinterlace(p_mi, psz_mode):
    '''Enable or disable deinterlace filter.
    @param p_mi: libvlc media player.
    @param psz_mode: type of deinterlace filter, NULL to disable.
    '''
    # Look up the cached C binding, creating it on first use.
    f = _Cfunctions.get('libvlc_video_set_deinterlace', None) or \
        _Cfunction('libvlc_video_set_deinterlace', ((1,), (1,),), None,
                    None, MediaPlayer, ctypes.c_char_p)
    return f(p_mi, psz_mode)
def libvlc_video_get_marquee_int(p_mi, option):
    '''Get an integer marquee option value.
    @param p_mi: libvlc media player.
    @param option: marq option to get See libvlc_video_marquee_int_option_t.
    @return: the marq option value (int).
    '''
    # Look up the cached C binding, creating it on first use.
    f = _Cfunctions.get('libvlc_video_get_marquee_int', None) or \
        _Cfunction('libvlc_video_get_marquee_int', ((1,), (1,),), None,
                    ctypes.c_int, MediaPlayer, ctypes.c_uint)
    return f(p_mi, option)
def libvlc_video_get_marquee_string(p_mi, option):
    '''Get a string marquee option value.
    @param p_mi: libvlc media player.
    @param option: marq option to get See libvlc_video_marquee_string_option_t.
    @return: the marq option value (string).
    '''
    # Look up the cached C binding, creating it on first use.
    # c_void_p + string_result: the converter copies the C string into a
    # Python string and frees the LibVLC-allocated buffer.
    f = _Cfunctions.get('libvlc_video_get_marquee_string', None) or \
        _Cfunction('libvlc_video_get_marquee_string', ((1,), (1,),), string_result,
                    ctypes.c_void_p, MediaPlayer, ctypes.c_uint)
    return f(p_mi, option)
def libvlc_video_set_marquee_int(p_mi, option, i_val):
    '''Enable, disable or set an integer marquee option
    Setting libvlc_marquee_Enable has the side effect of enabling (arg !0)
    or disabling (arg 0) the marq filter.
    @param p_mi: libvlc media player.
    @param option: marq option to set See libvlc_video_marquee_int_option_t.
    @param i_val: marq option value.
    '''
    # Look up the cached C binding, creating it on first use.
    f = _Cfunctions.get('libvlc_video_set_marquee_int', None) or \
        _Cfunction('libvlc_video_set_marquee_int', ((1,), (1,), (1,),), None,
                    None, MediaPlayer, ctypes.c_uint, ctypes.c_int)
    return f(p_mi, option, i_val)
def libvlc_video_set_marquee_string(p_mi, option, psz_text):
    '''Set a marquee string option.
    @param p_mi: libvlc media player.
    @param option: marq option to set See libvlc_video_marquee_string_option_t.
    @param psz_text: marq option value.
    '''
    # Look up the cached C binding, creating it on first use.
    f = _Cfunctions.get('libvlc_video_set_marquee_string', None) or \
        _Cfunction('libvlc_video_set_marquee_string', ((1,), (1,), (1,),), None,
                    None, MediaPlayer, ctypes.c_uint, ctypes.c_char_p)
    return f(p_mi, option, psz_text)
def libvlc_video_get_logo_int(p_mi, option):
    '''Get integer logo option.
    @param p_mi: libvlc media player instance.
    @param option: logo option to get, values of libvlc_video_logo_option_t.
    @return: the logo option value (int).
    '''
    # Look up the cached C binding, creating it on first use.
    f = _Cfunctions.get('libvlc_video_get_logo_int', None) or \
        _Cfunction('libvlc_video_get_logo_int', ((1,), (1,),), None,
                    ctypes.c_int, MediaPlayer, ctypes.c_uint)
    return f(p_mi, option)
def libvlc_video_set_logo_int(p_mi, option, value):
    '''Set logo option as integer. Options that take a different type value
    are ignored.
    Passing libvlc_logo_enable as option value has the side effect of
    starting (arg !0) or stopping (arg 0) the logo filter.
    @param p_mi: libvlc media player instance.
    @param option: logo option to set, values of libvlc_video_logo_option_t.
    @param value: logo option value.
    '''
    # Look up the cached C binding, creating it on first use.
    f = _Cfunctions.get('libvlc_video_set_logo_int', None) or \
        _Cfunction('libvlc_video_set_logo_int', ((1,), (1,), (1,),), None,
                    None, MediaPlayer, ctypes.c_uint, ctypes.c_int)
    return f(p_mi, option, value)
def libvlc_video_set_logo_string(p_mi, option, psz_value):
    '''Set logo option as string. Options that take a different type value
    are ignored.
    @param p_mi: libvlc media player instance.
    @param option: logo option to set, values of libvlc_video_logo_option_t.
    @param psz_value: logo option value.
    '''
    # Look up the cached C binding, creating it on first use.
    f = _Cfunctions.get('libvlc_video_set_logo_string', None) or \
        _Cfunction('libvlc_video_set_logo_string', ((1,), (1,), (1,),), None,
                    None, MediaPlayer, ctypes.c_uint, ctypes.c_char_p)
    return f(p_mi, option, psz_value)
def libvlc_video_get_adjust_int(p_mi, option):
    '''Get integer adjust option.
    @param p_mi: libvlc media player instance.
    @param option: adjust option to get, values of libvlc_video_adjust_option_t.
    @return: the adjust option value (int).
    @version: LibVLC 1.1.1 and later.
    '''
    # Look up the cached C binding, creating it on first use.
    f = _Cfunctions.get('libvlc_video_get_adjust_int', None) or \
        _Cfunction('libvlc_video_get_adjust_int', ((1,), (1,),), None,
                    ctypes.c_int, MediaPlayer, ctypes.c_uint)
    return f(p_mi, option)
def libvlc_video_set_adjust_int(p_mi, option, value):
    '''Set adjust option as integer. Options that take a different type value
    are ignored.
    Passing libvlc_adjust_enable as option value has the side effect of
    starting (arg !0) or stopping (arg 0) the adjust filter.
    @param p_mi: libvlc media player instance.
    @param option: adjust option to set, values of libvlc_video_adjust_option_t.
    @param value: adjust option value.
    @version: LibVLC 1.1.1 and later.
    '''
    # Look up the cached C binding, creating it on first use.
    f = _Cfunctions.get('libvlc_video_set_adjust_int', None) or \
        _Cfunction('libvlc_video_set_adjust_int', ((1,), (1,), (1,),), None,
                    None, MediaPlayer, ctypes.c_uint, ctypes.c_int)
    return f(p_mi, option, value)
def libvlc_video_get_adjust_float(p_mi, option):
    '''Get float adjust option.
    @param p_mi: libvlc media player instance.
    @param option: adjust option to get, values of libvlc_video_adjust_option_t.
    @return: the adjust option value (float).
    @version: LibVLC 1.1.1 and later.
    '''
    # Look up the cached C binding, creating it on first use.
    f = _Cfunctions.get('libvlc_video_get_adjust_float', None) or \
        _Cfunction('libvlc_video_get_adjust_float', ((1,), (1,),), None,
                    ctypes.c_float, MediaPlayer, ctypes.c_uint)
    return f(p_mi, option)
def libvlc_video_set_adjust_float(p_mi, option, value):
    '''Set adjust option as float. Options that take a different type value
    are ignored.
    @param p_mi: libvlc media player instance.
    @param option: adjust option to set, values of libvlc_video_adjust_option_t.
    @param value: adjust option value.
    @version: LibVLC 1.1.1 and later.
    '''
    # Look up the cached C binding, creating it on first use.
    f = _Cfunctions.get('libvlc_video_set_adjust_float', None) or \
        _Cfunction('libvlc_video_set_adjust_float', ((1,), (1,), (1,),), None,
                    None, MediaPlayer, ctypes.c_uint, ctypes.c_float)
    return f(p_mi, option, value)
def libvlc_audio_output_list_get(p_instance):
    '''Gets the list of available audio output modules.
    @param p_instance: libvlc instance.
    @return: list of available audio outputs. It must be freed with L{libvlc_audio_output_list_release}(). In case of error, NULL is returned.
    '''
    # Look up the cached C binding, creating it on first use.
    f = _Cfunctions.get('libvlc_audio_output_list_get', None) or \
        _Cfunction('libvlc_audio_output_list_get', ((1,),), None,
                    ctypes.POINTER(AudioOutput), Instance)
    return f(p_instance)
def libvlc_audio_output_list_release(p_list):
    '''Frees the list of available audio output modules.
    @param p_list: list with audio outputs for release.
    '''
    # Look up the cached C binding, creating it on first use.
    f = _Cfunctions.get('libvlc_audio_output_list_release', None) or \
        _Cfunction('libvlc_audio_output_list_release', ((1,),), None,
                    None, ctypes.POINTER(AudioOutput))
    return f(p_list)
def libvlc_audio_output_set(p_mi, psz_name):
    '''Selects an audio output module.
    @note: Any change will take effect only after playback is stopped and
    restarted. Audio output cannot be changed while playing.
    @param p_mi: media player.
    @param psz_name: name of audio output, use psz_name of See L{AudioOutput}.
    @return: 0 if function succeeded, -1 on error.
    '''
    # Look up the cached C binding, creating it on first use.
    f = _Cfunctions.get('libvlc_audio_output_set', None) or \
        _Cfunction('libvlc_audio_output_set', ((1,), (1,),), None,
                    ctypes.c_int, MediaPlayer, ctypes.c_char_p)
    return f(p_mi, psz_name)
def libvlc_audio_output_device_enum(mp):
    '''Gets a list of potential audio output devices,
    See L{libvlc_audio_output_device_set}().
    @note: Not all audio outputs support enumerating devices.
    The audio output may be functional even if the list is empty (NULL).
    @note: The list may not be exhaustive.
    @warning: Some audio output devices in the list might not actually work in
    some circumstances. By default, it is recommended to not specify any
    explicit audio device.
    @param mp: media player.
    @return: A NULL-terminated linked list of potential audio output devices. It must be freed with L{libvlc_audio_output_device_list_release}().
    @version: LibVLC 2.2.0 or later.
    '''
    # Look up the cached C binding, creating it on first use.
    f = _Cfunctions.get('libvlc_audio_output_device_enum', None) or \
        _Cfunction('libvlc_audio_output_device_enum', ((1,),), None,
                    ctypes.POINTER(AudioOutputDevice), MediaPlayer)
    return f(mp)
def libvlc_audio_output_device_list_get(p_instance, aout):
    '''Gets a list of audio output devices for a given audio output module,
    See L{libvlc_audio_output_device_set}().
    @note: Not all audio outputs support this. In particular, an empty (NULL)
    list of devices does B{not} imply that the specified audio output does
    not work.
    @note: The list might not be exhaustive.
    @warning: Some audio output devices in the list might not actually work in
    some circumstances. By default, it is recommended to not specify any
    explicit audio device.
    @param p_instance: libvlc instance.
    @param aout: audio output name (as returned by L{libvlc_audio_output_list_get}()).
    @return: A NULL-terminated linked list of potential audio output devices. It must be freed with L{libvlc_audio_output_device_list_release}().
    @version: LibVLC 2.1.0 or later.
    '''
    # Look up the cached C binding, creating it on first use.
    f = _Cfunctions.get('libvlc_audio_output_device_list_get', None) or \
        _Cfunction('libvlc_audio_output_device_list_get', ((1,), (1,),), None,
                    ctypes.POINTER(AudioOutputDevice), Instance, ctypes.c_char_p)
    return f(p_instance, aout)
def libvlc_audio_output_device_list_release(p_list):
    '''Frees a list of available audio output devices.
    @param p_list: list with audio outputs for release.
    @version: LibVLC 2.1.0 or later.
    '''
    # Look up the cached C binding, creating it on first use.
    f = _Cfunctions.get('libvlc_audio_output_device_list_release', None) or \
        _Cfunction('libvlc_audio_output_device_list_release', ((1,),), None,
                    None, ctypes.POINTER(AudioOutputDevice))
    return f(p_list)
def libvlc_audio_output_device_set(mp, module, device_id):
    '''Configures an explicit audio output device.
    If the module parameter is NULL, audio output will be moved to the device
    specified by the device identifier string immediately. This is the
    recommended usage.
    A list of adequate potential device strings can be obtained with
    L{libvlc_audio_output_device_enum}().
    However passing NULL is supported in LibVLC version 2.2.0 and later only;
    in earlier versions, this function would have no effects when the module
    parameter was NULL.
    If the module parameter is not NULL, the device parameter of the
    corresponding audio output, if it exists, will be set to the specified
    string. Note that some audio output modules do not have such a parameter
    (notably MMDevice and PulseAudio).
    A list of adequate potential device strings can be obtained with
    L{libvlc_audio_output_device_list_get}().
    @note: This function does not select the specified audio output plugin.
    L{libvlc_audio_output_set}() is used for that purpose.
    @warning: The syntax for the device parameter depends on the audio output.
    Some audio output modules require further parameters (e.g. a channels map
    in the case of ALSA).
    @param mp: media player.
    @param module: If NULL, current audio output module. if non-NULL, name of audio output module.
    @param device_id: device identifier string.
    @return: Nothing. Errors are ignored (this is a design bug).
    '''
    # Look up the cached C binding, creating it on first use.
    f = _Cfunctions.get('libvlc_audio_output_device_set', None) or \
        _Cfunction('libvlc_audio_output_device_set', ((1,), (1,), (1,),), None,
                    None, MediaPlayer, ctypes.c_char_p, ctypes.c_char_p)
    return f(mp, module, device_id)
def libvlc_audio_output_device_get(mp):
    '''Get the current audio output device identifier.
    This complements L{libvlc_audio_output_device_set}().
    @warning: The initial value for the current audio output device identifier
    may not be set or may be some unknown value. A LibVLC application should
    compare this value against the known device identifiers (e.g. those that
    were previously retrieved by a call to L{libvlc_audio_output_device_enum} or
    L{libvlc_audio_output_device_list_get}) to find the current audio output device.
    It is possible that the selected audio output device changes (an external
    change) without a call to L{libvlc_audio_output_device_set}. That may make this
    method unsuitable to use if a LibVLC application is attempting to track
    dynamic audio device changes as they happen.
    @param mp: media player.
    @return: the current audio output device identifier NULL if no device is selected or in case of error (the result must be released with free() or L{libvlc_free}()).
    @version: LibVLC 3.0.0 or later.
    '''
    # The C function returns a heap-allocated string the caller must free.
    # Declaring the restype as c_char_p would make ctypes convert it to a
    # Python string and drop the original pointer, leaking the buffer.
    # Use c_void_p with the string_result converter instead, which copies
    # the string and frees the LibVLC allocation — the same pattern as the
    # other string-returning wrappers (e.g. libvlc_video_get_aspect_ratio).
    f = _Cfunctions.get('libvlc_audio_output_device_get', None) or \
        _Cfunction('libvlc_audio_output_device_get', ((1,),), string_result,
                    ctypes.c_void_p, MediaPlayer)
    return f(mp)
def libvlc_audio_toggle_mute(p_mi):
    '''Toggle mute status.
    @param p_mi: media player @warning Toggling mute atomically is not always possible: On some platforms, other processes can mute the VLC audio playback stream asynchronously. Thus, there is a small race condition where toggling will not work. See also the limitations of L{libvlc_audio_set_mute}().
    '''
    # Look up the cached C binding, creating it on first use.
    f = _Cfunctions.get('libvlc_audio_toggle_mute', None) or \
        _Cfunction('libvlc_audio_toggle_mute', ((1,),), None,
                    None, MediaPlayer)
    return f(p_mi)
def libvlc_audio_get_mute(p_mi):
    '''Get current mute status.
    @param p_mi: media player.
    @return: the mute status (boolean) if defined, -1 if undefined/unapplicable.
    '''
    # Look up the cached C binding, creating it on first use.
    f = _Cfunctions.get('libvlc_audio_get_mute', None) or \
        _Cfunction('libvlc_audio_get_mute', ((1,),), None,
                    ctypes.c_int, MediaPlayer)
    return f(p_mi)
def libvlc_audio_set_mute(p_mi, status):
    '''Set mute status.
    @param p_mi: media player.
    @param status: If status is true then mute, otherwise unmute @warning This function does not always work. If there are no active audio playback stream, the mute status might not be available. If digital pass-through (S/PDIF, HDMI...) is in use, muting may be unapplicable. Also some audio output plugins do not support muting at all. @note To force silent playback, disable all audio tracks. This is more efficient and reliable than mute.
    '''
    # Look up the cached C binding, creating it on first use.
    f = _Cfunctions.get('libvlc_audio_set_mute', None) or \
        _Cfunction('libvlc_audio_set_mute', ((1,), (1,),), None,
                    None, MediaPlayer, ctypes.c_int)
    return f(p_mi, status)
def libvlc_audio_get_volume(p_mi):
    '''Get current software audio volume.
    @param p_mi: media player.
    @return: the software volume in percents (0 = mute, 100 = nominal / 0dB).
    '''
    # Look up the cached C binding, creating it on first use.
    f = _Cfunctions.get('libvlc_audio_get_volume', None) or \
        _Cfunction('libvlc_audio_get_volume', ((1,),), None,
                    ctypes.c_int, MediaPlayer)
    return f(p_mi)
def libvlc_audio_set_volume(p_mi, i_volume):
    '''Set current software audio volume.
    @param p_mi: media player.
    @param i_volume: the volume in percents (0 = mute, 100 = 0dB).
    @return: 0 if the volume was set, -1 if it was out of range.
    '''
    # Look up the cached C binding, creating it on first use.
    f = _Cfunctions.get('libvlc_audio_set_volume', None) or \
        _Cfunction('libvlc_audio_set_volume', ((1,), (1,),), None,
                    ctypes.c_int, MediaPlayer, ctypes.c_int)
    return f(p_mi, i_volume)
def libvlc_audio_get_track_count(p_mi):
    """Get number of available audio tracks.
    @param p_mi: media player.
    @return: the number of available audio tracks (int), or -1 if unavailable.
    """
    func = _Cfunctions.get('libvlc_audio_get_track_count', None)
    if func is None:
        func = _Cfunction('libvlc_audio_get_track_count', ((1,),), None,
                          ctypes.c_int, MediaPlayer)
    return func(p_mi)
def libvlc_audio_get_track_description(p_mi):
    """Get the description of available audio tracks.
    @param p_mi: media player.
    @return: list with description of available audio tracks, or NULL.
    """
    func = _Cfunctions.get('libvlc_audio_get_track_description', None)
    if func is None:
        func = _Cfunction('libvlc_audio_get_track_description', ((1,),), None,
                          ctypes.POINTER(TrackDescription), MediaPlayer)
    return func(p_mi)
def libvlc_audio_get_track(p_mi):
    """Get current audio track.
    @param p_mi: media player.
    @return: the audio track ID or -1 if no active input.
    """
    func = _Cfunctions.get('libvlc_audio_get_track', None)
    if func is None:
        func = _Cfunction('libvlc_audio_get_track', ((1,),), None,
                          ctypes.c_int, MediaPlayer)
    return func(p_mi)
def libvlc_audio_set_track(p_mi, i_track):
    """Set current audio track.
    @param p_mi: media player.
    @param i_track: the track ID (i_id field from track description).
    @return: 0 on success, -1 on error.
    """
    func = _Cfunctions.get('libvlc_audio_set_track', None)
    if func is None:
        func = _Cfunction('libvlc_audio_set_track', ((1,), (1,),), None,
                          ctypes.c_int, MediaPlayer, ctypes.c_int)
    return func(p_mi, i_track)
def libvlc_audio_get_channel(p_mi):
    """Get current audio channel.
    @param p_mi: media player.
    @return: the audio channel See libvlc_audio_output_channel_t.
    """
    func = _Cfunctions.get('libvlc_audio_get_channel', None)
    if func is None:
        func = _Cfunction('libvlc_audio_get_channel', ((1,),), None,
                          ctypes.c_int, MediaPlayer)
    return func(p_mi)
def libvlc_audio_set_channel(p_mi, channel):
    """Set current audio channel.
    @param p_mi: media player.
    @param channel: the audio channel, See libvlc_audio_output_channel_t.
    @return: 0 on success, -1 on error.
    """
    func = _Cfunctions.get('libvlc_audio_set_channel', None)
    if func is None:
        func = _Cfunction('libvlc_audio_set_channel', ((1,), (1,),), None,
                          ctypes.c_int, MediaPlayer, ctypes.c_int)
    return func(p_mi, channel)
def libvlc_audio_get_delay(p_mi):
    """Get current audio delay.
    @param p_mi: media player.
    @return: the audio delay (microseconds).
    @version: LibVLC 1.1.1 or later.
    """
    func = _Cfunctions.get('libvlc_audio_get_delay', None)
    if func is None:
        func = _Cfunction('libvlc_audio_get_delay', ((1,),), None,
                          ctypes.c_int64, MediaPlayer)
    return func(p_mi)
def libvlc_audio_set_delay(p_mi, i_delay):
    """Set current audio delay. The audio delay will be reset to zero each time the media changes.
    @param p_mi: media player.
    @param i_delay: the audio delay (microseconds).
    @return: 0 on success, -1 on error.
    @version: LibVLC 1.1.1 or later.
    """
    func = _Cfunctions.get('libvlc_audio_set_delay', None)
    if func is None:
        func = _Cfunction('libvlc_audio_set_delay', ((1,), (1,),), None,
                          ctypes.c_int, MediaPlayer, ctypes.c_int64)
    return func(p_mi, i_delay)
def libvlc_audio_equalizer_get_preset_count():
    """Get the number of equalizer presets.
    @return: number of presets.
    @version: LibVLC 2.2.0 or later.
    """
    func = _Cfunctions.get('libvlc_audio_equalizer_get_preset_count', None)
    if func is None:
        func = _Cfunction('libvlc_audio_equalizer_get_preset_count', (), None,
                          ctypes.c_uint)
    return func()
def libvlc_audio_equalizer_get_preset_name(u_index):
    """Get the name of a particular equalizer preset.
    This name can be used, for example, to prepare a preset label or menu in a user
    interface.
    @param u_index: index of the preset, counting from zero.
    @return: preset name, or NULL if there is no such preset.
    @version: LibVLC 2.2.0 or later.
    """
    func = _Cfunctions.get('libvlc_audio_equalizer_get_preset_name', None)
    if func is None:
        func = _Cfunction('libvlc_audio_equalizer_get_preset_name', ((1,),), None,
                          ctypes.c_char_p, ctypes.c_uint)
    return func(u_index)
def libvlc_audio_equalizer_get_band_count():
    """Get the number of distinct frequency bands for an equalizer.
    @return: number of frequency bands.
    @version: LibVLC 2.2.0 or later.
    """
    func = _Cfunctions.get('libvlc_audio_equalizer_get_band_count', None)
    if func is None:
        func = _Cfunction('libvlc_audio_equalizer_get_band_count', (), None,
                          ctypes.c_uint)
    return func()
def libvlc_audio_equalizer_get_band_frequency(u_index):
    """Get a particular equalizer band frequency.
    This value can be used, for example, to create a label for an equalizer band control
    in a user interface.
    @param u_index: index of the band, counting from zero.
    @return: equalizer band frequency (Hz), or -1 if there is no such band.
    @version: LibVLC 2.2.0 or later.
    """
    func = _Cfunctions.get('libvlc_audio_equalizer_get_band_frequency', None)
    if func is None:
        func = _Cfunction('libvlc_audio_equalizer_get_band_frequency', ((1,),), None,
                          ctypes.c_float, ctypes.c_uint)
    return func(u_index)
def libvlc_audio_equalizer_new():
    """Create a new default equalizer, with all frequency values zeroed.
    The new equalizer can subsequently be applied to a media player by invoking
    L{libvlc_media_player_set_equalizer}().
    The returned handle should be freed via L{libvlc_audio_equalizer_release}() when
    it is no longer needed.
    @return: opaque equalizer handle, or NULL on error.
    @version: LibVLC 2.2.0 or later.
    """
    func = _Cfunctions.get('libvlc_audio_equalizer_new', None)
    if func is None:
        func = _Cfunction('libvlc_audio_equalizer_new', (), None,
                          ctypes.c_void_p)
    return func()
def libvlc_audio_equalizer_new_from_preset(u_index):
    """Create a new equalizer, with initial frequency values copied from an existing
    preset.
    The new equalizer can subsequently be applied to a media player by invoking
    L{libvlc_media_player_set_equalizer}().
    The returned handle should be freed via L{libvlc_audio_equalizer_release}() when
    it is no longer needed.
    @param u_index: index of the preset, counting from zero.
    @return: opaque equalizer handle, or NULL on error.
    @version: LibVLC 2.2.0 or later.
    """
    func = _Cfunctions.get('libvlc_audio_equalizer_new_from_preset', None)
    if func is None:
        func = _Cfunction('libvlc_audio_equalizer_new_from_preset', ((1,),), None,
                          ctypes.c_void_p, ctypes.c_uint)
    return func(u_index)
def libvlc_audio_equalizer_release(p_equalizer):
    """Release a previously created equalizer instance.
    The equalizer was previously created by using L{libvlc_audio_equalizer_new}() or
    L{libvlc_audio_equalizer_new_from_preset}().
    It is safe to invoke this method with a NULL p_equalizer parameter for no effect.
    @param p_equalizer: opaque equalizer handle, or NULL.
    @version: LibVLC 2.2.0 or later.
    """
    func = _Cfunctions.get('libvlc_audio_equalizer_release', None)
    if func is None:
        func = _Cfunction('libvlc_audio_equalizer_release', ((1,),), None,
                          None, ctypes.c_void_p)
    return func(p_equalizer)
def libvlc_audio_equalizer_set_preamp(p_equalizer, f_preamp):
    """Set a new pre-amplification value for an equalizer.
    The new equalizer settings are subsequently applied to a media player by invoking
    L{libvlc_media_player_set_equalizer}().
    The supplied amplification value will be clamped to the -20.0 to +20.0 range.
    @param p_equalizer: valid equalizer handle, must not be NULL.
    @param f_preamp: preamp value (-20.0 to 20.0 Hz).
    @return: zero on success, -1 on error.
    @version: LibVLC 2.2.0 or later.
    """
    func = _Cfunctions.get('libvlc_audio_equalizer_set_preamp', None)
    if func is None:
        func = _Cfunction('libvlc_audio_equalizer_set_preamp', ((1,), (1,),), None,
                          ctypes.c_int, ctypes.c_void_p, ctypes.c_float)
    return func(p_equalizer, f_preamp)
def libvlc_audio_equalizer_get_preamp(p_equalizer):
    """Get the current pre-amplification value from an equalizer.
    @param p_equalizer: valid equalizer handle, must not be NULL.
    @return: preamp value (Hz).
    @version: LibVLC 2.2.0 or later.
    """
    func = _Cfunctions.get('libvlc_audio_equalizer_get_preamp', None)
    if func is None:
        func = _Cfunction('libvlc_audio_equalizer_get_preamp', ((1,),), None,
                          ctypes.c_float, ctypes.c_void_p)
    return func(p_equalizer)
def libvlc_audio_equalizer_set_amp_at_index(p_equalizer, f_amp, u_band):
    """Set a new amplification value for a particular equalizer frequency band.
    The new equalizer settings are subsequently applied to a media player by invoking
    L{libvlc_media_player_set_equalizer}().
    The supplied amplification value will be clamped to the -20.0 to +20.0 range.
    @param p_equalizer: valid equalizer handle, must not be NULL.
    @param f_amp: amplification value (-20.0 to 20.0 Hz).
    @param u_band: index, counting from zero, of the frequency band to set.
    @return: zero on success, -1 on error.
    @version: LibVLC 2.2.0 or later.
    """
    func = _Cfunctions.get('libvlc_audio_equalizer_set_amp_at_index', None)
    if func is None:
        func = _Cfunction('libvlc_audio_equalizer_set_amp_at_index',
                          ((1,), (1,), (1,),), None,
                          ctypes.c_int, ctypes.c_void_p, ctypes.c_float, ctypes.c_uint)
    return func(p_equalizer, f_amp, u_band)
def libvlc_audio_equalizer_get_amp_at_index(p_equalizer, u_band):
    """Get the amplification value for a particular equalizer frequency band.
    @param p_equalizer: valid equalizer handle, must not be NULL.
    @param u_band: index, counting from zero, of the frequency band to get.
    @return: amplification value (Hz); NaN if there is no such frequency band.
    @version: LibVLC 2.2.0 or later.
    """
    func = _Cfunctions.get('libvlc_audio_equalizer_get_amp_at_index', None)
    if func is None:
        func = _Cfunction('libvlc_audio_equalizer_get_amp_at_index', ((1,), (1,),), None,
                          ctypes.c_float, ctypes.c_void_p, ctypes.c_uint)
    return func(p_equalizer, u_band)
def libvlc_media_player_set_equalizer(p_mi, p_equalizer):
    """Apply new equalizer settings to a media player.
    The equalizer is first created by invoking L{libvlc_audio_equalizer_new}() or
    L{libvlc_audio_equalizer_new_from_preset}().
    It is possible to apply new equalizer settings to a media player whether the media
    player is currently playing media or not.
    Invoking this method will immediately apply the new equalizer settings to the audio
    output of the currently playing media if there is any.
    If there is no currently playing media, the new equalizer settings will be applied
    later if and when new media is played.
    Equalizer settings will automatically be applied to subsequently played media.
    To disable the equalizer for a media player invoke this method passing NULL for the
    p_equalizer parameter.
    The media player does not keep a reference to the supplied equalizer so it is safe
    for an application to release the equalizer reference any time after this method
    returns.
    @param p_mi: opaque media player handle.
    @param p_equalizer: opaque equalizer handle, or NULL to disable the equalizer for this media player.
    @return: zero on success, -1 on error.
    @version: LibVLC 2.2.0 or later.
    """
    func = _Cfunctions.get('libvlc_media_player_set_equalizer', None)
    if func is None:
        func = _Cfunction('libvlc_media_player_set_equalizer', ((1,), (1,),), None,
                          ctypes.c_int, MediaPlayer, ctypes.c_void_p)
    return func(p_mi, p_equalizer)
def libvlc_vlm_release(p_instance):
    """Release the vlm instance related to the given L{Instance}.
    @param p_instance: the instance.
    """
    func = _Cfunctions.get('libvlc_vlm_release', None)
    if func is None:
        func = _Cfunction('libvlc_vlm_release', ((1,),), None,
                          None, Instance)
    return func(p_instance)
def libvlc_vlm_add_broadcast(p_instance, psz_name, psz_input, psz_output, i_options, ppsz_options, b_enabled, b_loop):
    """Add a broadcast, with one input.
    @param p_instance: the instance.
    @param psz_name: the name of the new broadcast.
    @param psz_input: the input MRL.
    @param psz_output: the output MRL (the parameter to the "sout" variable).
    @param i_options: number of additional options.
    @param ppsz_options: additional options.
    @param b_enabled: boolean for enabling the new broadcast.
    @param b_loop: Should this broadcast be played in loop ?
    @return: 0 on success, -1 on error.
    """
    func = _Cfunctions.get('libvlc_vlm_add_broadcast', None)
    if func is None:
        func = _Cfunction('libvlc_vlm_add_broadcast',
                          ((1,), (1,), (1,), (1,), (1,), (1,), (1,), (1,),), None,
                          ctypes.c_int, Instance, ctypes.c_char_p, ctypes.c_char_p,
                          ctypes.c_char_p, ctypes.c_int, ListPOINTER(ctypes.c_char_p),
                          ctypes.c_int, ctypes.c_int)
    return func(p_instance, psz_name, psz_input, psz_output, i_options, ppsz_options, b_enabled, b_loop)
def libvlc_vlm_add_vod(p_instance, psz_name, psz_input, i_options, ppsz_options, b_enabled, psz_mux):
    """Add a vod, with one input.
    @param p_instance: the instance.
    @param psz_name: the name of the new vod media.
    @param psz_input: the input MRL.
    @param i_options: number of additional options.
    @param ppsz_options: additional options.
    @param b_enabled: boolean for enabling the new vod.
    @param psz_mux: the muxer of the vod media.
    @return: 0 on success, -1 on error.
    """
    func = _Cfunctions.get('libvlc_vlm_add_vod', None)
    if func is None:
        func = _Cfunction('libvlc_vlm_add_vod',
                          ((1,), (1,), (1,), (1,), (1,), (1,), (1,),), None,
                          ctypes.c_int, Instance, ctypes.c_char_p, ctypes.c_char_p,
                          ctypes.c_int, ListPOINTER(ctypes.c_char_p), ctypes.c_int,
                          ctypes.c_char_p)
    return func(p_instance, psz_name, psz_input, i_options, ppsz_options, b_enabled, psz_mux)
def libvlc_vlm_del_media(p_instance, psz_name):
    """Delete a media (VOD or broadcast).
    @param p_instance: the instance.
    @param psz_name: the media to delete.
    @return: 0 on success, -1 on error.
    """
    func = _Cfunctions.get('libvlc_vlm_del_media', None)
    if func is None:
        func = _Cfunction('libvlc_vlm_del_media', ((1,), (1,),), None,
                          ctypes.c_int, Instance, ctypes.c_char_p)
    return func(p_instance, psz_name)
def libvlc_vlm_set_enabled(p_instance, psz_name, b_enabled):
    """Enable or disable a media (VOD or broadcast).
    @param p_instance: the instance.
    @param psz_name: the media to work on.
    @param b_enabled: the new status.
    @return: 0 on success, -1 on error.
    """
    func = _Cfunctions.get('libvlc_vlm_set_enabled', None)
    if func is None:
        func = _Cfunction('libvlc_vlm_set_enabled', ((1,), (1,), (1,),), None,
                          ctypes.c_int, Instance, ctypes.c_char_p, ctypes.c_int)
    return func(p_instance, psz_name, b_enabled)
def libvlc_vlm_set_output(p_instance, psz_name, psz_output):
    """Set the output for a media.
    @param p_instance: the instance.
    @param psz_name: the media to work on.
    @param psz_output: the output MRL (the parameter to the "sout" variable).
    @return: 0 on success, -1 on error.
    """
    func = _Cfunctions.get('libvlc_vlm_set_output', None)
    if func is None:
        func = _Cfunction('libvlc_vlm_set_output', ((1,), (1,), (1,),), None,
                          ctypes.c_int, Instance, ctypes.c_char_p, ctypes.c_char_p)
    return func(p_instance, psz_name, psz_output)
def libvlc_vlm_set_input(p_instance, psz_name, psz_input):
    """Set a media's input MRL. This will delete all existing inputs and
    add the specified one.
    @param p_instance: the instance.
    @param psz_name: the media to work on.
    @param psz_input: the input MRL.
    @return: 0 on success, -1 on error.
    """
    func = _Cfunctions.get('libvlc_vlm_set_input', None)
    if func is None:
        func = _Cfunction('libvlc_vlm_set_input', ((1,), (1,), (1,),), None,
                          ctypes.c_int, Instance, ctypes.c_char_p, ctypes.c_char_p)
    return func(p_instance, psz_name, psz_input)
def libvlc_vlm_add_input(p_instance, psz_name, psz_input):
    """Add a media's input MRL. This will add the specified one.
    @param p_instance: the instance.
    @param psz_name: the media to work on.
    @param psz_input: the input MRL.
    @return: 0 on success, -1 on error.
    """
    func = _Cfunctions.get('libvlc_vlm_add_input', None)
    if func is None:
        func = _Cfunction('libvlc_vlm_add_input', ((1,), (1,), (1,),), None,
                          ctypes.c_int, Instance, ctypes.c_char_p, ctypes.c_char_p)
    return func(p_instance, psz_name, psz_input)
def libvlc_vlm_set_loop(p_instance, psz_name, b_loop):
    """Set a media's loop status.
    @param p_instance: the instance.
    @param psz_name: the media to work on.
    @param b_loop: the new status.
    @return: 0 on success, -1 on error.
    """
    func = _Cfunctions.get('libvlc_vlm_set_loop', None)
    if func is None:
        func = _Cfunction('libvlc_vlm_set_loop', ((1,), (1,), (1,),), None,
                          ctypes.c_int, Instance, ctypes.c_char_p, ctypes.c_int)
    return func(p_instance, psz_name, b_loop)
def libvlc_vlm_set_mux(p_instance, psz_name, psz_mux):
    """Set a media's vod muxer.
    @param p_instance: the instance.
    @param psz_name: the media to work on.
    @param psz_mux: the new muxer.
    @return: 0 on success, -1 on error.
    """
    func = _Cfunctions.get('libvlc_vlm_set_mux', None)
    if func is None:
        func = _Cfunction('libvlc_vlm_set_mux', ((1,), (1,), (1,),), None,
                          ctypes.c_int, Instance, ctypes.c_char_p, ctypes.c_char_p)
    return func(p_instance, psz_name, psz_mux)
def libvlc_vlm_change_media(p_instance, psz_name, psz_input, psz_output, i_options, ppsz_options, b_enabled, b_loop):
    """Edit the parameters of a media. This will delete all existing inputs and
    add the specified one.
    @param p_instance: the instance.
    @param psz_name: the name of the new broadcast.
    @param psz_input: the input MRL.
    @param psz_output: the output MRL (the parameter to the "sout" variable).
    @param i_options: number of additional options.
    @param ppsz_options: additional options.
    @param b_enabled: boolean for enabling the new broadcast.
    @param b_loop: Should this broadcast be played in loop ?
    @return: 0 on success, -1 on error.
    """
    func = _Cfunctions.get('libvlc_vlm_change_media', None)
    if func is None:
        func = _Cfunction('libvlc_vlm_change_media',
                          ((1,), (1,), (1,), (1,), (1,), (1,), (1,), (1,),), None,
                          ctypes.c_int, Instance, ctypes.c_char_p, ctypes.c_char_p,
                          ctypes.c_char_p, ctypes.c_int, ListPOINTER(ctypes.c_char_p),
                          ctypes.c_int, ctypes.c_int)
    return func(p_instance, psz_name, psz_input, psz_output, i_options, ppsz_options, b_enabled, b_loop)
def libvlc_vlm_play_media(p_instance, psz_name):
    """Play the named broadcast.
    @param p_instance: the instance.
    @param psz_name: the name of the broadcast.
    @return: 0 on success, -1 on error.
    """
    func = _Cfunctions.get('libvlc_vlm_play_media', None)
    if func is None:
        func = _Cfunction('libvlc_vlm_play_media', ((1,), (1,),), None,
                          ctypes.c_int, Instance, ctypes.c_char_p)
    return func(p_instance, psz_name)
def libvlc_vlm_stop_media(p_instance, psz_name):
    """Stop the named broadcast.
    @param p_instance: the instance.
    @param psz_name: the name of the broadcast.
    @return: 0 on success, -1 on error.
    """
    func = _Cfunctions.get('libvlc_vlm_stop_media', None)
    if func is None:
        func = _Cfunction('libvlc_vlm_stop_media', ((1,), (1,),), None,
                          ctypes.c_int, Instance, ctypes.c_char_p)
    return func(p_instance, psz_name)
def libvlc_vlm_pause_media(p_instance, psz_name):
    """Pause the named broadcast.
    @param p_instance: the instance.
    @param psz_name: the name of the broadcast.
    @return: 0 on success, -1 on error.
    """
    func = _Cfunctions.get('libvlc_vlm_pause_media', None)
    if func is None:
        func = _Cfunction('libvlc_vlm_pause_media', ((1,), (1,),), None,
                          ctypes.c_int, Instance, ctypes.c_char_p)
    return func(p_instance, psz_name)
def libvlc_vlm_seek_media(p_instance, psz_name, f_percentage):
    """Seek in the named broadcast.
    @param p_instance: the instance.
    @param psz_name: the name of the broadcast.
    @param f_percentage: the percentage to seek to.
    @return: 0 on success, -1 on error.
    """
    func = _Cfunctions.get('libvlc_vlm_seek_media', None)
    if func is None:
        func = _Cfunction('libvlc_vlm_seek_media', ((1,), (1,), (1,),), None,
                          ctypes.c_int, Instance, ctypes.c_char_p, ctypes.c_float)
    return func(p_instance, psz_name, f_percentage)
def libvlc_vlm_show_media(p_instance, psz_name):
    """Return information about the named media as a JSON
    string representation.
    This function is mainly intended for debugging use,
    if you want programmatic access to the state of
    a vlm_media_instance_t, please use the corresponding
    libvlc_vlm_get_media_instance_xxx -functions.
    Currently there are no such functions available for
    vlm_media_t though.
    @param p_instance: the instance.
    @param psz_name: the name of the media, if the name is an empty string, all media is described.
    @return: string with information about named media, or NULL on error.
    """
    func = _Cfunctions.get('libvlc_vlm_show_media', None)
    if func is None:
        # string_result converts the returned char* to a Python string
        # and frees the underlying buffer.
        func = _Cfunction('libvlc_vlm_show_media', ((1,), (1,),), string_result,
                          ctypes.c_void_p, Instance, ctypes.c_char_p)
    return func(p_instance, psz_name)
def libvlc_vlm_get_media_instance_position(p_instance, psz_name, i_instance):
    """Get vlm_media instance position by name or instance id.
    @param p_instance: a libvlc instance.
    @param psz_name: name of vlm media instance.
    @param i_instance: instance id.
    @return: position as float or -1. on error.
    """
    func = _Cfunctions.get('libvlc_vlm_get_media_instance_position', None)
    if func is None:
        func = _Cfunction('libvlc_vlm_get_media_instance_position', ((1,), (1,), (1,),), None,
                          ctypes.c_float, Instance, ctypes.c_char_p, ctypes.c_int)
    return func(p_instance, psz_name, i_instance)
def libvlc_vlm_get_media_instance_time(p_instance, psz_name, i_instance):
    """Get vlm_media instance time by name or instance id.
    @param p_instance: a libvlc instance.
    @param psz_name: name of vlm media instance.
    @param i_instance: instance id.
    @return: time as integer or -1 on error.
    """
    func = _Cfunctions.get('libvlc_vlm_get_media_instance_time', None)
    if func is None:
        func = _Cfunction('libvlc_vlm_get_media_instance_time', ((1,), (1,), (1,),), None,
                          ctypes.c_int, Instance, ctypes.c_char_p, ctypes.c_int)
    return func(p_instance, psz_name, i_instance)
def libvlc_vlm_get_media_instance_length(p_instance, psz_name, i_instance):
    """Get vlm_media instance length by name or instance id.
    @param p_instance: a libvlc instance.
    @param psz_name: name of vlm media instance.
    @param i_instance: instance id.
    @return: length of media item or -1 on error.
    """
    func = _Cfunctions.get('libvlc_vlm_get_media_instance_length', None)
    if func is None:
        func = _Cfunction('libvlc_vlm_get_media_instance_length', ((1,), (1,), (1,),), None,
                          ctypes.c_int, Instance, ctypes.c_char_p, ctypes.c_int)
    return func(p_instance, psz_name, i_instance)
def libvlc_vlm_get_media_instance_rate(p_instance, psz_name, i_instance):
    """Get vlm_media instance playback rate by name or instance id.
    @param p_instance: a libvlc instance.
    @param psz_name: name of vlm media instance.
    @param i_instance: instance id.
    @return: playback rate or -1 on error.
    """
    func = _Cfunctions.get('libvlc_vlm_get_media_instance_rate', None)
    if func is None:
        func = _Cfunction('libvlc_vlm_get_media_instance_rate', ((1,), (1,), (1,),), None,
                          ctypes.c_int, Instance, ctypes.c_char_p, ctypes.c_int)
    return func(p_instance, psz_name, i_instance)
def libvlc_vlm_get_media_instance_title(p_instance, psz_name, i_instance):
    """Get vlm_media instance title number by name or instance id.
    @param p_instance: a libvlc instance.
    @param psz_name: name of vlm media instance.
    @param i_instance: instance id.
    @return: title as number or -1 on error.
    @bug: will always return 0.
    """
    func = _Cfunctions.get('libvlc_vlm_get_media_instance_title', None)
    if func is None:
        func = _Cfunction('libvlc_vlm_get_media_instance_title', ((1,), (1,), (1,),), None,
                          ctypes.c_int, Instance, ctypes.c_char_p, ctypes.c_int)
    return func(p_instance, psz_name, i_instance)
def libvlc_vlm_get_media_instance_chapter(p_instance, psz_name, i_instance):
    """Get vlm_media instance chapter number by name or instance id.
    @param p_instance: a libvlc instance.
    @param psz_name: name of vlm media instance.
    @param i_instance: instance id.
    @return: chapter as number or -1 on error.
    @bug: will always return 0.
    """
    func = _Cfunctions.get('libvlc_vlm_get_media_instance_chapter', None)
    if func is None:
        func = _Cfunction('libvlc_vlm_get_media_instance_chapter', ((1,), (1,), (1,),), None,
                          ctypes.c_int, Instance, ctypes.c_char_p, ctypes.c_int)
    return func(p_instance, psz_name, i_instance)
def libvlc_vlm_get_media_instance_seekable(p_instance, psz_name, i_instance):
    """Is libvlc instance seekable ?
    @param p_instance: a libvlc instance.
    @param psz_name: name of vlm media instance.
    @param i_instance: instance id.
    @return: 1 if seekable, 0 if not, -1 if media does not exist.
    @bug: will always return 0.
    """
    func = _Cfunctions.get('libvlc_vlm_get_media_instance_seekable', None)
    if func is None:
        func = _Cfunction('libvlc_vlm_get_media_instance_seekable', ((1,), (1,), (1,),), None,
                          ctypes.c_int, Instance, ctypes.c_char_p, ctypes.c_int)
    return func(p_instance, psz_name, i_instance)
def libvlc_vlm_get_event_manager(p_instance):
    """Get libvlc_event_manager from a vlm media.
    The p_event_manager is immutable, so you don't have to hold the lock.
    @param p_instance: a libvlc instance.
    @return: libvlc_event_manager.
    """
    func = _Cfunctions.get('libvlc_vlm_get_event_manager', None)
    if func is None:
        # class_result wraps the raw pointer in an EventManager instance.
        func = _Cfunction('libvlc_vlm_get_event_manager', ((1,),), class_result(EventManager),
                          ctypes.c_void_p, Instance)
    return func(p_instance)
# 4 function(s) blacklisted:
# libvlc_audio_output_get_device_type
# libvlc_audio_output_set_device_type
# libvlc_printerr
# libvlc_set_exit_handler
# 29 function(s) not wrapped as methods:
# libvlc_audio_equalizer_get_amp_at_index
# libvlc_audio_equalizer_get_band_count
# libvlc_audio_equalizer_get_band_frequency
# libvlc_audio_equalizer_get_preamp
# libvlc_audio_equalizer_get_preset_count
# libvlc_audio_equalizer_get_preset_name
# libvlc_audio_equalizer_new
# libvlc_audio_equalizer_new_from_preset
# libvlc_audio_equalizer_release
# libvlc_audio_equalizer_set_amp_at_index
# libvlc_audio_equalizer_set_preamp
# libvlc_audio_output_device_list_release
# libvlc_audio_output_list_release
# libvlc_clearerr
# libvlc_clock
# libvlc_errmsg
# libvlc_event_type_name
# libvlc_free
# libvlc_get_changeset
# libvlc_get_compiler
# libvlc_get_version
# libvlc_log_get_context
# libvlc_log_get_object
# libvlc_media_get_codec_description
# libvlc_media_tracks_release
# libvlc_module_description_list_release
# libvlc_new
# libvlc_track_description_list_release
# libvlc_vprinterr
# Start of footer.py #
# Backward compatibility
def callbackmethod(callback):
    """Obsolete @callbackmethod decorator, kept for backward compatibility.

    Returns *callback* unchanged.
    """
    return callback
# libvlc_free is not present in some versions of libvlc. If it is not
# in the library, then emulate it by calling libc.free
if not hasattr(dll, 'libvlc_free'):
    # Need to find the free() function in the C runtime; this is
    # platform specific.
    # For Linux and MacOSX
    libc_path = find_library('c')
    if libc_path:
        libc = ctypes.CDLL(libc_path)
        libvlc_free = libc.free
    else:
        # On win32, it is impossible to guess the proper lib to call
        # (msvcrt, mingw...). Just ignore the call: it will memleak,
        # but not prevent to run the application.
        def libvlc_free(p):
            pass
    # ensure argtypes is right, because default type of int won't work
    # on 64-bit systems
    libvlc_free.argtypes = [ ctypes.c_void_p ]
# Version functions
def _dot2int(v):
'''(INTERNAL) Convert 'i.i.i[.i]' str to int.
'''
t = [int(i) for i in v.split('.')]
if len(t) == 3:
t.append(0)
elif len(t) != 4:
raise ValueError('"i.i.i[.i]": %r' % (v,))
if min(t) < 0 or max(t) > 255:
raise ValueError('[0..255]: %r' % (v,))
i = t.pop(0)
while t:
i = (i << 8) + t.pop(0)
return i
def hex_version():
    """Return the version of these bindings in hex or 0 if unavailable.
    """
    try:
        # The bindings version tag is the last '-'-separated part.
        tag = __version__.split('-')[-1]
    except NameError:  # __version__ not defined in this build
        return 0
    try:
        return _dot2int(tag)
    except (NameError, ValueError):
        return 0
def libvlc_hex_version():
    """Return the libvlc version in hex or 0 if unavailable.
    """
    try:
        # First whitespace-separated token of the version string, e.g. '2.2.0'.
        version = bytes_to_str(libvlc_get_version()).split()[0]
        return _dot2int(version)
    except ValueError:
        return 0
def debug_callback(event, *args, **kwds):
    """Example callback, useful for debugging.

    Prints the event type plus any extra positional and keyword
    arguments passed along by the event manager.
    """
    parts = ['event %s' % (event.type,)]
    parts.extend(str(a) for a in args)
    parts.extend(sorted('%s=%s' % kv for kv in kwds.items()))
    print('Debug callback (%s)' % ', '.join(parts))
if __name__ == '__main__':
    # Minimal interactive demo player: `python vlc.py <movie_filename>`.
    # Plays the file and drives the player with single-key commands.

    # Use msvcrt.getch on Windows; emulate it via termios/tty elsewhere.
    try:
        from msvcrt import getch
    except ImportError:
        import termios
        import tty

        def getch():  # getchar(), getc(stdin) #PYCHOK flake
            # Read one character from stdin without waiting for a newline,
            # restoring the previous terminal mode afterwards.
            fd = sys.stdin.fileno()
            old = termios.tcgetattr(fd)
            try:
                tty.setraw(fd)
                ch = sys.stdin.read(1)
            finally:
                termios.tcsetattr(fd, termios.TCSADRAIN, old)
            return ch

    def end_callback(event):
        # Invoked on MediaPlayerEndReached: terminate the demo.
        print('End of media stream (event %s)' % event.type)
        sys.exit(0)

    # Toggled by the 'p' key binding below.
    echo_position = False

    def pos_callback(event, player):
        # Invoked on MediaPlayerPositionChanged: optionally echo position.
        if echo_position:
            sys.stdout.write('\r%s to %.2f%% (%.2f%%)' % (event.type,
                                                          event.u.new_position * 100,
                                                          player.get_position() * 100))
            sys.stdout.flush()

    def print_version():
        """Print libvlc version"""
        try:
            print('Build date: %s (%#x)' % (build_date, hex_version()))
            print('LibVLC version: %s (%#x)' % (bytes_to_str(libvlc_get_version()), libvlc_hex_version()))
            print('LibVLC compiler: %s' % bytes_to_str(libvlc_get_compiler()))
            if plugin_path:
                print('Plugin path: %s' % plugin_path)
        except:
            # Best effort only: version info is purely informational.
            print('Error: %s' % sys.exc_info()[1])

    if sys.argv[1:] and sys.argv[1] not in ('-h', '--help'):

        movie = os.path.expanduser(sys.argv[1])
        if not os.access(movie, os.R_OK):
            print('Error: %s file not readable' % movie)
            sys.exit(1)

        instance = Instance("--sub-source marq")
        try:
            media = instance.media_new(movie)
        except NameError:
            print('NameError: %s (%s vs LibVLC %s)' % (sys.exc_info()[1],
                                                       __version__,
                                                       libvlc_get_version()))
            sys.exit(1)
        player = instance.media_player_new()
        player.set_media(media)
        player.play()

        # Some marquee examples. Marquee requires '--sub-source marq' in the
        # Instance() call above. See <http://www.videolan.org/doc/play-howto/en/ch04.html>
        player.video_set_marquee_int(VideoMarqueeOption.Enable, 1)
        player.video_set_marquee_int(VideoMarqueeOption.Size, 24)  # pixels
        player.video_set_marquee_int(VideoMarqueeOption.Position, Position.Bottom)
        if False:  # only one marquee can be specified
            player.video_set_marquee_int(VideoMarqueeOption.Timeout, 5000)  # millisec, 0==forever
            t = media.get_mrl()  # movie
        else:  # update marquee text periodically
            player.video_set_marquee_int(VideoMarqueeOption.Timeout, 0)  # millisec, 0==forever
            player.video_set_marquee_int(VideoMarqueeOption.Refresh, 1000)  # millisec (or sec?)
            ##t = '$L / $D or $P at $T'
            t = '%Y-%m-%d %H:%M:%S'
        player.video_set_marquee_string(VideoMarqueeOption.Text, str_to_bytes(t))

        # Some event manager examples. Note, the callback can be any Python
        # callable and does not need to be decorated. Optionally, specify
        # any number of positional and/or keyword arguments to be passed
        # to the callback (in addition to the first one, an Event instance).
        event_manager = player.event_manager()
        event_manager.event_attach(EventType.MediaPlayerEndReached, end_callback)
        event_manager.event_attach(EventType.MediaPlayerPositionChanged, pos_callback, player)

        def mspf():
            """Milliseconds per frame."""
            # Fall back to 25 fps when the FPS is unknown (0 or None).
            return int(1000 // (player.get_fps() or 25))

        def print_info():
            """Print information about the media"""
            try:
                print_version()
                media = player.get_media()
                print('State: %s' % player.get_state())
                print('Media: %s' % bytes_to_str(media.get_mrl()))
                print('Track: %s/%s' % (player.video_get_track(), player.video_get_track_count()))
                print('Current time: %s/%s' % (player.get_time(), media.get_duration()))
                print('Position: %s' % player.get_position())
                print('FPS: %s (%d ms)' % (player.get_fps(), mspf()))
                print('Rate: %s' % player.get_rate())
                print('Video size: %s' % str(player.video_get_size(0)))  # num=0
                print('Scale: %s' % player.video_get_scale())
                print('Aspect ratio: %s' % player.video_get_aspect_ratio())
                #print('Window:' % player.get_hwnd()
            except Exception:
                print('Error: %s' % sys.exc_info()[1])

        def sec_forward():
            """Go forward one sec"""
            player.set_time(player.get_time() + 1000)

        def sec_backward():
            """Go backward one sec"""
            player.set_time(player.get_time() - 1000)

        def frame_forward():
            """Go forward one frame"""
            player.set_time(player.get_time() + mspf())

        def frame_backward():
            """Go backward one frame"""
            player.set_time(player.get_time() - mspf())

        def print_help():
            """Print help"""
            print('Single-character commands:')
            for k, m in sorted(keybindings.items()):
                m = (m.__doc__ or m.__name__).splitlines()[0]
                print(' %s: %s.' % (k, m.rstrip('.')))
            print('0-9: go to that fraction of the movie')

        def quit_app():
            """Stop and exit"""
            sys.exit(0)

        def toggle_echo_position():
            """Toggle echoing of media position"""
            global echo_position
            echo_position = not echo_position

        # Single-key dispatch table; the docstrings double as help text
        # printed by print_help().
        keybindings = {
            ' ': player.pause,
            '+': sec_forward,
            '-': sec_backward,
            '.': frame_forward,
            ',': frame_backward,
            'f': player.toggle_fullscreen,
            'i': print_info,
            'p': toggle_echo_position,
            'q': quit_app,
            '?': print_help,
            }

        print('Press q to quit, ? to get help.%s' % os.linesep)
        while True:
            k = getch()
            print('> %s' % k)
            if k in keybindings:
                keybindings[k]()
            elif k.isdigit():
                # jump to fraction of the movie.
                player.set_position(float('0.'+k))

    else:
        print('Usage: %s <movie_filename>' % sys.argv[0])
        print('Once launched, type ? for help.')
        print('')
        print_version()
|
Kraymer/keroaek
|
keroaek/vlc.py
|
Python
|
mit
| 292,112
| 0.005402
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.1 on 2016-11-28 13:35
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Accounts schema rework (auto-generated by Django 1.10.1).

    Replaces the direct Account->user link with a many-to-many relation
    through AccountRules (which carries a per-user permission level) and
    drops the InvitationRequest model.
    """

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('accounts', '0001_initial'),
    ]

    operations = [
        # Join table between Account and the user model, holding the
        # permission level granted to that user (Admin / Read-write / Read).
        migrations.CreateModel(
            name='AccountRules',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('permissions', models.CharField(choices=[(b'A', b'Administration'), (b'W', b'Read/write'), (b'R', b'Read')], max_length=1)),
            ],
        ),
        migrations.DeleteModel(
            name='InvitationRequest',
        ),
        migrations.AlterModelOptions(
            name='account',
            options={'ordering': ('create', 'name'), 'verbose_name': 'Account'},
        ),
        # The single-user FK is superseded by the M2M through AccountRules.
        migrations.RemoveField(
            model_name='account',
            name='user',
        ),
        migrations.AlterField(
            model_name='account',
            name='create',
            field=models.DateField(auto_now_add=True, verbose_name='Creation date'),
        ),
        migrations.AddField(
            model_name='accountrules',
            name='account',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='accounts.Account'),
        ),
        migrations.AddField(
            model_name='accountrules',
            name='user',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
        ),
        migrations.AddField(
            model_name='account',
            name='users',
            field=models.ManyToManyField(related_name='account', through='accounts.AccountRules', to=settings.AUTH_USER_MODEL),
        ),
    ]
|
sebastienbarbier/723e_server
|
seven23/models/accounts/migrations/0002_auto_20161128_1335.py
|
Python
|
mit
| 1,974
| 0.003546
|
"""
Test cases for ldaptor.protocols.ldap.ldapserver module.
"""
from twisted.trial import unittest
import sets, base64
from twisted.internet import protocol, address
from twisted.python import components
from ldaptor import inmemory, interfaces, schema, delta, entry
from ldaptor.protocols.ldap import ldapserver, ldapclient, ldaperrors, fetchschema
from ldaptor.protocols import pureldap, pureber
from twisted.test import proto_helpers
from ldaptor.test import util, test_schema
class LDAPServerTest(unittest.TestCase):
    """End-to-end tests for ldapserver.LDAPServer over a StringTransport.

    Each test feeds BER-encoded LDAP messages to the server through
    dataReceived() and compares the raw bytes the server writes back.

    NOTE(review): Python 2 era code -- relies on the removed `sets`
    module, `base64.decodestring` and the deprecated `assertEquals`
    alias; porting to Python 3 would need all three replaced.
    """

    def setUp(self):
        # Directory fixture:
        #   dc=example,dc=com
        #     ou=stuff
        #       cn=thingie
        #       cn=another
        self.root = inmemory.ReadOnlyInMemoryLDAPEntry(
            dn='dc=example,dc=com',
            attributes={ 'dc': 'example',
            })
        self.stuff = self.root.addChild(
            rdn='ou=stuff',
            attributes={
            'objectClass': ['a', 'b'],
            'ou': ['stuff'],
            })
        self.thingie = self.stuff.addChild(
            rdn='cn=thingie',
            attributes={
            'objectClass': ['a', 'b'],
            'cn': ['thingie'],
            })
        self.another = self.stuff.addChild(
            rdn='cn=another',
            attributes={
            'objectClass': ['a', 'b'],
            'cn': ['another'],
            })
        # Wire the server to an in-memory transport so each test can
        # inspect exactly what bytes it wrote via transport.value().
        server = ldapserver.LDAPServer()
        server.factory = self.root
        server.transport = proto_helpers.StringTransport()
        server.connectionMade()
        self.server = server

    def test_bind(self):
        """An anonymous bind succeeds with resultCode 0."""
        self.server.dataReceived(str(pureldap.LDAPMessage(pureldap.LDAPBindRequest(), id=4)))
        self.assertEquals(self.server.transport.value(),
                          str(pureldap.LDAPMessage(pureldap.LDAPBindResponse(resultCode=0), id=4)))

    def test_bind_success(self):
        """Binding with the right password returns the matched DN."""
        self.thingie['userPassword'] = ['{SSHA}yVLLj62rFf3kDAbzwEU0zYAVvbWrze8='] # "secret"
        self.server.dataReceived(str(pureldap.LDAPMessage(pureldap.LDAPBindRequest(
            dn='cn=thingie,ou=stuff,dc=example,dc=com',
            auth='secret'), id=4)))
        self.assertEquals(self.server.transport.value(),
                          str(pureldap.LDAPMessage(
            pureldap.LDAPBindResponse(resultCode=0,
                                      matchedDN='cn=thingie,ou=stuff,dc=example,dc=com'),
            id=4)))

    def test_bind_invalidCredentials_badPassword(self):
        """A wrong password yields invalidCredentials."""
        self.server.dataReceived(str(pureldap.LDAPMessage(
            pureldap.LDAPBindRequest(dn='cn=thingie,ou=stuff,dc=example,dc=com',
                                     auth='invalid'),
            id=734)))
        self.assertEquals(self.server.transport.value(),
                          str(pureldap.LDAPMessage(
            pureldap.LDAPBindResponse(
            resultCode=ldaperrors.LDAPInvalidCredentials.resultCode),
            id=734)))

    def test_bind_invalidCredentials_nonExisting(self):
        """Binding as an unknown DN yields the same invalidCredentials
        (no information leak about which DNs exist)."""
        self.server.dataReceived(str(pureldap.LDAPMessage(
            pureldap.LDAPBindRequest(dn='cn=non-existing,dc=example,dc=com',
                                     auth='invalid'),
            id=78)))
        self.assertEquals(self.server.transport.value(),
                          str(pureldap.LDAPMessage(
            pureldap.LDAPBindResponse(
            resultCode=ldaperrors.LDAPInvalidCredentials.resultCode),
            id=78)))

    def test_bind_badVersion_1_anonymous(self):
        """Only LDAPv3 is supported: version 1 is rejected."""
        self.server.dataReceived(str(pureldap.LDAPMessage(
            pureldap.LDAPBindRequest(version=1),
            id=32)))
        self.assertEquals(self.server.transport.value(),
                          str(pureldap.LDAPMessage(
            pureldap.LDAPBindResponse(
            resultCode=ldaperrors.LDAPProtocolError.resultCode,
            errorMessage='Version 1 not supported'),
            id=32)))

    def test_bind_badVersion_2_anonymous(self):
        """Only LDAPv3 is supported: version 2 is rejected."""
        self.server.dataReceived(str(pureldap.LDAPMessage(
            pureldap.LDAPBindRequest(version=2),
            id=32)))
        self.assertEquals(self.server.transport.value(),
                          str(pureldap.LDAPMessage(
            pureldap.LDAPBindResponse(
            resultCode=ldaperrors.LDAPProtocolError.resultCode,
            errorMessage='Version 2 not supported'),
            id=32)))

    def test_bind_badVersion_4_anonymous(self):
        """Only LDAPv3 is supported: version 4 is rejected."""
        self.server.dataReceived(str(pureldap.LDAPMessage(
            pureldap.LDAPBindRequest(version=4),
            id=32)))
        self.assertEquals(self.server.transport.value(),
                          str(pureldap.LDAPMessage(
            pureldap.LDAPBindResponse(
            resultCode=ldaperrors.LDAPProtocolError.resultCode,
            errorMessage='Version 4 not supported'),
            id=32)))

    def test_bind_badVersion_4_nonExisting(self):
        """Version check fires before DN lookup, so an unknown DN still
        gets the version error."""
        # TODO make a test just like this one that would pass authentication
        # if version was correct, to ensure we don't leak that info either.
        self.server.dataReceived(str(pureldap.LDAPMessage(
            pureldap.LDAPBindRequest(version=4,
                                     dn='cn=non-existing,dc=example,dc=com',
                                     auth='invalid'),
            id=11)))
        self.assertEquals(self.server.transport.value(),
                          str(pureldap.LDAPMessage(
            pureldap.LDAPBindResponse(
            resultCode=ldaperrors.LDAPProtocolError.resultCode,
            errorMessage='Version 4 not supported'),
            id=11)))

    def test_unbind(self):
        """Unbind produces no response bytes at all."""
        self.server.dataReceived(str(pureldap.LDAPMessage(pureldap.LDAPUnbindRequest(), id=7)))
        self.assertEquals(self.server.transport.value(),
                          '')

    def test_search_outOfTree(self):
        """Searching under a base not in the tree returns noSuchObject."""
        self.server.dataReceived(str(pureldap.LDAPMessage(
            pureldap.LDAPSearchRequest(
            baseObject='dc=invalid',
            ), id=2)))
        self.assertEquals(self.server.transport.value(),
                          str(pureldap.LDAPMessage(
            pureldap.LDAPSearchResultDone(resultCode=ldaperrors.LDAPNoSuchObject.resultCode),
            id=2)),
                          )

    def test_search_matchAll_oneResult(self):
        """Default search on a leaf returns that one entry then done."""
        self.server.dataReceived(str(pureldap.LDAPMessage(
            pureldap.LDAPSearchRequest(
            baseObject='cn=thingie,ou=stuff,dc=example,dc=com',
            ), id=2)))
        self.assertEquals(self.server.transport.value(),
                          str(pureldap.LDAPMessage(
            pureldap.LDAPSearchResultEntry(
            objectName='cn=thingie,ou=stuff,dc=example,dc=com',
            attributes=[ ('objectClass', ['a', 'b']),
                         ('cn', ['thingie']),
                         ]),
            id=2))
                          + str(pureldap.LDAPMessage(
            pureldap.LDAPSearchResultDone(resultCode=0),
            id=2)),
                          )

    def test_search_matchAll_manyResults(self):
        """Default (subtree) search returns base entry plus all children."""
        self.server.dataReceived(str(pureldap.LDAPMessage(
            pureldap.LDAPSearchRequest(
            baseObject='ou=stuff,dc=example,dc=com',
            ), id=2)))
        self.assertEquals(self.server.transport.value(),
                          str(pureldap.LDAPMessage(
            pureldap.LDAPSearchResultEntry(
            objectName='ou=stuff,dc=example,dc=com',
            attributes=[ ('objectClass', ['a', 'b']),
                         ('ou', ['stuff']),
                         ]),
            id=2))
                          + str(pureldap.LDAPMessage(
            pureldap.LDAPSearchResultEntry(
            objectName='cn=another,ou=stuff,dc=example,dc=com',
            attributes=[ ('objectClass', ['a', 'b']),
                         ('cn', ['another']),
                         ]),
            id=2))
                          + str(pureldap.LDAPMessage(
            pureldap.LDAPSearchResultEntry(
            objectName='cn=thingie,ou=stuff,dc=example,dc=com',
            attributes=[ ('objectClass', ['a', 'b']),
                         ('cn', ['thingie']),
                         ]),
            id=2))
                          + str(pureldap.LDAPMessage(
            pureldap.LDAPSearchResultDone(resultCode=0),
            id=2)),
                          )

    def test_search_scope_oneLevel(self):
        """singleLevel scope returns children but not the base itself."""
        self.server.dataReceived(str(pureldap.LDAPMessage(
            pureldap.LDAPSearchRequest(
            baseObject='ou=stuff,dc=example,dc=com',
            scope=pureldap.LDAP_SCOPE_singleLevel,
            ), id=2)))
        self.assertEquals(self.server.transport.value(),
                          str(pureldap.LDAPMessage(
            pureldap.LDAPSearchResultEntry(
            objectName='cn=thingie,ou=stuff,dc=example,dc=com',
            attributes=[ ('objectClass', ['a', 'b']),
                         ('cn', ['thingie']),
                         ]),
            id=2))
                          + str(pureldap.LDAPMessage(
            pureldap.LDAPSearchResultEntry(
            objectName='cn=another,ou=stuff,dc=example,dc=com',
            attributes=[ ('objectClass', ['a', 'b']),
                         ('cn', ['another']),
                         ]),
            id=2))
                          + str(pureldap.LDAPMessage(
            pureldap.LDAPSearchResultDone(resultCode=0),
            id=2)),
                          )

    def test_search_scope_wholeSubtree(self):
        """wholeSubtree scope returns the base and every descendant."""
        self.server.dataReceived(str(pureldap.LDAPMessage(
            pureldap.LDAPSearchRequest(
            baseObject='ou=stuff,dc=example,dc=com',
            scope=pureldap.LDAP_SCOPE_wholeSubtree,
            ), id=2)))
        self.assertEquals(self.server.transport.value(),
                          str(pureldap.LDAPMessage(
            pureldap.LDAPSearchResultEntry(
            objectName='ou=stuff,dc=example,dc=com',
            attributes=[ ('objectClass', ['a', 'b']),
                         ('ou', ['stuff']),
                         ]),
            id=2))
                          + str(pureldap.LDAPMessage(
            pureldap.LDAPSearchResultEntry(
            objectName='cn=another,ou=stuff,dc=example,dc=com',
            attributes=[ ('objectClass', ['a', 'b']),
                         ('cn', ['another']),
                         ]),
            id=2))
                          + str(pureldap.LDAPMessage(
            pureldap.LDAPSearchResultEntry(
            objectName='cn=thingie,ou=stuff,dc=example,dc=com',
            attributes=[ ('objectClass', ['a', 'b']),
                         ('cn', ['thingie']),
                         ]),
            id=2))
                          + str(pureldap.LDAPMessage(
            pureldap.LDAPSearchResultDone(resultCode=0),
            id=2)),
                          )

    def test_search_scope_baseObject(self):
        """baseObject scope returns only the base entry."""
        self.server.dataReceived(str(pureldap.LDAPMessage(
            pureldap.LDAPSearchRequest(
            baseObject='ou=stuff,dc=example,dc=com',
            scope=pureldap.LDAP_SCOPE_baseObject,
            ), id=2)))
        self.assertEquals(self.server.transport.value(),
                          str(pureldap.LDAPMessage(
            pureldap.LDAPSearchResultEntry(
            objectName='ou=stuff,dc=example,dc=com',
            attributes=[ ('objectClass', ['a', 'b']),
                         ('ou', ['stuff']),
                         ]),
            id=2))
                          + str(pureldap.LDAPMessage(
            pureldap.LDAPSearchResultDone(resultCode=0),
            id=2)),
                          )

    def test_rootDSE(self):
        """A base search on the empty DN serves the root DSE with version,
        naming contexts and supported extensions."""
        self.server.dataReceived(str(pureldap.LDAPMessage(
            pureldap.LDAPSearchRequest(
            baseObject='',
            scope=pureldap.LDAP_SCOPE_baseObject,
            filter=pureldap.LDAPFilter_present('objectClass'),
            ), id=2)))
        self.assertEquals(self.server.transport.value(),
                          str(pureldap.LDAPMessage(
            pureldap.LDAPSearchResultEntry(
            objectName='',
            attributes=[ ('supportedLDAPVersion', ['3']),
                         ('namingContexts', ['dc=example,dc=com']),
                         ('supportedExtension', [
            pureldap.LDAPPasswordModifyRequest.oid,
            ]),
                         ]),
            id=2))
                          + str(pureldap.LDAPMessage(
            pureldap.LDAPSearchResultDone(resultCode=ldaperrors.Success.resultCode),
            id=2)),
                          )

    def test_delete(self):
        """Deleting an entry responds with success and removes the child."""
        self.server.dataReceived(str(pureldap.LDAPMessage(
            pureldap.LDAPDelRequest(str(self.thingie.dn)), id=2)))
        self.assertEquals(self.server.transport.value(),
                          str(pureldap.LDAPMessage(
            pureldap.LDAPDelResponse(resultCode=0),
            id=2)),
                          )
        # only cn=another should remain under ou=stuff
        d = self.stuff.children()
        d.addCallback(self.assertEquals, [self.another])
        return d

    def test_add_success(self):
        """Adding a new entry succeeds and the entry appears in the tree."""
        dn = 'cn=new,ou=stuff,dc=example,dc=com'
        self.server.dataReceived(str(pureldap.LDAPMessage(
            pureldap.LDAPAddRequest(entry=dn,
                                    attributes=[
            (pureldap.LDAPAttributeDescription("objectClass"),
             pureber.BERSet(value=[
            pureldap.LDAPAttributeValue('something'),
            ])),
            ]), id=2)))
        self.assertEquals(self.server.transport.value(),
                          str(pureldap.LDAPMessage(
            pureldap.LDAPAddResponse(
            resultCode=ldaperrors.Success.resultCode),
            id=2)),
                          )
        # tree changed
        d = self.stuff.children()
        d.addCallback(self.assertEquals, [
            self.thingie,
            self.another,
            inmemory.ReadOnlyInMemoryLDAPEntry(
            'cn=new,ou=stuff,dc=example,dc=com',
            {'objectClass': ['something']}),
            ])
        return d

    def test_add_fail_existsAlready(self):
        """Adding over an existing DN fails and leaves the tree untouched."""
        self.server.dataReceived(str(pureldap.LDAPMessage(
            pureldap.LDAPAddRequest(entry=str(self.thingie.dn),
                                    attributes=[
            (pureldap.LDAPAttributeDescription("objectClass"),
             pureber.BERSet(value=[
            pureldap.LDAPAttributeValue('something'),
            ])),
            ]), id=2)))
        self.assertEquals(self.server.transport.value(),
                          str(pureldap.LDAPMessage(
            pureldap.LDAPAddResponse(
            resultCode=ldaperrors.LDAPEntryAlreadyExists.resultCode,
            errorMessage=str(self.thingie.dn)),
            id=2)),
                          )
        # tree did not change
        d = self.stuff.children()
        d.addCallback(self.assertEquals, [self.thingie, self.another])
        return d

    def test_modifyDN_rdnOnly_deleteOldRDN_success(self):
        """Renaming an entry (deleteoldrdn=True) drops the old RDN value."""
        newrdn = 'cn=thingamagic'
        self.server.dataReceived(str(pureldap.LDAPMessage(
            pureldap.LDAPModifyDNRequest(entry=self.thingie.dn,
                                         newrdn=newrdn,
                                         deleteoldrdn=True),
            id=2)))
        self.assertEquals(self.server.transport.value(),
                          str(pureldap.LDAPMessage(
            pureldap.LDAPModifyDNResponse(
            resultCode=ldaperrors.Success.resultCode),
            id=2)),
                          )
        # tree changed
        d = self.stuff.children()
        d.addCallback(self.assertEquals, [
            inmemory.ReadOnlyInMemoryLDAPEntry(
            '%s,ou=stuff,dc=example,dc=com' % newrdn,
            {'objectClass': ['a', 'b'],
             'cn': ['thingamagic']}),
            self.another,
            ])
        return d

    def test_modifyDN_rdnOnly_noDeleteOldRDN_success(self):
        """Renaming with deleteoldrdn=False keeps both RDN values.
        NOTE(review): uses the removed py2 `sets` module; marked .todo below."""
        newrdn = 'cn=thingamagic'
        self.server.dataReceived(str(pureldap.LDAPMessage(
            pureldap.LDAPModifyDNRequest(entry=self.thingie.dn,
                                         newrdn=newrdn,
                                         deleteoldrdn=False),
            id=2)))
        self.assertEquals(self.server.transport.value(),
                          str(pureldap.LDAPMessage(
            pureldap.LDAPModifyDNResponse(
            resultCode=ldaperrors.Success.resultCode),
            id=2)),
                          )
        # tree changed
        d = self.stuff.children()
        d.addCallback(self.assertEquals, sets.Set([
            self.another,
            inmemory.ReadOnlyInMemoryLDAPEntry(
            '%s,ou=stuff,dc=example,dc=com' % newrdn,
            {'objectClass': ['a', 'b'],
             'cn': ['thingamagic', 'thingie']}),
            ]))
        return d
    # Twisted trial: expected failure, feature not implemented yet.
    test_modifyDN_rdnOnly_noDeleteOldRDN_success.todo = 'Not supported yet.'

    def test_modify(self):
        """A modify request adding an attribute is applied to the entry."""
        self.server.dataReceived(str(pureldap.LDAPMessage(
            pureldap.LDAPModifyRequest(self.stuff.dn,
                                       modification=[
            delta.Add('foo', ['bar']).asLDAP(),
            ],
                                       ),
            id=2)))
        self.assertEquals(self.server.transport.value(),
                          str(pureldap.LDAPMessage(
            pureldap.LDAPModifyResponse(
            resultCode=ldaperrors.Success.resultCode),
            id=2)),
                          )
        # tree changed
        self.assertEquals(
            self.stuff,
            inmemory.ReadOnlyInMemoryLDAPEntry(
            'ou=stuff,dc=example,dc=com',
            {'objectClass': ['a', 'b'],
             'ou': ['stuff'],
             'foo': ['bar']}))

    def test_extendedRequest_unknown(self):
        """Unknown extended operations are rejected with protocolError."""
        self.server.dataReceived(str(pureldap.LDAPMessage(
            pureldap.LDAPExtendedRequest(requestName='42.42.42',
                                         requestValue='foo'),
            id=2)))
        self.assertEquals(self.server.transport.value(),
                          str(pureldap.LDAPMessage(
            pureldap.LDAPExtendedResponse(
            resultCode=ldaperrors.LDAPProtocolError.resultCode,
            errorMessage='Unknown extended request: 42.42.42'),
            id=2)),
                          )

    def test_passwordModify_notBound(self):
        """Password modify requires an authenticated (bound) connection."""
        self.server.dataReceived(str(pureldap.LDAPMessage(
            pureldap.LDAPPasswordModifyRequest(
            userIdentity='cn=thingie,ou=stuff,dc=example,dc=com',
            newPasswd='hushhush'),
            id=2)))
        self.assertEquals(self.server.transport.value(),
                          str(pureldap.LDAPMessage(
            pureldap.LDAPExtendedResponse(
            resultCode=ldaperrors.LDAPStrongAuthRequired.resultCode,
            responseName=pureldap.LDAPPasswordModifyRequest.oid),
            id=2)),
                          )

    def test_passwordModify_simple(self):
        """After binding, password modify succeeds and stores a new
        salted SSHA digest of the new password."""
        # first bind to some entry
        self.thingie['userPassword'] = ['{SSHA}yVLLj62rFf3kDAbzwEU0zYAVvbWrze8='] # "secret"
        self.server.dataReceived(str(pureldap.LDAPMessage(pureldap.LDAPBindRequest(
            dn='cn=thingie,ou=stuff,dc=example,dc=com',
            auth='secret'), id=4)))
        self.assertEquals(self.server.transport.value(),
                          str(pureldap.LDAPMessage(
            pureldap.LDAPBindResponse(resultCode=0,
                                      matchedDN='cn=thingie,ou=stuff,dc=example,dc=com'),
            id=4)))
        self.server.transport.clear()
        self.server.dataReceived(str(pureldap.LDAPMessage(
            pureldap.LDAPPasswordModifyRequest(
            userIdentity='cn=thingie,ou=stuff,dc=example,dc=com',
            newPasswd='hushhush'),
            id=2)))
        self.assertEquals(self.server.transport.value(),
                          str(pureldap.LDAPMessage(
            pureldap.LDAPExtendedResponse(
            resultCode=ldaperrors.Success.resultCode,
            responseName=pureldap.LDAPPasswordModifyRequest.oid),
            id=2)),
                          )
        # tree changed: verify the stored hash is SSHA of the new password
        # (salt is the bytes after the 20-byte SHA1 digest).
        # NOTE(review): base64.decodestring is removed in py3 (decodebytes).
        secrets = self.thingie.get('userPassword', [])
        self.assertEquals(len(secrets), 1)
        for secret in secrets:
            self.assertEquals(secret[:len('{SSHA}')], '{SSHA}')
            raw = base64.decodestring(secret[len('{SSHA}'):])
            salt = raw[20:]
            self.assertEquals(entry.sshaDigest('hushhush', salt),
                              secret)

    def test_unknownRequest(self):
        """A request type the server has no handler for gets the generic
        Notice of Disconnection-style extended error response."""
        # make server miss one of the handle_* attributes
        # without having to modify the LDAPServer class
        class MockServer(ldapserver.LDAPServer):
            handle_LDAPBindRequest = property()
        self.server.__class__ = MockServer
        self.server.dataReceived(str(pureldap.LDAPMessage(
            pureldap.LDAPBindRequest(), id=2)))
        self.assertEquals(self.server.transport.value(),
                          str(pureldap.LDAPMessage(
            pureldap.LDAPExtendedResponse(resultCode=ldaperrors.LDAPProtocolError.resultCode,
                                          responseName='1.3.6.1.4.1.1466.20036',
                                          errorMessage='Unknown request'), id=2)))

    def test_control_unknown_critical(self):
        """An unknown control marked critical aborts the operation."""
        self.server.dataReceived(str(pureldap.LDAPMessage(
            pureldap.LDAPBindRequest(), id=2,
            controls=[('42.42.42.42', True, None),
                      ])))
        self.assertEquals(self.server.transport.value(),
                          str(pureldap.LDAPMessage(
            pureldap.LDAPBindResponse(
            resultCode=ldaperrors.LDAPUnavailableCriticalExtension.resultCode,
            errorMessage='Unknown control 42.42.42.42'), id=2)))

    def test_control_unknown_nonCritical(self):
        """An unknown non-critical control is ignored and the bind works."""
        self.thingie['userPassword'] = ['{SSHA}yVLLj62rFf3kDAbzwEU0zYAVvbWrze8='] # "secret"
        self.server.dataReceived(str(pureldap.LDAPMessage(
            pureldap.LDAPBindRequest(dn='cn=thingie,ou=stuff,dc=example,dc=com',
                                     auth='secret'),
            controls=[('42.42.42.42', False, None)],
            id=4)))
        self.assertEquals(self.server.transport.value(),
                          str(pureldap.LDAPMessage(
            pureldap.LDAPBindResponse(resultCode=0,
                                      matchedDN='cn=thingie,ou=stuff,dc=example,dc=com'),
            id=4)))
class TestSchema(unittest.TestCase):
    """Checks fetchschema.fetch against a tree whose dc=example entry
    points at cn=schema through subschemaSubentry."""

    def setUp(self):
        # Fixture: empty root, dc=com / dc=example, and a cn=schema entry
        # carrying one attribute type and two object classes.
        db = inmemory.ReadOnlyInMemoryLDAPEntry('', {})
        com = db.addChild('dc=com',
                          {'objectClass': ['dcObject'],
                           'dc': ['com'],
                           })
        com.addChild('dc=example',
                     {'objectClass': ['dcObject'],
                      'dc': ['example'],
                      'subschemaSubentry': ['cn=schema'],
                      })
        db.addChild('cn=schema',
                    {'objectClass': ['TODO'],
                     'cn': ['schema'],
                     'attributeTypes': [test_schema.AttributeType_KnownValues.knownValues[0][0]],
                     'objectClasses': [test_schema.OBJECTCLASSES['organization'],
                                       test_schema.OBJECTCLASSES['organizationalUnit'],
                                       ],
                     })

        class LDAPServerFactory(protocol.ServerFactory):
            # Factory whose .root adapts to IConnectedLDAPEntry (see below).
            protocol = ldapserver.LDAPServer

            def __init__(self, root):
                self.root = root

        components.registerAdapter(lambda x: x.root,
                                   LDAPServerFactory,
                                   interfaces.IConnectedLDAPEntry)
        serverFactory = LDAPServerFactory(db)

        # In-memory client<->server pair, pumped by ldaptor.test.util.
        self.client = ldapclient.LDAPClient()
        server = serverFactory.buildProtocol(address.IPv4Address('TCP', 'localhost', '1024'))
        util.returnConnected(server, self.client)

    def testSimple(self):
        """Fetched attributeTypes/objectClasses match the fixture when
        rendered through the schema description classes."""
        d = fetchschema.fetch(self.client, 'dc=example,dc=com')
        (attributeTypes, objectClasses) = util.pumpingDeferredResult(d)

        self.failUnlessEqual([str(x) for x in attributeTypes],
                             [str(schema.AttributeTypeDescription(x)) for x in [
            test_schema.AttributeType_KnownValues.knownValues[0][0],
            ]])
        self.failUnlessEqual([str(x) for x in objectClasses],
                             [str(schema.ObjectClassDescription(x)) for x in [
            test_schema.OBJECTCLASSES['organization'],
            test_schema.OBJECTCLASSES['organizationalUnit'],
            ]])
|
antong/ldaptor
|
ldaptor/test/test_server.py
|
Python
|
lgpl-2.1
| 24,613
| 0.008451
|
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""'logging sinks update' command."""
from googlecloudsdk.api_lib.logging import util
from googlecloudsdk.calliope import base
from googlecloudsdk.calliope import exceptions
from googlecloudsdk.core import list_printer
from googlecloudsdk.core import log
class Update(base.Command):
    """Updates a sink."""

    @staticmethod
    def Args(parser):
        """Register flags for this command."""
        parser.add_argument(
            'sink_name', help='The name of the sink to update.')
        parser.add_argument(
            'destination', nargs='?',
            help=('A new destination for the sink. '
                  'If omitted, the sink\'s existing destination is unchanged.'))
        parser.add_argument(
            '--log-filter', required=False,
            help=('A new filter expression for the sink. '
                  'If omitted, the sink\'s existing filter (if any) is unchanged.'))
        parser.add_argument(
            '--output-version-format', required=False,
            help=('Format of the log entries being exported. Detailed information: '
                  'https://cloud.google.com/logging/docs/api/introduction_v2'),
            choices=('V1', 'V2'))

    def GetLogSink(self):
        """Returns a log sink specified by the arguments."""
        client = self.context['logging_client_v1beta3']
        return client.projects_logs_sinks.Get(
            self.context['sink_reference'].Request())

    def GetLogServiceSink(self):
        """Returns a log service sink specified by the arguments."""
        client = self.context['logging_client_v1beta3']
        return client.projects_logServices_sinks.Get(
            self.context['sink_reference'].Request())

    def GetProjectSink(self):
        """Returns a project sink specified by the arguments."""
        # Use V2 logging API for project sinks.
        client = self.context['logging_client_v2beta1']
        messages = self.context['logging_messages_v2beta1']
        sink_ref = self.context['sink_reference']
        return client.projects_sinks.Get(
            messages.LoggingProjectsSinksGetRequest(
                projectsId=sink_ref.projectsId, sinksId=sink_ref.sinksId))

    def UpdateLogSink(self, sink_data):
        """Updates a log sink specified by the arguments."""
        client = self.context['logging_client_v1beta3']
        messages = self.context['logging_messages_v1beta3']
        sink_ref = self.context['sink_reference']
        return client.projects_logs_sinks.Update(
            messages.LoggingProjectsLogsSinksUpdateRequest(
                projectsId=sink_ref.projectsId, logsId=sink_ref.logsId,
                sinksId=sink_data['name'], logSink=messages.LogSink(**sink_data)))

    def UpdateLogServiceSink(self, sink_data):
        """Updates a log service sink specified by the arguments."""
        client = self.context['logging_client_v1beta3']
        messages = self.context['logging_messages_v1beta3']
        sink_ref = self.context['sink_reference']
        return client.projects_logServices_sinks.Update(
            messages.LoggingProjectsLogServicesSinksUpdateRequest(
                projectsId=sink_ref.projectsId,
                logServicesId=sink_ref.logServicesId, sinksId=sink_data['name'],
                logSink=messages.LogSink(**sink_data)))

    def UpdateProjectSink(self, sink_data):
        """Updates a project sink specified by the arguments."""
        # Use V2 logging API for project sinks.
        client = self.context['logging_client_v2beta1']
        messages = self.context['logging_messages_v2beta1']
        sink_ref = self.context['sink_reference']
        # Change string value to enum.
        sink_data['outputVersionFormat'] = getattr(
            messages.LogSink.OutputVersionFormatValueValuesEnum,
            sink_data['outputVersionFormat'])
        return client.projects_sinks.Update(
            messages.LoggingProjectsSinksUpdateRequest(
                projectsId=sink_ref.projectsId, sinksId=sink_data['name'],
                logSink=messages.LogSink(**sink_data)))

    @util.HandleHttpError
    def Run(self, args):
        """This is what gets called when the user runs this command.

        Args:
          args: an argparse namespace. All the arguments that were provided to
            this command invocation.

        Returns:
          The updated sink with its new destination.
        """
        util.CheckSinksCommandArguments(args)

        # One of the flags is required to update the sink.
        # log_filter can be an empty string, so check explicitly for None.
        if not (args.destination or args.log_filter is not None or
                args.output_version_format):
            raise exceptions.ToolException(
                '[destination], --log-filter or --output-version-format is required')

        # Calling Update on a non-existing sink creates it.
        # We need to make sure it exists, otherwise we would create it.
        # NOTE(review): args.log / args.service are not registered by Args()
        # above -- presumably contributed by a shared parent parser; verify.
        if args.log:
            sink = self.GetLogSink()
        elif args.service:
            sink = self.GetLogServiceSink()
        else:
            sink = self.GetProjectSink()

        # Only update fields that were passed to the command.
        if args.destination:
            destination = args.destination
        else:
            destination = sink.destination

        if args.log_filter is not None:
            log_filter = args.log_filter
        else:
            log_filter = sink.filter

        sink_ref = self.context['sink_reference']
        sink_data = {'name': sink_ref.sinksId, 'destination': destination,
                     'filter': log_filter}

        if args.log:
            result = util.TypedLogSink(self.UpdateLogSink(sink_data),
                                       log_name=args.log)
        elif args.service:
            result = util.TypedLogSink(self.UpdateLogServiceSink(sink_data),
                                       service_name=args.service)
        else:
            # Project sinks additionally carry the output version format;
            # default to the sink's current value when the flag is omitted.
            if args.output_version_format:
                sink_data['outputVersionFormat'] = args.output_version_format
            else:
                sink_data['outputVersionFormat'] = sink.outputVersionFormat.name
            result = util.TypedLogSink(self.UpdateProjectSink(sink_data))
        log.UpdatedResource(sink_ref)
        return result

    def Display(self, unused_args, result):
        """This method is called to print the result of the Run() method.

        Args:
          unused_args: The arguments that command was run with.
          result: The value returned from the Run() method.
        """
        list_printer.PrintResourceList('logging.typedSinks', [result])
        util.PrintPermissionInstructions(result.destination)
Update.detailed_help = {
'DESCRIPTION': """\
Changes the *[destination]* or *--log-filter* associated with a sink.
If you don't include one of the *--log* or *--log-service* flags,
this command updates a project sink.
The new destination must already exist and Cloud Logging must have
permission to write to it.
Log entries are exported to the new destination immediately.
""",
'EXAMPLES': """\
To only update a project sink filter, run:
$ {command} my-sink --log-filter='metadata.severity>=ERROR'
Detailed information about filters can be found at:
https://cloud.google.com/logging/docs/view/advanced_filters
""",
}
|
flgiordano/netcash
|
+/google-cloud-sdk/lib/surface/logging/sinks/update.py
|
Python
|
bsd-3-clause
| 7,511
| 0.003595
|
#!/usr/bin/env python
# Support a YAML file hosts.yml as external inventory in Ansible
# Copyright (C) 2012 Jeroen Hoekx <jeroen@hoekx.be>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
File format:
- <hostname>
or
- host: <hostname>
vars:
- myvar: value
- myvar2: value2
groups:
- mygroup1
- mygroup2
or
- group: <groupname>
vars:
- groupvar: value
hosts:
- myhost1
- myhost2
groups:
- subgroup1
- subgroup2
Any statement except the first definition is optional.
"""
import json
import os
import sys
from optparse import OptionParser
import yaml
class Host():
    """One inventory host: a name, its own variables, and the groups it
    belongs to.  Group variables are inherited; host variables win."""

    def __init__(self, name):
        self.name = name
        self.groups = []
        self.vars = {}

    def __repr__(self):
        return "Host('%s')"%(self.name)

    def set_variable(self, key, value):
        """Record a host-level variable."""
        self.vars[key] = value

    def get_variables(self):
        """Merge variables from every group, then overlay host variables
        so host-level values take precedence."""
        merged = {}
        for membership in self.groups:
            merged.update(membership.get_variables())
        merged.update(self.vars)
        return merged

    def add_group(self, group):
        """Join *group*, ignoring duplicate additions."""
        if group not in self.groups:
            self.groups.append(group)
class Group():
    """One inventory group: member hosts, variables, child groups and
    back-references to parent groups for variable inheritance."""

    def __init__(self, name):
        self.name = name
        self.hosts = []
        self.vars = {}
        self.subgroups = []
        self.parents = []

    def __repr__(self):
        return "Group('%s')"%(self.name)

    def get_hosts(self):
        """ List all hosts in this group, including subgroups """
        collected = list(self.hosts)
        for child in self.subgroups:
            for member in child.get_hosts():
                if member not in collected:
                    collected.append(member)
        return collected

    def add_host(self, host):
        """Add *host* (once) and register this group on the host."""
        if host not in self.hosts:
            self.hosts.append(host)
            host.add_group(self)

    def add_subgroup(self, group):
        """Add *group* (once) as a child and link back as its parent."""
        if group not in self.subgroups:
            self.subgroups.append(group)
            group.add_parent(self)

    def add_parent(self, group):
        """Record *group* as a parent, ignoring duplicates."""
        if group not in self.parents:
            self.parents.append(group)

    def set_variable(self, key, value):
        """Record a group-level variable."""
        self.vars[key] = value

    def get_variables(self):
        """Merge parent-group variables, then overlay this group's own so
        local values take precedence."""
        merged = {}
        for parent in self.parents:
            merged.update( parent.get_variables() )
        merged.update(self.vars)
        return merged
def find_group(name, groups):
    """Return the group in *groups* named *name*, or None if absent."""
    for candidate in groups:
        if candidate.name == name:
            return candidate
    return None
def parse_vars(vars, obj):
    """Apply variable definitions to *obj* via obj.set_variable().

    ``vars`` may be a mapping of key -> value, or a list of one-entry
    mappings (the YAML ``- key: value`` form).  Anything else is silently
    ignored, matching the original behavior.

    Defects fixed: ``vars.items()[0]`` subscripted a dict-items view,
    which only works on Python 2; ``type(x) == dict`` rejected dict
    subclasses -- use isinstance() and materialize items() explicitly.
    """
    if isinstance(vars, dict):
        for key, value in vars.items():
            obj.set_variable(key, value)
    elif isinstance(vars, list):
        for item in vars:
            # each list element is a single-key mapping
            key, value = list(item.items())[0]
            obj.set_variable(key, value)
def parse_yaml(yaml_hosts):
    """Translate the parsed hosts.yml list into inventory objects.

    Returns (groups, all_hosts): every Group defined (including the
    implicit 'ungrouped' group) and an 'all' Group holding every host.
    NOTE(review): Python 2 only -- tests against the `unicode` builtin
    below.
    """
    groups = []
    all_hosts = Group('all')
    ungrouped = Group('ungrouped')
    groups.append(ungrouped)

    ### groups first, so hosts can be added to 'ungrouped' if necessary
    # (parent name, child name) pairs, linked once all groups exist
    subgroups = []
    for entry in yaml_hosts:
        if 'group' in entry and type(entry)==dict:
            group = find_group(entry['group'], groups)
            if not group:
                group = Group(entry['group'])
                groups.append(group)

            if 'vars' in entry:
                parse_vars(entry['vars'], group)

            if 'hosts' in entry:
                for host_name in entry['hosts']:
                    host = None
                    # reuse the Host object if this name was seen before
                    # (for-else: else runs only when no break occurred)
                    for test_host in all_hosts.get_hosts():
                        if test_host.name == host_name:
                            host = test_host
                            break
                    else:
                        host = Host(host_name)
                        all_hosts.add_host(host)
                    group.add_host(host)

            if 'groups' in entry:
                for subgroup in entry['groups']:
                    subgroups.append((group.name, subgroup))

    # second pass: resolve parent/child group links by name
    for name, sub_name in subgroups:
        group = find_group(name, groups)
        subgroup = find_group(sub_name, groups)
        group.add_subgroup(subgroup)

    for entry in yaml_hosts:
        ### a host is either a dict or a single line definition
        if type(entry) in [str, unicode]:
            # bare string: host with no vars; new names go to 'ungrouped'
            for test_host in all_hosts.get_hosts():
                if test_host.name == entry:
                    break
            else:
                host = Host(entry)
                all_hosts.add_host(host)
                ungrouped.add_host(host)
        elif 'host' in entry:
            host = None
            no_group = False
            for test_host in all_hosts.get_hosts():
                ### all hosts contains only hosts already in groups
                if test_host.name == entry['host']:
                    host = test_host
                    break
            else:
                host = Host(entry['host'])
                all_hosts.add_host(host)
                no_group = True

            if 'vars' in entry:
                parse_vars(entry['vars'], host)
            if 'groups' in entry:
                for test_group in groups:
                    if test_group.name in entry['groups']:
                        test_group.add_host(host)
                        all_hosts.add_host(host)
                        no_group = False
            if no_group:
                ungrouped.add_host(host)

    return groups, all_hosts
# ---- command-line entry point ----
# NOTE(review): Python 2 only -- bare `print` statements below.
parser = OptionParser()
parser.add_option('-l', '--list', default=False, dest="list_hosts", action="store_true")
parser.add_option('-H', '--host', default=None, dest="host")
parser.add_option('-e', '--extra-vars', default=None, dest="extra")
options, args = parser.parse_args()

# hosts.yml is expected to sit next to this script.
base_dir = os.path.dirname(os.path.realpath(__file__))
hosts_file = os.path.join(base_dir, 'hosts.yml')
with open(hosts_file) as f:
    yaml_hosts = yaml.safe_load( f.read() )

groups, all_hosts = parse_yaml(yaml_hosts)

# --list: emit {group name: [host names]} for every group (Ansible's
# external-inventory "list" contract).
if options.list_hosts == True:
    result = {}
    for group in groups:
        result[group.name] = [host.name for host in group.get_hosts()]
    print json.dumps(result)
    sys.exit(0)

# --host NAME: emit the merged variable dict for a single host.
if options.host is not None:
    result = {}
    host = None
    for test_host in all_hosts.get_hosts():
        if test_host.name == options.host:
            host = test_host
            break
    # NOTE(review): an unknown host name leaves host = None and the next
    # line raises AttributeError -- confirm this is the intended failure.
    result = host.get_variables()
    if options.extra:
        k,v = options.extra.split("=")
        result[k] = v
    print json.dumps(result)
    sys.exit(0)

# Neither mode requested: print usage and exit non-zero.
parser.print_help()
sys.exit(1)
|
j2sol/ansible
|
plugins/inventory/yaml.py
|
Python
|
gpl-3.0
| 7,145
| 0.003779
|
# Copyright 2013 NEC Corporation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest_lib.common.utils import data_utils
from tempest_lib import exceptions as lib_exc
from tempest.api.compute import base
from tempest.common import tempest_fixtures as fixtures
from tempest import test
class AggregatesAdminTestJSON(base.BaseV2ComputeAdminTest):
"""
Tests Aggregates API that require admin privileges
"""
_host_key = 'OS-EXT-SRV-ATTR:host'
    @classmethod
    def setup_clients(cls):
        """Bind the admin aggregates client used by every test."""
        super(AggregatesAdminTestJSON, cls).setup_clients()
        cls.client = cls.os_adm.aggregates_client
@classmethod
def resource_setup(cls):
super(AggregatesAdminTestJSON, cls).resource_setup()
cls.aggregate_name_prefix = 'test_aggregate_'
cls.az_name_prefix = 'test_az_'
hosts_all = cls.os_adm.hosts_client.list_hosts()
hosts = map(lambda x: x['host_name'],
filter(lambda y: y['service'] == 'compute', hosts_all))
cls.host = hosts[0]
def _try_delete_aggregate(self, aggregate_id):
# delete aggregate, if it exists
try:
self.client.delete_aggregate(aggregate_id)
# if aggregate not found, it depict it was deleted in the test
except lib_exc.NotFound:
pass
@test.attr(type='gate')
@test.idempotent_id('0d148aa3-d54c-4317-aa8d-42040a475e20')
def test_aggregate_create_delete(self):
# Create and delete an aggregate.
aggregate_name = data_utils.rand_name(self.aggregate_name_prefix)
aggregate = self.client.create_aggregate(name=aggregate_name)
self.addCleanup(self._try_delete_aggregate, aggregate['id'])
self.assertEqual(aggregate_name, aggregate['name'])
self.assertIsNone(aggregate['availability_zone'])
self.client.delete_aggregate(aggregate['id'])
self.client.wait_for_resource_deletion(aggregate['id'])
@test.attr(type='gate')
@test.idempotent_id('5873a6f8-671a-43ff-8838-7ce430bb6d0b')
def test_aggregate_create_delete_with_az(self):
# Create and delete an aggregate.
aggregate_name = data_utils.rand_name(self.aggregate_name_prefix)
az_name = data_utils.rand_name(self.az_name_prefix)
aggregate = self.client.create_aggregate(
name=aggregate_name, availability_zone=az_name)
self.addCleanup(self._try_delete_aggregate, aggregate['id'])
self.assertEqual(aggregate_name, aggregate['name'])
self.assertEqual(az_name, aggregate['availability_zone'])
self.client.delete_aggregate(aggregate['id'])
self.client.wait_for_resource_deletion(aggregate['id'])
@test.attr(type='gate')
@test.idempotent_id('68089c38-04b1-4758-bdf0-cf0daec4defd')
def test_aggregate_create_verify_entry_in_list(self):
# Create an aggregate and ensure it is listed.
aggregate_name = data_utils.rand_name(self.aggregate_name_prefix)
aggregate = self.client.create_aggregate(name=aggregate_name)
self.addCleanup(self.client.delete_aggregate, aggregate['id'])
aggregates = self.client.list_aggregates()
self.assertIn((aggregate['id'], aggregate['availability_zone']),
map(lambda x: (x['id'], x['availability_zone']),
aggregates))
@test.attr(type='gate')
@test.idempotent_id('36ec92ca-7a73-43bc-b920-7531809e8540')
def test_aggregate_create_update_metadata_get_details(self):
# Create an aggregate and ensure its details are returned.
aggregate_name = data_utils.rand_name(self.aggregate_name_prefix)
aggregate = self.client.create_aggregate(name=aggregate_name)
self.addCleanup(self.client.delete_aggregate, aggregate['id'])
body = self.client.get_aggregate(aggregate['id'])
self.assertEqual(aggregate['name'], body['name'])
self.assertEqual(aggregate['availability_zone'],
body['availability_zone'])
self.assertEqual({}, body["metadata"])
# set the metadata of the aggregate
meta = {"key": "value"}
body = self.client.set_metadata(aggregate['id'], meta)
self.assertEqual(meta, body["metadata"])
# verify the metadata has been set
body = self.client.get_aggregate(aggregate['id'])
self.assertEqual(meta, body["metadata"])
@test.attr(type='gate')
@test.idempotent_id('4d2b2004-40fa-40a1-aab2-66f4dab81beb')
def test_aggregate_create_update_with_az(self):
# Update an aggregate and ensure properties are updated correctly
aggregate_name = data_utils.rand_name(self.aggregate_name_prefix)
az_name = data_utils.rand_name(self.az_name_prefix)
aggregate = self.client.create_aggregate(
name=aggregate_name, availability_zone=az_name)
self.addCleanup(self.client.delete_aggregate, aggregate['id'])
self.assertEqual(aggregate_name, aggregate['name'])
self.assertEqual(az_name, aggregate['availability_zone'])
self.assertIsNotNone(aggregate['id'])
aggregate_id = aggregate['id']
new_aggregate_name = aggregate_name + '_new'
new_az_name = az_name + '_new'
resp_aggregate = self.client.update_aggregate(aggregate_id,
new_aggregate_name,
new_az_name)
self.assertEqual(new_aggregate_name, resp_aggregate['name'])
self.assertEqual(new_az_name, resp_aggregate['availability_zone'])
aggregates = self.client.list_aggregates()
self.assertIn((aggregate_id, new_aggregate_name, new_az_name),
map(lambda x:
(x['id'], x['name'], x['availability_zone']),
aggregates))
@test.attr(type='gate')
@test.idempotent_id('c8e85064-e79b-4906-9931-c11c24294d02')
def test_aggregate_add_remove_host(self):
# Add an host to the given aggregate and remove.
self.useFixture(fixtures.LockFixture('availability_zone'))
aggregate_name = data_utils.rand_name(self.aggregate_name_prefix)
aggregate = self.client.create_aggregate(name=aggregate_name)
self.addCleanup(self.client.delete_aggregate, aggregate['id'])
body = self.client.add_host(aggregate['id'], self.host)
self.assertEqual(aggregate_name, body['name'])
self.assertEqual(aggregate['availability_zone'],
body['availability_zone'])
self.assertIn(self.host, body['hosts'])
body = self.client.remove_host(aggregate['id'], self.host)
self.assertEqual(aggregate_name, body['name'])
self.assertEqual(aggregate['availability_zone'],
body['availability_zone'])
self.assertNotIn(self.host, body['hosts'])
@test.attr(type='gate')
@test.idempotent_id('7f6a1cc5-2446-4cdb-9baa-b6ae0a919b72')
def test_aggregate_add_host_list(self):
# Add an host to the given aggregate and list.
self.useFixture(fixtures.LockFixture('availability_zone'))
aggregate_name = data_utils.rand_name(self.aggregate_name_prefix)
aggregate = self.client.create_aggregate(name=aggregate_name)
self.addCleanup(self.client.delete_aggregate, aggregate['id'])
self.client.add_host(aggregate['id'], self.host)
self.addCleanup(self.client.remove_host, aggregate['id'], self.host)
aggregates = self.client.list_aggregates()
aggs = filter(lambda x: x['id'] == aggregate['id'], aggregates)
self.assertEqual(1, len(aggs))
agg = aggs[0]
self.assertEqual(aggregate_name, agg['name'])
self.assertIsNone(agg['availability_zone'])
self.assertIn(self.host, agg['hosts'])
@test.attr(type='gate')
@test.idempotent_id('eeef473c-7c52-494d-9f09-2ed7fc8fc036')
def test_aggregate_add_host_get_details(self):
# Add an host to the given aggregate and get details.
self.useFixture(fixtures.LockFixture('availability_zone'))
aggregate_name = data_utils.rand_name(self.aggregate_name_prefix)
aggregate = self.client.create_aggregate(name=aggregate_name)
self.addCleanup(self.client.delete_aggregate, aggregate['id'])
self.client.add_host(aggregate['id'], self.host)
self.addCleanup(self.client.remove_host, aggregate['id'], self.host)
body = self.client.get_aggregate(aggregate['id'])
self.assertEqual(aggregate_name, body['name'])
self.assertIsNone(body['availability_zone'])
self.assertIn(self.host, body['hosts'])
@test.attr(type='gate')
@test.idempotent_id('96be03c7-570d-409c-90f8-e4db3c646996')
def test_aggregate_add_host_create_server_with_az(self):
# Add an host to the given aggregate and create a server.
self.useFixture(fixtures.LockFixture('availability_zone'))
aggregate_name = data_utils.rand_name(self.aggregate_name_prefix)
az_name = data_utils.rand_name(self.az_name_prefix)
aggregate = self.client.create_aggregate(
name=aggregate_name, availability_zone=az_name)
self.addCleanup(self.client.delete_aggregate, aggregate['id'])
self.client.add_host(aggregate['id'], self.host)
self.addCleanup(self.client.remove_host, aggregate['id'], self.host)
server_name = data_utils.rand_name('test_server_')
admin_servers_client = self.os_adm.servers_client
server = self.create_test_server(name=server_name,
availability_zone=az_name,
wait_until='ACTIVE')
body = admin_servers_client.get_server(server['id'])
self.assertEqual(self.host, body[self._host_key])
|
rzarzynski/tempest
|
tempest/api/compute/admin/test_aggregates.py
|
Python
|
apache-2.0
| 10,395
| 0
|
import logging
import datetime
import mediacloud.api
import re
from server import mc
from server.auth import is_user_logged_in
from server.util.csv import SOURCE_LIST_CSV_METADATA_PROPS
logger = logging.getLogger(__name__)

# identity columns included for every medium in topic results
TOPIC_MEDIA_INFO_PROPS = ['media_id', 'name', 'url']
# per-medium link/share metrics returned by topic media queries
TOPIC_MEDIA_PROPS = ['story_count', 'media_inlink_count', 'inlink_count', 'outlink_count',
                     'facebook_share_count', 'simple_tweet_count']
# url-sharing (platform post) aggregate metrics
TOPIC_MEDIA_URL_SHARING_PROPS = ['sum_post_count', 'sum_channel_count', 'sum_author_count']
# full column set used when exporting topic media lists to CSV
TOPIC_MEDIA_CSV_PROPS = TOPIC_MEDIA_INFO_PROPS + TOPIC_MEDIA_PROPS + TOPIC_MEDIA_URL_SHARING_PROPS + \
    SOURCE_LIST_CSV_METADATA_PROPS
def _parse_media_ids(args):
media_ids = []
if 'sources[]' in args:
src = args['sources[]']
if isinstance(src, str):
media_ids = src.split(',')
media_ids = " ".join([str(m) for m in media_ids])
src = re.sub(r'\[*\]*', '', str(src))
if len(src) == 0:
media_ids = []
media_ids = src.split(',') if len(src) > 0 else []
else:
media_ids = src
return media_ids
def _parse_collection_ids(args):
collection_ids = []
if 'collections[]' in args:
coll = args['collections[]']
if isinstance(coll, str):
tags_ids = coll.split(',')
tags_ids = " ".join([str(m) for m in tags_ids])
coll = re.sub(r'\[*\]*', '', str(tags_ids))
if len(coll) == 0:
collection_ids = []
else:
collection_ids = coll.split(',') # make a list
else:
collection_ids = coll
return collection_ids
# TODO: Migrate eto use mediapicker.concate!
# helper for topic preview queries
def concatenate_query_for_solr(solr_seed_query=None, media_ids=None, tags_ids=None):
    """Build a solr query combining a seed query with media/collection filters.

    :param solr_seed_query: free-text solr query, or None/'' for no seed
    :param media_ids: list (or comma-separated string) of media ids
    :param tags_ids: list (or comma-separated string) of collection tag ids
    :return: combined solr query string ('' when nothing was supplied)
    """
    # BUG FIX: the declared defaults of None crashed on len() below; treat
    # None as "no filter" so the defaults are actually usable.
    media_ids = media_ids if media_ids is not None else []
    tags_ids = tags_ids if tags_ids is not None else []
    query = ''
    if solr_seed_query not in [None, '']:
        query = '({})'.format(solr_seed_query)
    if len(media_ids) > 0 or len(tags_ids) > 0:
        if solr_seed_query not in [None, '']:
            query += " AND ("
        else:
            # no seed query: match everything, restricted by the filters
            query += "(*) AND ("
        # add in the media sources they specified
        if len(media_ids) > 0:
            media_ids = media_ids.split(',') if isinstance(media_ids, str) else media_ids
            query_media_ids = " ".join(map(str, media_ids))
            query_media_ids = re.sub(r'\[*\]*', '', str(query_media_ids))
            query_media_ids = " media_id:({})".format(query_media_ids)
            query += '(' + query_media_ids + ')'
        if len(media_ids) > 0 and len(tags_ids) > 0:
            query += " OR "
        # add in the collections they specified
        if len(tags_ids) > 0:
            tags_ids = tags_ids.split(',') if isinstance(tags_ids, str) else tags_ids
            query_tags_ids = " ".join(map(str, tags_ids))
            query_tags_ids = re.sub(r'\[*\]*', '', str(query_tags_ids))
            query_tags_ids = " tags_id_media:({})".format(query_tags_ids)
            query += '(' + query_tags_ids + ')'
        query += ')'
    return query
def concatenate_solr_dates(start_date, end_date):
    """Build a solr publish-date clause from two 'YYYY-MM-DD' date strings."""
    date_format = '%Y-%m-%d'
    start = datetime.datetime.strptime(start_date, date_format).date()
    end = datetime.datetime.strptime(end_date, date_format).date()
    return mediacloud.api.MediaCloud.dates_as_query_clause(start, end)
|
mitmedialab/MediaCloud-Web-Tools
|
server/views/topics/__init__.py
|
Python
|
apache-2.0
| 3,391
| 0.002359
|
#!/usr/bin/env python
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from oslotest import mockpatch
from tempest_lib import exceptions as lib_exc
from tempest.cmd import javelin
from tempest.tests import base
class JavelinUnitTest(base.TestCase):
    """Base class for javelin.py unit tests: common fake client/object.

    fake_client stands in for an OSClient; fake_object stands in for a
    resource description dict (MagicMock tolerates both attribute and
    item access).
    """
    def setUp(self):
        super(JavelinUnitTest, self).setUp()
        javelin.setup_logging()
        self.fake_client = mock.MagicMock()
        self.fake_object = mock.MagicMock()

    def test_load_resources(self):
        # load_resources should open the path and feed the handle to yaml.load
        with mock.patch('six.moves.builtins.open', mock.mock_open(),
                        create=True) as open_mock:
            with mock.patch('yaml.load', mock.MagicMock(),
                            create=True) as load_mock:
                javelin.load_resources(self.fake_object)
        load_mock.assert_called_once_with(open_mock(self.fake_object))

    def test_keystone_admin(self):
        # admin client is built from the os_* credentials in OPTS
        self.useFixture(mockpatch.PatchObject(javelin, "OSClient"))
        javelin.OPTS = self.fake_object
        javelin.keystone_admin()
        javelin.OSClient.assert_called_once_with(
            self.fake_object.os_username,
            self.fake_object.os_password,
            self.fake_object.os_tenant_name)

    def test_client_for_user(self):
        # a known user name resolves to a client built from that user's creds
        fake_user = mock.MagicMock()
        javelin.USERS = {fake_user['name']: fake_user}
        self.useFixture(mockpatch.PatchObject(javelin, "OSClient"))
        javelin.client_for_user(fake_user['name'])
        javelin.OSClient.assert_called_once_with(
            fake_user['name'], fake_user['pass'], fake_user['tenant'])

    def test_client_for_non_existing_user(self):
        # an unknown user must not produce a client
        fake_non_existing_user = self.fake_object
        fake_user = mock.MagicMock()
        javelin.USERS = {fake_user['name']: fake_user}
        self.useFixture(mockpatch.PatchObject(javelin, "OSClient"))
        javelin.client_for_user(fake_non_existing_user['name'])
        self.assertFalse(javelin.OSClient.called)

    def test_attach_volumes(self):
        # attach_volumes resolves volume/server by name and calls the API
        self.useFixture(mockpatch.PatchObject(javelin, "client_for_user",
                                              return_value=self.fake_client))
        self.useFixture(mockpatch.PatchObject(
            javelin, "_get_volume_by_name",
            return_value=self.fake_object.volume))
        self.useFixture(mockpatch.PatchObject(
            javelin, "_get_server_by_name",
            return_value=self.fake_object.server))
        javelin.attach_volumes([self.fake_object])
        mocked_function = self.fake_client.volumes.attach_volume
        mocked_function.assert_called_once_with(
            self.fake_object.volume['id'],
            self.fake_object.server['id'],
            self.fake_object['device'])
class TestCreateResources(JavelinUnitTest):
    """Unit tests for javelin's create_* resource-provisioning helpers.

    Each test patches the client factory, runs one create_* function, and
    asserts the expected client API call was (or was not) made; the
    *_existing/*_duplicate tests check idempotency.
    """
    def test_create_tenants(self):
        # tenant absent from the listing -> it gets created
        self.fake_client.identity.list_tenants.return_value = {'tenants': []}
        self.useFixture(mockpatch.PatchObject(javelin, "keystone_admin",
                                              return_value=self.fake_client))
        javelin.create_tenants([self.fake_object['name']])
        mocked_function = self.fake_client.identity.create_tenant
        mocked_function.assert_called_once_with(self.fake_object['name'])

    def test_create_duplicate_tenant(self):
        # tenant already listed -> no create call
        self.fake_client.identity.list_tenants.return_value = {'tenants': [
            {'name': self.fake_object['name']}]}
        self.useFixture(mockpatch.PatchObject(javelin, "keystone_admin",
                                              return_value=self.fake_client))
        javelin.create_tenants([self.fake_object['name']])
        mocked_function = self.fake_client.identity.create_tenant
        self.assertFalse(mocked_function.called)

    def test_create_users(self):
        # user lookup raising NotFound means the user must be created
        self.fake_client.identity.get_tenant_by_name.return_value = \
            self.fake_object['tenant']
        self.fake_client.identity.get_user_by_username.side_effect = \
            lib_exc.NotFound("user is not found")
        self.useFixture(mockpatch.PatchObject(javelin, "keystone_admin",
                                              return_value=self.fake_client))
        javelin.create_users([self.fake_object])
        fake_tenant_id = self.fake_object['tenant']['id']
        # javelin derives the email as <user>@<tenant_id>
        fake_email = "%s@%s" % (self.fake_object['user'], fake_tenant_id)
        mocked_function = self.fake_client.identity.create_user
        mocked_function.assert_called_once_with(self.fake_object['name'],
                                                self.fake_object['password'],
                                                fake_tenant_id,
                                                fake_email,
                                                enabled=True)

    def test_create_user_missing_tenant(self):
        # no tenant -> user creation is skipped
        self.fake_client.identity.get_tenant_by_name.side_effect = \
            lib_exc.NotFound("tenant is not found")
        self.useFixture(mockpatch.PatchObject(javelin, "keystone_admin",
                                              return_value=self.fake_client))
        javelin.create_users([self.fake_object])
        mocked_function = self.fake_client.identity.create_user
        self.assertFalse(mocked_function.called)

    def test_create_objects(self):
        # both the container and the object inside it should be created
        self.useFixture(mockpatch.PatchObject(javelin, "client_for_user",
                                              return_value=self.fake_client))
        self.useFixture(mockpatch.PatchObject(javelin, "_assign_swift_role"))
        self.useFixture(mockpatch.PatchObject(javelin, "_file_contents",
                                              return_value=self.fake_object.content))
        javelin.create_objects([self.fake_object])
        mocked_function = self.fake_client.containers.create_container
        mocked_function.assert_called_once_with(self.fake_object['container'])
        mocked_function = self.fake_client.objects.create_object
        mocked_function.assert_called_once_with(self.fake_object['container'],
                                                self.fake_object['name'],
                                                self.fake_object.content)

    def test_create_images(self):
        # image not present -> create_image then store_image_file with the
        # opened file handle
        self.fake_client.images.create_image.return_value = \
            self.fake_object['body']
        self.useFixture(mockpatch.PatchObject(javelin, "client_for_user",
                                              return_value=self.fake_client))
        self.useFixture(mockpatch.PatchObject(javelin, "_get_image_by_name",
                                              return_value=[]))
        self.useFixture(mockpatch.PatchObject(javelin, "_resolve_image",
                                              return_value=(None, None)))
        with mock.patch('six.moves.builtins.open', mock.mock_open(),
                        create=True) as open_mock:
            javelin.create_images([self.fake_object])
        mocked_function = self.fake_client.images.create_image
        mocked_function.assert_called_once_with(self.fake_object['name'],
                                                self.fake_object['format'],
                                                self.fake_object['format'])
        mocked_function = self.fake_client.images.store_image_file
        fake_image_id = self.fake_object['body'].get('id')
        mocked_function.assert_called_once_with(fake_image_id, open_mock())

    def test_create_networks(self):
        # network absent from the listing -> it gets created by name
        self.fake_client.networks.list_networks.return_value = {
            'networks': []}
        self.useFixture(mockpatch.PatchObject(javelin, "client_for_user",
                                              return_value=self.fake_client))
        javelin.create_networks([self.fake_object])
        mocked_function = self.fake_client.networks.create_network
        mocked_function.assert_called_once_with(name=self.fake_object['name'])

    def test_create_subnet(self):
        # subnet creation resolves the parent network and picks the IP
        # version from the parsed CIDR
        fake_network = self.fake_object['network']
        self.useFixture(mockpatch.PatchObject(javelin, "client_for_user",
                                              return_value=self.fake_client))
        self.useFixture(mockpatch.PatchObject(javelin, "_get_resource_by_name",
                                              return_value=fake_network))
        fake_netaddr = mock.MagicMock()
        self.useFixture(mockpatch.PatchObject(javelin, "netaddr",
                                              return_value=fake_netaddr))
        fake_version = javelin.netaddr.IPNetwork().version
        javelin.create_subnets([self.fake_object])
        mocked_function = self.fake_client.networks.create_subnet
        mocked_function.assert_called_once_with(network_id=fake_network['id'],
                                                cidr=self.fake_object['range'],
                                                name=self.fake_object['name'],
                                                ip_version=fake_version)

    def test_create_volumes(self):
        # volume absent -> create it and wait until it is 'available'
        self.useFixture(mockpatch.PatchObject(javelin, "client_for_user",
                                              return_value=self.fake_client))
        self.useFixture(mockpatch.PatchObject(javelin, "_get_volume_by_name",
                                              return_value=None))
        self.fake_client.volumes.create_volume.return_value = \
            self.fake_object.body
        javelin.create_volumes([self.fake_object])
        mocked_function = self.fake_client.volumes.create_volume
        mocked_function.assert_called_once_with(
            size=self.fake_object['gb'],
            display_name=self.fake_object['name'])
        mocked_function = self.fake_client.volumes.wait_for_volume_status
        mocked_function.assert_called_once_with(
            self.fake_object.body['volume']['id'],
            'available')

    def test_create_volume_existing(self):
        # volume already present -> neither create nor wait is called
        self.useFixture(mockpatch.PatchObject(javelin, "client_for_user",
                                              return_value=self.fake_client))
        self.useFixture(mockpatch.PatchObject(javelin, "_get_volume_by_name",
                                              return_value=self.fake_object))
        self.fake_client.volumes.create_volume.return_value = \
            self.fake_object.body
        javelin.create_volumes([self.fake_object])
        mocked_function = self.fake_client.volumes.create_volume
        self.assertFalse(mocked_function.called)
        mocked_function = self.fake_client.volumes.wait_for_volume_status
        self.assertFalse(mocked_function.called)

    def test_create_router(self):
        # router absent from the listing -> it gets created
        self.fake_client.networks.list_routers.return_value = {'routers': []}
        self.useFixture(mockpatch.PatchObject(javelin, "client_for_user",
                                              return_value=self.fake_client))
        javelin.create_routers([self.fake_object])
        mocked_function = self.fake_client.networks.create_router
        mocked_function.assert_called_once_with(self.fake_object['name'])

    def test_create_router_existing(self):
        # router already listed -> no create call
        self.fake_client.networks.list_routers.return_value = {
            'routers': [self.fake_object]}
        self.useFixture(mockpatch.PatchObject(javelin, "client_for_user",
                                              return_value=self.fake_client))
        javelin.create_routers([self.fake_object])
        mocked_function = self.fake_client.networks.create_router
        self.assertFalse(mocked_function.called)

    def test_create_secgroup(self):
        # security group absent -> created with name and description
        self.useFixture(mockpatch.PatchObject(javelin, "client_for_user",
                                              return_value=self.fake_client))
        self.fake_client.secgroups.list_security_groups.return_value = (
            {'security_groups': []})
        self.fake_client.secgroups.create_security_group.return_value = \
            {'security_group': {'id': self.fake_object['secgroup_id']}}
        javelin.create_secgroups([self.fake_object])
        mocked_function = self.fake_client.secgroups.create_security_group
        mocked_function.assert_called_once_with(
            name=self.fake_object['name'],
            description=self.fake_object['description'])
class TestDestroyResources(JavelinUnitTest):
    """Unit tests for javelin's destroy_* resource-teardown helpers.

    Each test patches the client factory / name-lookup helpers, runs one
    destroy_* function, and asserts the expected delete API call was made.
    """
    def test_destroy_tenants(self):
        fake_tenant = self.fake_object['tenant']
        fake_auth = self.fake_client
        fake_auth.identity.get_tenant_by_name.return_value = fake_tenant
        self.useFixture(mockpatch.PatchObject(javelin, "keystone_admin",
                                              return_value=fake_auth))
        javelin.destroy_tenants([fake_tenant])
        mocked_function = fake_auth.identity.delete_tenant
        mocked_function.assert_called_once_with(fake_tenant['id'])

    def test_destroy_users(self):
        fake_user = self.fake_object['user']
        fake_tenant = self.fake_object['tenant']
        fake_auth = self.fake_client
        fake_auth.identity.get_tenant_by_name.return_value = fake_tenant
        fake_auth.identity.get_user_by_username.return_value = fake_user
        self.useFixture(mockpatch.PatchObject(javelin, "keystone_admin",
                                              return_value=fake_auth))
        javelin.destroy_users([fake_user])
        mocked_function = fake_auth.identity.delete_user
        mocked_function.assert_called_once_with(fake_user['id'])

    def test_destroy_objects(self):
        self.fake_client.objects.delete_object.return_value = \
            {'status': "200"}, ""
        self.useFixture(mockpatch.PatchObject(javelin, "client_for_user",
                                              return_value=self.fake_client))
        javelin.destroy_objects([self.fake_object])
        mocked_function = self.fake_client.objects.delete_object
        # BUG FIX: the original called 'asswert_called_once(...)', a
        # misspelled attribute that MagicMock happily records as a plain
        # call, so nothing was ever asserted here.
        mocked_function.assert_called_once_with(self.fake_object['container'],
                                                self.fake_object['name'])

    def test_destroy_images(self):
        self.useFixture(mockpatch.PatchObject(javelin, "client_for_user",
                                              return_value=self.fake_client))
        self.useFixture(mockpatch.PatchObject(javelin, "_get_image_by_name",
                                              return_value=self.fake_object['image']))
        javelin.destroy_images([self.fake_object])
        mocked_function = self.fake_client.images.delete_image
        mocked_function.assert_called_once_with(
            self.fake_object['image']['id'])

    def test_destroy_networks(self):
        self.useFixture(mockpatch.PatchObject(javelin, "client_for_user",
                                              return_value=self.fake_client))
        self.useFixture(mockpatch.PatchObject(
            javelin, "_get_resource_by_name",
            return_value=self.fake_object['resource']))
        javelin.destroy_networks([self.fake_object])
        mocked_function = self.fake_client.networks.delete_network
        mocked_function.assert_called_once_with(
            self.fake_object['resource']['id'])

    def test_destroy_volumes(self):
        # volumes are detached before being deleted
        self.useFixture(mockpatch.PatchObject(javelin, "client_for_user",
                                              return_value=self.fake_client))
        self.useFixture(mockpatch.PatchObject(
            javelin, "_get_volume_by_name",
            return_value=self.fake_object.volume))
        javelin.destroy_volumes([self.fake_object])
        mocked_function = self.fake_client.volumes.detach_volume
        mocked_function.assert_called_once_with(self.fake_object.volume['id'])
        mocked_function = self.fake_client.volumes.delete_volume
        mocked_function.assert_called_once_with(self.fake_object.volume['id'])

    def test_destroy_subnets(self):
        self.useFixture(mockpatch.PatchObject(javelin, "client_for_user",
                                              return_value=self.fake_client))
        fake_subnet_id = self.fake_object['subnet_id']
        self.useFixture(mockpatch.PatchObject(javelin, "_get_resource_by_name",
                                              return_value={
                                                  'id': fake_subnet_id}))
        javelin.destroy_subnets([self.fake_object])
        mocked_function = self.fake_client.subnets.delete_subnet
        mocked_function.assert_called_once_with(fake_subnet_id)

    def test_destroy_routers(self):
        self.useFixture(mockpatch.PatchObject(javelin, "client_for_user",
                                              return_value=self.fake_client))

        # this function is used on 2 different occasions in the code
        def _fake_get_resource_by_name(*args):
            if args[1] == "routers":
                return {"id": self.fake_object['router_id']}
            elif args[1] == "subnets":
                return {"id": self.fake_object['subnet_id']}
        # NOTE(review): this rebinds the module attribute directly without
        # restoring it afterwards, leaking the stub into later tests;
        # consider mockpatch.PatchObject with side_effect instead.
        javelin._get_resource_by_name = _fake_get_resource_by_name
        javelin.destroy_routers([self.fake_object])
        mocked_function = self.fake_client.networks.delete_router
        mocked_function.assert_called_once_with(
            self.fake_object['router_id'])

    def test_destroy_secgroup(self):
        self.useFixture(mockpatch.PatchObject(javelin, "client_for_user",
                                              return_value=self.fake_client))
        fake_secgroup = {'id': self.fake_object['id']}
        self.useFixture(mockpatch.PatchObject(javelin, "_get_resource_by_name",
                                              return_value=fake_secgroup))
        javelin.destroy_secgroups([self.fake_object])
        mocked_function = self.fake_client.secgroups.delete_security_group
        mocked_function.assert_called_once_with(self.fake_object['id'])
|
izadorozhna/tempest
|
tempest/tests/cmd/test_javelin.py
|
Python
|
apache-2.0
| 18,257
| 0
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.4 on 2018-03-05 05:39
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated (Django 1.10.4): extends Sponsor.conference's choice
    # list with PyCon Taiwan 2018 and makes it the default conference.

    dependencies = [
        ('sponsors', '0012_sponsor_level_smallint'),
    ]

    operations = [
        migrations.AlterField(
            model_name='sponsor',
            name='conference',
            field=models.SlugField(choices=[('pycontw-2016', 'PyCon Taiwan 2016'), ('pycontw-2017', 'PyCon Taiwan 2017'), ('pycontw-2018', 'PyCon Taiwan 2018')], default='pycontw-2018', verbose_name='conference'),
        ),
    ]
|
pycontw/pycontw2016
|
src/sponsors/migrations/0013_auto_20180305_1339.py
|
Python
|
mit
| 624
| 0.001603
|
###############################################################################
# ilastik: interactive learning and segmentation toolkit
#
# Copyright (C) 2011-2014, the ilastik developers
# <team@ilastik.org>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# In addition, as a special exception, the copyright holders of
# ilastik give you permission to combine ilastik with applets,
# workflows and plugins which are not covered under the GNU
# General Public License.
#
# See the LICENSE file for details. License information is also available
# on the ilastik web site at:
# http://ilastik.org/license.html
###############################################################################
import sys
from functools import partial
from PyQt4.QtGui import QApplication
import threading
# This function was copied from: http://bugs.python.org/issue1230540
# It is necessary because sys.excepthook doesn't work for unhandled exceptions in other threads.
def install_thread_excepthook():
    """
    Make sys.excepthook fire for unhandled exceptions in worker threads.

    Workaround for the long-standing CPython bug where sys.excepthook is
    bypassed for exceptions raised in threads other than the main one
    (https://sourceforge.net/tracker/?func=detail&atid=105470&aid=1230540&group_id=5470).
    Call once from __main__ before creating any threads.
    If using psyco, call psycho.cannotcompile(threading.Thread.run)
    since this replaces a new-style class method.
    """
    original_run = threading.Thread.run

    def patched_run(*args, **kwargs):
        try:
            original_run(*args, **kwargs)
        except (KeyboardInterrupt, SystemExit):
            # let interpreter-shutdown signals propagate untouched
            raise
        except:
            # route everything else through the global hook
            sys.excepthook(*sys.exc_info())

    threading.Thread.run = patched_run
#python launch_workflow.py --workflow=PixelClassificationWorkflow --playback_script=$f --playback_speed=2.0 --exit_on_failure --exit_on_success
#sys.argv.append( "/Users/bergs/MyProject.ilp" )

## EXAMPLE PLAYBACK TESTING ARGS
#sys.argv.append( "--playback_script=/Users/bergs/Documents/workspace/ilastik-meta/ilastik/tests/event_based/recording-20130450-2111.py" )
#sys.argv.append( "--playback_speed=3" )
#sys.argv.append( "--exit_on_failure" )

# NOTE(review): the workflow argument is force-appended here, so this
# hard-coded value wins over any --workflow supplied on the command line.
sys.argv.append( "--workflow=PixelClassificationWorkflow" )

import argparse
parser = argparse.ArgumentParser( description="Ilastik Pixel Classification Workflow" )
parser.add_argument('--playback_script', help='An event recording to play back after the main window has opened.', required=False)
parser.add_argument('--playback_speed', help='Speed to play the playback script.', default=0.5, type=float)
parser.add_argument('--exit_on_failure', help='Immediately call exit(1) if an unhandled exception occurs.', action='store_true', default=False)
parser.add_argument('--exit_on_success', help='Quit the app when the playback is complete.', action='store_true', default=False)
parser.add_argument('--project', nargs='?', help='A project file to open on startup.')
parser.add_argument('--workflow', help='A project file to open on startup.')

parsed_args = parser.parse_args()

# callables run by the shell once the GUI is up
init_funcs = []

# Start the GUI
if parsed_args.project is not None:
    def loadProject(shell):
        # open the requested project as soon as the shell exists
        shell.openProjectFile(parsed_args.project)
    init_funcs.append( loadProject )

onfinish = None
if parsed_args.exit_on_success:
    # quit the Qt application when playback finishes
    onfinish = QApplication.quit

if parsed_args.playback_script is not None:
    from ilastik.utility.gui.eventRecorder import EventPlayer
    def play_recording(shell):
        # replay a recorded GUI session at the requested speed
        player = EventPlayer(parsed_args.playback_speed)
        player.play_script(parsed_args.playback_script, onfinish)
    init_funcs.append( partial(play_recording) )

if parsed_args.exit_on_failure:
    # chain a hard-exit behind the existing excepthook so test runs fail fast
    old_excepthook = sys.excepthook
    def print_exc_and_exit(*args):
        old_excepthook(*args)
        sys.stderr.write("Exiting early due to an unhandled exception.  See error output above.\n")
        QApplication.exit(1)
    sys.excepthook = print_exc_and_exit
    install_thread_excepthook()

# Import all possible workflows so they are registered with the base class
import ilastik.workflows

# Ask the base class to give us the workflow type
from ilastik.workflow import Workflow
workflowClass = Workflow.getSubclass(parsed_args.workflow)

# Launch the GUI
from ilastik.shell.gui.startShellGui import startShellGui
sys.exit( startShellGui( workflowClass, *init_funcs ) )
|
nielsbuwen/ilastik
|
tests/launch_workflow.py
|
Python
|
gpl-3.0
| 4,466
| 0.008509
|
from __future__ import division
import encoder
import socket_class as socket
import threading
import time
import sys
# cumulative encoder tick counts (right wheel, left wheel)
rightC,leftC = (0,0)
# client socket, created in __main__
s = None
# connection defaults, overridable via argv (see checkArgs)
# NOTE(review): despite the name, 'host' holds the TCP port number
IP = "10.42.0.1"
host = 50679
class sendData(threading.Thread):
    """Thread that periodically streams the current encoder counts over the socket."""
    def __init__(self,waitTime):
        # seconds to sleep between transmissions
        self.waitTime = waitTime
        threading.Thread.__init__(self)
    def run(self):
        #send info every waitTime
        global s
        global rightC,leftC
        conf = "OK"
        while True:
            if(conf == "OK"):
                # "rightC,leftC" payload; peer replies "OK" to keep streaming
                s.send(str(rightC)+","+str(leftC))
                conf = s.recv(10)
                print "sent",str(rightC),",",str(leftC)
            # NOTE(review): once conf != "OK" the loop spins forever without
            # ever sending again - confirm whether that is intentional.
            time.sleep(self.waitTime)
def right():
    # encoder callback: count one tick on the right wheel
    global rightC
    rightC += 1
    print "right: ",rightC,"\t","left :",leftC
def left():
    # encoder callback: count one tick on the left wheel
    global leftC
    leftC += 1
    print "right: ",rightC,"\t","left :",leftC
def checkArgs():
    # Override the default IP/port with argv[1]/argv[2] when supplied.
    # NOTE(review): sys.argv[2] is kept as a string here - confirm whether
    # the socket layer expects an int port.
    global IP,host
    if(len(sys.argv)!=1):
        IP = sys.argv[1]
        host = sys.argv[2]
if __name__ == "__main__":
    """if 2 arguments are passed in overwrite IP and port number to those values else use IP = 10.42.0.1 and 50679"""
    encoder.encoderSetup()
    # only 0 or 2 extra args are valid invocations for networked mode
    if len(sys.argv) in (1,3):
        checkArgs()
        s = socket.initSocket()
        # retry until the server accepts the connection
        while True:
            try:
                socket.connect(s,IP,host)
                break
            except:
                pass
        #start thread to send info in background
        t = sendData(.01)
        t.daemon = True
        t.start()
        #read encoder values
        encoder.getEncoder(right,left)
    else:
        # bad arg count: still read encoders locally, just don't network
        encoder.getEncoder(right,left)
|
surajshanbhag/Indoor_SLAM
|
src/control/piControl/encoderRun.py
|
Python
|
gpl-3.0
| 1,623
| 0.021565
|
import numpy as np
import scipy.sparse as sp
from scipy.optimize import fmin_l_bfgs_b
from Orange.classification import Learner, Model
__all__ = ["LinearRegressionLearner"]
class LinearRegressionLearner(Learner):
    """L2-regularized (ridge) linear regression fitted with L-BFGS."""

    def __init__(self, lambda_=1.0, preprocessors=None, **fmin_args):
        '''L2 regularized linear regression (a.k.a Ridge regression)

        This model uses the L-BFGS algorithm to minimize the linear least
        squares penalty with L2 regularization. When using this model you
        should:

        - Choose a suitable regularization parameter lambda_
        - Continuize all discrete attributes
        - Consider appending a column of ones to the dataset (intercept term)
        - Transform the dataset so that the columns are on a similar scale

        :param lambda_: the regularization parameter. Higher values of lambda_
            force the coefficients to be small.
        :type lambda_: float

        Examples
        --------

            import numpy as np
            from Orange.data import Table
            from Orange.classification.linear_regression import LinearRegressionLearner

            data = Table('housing')
            data.X = (data.X - np.mean(data.X, axis=0)) / np.std(data.X, axis=0)  # normalize
            data.X = np.hstack((data.X, np.ones((data.X.shape[0], 1))))  # append ones
            m = LinearRegressionLearner(lambda_=1.0)
            c = m(data)  # fit
            print(c(data))  # predict
        '''
        super().__init__(preprocessors=preprocessors)
        self.lambda_ = lambda_
        # extra keyword arguments forwarded verbatim to fmin_l_bfgs_b
        self.fmin_args = fmin_args

    def cost_grad(self, theta, X, y):
        """Return (cost, gradient) of the regularized least-squares objective."""
        # residuals of the current fit
        t = X.dot(theta) - y

        cost = t.dot(t)
        cost += self.lambda_ * theta.dot(theta)
        cost /= 2.0 * X.shape[0]

        grad = X.T.dot(t)
        grad += self.lambda_ * theta
        grad /= X.shape[0]

        return cost, grad

    def fit(self, X, Y, W):
        """Minimize the ridge objective; return a LinearRegressionModel.

        W (instance weights) is accepted for interface compatibility but is
        not used by this learner.
        """
        # GENERALIZATION: accept both 1-D and column-vector targets; the
        # original unconditionally read Y.shape[1], which raises IndexError
        # for a 1-D Y.
        if Y.ndim > 1 and Y.shape[1] > 1:
            raise ValueError('Linear regression does not support '
                             'multi-target classification')

        if np.isnan(np.sum(X)) or np.isnan(np.sum(Y)):
            raise ValueError('Linear regression does not support '
                             'unknown values')

        theta = np.zeros(X.shape[1])
        theta, cost, ret = fmin_l_bfgs_b(self.cost_grad, theta,
                                         args=(X, Y.ravel()),
                                         **self.fmin_args)

        return LinearRegressionModel(theta)
class LinearRegressionModel(Model):
    """Prediction model produced by LinearRegressionLearner."""

    def __init__(self, theta):
        # Coefficient vector, one entry per column of X.
        self.theta = theta

    def predict(self, X):
        """Return predictions as the linear combination ``X . theta``."""
        return X.dot(self.theta)
if __name__ == '__main__':
    # Ad-hoc evaluation script: cross-validated MSE on the housing data
    # for several regularization strengths.
    import Orange.data
    import sklearn.cross_validation as skl_cross_validation

    np.random.seed(42)

    def numerical_grad(f, params, e=1e-4):
        """Central-difference numerical gradient of f at params (for
        gradient checking of cost_grad)."""
        grad = np.zeros_like(params)
        perturb = np.zeros_like(params)
        for i in range(params.size):
            perturb[i] = e
            j1 = f(params - perturb)
            j2 = f(params + perturb)
            grad[i] = (j2 - j1) / (2.0 * e)
            perturb[i] = 0
        return grad

    d = Orange.data.Table('housing')
    d.X = np.hstack((d.X, np.ones((d.X.shape[0], 1))))  # intercept column
    d.shuffle()

    # m = LinearRegressionLearner(lambda_=1.0)
    # print(m(d)(d))

    # # gradient check
    # m = LinearRegressionLearner(lambda_=1.0)
    # theta = np.random.randn(d.X.shape[1])
    #
    # ga = m.cost_grad(theta, d.X, d.Y.ravel())[1]
    # gm = numerical_grad(lambda t: m.cost_grad(t, d.X, d.Y.ravel())[0], theta)
    #
    # print(np.sum((ga - gm)**2))

    # K-fold CV mean squared error for each lambda.
    for lambda_ in (0.01, 0.03, 0.1, 0.3, 1, 3):
        m = LinearRegressionLearner(lambda_=lambda_)
        scores = []
        for tr_ind, te_ind in skl_cross_validation.KFold(d.X.shape[0]):
            s = np.mean((m(d[tr_ind])(d[te_ind]) - d[te_ind].Y.ravel())**2)
            scores.append(s)
        print('{:5.2f} {}'.format(lambda_, np.mean(scores)))

    # Baselines: unregularized fit on all data, and predict-the-mean.
    m = LinearRegressionLearner(lambda_=0)
    print('test data', np.mean((m(d)(d) - d.Y.ravel())**2))
    print('majority', np.mean((np.mean(d.Y.ravel()) - d.Y.ravel())**2))
|
jzbontar/orange-tree
|
Orange/classification/linear_regression.py
|
Python
|
gpl-3.0
| 4,108
| 0.000974
|
"""
Test for opmapp application.
"""
# from python
import datetime
# from selenium
from selenium.webdriver.firefox.webdriver import WebDriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import Select
from selenium.common.exceptions import NoSuchElementException
from django.test import LiveServerTestCase
import unittest, time, re
class LoginLogoutTestCase(LiveServerTestCase):
    """Selenium test: a fixture user can log in and out via the web UI."""
    fixtures = ['user-data.json', 'basic-data.json']

    @classmethod
    def setUpClass(cls):
        # One shared Firefox instance for the whole case; starting a
        # browser per test would be far too slow.
        cls.selenium = WebDriver()
        cls.selenium.implicitly_wait(30)
        super(LoginLogoutTestCase, cls).setUpClass()

    @classmethod
    def tearDownClass(cls):
        cls.selenium.quit()
        super(LoginLogoutTestCase, cls).tearDownClass()

    def setUp(self):
        # Soft-assert failures are collected here and checked in tearDown.
        self.verificationErrors = []
        super(LoginLogoutTestCase, self).setUp()

    def tearDown(self):
        self.assertEqual([], self.verificationErrors)
        super(LoginLogoutTestCase, self).tearDown()

    def test_login(self):
        """Fill the login form, verify the username appears, then log out."""
        driver = self.selenium
        driver.get(self.live_server_url + "/")
        driver.find_element_by_link_text("Login").click()
        driver.find_element_by_id("id_username").clear()
        driver.find_element_by_id("id_username").send_keys("opmtest")
        driver.find_element_by_id("id_password").clear()
        driver.find_element_by_id("id_password").send_keys("secretpass")
        driver.find_element_by_css_selector("input[type=\"submit\"]").click()
        # Warning: assertTextPresent may require manual changes
        self.assertRegexpMatches(driver.find_element_by_css_selector("BODY").text, r"^[\s\S]*opmtest[\s\S]*$")
        driver.find_element_by_link_text("Logout").click()
        # Warning: assertTextPresent may require manual changes
        self.assertRegexpMatches(driver.find_element_by_css_selector("BODY").text, r"^[\s\S]*Login[\s\S]*$")

    def is_element_present(self, how, what):
        # Returns False instead of raising when the element is missing.
        # NOTE: Python 2 `except X, e` syntax.
        try: self.selenium.find_element(by=how, value=what)
        except NoSuchElementException, e: return False
        return True
def authenticate(username, password, base_url, selenium, client):
    """
    Authenticates the selenium driver using the django client driver.
    Basically, it passes the sessionid cookie to selenium.

    :param username: login name to authenticate with
    :param password: password for *username*
    :param base_url: URL fetched once so the cookie domain matches
    :param selenium: selenium WebDriver to receive the session cookie
    :param client: Django test client used to perform the actual login
    """
    # Bug fix: the credentials were previously hard-coded to
    # "opmtest"/"secretpass", silently ignoring the username/password
    # arguments.  Use the parameters instead.
    client.login(username=username, password=password)
    sess = client.cookies['sessionid']
    # Make a first request in order to avoid overlapping of cookies
    selenium.get(base_url)
    selenium.add_cookie({"name": "sessionid",
                         "value": sess.value, "path": sess["path"],
                         "httponly": sess["httponly"], "max-age": sess["max-age"],
                         "expiry": sess["expires"]})
class BasicTestCase(LiveServerTestCase):
    """Shared setup/teardown for list-view selenium test cases.

    Subclasses call setUpBasic() with the property name, the list URL
    and the entity link text; the fixture user is logged in for them.
    """
    fixtures = ['user-data.json', 'basic-data.json']

    @classmethod
    def setUpClass(cls):
        cls.selenium = WebDriver()
        cls.selenium.implicitly_wait(30)
        super(BasicTestCase, cls).setUpClass()

    @classmethod
    def tearDownClass(cls):
        cls.selenium.quit()
        super(BasicTestCase, cls).tearDownClass()

    def setUpBasic(self, prop, base_url, entity):
        """Log in, select property *prop*, then open the *entity* list view."""
        self.property = prop
        self.entity = entity
        self.base_url = base_url
        self.verificationErrors = []
        # Login test user
        authenticate("opmtest", "secretpass", self.live_server_url+"/",
                     self.selenium, self.client)
        driver = self.selenium
        driver.get(self.live_server_url + "/")
        driver.find_element_by_xpath("//li/a[text()='"+self.property+"']").click()
        driver.get(self.live_server_url + self.base_url)
        driver.find_element_by_link_text(self.entity).click()
        super(BasicTestCase, self).setUp()

    def tearDown(self):
        self.assertEqual([], self.verificationErrors)
        # Logout test user
        driver = self.selenium
        driver.find_element_by_link_text("Logout").click()
        super(BasicTestCase, self).tearDown()

    def check_highlight_property(self):
        # The selected entity's link should be highlighted (yellow style).
        try: self.assertTrue(self.is_element_present(By.XPATH, "//a[contains(@style,'yellow') and .//text()='"+self.entity+"']"))
        except AssertionError as e: self.verificationErrors.append(str(e))

    def check_highlight_entity(self):
        # The page title should mention the entity name.
        driver = self.selenium
        try: self.assertRegexpMatches(driver.title, r"^[\s\S]*"+self.entity+"[\s\S]*$")
        except AssertionError as e: self.verificationErrors.append(str(e))

    def is_element_present(self, how, what):
        # NOTE: Python 2 `except X, e` syntax.
        try: self.selenium.find_element(by=how, value=what)
        except NoSuchElementException, e: return False
        return True
class TenantTestCase(BasicTestCase):
    """Filter/add/edit tests for the Tenants list view."""

    def setUp(self):
        super(TenantTestCase, self).setUpBasic('Broad Ripple Trails', "/tenants/", "Tenants")

    def test_highlight_property(self):
        super(TenantTestCase, self).check_highlight_property()

    def test_highlight_entity(self):
        super(TenantTestCase, self).check_highlight_entity()

    def test_filter_pos(self):
        """Filtering by last-name prefix or by unit shows the matching tenant."""
        driver = self.selenium
        driver.find_element_by_id("id_last_name").clear()
        driver.find_element_by_id("id_last_name").send_keys("obam")
        driver.find_element_by_xpath("//input[@value='Filter']").click()
        try: self.assertTrue(self.is_element_present(By.XPATH, "//div[@id='content']/table[@id='id_list_view']/tbody/tr/td/a[contains(text(),'Obama')]"))
        except AssertionError as e: self.verificationErrors.append(str(e))
        # Reset the filter, then filter by unit instead.
        driver.find_element_by_css_selector("input[type=\"button\"]").click()
        Select(driver.find_element_by_id("id_unit")).select_by_visible_text("5209 CV")
        driver.find_element_by_xpath("//input[@value='Filter']").click()
        try: self.assertTrue(self.is_element_present(By.XPATH, "//div[@id='content']/table[@id='id_list_view']/tbody/tr/td/a[contains(text(),'Obama')]"))
        except AssertionError as e: self.verificationErrors.append(str(e))

    def test_filter_neg(self):
        """Filtering by a non-matching name or unit yields no result rows."""
        driver = self.selenium
        driver.find_element_by_id("id_last_name").clear()
        driver.find_element_by_id("id_last_name").send_keys("obamertrte")
        driver.find_element_by_xpath("//input[@value='Filter']").click()
        try: self.assertTrue(self.is_element_present(By.XPATH, "//div[@id='content']/table[@id='id_list_view']/tbody[count(tr)=0]"))
        except AssertionError as e: self.verificationErrors.append(str(e))
        driver.find_element_by_css_selector("input[type=\"button\"]").click()
        Select(driver.find_element_by_id("id_unit")).select_by_visible_text("5211 CV")
        driver.find_element_by_xpath("//input[@value='Filter']").click()
        try: self.assertFalse(self.is_element_present(By.XPATH, "//div[@id='content']/table[@id='id_list_view']/tbody/tr/td/a[contains(text(),'Obama')]"))
        except AssertionError as e: self.verificationErrors.append(str(e))

    def test_add_pos(self):
        """Adding a fully filled-in tenant makes it appear in the list."""
        driver = self.selenium
        driver.find_element_by_id("id_add_item").click()
        driver.find_element_by_id("id_first_name").clear()
        driver.find_element_by_id("id_first_name").send_keys("Barack")
        driver.find_element_by_id("id_last_name").clear()
        driver.find_element_by_id("id_last_name").send_keys("Obama")
        driver.find_element_by_id("id_start_date").clear()
        driver.find_element_by_id("id_start_date").send_keys("2012-12-02")
        driver.find_element_by_id("id_end_date").clear()
        driver.find_element_by_id("id_end_date").send_keys("2012-12-31")
        Select(driver.find_element_by_id("id_unit")).select_by_visible_text("5209 CV")
        driver.find_element_by_id("id_permanent_address1").clear()
        driver.find_element_by_id("id_permanent_address1").send_keys("1220 Montgomery St.")
        driver.find_element_by_id("id_permanent_address2").clear()
        driver.find_element_by_id("id_permanent_address2").send_keys("1995 Shattuck St.")
        driver.find_element_by_id("id_permanent_city").clear()
        driver.find_element_by_id("id_permanent_city").send_keys("San Francisco")
        Select(driver.find_element_by_id("id_permanent_state")).select_by_visible_text("California")
        driver.find_element_by_id("id_permanent_zip_code").clear()
        driver.find_element_by_id("id_permanent_zip_code").send_keys("94112")
        driver.find_element_by_id("id_permanent_contact_name").clear()
        driver.find_element_by_id("id_permanent_contact_name").send_keys("Bary")
        driver.find_element_by_id("id_phone1").clear()
        driver.find_element_by_id("id_phone1").send_keys("(415) 344 8992")
        driver.find_element_by_id("id_phone2").clear()
        driver.find_element_by_id("id_phone2").send_keys("(510) 223-6533")
        driver.find_element_by_id("id_email").clear()
        driver.find_element_by_id("id_email").send_keys("barack.obama@usa.gov")
        driver.find_element_by_id("id_comments").clear()
        driver.find_element_by_id("id_comments").send_keys("Nothing else to add")
        driver.find_element_by_css_selector("input[type=\"submit\"]").click()
        try: self.assertTrue(self.is_element_present(By.XPATH, "//div[@id='content']/table[@id='id_list_view']/tbody/tr[td/text()='Barack']"))
        except AssertionError as e: self.verificationErrors.append(str(e))

    def test_add_neg(self):
        """Submitting the form with a required field missing shows an error."""
        driver = self.selenium
        driver.find_element_by_css_selector("img").click()
        driver.find_element_by_id("id_first_name").clear()
        driver.find_element_by_id("id_first_name").send_keys("Barack2")
        driver.find_element_by_id("id_last_name").clear()
        driver.find_element_by_id("id_last_name").send_keys("Obama2")
        driver.find_element_by_id("id_start_date").click()
        driver.find_element_by_id("id_start_date").clear()
        driver.find_element_by_id("id_start_date").send_keys("2013-01-15")
        driver.find_element_by_id("id_end_date").clear()
        driver.find_element_by_id("id_end_date").send_keys("2013-01-26")
        driver.find_element_by_id("id_permanent_address1").clear()
        driver.find_element_by_id("id_permanent_address1").send_keys("6666 Wrong St.")
        driver.find_element_by_css_selector("input[type=\"submit\"]").click()
        self.assertTrue(self.is_element_present(By.XPATH, "//ul[contains(@class, 'errorlist') and .//text() = 'This field is required.']"))

    def test_edit_pos(self):
        """Editing an existing tenant persists the changed fields."""
        driver = self.selenium
        driver.find_element_by_xpath("(//a[contains(text(),'Obama')])").click()
        Select(driver.find_element_by_id("id_permanent_state")).select_by_visible_text("Alabama")
        driver.find_element_by_id("id_permanent_zip_code").clear()
        driver.find_element_by_id("id_permanent_zip_code").send_keys("95788")
        driver.find_element_by_id("id_first_name").clear()
        driver.find_element_by_id("id_first_name").send_keys("Michelle")
        driver.find_element_by_id("id_start_date").click()
        driver.find_element_by_id("id_start_date").clear()
        driver.find_element_by_id("id_start_date").send_keys("2012-11-25")
        driver.find_element_by_id("id_end_date").clear()
        driver.find_element_by_id("id_end_date").send_keys("2012-11-30")
        driver.find_element_by_xpath("//div[@id='content']/form/table/tbody/tr[4]/td").click()
        driver.find_element_by_css_selector("input[type=\"submit\"]").click()
        self.assertTrue(self.is_element_present(By.XPATH, "//div[@id='content']/table[2]/tbody/tr/td[text()='Michelle']"))

    def test_edit_neg(self):
        """Clearing the required unit field on edit shows an error."""
        driver = self.selenium
        driver.find_element_by_xpath("(//a[contains(text(),'Obama')])").click()
        Select(driver.find_element_by_id("id_unit")).select_by_visible_text("---------")
        driver.find_element_by_css_selector("input[type=\"submit\"]").click()
        self.assertTrue(self.is_element_present(By.XPATH, "//tr[contains(./th/label/text(), 'Unit')]/td/ul[contains(@class, 'errorlist') and .//text() = 'This field is required.']"))
class CreditMemoTestCase(BasicTestCase):
    """Filter/add tests and unit<->tenant AJAX linking for Credit Memos."""

    def setUp(self):
        super(CreditMemoTestCase, self).setUpBasic('Broad Ripple Trails', "/credit_memos/", "Credit Memos")

    def test_highlight_property(self):
        super(CreditMemoTestCase, self).check_highlight_property()

    def test_highlight_entity(self):
        super(CreditMemoTestCase, self).check_highlight_entity()

    def test_filter_pos(self):
        """Filtering by id, unit, date range and check number finds matches."""
        driver = self.selenium
        driver.find_element_by_id("id_id").clear()
        driver.find_element_by_id("id_id").send_keys("62")
        driver.find_element_by_id("id_filter_button").click()
        try: self.assertTrue(self.is_element_present(By.XPATH, "//div[@id='content']/table[@id='id_list_view']/tbody/tr/td/a[contains(text(),'623')]"))
        except AssertionError as e: self.verificationErrors.append(str(e))
        driver.find_element_by_id("id_reset_button").click()
        Select(driver.find_element_by_id("id_unit")).select_by_visible_text("5209 CV")
        driver.find_element_by_id("id_filter_button").click()
        try: self.assertTrue(self.is_element_present(By.XPATH,
            "//div[@id='content']/table[@id='id_list_view']/tbody/tr/td[contains(text(),'5209 CV')]"))
        except AssertionError as e: self.verificationErrors.append(str(e))
        driver.find_element_by_id("id_reset_button").click()
        driver.find_element_by_id("id_date_from").send_keys("2012-10-01")
        driver.find_element_by_id("id_date_to").send_keys("2012-10-31")
        driver.find_element_by_id("id_filter_button").click()
        # Every listed memo must fall inside the requested date range.
        creditss = driver.find_elements_by_xpath("//div[@id='content']/table[@id='id_list_view']/tbody/tr")
        date_from = datetime.datetime.strptime("2012-10-01", "%Y-%m-%d")
        date_to = datetime.datetime.strptime("2012-10-31", "%Y-%m-%d")
        for c in creditss:
            # Column 3 holds the memo date, rendered like "Oct. 26, 2012".
            date = datetime.datetime.strptime(c.find_element_by_xpath("td[3]").text, "%b. %d, %Y")
            self.assertGreaterEqual(date, date_from)
            self.assertLessEqual(date, date_to)
        driver.find_element_by_id("id_reset_button").click()
        driver.find_element_by_id("id_check").send_keys("123")
        driver.find_element_by_id("id_filter_button").click()
        try: self.assertTrue(self.is_element_present(By.XPATH,
            "//div[@id='content']/table[@id='id_list_view']/tbody/tr/td[contains(text(),'1234567890')]"))
        except AssertionError as e: self.verificationErrors.append(str(e))

    def test_filter_neg(self):
        """Non-matching id/unit/date/check filters produce empty result sets."""
        driver = self.selenium
        driver.find_element_by_id("id_id").clear()
        driver.find_element_by_id("id_id").send_keys("62434543535533")
        driver.find_element_by_id("id_filter_button").click()
        try: self.assertTrue(self.is_element_present(By.XPATH, "//div[@id='content']/table[@id='id_list_view']/tbody[count(tr)=0]"))
        except AssertionError as e: self.verificationErrors.append(str(e))
        driver.find_element_by_id("id_reset_button").click()
        Select(driver.find_element_by_id("id_unit")).select_by_visible_text("5211 CV")
        driver.find_element_by_id("id_filter_button").click()
        try: self.assertTrue(self.is_element_present(By.XPATH, "//div[@id='content']/table[@id='id_list_view']/tbody[count(tr)=0]"))
        except AssertionError as e: self.verificationErrors.append(str(e))
        driver.find_element_by_id("id_reset_button").click()
        driver.find_element_by_id("id_date_from").send_keys("3012-10-01")
        # driver.find_element_by_id("id_date_to").send_keys("2012-10-31")
        driver.find_element_by_id("id_filter_button").click()
        try: self.assertTrue(self.is_element_present(By.XPATH, "//div[@id='content']/table[@id='id_list_view']/tbody[count(tr)=0]"))
        except AssertionError as e: self.verificationErrors.append(str(e))
        driver.find_element_by_id("id_reset_button").click()
        driver.find_element_by_id("id_check").send_keys("20429dfgsdgfgs44g4g3g3443g4309")
        driver.find_element_by_id("id_filter_button").click()
        try: self.assertTrue(self.is_element_present(By.XPATH, "//div[@id='content']/table[@id='id_list_view']/tbody[count(tr)=0]"))
        except AssertionError as e: self.verificationErrors.append(str(e))

    def test_add_pos(self):
        """Saving a complete credit memo shows it in the list view."""
        driver = self.selenium
        driver.find_element_by_css_selector("img").click()
        driver.find_element_by_id("id_date").clear()
        driver.find_element_by_id("id_date").send_keys("2012-10-26")
        Select(driver.find_element_by_id("id_unit")).select_by_visible_text("5211 CV")
        driver.find_element_by_id("id_memo").clear()
        driver.find_element_by_id("id_memo").send_keys("This is a test.")
        driver.find_element_by_id("id_check").clear()
        driver.find_element_by_id("id_check").send_keys("304030393220")
        driver.find_element_by_id("id_amount").clear()
        driver.find_element_by_id("id_amount").send_keys("22333")
        Select(driver.find_element_by_id("id_type")).select_by_visible_text("Payment")
        driver.find_element_by_id("id_description").clear()
        driver.find_element_by_id("id_description").send_keys("This is a positive case.")
        driver.find_element_by_name("savebutt").click()
        try: self.assertTrue(self.is_element_present(By.XPATH, "//div[@id='content']/table[@id='id_list_view']/tbody/tr[.//text() = '304030393220']"))
        except AssertionError as e: self.verificationErrors.append(str(e))

    def test_saveadd(self):
        """The 'save and add another' button returns to the add form."""
        driver = self.selenium
        driver.find_element_by_id("id_add_item").click()
        driver.find_element_by_id("id_date").clear()
        driver.find_element_by_id("id_date").send_keys("2012-10-26")
        Select(driver.find_element_by_id("id_unit")).select_by_visible_text("5211 CV")
        driver.find_element_by_id("id_memo").clear()
        driver.find_element_by_id("id_memo").send_keys("This is a test.")
        driver.find_element_by_id("id_check").clear()
        driver.find_element_by_id("id_check").send_keys("304030393220")
        driver.find_element_by_id("id_amount").clear()
        driver.find_element_by_id("id_amount").send_keys("22333")
        Select(driver.find_element_by_id("id_type")).select_by_visible_text("Payment")
        driver.find_element_by_id("id_description").clear()
        driver.find_element_by_id("id_description").send_keys("This is a positive case.")
        driver.find_element_by_name("saveadd").click()
        self.assertEqual(self.live_server_url+"/credit_memos/add/", driver.current_url)

    def test_add_neg(self):
        """Omitting the required unit field shows a validation error."""
        driver = self.selenium
        driver.find_element_by_id("id_add_item").click()
        driver.find_element_by_id("id_date").clear()
        driver.find_element_by_id("id_date").send_keys("2012-10-26")
        driver.find_element_by_id("id_memo").clear()
        driver.find_element_by_id("id_memo").send_keys("This is a test.")
        driver.find_element_by_id("id_check").clear()
        driver.find_element_by_id("id_check").send_keys("304030393220")
        driver.find_element_by_id("id_amount").clear()
        driver.find_element_by_id("id_amount").send_keys("22333")
        Select(driver.find_element_by_id("id_type")).select_by_visible_text("Payment")
        driver.find_element_by_id("id_description").clear()
        driver.find_element_by_id("id_description").send_keys("This is a negative case.")
        driver.find_element_by_name("savebutt").click()
        self.assertTrue(self.is_element_present(By.XPATH, "//tr[th[contains(.//text(),'Unit')]]/td/ul[contains(@class, 'errorlist') and .//text() = 'This field is required.'] "))

    def test_unit_to_tenant(self):
        """Selecting a unit populates the tenant dropdown (AJAX, poll ~60s)."""
        driver = self.selenium
        driver.find_element_by_id("id_add_item").click()
        Select(driver.find_element_by_id("id_unit")).select_by_visible_text("5209 CV")
        for i in range(60):
            try:
                if self.is_element_present(By.XPATH,
                    "//select[@id='id_tenant']/option[text()='Obama Michelle']"): break
            except: pass
            time.sleep(1)
        else: self.fail("time out")

    def test_tenant_to_unit(self):
        """Selecting a tenant pre-selects their unit (AJAX, poll ~60s)."""
        driver = self.selenium
        driver.find_element_by_id("id_add_item").click()
        Select(driver.find_element_by_id("id_tenant")).select_by_visible_text("Michelle Obama")
        for i in range(60):
            try:
                if self.is_element_present(By.XPATH,
                    "//select[@id='id_unit']/option[text()='5209 CV' and @selected='selected']"): break
            except: pass
            time.sleep(1)
        else: self.fail("time out")
class MaintenanceMemoTestCase(BasicTestCase):
    """AJAX linking tests for the Maintenance Memos view."""

    def setUp(self):
        super(MaintenanceMemoTestCase, self).setUpBasic('Broad Ripple Trails', "/maintenance_memos/", "Maintenance Memos")

    def test_highlight_property(self):
        super(MaintenanceMemoTestCase, self).check_highlight_property()

    def test_highlight_entity(self):
        super(MaintenanceMemoTestCase, self).check_highlight_entity()

    def test_tenant_to_unit(self):
        """
        The test checks if appears the relate unit and the phone when clicking to a requested by tenant
        """
        driver = self.selenium
        driver.find_element_by_id("id_add_item").click()
        Select(driver.find_element_by_id("id_requested_by")).select_by_visible_text("Obama Michelle")
        # Poll up to 60s for the AJAX-populated unit selection.
        for i in range(60):
            try:
                if self.is_element_present(By.XPATH,
                    "//select[@id='id_unit']/option[text()='5209 CV' and @selected='selected']"): break
            except: pass
            time.sleep(1)
        else: self.fail("time out: couldn't find the unit")
        # Poll up to 60s for the tenant's phone number label.
        for i in range(60):
            try:
                if self.is_element_present(By.XPATH, "//tr[contains(th/text(),'Requested')]/td/label[@id='id_phone' and text()='(415) 344 8992']"): break
            except: pass
            time.sleep(1)
        else: self.fail("time out: couldn't find the phone")

    def test_unit_to_tenant(self):
        """
        The test checks whether appears the requested by when clicking on a particular unit id.
        """
        driver = self.selenium
        driver.find_element_by_id("id_add_item").click()
        Select(driver.find_element_by_id("id_unit")).select_by_visible_text("5209 CV")
        for i in range(60):
            try:
                if self.is_element_present(By.XPATH,
                    "//select[@id='id_requested_by']/option[text()='Obama Michelle']"): break
            except: pass
            time.sleep(1)
        else: self.fail("time out")
class UnitTestCase(BasicTestCase):
    """Tests for the post-late-fee view of the Units list."""

    def setUp(self):
        super(UnitTestCase, self).setUpBasic('Broad Ripple Trails', "/units/post_late_fee/", "Units")

    def test_pos_post_late_fee(self):
        """Units owing more than the cutoff are pre-checked; others are not."""
        driver = self.selenium
        driver.get(self.live_server_url + "/units/post_late_fee/")
        driver.find_element_by_id("id_cutoff").clear()
        driver.find_element_by_id("id_cutoff").send_keys("122")
        driver.find_element_by_id("id_submit_cutoff").click()
        # The submitted cutoff should be echoed back in the input field.
        try: self.assertTrue(self.is_element_present(By.XPATH, "//input[@id='id_cutoff' and contains(@value, '122')]"))
        except AssertionError as e: self.verificationErrors.append(str(e))
        units = driver.find_elements_by_xpath("//table[@id='id_list']/tbody/tr")
        for c in units:
            # Column 3: amount owed ("$123.45"); column 1: the checkbox.
            pp = float(c.find_element_by_xpath("td[3]").text.replace('$', ''))
            inp = c.find_element_by_xpath("td[1]/input")
            res = inp.get_attribute('checked')
            if pp > 122.0:
                self.assertTrue(res)
            else:
                self.assertEqual(res, None)
|
clifforloff/opmservice
|
opmapp/tests.py
|
Python
|
gpl-2.0
| 24,082
| 0.008139
|
# -*- coding: utf-8 -*-
#
# sample documentation build configuration file, created by
# sphinx-quickstart on Mon Apr 16 21:22:43 2012.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# Sphinx build configuration (flat, mostly auto-generated template;
# commented-out settings show the defaults).
import sys, os

# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))

# -- General configuration -----------------------------------------------------

# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'

# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = []

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# The suffix of source filenames.
source_suffix = '.rst'

# The encoding of source files.
#source_encoding = 'utf-8-sig'

# The master toctree document.
master_doc = 'index'

# General information about the project.
project = u'sample'
copyright = u'2012, Kenneth Reitz'

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = 'v0.0.1'
# The full version, including alpha/beta/rc tags.
release = 'v0.0.1'

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None

# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']

# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None

# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True

# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True

# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'

# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []

# -- Options for HTML output ---------------------------------------------------

# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'

# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}

# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []

# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None

# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None

# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None

# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']

# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'

# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True

# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}

# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}

# If false, no module index is generated.
#html_domain_indices = True

# If false, no index is generated.
#html_use_index = True

# If true, the index is split into individual pages for each letter.
#html_split_index = False

# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True

# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True

# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True

# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''

# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None

# Output file base name for HTML help builder.
htmlhelp_basename = 'sampledoc'

# -- Options for LaTeX output --------------------------------------------------

latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',

# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',

# Additional stuff for the LaTeX preamble.
#'preamble': '',
}

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
  ('index', 'sample.tex', u'sample Documentation',
   u'Kenneth Reitz', 'manual'),
]

# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None

# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False

# If true, show page references after internal links.
#latex_show_pagerefs = False

# If true, show URL addresses after external links.
#latex_show_urls = False

# Documents to append as an appendix to all manuals.
#latex_appendices = []

# If false, no module index is generated.
#latex_domain_indices = True

# -- Options for manual page output --------------------------------------------

# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', 'sample', u'sample Documentation',
     [u'Kenneth Reitz'], 1)
]

# If true, show URL addresses after external links.
#man_show_urls = False

# -- Options for Texinfo output ------------------------------------------------

# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
  ('index', 'sample', u'sample Documentation',
   u'Kenneth Reitz', 'sample', 'One line description of project.',
   'Miscellaneous'),
]

# Documents to append as an appendix to all manuals.
#texinfo_appendices = []

# If false, no module index is generated.
#texinfo_domain_indices = True

# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
|
igorgue/zeromqlogs
|
docs/conf.py
|
Python
|
mit
| 7,723
| 0.00764
|
#!/usr/bin/env python
import gtk
#import NoteBuffer
import notemeister
class Note:
    """A single note with a title, body text, an optional link and wrap mode."""

    def __init__(self, path=None, title='', body='', link='', wrap="1"):
        """Create a note and its backing editor buffer.

        :param path: tree-store path of the note, if any
        :param title: note title shown in the tree
        :param body: note text
        :param link: optional associated link
        :param wrap: "1" enables word wrap in the editor -- TODO confirm
        """
        self.path = path
        self.title = title
        self.body = body
        self.link = link
        self.wrap = wrap
        # Backing text buffer used by the note editor widget.
        self.buffer = notemeister.NoteBuffer.NoteBuffer()
        self.buffer.set_text(self.body)

    def __str__(self):
        # Bug fix: the old format string referenced self.index, which is
        # never assigned anywhere, so str(note) always raised
        # AttributeError.  Drop the index from the message.
        return 'Note "%s" has body: %s' % (self.title, self.body)
|
robotii/notemeister
|
src/lib/Note.py
|
Python
|
gpl-2.0
| 439
| 0.029613
|
#!/usr/bin/env python
import setuptools
# Hack to prevent stupid TypeError: 'NoneType' object is not callable error on
# exit of python setup.py test # in multiprocessing/util.py _exit_function when
# running python setup.py test (see
# http://www.eby-sarna.com/pipermail/peak/2010-May/003357.html)
try:
    # Import (and keep) multiprocessing so its exit handlers register
    # cleanly; the assert silences "unused import" linters.
    import multiprocessing
    assert multiprocessing
except ImportError:
    pass

# Package metadata and entry points for the Orwell agent.
setuptools.setup(
    name='orwell.agent',
    version='0.0.1',
    description='Agent connecting to the game server.',
    author='',
    author_email='',
    packages=setuptools.find_packages(exclude="test"),
    test_suite='nose.collector',
    install_requires=['pyzmq', 'cliff'],
    tests_require=['nose', 'coverage', 'mock'],
    entry_points={
        'console_scripts': [
            # Installs the `thought_police` command, wired to the agent main().
            'thought_police = orwell.agent.main:main',
        ]
    },
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: BSD License',
        'Operating System :: POSIX :: Linux',
        'Topic :: Utilities',
        'Programming Language :: Python',
        'Programming Language :: Python :: 3.6'],
    python_requires='>=3.6.0',
)
|
orwell-int/agent-server-game-python
|
setup.py
|
Python
|
bsd-3-clause
| 1,239
| 0.000807
|
#!/usr/bin/env python3
"""
py_fanos_test.py: Tests for py_fanos.py
"""
import socket
import sys
import unittest
import py_fanos # module under test
class FanosTest(unittest.TestCase):
    """Round-trip tests for py_fanos send()/recv() over a socketpair."""
    def testSendReceive(self):
        # Connected pair: write on 'left', read on 'right'.
        left, right = socket.socketpair()
        py_fanos.send(left, b'foo')
        fd_out = []
        msg = py_fanos.recv(right, fd_out=fd_out)
        self.assertEqual(b'foo', msg)
        # No descriptors were sent, so none should arrive.
        self.assertEqual([], fd_out)
        # Send the three stdio descriptors alongside the payload; recv()
        # should append all three to fd_out.
        py_fanos.send(left, b'spam', [sys.stdin.fileno(), sys.stdout.fileno(), sys.stderr.fileno()])
        msg = py_fanos.recv(right, fd_out=fd_out)
        self.assertEqual(b'spam', msg)
        self.assertEqual(3, len(fd_out))
        print(fd_out)
        left.close()
        # Closing the writer makes recv() return None -- clean EOF.
        msg = py_fanos.recv(right)
        self.assertEqual(None, msg)  # Valid EOF
        right.close()
class InvalidMessageTests(unittest.TestCase):
    """COPIED to native/fanos_test.py."""

    def _assert_recv_raises(self, payload, close_before_recv=False):
        """Write raw malformed bytes and assert py_fanos.recv() rejects them.

        Replaces four copies of identical try/except/else boilerplate with
        the assertRaises idiom.

        :param payload: malformed frame bytes to write to the socket
        :param close_before_recv: close the writer first so recv() sees EOF
            in the middle of the frame
        """
        left, right = socket.socketpair()
        try:
            left.send(payload)
            if close_before_recv:
                left.close()
            with self.assertRaises(ValueError):
                py_fanos.recv(right)
        finally:
            if not close_before_recv:
                left.close()
            right.close()

    def testInvalidColon(self):
        # A lone colon; a valid frame looks like 3:foo,
        self._assert_recv_raises(b':')

    def testInvalidDigits(self):
        # EOF in the middle of the length prefix.
        self._assert_recv_raises(b'34', close_before_recv=True)

    def testInvalidMissingColon(self):
        self._assert_recv_raises(b'34foo', close_before_recv=True)

    def testInvalidMissingComma(self):
        # A short payload (b'3:fo') would block indefinitely, so only the
        # missing trailing comma is exercised here.
        self._assert_recv_raises(b'3:foo', close_before_recv=True)


if __name__ == '__main__':
    unittest.main()
|
oilshell/blog-code
|
fd-passing/py_fanos_test.py
|
Python
|
apache-2.0
| 2,191
| 0.01141
|
# Copyright 2012 NetApp
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Interface for shares extension."""
try:
from urllib import urlencode # noqa
except ImportError:
from urllib.parse import urlencode # noqa
from manilaclient import api_versions
from manilaclient import base
from manilaclient.common import constants
from manilaclient.openstack.common.apiclient import base as common_base
class ShareSnapshot(common_base.Resource):
    """Represent a snapshot of a share.

    All mutating calls delegate to the owning :class:`ShareSnapshotManager`.
    """
    def __repr__(self):
        return "<ShareSnapshot: %s>" % self.id
    def update(self, **kwargs):
        """Update this snapshot."""
        self.manager.update(self, **kwargs)
    def reset_state(self, state):
        """Update the snapshot with the provided state."""
        self.manager.reset_state(self, state)
    def delete(self):
        """Delete this snapshot."""
        self.manager.delete(self)
    def force_delete(self):
        """Delete the specified snapshot ignoring its current state."""
        self.manager.force_delete(self)
class ShareSnapshotManager(base.ManagerWithFind):
    """Manage :class:`ShareSnapshot` resources."""
    resource_class = ShareSnapshot
    def create(self, share, force=False, name=None, description=None):
        """Create a snapshot of the given share.
        :param share: The share to snapshot: a :class:`Share` instance or ID.
        :param force: If force is True, create a snapshot even if the
            share is busy. Default is False.
        :param name: Name of the snapshot
        :param description: Description of the snapshot
        :rtype: :class:`ShareSnapshot`
        """
        body = {'snapshot': {'share_id': common_base.getid(share),
                             'force': force,
                             'name': name,
                             'description': description}}
        return self._create('/snapshots', body, 'snapshot')
    def get(self, snapshot):
        """Get a snapshot.
        :param snapshot: The :class:`ShareSnapshot` instance or string with ID
            of snapshot to get.
        :rtype: :class:`ShareSnapshot`
        """
        snapshot_id = common_base.getid(snapshot)
        return self._get('/snapshots/%s' % snapshot_id, 'snapshot')
    def list(self, detailed=True, search_opts=None, sort_key=None,
             sort_dir=None):
        """Get a list of snapshots of shares.
        :param detailed: Whether to request the detailed listing endpoint.
        :param search_opts: Search options to filter out shares.
        :param sort_key: Key to be sorted.
        :param sort_dir: Sort direction, should be 'desc' or 'asc'.
        :rtype: list of :class:`ShareSnapshot`
        """
        if search_opts is None:
            search_opts = {}
        if sort_key is not None:
            if sort_key in constants.SNAPSHOT_SORT_KEY_VALUES:
                search_opts['sort_key'] = sort_key
            else:
                raise ValueError(
                    'sort_key must be one of the following: %s.'
                    % ', '.join(constants.SNAPSHOT_SORT_KEY_VALUES))
        if sort_dir is not None:
            if sort_dir in constants.SORT_DIR_VALUES:
                search_opts['sort_dir'] = sort_dir
            else:
                raise ValueError(
                    'sort_dir must be one of the following: %s.'
                    % ', '.join(constants.SORT_DIR_VALUES))
        if search_opts:
            # Drop empty values and sort for a deterministic query string.
            query_string = urlencode(
                sorted([(k, v) for (k, v) in list(search_opts.items()) if v]))
            if query_string:
                query_string = "?%s" % (query_string,)
        else:
            query_string = ''
        if detailed:
            path = "/snapshots/detail%s" % (query_string,)
        else:
            path = "/snapshots%s" % (query_string,)
        return self._list(path, 'snapshots')
    def delete(self, snapshot):
        """Delete a snapshot of a share.
        :param snapshot: The :class:`ShareSnapshot` to delete.
        """
        self._delete("/snapshots/%s" % common_base.getid(snapshot))
    def _do_force_delete(self, snapshot, action_name="force_delete"):
        """Delete the specified snapshot ignoring its current state."""
        return self._action(action_name, common_base.getid(snapshot))
    # The force_delete/reset_state pairs below deliberately redefine the
    # same name: @api_versions.wraps appears to select the implementation by
    # requested microversion range, and the `# noqa` marks the intentional
    # redefinition for the linter.
    @api_versions.wraps("1.0", "2.6")
    def force_delete(self, snapshot):
        return self._do_force_delete(snapshot, "os-force_delete")
    @api_versions.wraps("2.7")  # noqa
    def force_delete(self, snapshot):
        return self._do_force_delete(snapshot, "force_delete")
    def update(self, snapshot, **kwargs):
        """Update a snapshot.
        :param snapshot: The :class:`ShareSnapshot` instance or string with ID
            of snapshot to update.
        :rtype: :class:`ShareSnapshot`
        """
        # No-op (returns None) when nothing was passed to change.
        if not kwargs:
            return
        body = {'snapshot': kwargs, }
        snapshot_id = common_base.getid(snapshot)
        return self._update("/snapshots/%s" % snapshot_id, body)
    def _do_reset_state(self, snapshot, state, action_name="reset_status"):
        """Update the specified share snapshot with the provided state."""
        return self._action(action_name, snapshot, {"status": state})
    @api_versions.wraps("1.0", "2.6")
    def reset_state(self, snapshot, state):
        return self._do_reset_state(snapshot, state, "os-reset_status")
    @api_versions.wraps("2.7")  # noqa
    def reset_state(self, snapshot, state):
        return self._do_reset_state(snapshot, state, "reset_status")
    def _action(self, action, snapshot, info=None, **kwargs):
        """Perform a snapshot 'action'."""
        body = {action: info}
        self.run_hooks('modify_body_for_action', body, **kwargs)
        url = '/snapshots/%s/action' % common_base.getid(snapshot)
        return self.api.client.post(url, body=body)
|
sniperganso/python-manilaclient
|
manilaclient/v2/share_snapshots.py
|
Python
|
apache-2.0
| 6,363
| 0
|
#!/usr/bin/env python
"""Run pytest with coverage and generate an html report."""
from sys import argv
from os import system as run
# To run a specific file with debug logging prints:
# py -3 -m pytest test_can.py --log-cli-format="%(asctime)s.%(msecs)d %(levelname)s: %(message)s (%(filename)s:%(lineno)d)" --log-cli-level=debug
def main():  # noqa
    """Run pytest under coverage, then emit an html report."""
    # Default: measure every source file one directory up.
    target = ''
    coverage_scope = '../*'
    if len(argv) > 1:
        target = argv[1]
        # A pytest node id like "file.py::test_x" narrows coverage to
        # just that file.
        if ':' in target:
            coverage_scope = target.split('::')[0]
    extra = ' '.join(argv[2:])
    cmd = ('python -m coverage run --include={} --omit=./* '
           '-m pytest {} {}').format(coverage_scope, target, extra)
    run(cmd)
    # Build the html report; -i ignores files coverage cannot resolve.
    run('python -m coverage html -i')
if __name__ == '__main__':
    main()
|
cmcerove/pyvxl
|
pyvxl/tests/run.py
|
Python
|
mit
| 884
| 0.002262
|
from uuid import uuid4, UUID
from behave import given, when, then
from formencode import Invalid, validators
@given("I made a Device linking request")
@given("I have made a Device linking request")
@when("I make a Device linking request")
def make_device_linking_request(context):
    """Start a linking request for a fresh random user in the directory."""
    current_directory = context.entity_manager.get_current_directory()
    context.directory_device_manager.create_linking_request(
        user_identifier=str(uuid4()),
        directory_id=current_directory.id
    )


@then("the Device linking response contains a valid QR Code URL")
def linking_response_contains_valid_qr_code_url(context):
    """Assert that the linking response's qrcode field parses as a URL."""
    try:
        validators.URL().to_python(
            context.entity_manager.get_current_linking_response().qrcode
        )
    except Invalid as e:
        raise Exception("Could not parse QR Code as URL: %s" % e)


@then("the Device linking response contains a valid Linking Code")
def linking_response_contains_valid_linking_code(context):
    """Assert that the linking response carries a non-empty linking code."""
    code = context.entity_manager.get_current_linking_response().code
    if not code:
        raise Exception("Linking code was not valid: %s" % code)


@then("the Device linking response contains a valid Device ID")
def linking_response_contains_valid_device_id(context):
    """Assert that the linking response's device_id is a well-formed UUID."""
    # Renamed: this was a second definition of
    # linking_response_contains_valid_linking_code, silently shadowing the
    # one above (behave registers steps via the decorator, so the step
    # text still maps to this implementation).
    device_id = context.entity_manager.get_current_linking_response().device_id
    try:
        if not device_id:
            raise ValueError
        UUID(device_id)
    except ValueError:
        raise Exception("Device ID was not valid: %s" % device_id)


@given("I retrieve the Devices list for the current User")
@when("I retrieve the Devices list for the current User")
def retrieve_devices_list_for_current_user(context):
    """Fetch the device list for the directory's current user."""
    current_directory = context.entity_manager.get_current_directory()
    current_user_identifier = context.directory_device_manager.\
        current_user_identifier
    context.directory_device_manager.retrieve_user_devices(
        current_user_identifier, current_directory.id)


@when("I retrieve the Devices list for the user \"{user_identifier}\"")
def retrieve_devices_list_for_named_user(context, user_identifier):
    """Fetch the device list for an explicitly named user."""
    # Renamed from a duplicate of retrieve_devices_list_for_current_user
    # so both step implementations stay addressable and lint-clean (F811).
    current_directory = context.entity_manager.get_current_directory()
    context.directory_device_manager.retrieve_user_devices(
        user_identifier,
        current_directory.id
    )
@then("the Device List has {count:d} Device")
@then("the Device List has {count:d} Devices")
@then("there should be {count:d} Device in the Devices list")
@then("there should be {count:d} Devices in the Devices list")
def verify_device_list_count(context, count):
    """Assert the cached device list has exactly ``count`` entries."""
    current_device_list = context.entity_manager.get_current_device_list()
    if current_device_list is None or len(current_device_list) != count:
        # Also fixes the doubled word ("length length") in the message.
        raise Exception("Device list length is not %s: %s" % (
            count, current_device_list))


@then("all of the devices should be inactive")
def verify_all_devices_inactive(context):
    """Assert no device in the cached list is active."""
    # Renamed from a duplicate of verify_device_list_count (F811).
    current_device_list = context.entity_manager.get_current_device_list()
    for device in current_device_list:
        if device.status.is_active:
            raise Exception("Device was active: %s" % device)


@then("all of the devices should be active")
def verify_all_devices_active(context):
    """Assert every device in the cached list is active."""
    # Renamed from a duplicate of verify_device_list_count (F811).
    current_device_list = context.entity_manager.get_current_device_list()
    for device in current_device_list:
        if not device.status.is_active:
            raise Exception("Device was not active: %s" % device)
@when("I unlink the Device with the ID \"{device_id}\"")
def unlink_device_with_id(context, device_id):
    """Unlink the device with the given ID for the current user."""
    current_directory = context.entity_manager.get_current_directory()
    current_user_identifier = context.directory_device_manager. \
        current_user_identifier
    context.directory_device_manager.unlink_device(
        device_id,
        current_user_identifier,
        current_directory.id
    )
@when("I unlink the current Device")
def unlink_current_device(context):
    """Unlink the device currently cached by the entity manager."""
    current_directory = context.entity_manager.get_current_directory()
    current_user_identifier = context.directory_device_manager. \
        current_user_identifier
    current_device = context.entity_manager.get_current_device()
    context.directory_device_manager.unlink_device(
        current_device.id,
        current_user_identifier,
        current_directory.id
    )
@when("I attempt to unlink the device with the ID \"{device_id}\"")
def attempt_to_unlink_device_with_id(context, device_id):
    """Try to unlink by ID, stashing any exception for later assertions."""
    current_directory = context.entity_manager.get_current_directory()
    current_user_identifier = context.directory_device_manager. \
        current_user_identifier
    try:
        context.directory_device_manager.unlink_device(
            device_id,
            current_user_identifier,
            current_directory.id
        )
    except Exception as e:
        context.current_exception = e
@when("I attempt to unlink the device from the User Identifier "
      "\"{user_identifier}\"")
def attempt_to_unlink_user_identifier_device(context, user_identifier):
    """Try to unlink a random device for the given user; stash exceptions."""
    current_directory = context.entity_manager.get_current_directory()
    try:
        context.directory_device_manager.unlink_device(
            str(uuid4()),
            user_identifier,
            current_directory.id
        )
    except Exception as e:
        context.current_exception = e
# Device manager steps
@given("I have a linked device")
def link_device(context):
    """Create a linking request and immediately link the device."""
    context.execute_steps(u'''
    Given I made a Device linking request
    When I link my device
    ''')


@when("I link my device")
def link_physical_device(context):
    """Link the sample-app device using the current linking code."""
    sdk_key = context.entity_manager.get_current_directory_sdk_keys()[0]
    context.sample_app_device_manager.set_sdk_key(sdk_key)
    linking_code = context.entity_manager.get_current_linking_response().code
    context.sample_app_device_manager.link_device(linking_code)
    # We should now be on the home page if everything succeeded
    context.appium_device_manager.get_scrollable_element_by_text("Auth Methods")


@when("I link my physical device with the name \"{device_name}\"")
def link_device_with_name(context, device_name):
    """Link the device under a caller-supplied device name."""
    # NOTE(review): sdk_key is fetched but never used here (no set_sdk_key
    # call, unlike link_physical_device above) -- confirm before changing.
    sdk_key = context.entity_manager.get_current_directory_sdk_keys()[0]
    linking_code = context.entity_manager.get_current_linking_response().code
    context.sample_app_device_manager.link_device(linking_code,
                                                  device_name=device_name)


@when("I approve the auth request")
def approve_auth_request(context):
    """Approve the pending auth request on the device."""
    context.sample_app_device_manager.approve_request()


@when("I deny the auth request")
def deny_auth_request(context):
    """Deny the pending auth request on the device."""
    context.sample_app_device_manager.deny_request()


@when("I receive the auth request and acknowledge the failure message")
def acknowledge_auth_request_failure(context):
    """Receive the auth request and dismiss its failure message."""
    # Renamed from a second "deny_auth_request" definition, which shadowed
    # the one above at module level (behave registers steps via the
    # decorator, so the step text still maps here).
    context.sample_app_device_manager.receive_and_acknowledge_auth_failure()


@when("I make a Device linking request with a TTL of {ttl:d} seconds")
def make_device_linking_request_with_ttl(context, ttl):
    """Start a linking request whose code expires after ``ttl`` seconds."""
    # Renamed from the generic "step_impl" for consistency with the rest
    # of this module.
    current_directory = context.entity_manager.get_current_directory()
    context.directory_device_manager.create_linking_request(
        user_identifier=str(uuid4()),
        directory_id=current_directory.id,
        ttl=ttl
    )
|
iovation/launchkey-python
|
features/steps/directory_device_steps.py
|
Python
|
mit
| 7,152
| 0.00028
|
# -*- coding: utf-8 -*-
import sys
from argparse import ArgumentParser
from DatabaseLogin import DatabaseLogin
from GlobalInstaller import GlobalInstaller
from PyQt5 import QtWidgets
from Ui_MainWindow import Ui_MainWindow
# import damit Installer funktioniert. auch wenn diese nicht hier benoetigt werden.
from PyQt5 import QtCore, QtGui
import cx_Oracle
import json
import base64
import urllib
from Crypto.Cipher import AES
from chardet import UniversalDetector
def get_parser():
    """Build the ArgumentParser for the DbInstaller command line.

    Comments below were translated from German; the (German) user-facing
    help strings are left untouched.
    """
    parser = ArgumentParser()
    # Flags that control how the GUI is initialised (which object types
    # are pre-selected for installation).
    parser.add_argument('--inst_synonym', action='store_true', default=False,
                        help=r"Setzt Flag für die Installation von Synonymen.")
    parser.add_argument('--inst_sequence', action='store_true', default=False,
                        help=r"Setzt Flag für die Installation von Sequenzen.")
    parser.add_argument('--inst_tab_save', action='store_true', default=False,
                        help=r"Setzt Flag für die Installation von Tab Save Tabellen.")
    parser.add_argument('--inst_tab', action='store_false', default=True,
                        help=r"Entfernt Flag für die Installation von Tab Tabellen.")
    parser.add_argument('--inst_view', action='store_false', default=True,
                        help=r"Entfernt Flag für die Installation von Views.")
    parser.add_argument('--inst_package', action='store_false', default=True,
                        help=r"Entfernt Flag für die Installation von Packages.")
    parser.add_argument('--inst_sql', action='store_false', default=True,
                        help=r"Entfernt Flag für die Installation von Sqls.")
    # Extended options that pre-populate the GUI fields.
    parser.add_argument('--username', default=r"", help=r"Benutzername der Datenbank Verbindung.")
    parser.add_argument('--password', default=r"", help=r"Passwort der Datenbank Verbindung.")
    parser.add_argument('--connection', default=r"", help=r"Connection der Datenbank Verbindung.")
    parser.add_argument('--svnBasePath', default=r"", help=r"Schreibt Pfad in SVN Basis Pfad.")
    parser.add_argument('--svnKndPath', default=r"", help=r"Schreibt Pfad in SVN Kassen Pfad.")
    parser.add_argument('--installationPath', default=r"", help=r"Schreibt Pfad in Installation Pfad.")
    parser.add_argument('--global_defines_file', default=r"",
                        help=r"Pfad zu einem TAB seperierten File wo die Defines vordefiniert sind.")
    # jsonl_parameters overrides all other parameters.
    parser.add_argument('--jsonl_parameters', type=str, default=r'',
                        help=(r"Übergabe von allen Parameter in einem JSONL Format."
                              "Dieses Format überschreibt alle Parameter."))
    # Options enabling a headless installation without the GUI. Only
    # effective when --hideGui (or --json_file_path) is also given.
    parser.add_argument('--hideGui', action='store_true', default=False, help=r"Startet DB Installer ohne GUI.")
    parser.add_argument('--clean_installation_path', action='store_true', default=False,
                        help=r"Führt Aktion Installationspfad Bereinigen durch. Nur in Kombi-nation von Parameter –-hideGui oder --json_file_path.")
    parser.add_argument('--copy_all_data_to_installation', action='store_true', default=False,
                        help=r"Führt Aktion Dateien ab Pfade Laden durch. Nur in Kombination von Parameter -–hideGui oder --json_file_path.")
    parser.add_argument('--install_objects', action='store_true', default=False,
                        help=r"Führt Aktion Objekte installieren durch. Nur in Kombination von Parameter –-hideGui oder --json_file_path.")
    parser.add_argument('--json_file_path', default=r"",
                        help=(r"Übergabe eines Parameter Files in Jsonl Format."
                              "Zusammen mit den Argumenten für die Aktionen kann damit eine ganze Kette von "
                              "Arbeiten mit einem einzigen Aufruf erledigt werden. "
                              "Arbeiten in einem Jsonl File sind immer ohne Gui "
                              "und schreiben Debug Informationen auf die Konsole."))
    return parser
#
# Main program. Execution starts at this point.
#
if __name__ == "__main__":
    parser = get_parser()
    args = parser.parse_args()
    # Validate the connection data quietly before any work starts.
    dbLogin = DatabaseLogin(userName=args.username, passWord=args.password, connection=args.connection)
    dbLogin.testConnection(printInfo=False)
    globalInstaller = GlobalInstaller(dbLogin=dbLogin, svnBasePath=args.svnBasePath, svnKndPath=args.svnKndPath,
                                      installationPath=args.installationPath, flag_synonym=args.inst_synonym,
                                      flag_sequence=args.inst_sequence, flag_tab_save=args.inst_tab_save,
                                      flag_tab=args.inst_tab, flag_view=args.inst_view, flag_package=args.inst_package,
                                      flag_sql=args.inst_sql, global_defines_file=args.global_defines_file,
                                      jsonl_parameters=args.jsonl_parameters
                                      )
    if len(args.json_file_path) > 0:
        # A jsonl job file always runs without the GUI.
        globalInstaller.workJsonlFile(json_file_path=args.json_file_path,
                                      cleanInstallationPath=args.clean_installation_path,
                                      copy_all_data_to_installation=args.copy_all_data_to_installation,
                                      install_objects=args.install_objects)
    elif args.hideGui:
        # Headless mode: run the requested actions directly,
        # command-line only.
        if args.clean_installation_path:
            globalInstaller.cleanInstallationPath()
        if args.copy_all_data_to_installation:
            globalInstaller.readInstallationObjectFromPath()
            globalInstaller.copyAllData2InstallationPath()
        if args.install_objects:
            globalInstaller.installAllObjects2Database()
    else:
        # Default option: start the GUI.
        app = QtWidgets.QApplication(sys.argv)
        MainWindow = QtWidgets.QMainWindow()
        ui = Ui_MainWindow()
        ui.setupUi(MainWindow)
        # NOTE(review): "connect_user_isgnals" looks like a typo for
        # "...signals", but it must match the actual method name on
        # Ui_MainWindow -- confirm before renaming.
        ui.connect_user_isgnals()
        ui.set_user_variables(globalInstaller=globalInstaller)
        MainWindow.show()
        sys.exit(app.exec_())
|
handbaggerli/DbInstaller
|
Python/DbInstaller.py
|
Python
|
gpl-3.0
| 6,513
| 0.005236
|
"""
Utility classes and functions to handle Virtual Machine creation using libvirt.
:copyright: 2011 Red Hat Inc.
"""
import time
import string
import os
import logging
import fcntl
import re
import shutil
import tempfile
import platform
import aexpect
from avocado.utils import process
from avocado.utils import crypto
from avocado.core import exceptions
from . import error_context
from . import utils_misc
from . import virt_vm
from . import storage
from . import remote
from . import virsh
from . import libvirt_xml
from . import data_dir
from . import xml_utils
from . import utils_selinux
def normalize_connect_uri(connect_uri):
    """
    Turn a Cartesian-config connect_uri value into a canonical libvirt URI.

    :param connect_uri: Cartesian params setting ("default" or an explicit
                        URI)
    :return: Normalized connect_uri
    :raise ValueError: if virsh cannot canonicalize the URI
    """
    if connect_uri == "default":
        canonical = virsh.canonical_uri()
    else:
        canonical = virsh.canonical_uri(uri=connect_uri)
    if canonical:
        return canonical
    raise ValueError("Normalizing connect_uri '%s' failed, is libvirt "
                     "running?" % connect_uri)
def complete_uri(ip_address, protocol=None, port=None):
    """
    Build a complete URI from ip_address combined with the local URI.

    Useful when you need to connect to a remote hypervisor.

    :param ip_address: an ip address or a hostname
    :param protocol: protocol for uri eg: tcp, spice etc.
    :param port: port for the protocol
    :return: a complete uri
    """
    if protocol and port:
        return "%s://%s:%s" % (protocol, ip_address, port)
    # Allow to raise CmdError if canonical_uri is failed
    uri = virsh.canonical_uri(ignore_status=False)
    driver = uri.split(":")[0]
    # The libvirtd daemon's mode (system or session on qemu)
    daemon_mode = uri.split("/")[-1]
    return "%s+ssh://%s/%s" % (driver, ip_address, daemon_mode)
def get_uri_with_transport(uri_type='qemu', transport="", dest_ip=""):
    """
    Return a URI to connect driver on dest with a specified transport.

    :param uri_type: hypervisor key, e.g. 'qemu', 'qemu_session', 'lxc'
    :param transport: The transport type connect to dest.
    :param dest_ip: The ip of destination.
    :raise ValueError: if uri_type is not a known hypervisor key
    """
    uri_map = {'qemu': "qemu:///system",
               'qemu_system': "qemu:///system",
               'qemu_session': "qemu:///session",
               'lxc': "lxc:///",
               'xen': "xen:///",
               'esx': "esx:///"}
    if uri_type not in uri_map:
        raise ValueError("Param uri_type = %s is not supported." % (uri_type))
    # For example:
    # ("qemu:///system")-->("qemu", "system")
    # ("lxc:///")-->("lxc", "")
    driver, dest = uri_map[uri_type].split(":///")
    if transport:
        driver = "%s+%s" % (driver, transport)
        dest = "://%s/%s" % (dest_ip, dest)
    return "%s%s" % (driver, dest)
class VM(virt_vm.BaseVM):
"""
This class handles all basic VM operations for libvirt.
"""
    def __init__(self, name, params, root_dir, address_cache, state=None):
        """
        Initialize the object and set a few attributes.
        :param name: The name of the object
        :param params: A dict containing VM params
                (see method make_create_command for a full description)
        :param root_dir: Base directory for relative filenames
        :param address_cache: A dict that maps MAC addresses to IP addresses
        :param state: If provided, use this as self.__dict__
        """
        if state:
            # Adopt the passed-in state wholesale; used by
            # clone(copy_state=True).
            self.__dict__ = state
        else:
            self.process = None
            self.serial_ports = []
            self.serial_console_log = None
            self.serial_console = None
            self.redirs = {}
            self.vnc_port = None
            self.vnc_autoport = True
            self.pci_assignable = None
            self.netdev_id = []
            self.device_id = []
            self.pci_devices = []
            self.uuid = None
            self.remote_sessions = []
        # These attributes are always (re)set, even when state was adopted.
        self.spice_port = 8000
        self.name = name
        self.params = params
        self.root_dir = root_dir
        self.address_cache = address_cache
        self.vnclisten = "0.0.0.0"
        self.connect_uri = normalize_connect_uri(params.get("connect_uri",
                                                            "default"))
        self.driver_type = virsh.driver(uri=self.connect_uri)
        self.params['driver_type_' + self.name] = self.driver_type
        # virtnet init depends on vm_type/driver_type being set w/in params
        super(VM, self).__init__(name, params)
        logging.info("Libvirt VM '%s', driver '%s', uri '%s'",
                     self.name, self.driver_type, self.connect_uri)
    def is_lxc(self):
        """
        Return True if VM is linux container.
        """
        # Truthy rather than strictly boolean: evaluates to the substring
        # count of "lxc" in the URI (or None/'' when connect_uri is unset).
        return (self.connect_uri and self.connect_uri.count("lxc"))
    def is_qemu(self):
        """
        Return True if VM is a qemu guest.
        """
        return (self.connect_uri and self.connect_uri.count("qemu"))
    def is_xen(self):
        """
        Return True if VM is a xen guest.
        """
        return (self.connect_uri and self.connect_uri.count("xen"))
    def is_esx(self):
        """
        Return True if VM is a esx guest.
        """
        return (self.connect_uri and self.connect_uri.count("esx"))
    def verify_alive(self):
        """
        Make sure the VM is alive.
        :raise VMDeadError: If the VM is dead
        """
        if not self.is_alive():
            raise virt_vm.VMDeadError("Domain %s is inactive" % self.name,
                                      self.state())
    def is_alive(self):
        """
        Return True if VM is alive (delegates to virsh).
        """
        return virsh.is_alive(self.name, uri=self.connect_uri)
    def is_dead(self):
        """
        Return True if VM is dead (delegates to virsh).
        """
        return virsh.is_dead(self.name, uri=self.connect_uri)
    def is_paused(self):
        """
        Return True if the domain state reported by virsh is "paused".
        """
        return (self.state() == "paused")
    def is_persistent(self):
        """
        Return True if VM is persistent (per `virsh dominfo`).
        """
        try:
            dominfo = (virsh.dominfo(self.name,
                                     uri=self.connect_uri).stdout.strip())
            return bool(re.search(r"^Persistent:\s+[Yy]es", dominfo,
                                  re.MULTILINE))
        except process.CmdError:
            # dominfo failed (e.g. domain undefined): report not persistent.
            return False
    def is_autostart(self):
        """
        Return True if VM is set to autostart (per `virsh dominfo`).
        """
        try:
            dominfo = (virsh.dominfo(self.name,
                                     uri=self.connect_uri).stdout.strip())
            return bool(re.search(r"^Autostart:\s+enable", dominfo,
                                  re.MULTILINE))
        except process.CmdError:
            return False
    def exists(self):
        """
        Return True if VM exists.
        """
        return virsh.domain_exists(self.name, uri=self.connect_uri)
    def undefine(self, options=None):
        """
        Undefine the VM.
        :param options: extra options passed through to `virsh undefine`
        :return: True on success, False on failure (the error is logged)
        """
        try:
            virsh.undefine(self.name, options=options, uri=self.connect_uri,
                           ignore_status=False)
        # Python 2 except syntax below -- this module targets Python 2.
        except process.CmdError, detail:
            logging.error("Undefined VM %s failed:\n%s", self.name, detail)
            return False
        return True
    def define(self, xml_file):
        """
        Define the VM from a libvirt domain XML file.
        :param xml_file: path to the domain XML definition
        :return: True on success, False on failure (the error is logged)
        """
        if not os.path.exists(xml_file):
            logging.error("File %s not found." % xml_file)
            return False
        try:
            virsh.define(xml_file, uri=self.connect_uri,
                         ignore_status=False)
        except process.CmdError, detail:
            logging.error("Defined VM from %s failed:\n%s", xml_file, detail)
            return False
        return True
    def state(self):
        """
        Return domain state (stripped stdout of `virsh domstate`).
        """
        return virsh.domstate(self.name, uri=self.connect_uri).stdout.strip()
    def get_id(self):
        """
        Return VM's ID (stripped stdout of `virsh domid`).
        """
        return virsh.domid(self.name, uri=self.connect_uri).stdout.strip()
    def get_xml(self):
        """
        Return VM's xml definition (stripped stdout of `virsh dumpxml`).
        """
        return virsh.dumpxml(self.name, uri=self.connect_uri).stdout.strip()
    def backup_xml(self, active=False):
        """
        Backup the guest's xmlfile.
        :param active: if False, dump the inactive (--inactive) definition
        :return: path of the backup file, or "" on failure
        """
        # Since backup_xml() is not a function for testing,
        # we have to handle the exception here.
        try:
            # NOTE(review): tempfile.mktemp is race-prone; mkstemp would be
            # safer but changes the cleanup contract -- confirm before fixing.
            xml_file = tempfile.mktemp(dir=data_dir.get_tmp_dir())
            if active:
                extra = ""
            else:
                extra = "--inactive"
            virsh.dumpxml(self.name, extra=extra,
                          to_file=xml_file, uri=self.connect_uri)
            return xml_file
        except Exception, detail:
            if os.path.exists(xml_file):
                os.remove(xml_file)
            logging.error("Failed to backup xml file:\n%s", detail)
            return ""
def clone(self, name=None, params=None, root_dir=None, address_cache=None,
copy_state=False):
"""
Return a clone of the VM object with optionally modified parameters.
The clone is initially not alive and needs to be started using create().
Any parameters not passed to this function are copied from the source
VM.
:param name: Optional new VM name
:param params: Optional new VM creation parameters
:param root_dir: Optional new base directory for relative filenames
:param address_cache: A dict that maps MAC addresses to IP addresses
:param copy_state: If True, copy the original VM's state to the clone.
Mainly useful for make_create_command().
"""
if name is None:
name = self.name
if params is None:
params = self.params.copy()
if root_dir is None:
root_dir = self.root_dir
if address_cache is None:
address_cache = self.address_cache
if copy_state:
state = self.__dict__.copy()
else:
state = None
return VM(name, params, root_dir, address_cache, state)
def make_create_command(self, name=None, params=None, root_dir=None):
"""
Generate a libvirt command line. All parameters are optional. If a
parameter is not supplied, the corresponding value stored in the
class attributes is used.
:param name: The name of the object
:param params: A dict containing VM params
:param root_dir: Base directory for relative filenames
:note: The params dict should contain:
mem -- memory size in MBs
cdrom -- ISO filename to use with the qemu -cdrom parameter
extra_params -- a string to append to the qemu command
shell_port -- port of the remote shell daemon on the guest
(SSH, Telnet or the home-made Remote Shell Server)
shell_client -- client program to use for connecting to the
remote shell daemon on the guest (ssh, telnet or nc)
x11_display -- if specified, the DISPLAY environment variable
will be be set to this value for the qemu process (useful for
SDL rendering)
images -- a list of image object names, separated by spaces
nics -- a list of NIC object names, separated by spaces
For each image in images:
drive_format -- string to pass as 'if' parameter for this
image (e.g. ide, scsi)
image_snapshot -- if yes, pass 'snapshot=on' to qemu for
this image
image_boot -- if yes, pass 'boot=on' to qemu for this image
In addition, all parameters required by get_image_filename.
For each NIC in nics:
nic_model -- string to pass as 'model' parameter for this
NIC (e.g. e1000)
"""
# helper function for command line option wrappers
def has_option(help_text, option):
return bool(re.search(r"--%s" % option, help_text, re.MULTILINE))
def has_os_variant(os_text, os_variant):
return bool(re.search(r"%s" % os_variant, os_text, re.MULTILINE))
def has_sub_option(option, sub_option):
option_help_text = process.system_output("%s --%s help" %
(virt_install_binary, option),
verbose=False)
return bool(re.search(r"%s" % sub_option, option_help_text, re.MULTILINE))
# Wrappers for all supported libvirt command line parameters.
# This is meant to allow support for multiple libvirt versions.
# Each of these functions receives the output of 'libvirt --help' as a
# parameter, and should add the requested command line option
# accordingly.
def add_name(help_text, name):
return " --name '%s'" % name
def add_machine_type(help_text, machine_type):
if has_option(help_text, "machine"):
return " --machine %s" % machine_type
else:
return ""
def add_hvm_or_pv(help_text, hvm_or_pv):
if hvm_or_pv == "hvm":
return " --hvm --accelerate"
elif hvm_or_pv == "pv":
return " --paravirt"
else:
logging.warning("Unknown virt type hvm_or_pv, using default.")
return ""
def add_mem(help_text, mem, maxmem=None):
if has_option(help_text, "memory"):
cmd = " --memory=%s" % mem
if maxmem:
if not has_sub_option('memory', 'maxmemory'):
logging.warning("maxmemory option not supported by "
"virt-install")
else:
cmd += ",maxmemory=%s" % maxmem
return cmd
else:
return " --ram=%s" % mem
def add_check_cpu(help_text):
if has_option(help_text, "check-cpu"):
return " --check-cpu"
else:
return ""
def add_smp(help_text, smp, maxvcpus=None, sockets=None,
cores=None, threads=None):
cmd = " --vcpu=%s" % smp
if maxvcpus:
cmd += ",maxvcpus=%s" % maxvcpus
if sockets:
cmd += ",sockets=%s" % sockets
if cores:
cmd += ",cores=%s" % cores
if threads:
cmd += ",threads=%s" % threads
return cmd
def add_location(help_text, location):
if has_option(help_text, "location"):
return " --location %s" % location
else:
return ""
def add_cdrom(help_text, filename, index=None):
if has_option(help_text, "cdrom"):
return " --cdrom %s" % filename
else:
return ""
def add_pxe(help_text):
if has_option(help_text, "pxe"):
return " --pxe"
else:
return ""
def add_import(help_text):
if has_option(help_text, "import"):
return " --import"
else:
return ""
def add_controller(model=None):
"""
Add controller option for virt-install command line.
:param model: string, controller model.
:return: string, empty or controller option.
"""
if model == 'virtio-scsi':
return " --controller type=scsi,model=virtio-scsi"
else:
return ""
def check_controller(virt_install_cmd_line, controller):
"""
Check for the controller already available in virt-install
command line.
:param virt_install_cmd_line: string, virt-install command line.
:param controller: string, controller model.
:return: True if succeed of False if failed.
"""
found = False
output = re.findall(
r"controller\stype=(\S+),model=(\S+)", virt_install_cmd_line)
for item in output:
if controller in item[1]:
found = True
break
return found
def add_drive(help_text, filename, pool=None, vol=None, device=None,
bus=None, perms=None, size=None, sparse=False,
cache=None, fmt=None):
cmd = " --disk"
if filename:
cmd += " path=%s" % filename
elif pool:
if vol:
cmd += " vol=%s/%s" % (pool, vol)
else:
cmd += " pool=%s" % pool
if device:
cmd += ",device=%s" % device
if bus:
cmd += ",bus=%s" % bus
if perms:
cmd += ",%s" % perms
if size:
cmd += ",size=%s" % size.rstrip("Gg")
if sparse:
cmd += ",sparse=false"
if fmt:
cmd += ",format=%s" % fmt
if cache:
cmd += ",cache=%s" % cache
return cmd
def add_floppy(help_text, filename):
return " --disk path=%s,device=floppy,ro" % filename
def add_vnc(help_text, vnc_port=None):
if vnc_port:
return " --vnc --vncport=%d" % (vnc_port)
else:
return " --vnc"
def add_vnclisten(help_text, vnclisten):
if has_option(help_text, "vnclisten"):
return " --vnclisten=%s" % (vnclisten)
else:
return ""
def add_sdl(help_text):
if has_option(help_text, "sdl"):
return " --sdl"
else:
return ""
        def add_nographic(help_text):
            # Disable graphics entirely; no has_option() guard because the
            # flag exists on every supported virt-install version.
            return " --nographics"
def add_video(help_text, video_device):
if has_option(help_text, "video"):
return " --video=%s" % (video_device)
else:
return ""
def add_uuid(help_text, uuid):
if has_option(help_text, "uuid"):
return " --uuid %s" % uuid
else:
return ""
def add_os_type(help_text, os_type):
if has_option(help_text, "os-type"):
return " --os-type %s" % os_type
else:
return ""
def add_os_variant(help_text, os_variant):
if has_option(help_text, "os-variant"):
return " --os-variant %s" % os_variant
else:
return ""
def add_pcidevice(help_text, pci_device):
if has_option(help_text, "host-device"):
return " --host-device %s" % pci_device
else:
return ""
def add_soundhw(help_text, sound_device):
if has_option(help_text, "soundhw"):
return " --soundhw %s" % sound_device
else:
return ""
def add_serial(help_text):
if has_option(help_text, "serial"):
return " --serial pty"
else:
return ""
        def add_kernel_cmdline(help_text, cmdline):
            # NOTE(review): single-dash "-append" looks like qemu syntax, not
            # a virt-install long option -- confirm against callers before
            # relying on it.
            return " -append %s" % cmdline
def add_connect_uri(help_text, uri):
if uri and has_option(help_text, "connect"):
return " --connect=%s" % uri
else:
return ""
def add_security(help_text, sec_type, sec_label=None, sec_relabel=None):
"""
Return security options for install command.
"""
if has_option(help_text, "security"):
result = " --security"
if sec_type == 'static':
if sec_label is None:
raise ValueError("Seclabel is not setted for static.")
result += " type=static,label=%s" % (sec_label)
elif sec_type == 'dynamic':
result += " type=dynamic"
else:
raise ValueError("Security type %s is not supported."
% sec_type)
if sec_relabel is not None:
result += ",relabel=%s" % sec_relabel
else:
result = ""
return result
        def add_nic(help_text, nic_params):
            """
            Return additional command line params based on dict-like nic_params.

            :param help_text: virt-install --help output (decides syntax era).
            :param nic_params: dict-like with optional keys 'mac', 'nettype',
                               'netdst' and 'nic_model'.
            """
            mac = nic_params.get('mac')
            nettype = nic_params.get('nettype')
            netdst = nic_params.get('netdst')
            nic_model = nic_params.get('nic_model')
            if nettype:
                result = " --network=%s" % nettype
            else:
                result = ""
            # The presence of --bridge distinguishes the old networking
            # syntax from the combined --network one.
            if has_option(help_text, "bridge"):
                # older libvirt (--network=NATdev --bridge=bridgename
                # --mac=mac)
                if nettype != 'user':
                    result += ':%s' % netdst
                if mac:  # possible to specify --mac w/o --network
                    result += " --mac=%s" % mac
            else:
                # newer libvirt (--network=mynet,model=virtio,mac=00:11)
                if nettype != 'user':
                    result += '=%s' % netdst
                if nettype and nic_model:  # only supported along with nettype
                    result += ",model=%s" % nic_model
                if nettype and mac:
                    result += ',mac=%s' % mac
                elif mac:  # possible to specify --mac w/o --network
                    result += " --mac=%s" % mac
            logging.debug("vm.make_create_command.add_nic returning: %s",
                          result)
            return result
def add_memballoon(help_text, memballoon_model):
"""
Adding memballoon device to the vm.
:param help_text: string, virt-install help text.
:param memballon_model: string, memballoon model.
:return: string, empty or memballoon model option.
"""
if has_option(help_text, "memballoon"):
result = " --memballoon model=%s" % memballoon_model
else:
logging.warning("memballoon is not supported")
result = ""
logging.debug("vm.add_memballoon returning: %s", result)
return result
def add_kernel(help_text, cmdline, kernel_path=None, initrd_path=None,
kernel_args=None):
"""
Adding Custom kernel option to boot.
: param help_text: string, virt-install help text
: param cmdline: string, current virt-install cmdline
: param kernel_path: string, custom kernel path.
: param initrd_path: string, custom initrd path.
: param kernel_args: string, custom boot args.
"""
if has_option(help_text, "boot"):
if "--boot" in cmdline:
result = ","
else:
result = " --boot "
if has_sub_option("boot", "kernel") and kernel_path:
result += "kernel=%s," % kernel_path
if has_sub_option("boot", "initrd") and initrd_path:
result += "initrd=%s," % initrd_path
if has_sub_option("boot", "kernel_args") and kernel_args:
result += "kernel_args=%s," % kernel_args
else:
result = ""
logging.warning("boot option is not supported")
return result.strip(',')
# End of command line option wrappers
if name is None:
name = self.name
if params is None:
params = self.params
if root_dir is None:
root_dir = self.root_dir
# Clone this VM using the new params
vm = self.clone(name, params, root_dir, copy_state=True)
virt_install_binary = utils_misc.get_path(
root_dir,
params.get("virt_install_binary",
"virt-install"))
help_text = process.system_output("%s --help" % virt_install_binary,
verbose=False)
try:
os_text = process.system_output("osinfo-query os --fields short-id", verbose=False)
except process.CmdError:
os_text = process.system_output("%s --os-variant list" %
virt_install_binary,
verbose=False)
# Find all supported machine types, so we can rule out an unsupported
# machine type option passed in the configuration.
hvm_or_pv = params.get("hvm_or_pv", "hvm")
# default to 'uname -m' output
arch_name = params.get("vm_arch_name", platform.machine())
capabs = libvirt_xml.CapabilityXML()
try:
support_machine_type = capabs.guest_capabilities[
hvm_or_pv][arch_name]['machine']
except KeyError, detail:
if detail.args[0] == hvm_or_pv:
raise KeyError("No libvirt support for %s virtualization, "
"does system hardware + software support it?"
% hvm_or_pv)
elif detail.args[0] == arch_name:
raise KeyError("No libvirt support for %s virtualization of "
"%s, does system hardware + software support "
"it?" % (hvm_or_pv, arch_name))
raise
logging.debug("Machine types supported for %s/%s: %s",
hvm_or_pv, arch_name, support_machine_type)
# Start constructing the qemu command
virt_install_cmd = ""
# Set the X11 display parameter if requested
if params.get("x11_display"):
virt_install_cmd += "DISPLAY=%s " % params.get("x11_display")
# Add the qemu binary
virt_install_cmd += virt_install_binary
# set connect uri
virt_install_cmd += add_connect_uri(help_text, self.connect_uri)
# hvm or pv specified by libvirt switch (pv used by Xen only)
if hvm_or_pv:
virt_install_cmd += add_hvm_or_pv(help_text, hvm_or_pv)
# Add the VM's name
virt_install_cmd += add_name(help_text, name)
machine_type = params.get("machine_type")
if machine_type:
if machine_type in support_machine_type:
virt_install_cmd += add_machine_type(help_text, machine_type)
else:
raise exceptions.TestSkipError("Unsupported machine type %s." %
(machine_type))
mem = params.get("mem")
maxmemory = params.get("maxmemory", None)
if mem:
virt_install_cmd += add_mem(help_text, mem, maxmemory)
# TODO: should we do the check before we call ? negative case ?
check_cpu = params.get("use_check_cpu")
if check_cpu:
virt_install_cmd += add_check_cpu(help_text)
smp = params.get("smp")
vcpu_max_cpus = params.get("vcpu_maxcpus")
vcpu_sockets = params.get("vcpu_sockets")
vcpu_cores = params.get("vcpu_cores")
vcpu_threads = params.get("vcpu_threads")
if smp:
virt_install_cmd += add_smp(help_text, smp, vcpu_max_cpus,
vcpu_sockets, vcpu_cores, vcpu_threads)
# TODO: directory location for vmlinuz/kernel for cdrom install ?
location = None
if params.get("medium") == 'url':
location = params.get('url')
elif params.get("medium") == 'kernel_initrd':
# directory location of kernel/initrd pair (directory layout must
# be in format libvirt will recognize)
location = params.get("image_dir")
elif params.get("medium") == 'nfs':
location = "nfs:%s:%s" % (params.get("nfs_server"),
params.get("nfs_dir"))
elif params.get("medium") == 'cdrom':
if params.get("use_libvirt_cdrom_switch") == 'yes':
virt_install_cmd += add_cdrom(
help_text, params.get("cdrom_cd1"))
elif params.get("unattended_delivery_method") == "integrated":
cdrom_path = os.path.join(data_dir.get_data_dir(),
params.get("cdrom_unattended"))
virt_install_cmd += add_cdrom(help_text, cdrom_path)
else:
location = data_dir.get_data_dir()
kernel_dir = os.path.dirname(params.get("kernel"))
kernel_parent_dir = os.path.dirname(kernel_dir)
pxeboot_link = os.path.join(kernel_parent_dir, "pxeboot")
if os.path.islink(pxeboot_link):
os.unlink(pxeboot_link)
if os.path.isdir(pxeboot_link):
logging.info("Removed old %s leftover directory",
pxeboot_link)
shutil.rmtree(pxeboot_link)
os.symlink(kernel_dir, pxeboot_link)
elif params.get("medium") == "import":
virt_install_cmd += add_import(help_text)
if location:
virt_install_cmd += add_location(help_text, location)
# Disable display when vga is disabled (used mainly by machines.cfg)
if params.get("vga") == "none":
virt_install_cmd += add_nographic(help_text)
elif params.get("display") == "vnc":
if params.get("vnc_autoport") == "yes":
vm.vnc_autoport = True
else:
vm.vnc_autoport = False
if not vm.vnc_autoport and params.get("vnc_port"):
vm.vnc_port = int(params.get("vnc_port"))
virt_install_cmd += add_vnc(help_text, vm.vnc_port)
if params.get("vnclisten"):
vm.vnclisten = params.get("vnclisten")
virt_install_cmd += add_vnclisten(help_text, vm.vnclisten)
elif params.get("display") == "sdl":
virt_install_cmd += add_sdl(help_text)
elif params.get("display") == "nographic":
virt_install_cmd += add_nographic(help_text)
video_device = params.get("video_device")
if video_device:
virt_install_cmd += add_video(help_text, video_device)
sound_device = params.get("sound_device")
if sound_device:
virt_install_cmd += add_soundhw(help_text, sound_device)
# if none is given a random UUID will be generated by libvirt
if params.get("uuid"):
virt_install_cmd += add_uuid(help_text, params.get("uuid"))
# selectable OS type
if params.get("use_os_type") == "yes":
virt_install_cmd += add_os_type(help_text, params.get("os_type"))
# selectable OS variant
if params.get("use_os_variant") == "yes":
if not has_os_variant(os_text, params.get("os_variant")):
raise exceptions.TestSkipError("Unsupported OS variant: %s.\n"
"Supported variants: %s" %
(params.get('os_variant'),
os_text))
virt_install_cmd += add_os_variant(
help_text, params.get("os_variant"))
# Add serial console
virt_install_cmd += add_serial(help_text)
# Add memballoon device
memballoon_model = params.get("memballoon_model")
if memballoon_model:
virt_install_cmd += add_memballoon(help_text, memballoon_model)
# If the PCI assignment step went OK, add each one of the PCI assigned
# devices to the command line.
if self.pci_devices:
for pci_id in self.pci_devices:
virt_install_cmd += add_pcidevice(help_text, pci_id)
for image_name in params.objects("images"):
image_params = params.object_params(image_name)
base_dir = image_params.get("images_base_dir",
data_dir.get_data_dir())
filename = storage.get_image_filename(image_params,
base_dir)
if image_params.get("use_storage_pool") == "yes":
filename = None
virt_install_cmd += add_drive(help_text,
filename,
image_params.get("image_pool"),
image_params.get("image_vol"),
image_params.get("image_device"),
image_params.get("image_bus"),
image_params.get("image_perms"),
image_params.get("image_size"),
image_params.get("drive_sparse"),
image_params.get("drive_cache"),
image_params.get("image_format"))
if image_params.get("boot_drive") == "no":
continue
if filename:
libvirt_controller = image_params.get(
"libvirt_controller", None)
_drive_format = image_params.get("drive_format")
if libvirt_controller:
if not check_controller(virt_install_cmd, libvirt_controller):
virt_install_cmd += add_controller(libvirt_controller)
# this will reset the scsi-hd to scsi as we are adding controller
# to mention the drive format
if 'scsi' in _drive_format:
_drive_format = "scsi"
virt_install_cmd += add_drive(help_text,
filename,
None,
None,
None,
_drive_format,
None,
image_params.get("image_size"),
image_params.get("drive_sparse"),
image_params.get("drive_cache"),
image_params.get("image_format"))
unattended_integrated = (params.get('unattended_delivery_method') !=
'integrated')
xen_pv = self.driver_type == 'xen' and params.get('hvm_or_pv') == 'pv'
if unattended_integrated and not xen_pv:
for cdrom in params.objects("cdroms"):
cdrom_params = params.object_params(cdrom)
iso = cdrom_params.get("cdrom")
if params.get("use_libvirt_cdrom_switch") == 'yes':
# we don't want to skip the winutils iso
if not cdrom == 'winutils':
logging.debug(
"Using --cdrom instead of --disk for install")
logging.debug("Skipping CDROM:%s:%s", cdrom, iso)
continue
if params.get("medium") == 'cdrom_no_kernel_initrd':
if iso == params.get("cdrom_cd1"):
logging.debug("Using cdrom or url for install")
logging.debug("Skipping CDROM: %s", iso)
continue
if iso:
iso_path = utils_misc.get_path(root_dir, iso)
iso_image_pool = image_params.get("iso_image_pool")
iso_image_vol = image_params.get("iso_image_vol")
virt_install_cmd += add_drive(help_text,
iso_path,
iso_image_pool,
virt_install_cmd,
'cdrom',
None,
None,
None,
None,
None,
None)
# We may want to add {floppy_otps} parameter for -fda
# {fat:floppy:}/path/. However vvfat is not usually recommended.
# Only support to add the main floppy if you want to add the second
# one please modify this part.
floppy = params.get("floppy_name")
if floppy:
floppy = utils_misc.get_path(data_dir.get_data_dir(), floppy)
virt_install_cmd += add_drive(help_text, floppy,
None,
None,
'floppy',
None,
None,
None,
None,
None,
None)
# setup networking parameters
for nic in vm.virtnet:
# make_create_command can be called w/o vm.create()
nic = vm.add_nic(**dict(nic))
logging.debug("make_create_command() setting up command for"
" nic: %s" % str(nic))
virt_install_cmd += add_nic(help_text, nic)
if params.get("use_no_reboot") == "yes":
virt_install_cmd += " --noreboot"
if params.get("use_autostart") == "yes":
virt_install_cmd += " --autostart"
if params.get("virt_install_debug") == "yes":
virt_install_cmd += " --debug"
emulator_path = params.get("emulator_path", None)
if emulator_path:
if not has_sub_option('boot', 'emulator'):
logging.warning("emulator option not supported by virt-install")
else:
virt_install_cmd += " --boot emulator=%s" % emulator_path
kernel = params.get("kernel", None)
initrd = params.get("initrd", None)
kernel_args = params.get("kernel_args", None)
if (kernel or initrd) and kernel_args:
virt_install_cmd += add_kernel(help_text, virt_install_cmd, kernel,
initrd, kernel_args)
# bz still open, not fully functional yet
if params.get("use_virt_install_wait") == "yes":
virt_install_cmd += (" --wait %s" %
params.get("virt_install_wait_time"))
kernel_params = params.get("kernel_params")
if kernel_params:
virt_install_cmd += " --extra-args '%s'" % kernel_params
virt_install_cmd += " --noautoconsole"
sec_type = params.get("sec_type", None)
if sec_type:
sec_label = params.get("sec_label", None)
sec_relabel = params.get("sec_relabel", None)
virt_install_cmd += add_security(help_text, sec_type=sec_type,
sec_label=sec_label,
sec_relabel=sec_relabel)
return virt_install_cmd
def get_serial_console_filename(self, name):
"""
Return the serial console filename.
:param name: The serial port name.
"""
return "serial-%s-%s-%s.log" % (name, self.name,
utils_misc.generate_random_string(4))
def get_serial_console_filenames(self):
"""
Return a list of all serial console filenames
(as specified in the VM's params).
"""
return [self.get_serial_console_filename(_) for _ in
self.params.objects("serials")]
def create_serial_console(self):
"""
Establish a session with the serial console.
The libvirt version uses virsh console to manage it.
"""
if not self.serial_ports:
for serial in self.params.objects("serials"):
self.serial_ports.append(serial)
if self.serial_console is None:
try:
cmd = 'virsh'
if self.connect_uri:
cmd += ' -c %s' % self.connect_uri
cmd += (" console %s %s" % (self.name, self.serial_ports[0]))
except IndexError:
raise virt_vm.VMConfigMissingError(self.name, "serial")
output_func = utils_misc.log_line # Because qemu-kvm uses this
# Because qemu-kvm hard-codes this
output_filename = self.get_serial_console_filename(self.serial_ports[0])
output_params = (output_filename,)
prompt = self.params.get("shell_prompt", "[\#\$]")
self.serial_console = aexpect.ShellSession(command=cmd, auto_close=False,
output_func=output_func,
output_params=output_params,
prompt=prompt)
# Cause serial_console.close() to close open log file
self.serial_console.set_log_file(output_filename)
self.serial_console_log = os.path.join(utils_misc.get_log_file_dir(),
output_filename)
def set_root_serial_console(self, device, remove=False):
"""
Allow or ban root to login through serial console.
:param device: device to set root login
:param allow_root: do remove operation
"""
try:
session = self.login()
except (remote.LoginError, virt_vm.VMError), e:
logging.debug(e)
else:
try:
securetty_output = session.cmd_output("cat /etc/securetty")
devices = str(securetty_output).strip().splitlines()
if device not in devices:
if not remove:
session.sendline("echo %s >> /etc/securetty" % device)
else:
if remove:
session.sendline("sed -i -e /%s/d /etc/securetty"
% device)
logging.debug("Set root login for %s successfully.", device)
return True
finally:
session.close()
logging.debug("Set root login for %s failed.", device)
return False
def set_kernel_console(self, device, speed=None, remove=False):
"""
Set kernel parameter for given console device.
:param device: a console device
:param speed: speed of serial console
:param remove: do remove operation
"""
try:
session = self.login()
except (remote.LoginError, virt_vm.VMError), e:
logging.debug(e)
else:
try:
grub = "/boot/grub/grub.conf"
if not session.cmd_status("ls /boot/grub2/grub.cfg"):
grub = "/boot/grub2/grub.cfg"
kernel_params = "console=%s" % device
if speed is not None:
kernel_params += ",%s" % speed
output = session.cmd_output("cat %s" % grub)
if not re.search("console=%s" % device, output):
if not remove:
session.sendline("sed -i -e \'s/vmlinuz-.*/& %s/g\'"
" %s; sync" % (kernel_params, grub))
else:
if remove:
session.sendline("sed -i -e \'s/console=%s\w*\s//g\'"
" %s; sync" % (device, grub))
logging.debug("Set kernel params for %s successfully.", device)
return True
finally:
session.close()
logging.debug("Set kernel params for %s failed.", device)
return False
def set_kernel_param(self, parameter, value=None, remove=False):
"""
Set a specific kernel parameter.
:param option: A kernel parameter to set.
:param value: The value of the parameter to be set.
:param remove: Remove the parameter if True.
:return: True if succeed of False if failed.
"""
if self.is_dead():
logging.error("Can't set kernel param on a dead VM.")
return False
session = self.wait_for_login()
try:
grub_path = utils_misc.get_bootloader_cfg(session)
if not grub_path:
return False
grub_text = session.cmd_output("cat %s" % grub_path)
kernel_lines = [l.strip() for l in grub_text.splitlines()
if re.match(r"\s*(linux|kernel).*", l)]
if not kernel_lines:
logging.error("Can't find any kernel lines in grub "
"file %s:\n%s" % (grub_path, grub_text))
return False
for line in kernel_lines:
line = line.replace('\t', r'\t')
if remove:
new_string = ""
else:
if value is None:
new_string = parameter
else:
new_string = "%s=%s" % (parameter, value)
patts = [
"\s+(%s=\S*)(\s|$)" % parameter,
"\s+(%s)(\s|$)" % parameter,
]
old_string = ""
for patt in patts:
res = re.search(patt, line)
if res:
old_string = res.group(1)
break
if old_string:
new_line = line.replace(old_string, new_string)
else:
new_line = " ".join((line, new_string))
line_patt = "\s*".join(line.split())
logging.debug("Substituting grub line '%s' to '%s'." %
(line, new_line))
stat_sed, output = session.cmd_status_output(
"sed -i --follow-symlinks -e \"s@%s@%s@g\" %s" %
(line_patt, new_line, grub_path))
if stat_sed:
logging.error("Failed to substitute grub file:\n%s" %
output)
return False
if remove:
logging.debug("Remove kernel params %s successfully.",
parameter)
else:
logging.debug("Set kernel params %s to %s successfully.",
parameter, value)
return True
finally:
session.close()
    def set_boot_kernel(self, index, debug_kernel=False):
        """
        Set default kernel to the second one or to debug kernel

        :param index: index of kernel to set to default
        :param debug_kernel: True if set debug kernel to default
        :return: default kernel name, or None/False on failure
        """
        if self.is_dead():
            logging.error("Can't set kernel param on a dead VM.")
            return False
        session = self.wait_for_login()
        try:
            grub_path = utils_misc.get_bootloader_cfg(session)
            if not grub_path:
                return
            # grub2 lists entries as "menuentry '...'"; grub1 entries are
            # identified by their initramfs lines.
            if "grub2" in grub_path:
                grub = 2
                output = session.cmd("cat %s |grep menuentry" % grub_path)
                kernel_list = re.findall("menuentry '.*?'", output)
            else:
                grub = 1
                output = session.cmd("cat %s |grep initramfs" % grub_path)
                kernel_list = re.findall("-.*", output)
            if index >= len(kernel_list):
                logging.error("Index out of kernel list")
                return
            logging.debug("kernel list of vm:")
            logging.debug(kernel_list)
            if debug_kernel:
                # Override the requested index with the first entry whose
                # name contains "debug".
                index = -1
                logging.info("Setting debug kernel as default")
                for i in range(len(kernel_list)):
                    if "debug" in kernel_list[i]:
                        index = i
                        break
                if index == -1:
                    logging.error("No debug kernel in grub file!")
                    return
            if grub == 1:
                # grub1: rewrite the "default=N" line in place.
                cmd_set_grub = "sed -i 's/default=./default=%d/' " % index
                cmd_set_grub += grub_path
                boot_kernel = kernel_list[index].strip("-")
            else:
                # grub2: extract the quoted title and use grub2-set-default.
                boot_kernel = kernel_list[index].split("'")[1].strip("'")
                cmd_set_grub = 'grub2-set-default %d' % index
            session.cmd(cmd_set_grub)
            return boot_kernel
        finally:
            session.close()
def has_swap(self):
"""
Check if there is any active swap partition/file.
:return : True if swap is on or False otherwise.
"""
if self.is_dead():
logging.error("Can't check swap on a dead VM.")
return False
session = self.wait_for_login()
try:
cmd = "swapon -s"
output = session.cmd_output(cmd)
if output.strip():
return True
return False
finally:
session.close()
def create_swap_partition(self, swap_path=None):
"""
Make a swap partition and active it.
A cleanup_swap() should be call after use to clean up
the environment changed.
:param swap_path: Swap image path.
"""
if self.is_dead():
logging.error("Can't create swap on a dead VM.")
return False
if not swap_path:
swap_path = os.path.join(data_dir.get_tmp_dir(), "swap_image")
swap_size = self.get_used_mem()
process.run("qemu-img create %s %s" % (swap_path, swap_size * 1024))
self.created_swap_path = swap_path
device = self.attach_disk(swap_path, extra="--persistent")
session = self.wait_for_login()
try:
dev_path = "/dev/" + device
session.cmd_status("mkswap %s" % dev_path)
session.cmd_status("swapon %s" % dev_path)
self.set_kernel_param("resume", dev_path)
return True
finally:
session.close()
logging.error("Failed to create a swap partition.")
return False
def create_swap_file(self, swapfile='/swapfile'):
"""
Make a swap file and active it through a session.
A cleanup_swap() should be call after use to clean up
the environment changed.
:param swapfile: Swap file path in VM to be created.
"""
if self.is_dead():
logging.error("Can't create swap on a dead VM.")
return False
session = self.wait_for_login()
try:
# Get memory size.
swap_size = self.get_used_mem() / 1024
# Create, change permission, and make a swap file.
cmd = ("dd if=/dev/zero of={1} bs=1M count={0} && "
"chmod 600 {1} && "
"mkswap {1}".format(swap_size, swapfile))
stat_create, output = session.cmd_status_output(cmd)
if stat_create:
logging.error("Fail to create swap file in guest."
"\n%s" % output)
return False
self.created_swap_file = swapfile
# Get physical swap file offset for kernel param resume_offset.
cmd = "filefrag -v %s" % swapfile
output = session.cmd_output(cmd)
# For compatibility of different version of filefrag
# Sample output of 'filefrag -v /swapfile'
# On newer version:
# Filesystem type is: 58465342
# File size of /swapfile is 1048576000 (256000 blocks of 4096 bytes)
# ext: logical_offset: physical_offset: length: expected: flags:
# 0: 0.. 65519: 395320.. 460839: 65520:
# ...
# On older version:
# Filesystem type is: ef53
# File size of /swapfile is 1048576000 (256000 blocks, blocksize 4096)
# ext logical physical expected length flags
# 0 0 2465792 32768
# ...
offset_line = output.splitlines()[3]
if '..' in offset_line:
offset = offset_line.split()[3].rstrip('..')
else:
offset = offset_line.split()[2]
# Get physical swap file device for kernel param resume.
cmd = "df %s" % swapfile
output = session.cmd_output(cmd)
# Sample output of 'df /swapfile':
# Filesystem 1K-blocks Used Available Use% Mounted on
# /dev/vdb 52403200 15513848 36889352 30% /
device = output.splitlines()[1].split()[0]
# Set kernel parameters.
self.set_kernel_param("resume", device)
self.set_kernel_param("resume_offset", offset)
finally:
session.close()
self.reboot()
session = self.wait_for_login()
try:
# Activate a swap file.
cmd = "swapon %s" % swapfile
stat_swapon, output = session.cmd_status_output(cmd)
if stat_create:
logging.error("Fail to activate swap file in guest."
"\n%s" % output)
return False
finally:
session.close()
if self.has_swap():
logging.debug("Successfully created swapfile %s." % swapfile)
return True
else:
logging.error("Failed to create swap file.")
return False
def cleanup_swap(self):
"""
Cleanup environment changed by create_swap_partition() or
create_swap_file().
"""
if self.is_dead():
logging.error("Can't cleanup swap on a dead VM.")
return False
# Remove kernel parameters.
self.set_kernel_param("resume", remove=True)
self.set_kernel_param("resume_offset", remove=True)
# Deactivate swap partition/file.
session = self.wait_for_login()
try:
session.cmd_status("swapoff -a")
if "created_swap_file" in dir(self):
session.cmd_status("rm -f %s" % self.created_swap_file)
del self.created_swap_file
finally:
session.close()
# Cold unplug attached swap disk
if self.shutdown():
if "created_swap_device" in dir(self):
self.detach_disk(
self.created_swap_device, extra="--persistent")
del self.created_swap_device
if "created_swap_path" in dir(self):
os.remove(self.created_swap_path)
del self.created_swap_path
def set_console_getty(self, device, getty="mgetty", remove=False):
"""
Set getty for given console device.
:param device: a console device
:param getty: getty type: agetty, mgetty and so on.
:param remove: do remove operation
"""
try:
session = self.login()
except (remote.LoginError, virt_vm.VMError), e:
logging.debug(e)
else:
try:
# Only configurate RHEL5 and below
regex = "gettys are handled by"
# As of RHEL7 systemd message is displayed
regex += "|inittab is no longer used when using systemd"
output = session.cmd_output("cat /etc/inittab")
if re.search(regex, output):
logging.debug("Skip setting inittab for %s", device)
return True
getty_str = "co:2345:respawn:/sbin/%s %s" % (getty, device)
matched_str = "respawn:/sbin/*getty %s" % device
if not re.search(matched_str, output):
if not remove:
session.sendline("echo %s >> /etc/inittab" % getty_str)
else:
if remove:
session.sendline("sed -i -e /%s/d "
"/etc/inittab" % matched_str)
logging.debug("Set inittab for %s successfully.", device)
return True
finally:
session.close()
logging.debug("Set inittab for %s failed.", device)
return False
def cleanup_serial_console(self):
"""
Close serial console and associated log file
"""
if self.serial_console is not None:
if self.is_lxc():
self.serial_console.sendline("^]")
self.serial_console.close()
self.serial_console = None
self.serial_console_log = None
if hasattr(self, "migration_file"):
try:
os.unlink(self.migration_file)
except OSError:
pass
def wait_for_login(self, nic_index=0, timeout=None,
internal_timeout=None,
serial=False, restart_network=False,
username=None, password=None):
"""
Override the wait_for_login method of virt_vm to support other
guest in libvirt.
If connect_uri is lxc related, we call wait_for_serial_login()
directly, without attempting login it via network.
Other connect_uri, call virt_vm.wait_for_login().
"""
# Set the default value of parameters if user did not use it.
if not timeout:
timeout = super(VM, self).LOGIN_WAIT_TIMEOUT
if not internal_timeout:
internal_timeout = super(VM, self).LOGIN_TIMEOUT
if self.is_lxc():
self.cleanup_serial_console()
self.create_serial_console()
return self.wait_for_serial_login(timeout, internal_timeout,
restart_network,
username, password)
return super(VM, self).wait_for_login(nic_index, timeout,
internal_timeout,
serial, restart_network,
username, password)
@error_context.context_aware
def create(self, name=None, params=None, root_dir=None, timeout=5.0,
           migration_mode=None, mac_source=None, autoconsole=True):
    """
    Start the VM by running a qemu command.
    All parameters are optional. If name, params or root_dir are not
    supplied, the respective values stored as class attributes are used.

    :param name: The name of the object
    :param params: A dict containing VM params
    :param root_dir: Base directory for relative filenames
    :param migration_mode: If supplied, start VM for incoming migration
        using this protocol (either 'tcp', 'unix' or 'exec')
    :param migration_exec_cmd: Command to embed in '-incoming "exec: ..."'
        (e.g. 'gzip -c -d filename') if migration_mode is 'exec'
    :param mac_source: A VM object from which to copy MAC addresses. If not
        specified, new addresses will be generated.

    :raise VMCreateError: If qemu terminates unexpectedly
    :raise VMKVMInitError: If KVM initialization fails
    :raise VMHugePageError: If hugepage initialization fails
    :raise VMImageMissingError: If a CD image is missing
    :raise VMHashMismatchError: If a CD image hash has doesn't match the
        expected hash
    :raise VMBadPATypeError: If an unsupported PCI assignment type is
        requested
    :raise VMPAError: If no PCI assignable devices could be assigned

    NOTE(review): timeout, migration_mode and autoconsole are accepted
    but never referenced in this body — presumably kept for signature
    compatibility with other VM classes; confirm before removing.
    """
    error_context.context("creating '%s'" % self.name)
    self.destroy(free_mac_addresses=False)

    if name is not None:
        self.name = name
    if params is not None:
        self.params = params
    if root_dir is not None:
        self.root_dir = root_dir
    name = self.name
    params = self.params
    root_dir = self.root_dir

    # Verify the md5sum of the ISO images
    for cdrom in params.objects("cdroms"):
        if params.get("medium") == "import":
            break
        cdrom_params = params.object_params(cdrom)
        iso = cdrom_params.get("cdrom")
        # Xen PV guests skip the kickstart ISO check below.
        xen_pv = (self.driver_type == 'xen' and
                  params.get('hvm_or_pv') == 'pv')
        # NOTE(review): os.path.basename(iso) fails if iso is None —
        # the 'if iso' guard only applies further down; confirm cdrom
        # params always define 'cdrom' when medium != 'import'.
        iso_is_ks = os.path.basename(iso) == 'ks.iso'
        if xen_pv and iso_is_ks:
            continue
        if iso:
            iso = utils_misc.get_path(data_dir.get_data_dir(), iso)
            if not os.path.exists(iso):
                raise virt_vm.VMImageMissingError(iso)
            compare = False
            if cdrom_params.get("skip_hash", "no") == "yes":
                logging.debug("Skipping hash comparison")
            elif cdrom_params.get("md5sum_1m"):
                logging.debug("Comparing expected MD5 sum with MD5 sum of "
                              "first MB of ISO file...")
                actual_hash = crypto.hash_file(
                    iso, 1048576, algorithm="md5")
                expected_hash = cdrom_params.get("md5sum_1m")
                compare = True
            elif cdrom_params.get("md5sum"):
                logging.debug("Comparing expected MD5 sum with MD5 sum of "
                              "ISO file...")
                actual_hash = crypto.hash_file(iso, algorithm="md5")
                expected_hash = cdrom_params.get("md5sum")
                compare = True
            elif cdrom_params.get("sha1sum"):
                logging.debug("Comparing expected SHA1 sum with SHA1 sum "
                              "of ISO file...")
                actual_hash = crypto.hash_file(iso, algorithm="sha1")
                expected_hash = cdrom_params.get("sha1sum")
                compare = True
            if compare:
                if actual_hash == expected_hash:
                    logging.debug("Hashes match")
                else:
                    raise virt_vm.VMHashMismatchError(actual_hash,
                                                      expected_hash)

    # Make sure the following code is not executed by more than one thread
    # at the same time
    lockfilename = os.path.join(data_dir.get_tmp_dir(),
                                "libvirt-autotest-vm-create.lock")
    lockfile = open(lockfilename, "w+")
    fcntl.lockf(lockfile, fcntl.LOCK_EX)

    try:
        # Handle port redirections
        redir_names = params.objects("redirs")
        host_ports = utils_misc.find_free_ports(
            5000, 6000, len(redir_names))
        self.redirs = {}
        for i in range(len(redir_names)):
            redir_params = params.object_params(redir_names[i])
            guest_port = int(redir_params.get("guest_port"))
            self.redirs[guest_port] = host_ports[i]

        # Find available PCI devices
        self.pci_devices = []
        for device in params.objects("pci_devices"):
            self.pci_devices.append(device)

        # Find available VNC port, if needed
        if params.get("display") == "vnc":
            if params.get("vnc_autoport") == "yes":
                self.vnc_port = None
                self.vnc_autoport = True
            else:
                self.vnc_port = utils_misc.find_free_port(5900, 6100)
                self.vnc_autoport = False

        # Find available spice port, if needed
        if params.get("spice"):
            self.spice_port = utils_misc.find_free_port(8000, 8100)

        # Find random UUID if specified 'uuid = random' in config file
        if params.get("uuid") == "random":
            f = open("/proc/sys/kernel/random/uuid")
            self.uuid = f.read().strip()
            f.close()

        # Generate or copy MAC addresses for all NICs
        for nic in self.virtnet:
            nic_params = dict(nic)
            if mac_source is not None:
                # Will raise exception if source doesn't
                # have corresponding nic
                logging.debug("Copying mac for nic %s from VM %s",
                              nic.nic_name, mac_source.name)
                nic_params['mac'] = mac_source.get_mac_address(
                    nic.nic_name)
            # make_create_command() calls vm.add_nic (i.e. on a copy)
            nic = self.add_nic(**nic_params)
            logging.debug('VM.create activating nic %s' % nic)
            self.activate_nic(nic.nic_name)

        # Make qemu command
        install_command = self.make_create_command()

        logging.info("Running libvirt command (reformatted):")
        for item in install_command.replace(" -", " \n -").splitlines():
            logging.info("%s", item)
        try:
            process.run(install_command, verbose=True, shell=True)
        except process.CmdError, details:
            stderr = details.result.stderr.strip()
            # This is a common newcomer mistake, be more helpful...
            if stderr.count('IDE CDROM must use'):
                testname = params.get('name', "")
                if testname.count('unattended_install.cdrom'):
                    if not testname.count('http_ks'):
                        e_msg = ("Install command "
                                 "failed:\n%s \n\nNote: "
                                 "Older versions of "
                                 "libvirt won't work "
                                 "properly with kickstart "
                                 "on cdrom install. "
                                 "Try using the "
                                 "unattended_install.cdrom.http_ks method "
                                 "instead." % details.result)
                        raise exceptions.TestSkipError(e_msg)
            if stderr.count('failed to launch bridge helper'):
                if utils_selinux.is_enforcing():
                    raise exceptions.TestSkipError("SELinux is enabled "
                                                   "and preventing the "
                                                   "bridge helper from "
                                                   "accessing the bridge. "
                                                   "Consider running as "
                                                   "root or placing "
                                                   "SELinux into "
                                                   "permissive mode.")
            # some other problem happened, raise normally
            raise
        # Wait for the domain to be created
        utils_misc.wait_for(func=self.is_alive, timeout=60,
                            text=("waiting for domain %s to start" %
                                  self.name))
        self.uuid = virsh.domuuid(self.name,
                                  uri=self.connect_uri).stdout.strip()
        # Create isa serial ports.
        self.create_serial_console()
    finally:
        fcntl.lockf(lockfile, fcntl.LOCK_UN)
        lockfile.close()
def migrate(self, dest_uri="", option="--live --timeout 60", extra="",
            ignore_status=False, debug=False, virsh_opt=""):
    """
    Migrate this VM to a remote host via virsh.

    :param dest_uri: destination libvirt URI
    :param option: migration options placed before <domain> <desturi>
    :param extra: migration options placed after <domain> <desturi>
    :return: the virsh CmdResult for the migrate command
    """
    logging.info("Migrating VM %s from %s to %s" %
                 (self.name, self.connect_uri, dest_uri))
    result = virsh.migrate(self.name, dest_uri, option, extra,
                           uri=self.connect_uri,
                           ignore_status=ignore_status,
                           debug=debug, virsh_opt=virsh_opt)
    # The console session belongs to the source hypervisor; drop it.
    self.cleanup_serial_console()
    # On success, re-point this object at the new hypervisor.  dest_uri
    # can be empty, so guard against clobbering connect_uri with it.
    migrated = (result.exit_status == 0 and bool(dest_uri))
    if migrated:
        self.connect_uri = dest_uri
        self.create_serial_console()
    return result
def attach_disk(self, source, target=None, prefix="vd", extra="",
                ignore_status=False, debug=False):
    """
    Attach a disk to the VM and return the target device name.

    :param source: source of the disk device
    :param target: target device name; None selects the next free one
    :param prefix: device name prefix used for automatic assignment
    :param extra: additional arguments passed to virsh attach-disk
    :return: the target device name on success, otherwise None
    """
    if target is None:
        # Pick the first <prefix><letter> name not already attached.
        used_targets = self.get_disk_devices()
        for suffix in string.ascii_lowercase:
            target = prefix + suffix
            if target not in used_targets:
                break
    attach_result = virsh.attach_disk(self.name, source, target, extra,
                                      uri=self.connect_uri,
                                      ignore_status=ignore_status,
                                      debug=debug)
    if attach_result.exit_status:
        logging.error("Failed to attach disk %s to VM."
                      "Detail: %s." % (source, attach_result.stderr))
        return None
    return target
def detach_disk(self, target, extra="",
                ignore_status=False, debug=False):
    """
    Detach a disk from the VM.

    :param target: target of the disk device to be detached
    :param extra: additional arguments passed to virsh detach-disk
    :return: the virsh CmdResult object
    """
    detach_result = virsh.detach_disk(self.name, target, extra,
                                      uri=self.connect_uri,
                                      ignore_status=ignore_status,
                                      debug=debug)
    return detach_result
def attach_interface(self, option="", ignore_status=False,
                     debug=False):
    """
    Attach a NIC to the VM via virsh attach-interface.

    :param option: arguments passed through to virsh
    :return: the virsh CmdResult object
    """
    cmd_result = virsh.attach_interface(self.name, option,
                                        uri=self.connect_uri,
                                        ignore_status=ignore_status,
                                        debug=debug)
    return cmd_result
def detach_interface(self, option="", ignore_status=False,
                     debug=False):
    """
    Detach a NIC from the VM via virsh detach-interface.

    :param option: arguments passed through to virsh
    :return: the virsh CmdResult object
    """
    cmd_result = virsh.detach_interface(self.name, option,
                                        uri=self.connect_uri,
                                        ignore_status=ignore_status,
                                        debug=debug)
    return cmd_result
def destroy(self, gracefully=True, free_mac_addresses=True):
    """
    Destroy the VM.

    If gracefully is True, first attempt to shutdown the VM with a shell
    command. If that fails, send SIGKILL to the qemu process.

    :param gracefully: If True, an attempt will be made to end the VM
        using a shell command before trying to end the qemu process
        with a 'quit' or a kill signal.
    :param free_mac_addresses: If vm is undefined with libvirt, also
        release/reset associated mac address
    """
    try:
        # Is it already dead?
        if self.is_alive():
            logging.debug("Destroying VM")
            if self.is_paused():
                # A paused guest cannot process a shutdown command.
                self.resume()
            if (not self.is_lxc() and gracefully and
                    self.params.get("shutdown_command")):
                # Try to destroy with shell command
                logging.debug("Trying to shutdown VM with shell command")
                try:
                    session = self.login()
                except (remote.LoginError, virt_vm.VMError), e:
                    logging.debug(e)
                else:
                    try:
                        # Send the shutdown command
                        session.sendline(
                            self.params.get("shutdown_command"))
                        logging.debug("Shutdown command sent; waiting for VM "
                                      "to go down...")
                        if utils_misc.wait_for(self.is_dead, 60, 1, 1):
                            logging.debug("VM is down")
                            return
                    finally:
                        session.close()
        # Destroy VM directly, as 'ignore_status=True' by default, so destroy
        # a shutoff domain is also acceptable here.
        destroy_opt = ''
        if gracefully:
            destroy_opt = '--graceful'
        virsh.destroy(self.name, destroy_opt, uri=self.connect_uri)
    finally:
        # Always drop the console session, even on the early return above.
        self.cleanup_serial_console()
    if free_mac_addresses:
        if self.is_persistent():
            # Persistent domains keep their definition (and MACs).
            logging.warning("Requested MAC address release from "
                            "persistent vm %s. Ignoring." % self.name)
        else:
            logging.debug("Releasing MAC addresses for vm %s." % self.name)
            for nic_name in self.virtnet.nic_name_list():
                self.virtnet.free_mac_address(nic_name)
def remove(self):
    """
    Undefine and destroy this VM, removing it from the hypervisor.

    :raise virt_vm.VMRemoveError: when virsh undefine fails
    """
    self.destroy(gracefully=True, free_mac_addresses=False)
    # Guests that carry nvram need '--nvram' for undefine to succeed.
    if self.params.get("vir_domain_undefine_nvram") == "yes":
        undefine_options = "--nvram"
    else:
        undefine_options = None
    if not self.undefine(undefine_options):
        raise virt_vm.VMRemoveError("VM '%s' undefine error" % self.name)
    self.destroy(gracefully=False, free_mac_addresses=True)
    logging.debug("VM '%s' was removed", self.name)
def remove_with_storage(self):
    """
    Undefine the VM and delete all of its disk images.

    virsh undefine's --remove-all-storage flag only removes storage
    managed by libvirt; this also unlinks image files that libvirt
    does not manage.
    """
    # Record the source paths before the domain definition disappears.
    sources = [blk['source'] for blk in self.get_disk_devices().values()]
    self.remove()
    for path in sources:
        if os.path.exists(path):
            os.remove(path)
def get_uuid(self):
    """
    Return the VM's UUID, caching it in self.uuid on first retrieval.
    """
    reported = virsh.domuuid(self.name, uri=self.connect_uri).stdout.strip()
    # only overwrite it if it's not set
    if self.uuid is None:
        self.uuid = reported
    return self.uuid
def get_ifname(self, nic_index=0):
    """Host-side interface name lookup is not supported for libvirt VMs."""
    raise NotImplementedError
def get_virsh_mac_address(self, nic_index=0):
    """
    Get the MAC of this VM domain.

    The MAC is read from the 'interface' elements of the live domain
    XML (virsh dumpxml), not from the virtnet bookkeeping.

    :param nic_index: Index of the NIC
    :raise VMMACAddressMissingError: If no MAC address is defined for the
        requested NIC
    :raise exceptions.TestFail: if virsh dumpxml itself fails
    """
    cmd_result = virsh.dumpxml(self.name, uri=self.connect_uri)
    if cmd_result.exit_status:
        raise exceptions.TestFail("dumpxml %s failed.\n"
                                  "Detail: %s.\n" % (self.name, cmd_result))
    thexml = cmd_result.stdout.strip()
    xtf = xml_utils.XMLTreeFile(thexml)
    interfaces = xtf.find('devices').findall('interface')
    # Range check
    try:
        mac = interfaces[nic_index].find('mac').get('address')
        if mac is not None:
            return mac
    except IndexError:
        pass  # Allow other exceptions through
    # IndexError (range check) or mac is None
    raise virt_vm.VMMACAddressMissingError(nic_index)
def get_pid(self):
    """
    Return the VM's PID.

    The PID is read from the driver-specific run file libvirt keeps
    under /var/run/libvirt for every active domain.

    :return: PID as an int; None if the VM is not alive (no PID file)
        or the file cannot be read or parsed.
    :raise ValueError: if the connect URI maps to an unsupported driver.
    """
    if self.is_lxc():
        pid_file = "/var/run/libvirt/lxc/%s.pid" % self.name
    elif self.is_qemu():
        pid_file = "/var/run/libvirt/qemu/%s.pid" % self.name
    elif self.is_esx():
        pid_file = "/var/run/libvirt/esx/%s.pid" % self.name
    # TODO: Add more vm driver type
    else:
        raise ValueError("Unsupport connect uri: %s." % self.connect_uri)
    pid = None
    if os.path.exists(pid_file):
        pid_file_contents = None
        try:
            # Close the file deterministically instead of leaking it.
            with open(pid_file) as pid_fp:
                pid_file_contents = pid_fp.read()
            pid = int(pid_file_contents)
        except IOError:
            logging.error("Could not read %s to get PID", pid_file)
        except (TypeError, ValueError):
            # int() raises ValueError on garbage text; the original code
            # only caught TypeError, which int(str) never raises.
            logging.error("PID file %s has invalid contents: '%s'",
                          pid_file, pid_file_contents)
    else:
        logging.debug("PID file %s not present", pid_file)
    return pid
def get_vcpus_pid(self):
    """
    Return the vcpu thread IDs of this VM, as a list of strings.

    Parsed from the HMP 'info cpus' monitor command output.
    """
    monitor_out = virsh.qemu_monitor_command(self.name, "info cpus",
                                             "--hmp", uri=self.connect_uri)
    return re.findall(r'thread_id=(\d+)', monitor_out.stdout)
def get_shell_pid(self):
    """
    Return the PID of the parent shell process.

    :note: relies on ``self.process.get_pid()`` reporting the parent
        shell's PID.
    """
    return self.process.get_pid()
def get_shared_meminfo(self):
    """
    Return the VM's shared memory usage in MB, or None if the VM is dead.
    """
    if self.is_dead():
        logging.error("Could not get shared memory info from dead VM.")
        return None
    statm_path = "/proc/%d/statm" % self.get_pid()
    # Third field of statm is the shared page count.
    shm_pages = int(open(statm_path).read().split()[2])
    # statm reports pages; convert assuming 4 KiB pages.
    return shm_pages * 4.0 / 1024
def get_cpu_topology_in_cmdline(self):
    """
    Return the VM's cpu topology as parsed from its qemu command line.

    :return: dict with 'sockets', 'cores' and 'threads' keys (string
        values); empty dict when the VM PID cannot be determined.
    """
    cpu_topology = {}
    vm_pid = self.get_pid()
    if vm_pid is None:
        logging.error("Fail to get VM pid")
    else:
        # Use a context manager so the /proc handle is not leaked.
        with open("/proc/%d/cmdline" % vm_pid) as cmdline_fp:
            cmdline = cmdline_fp.read()
        # Raw string for the pattern: '\d' in a plain string is an
        # invalid escape (DeprecationWarning, error in newer Pythons).
        values = re.findall(r"sockets=(\d+),cores=(\d+),threads=(\d+)",
                            cmdline)[0]
        cpu_topology = dict(zip(["sockets", "cores", "threads"], values))
    return cpu_topology
def get_cpu_topology_in_vm(self):
    """
    Return the cpu topology as reported from inside the guest.

    :return: dict with 'sockets', 'cores', 'threads' keys; empty dict
        when no cpu info could be collected.
    """
    cpu_topology = {}
    cpu_info = utils_misc.get_cpu_info(self.wait_for_login())
    if cpu_info:
        for key, info_field in (("sockets", "Socket(s)"),
                                ("cores", "Core(s) per socket"),
                                ("threads", "Thread(s) per core")):
            cpu_topology[key] = cpu_info[info_field]
    return cpu_topology
def activate_nic(self, nic_index_or_name):
    """Placeholder: NIC hot-plug is not implemented for libvirt yet."""
    # TODO: Implement nic hotplugging
    return None
def deactivate_nic(self, nic_index_or_name):
    """Placeholder: NIC hot un-plug is not implemented for libvirt yet."""
    # TODO: Implement nic hot un-plugging
    return None
@error_context.context_aware
def reboot(self, session=None, method="shell", nic_index=0, timeout=240,
           serial=False):
    """
    Reboot the VM and wait for it to come back up by trying to log in until
    timeout expires.

    :param session: A shell session object or None.
    :param method: Reboot method. Can be "shell" (send a shell reboot
        command).
    :param nic_index: Index of NIC to access in the VM, when logging in
        after rebooting.
    :param timeout: Time to wait for login to succeed (after rebooting).
    :param serial: Just use to unify api in virt_vm module.
    :return: A new shell session object.
    :raise virt_vm.VMRebootError: on unknown method or if the guest
        never goes down.
    """
    error_context.base_context("rebooting '%s'" % self.name, logging.info)
    error_context.context("before reboot")
    # Reuse the caller's session when given, otherwise log in fresh.
    session = session or self.login(timeout=timeout)
    error_context.context()
    if method == "shell":
        session.sendline(self.params.get("reboot_command"))
    else:
        raise virt_vm.VMRebootError("Unknown reboot method: %s" % method)
    error_context.context("waiting for guest to go down", logging.info)
    # The guest is considered "down" once the session stops responding.
    if not utils_misc.wait_for(lambda: not
                               session.is_responsive(timeout=30),
                               120, 0, 1):
        raise virt_vm.VMRebootError("Guest refuses to go down")
    session.close()
    error_context.context("logging in after reboot", logging.info)
    return self.wait_for_login(nic_index, timeout=timeout)
def screendump(self, filename, debug=False):
    """
    Capture a screenshot of the VM console into *filename*.

    :return: the virsh CmdResult of the screenshot command
    """
    if debug:
        logging.debug("Requesting screenshot %s" % filename)
    shot_result = virsh.screenshot(self.name, filename, uri=self.connect_uri)
    return shot_result
def start(self, autoconsole=True):
    """
    Start this VM via virsh and wait for the domain to become active.

    :param autoconsole: when True, establish a serial console session
        after the domain starts.
    :raise virt_vm.VMStartError: if virsh start fails or the domain is
        not active within 60 seconds.
    """
    self.uuid = virsh.domuuid(self.name,
                              uri=self.connect_uri).stdout.strip()

    logging.debug("Starting vm '%s'", self.name)
    result = virsh.start(self.name, uri=self.connect_uri)
    if not result.exit_status:
        # Wait for the domain to be created
        has_started = utils_misc.wait_for(func=self.is_alive, timeout=60,
                                          text=("waiting for domain %s "
                                                "to start" % self.name))
        if has_started is None:
            raise virt_vm.VMStartError(self.name, "libvirt domain not "
                                                  "active after start")
        self.uuid = virsh.domuuid(self.name,
                                  uri=self.connect_uri).stdout.strip()
        # Establish a session with the serial console
        if autoconsole:
            self.create_serial_console()
    else:
        raise virt_vm.VMStartError(self.name, result.stderr.strip())

    # Pull in mac addresses from libvirt guest definition
    for index, nic in enumerate(self.virtnet):
        try:
            mac = self.get_virsh_mac_address(index)
            # 'in' instead of the Python-2-only dict.has_key().
            if 'mac' not in nic:
                logging.debug("Updating nic %d with mac %s on vm %s"
                              % (index, mac, self.name))
                nic.mac = mac
            elif nic.mac != mac:
                logging.warning("Requested mac %s doesn't match mac %s "
                                "as defined for vm %s", nic.mac, mac,
                                self.name)
            # TODO: Checkout/Set nic_model, nettype, netdst also
        except virt_vm.VMMACAddressMissingError:
            logging.warning("Nic %d requested by test but not defined for"
                            " vm %s" % (index, self.name))
def wait_for_shutdown(self, count=60):
    """
    Return True on successful domain shutdown.

    Wait for a domain to shutdown, libvirt does not block on domain
    shutdown so we need to watch for successful completion.

    :param count: maximum number of seconds to wait
    :return: True when the domain is dead within the allotted time,
        False otherwise
    """
    # Remember the original budget so elapsed time can be reported.
    timeout = count
    while count > 0:
        # check every 5 seconds
        if count % 5 == 0:
            if virsh.is_dead(self.name, uri=self.connect_uri):
                logging.debug("Shutdown took %d seconds", timeout - count)
                return True
        count -= 1
        time.sleep(1)
        logging.debug("Waiting for guest to shutdown %d", count)
    return False
def shutdown(self):
    """
    Shut down this VM gracefully via virsh.

    :return: True when the domain reaches shut-off state in time,
        False otherwise (including virsh command failures).
    """
    try:
        if self.state() != 'shut off':
            virsh.shutdown(self.name, uri=self.connect_uri)
        if self.wait_for_shutdown():
            logging.debug("VM %s shut down", self.name)
            self.cleanup_serial_console()
            return True
        logging.error("VM %s failed to shut down", self.name)
        return False
    except process.CmdError:
        logging.error("VM %s failed to shut down", self.name)
        return False
def pause(self):
    """
    Suspend ('pause') the domain via virsh.

    :return: True when the VM is (or already was) paused, False when
        the suspend operation raises.
    """
    try:
        state = self.state()
        if state != 'paused':
            # Fixed keyword: the original passed the misspelled
            # 'ignore_statues', so suspend failures were never raised.
            virsh.suspend(
                self.name, uri=self.connect_uri, ignore_status=False)
        return True
    except Exception:
        logging.error("VM %s failed to suspend", self.name)
        return False
def resume(self):
    """
    Resume a suspended domain via virsh.

    :return: True when the VM is alive after resuming, False on
        command failure or if the VM is still not alive.
    """
    try:
        virsh.resume(self.name, ignore_status=False, uri=self.connect_uri)
        if self.is_alive():
            logging.debug("Resumed VM %s", self.name)
            return True
        else:
            return False
    except process.CmdError, detail:
        logging.error("Resume VM %s failed:\n%s", self.name, detail)
        return False
def save_to_file(self, path):
    """
    Save the VM's state to *path* (overrides BaseVM.save_to_file).

    :raise virt_vm.VMStatusError: if the VM is not running beforehand,
        or is still running afterwards.
    :raise virt_vm.VMError: if the virsh save command fails.
    """
    if self.is_dead():
        raise virt_vm.VMStatusError(
            "Cannot save a VM that is %s" % self.state())
    logging.debug("Saving VM %s to %s" % (self.name, path))
    save_result = virsh.save(self.name, path, uri=self.connect_uri)
    if save_result.exit_status:
        raise virt_vm.VMError("Save VM to %s failed.\n"
                              "Detail: %s." % (path, save_result.stderr))
    if self.is_alive():
        raise virt_vm.VMStatusError("VM not shut off after save")
    # The domain is gone; drop the now-dangling console session.
    self.cleanup_serial_console()
def restore_from_file(self, path):
    """
    Restore the VM's state from *path* (overrides BaseVM method).

    :raise virt_vm.VMStatusError: if the VM is already running, or is
        still dead after the restore.
    :raise virt_vm.VMError: if the virsh restore command fails.
    """
    if self.is_alive():
        raise virt_vm.VMStatusError(
            "Can not restore VM that is %s" % self.state())
    logging.debug("Restoring VM from %s" % path)
    restore_result = virsh.restore(path, uri=self.connect_uri)
    if restore_result.exit_status:
        raise virt_vm.VMError("Restore VM from %s failed.\n"
                              "Detail: %s." % (path, restore_result.stderr))
    if self.is_dead():
        raise virt_vm.VMStatusError(
            "VM should not be %s after restore." % self.state())
    # The domain is live again; re-establish its console.
    self.create_serial_console()
def managedsave(self):
    """
    Perform a managed save of the VM's state.

    :raise virt_vm.VMStatusError: if the VM is not running beforehand,
        or is still running afterwards.
    :raise virt_vm.VMError: if the virsh managedsave command fails.
    """
    if self.is_dead():
        raise virt_vm.VMStatusError(
            "Cannot save a VM that is %s" % self.state())
    logging.debug("Managed saving VM %s" % self.name)
    ms_result = virsh.managedsave(self.name, uri=self.connect_uri)
    if ms_result.exit_status:
        raise virt_vm.VMError("Managed save VM failed.\n"
                              "Detail: %s." % ms_result.stderr)
    if self.is_alive():
        raise virt_vm.VMStatusError("VM not shut off after managed save")
    # Domain is shut off; its console session is stale.
    self.cleanup_serial_console()
def pmsuspend(self, target='mem', duration=0):
    """
    Suspend the domain gracefully using power management functions.

    :param target: suspend target ('mem', 'disk' or 'hybrid')
    :param duration: duration in seconds for a timed wakeup
    :raise virt_vm.VMStatusError: if the VM is not running.
    :raise virt_vm.VMError: if the virsh dompmsuspend command fails.
    """
    if self.is_dead():
        raise virt_vm.VMStatusError(
            "Cannot pmsuspend a VM that is %s" % self.state())
    logging.debug("PM suspending VM %s" % self.name)
    pm_result = virsh.dompmsuspend(self.name, target=target,
                                   duration=duration, uri=self.connect_uri)
    if pm_result.exit_status:
        raise virt_vm.VMError("PM suspending VM failed.\n"
                              "Detail: %s." % pm_result.stderr)
    # The guest is asleep; tear down the console session.
    self.cleanup_serial_console()
def pmwakeup(self):
    """
    Wake the domain up from a pmsuspended state.

    :raise virt_vm.VMStatusError: if the VM is not running.
    :raise virt_vm.VMError: if the virsh dompmwakeup command fails.
    """
    if self.is_dead():
        raise virt_vm.VMStatusError(
            "Cannot pmwakeup a VM that is %s" % self.state())
    logging.debug("PM waking up VM %s" % self.name)
    wake_result = virsh.dompmwakeup(self.name, uri=self.connect_uri)
    if wake_result.exit_status:
        raise virt_vm.VMError("PM waking up VM failed.\n"
                              "Detail: %s." % wake_result.stderr)
    # The guest is awake again; re-establish its console.
    self.create_serial_console()
def vcpupin(self, vcpu, cpu_list, options=""):
    """
    Pin the given vcpu to cpu_list.

    :raise exceptions.TestFail: when the virsh vcpupin command fails.
    """
    pin_result = virsh.vcpupin(self.name, vcpu, cpu_list,
                               options, uri=self.connect_uri)
    if pin_result.exit_status:
        raise exceptions.TestFail("Virsh vcpupin command failed.\n"
                                  "Detail: %s.\n" % pin_result)
def dominfo(self):
    """
    Return 'virsh dominfo' output parsed into a dict.

    Each 'Key: value' line becomes one entry.  Lines are split on the
    FIRST colon only, so values that themselves contain colons (e.g.
    SELinux security labels like 'system_u:system_r:svirt_t:s0') are
    kept intact; the original split(':')[-1] kept only the last
    colon-separated fragment of such values.
    """
    output = virsh.dominfo(self.name, uri=self.connect_uri).stdout.strip()
    dominfo_dict = {}
    for line in output.splitlines():
        key, _sep, value = line.partition(':')
        dominfo_dict[key.strip()] = value.strip()
    return dominfo_dict
def vcpuinfo(self):
    """
    Return 'virsh vcpuinfo' output as a list of dicts, one per vcpu.

    Bug fix: the original appended the SAME dict object for every vcpu
    and kept mutating it, so every list entry ended up describing the
    last vcpu.  A fresh dict is now started after each vcpu block.
    """
    output = virsh.vcpuinfo(self.name,
                            uri=self.connect_uri).stdout.strip()
    # Key: word before ':' | value: content after ':' (stripped)
    vcpuinfo_list = []
    vcpuinfo_dict = {}
    for line in output.splitlines():
        key = line.split(':')[0].strip()
        value = line.split(':')[-1].strip()
        vcpuinfo_dict[key] = value
        if key == "CPU Affinity":
            # 'CPU Affinity' closes each vcpu block; store it and start
            # a new dict so entries do not alias one another.
            vcpuinfo_list.append(vcpuinfo_dict)
            vcpuinfo_dict = {}
    return vcpuinfo_list
def get_used_mem(self):
    """Return the VM's current memory in kilobytes (via virsh dominfo)."""
    used = self.dominfo()['Used memory']
    # The value looks like '1048576 KiB'; keep only the number.
    return int(used.split(' ')[0])
def get_blk_devices(self):
    """
    Return all of the VM's block devices as a dict.

    Format: {target: {'type': value, 'device': value, 'source': value}}
    An empty dict is returned when 'virsh domblklist' fails.
    """
    domblkdict = {}
    result = virsh.domblklist(self.name, "--details", ignore_status=True,
                              uri=self.connect_uri)
    if result.exit_status != 0:
        logging.info("Get vm devices failed.")
        return domblkdict
    # Skip the two header lines of the table output.
    for line in result.stdout.strip().splitlines()[2:]:
        fields = line.split(None, 4)
        domblkdict[fields[2]] = {'type': fields[0],
                                 'device': fields[1],
                                 'source': fields[3]}
    return domblkdict
def get_disk_devices(self):
    """Return only the 'disk'-type entries from get_blk_devices()."""
    return dict((target, details)
                for target, details in self.get_blk_devices().items()
                if details['device'] == "disk")
def get_first_disk_devices(self):
    """
    Return the first row of 'virsh domblklist --details' as a dict.

    :return: dict with 'type', 'device', 'target' and 'source' keys,
        or an empty dict when the virsh command fails.
    """
    disk = {}
    result = virsh.domblklist(self.name, "--details", ignore_status=True,
                              uri=self.connect_uri)
    if result.exit_status != 0:
        logging.info("Get vm devices failed.")
    else:
        # Skip the two header lines, then take the first device row.
        rows = result.stdout.strip().splitlines()[2:]
        fields = rows[0].split(None, 4)
        disk = {'type': fields[0],
                'device': fields[1],
                'target': fields[2],
                'source': fields[3]}
    return disk
def get_device_details(self, device_target):
    """
    Return 'virsh domblkinfo' output for *device_target* as a dict.

    An empty dict is returned when the virsh command fails.
    """
    device_details = {}
    result = virsh.domblkinfo(self.name, device_target,
                              uri=self.connect_uri)
    if result.exit_status != 0:
        logging.info("Get vm device details failed.")
        return device_details
    for line in result.stdout.strip().splitlines():
        parts = line.split(":")
        device_details[parts[0].strip()] = parts[-1].strip()
    return device_details
def get_device_size(self, device_target):
    """
    Return (source_path, capacity) for a block device target.

    :return: tuple of (path, size) or None when the target is unknown.
    """
    blk_devices = self.get_blk_devices()
    if device_target not in blk_devices:
        return None
    source_path = blk_devices[device_target]["source"]
    capacity = self.get_device_details(device_target)["Capacity"]
    return source_path, capacity
def get_max_mem(self):
    """Return the VM's maximum memory in kilobytes (via virsh dominfo)."""
    max_mem = self.dominfo()['Max memory']
    # The value looks like '1048576 KiB'; keep only the number.
    return int(max_mem.split(' ')[0])
def domjobabort(self):
    """
    Abort the currently running job for this VM.

    :return: True on success, False otherwise.
    """
    # Pass this VM's connect URI; the original omitted it and therefore
    # acted on the default libvirt connection instead of self's.
    result = virsh.domjobabort(self.name, ignore_status=True,
                               uri=self.connect_uri)
    if result.exit_status:
        logging.debug(result)
        return False
    return True
def dump(self, path, option=""):
    """
    Dump this domain's core to *path*.

    :raise: exceptions.TestFail if dump fail.
    """
    dump_result = virsh.dump(self.name, path=path, option=option,
                             uri=self.connect_uri)
    if dump_result.exit_status:
        raise exceptions.TestFail("Failed to dump %s to %s.\n"
                                  "Detail: %s." % (self.name, path,
                                                   dump_result))
def get_job_type(self):
    """
    Return the current domain job type from 'virsh domjobinfo'.

    :return: the stripped value of the first line whose key contains
        'type', or False when no such line exists or virsh fails.
    """
    jobresult = virsh.domjobinfo(self.name, uri=self.connect_uri)
    if not jobresult.exit_status:
        for line in jobresult.stdout.splitlines():
            key = line.split(':')[0]
            value = line.split(':')[-1]
            if key.count("type"):
                return value.strip()
    else:
        logging.error(jobresult)
    return False
def get_pci_devices(self, device_str=None):
    """
    List PCI device addresses in the guest according to the given
    device character string.

    :param device_str: substring used to filter the lspci output.
    :return: list of PCI addresses (first lspci -D column).
    """
    session = self.wait_for_login()
    cmd = "lspci -D"
    if device_str is not None:
        cmd += " | grep %s" % device_str
    lines = session.cmd_output(cmd)
    session.close()
    return [line.split()[0] for line in lines.splitlines()]
def get_disks(self, diskname=None):
    """
    Return '/dev/<name>' for every disk-type block device in the guest.

    :param diskname: optional name used to filter lsblk output,
        useful for checking a specific disk.
    """
    cmd = "lsblk --nodeps -n"
    if diskname:
        cmd += " | grep %s" % diskname
    session = self.wait_for_login()
    lines = session.cmd_output(cmd)
    session.close()
    return ["/dev/%s" % line.split()[0]
            for line in lines.splitlines()
            if line.count(" disk ")]
def get_interfaces(self):
    """Return the network interface names visible inside the guest."""
    session = self.wait_for_login()
    lines = session.cmd_output("cat /proc/net/dev")
    session.close()
    interfaces = []
    for line in lines.splitlines():
        parts = line.split(':')
        # Interface rows look like '  eth0: <counters>'; header lines
        # do not contain exactly one colon.
        if len(parts) == 2:
            interfaces.append(parts[0].strip())
    return interfaces
def get_interface_mac(self, interface):
    """
    Get the MAC address of a guest interface by its name.

    :param interface: interface name as seen inside the guest
    :return: the MAC address string, or None when the interface does
        not exist or the sysfs read fails.
    """
    if interface not in self.get_interfaces():
        return None
    cmd = "cat /sys/class/net/%s/address" % interface
    session = self.wait_for_login()
    try:
        mac = session.cmd_output(cmd)
    except Exception, detail:
        # Best effort: log and report "unknown" instead of propagating.
        session.close()
        logging.error(str(detail))
        return None
    session.close()
    return mac.strip()
def install_package(self, name):
    """
    Install a package on VM.

    ToDo: Support multiple package manager.

    :param name: Name of package to be installed
    :raise virt_vm.VMError: when installation or update fails
    """
    session = self.wait_for_login()
    vm_distro = self.get_distro()
    try:
        # distro specific support for package manager
        if vm_distro.lower() == 'ubuntu':
            query_cmd = "dpkg -l | grep %s" % name
            cmd = "apt install -y %s" % name
            update_cmd = "apt upgrade -y %s" % name
        else:
            query_cmd = "rpm -q %s" % name
            cmd = "yum install -y %s" % name
            update_cmd = "yum update -y %s" % name
        if session.cmd_status(query_cmd):
            # Install the package if it does not exists
            status, output = session.cmd_status_output(cmd, timeout=300)
            # Just check status is not enough
            # It's necessary to check if install successfully
            if status != 0 or session.cmd_status(query_cmd) != 0:
                raise virt_vm.VMError("Installation of package %s failed:"
                                      "\n%s" % (name, output))
        else:
            # Update the package
            status, output = session.cmd_status_output(update_cmd,
                                                       timeout=600)
            if status:
                raise virt_vm.VMError("Update of package %s failed:\n%s"
                                      % (name, output))
    finally:
        session.close()
def remove_package(self, name):
    """
    Remove a package from the VM (apt on Ubuntu, rpm elsewhere).

    ToDo: Support multiple package manager.

    :param name: name of the package to be removed
    :raise virt_vm.VMError: when the removal command fails
    """
    session = self.wait_for_login()
    vm_distro = self.get_distro()
    try:
        # Each command is a no-op when the package is absent.
        if vm_distro.lower() == 'ubuntu':
            cmd = "! (dpkg -l | grep %s | grep ^ii) || apt remove -y %s"\
                % (name, name)
        else:
            cmd = "! rpm -q %s || rpm -e %s" % (name, name)
        status, output = session.cmd_status_output(cmd, timeout=300)
        if status != 0:
            raise virt_vm.VMError("Removal of package %s failed:\n%s" %
                                  (name, output))
    finally:
        # Always release the guest session.
        session.close()
def prepare_guest_agent(self, prepare_xml=True, channel=True, start=True):
    """
    Prepare qemu guest agent on the VM.

    :param prepare_xml: Whether change VM's XML
    :param channel: Whether add agent channel in VM. Only valid if
        prepare_xml is True
    :param start: Whether install and start the qemu-ga service
    :raise virt_vm.VMError: when the agent cannot be started/stopped or
        does not reach the requested state within 60 seconds
    """
    if prepare_xml:
        vmxml = libvirt_xml.VMXML.new_from_inactive_dumpxml(self.name)
        # Check if we need to change XML of VM.
        if channel != bool(vmxml.get_agent_channels()):
            # Channel changes require the domain to be down.
            if self.is_alive():
                self.destroy()
            if channel:
                vmxml.set_agent_channel()
            else:
                vmxml.remove_agent_channels()
            vmxml.sync()

    if not self.is_alive():
        self.start()

    self.install_package('pm-utils')
    self.install_package('qemu-guest-agent')

    session = self.wait_for_login()

    def _is_ga_running():
        # pgrep exits 0 when at least one qemu-ga process exists.
        return (not session.cmd_status("pgrep qemu-ga"))

    def _is_ga_finished():
        # pgrep exits 1 when no matching process exists.
        return (session.cmd_status("pgrep qemu-ga") == 1)

    def _start_ga():
        if not _is_ga_running():
            cmd = "service qemu-guest-agent start"
            status, output = session.cmd_status_output(cmd)
            # Sometimes the binary of the guest agent was corrupted on the
            # filesystem due to the guest being destroyed and cause service
            # masked, so need to reinstall agent to fix it
            if status and "is masked" in output:
                self.remove_package('qemu-guest-agent')
                self.install_package('qemu-guest-agent')
                status, output = session.cmd_status_output(cmd)
            # Older distros name the service 'qemu-ga'.
            if status and "unrecognized service" in output:
                cmd = "service qemu-ga start"
                status, output = session.cmd_status_output(cmd)
            if status:
                raise virt_vm.VMError("Start qemu-guest-agent failed:"
                                      "\n%s" % output)

    def _stop_ga():
        if _is_ga_running():
            cmd = "service qemu-guest-agent stop"
            status, output = session.cmd_status_output(cmd)
            # Older distros name the service 'qemu-ga'.
            if status and "unrecognized service" in output:
                cmd = "service qemu-ga stop"
                status, output = session.cmd_status_output(cmd)
            if status:
                raise virt_vm.VMError("Stop qemu-guest-agent failed:"
                                      "\n%s" % output)

    try:
        # Start/stop qemu-guest-agent
        if start:
            _start_ga()
        else:
            _stop_ga()
        # Check qemu-guest-agent status
        if start:
            if not utils_misc.wait_for(_is_ga_running, timeout=60):
                raise virt_vm.VMError("qemu-guest-agent is not running.")
        else:
            if not utils_misc.wait_for(_is_ga_finished, timeout=60):
                raise virt_vm.VMError("qemu-guest-agent is running")
    finally:
        session.close()
def getenforce(self):
    """
    Query the guest's SELinux mode.

    :return: SELinux mode [Enforcing|Permissive|Disabled]
    :raise virt_vm.VMError: when the getenforce command fails
    """
    # Make sure the getenforce binary is available in the guest.
    self.install_package('libselinux-utils')
    session = self.wait_for_login()
    try:
        status, output = session.cmd_status_output("getenforce")
        if status != 0:
            raise virt_vm.VMError("Get SELinux mode failed:\n%s" % output)
        return output.strip()
    finally:
        session.close()
def setenforce(self, mode):
    """
    Set SELinux mode in the VM.

    :param mode: SELinux mode [Enforcing|Permissive|1|0]
    :raise virt_vm.VMError: for an invalid mode, or when setenforce
        fails inside the guest
    """
    self.install_package('selinux-policy')
    self.install_package('selinux-policy-targeted')
    self.install_package('libselinux-utils')
    # Normalize the requested mode to 'Enforcing'/'Permissive'.  The
    # original left target_mode unbound for the documented string
    # modes, producing a NameError at the comparison below.
    try:
        if int(mode) == 1:
            target_mode = 'Enforcing'
        elif int(mode) == 0:
            target_mode = 'Permissive'
        else:
            raise virt_vm.VMError("Invalid SELinux mode: %s" % mode)
    except ValueError:
        target_mode = str(mode).capitalize()
        if target_mode not in ('Enforcing', 'Permissive'):
            raise virt_vm.VMError("Invalid SELinux mode: %s" % mode)
    session = self.wait_for_login()
    try:
        current_mode = self.getenforce()
        if current_mode == 'Disabled':
            # setenforce cannot leave the Disabled state at runtime.
            logging.warning("VM SELinux disabled. Can't set mode.")
            return
        elif current_mode != target_mode:
            cmd = "setenforce %s" % mode
            status, output = session.cmd_status_output(cmd)
            if status != 0:
                raise virt_vm.VMError(
                    "Set SELinux mode failed:\n%s" % output)
        else:
            logging.debug("VM SELinux mode don't need change.")
    finally:
        session.close()
|
CongLi/avocado-vt
|
virttest/libvirt_vm.py
|
Python
|
gpl-2.0
| 106,547
| 0.000319
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals, absolute_import

"""
django_twilio specific settings.
"""

from .utils import discover_twilio_credentials

# Resolved once at import time from the environment/Django settings via
# discover_twilio_credentials(); imported elsewhere as module constants.
TWILIO_ACCOUNT_SID, TWILIO_AUTH_TOKEN = discover_twilio_credentials()
|
xHeliotrope/injustice_dropper
|
env/lib/python3.4/site-packages/django_twilio/settings.py
|
Python
|
mit
| 242
| 0
|
import pytest
pytest.importorskip('numpy')
import numpy as np
from numpy.testing import assert_array_almost_equal, assert_array_equal
import dask
import dask.array as da
from dask.array.ghost import (Array, fractional_slice, getitem, trim_internal,
ghost_internal, nearest, constant, boundaries,
reflect, periodic, ghost)
from dask.core import get
def eq(a, b):
    """Compare two arrays for equality, materializing dask Arrays first.

    Returns a plain bool: ndarray comparisons are reduced with ``.all()``.
    """
    def materialize(x):
        # Force dask arrays through the synchronous scheduler.
        if isinstance(x, Array):
            return x.compute(get=dask.get)
        return x

    left = materialize(a)
    right = materialize(b)
    outcome = left == right
    if isinstance(outcome, np.ndarray):
        return outcome.all()
    return outcome
def test_fractional_slice():
    """Fractional block indices are rounded to the neighbouring block and
    turned into a getitem task over the boundary slice of that block."""
    assert fractional_slice(('x', 4.9), {0: 2}) == \
        (getitem, ('x', 5), (slice(0, 2),))

    assert fractional_slice(('x', 3, 5.1), {0: 2, 1: 3}) == \
        (getitem, ('x', 3, 5), (slice(None, None, None), slice(-3, None)))

    assert fractional_slice(('x', 2.9, 5.1), {0: 2, 1: 3}) == \
        (getitem, ('x', 3, 5), (slice(0, 2), slice(-3, None)))
def test_ghost_internal():
    """ghost_internal pads every *internal* chunk boundary with the
    neighbour's edge values (depth 2 on axis 0, depth 1 on axis 1), so the
    4x4 chunks grow to 6x5."""
    x = np.arange(64).reshape((8, 8))
    d = da.from_array(x, chunks=(4, 4))
    g = ghost_internal(d, {0: 2, 1: 1})
    result = g.compute(get=get)
    assert g.chunks == ((6, 6), (5, 5))
    # Middle rows/columns are duplicated across the former chunk boundary.
    expected = np.array([
        [ 0,  1,  2,  3,  4,  3,  4,  5,  6,  7],
        [ 8,  9, 10, 11, 12, 11, 12, 13, 14, 15],
        [16, 17, 18, 19, 20, 19, 20, 21, 22, 23],
        [24, 25, 26, 27, 28, 27, 28, 29, 30, 31],
        [32, 33, 34, 35, 36, 35, 36, 37, 38, 39],
        [40, 41, 42, 43, 44, 43, 44, 45, 46, 47],
        [16, 17, 18, 19, 20, 19, 20, 21, 22, 23],
        [24, 25, 26, 27, 28, 27, 28, 29, 30, 31],
        [32, 33, 34, 35, 36, 35, 36, 37, 38, 39],
        [40, 41, 42, 43, 44, 43, 44, 45, 46, 47],
        [48, 49, 50, 51, 52, 51, 52, 53, 54, 55],
        [56, 57, 58, 59, 60, 59, 60, 61, 62, 63]])
    assert eq(result, expected)
def test_trim_internal():
    """Trimming 1 (axis 0) / 2 (axis 1) elements per edge shrinks every
    10-element chunk to 8 / 6 respectively."""
    arr = da.ones((40, 60), chunks=(10, 10))
    trimmed = trim_internal(arr, axes={0: 1, 1: 2})
    expected_chunks = ((8, 8, 8, 8), (6, 6, 6, 6, 6, 6))
    assert trimmed.chunks == expected_chunks
def test_periodic():
    """periodic() pads an axis with values wrapped from the opposite edge."""
    x = np.arange(64).reshape((8, 8))
    d = da.from_array(x, chunks=(4, 4))
    e = periodic(d, axis=0, depth=2)
    assert e.shape[0] == d.shape[0] + 4
    assert e.shape[1] == d.shape[1]
    # The two leading rows come from the trailing edge of the array.
    assert eq(e[1, :], d[-1, :])
    assert eq(e[0, :], d[-2, :])


def test_reflect():
    """reflect() mirrors the boundary, repeating the edge element itself."""
    x = np.arange(10)
    d = da.from_array(x, chunks=(5, 5))

    e = reflect(d, axis=0, depth=2)
    expected = np.array([1, 0, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 9, 8])
    assert eq(e, expected)

    e = reflect(d, axis=0, depth=1)
    expected = np.array([0, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 9])
    assert eq(e, expected)


def test_nearest():
    """nearest() repeats the closest edge value into the padding."""
    x = np.arange(10)
    d = da.from_array(x, chunks=(5, 5))

    e = nearest(d, axis=0, depth=2)
    expected = np.array([0, 0, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 9, 9])
    assert eq(e, expected)

    e = nearest(d, axis=0, depth=1)
    expected = np.array([0, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 9])
    assert eq(e, expected)


def test_constant():
    """constant() pads an axis with a fixed fill value."""
    x = np.arange(64).reshape((8, 8))
    d = da.from_array(x, chunks=(4, 4))
    e = constant(d, axis=0, depth=2, value=10)
    assert e.shape[0] == d.shape[0] + 4
    assert e.shape[1] == d.shape[1]
    assert eq(e[1, :], 10)
    assert eq(e[-1, :], 10)
def test_boundaries():
    """boundaries() applies a per-axis mix of kinds: constant 0 (depth 2)
    on axis 0 and periodic wrap (depth 1) on axis 1."""
    x = np.arange(64).reshape((8, 8))
    d = da.from_array(x, chunks=(4, 4))
    e = boundaries(d, {0: 2, 1: 1}, {0: 0, 1: 'periodic'})
    expected = np.array(
        [[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
         [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
         [ 7, 0, 1, 2, 3, 4, 5, 6, 7, 0],
         [15, 8, 9,10,11,12,13,14,15, 8],
         [23,16,17,18,19,20,21,22,23,16],
         [31,24,25,26,27,28,29,30,31,24],
         [39,32,33,34,35,36,37,38,39,32],
         [47,40,41,42,43,44,45,46,47,40],
         [55,48,49,50,51,52,53,54,55,48],
         [63,56,57,58,59,60,61,62,63,56],
         [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
         [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]])
    assert eq(e, expected)
def test_ghost():
    """ghost() combines external boundaries (constant 100 on axis 0,
    reflect on axis 1) with internal ghost cells between chunks."""
    x = np.arange(64).reshape((8, 8))
    d = da.from_array(x, chunks=(4, 4))
    g = ghost(d, depth={0: 2, 1: 1}, boundary={0: 100, 1: 'reflect'})
    assert g.chunks == ((8, 8), (6, 6))
    expected = np.array(
        [[100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100],
         [100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100],
         [  0,   0,   1,   2,   3,   4,   3,   4,   5,   6,   7,   7],
         [  8,   8,   9,  10,  11,  12,  11,  12,  13,  14,  15,  15],
         [ 16,  16,  17,  18,  19,  20,  19,  20,  21,  22,  23,  23],
         [ 24,  24,  25,  26,  27,  28,  27,  28,  29,  30,  31,  31],
         [ 32,  32,  33,  34,  35,  36,  35,  36,  37,  38,  39,  39],
         [ 40,  40,  41,  42,  43,  44,  43,  44,  45,  46,  47,  47],
         [ 16,  16,  17,  18,  19,  20,  19,  20,  21,  22,  23,  23],
         [ 24,  24,  25,  26,  27,  28,  27,  28,  29,  30,  31,  31],
         [ 32,  32,  33,  34,  35,  36,  35,  36,  37,  38,  39,  39],
         [ 40,  40,  41,  42,  43,  44,  43,  44,  45,  46,  47,  47],
         [ 48,  48,  49,  50,  51,  52,  51,  52,  53,  54,  55,  55],
         [ 56,  56,  57,  58,  59,  60,  59,  60,  61,  62,  63,  63],
         [100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100],
         [100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100]])
    assert eq(g, expected)

    # Axis 1 given no boundary entry: only internal ghosting happens there.
    g = ghost(d, depth={0: 2, 1: 1}, boundary={0: 100})
    assert g.chunks == ((8, 8), (5, 5))
def test_map_overlap():
    """map_overlap hands each function call a chunk padded by ``depth``
    elements per side, so len(block) == 5 + 2 + 2 here."""
    darr = da.arange(10, chunks=5)
    mapped = darr.map_overlap(lambda block: block + len(block), depth=2)
    assert eq(mapped, np.arange(10) + 5 + 2 + 2)
def test_nearest_ghost():
    """ghost + trim_internal with 'nearest' boundaries round-trips to the
    original array, even with depth (5) close to the chunk size (6)."""
    a = np.arange(144).reshape(12, 12).astype(float)
    darr = da.from_array(a, chunks=(6, 6))
    garr = ghost(darr, depth={0: 5, 1: 5},
                 boundary={0: 'nearest', 1: 'nearest'})
    tarr = trim_internal(garr, {0: 5, 1: 5})
    assert_array_almost_equal(tarr, a)
def test_0_depth():
    """ghost followed by trim_internal is the identity when every depth is
    zero, whatever the boundary kind."""
    expected = np.arange(100).reshape(10, 10)
    darr = da.from_array(expected, chunks=(5, 2))
    depth = {0: 0, 1: 0}

    # Loop over boundary kinds instead of one local per kind (the original
    # locals shadowed the imported reflect/nearest/periodic/constant names).
    for boundary_kind in ['reflect', 'nearest', 'periodic', 42]:
        ghosted = ghost(darr, depth=depth, boundary=boundary_kind)
        assert_array_equal(trim_internal(ghosted, depth), expected)
def test_some_0_depth():
    """ghost/trim round-trips when only one axis has non-zero depth."""
    expected = np.arange(100).reshape(10, 10)
    darr = da.from_array(expected, chunks=(5, 5))
    depth = {0: 4, 1: 0}

    # Same round-trip for each boundary kind, including a constant fill.
    for boundary_kind in ['reflect', 'nearest', 'periodic', 42]:
        ghosted = ghost(darr, depth=depth, boundary=boundary_kind)
        assert_array_equal(trim_internal(ghosted, depth), expected)
def test_one_chunk_along_axis():
    """Zero-depth ghosting preserves the shape even when an axis consists
    of a single chunk."""
    data = np.arange(2 * 9).reshape(2, 9)
    darr = da.from_array(data, chunks=((2,), (2, 2, 2, 3)))
    ghosted = ghost(darr, depth=0, boundary=0)
    assert ghosted.shape == data.shape
def test_constant_boundaries():
    """Constant boundaries with zero depth must not change the chunking."""
    a = np.arange(1 * 9).reshape(1, 9)
    darr = da.from_array(a, chunks=((1,), (2, 2, 2, 3)))
    b = boundaries(darr, {0: 0, 1: 0}, {0: 0, 1: 0})
    assert b.chunks == darr.chunks
def test_depth_equals_boundary_length():
    """ghost/trim round-trips when depth equals the chunk size exactly."""
    expected = np.arange(100).reshape(10, 10)
    darr = da.from_array(expected, chunks=(5, 5))
    depth = {0: 5, 1: 5}
    reflected = ghost(darr, depth=depth, boundary='reflect')
    nearest = ghost(darr, depth=depth, boundary='nearest')
    periodic = ghost(darr, depth=depth, boundary='periodic')
    constant = ghost(darr, depth=depth, boundary=42)

    result = trim_internal(reflected, depth)
    assert_array_equal(result, expected)

    result = trim_internal(nearest, depth)
    assert_array_equal(result, expected)

    result = trim_internal(periodic, depth)
    assert_array_equal(result, expected)

    result = trim_internal(constant, depth)
    assert_array_equal(result, expected)
# Known limitation: a depth larger than the chunk size is expected to fail,
# hence the xfail marker.
@pytest.mark.xfail
def test_depth_greater_than_boundary_length():
    """ghost/trim round-trip with depth exceeding the 5-element chunks."""
    expected = np.arange(100).reshape(10, 10)
    darr = da.from_array(expected, chunks=(5, 5))
    depth = {0: 8, 1: 7}
    reflected = ghost(darr, depth=depth, boundary='reflect')
    nearest = ghost(darr, depth=depth, boundary='nearest')
    periodic = ghost(darr, depth=depth, boundary='periodic')
    constant = ghost(darr, depth=depth, boundary=42)

    result = trim_internal(reflected, depth)
    assert_array_equal(result, expected)

    result = trim_internal(nearest, depth)
    assert_array_equal(result, expected)

    result = trim_internal(periodic, depth)
    assert_array_equal(result, expected)

    result = trim_internal(constant, depth)
    assert_array_equal(result, expected)
def test_bad_depth_raises():
    """A depth incompatible with the chunking raises ValueError
    (12 % 5 leaves a trailing chunk of 2, smaller than depth 4)."""
    expected = np.arange(144).reshape(12, 12)
    darr = da.from_array(expected, chunks=(5, 5))
    depth = {0: 4, 1: 2}
    pytest.raises(ValueError, ghost, darr, depth=depth, boundary=1)
|
esc/dask
|
dask/array/tests/test_ghost.py
|
Python
|
bsd-3-clause
| 9,623
| 0.009145
|
# Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import mock
from oslo_serialization import jsonutils
from oslo_utils import units
from oslo_utils import uuidutils
from oslo_vmware import exceptions as vexc
from oslo_vmware.objects import datastore as ds_obj
from oslo_vmware import vim_util as vutil
import six
from nova.compute import power_state
from nova import context
from nova import exception
from nova.network import model as network_model
from nova import objects
from nova import test
from nova.tests.unit import fake_flavor
from nova.tests.unit import fake_instance
import nova.tests.unit.image.fake
from nova.tests.unit.virt.vmwareapi import fake as vmwareapi_fake
from nova.tests.unit.virt.vmwareapi import stubs
from nova import utils
from nova import version
from nova.virt import hardware
from nova.virt.vmwareapi import constants
from nova.virt.vmwareapi import driver
from nova.virt.vmwareapi import ds_util
from nova.virt.vmwareapi import images
from nova.virt.vmwareapi import vif
from nova.virt.vmwareapi import vim_util
from nova.virt.vmwareapi import vm_util
from nova.virt.vmwareapi import vmops
class DsPathMatcher(object):
    """Equality matcher for mock-call assertions.

    Compares any datastore-path-like object against an expected string via
    ``str()``, so tests can assert on mock calls that receive
    ``DatastorePath`` objects.
    """

    def __init__(self, expected_ds_path_str):
        self.expected_ds_path_str = expected_ds_path_str

    def __eq__(self, ds_path_param):
        return str(ds_path_param) == self.expected_ds_path_str

    def __ne__(self, ds_path_param):
        # Python 2 does not derive __ne__ from __eq__; define it so '!='
        # stays consistent with '=='.
        return not self.__eq__(ds_path_param)

    def __repr__(self):
        # Readable output in failed-assertion messages.
        return 'DsPathMatcher(%r)' % self.expected_ds_path_str
class VMwareVMOpsTestCase(test.NoDBTestCase):
    def setUp(self):
        """Build the shared fixture: fake vSphere session, datastore,
        datacenter, cluster, instance + flavor, IPv4/IPv6 networks and the
        VMwareVMOps instance under test."""
        super(VMwareVMOpsTestCase, self).setUp()
        vmwareapi_fake.reset()
        stubs.set_stubs(self.stubs)
        self.flags(enabled=True, group='vnc')
        self.flags(image_cache_subdirectory_name='vmware_base',
                   my_ip='',
                   flat_injected=True)
        self._context = context.RequestContext('fake_user', 'fake_project')
        self._session = driver.VMwareAPISession()

        self._virtapi = mock.Mock()
        self._image_id = nova.tests.unit.image.fake.get_valid_image_id()
        fake_ds_ref = vmwareapi_fake.ManagedObjectReference('fake-ds')
        self._ds = ds_obj.Datastore(
            ref=fake_ds_ref, name='fake_ds',
            capacity=10 * units.Gi,
            freespace=10 * units.Gi)
        self._dc_info = vmops.DcInfo(
            ref='fake_dc_ref', name='fake_dc',
            vmFolder='fake_vm_folder')
        cluster = vmwareapi_fake.create_cluster('fake_cluster', fake_ds_ref)
        # Values used to construct the fake instance under test.
        self._instance_values = {
            'display_name': 'fake_display_name',
            'name': 'fake_name',
            'uuid': 'fake_uuid',
            'vcpus': 1,
            'memory_mb': 512,
            'image_ref': self._image_id,
            'root_gb': 10,
            'node': '%s(%s)' % (cluster.mo_id, cluster.name),
            'expected_attrs': ['system_metadata'],
        }
        self._instance = fake_instance.fake_instance_obj(
            self._context, **self._instance_values)
        self._flavor = objects.Flavor(name='m1.small', memory_mb=512, vcpus=1,
                                      root_gb=10, ephemeral_gb=0, swap=0,
                                      extra_specs={})
        self._instance.flavor = self._flavor

        self._vmops = vmops.VMwareVMOps(self._session, self._virtapi, None,
                                        cluster=cluster.obj)
        self._cluster = cluster
        self._image_meta = objects.ImageMeta.from_dict({})

        # Dual-stack network (IPv4 + IPv6) used by most tests.
        subnet_4 = network_model.Subnet(cidr='192.168.0.1/24',
                                        dns=[network_model.IP('192.168.0.1')],
                                        gateway=
                                        network_model.IP('192.168.0.1'),
                                        ips=[
                                            network_model.IP('192.168.0.100')],
                                        routes=None)
        subnet_6 = network_model.Subnet(cidr='dead:beef::1/64',
                                        dns=None,
                                        gateway=
                                        network_model.IP('dead:beef::1'),
                                        ips=[network_model.IP(
                                            'dead:beef::dcad:beff:feef:0')],
                                        routes=None)
        network = network_model.Network(id=0,
                                        bridge='fa0',
                                        label='fake',
                                        subnets=[subnet_4, subnet_6],
                                        vlan=None,
                                        bridge_interface=None,
                                        injected=True)
        self._network_values = {
            'id': None,
            'address': 'DE:AD:BE:EF:00:00',
            'network': network,
            'type': None,
            'devname': None,
            'ovs_interfaceid': None,
            'rxtx_cap': 3
        }
        self.network_info = network_model.NetworkInfo([
            network_model.VIF(**self._network_values)
        ])
        # IPv6-only variant for the machine-id tests.
        pure_IPv6_network = network_model.Network(id=0,
                                                  bridge='fa0',
                                                  label='fake',
                                                  subnets=[subnet_6],
                                                  vlan=None,
                                                  bridge_interface=None,
                                                  injected=True)
        self.pure_IPv6_network_info = network_model.NetworkInfo([
            network_model.VIF(id=None,
                              address='DE:AD:BE:EF:00:00',
                              network=pure_IPv6_network,
                              type=None,
                              devname=None,
                              ovs_interfaceid=None,
                              rxtx_cap=3)
        ])
        # Expected instance-metadata blob written to the VM's annotation
        # field; checked verbatim by the resize tests.
        self._metadata = (
            "name:fake_display_name\n"
            "userid:fake_user\n"
            "username:None\n"
            "projectid:fake_project\n"
            "projectname:None\n"
            "flavor:name:m1.micro\n"
            "flavor:memory_mb:6\n"
            "flavor:vcpus:28\n"
            "flavor:ephemeral_gb:8128\n"
            "flavor:root_gb:496\n"
            "flavor:swap:33550336\n"
            "imageid:70a599e0-31e7-49b7-b260-868f441e862b\n"
            "package:%s\n" % version.version_string_with_package())
    def test_get_machine_id_str(self):
        """The machine-id string is MAC;IP;netmask;gateway;broadcast;dns#
        per VIF; IPv6-only networks leave the IPv4 fields empty."""
        result = vmops.VMwareVMOps._get_machine_id_str(self.network_info)
        self.assertEqual('DE:AD:BE:EF:00:00;192.168.0.100;255.255.255.0;'
                         '192.168.0.1;192.168.0.255;192.168.0.1#', result)
        result = vmops.VMwareVMOps._get_machine_id_str(
            self.pure_IPv6_network_info)
        self.assertEqual('DE:AD:BE:EF:00:00;;;;;#', result)
    def _setup_create_folder_mocks(self):
        # Helper for the _create_folder_if_missing tests: build a
        # VMwareVMOps with a pre-seeded datastore -> datacenter mapping and
        # return the pieces the assertions need.
        ops = vmops.VMwareVMOps(mock.Mock(), mock.Mock(), mock.Mock())
        base_name = 'folder'
        ds_name = "datastore"
        ds_ref = mock.Mock()
        ds_ref.value = 1
        dc_ref = mock.Mock()
        ops._datastore_dc_mapping[ds_ref.value] = vmops.DcInfo(
            ref=dc_ref,
            name='fake-name',
            vmFolder='fake-folder')
        path = ds_obj.DatastorePath(ds_name, base_name)
        return ds_name, ds_ref, ops, path, dc_ref
    @mock.patch.object(ds_util, 'mkdir')
    def test_create_folder_if_missing(self, mock_mkdir):
        """_create_folder_if_missing delegates to ds_util.mkdir."""
        ds_name, ds_ref, ops, path, dc = self._setup_create_folder_mocks()
        ops._create_folder_if_missing(ds_name, ds_ref, 'folder')
        mock_mkdir.assert_called_with(ops._session, path, dc)

    @mock.patch.object(ds_util, 'mkdir')
    def test_create_folder_if_missing_exception(self, mock_mkdir):
        """An existing folder (FileAlreadyExistsException) is tolerated."""
        ds_name, ds_ref, ops, path, dc = self._setup_create_folder_mocks()
        ds_util.mkdir.side_effect = vexc.FileAlreadyExistsException()
        ops._create_folder_if_missing(ds_name, ds_ref, 'folder')
        mock_mkdir.assert_called_with(ops._session, path, dc)
    @mock.patch.object(vutil, 'continue_retrieval', return_value=None)
    def test_get_valid_vms_from_retrieve_result(self, _mock_cont):
        """Every connected VM in the retrieve result is returned."""
        ops = vmops.VMwareVMOps(self._session, mock.Mock(), mock.Mock())
        fake_objects = vmwareapi_fake.FakeRetrieveResult()
        fake_objects.add_object(vmwareapi_fake.VirtualMachine(
            name=uuidutils.generate_uuid()))
        fake_objects.add_object(vmwareapi_fake.VirtualMachine(
            name=uuidutils.generate_uuid()))
        fake_objects.add_object(vmwareapi_fake.VirtualMachine(
            name=uuidutils.generate_uuid()))
        vms = ops._get_valid_vms_from_retrieve_result(fake_objects)
        self.assertEqual(3, len(vms))

    @mock.patch.object(vutil, 'continue_retrieval', return_value=None)
    def test_get_valid_vms_from_retrieve_result_with_invalid(self,
                                                             _mock_cont):
        """VMs in 'orphaned' or 'inaccessible' connection state are
        filtered out of the result."""
        ops = vmops.VMwareVMOps(self._session, mock.Mock(), mock.Mock())
        fake_objects = vmwareapi_fake.FakeRetrieveResult()
        fake_objects.add_object(vmwareapi_fake.VirtualMachine(
            name=uuidutils.generate_uuid()))
        invalid_vm1 = vmwareapi_fake.VirtualMachine(
            name=uuidutils.generate_uuid())
        invalid_vm1.set('runtime.connectionState', 'orphaned')
        invalid_vm2 = vmwareapi_fake.VirtualMachine(
            name=uuidutils.generate_uuid())
        invalid_vm2.set('runtime.connectionState', 'inaccessible')
        fake_objects.add_object(invalid_vm1)
        fake_objects.add_object(invalid_vm2)
        vms = ops._get_valid_vms_from_retrieve_result(fake_objects)
        self.assertEqual(1, len(vms))
    def test_delete_vm_snapshot(self):
        """_delete_vm_snapshot issues RemoveSnapshot_Task (consolidating,
        keeping children) and waits for the task."""
        def fake_call_method(module, method, *args, **kwargs):
            self.assertEqual('RemoveSnapshot_Task', method)
            self.assertEqual('fake_vm_snapshot', args[0])
            self.assertFalse(kwargs['removeChildren'])
            self.assertTrue(kwargs['consolidate'])
            return 'fake_remove_snapshot_task'

        with contextlib.nested(
            mock.patch.object(self._session, '_wait_for_task'),
            mock.patch.object(self._session, '_call_method', fake_call_method)
        ) as (_wait_for_task, _call_method):
            self._vmops._delete_vm_snapshot(self._instance,
                                            "fake_vm_ref", "fake_vm_snapshot")
            _wait_for_task.assert_has_calls([
                mock.call('fake_remove_snapshot_task')])
    def test_create_vm_snapshot(self):
        """_create_vm_snapshot creates a quiesced, memory-less snapshot and
        returns the snapshot ref read from the task's info.result."""
        method_list = ['CreateSnapshot_Task', 'get_dynamic_property']

        def fake_call_method(module, method, *args, **kwargs):
            # Session calls must arrive exactly in method_list order.
            expected_method = method_list.pop(0)
            self.assertEqual(expected_method, method)
            if (expected_method == 'CreateSnapshot_Task'):
                self.assertEqual('fake_vm_ref', args[0])
                self.assertFalse(kwargs['memory'])
                self.assertTrue(kwargs['quiesce'])
                return 'fake_snapshot_task'
            elif (expected_method == 'get_dynamic_property'):
                task_info = mock.Mock()
                task_info.result = "fake_snapshot_ref"
                self.assertEqual(('fake_snapshot_task', 'Task', 'info'), args)
                return task_info

        with contextlib.nested(
            mock.patch.object(self._session, '_wait_for_task'),
            mock.patch.object(self._session, '_call_method', fake_call_method)
        ) as (_wait_for_task, _call_method):
            snap = self._vmops._create_vm_snapshot(self._instance,
                                                   "fake_vm_ref")
            self.assertEqual("fake_snapshot_ref", snap)
            _wait_for_task.assert_has_calls([
                mock.call('fake_snapshot_task')])
def test_update_instance_progress(self):
with mock.patch.object(self._instance, 'save') as mock_save:
self._vmops._update_instance_progress(self._instance._context,
self._instance, 5, 10)
mock_save.assert_called_once_with()
self.assertEqual(50, self._instance.progress)
    @mock.patch.object(vm_util, 'get_vm_ref', return_value='fake_ref')
    def test_get_info(self, mock_get_vm_ref):
        """get_info maps the vSphere summary properties onto an
        InstanceInfo (memory reported in KB)."""
        result = {
            'summary.config.numCpu': 4,
            'summary.config.memorySizeMB': 128,
            'runtime.powerState': 'poweredOn'
        }

        def mock_call_method(module, method, *args, **kwargs):
            if method == 'continue_retrieval':
                return
            return result

        with mock.patch.object(self._session, '_call_method',
                               mock_call_method):
            info = self._vmops.get_info(self._instance)
            mock_get_vm_ref.assert_called_once_with(self._session,
                                                    self._instance)
            expected = hardware.InstanceInfo(state=power_state.RUNNING,
                                             max_mem_kb=128 * 1024,
                                             mem_kb=128 * 1024,
                                             num_cpu=4)
            self.assertEqual(expected, info)
    @mock.patch.object(vm_util, 'get_vm_ref', return_value='fake_ref')
    def test_get_info_when_ds_unavailable(self, mock_get_vm_ref):
        """With only the power state available (datastore down), get_info
        still returns the correct SHUTDOWN state."""
        result = {
            'runtime.powerState': 'poweredOff'
        }

        def mock_call_method(module, method, *args, **kwargs):
            if method == 'continue_retrieval':
                return
            return result

        with mock.patch.object(self._session, '_call_method',
                               mock_call_method):
            info = self._vmops.get_info(self._instance)
            mock_get_vm_ref.assert_called_once_with(self._session,
                                                    self._instance)
            self.assertEqual(hardware.InstanceInfo(state=power_state.SHUTDOWN),
                             info)
@mock.patch.object(vm_util, 'get_vm_ref', return_value='fake_ref')
def test_get_info_instance_deleted(self, mock_get_vm_ref):
props = ['summary.config.numCpu', 'summary.config.memorySizeMB',
'runtime.powerState']
prop_cpu = vmwareapi_fake.Prop(props[0], 4)
prop_mem = vmwareapi_fake.Prop(props[1], 128)
prop_state = vmwareapi_fake.Prop(props[2], 'poweredOn')
prop_list = [prop_state, prop_mem, prop_cpu]
obj_content = vmwareapi_fake.ObjectContent(None, prop_list=prop_list)
result = vmwareapi_fake.FakeRetrieveResult()
result.add_object(obj_content)
def mock_call_method(module, method, *args, **kwargs):
raise vexc.ManagedObjectNotFoundException()
with mock.patch.object(self._session, '_call_method',
mock_call_method):
self.assertRaises(exception.InstanceNotFound,
self._vmops.get_info,
self._instance)
mock_get_vm_ref.assert_called_once_with(self._session,
self._instance)
def _test_get_datacenter_ref_and_name(self, ds_ref_exists=False):
instance_ds_ref = mock.Mock()
instance_ds_ref.value = "ds-1"
_vcvmops = vmops.VMwareVMOps(self._session, None, None)
if ds_ref_exists:
ds_ref = mock.Mock()
ds_ref.value = "ds-1"
else:
ds_ref = None
self._continue_retrieval = True
self._fake_object1 = vmwareapi_fake.FakeRetrieveResult()
self._fake_object2 = vmwareapi_fake.FakeRetrieveResult()
def fake_call_method(module, method, *args, **kwargs):
self._fake_object1.add_object(vmwareapi_fake.Datacenter(
ds_ref=ds_ref))
if not ds_ref:
# Token is set for the fake_object1, so it will continue to
# fetch the next object.
setattr(self._fake_object1, 'token', 'token-0')
if self._continue_retrieval:
if self._continue_retrieval:
self._continue_retrieval = False
self._fake_object2.add_object(
vmwareapi_fake.Datacenter())
return self._fake_object2
return
if method == "continue_retrieval":
return
return self._fake_object1
with mock.patch.object(self._session, '_call_method',
side_effect=fake_call_method) as fake_call:
dc_info = _vcvmops.get_datacenter_ref_and_name(instance_ds_ref)
if ds_ref:
self.assertEqual(1, len(_vcvmops._datastore_dc_mapping))
calls = [mock.call(vim_util, "get_objects", "Datacenter",
["name", "datastore", "vmFolder"]),
mock.call(vutil, 'continue_retrieval',
self._fake_object1)]
fake_call.assert_has_calls(calls)
self.assertEqual("ha-datacenter", dc_info.name)
else:
calls = [mock.call(vim_util, "get_objects", "Datacenter",
["name", "datastore", "vmFolder"]),
mock.call(vutil, 'continue_retrieval',
self._fake_object2)]
fake_call.assert_has_calls(calls)
self.assertIsNone(dc_info)
    def test_get_datacenter_ref_and_name(self):
        # Datastore present on the first retrieval page.
        self._test_get_datacenter_ref_and_name(ds_ref_exists=True)

    def test_get_datacenter_ref_and_name_with_no_datastore(self):
        # No matching datastore on any page -> lookup returns None.
        self._test_get_datacenter_ref_and_name()
    @mock.patch.object(vm_util, 'power_off_instance')
    @mock.patch.object(ds_util, 'disk_copy')
    @mock.patch.object(vm_util, 'get_vm_ref', return_value='fake-ref')
    @mock.patch.object(vm_util, 'find_rescue_device')
    @mock.patch.object(vm_util, 'get_vm_boot_spec')
    @mock.patch.object(vm_util, 'reconfigure_vm')
    @mock.patch.object(vm_util, 'power_on_instance')
    @mock.patch.object(ds_obj, 'get_datastore_by_ref')
    def test_rescue(self, mock_get_ds_by_ref, mock_power_on, mock_reconfigure,
                    mock_get_boot_spec, mock_find_rescue,
                    mock_get_vm_ref, mock_disk_copy,
                    mock_power_off):
        """rescue(): power off, copy the image-cache vmdk to
        '<image>-rescue.vmdk' next to the instance, attach it, boot from
        the rescue device and power back on."""
        _volumeops = mock.Mock()
        self._vmops._volumeops = _volumeops

        ds = ds_obj.Datastore('fake-ref', 'ds1')
        mock_get_ds_by_ref.return_value = ds
        mock_find_rescue.return_value = 'fake-rescue-device'
        mock_get_boot_spec.return_value = 'fake-boot-spec'

        device = vmwareapi_fake.DataObject()
        backing = vmwareapi_fake.DataObject()
        backing.datastore = ds.ref
        device.backing = backing
        vmdk = vm_util.VmdkInfo('[fake] uuid/root.vmdk',
                                'fake-adapter',
                                'fake-disk',
                                'fake-capacity',
                                device)

        with contextlib.nested(
            mock.patch.object(self._vmops, 'get_datacenter_ref_and_name'),
            mock.patch.object(vm_util, 'get_vmdk_info',
                              return_value=vmdk)
        ) as (_get_dc_ref_and_name, fake_vmdk_info):
            dc_info = mock.Mock()
            _get_dc_ref_and_name.return_value = dc_info
            self._vmops.rescue(
                self._context, self._instance, None, self._image_meta)
            mock_power_off.assert_called_once_with(self._session,
                                                   self._instance,
                                                   'fake-ref')
            uuid = self._instance.image_ref
            cache_path = ds.build_path('vmware_base', uuid, uuid + '.vmdk')
            rescue_path = ds.build_path('fake_uuid', uuid + '-rescue.vmdk')
            mock_disk_copy.assert_called_once_with(self._session, dc_info.ref,
                                                   cache_path, rescue_path)
            _volumeops.attach_disk_to_vm.assert_called_once_with('fake-ref',
                self._instance, mock.ANY, mock.ANY, rescue_path)
            mock_get_boot_spec.assert_called_once_with(mock.ANY,
                                                       'fake-rescue-device')
            mock_reconfigure.assert_called_once_with(self._session,
                                                     'fake-ref',
                                                     'fake-boot-spec')
            mock_power_on.assert_called_once_with(self._session,
                                                  self._instance,
                                                  vm_ref='fake-ref')
    def test_unrescue_power_on(self):
        # Unrescue followed by an immediate power on.
        self._test_unrescue(True)

    def test_unrescue_power_off(self):
        # Unrescue leaving the instance powered off.
        self._test_unrescue(False)
    def _test_unrescue(self, power_on):
        """unrescue(): power off, detach + destroy the rescue disk, and
        power back on only when ``power_on`` is requested."""
        _volumeops = mock.Mock()
        self._vmops._volumeops = _volumeops
        vm_ref = mock.Mock()

        def fake_call_method(module, method, *args, **kwargs):
            # Only the hardware-device property lookup is expected here.
            expected_args = (vm_ref, 'VirtualMachine',
                             'config.hardware.device')
            self.assertEqual('get_dynamic_property', method)
            self.assertEqual(expected_args, args)

        with contextlib.nested(
            mock.patch.object(vm_util, 'power_on_instance'),
            mock.patch.object(vm_util, 'find_rescue_device'),
            mock.patch.object(vm_util, 'get_vm_ref', return_value=vm_ref),
            mock.patch.object(self._session, '_call_method',
                              fake_call_method),
            mock.patch.object(vm_util, 'power_off_instance')
        ) as (_power_on_instance, _find_rescue, _get_vm_ref,
              _call_method, _power_off):
            self._vmops.unrescue(self._instance, power_on=power_on)

            if power_on:
                _power_on_instance.assert_called_once_with(self._session,
                    self._instance, vm_ref=vm_ref)
            else:
                self.assertFalse(_power_on_instance.called)
            _get_vm_ref.assert_called_once_with(self._session,
                                                self._instance)
            _power_off.assert_called_once_with(self._session, self._instance,
                                               vm_ref)
            _volumeops.detach_disk_from_vm.assert_called_once_with(
                vm_ref, self._instance, mock.ANY, destroy_disk=True)
    def _test_finish_migration(self, power_on=True, resize_instance=False):
        """finish_migration recreates ephemerals/swap, reports resize
        progress steps 5 and 6, and optionally powers the instance on."""
        with contextlib.nested(
                mock.patch.object(self._vmops,
                                  '_resize_create_ephemerals_and_swap'),
                mock.patch.object(self._vmops, "_update_instance_progress"),
                mock.patch.object(vm_util, "power_on_instance"),
                mock.patch.object(vm_util, "get_vm_ref",
                                  return_value='fake-ref')
        ) as (fake_resize_create_ephemerals_and_swap,
              fake_update_instance_progress, fake_power_on, fake_get_vm_ref):
            self._vmops.finish_migration(context=self._context,
                                         migration=None,
                                         instance=self._instance,
                                         disk_info=None,
                                         network_info=None,
                                         block_device_info=None,
                                         resize_instance=resize_instance,
                                         image_meta=None,
                                         power_on=power_on)
            # NOTE(review): Mock has no 'called_once_with' method -- this
            # line silently creates a child mock and asserts nothing.  It
            # was probably meant to be 'assert_called_once_with'; confirm
            # before changing, as the real call args may differ.
            fake_resize_create_ephemerals_and_swap.called_once_with(
                'fake-ref', self._instance, None)
            if power_on:
                fake_power_on.assert_called_once_with(self._session,
                                                      self._instance,
                                                      vm_ref='fake-ref')
            else:
                self.assertFalse(fake_power_on.called)
            calls = [
                mock.call(self._context, self._instance, step=5,
                          total_steps=vmops.RESIZE_TOTAL_STEPS),
                mock.call(self._context, self._instance, step=6,
                          total_steps=vmops.RESIZE_TOTAL_STEPS)]
            fake_update_instance_progress.assert_has_calls(calls)
    def test_finish_migration_power_on(self):
        # Same-size migration, instance powered on afterwards.
        self._test_finish_migration(power_on=True, resize_instance=False)

    def test_finish_migration_power_off(self):
        # Same-size migration, instance left powered off.
        self._test_finish_migration(power_on=False, resize_instance=False)

    def test_finish_migration_power_on_resize(self):
        # Migration with an actual resize, powered on afterwards.
        self._test_finish_migration(power_on=True, resize_instance=True)
    @mock.patch.object(vmops.VMwareVMOps, '_get_extra_specs')
    @mock.patch.object(vmops.VMwareVMOps, '_resize_create_ephemerals_and_swap')
    @mock.patch.object(vmops.VMwareVMOps, '_remove_ephemerals_and_swap')
    @mock.patch.object(ds_util, 'disk_delete')
    @mock.patch.object(ds_util, 'disk_move')
    @mock.patch.object(ds_util, 'file_exists',
                       return_value=True)
    @mock.patch.object(vmops.VMwareVMOps, '_get_ds_browser',
                       return_value='fake-browser')
    @mock.patch.object(vm_util, 'reconfigure_vm')
    @mock.patch.object(vm_util, 'get_vm_resize_spec',
                       return_value='fake-spec')
    @mock.patch.object(vm_util, 'power_off_instance')
    @mock.patch.object(vm_util, 'get_vm_ref', return_value='fake-ref')
    @mock.patch.object(vm_util, 'power_on_instance')
    def _test_finish_revert_migration(self, fake_power_on,
                                      fake_get_vm_ref, fake_power_off,
                                      fake_resize_spec, fake_reconfigure_vm,
                                      fake_get_browser,
                                      fake_original_exists, fake_disk_move,
                                      fake_disk_delete,
                                      fake_remove_ephemerals_and_swap,
                                      fake_resize_create_ephemerals_and_swap,
                                      fake_get_extra_specs,
                                      power_on):
        """Tests the finish_revert_migration method on vmops."""
        datastore = ds_obj.Datastore(ref='fake-ref', name='fake')
        device = vmwareapi_fake.DataObject()
        backing = vmwareapi_fake.DataObject()
        backing.datastore = datastore.ref
        device.backing = backing
        vmdk = vm_util.VmdkInfo('[fake] uuid/root.vmdk',
                                'fake-adapter',
                                'fake-disk',
                                'fake-capacity',
                                device)
        dc_info = vmops.DcInfo(ref='fake_ref', name='fake',
                               vmFolder='fake_folder')
        extra_specs = vm_util.ExtraSpecs()
        fake_get_extra_specs.return_value = extra_specs
        with contextlib.nested(
            mock.patch.object(self._vmops, 'get_datacenter_ref_and_name',
                              return_value=dc_info),
            mock.patch.object(vm_util, 'get_vmdk_info',
                              return_value=vmdk)
        ) as (fake_get_dc_ref_and_name, fake_get_vmdk_info):
            self._vmops._volumeops = mock.Mock()
            mock_attach_disk = self._vmops._volumeops.attach_disk_to_vm
            mock_detach_disk = self._vmops._volumeops.detach_disk_from_vm

            self._vmops.finish_revert_migration(self._context,
                                                instance=self._instance,
                                                network_info=None,
                                                block_device_info=None,
                                                power_on=power_on)
            fake_get_vm_ref.assert_called_once_with(self._session,
                                                    self._instance)
            fake_power_off.assert_called_once_with(self._session,
                                                   self._instance,
                                                   'fake-ref')
            # Validate VM reconfiguration
            metadata = ('name:fake_display_name\n'
                        'userid:fake_user\n'
                        'username:None\n'
                        'projectid:fake_project\n'
                        'projectname:None\n'
                        'flavor:name:m1.small\n'
                        'flavor:memory_mb:512\n'
                        'flavor:vcpus:1\n'
                        'flavor:ephemeral_gb:0\n'
                        'flavor:root_gb:10\n'
                        'flavor:swap:0\n'
                        'imageid:70a599e0-31e7-49b7-b260-868f441e862b\n'
                        'package:%s\n' % version.version_string_with_package())
            fake_resize_spec.assert_called_once_with(
                self._session.vim.client.factory,
                int(self._instance.vcpus),
                int(self._instance.memory_mb),
                extra_specs,
                metadata=metadata)
            fake_reconfigure_vm.assert_called_once_with(self._session,
                                                        'fake-ref',
                                                        'fake-spec')
            # Validate disk configuration: the preserved original.vmdk is
            # swapped back in for root.vmdk.
            fake_get_vmdk_info.assert_called_once_with(
                self._session, 'fake-ref', uuid=self._instance.uuid)
            fake_get_browser.assert_called_once_with('fake-ref')
            fake_original_exists.assert_called_once_with(
                self._session, 'fake-browser',
                ds_obj.DatastorePath(datastore.name, 'uuid'),
                'original.vmdk')

            mock_detach_disk.assert_called_once_with('fake-ref',
                                                     self._instance,
                                                     device)
            fake_disk_delete.assert_called_once_with(
                self._session, dc_info.ref, '[fake] uuid/root.vmdk')
            fake_disk_move.assert_called_once_with(
                self._session, dc_info.ref,
                '[fake] uuid/original.vmdk',
                '[fake] uuid/root.vmdk')
            mock_attach_disk.assert_called_once_with(
                'fake-ref', self._instance, 'fake-adapter', 'fake-disk',
                '[fake] uuid/root.vmdk')
            # NOTE(review): 'called_once_with' is not a Mock assertion --
            # these two lines assert nothing; likely meant to be
            # 'assert_called_once_with'.  Confirm actual call args first.
            fake_remove_ephemerals_and_swap.called_once_with('fake-ref')
            fake_resize_create_ephemerals_and_swap.called_once_with(
                'fake-ref', self._instance, None)
            if power_on:
                fake_power_on.assert_called_once_with(self._session,
                                                      self._instance)
            else:
                self.assertFalse(fake_power_on.called)
    def test_finish_revert_migration_power_on(self):
        # Revert and power the instance back on.
        self._test_finish_revert_migration(power_on=True)

    def test_finish_revert_migration_power_off(self):
        # Revert leaving the instance powered off.
        self._test_finish_revert_migration(power_on=False)
    @mock.patch.object(vmops.VMwareVMOps, '_get_instance_metadata')
    @mock.patch.object(vmops.VMwareVMOps, '_get_extra_specs')
    @mock.patch.object(vm_util, 'reconfigure_vm')
    @mock.patch.object(vm_util, 'get_vm_resize_spec',
                       return_value='fake-spec')
    def test_resize_vm(self, fake_resize_spec, fake_reconfigure,
                       fake_get_extra_specs, fake_get_metadata):
        """_resize_vm builds a resize spec from the new flavor's cpu/memory
        and reconfigures the VM with it."""
        extra_specs = vm_util.ExtraSpecs()
        fake_get_extra_specs.return_value = extra_specs
        fake_get_metadata.return_value = self._metadata
        flavor = objects.Flavor(name='m1.small',
                                memory_mb=1024,
                                vcpus=2,
                                extra_specs={})
        self._vmops._resize_vm(self._context, self._instance, 'vm-ref', flavor,
                               None)
        fake_resize_spec.assert_called_once_with(
            self._session.vim.client.factory, 2, 1024, extra_specs,
            metadata=self._metadata)
        fake_reconfigure.assert_called_once_with(self._session,
                                                 'vm-ref', 'fake-spec')
    @mock.patch.object(vmops.VMwareVMOps, '_extend_virtual_disk')
    @mock.patch.object(ds_util, 'disk_move')
    @mock.patch.object(ds_util, 'disk_copy')
    def test_resize_disk(self, fake_disk_copy, fake_disk_move,
                         fake_extend):
        """_resize_disk: copy root.vmdk to resized.vmdk, extend the copy,
        keep the original as original.vmdk and attach the resized disk."""
        datastore = ds_obj.Datastore(ref='fake-ref', name='fake')
        device = vmwareapi_fake.DataObject()
        backing = vmwareapi_fake.DataObject()
        backing.datastore = datastore.ref
        device.backing = backing
        vmdk = vm_util.VmdkInfo('[fake] uuid/root.vmdk',
                                'fake-adapter',
                                'fake-disk',
                                self._instance.root_gb * units.Gi,
                                device)
        dc_info = vmops.DcInfo(ref='fake_ref', name='fake',
                               vmFolder='fake_folder')
        with mock.patch.object(self._vmops, 'get_datacenter_ref_and_name',
                               return_value=dc_info) as fake_get_dc_ref_and_name:
            self._vmops._volumeops = mock.Mock()
            mock_attach_disk = self._vmops._volumeops.attach_disk_to_vm
            mock_detach_disk = self._vmops._volumeops.detach_disk_from_vm

            # New flavor grows the root disk by 1 GB.
            flavor = fake_flavor.fake_flavor_obj(self._context,
                         root_gb=self._instance.root_gb + 1)
            self._vmops._resize_disk(self._instance, 'fake-ref', vmdk, flavor)
            fake_get_dc_ref_and_name.assert_called_once_with(datastore.ref)
            fake_disk_copy.assert_called_once_with(
                self._session, dc_info.ref, '[fake] uuid/root.vmdk',
                '[fake] uuid/resized.vmdk')
            mock_detach_disk.assert_called_once_with('fake-ref',
                                                     self._instance,
                                                     device)
            fake_extend.assert_called_once_with(
                self._instance, flavor['root_gb'] * units.Mi,
                '[fake] uuid/resized.vmdk', dc_info.ref)
            calls = [
                    mock.call(self._session, dc_info.ref,
                              '[fake] uuid/root.vmdk',
                              '[fake] uuid/original.vmdk'),
                    mock.call(self._session, dc_info.ref,
                              '[fake] uuid/resized.vmdk',
                              '[fake] uuid/root.vmdk')]
            fake_disk_move.assert_has_calls(calls)

            mock_attach_disk.assert_called_once_with(
                    'fake-ref', self._instance, 'fake-adapter', 'fake-disk',
                    '[fake] uuid/root.vmdk')
    @mock.patch.object(vm_util, 'detach_devices_from_vm')
    @mock.patch.object(vm_util, 'get_swap')
    @mock.patch.object(vm_util, 'get_ephemerals')
    def test_remove_ephemerals_and_swap(self, get_ephemerals, get_swap,
                                        detach_devices):
        """All ephemeral and swap devices are detached in a single call."""
        get_ephemerals.return_value = [mock.sentinel.ephemeral0,
                                       mock.sentinel.ephemeral1]
        get_swap.return_value = mock.sentinel.swap
        # Expected detach order: ephemerals first, then the swap device.
        devices = [mock.sentinel.ephemeral0, mock.sentinel.ephemeral1,
                   mock.sentinel.swap]
        self._vmops._remove_ephemerals_and_swap(mock.sentinel.vm_ref)
        detach_devices.assert_called_once_with(self._vmops._session,
                                               mock.sentinel.vm_ref, devices)
    @mock.patch.object(ds_util, 'disk_delete')
    @mock.patch.object(ds_util, 'file_exists',
                       return_value=True)
    @mock.patch.object(vmops.VMwareVMOps, '_get_ds_browser',
                       return_value='fake-browser')
    @mock.patch.object(vm_util, 'get_vm_ref', return_value='fake-ref')
    def test_confirm_migration(self, fake_get_vm_ref, fake_get_browser,
                               fake_original_exists,
                               fake_disk_delete):
        """Tests the confirm_migration method on vmops.

        Confirming a migration deletes the backed-up original.vmdk that
        was kept around in case the resize had to be reverted.
        """
        datastore = ds_obj.Datastore(ref='fake-ref', name='fake')
        device = vmwareapi_fake.DataObject()
        backing = vmwareapi_fake.DataObject()
        backing.datastore = datastore.ref
        device.backing = backing
        vmdk = vm_util.VmdkInfo('[fake] uuid/root.vmdk',
                                'fake-adapter',
                                'fake-disk',
                                'fake-capacity',
                                device)
        dc_info = vmops.DcInfo(ref='fake_ref', name='fake',
                               vmFolder='fake_folder')
        with contextlib.nested(
                mock.patch.object(self._vmops, 'get_datacenter_ref_and_name',
                                  return_value=dc_info),
                mock.patch.object(vm_util, 'get_vmdk_info',
                                  return_value=vmdk)
        ) as (fake_get_dc_ref_and_name, fake_get_vmdk_info):
            self._vmops.confirm_migration(None,
                                          self._instance,
                                          None)
            fake_get_vm_ref.assert_called_once_with(self._session,
                                                    self._instance)
            fake_get_vmdk_info.assert_called_once_with(
                self._session, 'fake-ref', uuid=self._instance.uuid)
            fake_get_browser.assert_called_once_with('fake-ref')
            # The original disk is only deleted if it actually exists.
            fake_original_exists.assert_called_once_with(
                self._session, 'fake-browser',
                ds_obj.DatastorePath(datastore.name, 'uuid'),
                'original.vmdk')
            fake_disk_delete.assert_called_once_with(
                self._session, dc_info.ref, '[fake] uuid/original.vmdk')
def test_migrate_disk_and_power_off(self):
self._test_migrate_disk_and_power_off(
flavor_root_gb=self._instance.root_gb + 1)
def test_migrate_disk_and_power_off_zero_disk_flavor(self):
self._instance.root_gb = 0
self._test_migrate_disk_and_power_off(flavor_root_gb=0)
def test_migrate_disk_and_power_off_disk_shrink(self):
self.assertRaises(exception.InstanceFaultRollback,
self._test_migrate_disk_and_power_off,
flavor_root_gb=self._instance.root_gb - 1)
    @mock.patch.object(vmops.VMwareVMOps, "_remove_ephemerals_and_swap")
    @mock.patch.object(vm_util, 'get_vmdk_info')
    @mock.patch.object(vmops.VMwareVMOps, "_resize_disk")
    @mock.patch.object(vmops.VMwareVMOps, "_resize_vm")
    @mock.patch.object(vm_util, 'power_off_instance')
    @mock.patch.object(vmops.VMwareVMOps, "_update_instance_progress")
    @mock.patch.object(vm_util, 'get_vm_ref', return_value='fake-ref')
    def _test_migrate_disk_and_power_off(self, fake_get_vm_ref, fake_progress,
                                         fake_power_off, fake_resize_vm,
                                         fake_resize_disk, fake_get_vmdk_info,
                                         fake_remove_ephemerals_and_swap,
                                         flavor_root_gb):
        """Drive migrate_disk_and_power_off with a flavor of the given size.

        Verifies that the VM is looked up and powered off, that the VM and
        its root disk are resized, and that progress is reported for each
        resize step.  Callers choose flavor_root_gb to exercise grow,
        zero-disk and shrink (error) paths.
        """
        vmdk = vm_util.VmdkInfo('[fake] uuid/root.vmdk',
                                'fake-adapter',
                                'fake-disk',
                                self._instance.root_gb * units.Gi,
                                'fake-device')
        fake_get_vmdk_info.return_value = vmdk
        flavor = fake_flavor.fake_flavor_obj(self._context,
                                             root_gb=flavor_root_gb)
        self._vmops.migrate_disk_and_power_off(self._context,
                                               self._instance,
                                               None,
                                               flavor)
        fake_get_vm_ref.assert_called_once_with(self._session,
                                                self._instance)
        fake_power_off.assert_called_once_with(self._session,
                                               self._instance,
                                               'fake-ref')
        fake_resize_vm.assert_called_once_with(self._context, self._instance,
                                               'fake-ref', flavor, mock.ANY)
        fake_resize_disk.assert_called_once_with(self._instance, 'fake-ref',
                                                 vmdk, flavor)
        # Progress must have been reported for each of the 4 resize steps.
        calls = [mock.call(self._context, self._instance, step=i,
                           total_steps=vmops.RESIZE_TOTAL_STEPS)
                 for i in range(4)]
        fake_progress.assert_has_calls(calls)
    @mock.patch.object(vmops.VMwareVMOps, '_attach_cdrom_to_vm')
    @mock.patch.object(vmops.VMwareVMOps, '_create_config_drive')
    def test_configure_config_drive(self,
                                    mock_create_config_drive,
                                    mock_attach_cdrom_to_vm):
        """The config drive ISO is created and then attached as a CDROM."""
        injected_files = mock.Mock()
        admin_password = mock.Mock()
        vm_ref = mock.Mock()
        mock_create_config_drive.return_value = "fake_iso_path"
        self._vmops._configure_config_drive(
                self._instance, vm_ref, self._dc_info, self._ds,
                injected_files, admin_password)
        # The returned ISO path is resolved relative to the datastore.
        upload_iso_path = self._ds.build_path("fake_iso_path")
        mock_create_config_drive.assert_called_once_with(self._instance,
                injected_files, admin_password, self._ds.name,
                self._dc_info.name, self._instance.uuid, "Fake-CookieJar")
        mock_attach_cdrom_to_vm.assert_called_once_with(
                vm_ref, self._instance, self._ds.ref, str(upload_iso_path))
    @mock.patch.object(vmops.LOG, 'debug')
    @mock.patch.object(vmops.VMwareVMOps, '_fetch_image_if_missing')
    @mock.patch.object(vmops.VMwareVMOps, '_get_vm_config_info')
    @mock.patch.object(vmops.VMwareVMOps, 'build_virtual_machine')
    @mock.patch.object(vmops.lockutils, 'lock')
    def test_spawn_mask_block_device_info_password(self, mock_lock,
        mock_build_virtual_machine, mock_get_vm_config_info,
        mock_fetch_image_if_missing, mock_debug):
        """auth_password in block_device_info must never be logged in clear.

        Spawn is made to fail early (attach_root_volume raises); only the
        debug-log scrubbing of 'scrubme' is asserted.
        """
        # Very simple test that just ensures block_device_info auth_password
        # is masked when logged; the rest of the test just fails out early.
        data = {'auth_password': 'scrubme'}
        bdm = [{'boot_index': 0, 'disk_bus': constants.DEFAULT_ADAPTER_TYPE,
                'connection_info': {'data': data}}]
        bdi = {'block_device_mapping': bdm}

        self.password_logged = False

        # Tests that the parameters to the to_xml method are sanitized for
        # passwords when logged.
        def fake_debug(*args, **kwargs):
            if 'auth_password' in args[0]:
                self.password_logged = True
                self.assertNotIn('scrubme', args[0])

        mock_debug.side_effect = fake_debug
        self.flags(flat_injected=False)
        self.flags(enabled=False, group='vnc')

        # Call spawn(). We don't care what it does as long as it generates
        # the log message, which we check below.
        with mock.patch.object(self._vmops, '_volumeops') as mock_vo:
            mock_vo.attach_root_volume.side_effect = test.TestingException
            try:
                self._vmops.spawn(
                    self._context, self._instance, self._image_meta,
                    injected_files=None, admin_password=None,
                    network_info=[], block_device_info=bdi
                )
            except test.TestingException:
                pass

        # Check that the relevant log message was generated, and therefore
        # that we checked it was scrubbed
        self.assertTrue(self.password_logged)
def _get_metadata(self, is_image_used=True):
if is_image_used:
image_id = '70a599e0-31e7-49b7-b260-868f441e862b'
else:
image_id = None
return ("name:fake_display_name\n"
"userid:fake_user\n"
"username:None\n"
"projectid:fake_project\n"
"projectname:None\n"
"flavor:name:m1.small\n"
"flavor:memory_mb:512\n"
"flavor:vcpus:1\n"
"flavor:ephemeral_gb:0\n"
"flavor:root_gb:10\n"
"flavor:swap:0\n"
"imageid:%(image_id)s\n"
"package:%(version)s\n" % {
'image_id': image_id,
'version': version.version_string_with_package()})
    @mock.patch('nova.virt.vmwareapi.vm_util.power_on_instance')
    @mock.patch.object(vmops.VMwareVMOps, '_use_disk_image_as_linked_clone')
    @mock.patch.object(vmops.VMwareVMOps, '_fetch_image_if_missing')
    @mock.patch(
        'nova.virt.vmwareapi.imagecache.ImageCacheManager.enlist_image')
    @mock.patch.object(vmops.VMwareVMOps, 'build_virtual_machine')
    @mock.patch.object(vmops.VMwareVMOps, '_get_vm_config_info')
    @mock.patch.object(vmops.VMwareVMOps, '_get_extra_specs')
    @mock.patch.object(nova.virt.vmwareapi.images.VMwareImage,
                       'from_image')
    def test_spawn_non_root_block_device(self, from_image,
                                         get_extra_specs,
                                         get_vm_config_info,
                                         build_virtual_machine,
                                         enlist_image, fetch_image,
                                         use_disk_image,
                                         power_on_instance):
        """Non-root volumes in the BDM are attached with their disk_bus.

        The root disk still comes from the image (linked clone); each extra
        volume is attached via attach_volume with its requested adapter.
        """
        self._instance.flavor = self._flavor
        extra_specs = get_extra_specs.return_value
        connection_info1 = {'data': 'fake-data1', 'serial': 'volume-fake-id1'}
        connection_info2 = {'data': 'fake-data2', 'serial': 'volume-fake-id2'}
        bdm = [{'connection_info': connection_info1,
                'disk_bus': constants.ADAPTER_TYPE_IDE,
                'mount_device': '/dev/sdb'},
               {'connection_info': connection_info2,
                'disk_bus': constants.DEFAULT_ADAPTER_TYPE,
                'mount_device': '/dev/sdc'}]
        bdi = {'block_device_mapping': bdm, 'root_device_name': '/dev/sda'}
        self.flags(flat_injected=False)
        self.flags(enabled=False, group='vnc')

        # Image comfortably fits within the flavor's root disk.
        image_size = (self._instance.root_gb) * units.Gi / 2
        image_info = images.VMwareImage(
                image_id=self._image_id,
                file_size=image_size)
        vi = get_vm_config_info.return_value
        from_image.return_value = image_info
        build_virtual_machine.return_value = 'fake-vm-ref'

        with mock.patch.object(self._vmops, '_volumeops') as volumeops:
            self._vmops.spawn(self._context, self._instance, self._image_meta,
                              injected_files=None, admin_password=None,
                              network_info=[], block_device_info=bdi)

            from_image.assert_called_once_with(self._instance.image_ref,
                                               self._image_meta)
            get_vm_config_info.assert_called_once_with(self._instance,
                image_info, extra_specs)
            build_virtual_machine.assert_called_once_with(self._instance,
                image_info, vi.dc_info, vi.datastore, [],
                extra_specs, self._get_metadata())
            enlist_image.assert_called_once_with(image_info.image_id,
                                                 vi.datastore, vi.dc_info.ref)
            fetch_image.assert_called_once_with(self._context, vi)
            use_disk_image.assert_called_once_with('fake-vm-ref', vi)
            volumeops.attach_volume.assert_any_call(
                connection_info1, self._instance, constants.ADAPTER_TYPE_IDE)
            volumeops.attach_volume.assert_any_call(
                connection_info2, self._instance,
                constants.DEFAULT_ADAPTER_TYPE)
    @mock.patch('nova.virt.vmwareapi.vm_util.power_on_instance')
    @mock.patch.object(vmops.VMwareVMOps, 'build_virtual_machine')
    @mock.patch.object(vmops.VMwareVMOps, '_get_vm_config_info')
    @mock.patch.object(vmops.VMwareVMOps, '_get_extra_specs')
    @mock.patch.object(nova.virt.vmwareapi.images.VMwareImage,
                       'from_image')
    def test_spawn_with_no_image_and_block_devices(self, from_image,
                                                   get_extra_specs,
                                                   get_vm_config_info,
                                                   build_virtual_machine,
                                                   power_on_instance):
        """Boot-from-volume: boot_index 0 becomes the root volume.

        With no image ref, the boot_index==0 volume is attached via
        attach_root_volume; the remaining volumes via attach_volume, each
        with its requested disk bus.
        """
        self._instance.image_ref = None
        self._instance.flavor = self._flavor
        extra_specs = get_extra_specs.return_value

        connection_info1 = {'data': 'fake-data1', 'serial': 'volume-fake-id1'}
        connection_info2 = {'data': 'fake-data2', 'serial': 'volume-fake-id2'}
        connection_info3 = {'data': 'fake-data3', 'serial': 'volume-fake-id3'}
        bdm = [{'boot_index': 0,
                'connection_info': connection_info1,
                'disk_bus': constants.ADAPTER_TYPE_IDE},
               {'boot_index': 1,
                'connection_info': connection_info2,
                'disk_bus': constants.DEFAULT_ADAPTER_TYPE},
               {'boot_index': 2,
                'connection_info': connection_info3,
                'disk_bus': constants.ADAPTER_TYPE_LSILOGICSAS}]
        bdi = {'block_device_mapping': bdm}
        self.flags(flat_injected=False)
        self.flags(enabled=False, group='vnc')

        image_info = mock.sentinel.image_info
        vi = get_vm_config_info.return_value
        from_image.return_value = image_info
        build_virtual_machine.return_value = 'fake-vm-ref'

        with mock.patch.object(self._vmops, '_volumeops') as volumeops:
            self._vmops.spawn(self._context, self._instance, self._image_meta,
                              injected_files=None, admin_password=None,
                              network_info=[], block_device_info=bdi)

            from_image.assert_called_once_with(self._instance.image_ref,
                                               self._image_meta)
            get_vm_config_info.assert_called_once_with(self._instance,
                image_info, extra_specs)
            build_virtual_machine.assert_called_once_with(self._instance,
                image_info, vi.dc_info, vi.datastore, [],
                extra_specs, self._get_metadata(is_image_used=False))
            volumeops.attach_root_volume.assert_called_once_with(
                connection_info1, self._instance, vi.datastore.ref,
                constants.ADAPTER_TYPE_IDE)
            volumeops.attach_volume.assert_any_call(
                connection_info2, self._instance,
                constants.DEFAULT_ADAPTER_TYPE)
            volumeops.attach_volume.assert_any_call(
                connection_info3, self._instance,
                constants.ADAPTER_TYPE_LSILOGICSAS)
    @mock.patch('nova.virt.vmwareapi.vm_util.power_on_instance')
    @mock.patch.object(vmops.VMwareVMOps, 'build_virtual_machine')
    @mock.patch.object(vmops.VMwareVMOps, '_get_vm_config_info')
    @mock.patch.object(vmops.VMwareVMOps, '_get_extra_specs')
    @mock.patch.object(nova.virt.vmwareapi.images.VMwareImage,
                       'from_image')
    def test_spawn_unsupported_hardware(self, from_image,
                                        get_extra_specs,
                                        get_vm_config_info,
                                        build_virtual_machine,
                                        power_on_instance):
        """An unknown disk_bus on the root volume raises UnsupportedHardware.

        The VM is still built first; the failure happens when the root
        volume's adapter type is validated during attach.
        """
        self._instance.image_ref = None
        self._instance.flavor = self._flavor
        extra_specs = get_extra_specs.return_value
        connection_info = {'data': 'fake-data', 'serial': 'volume-fake-id'}
        bdm = [{'boot_index': 0,
                'connection_info': connection_info,
                'disk_bus': 'invalid_adapter_type'}]
        bdi = {'block_device_mapping': bdm}
        self.flags(flat_injected=False)
        self.flags(enabled=False, group='vnc')

        image_info = mock.sentinel.image_info
        vi = get_vm_config_info.return_value
        from_image.return_value = image_info
        build_virtual_machine.return_value = 'fake-vm-ref'

        self.assertRaises(exception.UnsupportedHardware, self._vmops.spawn,
                          self._context, self._instance, self._image_meta,
                          injected_files=None,
                          admin_password=None, network_info=[],
                          block_device_info=bdi)

        from_image.assert_called_once_with(self._instance.image_ref,
                                           self._image_meta)
        get_vm_config_info.assert_called_once_with(
            self._instance, image_info, extra_specs)
        build_virtual_machine.assert_called_once_with(self._instance,
            image_info, vi.dc_info, vi.datastore, [],
            extra_specs, self._get_metadata(is_image_used=False))
    def test_get_ds_browser(self):
        """The datastore browser is fetched once and cached by moref value."""
        cache = self._vmops._datastore_browser_mapping
        ds_browser = mock.Mock()
        moref = vmwareapi_fake.ManagedObjectReference('datastore-100')
        # Not cached yet before the first lookup.
        self.assertIsNone(cache.get(moref.value))
        mock_call_method = mock.Mock(return_value=ds_browser)
        with mock.patch.object(self._session, '_call_method',
                               mock_call_method):
            ret = self._vmops._get_ds_browser(moref)
        mock_call_method.assert_called_once_with(vim_util,
            'get_dynamic_property', moref, 'Datastore', 'browser')
        self.assertIs(ds_browser, ret)
        # The fetched browser must now be cached under the moref value.
        self.assertIs(ds_browser, cache.get(moref.value))
    @mock.patch.object(
        vmops.VMwareVMOps, '_sized_image_exists', return_value=False)
    @mock.patch.object(vmops.VMwareVMOps, '_extend_virtual_disk')
    @mock.patch.object(vm_util, 'copy_virtual_disk')
    def _test_use_disk_image_as_linked_clone(self,
                                             mock_copy_virtual_disk,
                                             mock_extend_virtual_disk,
                                             mock_sized_image_exists,
                                             flavor_fits_image=False):
        """Linked clone path: a flavor-sized copy of the cached image is made.

        The cached image is copied to <image_id>.<root_gb>.vmdk, extended
        only when the flavor is larger than the image, and attached to the
        VM as a linked clone.
        """
        extra_specs = vm_util.ExtraSpecs()
        # 10 GB image exactly fills the root disk; 5 GB leaves room to grow.
        file_size = 10 * units.Gi if flavor_fits_image else 5 * units.Gi
        image_info = images.VMwareImage(
                image_id=self._image_id,
                file_size=file_size,
                linked_clone=False)
        cache_root_folder = self._ds.build_path("vmware_base", self._image_id)
        mock_imagecache = mock.Mock()
        mock_imagecache.get_image_cache_folder.return_value = cache_root_folder
        vi = vmops.VirtualMachineInstanceConfigInfo(
                self._instance, image_info,
                self._ds, self._dc_info, mock_imagecache, extra_specs)

        sized_cached_image_ds_loc = cache_root_folder.join(
                "%s.%s.vmdk" % (self._image_id, vi.root_gb))

        self._vmops._volumeops = mock.Mock()
        mock_attach_disk_to_vm = self._vmops._volumeops.attach_disk_to_vm

        self._vmops._use_disk_image_as_linked_clone("fake_vm_ref", vi)

        mock_copy_virtual_disk.assert_called_once_with(
                self._session, self._dc_info.ref,
                str(vi.cache_image_path),
                str(sized_cached_image_ds_loc))

        if not flavor_fits_image:
            # Only an image smaller than the flavor needs extending.
            mock_extend_virtual_disk.assert_called_once_with(
                    self._instance, vi.root_gb * units.Mi,
                    str(sized_cached_image_ds_loc),
                    self._dc_info.ref)

        mock_attach_disk_to_vm.assert_called_once_with(
                "fake_vm_ref", self._instance, vi.ii.adapter_type,
                vi.ii.disk_type,
                str(sized_cached_image_ds_loc),
                vi.root_gb * units.Mi, False,
                disk_io_limits=vi._extra_specs.disk_io_limits)
def test_use_disk_image_as_linked_clone(self):
self._test_use_disk_image_as_linked_clone()
def test_use_disk_image_as_linked_clone_flavor_fits_image(self):
self._test_use_disk_image_as_linked_clone(flavor_fits_image=True)
    @mock.patch.object(vmops.VMwareVMOps, '_extend_virtual_disk')
    @mock.patch.object(vm_util, 'copy_virtual_disk')
    def _test_use_disk_image_as_full_clone(self,
                                           mock_copy_virtual_disk,
                                           mock_extend_virtual_disk,
                                           flavor_fits_image=False):
        """Full clone path: the cached image is copied into the VM folder.

        The copy lands at [fake_ds] fake_uuid/fake_uuid.vmdk, is extended
        only when the flavor is larger than the image, and attached as a
        regular (non-linked) disk.
        """
        extra_specs = vm_util.ExtraSpecs()
        # 10 GB image exactly fills the root disk; 5 GB leaves room to grow.
        file_size = 10 * units.Gi if flavor_fits_image else 5 * units.Gi
        image_info = images.VMwareImage(
                image_id=self._image_id,
                file_size=file_size,
                linked_clone=False)
        cache_root_folder = self._ds.build_path("vmware_base", self._image_id)
        mock_imagecache = mock.Mock()
        mock_imagecache.get_image_cache_folder.return_value = cache_root_folder
        vi = vmops.VirtualMachineInstanceConfigInfo(
                self._instance, image_info,
                self._ds, self._dc_info, mock_imagecache,
                extra_specs)

        self._vmops._volumeops = mock.Mock()
        mock_attach_disk_to_vm = self._vmops._volumeops.attach_disk_to_vm

        self._vmops._use_disk_image_as_full_clone("fake_vm_ref", vi)

        mock_copy_virtual_disk.assert_called_once_with(
                self._session, self._dc_info.ref,
                str(vi.cache_image_path),
                '[fake_ds] fake_uuid/fake_uuid.vmdk')

        if not flavor_fits_image:
            # Only an image smaller than the flavor needs extending.
            mock_extend_virtual_disk.assert_called_once_with(
                    self._instance, vi.root_gb * units.Mi,
                    '[fake_ds] fake_uuid/fake_uuid.vmdk', self._dc_info.ref)

        mock_attach_disk_to_vm.assert_called_once_with(
                "fake_vm_ref", self._instance, vi.ii.adapter_type,
                vi.ii.disk_type, '[fake_ds] fake_uuid/fake_uuid.vmdk',
                vi.root_gb * units.Mi, False,
                disk_io_limits=vi._extra_specs.disk_io_limits)
def test_use_disk_image_as_full_clone(self):
self._test_use_disk_image_as_full_clone()
def test_use_disk_image_as_full_clone_image_too_big(self):
self._test_use_disk_image_as_full_clone(flavor_fits_image=True)
    @mock.patch.object(vmops.VMwareVMOps, '_attach_cdrom_to_vm')
    @mock.patch.object(vm_util, 'create_virtual_disk')
    def _test_use_iso_image(self,
                            mock_create_virtual_disk,
                            mock_attach_cdrom,
                            with_root_disk):
        """ISO boot: the ISO is attached as a CDROM.

        When the flavor has a root disk (with_root_disk=True) a blank
        root disk of root_gb size is also created and attached.
        """
        extra_specs = vm_util.ExtraSpecs()
        image_info = images.VMwareImage(
                image_id=self._image_id,
                file_size=10 * units.Mi,
                linked_clone=True)
        cache_root_folder = self._ds.build_path("vmware_base", self._image_id)
        mock_imagecache = mock.Mock()
        mock_imagecache.get_image_cache_folder.return_value = cache_root_folder
        vi = vmops.VirtualMachineInstanceConfigInfo(
                self._instance, image_info,
                self._ds, self._dc_info, mock_imagecache, extra_specs)

        self._vmops._volumeops = mock.Mock()
        mock_attach_disk_to_vm = self._vmops._volumeops.attach_disk_to_vm

        self._vmops._use_iso_image("fake_vm_ref", vi)

        mock_attach_cdrom.assert_called_once_with(
                "fake_vm_ref", self._instance, self._ds.ref,
                str(vi.cache_image_path))

        if with_root_disk:
            mock_create_virtual_disk.assert_called_once_with(
                    self._session, self._dc_info.ref,
                    vi.ii.adapter_type, vi.ii.disk_type,
                    '[fake_ds] fake_uuid/fake_uuid.vmdk',
                    vi.root_gb * units.Mi)
            # root disk is not a linked clone
            linked_clone = False
            mock_attach_disk_to_vm.assert_called_once_with(
                    "fake_vm_ref", self._instance,
                    vi.ii.adapter_type, vi.ii.disk_type,
                    '[fake_ds] fake_uuid/fake_uuid.vmdk',
                    vi.root_gb * units.Mi, linked_clone,
                    disk_io_limits=vi._extra_specs.disk_io_limits)
def test_use_iso_image_with_root_disk(self):
self._test_use_iso_image(with_root_disk=True)
def test_use_iso_image_without_root_disk(self):
self._test_use_iso_image(with_root_disk=False)
def _verify_spawn_method_calls(self, mock_call_method, extras=None):
# TODO(vui): More explicit assertions of spawn() behavior
# are waiting on additional refactoring pertaining to image
# handling/manipulation. Till then, we continue to assert on the
# sequence of VIM operations invoked.
expected_methods = ['get_dynamic_property',
'SearchDatastore_Task',
'CreateVirtualDisk_Task',
'DeleteDatastoreFile_Task',
'MoveDatastoreFile_Task',
'DeleteDatastoreFile_Task',
'SearchDatastore_Task',
'ExtendVirtualDisk_Task',
]
if extras:
expected_methods.extend(extras)
recorded_methods = [c[1][1] for c in mock_call_method.mock_calls]
self.assertEqual(expected_methods, recorded_methods)
    @mock.patch(
        'nova.virt.vmwareapi.vmops.VMwareVMOps._update_vnic_index')
    @mock.patch(
        'nova.virt.vmwareapi.vmops.VMwareVMOps._configure_config_drive')
    @mock.patch('nova.virt.vmwareapi.ds_util.get_datastore')
    @mock.patch(
        'nova.virt.vmwareapi.vmops.VMwareVMOps.get_datacenter_ref_and_name')
    @mock.patch('nova.virt.vmwareapi.vif.get_vif_info',
                return_value=[])
    @mock.patch('nova.utils.is_neutron',
                return_value=False)
    @mock.patch('nova.virt.vmwareapi.vm_util.get_vm_create_spec',
                return_value='fake_create_spec')
    @mock.patch('nova.virt.vmwareapi.vm_util.create_vm',
                return_value='fake_vm_ref')
    @mock.patch('nova.virt.vmwareapi.ds_util.mkdir')
    @mock.patch('nova.virt.vmwareapi.vmops.VMwareVMOps._set_machine_id')
    @mock.patch(
        'nova.virt.vmwareapi.imagecache.ImageCacheManager.enlist_image')
    @mock.patch.object(vmops.VMwareVMOps, '_get_and_set_vnc_config')
    @mock.patch('nova.virt.vmwareapi.vm_util.power_on_instance')
    @mock.patch('nova.virt.vmwareapi.vm_util.copy_virtual_disk')
    # TODO(dims): Need to add tests for create_virtual_disk after the
    #             disk/image code in spawn gets refactored
    def _test_spawn(self,
                    mock_copy_virtual_disk,
                    mock_power_on_instance,
                    mock_get_and_set_vnc_config,
                    mock_enlist_image,
                    mock_set_machine_id,
                    mock_mkdir,
                    mock_create_vm,
                    mock_get_create_spec,
                    mock_is_neutron,
                    mock_get_vif_info,
                    mock_get_datacenter_ref_and_name,
                    mock_get_datastore,
                    mock_configure_config_drive,
                    mock_update_vnic_index,
                    block_device_info=None,
                    extra_specs=None,
                    config_drive=False):
        """End-to-end spawn() test with the VIM layer fully mocked.

        Asserts the VM is created from the create spec, machine id and VNC
        config are set, the image is fetched into the cache and copied with
        a resize, progress/volumes/config-drive handling occur, and the VM
        is powered on.  ``block_device_info``/``config_drive`` let callers
        exercise the volume-attach and config-drive branches.
        """
        if extra_specs is None:
            extra_specs = vm_util.ExtraSpecs()

        # Image half the size of the root disk so the resized-copy path runs.
        image_size = (self._instance.root_gb) * units.Gi / 2
        image = {
            'id': self._image_id,
            'disk_format': 'vmdk',
            'size': image_size,
        }
        image = objects.ImageMeta.from_dict(image)
        image_info = images.VMwareImage(
            image_id=self._image_id,
            file_size=image_size)
        vi = self._vmops._get_vm_config_info(
            self._instance, image_info, extra_specs)

        self._vmops._volumeops = mock.Mock()
        network_info = mock.Mock()
        mock_get_datastore.return_value = self._ds
        mock_get_datacenter_ref_and_name.return_value = self._dc_info
        mock_call_method = mock.Mock(return_value='fake_task')

        if extra_specs is None:
            extra_specs = vm_util.ExtraSpecs()

        with contextlib.nested(
                mock.patch.object(self._session, '_wait_for_task'),
                mock.patch.object(self._session, '_call_method',
                                  mock_call_method),
                mock.patch.object(uuidutils, 'generate_uuid',
                                  return_value='tmp-uuid'),
                mock.patch.object(images, 'fetch_image'),
                mock.patch.object(self._vmops, '_get_extra_specs',
                                  return_value=extra_specs),
                mock.patch.object(self._vmops, '_get_instance_metadata',
                                  return_value='fake-metadata')
        ) as (_wait_for_task, _call_method, _generate_uuid, _fetch_image,
              _get_extra_specs, _get_instance_metadata):
            self._vmops.spawn(self._context, self._instance, image,
                              injected_files='fake_files',
                              admin_password='password',
                              network_info=network_info,
                              block_device_info=block_device_info)

            mock_is_neutron.assert_called_once_with()

            self.assertEqual(2, mock_mkdir.call_count)

            mock_get_vif_info.assert_called_once_with(
                    self._session, self._cluster.obj, False,
                    constants.DEFAULT_VIF_MODEL, network_info)
            mock_get_create_spec.assert_called_once_with(
                    self._session.vim.client.factory,
                    self._instance,
                    'fake_ds',
                    [],
                    extra_specs,
                    constants.DEFAULT_OS_TYPE,
                    profile_spec=None,
                    metadata='fake-metadata')
            mock_create_vm.assert_called_once_with(
                    self._session,
                    self._instance,
                    'fake_vm_folder',
                    'fake_create_spec',
                    self._cluster.resourcePool)
            mock_get_and_set_vnc_config.assert_called_once_with(
                self._session.vim.client.factory,
                self._instance,
                'fake_vm_ref')
            mock_set_machine_id.assert_called_once_with(
                self._session.vim.client.factory,
                self._instance,
                network_info,
                vm_ref='fake_vm_ref')
            mock_power_on_instance.assert_called_once_with(
                self._session, self._instance, vm_ref='fake_vm_ref')

            if (block_device_info and
                'block_device_mapping' in block_device_info):
                # boot_index 0 goes through attach_root_volume; the rest
                # through attach_volume, honouring any requested disk bus.
                bdms = block_device_info['block_device_mapping']
                for bdm in bdms:
                    mock_attach_root = (
                        self._vmops._volumeops.attach_root_volume)
                    mock_attach = self._vmops._volumeops.attach_volume
                    adapter_type = bdm.get('disk_bus') or vi.ii.adapter_type
                    if bdm.get('boot_index') == 0:
                        mock_attach_root.assert_any_call(
                            bdm['connection_info'], self._instance,
                            self._ds.ref, adapter_type)
                    else:
                        mock_attach.assert_any_call(
                            bdm['connection_info'], self._instance,
                            self._ds.ref, adapter_type)

            mock_enlist_image.assert_called_once_with(
                        self._image_id, self._ds, self._dc_info.ref)

            upload_file_name = 'vmware_temp/tmp-uuid/%s/%s-flat.vmdk' % (
                    self._image_id, self._image_id)
            _fetch_image.assert_called_once_with(
                    self._context,
                    self._instance,
                    self._session._host,
                    self._session._port,
                    self._dc_info.name,
                    self._ds.name,
                    upload_file_name,
                    cookies='Fake-CookieJar')
            self.assertTrue(len(_wait_for_task.mock_calls) > 0)

            # Ephemerals/swap add an extra CreateVirtualDisk_Task call.
            extras = None
            if block_device_info and ('ephemerals' in block_device_info or
                                      'swap' in block_device_info):
                extras = ['CreateVirtualDisk_Task']
            self._verify_spawn_method_calls(_call_method, extras)

            dc_ref = 'fake_dc_ref'
            source_file = six.text_type('[fake_ds] vmware_base/%s/%s.vmdk' %
                          (self._image_id, self._image_id))
            dest_file = six.text_type('[fake_ds] vmware_base/%s/%s.%d.vmdk' %
                          (self._image_id, self._image_id,
                           self._instance['root_gb']))
            # TODO(dims): add more tests for copy_virtual_disk after
            #             the disk/image code in spawn gets refactored
            mock_copy_virtual_disk.assert_called_with(self._session,
                                                      dc_ref,
                                                      source_file,
                                                      dest_file)

            if config_drive:
                mock_configure_config_drive.assert_called_once_with(
                        self._instance, 'fake_vm_ref', self._dc_info,
                        self._ds, 'fake_files', 'password')
            mock_update_vnic_index.assert_called_once_with(
                        self._context, self._instance, network_info)
    @mock.patch.object(ds_util, 'get_datastore')
    @mock.patch.object(vmops.VMwareVMOps, 'get_datacenter_ref_and_name')
    def _test_get_spawn_vm_config_info(self,
                                       mock_get_datacenter_ref_and_name,
                                       mock_get_datastore,
                                       image_size_bytes=0):
        """_get_vm_config_info returns a fully populated config object.

        Verifies the returned object carries the instance, image info,
        datastore, root_gb, extra specs and the derived image-cache paths.
        Callers pick image_size_bytes to exercise the size validation.
        """
        image_info = images.VMwareImage(
            image_id=self._image_id,
            file_size=image_size_bytes,
            linked_clone=True)

        mock_get_datastore.return_value = self._ds
        mock_get_datacenter_ref_and_name.return_value = self._dc_info
        extra_specs = vm_util.ExtraSpecs()

        vi = self._vmops._get_vm_config_info(self._instance, image_info,
                                             extra_specs)
        self.assertEqual(image_info, vi.ii)
        self.assertEqual(self._ds, vi.datastore)
        self.assertEqual(self._instance.root_gb, vi.root_gb)
        self.assertEqual(self._instance, vi.instance)
        self.assertEqual(self._instance.uuid, vi.instance.uuid)
        self.assertEqual(extra_specs, vi._extra_specs)

        # Cache locations are derived from the datastore name and image id.
        cache_image_path = '[%s] vmware_base/%s/%s.vmdk' % (
            self._ds.name, self._image_id, self._image_id)
        self.assertEqual(cache_image_path, str(vi.cache_image_path))

        cache_image_folder = '[%s] vmware_base/%s' % (
            self._ds.name, self._image_id)
        self.assertEqual(cache_image_folder, str(vi.cache_image_folder))
def test_get_spawn_vm_config_info(self):
image_size = (self._instance.root_gb) * units.Gi / 2
self._test_get_spawn_vm_config_info(image_size_bytes=image_size)
def test_get_spawn_vm_config_info_image_too_big(self):
image_size = (self._instance.root_gb + 1) * units.Gi
self.assertRaises(exception.InstanceUnacceptable,
self._test_get_spawn_vm_config_info,
image_size_bytes=image_size)
def test_spawn(self):
self._test_spawn()
def test_spawn_config_drive_enabled(self):
self.flags(force_config_drive=True)
self._test_spawn(config_drive=True)
def test_spawn_with_block_device_info(self):
block_device_info = {
'block_device_mapping': [{'boot_index': 0,
'connection_info': 'fake',
'mount_device': '/dev/vda'}]
}
self._test_spawn(block_device_info=block_device_info)
def test_spawn_with_block_device_info_with_config_drive(self):
self.flags(force_config_drive=True)
block_device_info = {
'block_device_mapping': [{'boot_index': 0,
'connection_info': 'fake',
'mount_device': '/dev/vda'}]
}
self._test_spawn(block_device_info=block_device_info,
config_drive=True)
def _spawn_with_block_device_info_ephemerals(self, ephemerals):
block_device_info = {'ephemerals': ephemerals}
self._test_spawn(block_device_info=block_device_info)
def test_spawn_with_block_device_info_ephemerals(self):
ephemerals = [{'device_type': 'disk',
'disk_bus': 'virtio',
'device_name': '/dev/vdb',
'size': 1}]
self._spawn_with_block_device_info_ephemerals(ephemerals)
def test_spawn_with_block_device_info_ephemerals_no_disk_bus(self):
ephemerals = [{'device_type': 'disk',
'disk_bus': None,
'device_name': '/dev/vdb',
'size': 1}]
self._spawn_with_block_device_info_ephemerals(ephemerals)
def test_spawn_with_block_device_info_swap(self):
block_device_info = {'swap': {'disk_bus': None,
'swap_size': 512,
'device_name': '/dev/sdb'}}
self._test_spawn(block_device_info=block_device_info)
    @mock.patch('nova.virt.vmwareapi.vm_util.power_on_instance')
    @mock.patch.object(vmops.VMwareVMOps, '_create_and_attach_thin_disk')
    @mock.patch.object(vmops.VMwareVMOps, '_use_disk_image_as_linked_clone')
    @mock.patch.object(vmops.VMwareVMOps, '_fetch_image_if_missing')
    @mock.patch(
        'nova.virt.vmwareapi.imagecache.ImageCacheManager.enlist_image')
    @mock.patch.object(vmops.VMwareVMOps, 'build_virtual_machine')
    @mock.patch.object(vmops.VMwareVMOps, '_get_vm_config_info')
    @mock.patch.object(vmops.VMwareVMOps, '_get_extra_specs')
    @mock.patch.object(nova.virt.vmwareapi.images.VMwareImage,
                       'from_image')
    def test_spawn_with_ephemerals_and_swap(self, from_image,
                                            get_extra_specs,
                                            get_vm_config_info,
                                            build_virtual_machine,
                                            enlist_image,
                                            fetch_image,
                                            use_disk_image,
                                            create_and_attach_thin_disk,
                                            power_on_instance):
        """A thin disk is created for every ephemeral and the swap disk.

        Ephemeral sizes are GB->Mi scaled, the swap size KB->Ki scaled;
        each lands under the instance folder and the VM is powered on.
        """
        self._instance.flavor = objects.Flavor(vcpus=1, memory_mb=512,
                                               name="m1.tiny", root_gb=1,
                                               ephemeral_gb=1, swap=512,
                                               extra_specs={})
        extra_specs = self._vmops._get_extra_specs(self._instance.flavor)
        ephemerals = [{'device_type': 'disk',
                       'disk_bus': None,
                       'device_name': '/dev/vdb',
                       'size': 1},
                      {'device_type': 'disk',
                       'disk_bus': None,
                       'device_name': '/dev/vdc',
                       'size': 1}]
        swap = {'disk_bus': None, 'swap_size': 512, 'device_name': '/dev/vdd'}
        bdi = {'block_device_mapping': [], 'root_device_name': '/dev/sda',
               'ephemerals': ephemerals, 'swap': swap}
        metadata = self._vmops._get_instance_metadata(self._context,
                                                      self._instance)
        self.flags(enabled=False, group='vnc')
        self.flags(flat_injected=False)

        image_size = (self._instance.root_gb) * units.Gi / 2
        image_info = images.VMwareImage(
                image_id=self._image_id,
                file_size=image_size)
        vi = get_vm_config_info.return_value
        from_image.return_value = image_info
        build_virtual_machine.return_value = 'fake-vm-ref'

        self._vmops.spawn(self._context, self._instance, {},
                          injected_files=None, admin_password=None,
                          network_info=[], block_device_info=bdi)

        from_image.assert_called_once_with(self._instance.image_ref, {})
        get_vm_config_info.assert_called_once_with(self._instance,
            image_info, extra_specs)
        build_virtual_machine.assert_called_once_with(self._instance,
            image_info, vi.dc_info, vi.datastore, [], extra_specs, metadata)
        enlist_image.assert_called_once_with(image_info.image_id,
                                             vi.datastore, vi.dc_info.ref)
        fetch_image.assert_called_once_with(self._context, vi)
        use_disk_image.assert_called_once_with('fake-vm-ref', vi)

        # _create_and_attach_thin_disk should be called for each ephemeral
        # and swap disk
        eph0_path = str(ds_obj.DatastorePath(vi.datastore.name, 'fake_uuid',
                                             'ephemeral_0.vmdk'))
        eph1_path = str(ds_obj.DatastorePath(vi.datastore.name, 'fake_uuid',
                                             'ephemeral_1.vmdk'))
        swap_path = str(ds_obj.DatastorePath(vi.datastore.name, 'fake_uuid',
                                             'swap.vmdk'))
        create_and_attach_thin_disk.assert_has_calls([
            mock.call(self._instance, 'fake-vm-ref', vi.dc_info,
                      ephemerals[0]['size'] * units.Mi, vi.ii.adapter_type,
                      eph0_path),
            mock.call(self._instance, 'fake-vm-ref', vi.dc_info,
                      ephemerals[1]['size'] * units.Mi, vi.ii.adapter_type,
                      eph1_path),
            mock.call(self._instance, 'fake-vm-ref', vi.dc_info,
                      swap['swap_size'] * units.Ki, vi.ii.adapter_type,
                      swap_path)
        ])

        power_on_instance.assert_called_once_with(self._session,
                                                  self._instance,
                                                  vm_ref='fake-vm-ref')
def _get_fake_vi(self):
    """Build a VirtualMachineInstanceConfigInfo around a small fake image."""
    fake_image = images.VMwareImage(image_id=self._image_id,
                                    file_size=7,
                                    linked_clone=False)
    return vmops.VirtualMachineInstanceConfigInfo(
        self._instance, fake_image, self._ds, self._dc_info, mock.Mock())
@mock.patch.object(vm_util, 'create_virtual_disk')
def test_create_and_attach_thin_disk(self, mock_create):
    """A thin disk is created on the datastore and attached to the VM."""
    vi = self._get_fake_vi()
    volumeops = mock.Mock()
    self._vmops._volumeops = volumeops
    disk_path = str(ds_obj.DatastorePath(vi.datastore.name, 'fake_uuid',
                                         'fake-filename'))
    self._vmops._create_and_attach_thin_disk(
        self._instance, 'fake-vm-ref', vi.dc_info, 1,
        'fake-adapter-type', disk_path)
    mock_create.assert_called_once_with(
        self._session, self._dc_info.ref, 'fake-adapter-type', 'thin',
        disk_path, 1)
    volumeops.attach_disk_to_vm.assert_called_once_with(
        'fake-vm-ref', self._instance, 'fake-adapter-type', 'thin',
        disk_path, 1, False)
def test_create_ephemeral_with_bdi(self):
    """Ephemerals listed in block_device_info drive the disk layout."""
    bdi_ephemerals = [{'device_type': 'disk',
                       'disk_bus': 'virtio',
                       'device_name': '/dev/vdb',
                       'size': 1}]
    vi = self._get_fake_vi()
    with mock.patch.object(
            self._vmops, '_create_and_attach_thin_disk') as mock_caa:
        self._vmops._create_ephemeral({'ephemerals': bdi_ephemerals},
                                      self._instance, 'fake-vm-ref',
                                      vi.dc_info, vi.datastore,
                                      'fake_uuid', vi.ii.adapter_type)
    mock_caa.assert_called_once_with(
        self._instance, 'fake-vm-ref', vi.dc_info, 1 * units.Mi,
        'virtio', '[fake_ds] fake_uuid/ephemeral_0.vmdk')
def _test_create_ephemeral_from_instance(self, bdi):
    """Without usable BDM ephemerals, instance.ephemeral_gb is used
    together with the default adapter type."""
    vi = self._get_fake_vi()
    with mock.patch.object(
            self._vmops, '_create_and_attach_thin_disk') as mock_caa:
        self._vmops._create_ephemeral(bdi, self._instance, 'fake-vm-ref',
                                      vi.dc_info, vi.datastore,
                                      'fake_uuid', vi.ii.adapter_type)
    mock_caa.assert_called_once_with(
        self._instance, 'fake-vm-ref', vi.dc_info, 1 * units.Mi,
        constants.DEFAULT_ADAPTER_TYPE,
        '[fake_ds] fake_uuid/ephemeral_0.vmdk')

def test_create_ephemeral_with_bdi_but_no_ephemerals(self):
    # An empty ephemerals list falls back to the flavor value.
    self._instance.ephemeral_gb = 1
    self._test_create_ephemeral_from_instance({'ephemerals': []})

def test_create_ephemeral_with_no_bdi(self):
    # No block_device_info at all also falls back to the flavor value.
    self._instance.ephemeral_gb = 1
    self._test_create_ephemeral_from_instance(None)
def _test_create_swap_from_instance(self, bdi):
    """Swap size comes from the BDM when present, else from the flavor."""
    vi = self._get_fake_vi()
    self._instance.flavor = objects.Flavor(
        vcpus=1, memory_mb=1024, ephemeral_gb=1, swap=1024, extra_specs={})
    with mock.patch.object(
            self._vmops, '_create_and_attach_thin_disk'
    ) as create_and_attach:
        self._vmops._create_swap(bdi, self._instance, 'fake-vm-ref',
                                 vi.dc_info, vi.datastore, 'fake_uuid',
                                 'lsiLogic')
    if bdi is None:
        expected_kb = self._instance.flavor.swap * units.Ki
    else:
        expected_kb = bdi.get('swap', {}).get('swap_size', 0) * units.Ki
    swap_path = str(ds_obj.DatastorePath(vi.datastore.name, 'fake_uuid',
                                         'swap.vmdk'))
    create_and_attach.assert_called_once_with(
        self._instance, 'fake-vm-ref', vi.dc_info, expected_kb,
        'lsiLogic', swap_path)

def test_create_swap_with_bdi(self):
    self._test_create_swap_from_instance(
        {'swap': {'disk_bus': None,
                  'swap_size': 512,
                  'device_name': '/dev/sdb'}})

def test_create_swap_with_no_bdi(self):
    self._test_create_swap_from_instance(None)
def test_build_virtual_machine(self):
    """End-to-end check of build_virtual_machine against the fake vSphere
    backend: VM identity, sizing, NSX extraConfig, datastore and NIC.
    """
    image_id = nova.tests.unit.image.fake.get_valid_image_id()
    image = images.VMwareImage(image_id=image_id)
    extra_specs = vm_util.ExtraSpecs()
    vm_ref = self._vmops.build_virtual_machine(self._instance,
                                               image, self._dc_info,
                                               self._ds,
                                               self.network_info,
                                               extra_specs,
                                               self._metadata)
    vm = vmwareapi_fake._get_object(vm_ref)
    # Test basic VM parameters
    self.assertEqual(self._instance.uuid, vm.name)
    self.assertEqual(self._instance.uuid,
                     vm.get('summary.config.instanceUuid'))
    self.assertEqual(self._instance_values['vcpus'],
                     vm.get('summary.config.numCpu'))
    self.assertEqual(self._instance_values['memory_mb'],
                     vm.get('summary.config.memorySizeMB'))
    # Test NSX config
    for optval in vm.get('config.extraConfig').OptionValue:
        if optval.key == 'nvp.vm-uuid':
            self.assertEqual(self._instance_values['uuid'], optval.value)
            break
    else:
        # for/else: reached only when no OptionValue matched above
        self.fail('nvp.vm-uuid not found in extraConfig')
    # Test that the VM is associated with the specified datastore
    datastores = vm.datastore.ManagedObjectReference
    self.assertEqual(1, len(datastores))
    datastore = vmwareapi_fake._get_object(datastores[0])
    self.assertEqual(self._ds.name, datastore.get('summary.name'))
    # Test that the VM's network is configured as specified
    devices = vm.get('config.hardware.device').VirtualDevice
    for device in devices:
        # Only the e1000 NIC device is of interest here
        if device.obj_name != 'ns0:VirtualE1000':
            continue
        self.assertEqual(self._network_values['address'],
                         device.macAddress)
        break
    else:
        self.fail('NIC not configured')
# Spawn tests exercising every CPU and memory resource-limit combination
# supported by vm_util.Limits via flavor extra specs.
def test_spawn_cpu_limit(self):
    self._test_spawn(extra_specs=vm_util.ExtraSpecs(
        cpu_limits=vm_util.Limits(limit=7)))

def test_spawn_cpu_reservation(self):
    self._test_spawn(extra_specs=vm_util.ExtraSpecs(
        cpu_limits=vm_util.Limits(reservation=7)))

def test_spawn_cpu_allocations(self):
    self._test_spawn(extra_specs=vm_util.ExtraSpecs(
        cpu_limits=vm_util.Limits(limit=7, reservation=6)))

def test_spawn_cpu_shares_level(self):
    self._test_spawn(extra_specs=vm_util.ExtraSpecs(
        cpu_limits=vm_util.Limits(shares_level='high')))

def test_spawn_cpu_shares_custom(self):
    self._test_spawn(extra_specs=vm_util.ExtraSpecs(
        cpu_limits=vm_util.Limits(shares_level='custom',
                                  shares_share=1948)))

def test_spawn_memory_limit(self):
    self._test_spawn(extra_specs=vm_util.ExtraSpecs(
        memory_limits=vm_util.Limits(limit=7)))

def test_spawn_memory_reservation(self):
    self._test_spawn(extra_specs=vm_util.ExtraSpecs(
        memory_limits=vm_util.Limits(reservation=7)))

def test_spawn_memory_allocations(self):
    self._test_spawn(extra_specs=vm_util.ExtraSpecs(
        memory_limits=vm_util.Limits(limit=7, reservation=6)))

def test_spawn_memory_shares_level(self):
    self._test_spawn(extra_specs=vm_util.ExtraSpecs(
        memory_limits=vm_util.Limits(shares_level='high')))

def test_spawn_memory_shares_custom(self):
    self._test_spawn(extra_specs=vm_util.ExtraSpecs(
        memory_limits=vm_util.Limits(shares_level='custom',
                                     shares_share=1948)))
def _validate_extra_specs(self, expected, actual):
    # Field-by-field comparison of the cpu_limits of two ExtraSpecs.
    # NOTE(review): memory_limits is never compared here -- confirm
    # whether that is intentional before relying on these validations.
    self.assertEqual(expected.cpu_limits.limit,
                     actual.cpu_limits.limit)
    self.assertEqual(expected.cpu_limits.reservation,
                     actual.cpu_limits.reservation)
    self.assertEqual(expected.cpu_limits.shares_level,
                     actual.cpu_limits.shares_level)
    self.assertEqual(expected.cpu_limits.shares_share,
                     actual.cpu_limits.shares_share)

def _validate_flavor_extra_specs(self, flavor_extra_specs, expected):
    # Validate that the extra specs are parsed correctly
    # Build a flavor carrying the raw extra specs, parse it via
    # _get_extra_specs and compare against the expected ExtraSpecs.
    flavor = objects.Flavor(name='my-flavor',
                            memory_mb=6,
                            vcpus=28,
                            root_gb=496,
                            ephemeral_gb=8128,
                            swap=33550336,
                            extra_specs=flavor_extra_specs)
    flavor_extra_specs = self._vmops._get_extra_specs(flavor, None)
    self._validate_extra_specs(expected, flavor_extra_specs)
# Extra-spec parsing tests: each raw quota:* dict must parse into the
# equivalent vm_util.Limits on the resulting ExtraSpecs.
def test_extra_specs_cpu_limit(self):
    self._validate_flavor_extra_specs(
        {'quota:cpu_limit': 7},
        vm_util.ExtraSpecs(cpu_limits=vm_util.Limits(limit=7)))

def test_extra_specs_cpu_reservations(self):
    self._validate_flavor_extra_specs(
        {'quota:cpu_reservation': 7},
        vm_util.ExtraSpecs(cpu_limits=vm_util.Limits(reservation=7)))

def test_extra_specs_cpu_allocations(self):
    self._validate_flavor_extra_specs(
        {'quota:cpu_limit': 7, 'quota:cpu_reservation': 6},
        vm_util.ExtraSpecs(cpu_limits=vm_util.Limits(limit=7,
                                                     reservation=6)))

def test_extra_specs_cpu_shares_level(self):
    self._validate_flavor_extra_specs(
        {'quota:cpu_shares_level': 'high'},
        vm_util.ExtraSpecs(
            cpu_limits=vm_util.Limits(shares_level='high')))

def test_extra_specs_cpu_shares_custom(self):
    self._validate_flavor_extra_specs(
        {'quota:cpu_shares_level': 'custom',
         'quota:cpu_shares_share': 1948},
        vm_util.ExtraSpecs(
            cpu_limits=vm_util.Limits(shares_level='custom',
                                      shares_share=1948)))
def _make_vm_config_info(self, is_iso=False, is_sparse_disk=False):
    """Build a VirtualMachineInstanceConfigInfo with a mocked image cache
    whose folder resolves under 'vmware_base'."""
    if is_sparse_disk:
        disk_type = constants.DISK_TYPE_SPARSE
    else:
        disk_type = constants.DEFAULT_DISK_TYPE
    if is_iso:
        file_type = constants.DISK_FORMAT_ISO
    else:
        file_type = constants.DEFAULT_DISK_FORMAT
    image_info = images.VMwareImage(image_id=self._image_id,
                                    file_size=10 * units.Mi,
                                    file_type=file_type,
                                    disk_type=disk_type,
                                    linked_clone=True)
    image_cache = mock.Mock()
    image_cache.get_image_cache_folder.return_value = (
        self._ds.build_path("vmware_base", self._image_id))
    return vmops.VirtualMachineInstanceConfigInfo(
        self._instance, image_info, self._ds, self._dc_info, image_cache)
@mock.patch.object(vmops.VMwareVMOps, 'check_cache_folder')
@mock.patch.object(vmops.VMwareVMOps, '_fetch_image_as_file')
@mock.patch.object(vmops.VMwareVMOps, '_prepare_iso_image')
@mock.patch.object(vmops.VMwareVMOps, '_prepare_sparse_image')
@mock.patch.object(vmops.VMwareVMOps, '_prepare_flat_image')
@mock.patch.object(vmops.VMwareVMOps, '_cache_iso_image')
@mock.patch.object(vmops.VMwareVMOps, '_cache_sparse_image')
@mock.patch.object(vmops.VMwareVMOps, '_cache_flat_image')
@mock.patch.object(vmops.VMwareVMOps, '_delete_datastore_file')
def _test_fetch_image_if_missing(self,
                                 mock_delete_datastore_file,
                                 mock_cache_flat_image,
                                 mock_cache_sparse_image,
                                 mock_cache_iso_image,
                                 mock_prepare_flat_image,
                                 mock_prepare_sparse_image,
                                 mock_prepare_iso_image,
                                 mock_fetch_image_as_file,
                                 mock_check_cache_folder,
                                 is_iso=False,
                                 is_sparse_disk=False):
    # Driver verifying that _fetch_image_if_missing prepares, fetches,
    # caches and cleans up using the handler pair matching the image type.
    # NOTE: decorator order is bottom-up, so the parameter order above
    # must stay the exact reverse of the decorator list.
    tmp_dir_path = mock.Mock()
    tmp_image_path = mock.Mock()
    # Pick the prepare/cache pair the image type is expected to use.
    if is_iso:
        mock_prepare = mock_prepare_iso_image
        mock_cache = mock_cache_iso_image
    elif is_sparse_disk:
        mock_prepare = mock_prepare_sparse_image
        mock_cache = mock_cache_sparse_image
    else:
        mock_prepare = mock_prepare_flat_image
        mock_cache = mock_cache_flat_image
    mock_prepare.return_value = tmp_dir_path, tmp_image_path
    vi = self._make_vm_config_info(is_iso, is_sparse_disk)
    self._vmops._fetch_image_if_missing(self._context, vi)
    mock_check_cache_folder.assert_called_once_with(
        self._ds.name, self._ds.ref)
    mock_prepare.assert_called_once_with(vi)
    mock_fetch_image_as_file.assert_called_once_with(
        self._context, vi, tmp_image_path)
    mock_cache.assert_called_once_with(vi, tmp_image_path)
    # The temporary staging directory must always be removed afterwards.
    mock_delete_datastore_file.assert_called_once_with(
        str(tmp_dir_path), self._dc_info.ref)

def test_fetch_image_if_missing(self):
    self._test_fetch_image_if_missing()

def test_fetch_image_if_missing_with_sparse(self):
    self._test_fetch_image_if_missing(
        is_sparse_disk=True)

def test_fetch_image_if_missing_with_iso(self):
    self._test_fetch_image_if_missing(
        is_iso=True)
def test_get_esx_host_and_cookies(self):
    """_get_esx_host_and_cookies returns the first connected host and a
    cookie jar holding the CGI ticket acquired from vSphere."""
    datastore = mock.Mock()
    datastore.get_connected_hosts.return_value = ['fira-host']
    file_path = mock.Mock()

    def fake_invoke(module, method, *args, **kwargs):
        # Emulate only the two vSphere calls the code under test makes.
        if method == 'AcquireGenericServiceTicket':
            ticket = mock.Mock()
            ticket.id = 'fira-ticket'
            return ticket
        elif method == 'get_object_property':
            return 'fira-host'

    # contextlib.nested is unnecessary (and removed on Python 3) for a
    # single context manager; use a plain `with` instead.
    with mock.patch.object(self._session, 'invoke_api', fake_invoke):
        result = self._vmops._get_esx_host_and_cookies(datastore,
                                                       'ha-datacenter',
                                                       file_path)
    self.assertEqual('fira-host', result[0])
    cookies = result[1]
    self.assertEqual(1, len(cookies))
    self.assertEqual('vmware_cgi_ticket', cookies[0].name)
    self.assertEqual('"fira-ticket"', cookies[0].value)
@mock.patch.object(images, 'fetch_image')
@mock.patch.object(vmops.VMwareVMOps, '_get_esx_host_and_cookies')
def test_fetch_image_as_file(self,
                             mock_get_esx_host_and_cookies,
                             mock_fetch_image):
    """The image is streamed through the ESX host returned by
    _get_esx_host_and_cookies."""
    vi = self._make_vm_config_info()
    image_ds_loc = mock.Mock()
    esx_host = mock.Mock()
    esx_cookies = mock.Mock()
    mock_get_esx_host_and_cookies.return_value = (esx_host, esx_cookies)
    self._vmops._fetch_image_as_file(self._context, vi, image_ds_loc)
    mock_get_esx_host_and_cookies.assert_called_once_with(
        vi.datastore, 'ha-datacenter', image_ds_loc.rel_path)
    mock_fetch_image.assert_called_once_with(
        self._context, vi.instance, esx_host, self._session._port,
        'ha-datacenter', self._ds.name, image_ds_loc.rel_path,
        cookies=esx_cookies)

@mock.patch.object(images, 'fetch_image')
@mock.patch.object(vmops.VMwareVMOps, '_get_esx_host_and_cookies')
def test_fetch_image_as_file_exception(self,
                                       mock_get_esx_host_and_cookies,
                                       mock_fetch_image):
    """If no ESX host can be resolved, fetching falls back to the
    vCenter session host and its cookie jar."""
    vi = self._make_vm_config_info()
    image_ds_loc = mock.Mock()
    mock_get_esx_host_and_cookies.side_effect = exception.HostNotFound(
        host='')
    self._vmops._fetch_image_as_file(self._context, vi, image_ds_loc)
    mock_get_esx_host_and_cookies.assert_called_once_with(
        vi.datastore, 'ha-datacenter', image_ds_loc.rel_path)
    mock_fetch_image.assert_called_once_with(
        self._context, vi.instance, self._session._host,
        self._session._port, self._dc_info.name, self._ds.name,
        image_ds_loc.rel_path, cookies='Fake-CookieJar')

@mock.patch.object(images, 'fetch_image_stream_optimized')
def test_fetch_image_as_vapp(self, mock_fetch_image):
    """Stream-optimized images are imported as a vApp named after the
    parent directory of the target location."""
    vi = self._make_vm_config_info()
    image_ds_loc = mock.Mock()
    image_ds_loc.parent.basename = 'fake-name'
    self._vmops._fetch_image_as_vapp(self._context, vi, image_ds_loc)
    mock_fetch_image.assert_called_once_with(
        self._context, vi.instance, self._session, 'fake-name',
        self._ds.name, vi.dc_info.vmFolder,
        self._vmops._root_resource_pool)
@mock.patch.object(uuidutils, 'generate_uuid', return_value='tmp-uuid')
def test_prepare_iso_image(self, mock_generate_uuid):
    """ISO images are staged under a uuid-named vmware_temp directory."""
    vi = self._make_vm_config_info(is_iso=True)
    tmp_dir_loc, tmp_image_ds_loc = self._vmops._prepare_iso_image(vi)
    self.assertEqual('[%s] vmware_temp/tmp-uuid' % self._ds.name,
                     str(tmp_dir_loc))
    self.assertEqual(
        '[%s] vmware_temp/tmp-uuid/%s/%s.iso' % (
            self._ds.name, self._image_id, self._image_id),
        str(tmp_image_ds_loc))

@mock.patch.object(uuidutils, 'generate_uuid', return_value='tmp-uuid')
def test_prepare_sparse_image(self, mock_generate_uuid):
    """Sparse images are staged as tmp-sparse.vmdk inside the temp dir."""
    vi = self._make_vm_config_info(is_sparse_disk=True)
    tmp_dir_loc, tmp_image_ds_loc = self._vmops._prepare_sparse_image(vi)
    self.assertEqual('[%s] vmware_temp/tmp-uuid' % self._ds.name,
                     str(tmp_dir_loc))
    self.assertEqual(
        '[%s] vmware_temp/tmp-uuid/%s/%s' % (
            self._ds.name, self._image_id, "tmp-sparse.vmdk"),
        str(tmp_image_ds_loc))

@mock.patch.object(ds_util, 'mkdir')
@mock.patch.object(vm_util, 'create_virtual_disk')
@mock.patch.object(vmops.VMwareVMOps, '_delete_datastore_file')
@mock.patch.object(uuidutils, 'generate_uuid', return_value='tmp-uuid')
def test_prepare_flat_image(self,
                            mock_generate_uuid,
                            mock_delete_datastore_file,
                            mock_create_virtual_disk,
                            mock_mkdir):
    """Flat images get a descriptor .vmdk created and the stale -flat
    file removed before fetch."""
    vi = self._make_vm_config_info()
    tmp_dir_loc, tmp_image_ds_loc = self._vmops._prepare_flat_image(vi)
    base = '[%s] vmware_temp/tmp-uuid' % self._ds.name
    image_dir = '%s/%s' % (base, self._image_id)
    flat_path = '%s/%s-flat.vmdk' % (image_dir, self._image_id)
    descriptor_path = '%s/%s.vmdk' % (image_dir, self._image_id)
    mock_mkdir.assert_called_once_with(
        self._session, DsPathMatcher(image_dir), self._dc_info.ref)
    self.assertEqual(base, str(tmp_dir_loc))
    self.assertEqual(flat_path, str(tmp_image_ds_loc))
    mock_create_virtual_disk.assert_called_once_with(
        self._session, self._dc_info.ref,
        vi.ii.adapter_type, vi.ii.disk_type,
        DsPathMatcher(descriptor_path), vi.ii.file_size_in_kb)
    mock_delete_datastore_file.assert_called_once_with(
        DsPathMatcher(flat_path), self._dc_info.ref)
@mock.patch.object(ds_util, 'file_move')
def test_cache_iso_image(self, mock_file_move):
    """Fetched ISO directories are moved into the image cache."""
    vi = self._make_vm_config_info(is_iso=True)
    tmp_image_ds_loc = mock.Mock()
    self._vmops._cache_iso_image(vi, tmp_image_ds_loc)
    mock_file_move.assert_called_once_with(
        self._session, self._dc_info.ref, tmp_image_ds_loc.parent,
        DsPathMatcher('[fake_ds] vmware_base/%s' % self._image_id))

@mock.patch.object(ds_util, 'file_move')
def test_cache_flat_image(self, mock_file_move):
    """Fetched flat-image directories are moved into the image cache."""
    vi = self._make_vm_config_info()
    tmp_image_ds_loc = mock.Mock()
    self._vmops._cache_flat_image(vi, tmp_image_ds_loc)
    mock_file_move.assert_called_once_with(
        self._session, self._dc_info.ref, tmp_image_ds_loc.parent,
        DsPathMatcher('[fake_ds] vmware_base/%s' % self._image_id))

@mock.patch.object(ds_util, 'disk_move')
@mock.patch.object(ds_util, 'mkdir')
def test_cache_stream_optimized_image(self, mock_mkdir, mock_disk_move):
    """Stream-optimized disks are moved into a freshly created cache
    directory named after the image."""
    vi = self._make_vm_config_info()
    self._vmops._cache_stream_optimized_image(vi, mock.sentinel.tmp_image)
    cache_dir = '[fake_ds] vmware_base/%s' % self._image_id
    mock_mkdir.assert_called_once_with(
        self._session, DsPathMatcher(cache_dir), self._dc_info.ref)
    mock_disk_move.assert_called_once_with(
        self._session, self._dc_info.ref, mock.sentinel.tmp_image,
        DsPathMatcher('%s/%s.vmdk' % (cache_dir, self._image_id)))

@mock.patch.object(ds_util, 'file_move')
@mock.patch.object(vm_util, 'copy_virtual_disk')
@mock.patch.object(vmops.VMwareVMOps, '_delete_datastore_file')
@mock.patch.object(vmops.VMwareVMOps, '_update_image_size')
def test_cache_sparse_image(self,
                            mock_update_image_size,
                            mock_delete_datastore_file,
                            mock_copy_virtual_disk,
                            mock_file_move):
    """Sparse images are copied to their final vmdk name and the cached
    image size is refreshed."""
    vi = self._make_vm_config_info(is_sparse_disk=True)
    src_path = "[%s] vmware_temp/tmp-uuid/%s/tmp-sparse.vmdk" % (
        self._ds.name, self._image_id)
    self._vmops._cache_sparse_image(
        vi, ds_obj.DatastorePath.parse(src_path))
    dst_path = "[%s] vmware_temp/tmp-uuid/%s/%s.vmdk" % (
        self._ds.name, self._image_id, self._image_id)
    mock_copy_virtual_disk.assert_called_once_with(
        self._session, self._dc_info.ref, src_path,
        DsPathMatcher(dst_path))
    mock_update_image_size.assert_called_once_with(vi)
def test_get_storage_policy_none(self):
    """With PBM enabled and no flavor override the default policy wins."""
    flavor = objects.Flavor(name='m1.small', memory_mb=6, vcpus=28,
                            root_gb=496, ephemeral_gb=8128,
                            swap=33550336, extra_specs={})
    self.flags(pbm_enabled=True, pbm_default_policy='fake-policy',
               group='vmware')
    self.assertEqual(
        'fake-policy',
        self._vmops._get_extra_specs(flavor, None).storage_policy)

def test_get_storage_policy_extra_specs(self):
    """A vmware:storage_policy extra spec overrides the default policy."""
    flavor = objects.Flavor(
        name='m1.small', memory_mb=6, vcpus=28, root_gb=496,
        ephemeral_gb=8128, swap=33550336,
        extra_specs={'vmware:storage_policy': 'flavor-policy'})
    self.flags(pbm_enabled=True, pbm_default_policy='default-policy',
               group='vmware')
    self.assertEqual(
        'flavor-policy',
        self._vmops._get_extra_specs(flavor, None).storage_policy)

def test_get_base_folder_not_set(self):
    # Default cache subdirectory is used verbatim.
    self.flags(image_cache_subdirectory_name='vmware_base')
    self.assertEqual('vmware_base', self._vmops._get_base_folder())

def test_get_base_folder_host_ip(self):
    # A non-default subdirectory name gets the host IP prefixed.
    self.flags(my_ip='7.7.7.7', image_cache_subdirectory_name='_base')
    self.assertEqual('7.7.7.7_base', self._vmops._get_base_folder())

def test_get_base_folder_cache_prefix(self):
    # An explicit vmware cache_prefix wins over the host IP.
    self.flags(cache_prefix='my_prefix', group='vmware')
    self.flags(image_cache_subdirectory_name='_base')
    self.assertEqual('my_prefix_base', self._vmops._get_base_folder())
def _test_reboot_vm(self, reboot_type="SOFT"):
    # SOFT reboots go through the guest tools (RebootGuest); HARD reboots
    # reset the VM via a task (ResetVM_Task) that must be waited upon.
    expected_methods = ['get_object_properties_dict']
    if reboot_type == "SOFT":
        expected_methods.append('RebootGuest')
    else:
        expected_methods.append('ResetVM_Task')
    # VM properties reported to the driver: powered on with healthy,
    # running guest tools (the precondition for a SOFT reboot).
    query = {}
    query['runtime.powerState'] = "poweredOn"
    query['summary.guest.toolsStatus'] = "toolsOk"
    query['summary.guest.toolsRunningStatus'] = "guestToolsRunning"

    def fake_call_method(module, method, *args, **kwargs):
        # Pops the next expected call, enforcing the exact call order.
        expected_method = expected_methods.pop(0)
        self.assertEqual(expected_method, method)
        if (expected_method == 'get_object_properties_dict'):
            return query
        elif (expected_method == 'ResetVM_Task'):
            return 'fake-task'

    with contextlib.nested(
            mock.patch.object(vm_util, "get_vm_ref",
                              return_value='fake-vm-ref'),
            mock.patch.object(self._session, "_call_method",
                              fake_call_method),
            mock.patch.object(self._session, "_wait_for_task")
    ) as (_get_vm_ref, fake_call_method, _wait_for_task):
        self._vmops.reboot(self._instance, self.network_info, reboot_type)
    _get_vm_ref.assert_called_once_with(self._session,
                                        self._instance)
    if reboot_type == "HARD":
        # Only a HARD reboot produces a task to wait on.
        _wait_for_task.assert_has_calls([
            mock.call('fake-task')])

def test_reboot_vm_soft(self):
    self._test_reboot_vm()

def test_reboot_vm_hard(self):
    self._test_reboot_vm(reboot_type="HARD")
def test_get_instance_metadata(self):
    # The metadata string is a newline-separated key:value dump covering
    # instance identity, project, flavor dimensions, image and package
    # version; the expected value below must stay byte-exact.
    flavor = objects.Flavor(id=7,
                            name='m1.small',
                            memory_mb=6,
                            vcpus=28,
                            root_gb=496,
                            ephemeral_gb=8128,
                            swap=33550336,
                            extra_specs={})
    self._instance.flavor = flavor
    metadata = self._vmops._get_instance_metadata(
        self._context, self._instance)
    expected = ("name:fake_display_name\n"
                "userid:fake_user\n"
                "username:None\n"
                "projectid:fake_project\n"
                "projectname:None\n"
                "flavor:name:m1.small\n"
                "flavor:memory_mb:6\n"
                "flavor:vcpus:28\n"
                "flavor:ephemeral_gb:8128\n"
                "flavor:root_gb:496\n"
                "flavor:swap:33550336\n"
                "imageid:70a599e0-31e7-49b7-b260-868f441e862b\n"
                "package:%s\n" % version.version_string_with_package())
    self.assertEqual(expected, metadata)
@mock.patch.object(vm_util, 'reconfigure_vm')
@mock.patch.object(vm_util, 'get_network_attach_config_spec',
                   return_value='fake-attach-spec')
@mock.patch.object(vm_util, 'get_attach_port_index', return_value=1)
@mock.patch.object(vm_util, 'get_vm_ref', return_value='fake-ref')
def test_attach_interface(self, mock_get_vm_ref,
                          mock_get_attach_port_index,
                          mock_get_network_attach_config_spec,
                          mock_reconfigure_vm):
    """attach_interface builds an attach spec for the free port index,
    reconfigures the VM and records the vNIC index."""
    _network_api = mock.Mock()
    self._vmops._network_api = _network_api
    vif_info = vif.get_vif_dict(self._session, self._cluster,
                                'VirtualE1000',
                                utils.is_neutron(),
                                self._network_values)
    self._vmops.attach_interface(self._instance, self._image_meta,
                                 self._network_values)
    mock_get_vm_ref.assert_called_once_with(self._session, self._instance)
    # BUG FIX: the original *called* this mock instead of asserting on
    # it, so the expectation was never actually checked.
    self.assertTrue(mock_get_attach_port_index.called)
    mock_get_network_attach_config_spec.assert_called_once_with(
        self._session.vim.client.factory, vif_info, 1)
    mock_reconfigure_vm.assert_called_once_with(self._session,
                                                'fake-ref',
                                                'fake-attach-spec')
    # BUG FIX: likewise assert (rather than call) the vnic index update.
    _network_api.update_instance_vnic_index.assert_called_once_with(
        mock.ANY, self._instance, self._network_values, 1)
@mock.patch.object(vif, 'get_network_device', return_value='device')
@mock.patch.object(vm_util, 'reconfigure_vm')
@mock.patch.object(vm_util, 'get_network_detach_config_spec',
                   return_value='fake-detach-spec')
@mock.patch.object(vm_util, 'get_vm_detach_port_index', return_value=1)
@mock.patch.object(vm_util, 'get_vm_ref', return_value='fake-ref')
def test_detach_interface(self, mock_get_vm_ref,
                          mock_get_detach_port_index,
                          mock_get_network_detach_config_spec,
                          mock_reconfigure_vm,
                          mock_get_network_device):
    """detach_interface locates the vNIC, builds a detach spec,
    reconfigures the VM and clears the recorded vNIC index."""
    _network_api = mock.Mock()
    self._vmops._network_api = _network_api
    with mock.patch.object(self._session, '_call_method',
                           return_value='hardware-devices'):
        self._vmops.detach_interface(self._instance, self._network_values)
    mock_get_vm_ref.assert_called_once_with(self._session, self._instance)
    # BUG FIX: the original *called* this mock instead of asserting on
    # it, so the port-index lookup was never actually verified.
    self.assertTrue(mock_get_detach_port_index.called)
    mock_get_network_detach_config_spec.assert_called_once_with(
        self._session.vim.client.factory, 'device', 1)
    mock_reconfigure_vm.assert_called_once_with(self._session,
                                                'fake-ref',
                                                'fake-detach-spec')
    # BUG FIX: likewise assert (rather than call) the vnic index reset.
    _network_api.update_instance_vnic_index.assert_called_once_with(
        mock.ANY, self._instance, self._network_values, None)
@mock.patch.object(vm_util, 'get_vm_ref', return_value='fake-ref')
def test_get_mks_console(self, mock_get_vm_ref):
    """The MKS console exposes host/port directly and packs the ticket,
    thumbprint (colons stripped) and cfgFile into the access path."""
    ticket = mock.MagicMock()
    ticket.host = 'esx1'
    ticket.port = 902
    ticket.ticket = 'fira'
    ticket.sslThumbprint = 'aa:bb:cc:dd:ee:ff'
    ticket.cfgFile = '[ds1] fira/foo.vmx'
    with mock.patch.object(self._session, '_call_method',
                           return_value=ticket):
        console = self._vmops.get_mks_console(self._instance)
    self.assertEqual('esx1', console.host)
    self.assertEqual(902, console.port)
    access_path = jsonutils.loads(console.internal_access_path)
    self.assertEqual('fira', access_path['ticket'])
    self.assertEqual('aabbccddeeff', access_path['thumbprint'])
    self.assertEqual('[ds1] fira/foo.vmx', access_path['cfgFile'])

def test_get_cores_per_socket(self):
    """hw:cpu_sockets divides flavor vcpus into cores per socket."""
    flavor = objects.Flavor(name='m1.small', memory_mb=6, vcpus=28,
                            root_gb=496, ephemeral_gb=8128,
                            swap=33550336,
                            extra_specs={'hw:cpu_sockets': 7})
    extra_specs = self._vmops._get_extra_specs(flavor, None)
    self.assertEqual(4, int(extra_specs.cores_per_socket))
|
shail2810/nova
|
nova/tests/unit/virt/vmwareapi/test_vmops.py
|
Python
|
apache-2.0
| 114,813
| 0.000575
|
# -*- coding: utf-8 -*-
# Copyright (c) 2020, Frappe Technologies Pvt. Ltd. and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe, json
from frappe import _
from frappe.model.document import Document
from frappe.model.rename_doc import rename_doc
class Medication(Document):
    """Healthcare Medication master; mirrors itself into an Item when
    the medication is billable."""

    def validate(self):
        self.enable_disable_item()

    def after_insert(self):
        create_item_from_medication(self)

    def on_update(self):
        # Propagate field edits to the linked Item only when flagged.
        if self.change_in_item:
            self.update_item_and_item_price()

    def enable_disable_item(self):
        # Keep the linked Item's disabled flag in sync for billable meds.
        if self.is_billable:
            frappe.db.set_value('Item', self.item, 'disabled',
                                1 if self.disabled else 0)

    def update_item_and_item_price(self):
        if self.is_billable and self.item:
            item_doc = frappe.get_doc('Item', {'item_code': self.item})
            item_doc.item_name = self.medication_name
            item_doc.item_group = self.item_group
            item_doc.description = self.description
            item_doc.stock_uom = self.stock_uom
            item_doc.disabled = 0
            item_doc.save(ignore_permissions=True)
            if self.rate:
                item_price = frappe.get_doc('Item Price',
                                            {'item_code': self.item})
                item_price.item_name = self.medication_name
                item_price.price_list_rate = self.rate
                item_price.save()
        elif not self.is_billable and self.item:
            # No longer billable: retire the linked Item.
            frappe.db.set_value('Item', self.item, 'disabled', 1)
        # Reset the dirty flag once the Item has been synchronized.
        self.db_set('change_in_item', 0)
def create_item_from_medication(doc):
    """Create a service Item mirroring this Medication and link it back."""
    # Billable & enabled medications produce an enabled Item; otherwise
    # the Item inherits the medication's own disabled state.
    if doc.is_billable and not doc.disabled:
        disabled = 0
    else:
        disabled = doc.disabled
    uom = doc.stock_uom or frappe.db.get_single_value('Stock Settings',
                                                      'stock_uom')
    item = frappe.get_doc({
        'doctype': 'Item',
        'item_code': doc.medication_name,
        'item_name': doc.medication_name,
        'item_group': doc.item_group,
        'description': doc.description,
        'is_sales_item': 1,
        'is_service_item': 1,
        'is_purchase_item': 0,
        'is_stock_item': 0,
        'show_in_website': 0,
        'is_pro_applicable': 0,
        'disabled': disabled,
        'stock_uom': uom,
    }).insert(ignore_permissions=True, ignore_mandatory=True)
    make_item_price(item.name, doc.rate)
    doc.db_set('item', item.name)
def make_item_price(item, item_price):
    """Insert an Item Price for *item* on the first selling price list."""
    selling_price_list = frappe.db.get_value('Price List', {'selling': 1})
    frappe.get_doc({
        'doctype': 'Item Price',
        'price_list': selling_price_list,
        'item_code': item,
        'price_list_rate': item_price,
    }).insert(ignore_permissions=True, ignore_mandatory=True)
@frappe.whitelist()
def change_item_code_from_medication(item_code, doc):
    """Rename the Medication's linked Item, refusing codes that exist."""
    doc = frappe._dict(json.loads(doc))
    if frappe.db.exists('Item', {'item_code': item_code}):
        # frappe.throw raises, so no explicit else branch is needed.
        frappe.throw(_('Item with Item Code {0} already exists').format(
            item_code))
    rename_doc('Item', doc.item_code, item_code, ignore_permissions=True)
    frappe.db.set_value('Medication', doc.name, 'item_code', item_code)
|
ESS-LLP/erpnext
|
erpnext/healthcare/doctype/medication/medication.py
|
Python
|
gpl-3.0
| 2,862
| 0.025507
|
""" TODO: Add docstring """
import re
import pexpect
class MediaObject(object):
    """Represents an encodable media file and its probed duration."""

    # Matches "Duration: HH:MM:SS.ff" anywhere in avprobe's output.
    _DURATION_RE = re.compile(r".*?Duration: .*?(\d+):(\d+):(\d+)\.(\d+)",
                              re.IGNORECASE | re.DOTALL)

    def __init__(self, input_filename, output_filename):
        self.input_filename = input_filename
        self.output_filename = output_filename
        self.media_duration = self.get_media_duration()
        # INFO: All other media information could potentially be put here too

    def get_media_duration(self):
        """Spawn avprobe and return the media duration in whole seconds.

        Returns -1 when no duration line is found (e.g. the process
        terminated early or avprobe's output format changed).
        """
        info_process = pexpect.spawn("/usr/bin/avprobe " +
                                     self.input_filename)
        try:
            subprocess_output = info_process.readlines()
        finally:
            # BUG FIX: the original said `info_process.close` without
            # parentheses, so the process was never actually closed.
            info_process.close()
        # NOTE(review): on Python 3 pexpect yields bytes lines -- confirm
        # decoding is handled before matching if this is ported.
        return self._parse_duration(subprocess_output)

    @staticmethod
    def _parse_duration(lines):
        """Return the duration in seconds parsed from avprobe output
        lines, or -1 if no duration line is present.

        The fractional part is rounded to the nearest second.
        """
        for line in lines:
            match = MediaObject._DURATION_RE.search(line)
            if match:
                hours, minutes, seconds, fraction = (
                    int(g) for g in match.groups())
                total = hours * 3600 + minutes * 60 + seconds
                # BUG FIX: the original's ternary bound to the *whole*
                # sum -- `a + b + c + 1 if cond else 0` returns 0
                # whenever the condition is false -- and the rounding
                # condition read the seconds field instead of the
                # fractional part. Round up when the fraction
                # (hundredths) exceeds 50.
                if fraction > 50:
                    total += 1
                return total
        # Not found: the process may have terminated early or an avprobe
        # update broke the regex. Return a sentinel instead of crashing.
        return -1
|
thethomaseffect/travers-media-tools
|
traversme/encoder/media_object.py
|
Python
|
mit
| 2,000
| 0
|
# --------------------------------------------------------
# Tensorflow Faster R-CNN
# Licensed under The MIT License [see LICENSE for details]
# Written by Xinlei Chen
# --------------------------------------------------------
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
#import cv2
from scipy.misc import imresize
from scipy.misc import imread
import numpy as np
try:
import cPickle as pickle
except ImportError:
import pickle
import os
import math
import tensorflow as tf
from utils.timer import Timer
from utils.cython_nms import nms, nms_new
from utils.boxes_grid import get_boxes_grid
from utils.blob import im_list_to_blob
from model.config import cfg, get_output_dir
from model.bbox_transform import clip_boxes, bbox_transform_inv
def _get_image_blob(im):
    """Converts an image into a network input.

    Arguments:
        im (ndarray): a color image in BGR order

    Returns:
        blob (ndarray): a data blob holding an image pyramid
        im_scale_factors (ndarray): image scales (relative to im) used
            in the image pyramid
    """
    im_orig = im.astype(np.float32, copy=True)
    im_orig -= cfg.PIXEL_MEANS
    im_shape = im_orig.shape
    im_size_min = np.min(im_shape[0:2])
    im_size_max = np.max(im_shape[0:2])
    processed_ims = []
    im_scale_factors = []
    # BUG FIX: the original rebound `im` to the previously *resized*
    # image inside the loop and then read im.shape from it, so every
    # scale after the first was computed from the wrong base dimensions.
    # Always resize from the original image's dimensions.
    im_row, im_col = im_shape[0], im_shape[1]
    for target_size in cfg.TEST.SCALES:
        im_scale = float(target_size) / float(im_size_min)
        # Prevent the biggest axis from being more than MAX_SIZE
        if np.round(im_scale * im_size_max) > cfg.TEST.MAX_SIZE:
            im_scale = float(cfg.TEST.MAX_SIZE) / float(im_size_max)
        resized = imresize(im_orig,
                           (int(im_row * im_scale), int(im_col * im_scale)))
        im_scale_factors.append(im_scale)
        processed_ims.append(resized)
    # Create a blob to hold the input images
    blob = im_list_to_blob(processed_ims)
    return blob, np.array(im_scale_factors)
def _get_blobs(im):
    """Convert an image and RoIs within that image into network inputs."""
    data_blob, im_scale_factors = _get_image_blob(im)
    return {'data': data_blob}, im_scale_factors
def _clip_boxes(boxes, im_shape):
"""Clip boxes to image boundaries."""
# x1 >= 0
boxes[:, 0::4] = np.maximum(boxes[:, 0::4], 0)
# y1 >= 0
boxes[:, 1::4] = np.maximum(boxes[:, 1::4], 0)
# x2 < im_shape[1]
boxes[:, 2::4] = np.minimum(boxes[:, 2::4], im_shape[1] - 1)
# y2 < im_shape[0]
boxes[:, 3::4] = np.minimum(boxes[:, 3::4], im_shape[0] - 1)
return boxes
def _rescale_boxes(boxes, inds, scales):
"""Rescale boxes according to image rescaling."""
for i in range(boxes.shape[0]):
boxes[i,:] = boxes[i,:] / scales[int(inds[i])]
return boxes
def im_detect(sess, net, im):
  """Run the detection network on one image; return (scores, pred_boxes)."""
  blobs, im_scales = _get_blobs(im)
  assert len(im_scales) == 1, "Only single-image batch implemented"
  im_blob = blobs['data']
  # im_info carries the network-input height, width and the applied scale
  blobs['im_info'] = np.array(
      [[im_blob.shape[1], im_blob.shape[2], im_scales[0]]], dtype=np.float32)
  _, scores, bbox_pred, rois = net.test_image(sess, blobs['data'], blobs['im_info'])
  # Map RoIs back to the original (unscaled) image coordinates
  boxes = rois[:, 1:5] / im_scales[0]
  scores = np.reshape(scores, [scores.shape[0], -1])
  bbox_pred = np.reshape(bbox_pred, [bbox_pred.shape[0], -1])
  if cfg.TEST.BBOX_REG:
    # Apply bounding-box regression deltas, then keep boxes inside the image
    pred_boxes = bbox_transform_inv(boxes, bbox_pred)
    pred_boxes = _clip_boxes(pred_boxes, im.shape)
  else:
    # Simply repeat the boxes, once for each class
    pred_boxes = np.tile(boxes, (1, scores.shape[1]))
  return scores, pred_boxes
def apply_nms(all_boxes, thresh):
  """Apply non-maximum suppression to all predicted boxes output by the
  test_net method.

  Arguments:
    all_boxes: all_boxes[cls][image] is an N x 5 array of detections in
      (x1, y1, x2, y2, score) form, or an empty list when there are none.
    thresh: NMS overlap threshold passed through to nms().
  Returns:
    The same nested [cls][image] structure with NMS applied.
  """
  num_classes = len(all_boxes)
  num_images = len(all_boxes[0])
  nms_boxes = [[[] for _ in range(num_images)] for _ in range(num_classes)]
  for cls_ind in range(num_classes):
    for im_ind in range(num_images):
      dets = all_boxes[cls_ind][im_ind]
      # BUG FIX: `dets == []` performs an elementwise comparison once dets
      # is an ndarray (ambiguous truthiness); len() works for both the
      # initial empty list and an ndarray.
      if len(dets) == 0:
        continue
      x1 = dets[:, 0]
      y1 = dets[:, 1]
      x2 = dets[:, 2]
      y2 = dets[:, 3]
      scores = dets[:, 4]
      # Drop degenerate boxes and low-confidence detections
      inds = np.where((x2 > x1) & (y2 > y1) & (scores > cfg.TEST.DET_THRESHOLD))[0]
      dets = dets[inds, :]
      if len(dets) == 0:
        continue
      keep = nms(dets, thresh)
      if len(keep) == 0:
        continue
      nms_boxes[cls_ind][im_ind] = dets[keep, :].copy()
  return nms_boxes
def test_net(sess, net, imdb, weights_filename, experiment_setup=None,
             max_per_image=100, thresh=0.05):
  """Test a Fast R-CNN network on an image database.

  Runs im_detect on every image in `imdb`, applies per-class NMS, caps the
  number of detections per image, pickles all boxes to the output dir and
  finally calls imdb.evaluate_detections.

  :param sess: TensorFlow session holding the network.
  :param net: network object exposing test_image() (used via im_detect).
  :param imdb: image database (paths, class count, evaluation hook).
  :param weights_filename: used only to derive the output directory.
  :param experiment_setup: opaque tag forwarded to evaluate_detections.
  :param max_per_image: cap on detections kept per image over all classes.
  :param thresh: minimum class score for a detection to be kept.
  """
  np.random.seed(cfg.RNG_SEED)
  # NOTE(review): the string below predates the docstring above; after the
  # seed call it is a no-op expression statement, kept byte-identical.
  """Test a Fast R-CNN network on an image database."""
  num_images = len(imdb.image_index)
  # num_images = 2
  # all detections are collected into:
  #  all_boxes[cls][image] = N x 5 array of detections in
  #  (x1, y1, x2, y2, score)
  all_boxes = [[[] for _ in range(num_images)]
               for _ in range(imdb.num_classes)]
  output_dir = get_output_dir(imdb, weights_filename)
  print('using output_dir: ', output_dir)
  # timers
  _t = {'im_detect' : Timer(), 'misc' : Timer()}
  # define a writer to write the histogram of summaries
  # test_tbdir = '/home/shuang/projects/tf-faster-rcnn/tensorboard/'
  # if not os.path.exists(test_tbdir):
  #   print('making directory for test tensorboard result')
  #   os.mkdir(test_tbdir)
  # writer = tf.summary.FileWriter(test_tbdir,sess.graph)
  # define a folder for activation results
  test_actdir = '../activations_retrained'
  if not os.path.exists(test_actdir):
    os.mkdir(test_actdir)
  # define a folder for zero fractions
  test_zerodir = './zero_fractions'
  if not os.path.exists(test_zerodir):
    os.mkdir(test_zerodir)
  for i in range(num_images):
    im = imread(imdb.image_path_at(i))
    _t['im_detect'].tic()
    scores, boxes = im_detect(sess, net, im)
    _t['im_detect'].toc()
    # write act summaries to tensorboard
    # writer.add_summary(act_summaries)
    # record the zero fraction -> only for vgg16
    # zero_frac = []
    # for layer_ind in range(13):
    #   batch_num,row,col,filter_num = acts[layer_ind].shape
    #   zero_frac.append([])
    #   for j in range(filter_num):
    #     # print(acts[0][:,:,:,i].shape)
    #     fraction = 1-np.count_nonzero(acts[layer_ind][:,:,:,j])/(batch_num*row*col)
    #     zero_frac[layer_ind].append(fraction)
    _t['misc'].tic()
    # skip j = 0, because it's the background class
    chosen_classes = []
    for j in range(1, imdb.num_classes):
    # for j, clas in enumerate(imdb._classes[1:]):
      # keep only detections whose class score clears `thresh`
      inds = np.where(scores[:, j] > thresh)[0]
      cls_scores = scores[inds, j]
      cls_boxes = boxes[inds, j*4:(j+1)*4]
      cls_dets = np.hstack((cls_boxes, cls_scores[:, np.newaxis])) \
        .astype(np.float32, copy=False)
      keep = nms(cls_dets, cfg.TEST.NMS)
      cls_dets = cls_dets[keep, :]
      all_boxes[j][i] = cls_dets
      # if len(cls_dets)!=0: # only for recording activations_res
        # chosen_classes.append(imdb._classes[j])
    # Limit to max_per_image detections *over all classes*
    if max_per_image > 0:
      image_scores = np.hstack([all_boxes[j][i][:, -1]
                    for j in range(1, imdb.num_classes)])
      if len(image_scores) > max_per_image:
        # keep the max_per_image highest-scoring detections overall
        image_thresh = np.sort(image_scores)[-max_per_image]
        for j in range(1, imdb.num_classes):
          keep = np.where(all_boxes[j][i][:, -1] >= image_thresh)[0]
          all_boxes[j][i] = all_boxes[j][i][keep, :]
    _t['misc'].toc()
    # write acts to a seperate text file for each seprate image file -> only vgg
    # f_name = '{}/{}.txt'.format(test_actdir,i)
    # act_file = open(f_name,'w')
    # act_file.write('\n'.join(chosen_classes))
    # act_file.write('\n')
    # sum_act = []
    # for arr in acts:
    #   temp = np.sum(arr,axis = (0,1,2))
    #   sum_act.append(temp)
    # for item in sum_act:
    #   act_file.write('{}\n'.format(str(item)))
    # act_file.close()
    # chosen_classes = []
    # write zero fractions to text files -> only vgg
    # file_name = '{}/{}.txt'.format(test_zerodir,i)
    # zero_file = open(file_name,'w')
    # zero_file.write('\n'.join(chosen_classes))
    # zero_file.write('\n')
    # for arr in zero_frac:
    #   zero_file.write('{}\n'.format(str(arr)))
    # zero_file.close()
    # chosen_classes = []
    # progress log every 1000 images
    if i%1000==0:
      print('im_detect: {:d}/{:d} {:.3f}s {:.3f}s' \
          .format(i + 1, num_images, _t['im_detect'].average_time,
              _t['misc'].average_time))
  # writer.close()
  det_file = os.path.join(output_dir, 'detections.pkl')
  with open(det_file, 'wb') as f:
    pickle.dump(all_boxes, f, pickle.HIGHEST_PROTOCOL)
  print('Evaluating detections')
  imdb.evaluate_detections(all_boxes, output_dir, experiment_setup)
|
shuang1330/tf-faster-rcnn
|
lib/model/test.py
|
Python
|
mit
| 8,856
| 0.015583
|
# This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# Copyright (c) Ansible Inc, 2016
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import glob
import os
import pickle
import platform
import select
import shlex
import subprocess
import traceback
from ansible.module_utils.six import PY2, b
from ansible.module_utils._text import to_bytes, to_text
def sysv_is_enabled(name):
    '''
    Check whether the supplied service name is enabled in any of the
    SysV runlevels (i.e. an S??<name> symlink exists under /etc/rc?.d).

    :arg name: name of the service to test for
    '''
    pattern = '/etc/rc?.d/S??%s' % name
    matches = glob.glob(pattern)
    return len(matches) > 0
def get_sysv_script(name):
    '''
    Return the expected init-script path for a service.

    :arg name: name or path of the service to test for
    '''
    # Absolute paths are taken verbatim; bare names map into /etc/init.d
    return name if name.startswith('/') else '/etc/init.d/%s' % name
def sysv_exists(name):
    '''
    Report whether an init script exists for the supplied service.

    :arg name: name of the service to test for
    '''
    script = get_sysv_script(name)
    return os.path.exists(script)
def fail_if_missing(module, found, service, msg=''):
    '''
    Exit with an error (or gracefully in check mode) when a service is missing.

    :arg module: is an AnsibleModule object, used for it's utility methods
    :arg found: boolean indicating if services was found or not
    :arg service: name of service
    :kw msg: extra info to append to error/success msg when missing
    '''
    if found:
        return
    # In check mode a missing service is assumed to appear during a full run
    if module.check_mode:
        module.exit_json(msg="Service %s not found on %s, assuming it will exist on full run" % (service, msg), changed=True)
    else:
        module.fail_json(msg='Could not find the requested service %s: %s' % (service, msg))
def daemonize(module, cmd):
    '''
    Execute a command while detaching as a daemon, returns rc, stdout, and stderr.

    :arg module: is an AnsibleModule object, used for it's utility methods
    :arg cmd: is a list or string representing the command and options to run

    This is complex because daemonization is hard for people.
    What we do is daemonize a part of this module, the daemon runs the command,
    picks up the return code and output, and returns it to the main process.
    '''
    # init some vars
    chunk = 4096  # FIXME: pass in as arg?
    errors = 'surrogate_or_strict'

    # start it!
    try:
        pipe = os.pipe()
        pid = os.fork()
    except OSError:
        module.fail_json(msg="Error while attempting to fork: %s", exception=traceback.format_exc())

    # we don't do any locking as this should be a unique module/process
    if pid == 0:
        # --- first child: becomes the daemon and runs the command ---
        os.close(pipe[0])

        # Set stdin/stdout/stderr to /dev/null
        fd = os.open(os.devnull, os.O_RDWR)

        # clone stdin/out/err
        for num in range(3):
            if fd != num:
                os.dup2(fd, num)

        # close otherwise
        if fd not in range(3):
            os.close(fd)

        # Make us a daemon
        pid = os.fork()

        # end if not in child
        if pid > 0:
            os._exit(0)

        # get new process session and detach
        sid = os.setsid()
        if sid == -1:
            module.fail_json(msg="Unable to detach session while daemonizing")

        # avoid possible problems with cwd being removed
        os.chdir("/")

        # double-fork so the daemon is re-parented to init
        pid = os.fork()
        if pid > 0:
            os._exit(0)

        # if command is string deal with py2 vs py3 conversions for shlex
        if not isinstance(cmd, list):
            if PY2:
                cmd = shlex.split(to_bytes(cmd, errors=errors))
            else:
                cmd = shlex.split(to_text(cmd, errors=errors))

        # make sure we always use byte strings
        run_cmd = []
        for c in cmd:
            run_cmd.append(to_bytes(c, errors=errors))

        # execute the command in forked process
        p = subprocess.Popen(run_cmd, shell=False, stdout=subprocess.PIPE, stderr=subprocess.PIPE, preexec_fn=lambda: os.close(pipe[1]))
        fds = [p.stdout, p.stderr]

        # loop reading output till its done
        # BUG FIX: the stderr buffer was keyed on the misspelled attribute
        # `p.sterr`, which raises AttributeError the moment output is
        # collected; the correct Popen attribute is `p.stderr`.
        output = {p.stdout: b(""), p.stderr: b("")}
        while fds:
            rfd, wfd, efd = select.select(fds, [], fds, 1)
            if (rfd + wfd + efd) or p.poll():
                for out in fds:
                    if out in rfd:
                        data = os.read(out.fileno(), chunk)
                        if not data:
                            fds.remove(out)
                        output[out] += b(data)

        # even after fds close, we might want to wait for pid to die
        p.wait()

        # Return a pickled data of parent
        return_data = pickle.dumps([p.returncode, to_text(output[p.stdout]), to_text(output[p.stderr])], protocol=pickle.HIGHEST_PROTOCOL)
        os.write(pipe[1], to_bytes(return_data, errors=errors))

        # clean up
        os.close(pipe[1])
        os._exit(0)

    elif pid == -1:
        module.fail_json(msg="Unable to fork, no exception thrown, probably due to lack of resources, check logs.")

    else:
        # in parent: wait for the intermediate child, then read the pickled
        # (rc, stdout, stderr) triple the daemon writes down the pipe
        os.close(pipe[1])
        os.waitpid(pid, 0)

        # Grab response data after child finishes
        return_data = b("")
        while True:
            rfd, wfd, efd = select.select([pipe[0]], [], [pipe[0]])
            if pipe[0] in rfd:
                data = os.read(pipe[0], chunk)
                if not data:
                    break
                return_data += b(data)

        # Note: no need to specify encoding on py3 as this module sends the
        # pickle to itself (thus same python interpreter so we aren't mixing
        # py2 and py3)
        return pickle.loads(to_bytes(return_data, errors=errors))
def check_ps(module, pattern):
    '''
    Return True when `pattern` appears in the output of `ps`.

    :arg module: AnsibleModule object, used to locate and run the ps binary
    :arg pattern: substring to look for in each line of ps output
    '''
    # SunOS ps takes different flags than the BSD-style ps elsewhere
    psflags = '-ef' if platform.system() == 'SunOS' else 'auxww'

    # Find ps binary
    psbin = module.get_bin_path('ps', True)

    (rc, out, err) = module.run_command('%s %s' % (psbin, psflags))
    if rc != 0:
        return False
    return any(pattern in line for line in out.split('\n'))
|
e-gob/plataforma-kioscos-autoatencion
|
scripts/ansible-play/.venv/lib/python2.7/site-packages/ansible/module_utils/service.py
|
Python
|
bsd-3-clause
| 7,923
| 0.002398
|
# -*- coding: utf-8 -*-
"""
XML object class
Hervé Déjean
cpy Xerox 2009
a class for TEXT from a XMLDocument
"""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
from .XMLDSObjectClass import XMLDSObjectClass
from .XMLDSTOKENClass import XMLDSTOKENClass
from config import ds_xml_def as ds_xml
class XMLDSTEXTClass(XMLDSObjectClass):
    """
    TEXT (chunk) class: a text line read from a DS XML document.

    Holds geometry (x, y, width, height), the textual content, child TOKEN
    objects and an optional baseline object.
    """
    name=ds_xml.sTEXT
    def __init__(self,domNode = None):
        XMLDSObjectClass.__init__(self)
        # NOTE(review): bumps the id counter shared by all XMLDSObjectClass
        # instances so each object gets a distinct id
        XMLDSObjectClass.id += 1
        self._domNode = domNode
        self.tagName = ds_xml.sTEXT
        # baseline object, set from the DOM or lazily computed
        self.Obaseline=None
        self.setName(ds_xml.sTEXT)
#     def getX(self): return float(self.getAttribute('x'))
#     def getY(self): return float(self.getAttribute('y'))
#     def getX2(self):
#         return float(self.getAttribute('x'))+self.getWidth()
#     def getY2(self):
#         return float(self.getAttribute('y'))+self.getHeight()
#     def getHeight(self): return float(self.getAttribute('height'))
#     def getWidth(self): return float(self.getAttribute('width'))
    def fromDom(self,domNode):
        """
        Populate this object from a DOM node: copy every attribute, parse
        the geometry, gather text content and build child TOKEN objects.
        attributes: x y id height width (all!)
        """
#        self.setName(domNode.atg)
        self.setNode(domNode)
        # get properties
#         for prop in domNode.keys():
#             self.addAttribute(prop,domNode.get(prop))
        try:
            self._id = self.getAttribute('id')
        except:pass
        for prop in domNode.keys():
            self.addAttribute(prop,domNode.get(prop))
            if prop =='x': self._x= float(domNode.get(prop))
            elif prop =='y': self._y = float(domNode.get(prop))
            elif prop =='height': self._h = float(domNode.get(prop))
            elif prop =='width': self.setWidth(float(domNode.get(prop)))
        # derived right/bottom edges stored as attributes
        self.addAttribute('x2', self.getX()+self.getWidth())
        self.addAttribute('y2',self.getY()+self.getHeight() )
        if self.hasAttribute('blpoints'):
            # the node carries explicit baseline points
            from ObjectModel.XMLDSBASELINEClass import XMLDSBASELINEClass
            b= XMLDSBASELINEClass()
            b.fromDom(domNode)
            b.setParent(self.getParent())
            self.setBaseline(b)
        ## if no text: add a category: text, graphic, image, whitespace??
        for txt in domNode.itertext():
            stxt=txt.strip()
            if len(stxt) == 0:
                continue
            # py2/py3 compatibility: byte strings are decoded to utf-8
            if type(txt) != str:
                pass
            else:
                try:txt=txt.decode('utf-8')
                except AttributeError as e:
                    pass
            if self.getContent() is not None:
                self.addContent(txt)
            else:
                self.setContent(txt)
        # build child TOKEN objects from the DOM
        ldomElts = domNode.findall('./%s'%(ds_xml.sTOKEN))
        for elt in ldomElts:
            try:
                myObject= XMLDSTOKENClass(elt)
                self.addObject(myObject)
                myObject.setPage(self.getParent().getPage())
                myObject.fromDom(elt)
            except: pass #print 'issue with token'
    def setBaseline(self,ob): self.Obaseline = ob
    def getBaseline(self):
        return self.Obaseline
    def computeBaseline(self):
        """
        Fit a straight baseline (degree-1 least-squares polyfit) through the
        corner points of the child TOKENs, unless a baseline is already set.
        """
        if self.getBaseline() is not None:
            return self.getBaseline()
#         lHisto={}
        lY=[]
        lX=[]
        # test if TOKEN has position (not in GT)!
        for token in self.getAllNamedObjects(XMLDSTOKENClass):
            try:
                lX.append(token.getX())
                lX.append(token.getX2())
                lY.append(token.getY())
                lY.append(token.getY2())
            except TypeError:
                pass
        import numpy as np
        if len(lX) > 0:
            a,bx = np.polyfit(lX, lY, 1)
            lPoints = ','.join(["%d,%d"%(xa,ya) for xa,ya in zip(lX, lY)])
#             print 'ANLGE:',math.degrees(math.atan(a))
            # NOTE(review): ymax is computed but never used
            ymax = a*self.getWidth()+bx
            from ObjectModel.XMLDSBASELINEClass import XMLDSBASELINEClass
            b= XMLDSBASELINEClass()
            b.setNode(self)
#             b.addAttribute("points",lPoints)
            b.setAngle(a)
            b.setBx(bx)
            b.setPoints(lPoints)
            b.setParent(self)
            self.setBaseline(b)
            b.computePoints()
    def getTokens(self):
        """
        if dom tokens: return them
        else split content into whitespace-separated TOKEN objects
        """
        if self.getAllNamedObjects(XMLDSTOKENClass) != []:
            return self.getAllNamedObjects(XMLDSTOKENClass)
        else:
            for token in self.getContent().split():
                oT=XMLDSTOKENClass()
                oT.setParent(self)
                oT.setPage(self.getPage())
                self.addObject(oT)
                oT.setContent(token)
            return self.getAllNamedObjects(XMLDSTOKENClass)
    def getSetOfFeaturesXPos(self,TH,lAttr,myObject):
        """
        Build numerical features for the left edge (x), right edge (x2) and
        horizontal centre (xc) of this line; lAttr/myObject are unused here.
        """
        from spm.feature import featureObject
        if self._lBasicFeatures is None:
            self._lBasicFeatures = []
        ftype= featureObject.NUMERICAL
        feature = featureObject()
        feature.setName('x')
        feature.setTH(TH)
        feature.addNode(self)
        feature.setObjectName(self)
        feature.setValue(round(self.getX()))
        feature.setType(ftype)
        self.addFeature(feature)
        ftype= featureObject.NUMERICAL
        feature = featureObject()
        feature.setName('x2')
        feature.setTH(TH)
        feature.addNode(self)
        feature.setObjectName(self)
        feature.setValue(round(self.getX()+self.getWidth()))
        feature.setType(ftype)
        self.addFeature(feature)
        ftype= featureObject.NUMERICAL
        feature = featureObject()
        feature.setName('xc')
        feature.setTH(TH)
        feature.addNode(self)
        feature.setObjectName(self)
        feature.setValue(round(self.getX()+self.getWidth()/2))
        feature.setType(ftype)
        self.addFeature(feature)
        return self.getSetofFeatures()
    def getSetOfListedAttributes(self,TH,lAttributes,myObject):
        """
        Generate a set of features from the listed attribute names; numerical
        attributes are histogrammed over the child objects, plus optional
        'text', 'tokens', 'xc', 'virtual', 'bl' and 'linegrid' features.
        """
        from spm.feature import featureObject
        if self._lBasicFeatures is None:
            self._lBasicFeatures = []
        # needed to keep canonical values!
        elif self.getSetofFeatures() != []:
            return self.getSetofFeatures()
        lHisto = {}
        for elt in self.getAllNamedObjects(myObject):
            for attr in lAttributes:
                try:lHisto[attr]
                except KeyError:lHisto[attr] = {}
                if elt.hasAttribute(attr):
                    try:
                        try:lHisto[attr][round(float(elt.getAttribute(attr)))].append(elt)
                        except KeyError: lHisto[attr][round(float(elt.getAttribute(attr)))] = [elt]
                    except TypeError:pass
        for attr in lAttributes:
            for value in lHisto[attr]:
                # NOTE(review): "> 0.1" is equivalent to len(...) >= 1
                if len(lHisto[attr][value]) > 0.1:
                    ftype= featureObject.NUMERICAL
                    feature = featureObject()
                    feature.setName(attr)
#                     feature.setName('f')
                    feature.setTH(TH)
                    feature.addNode(self)
                    feature.setObjectName(self)
                    feature.setValue(value)
                    feature.setType(ftype)
                    self.addFeature(feature)
        if 'text' in lAttributes:
            if len(self.getContent()):
                ftype= featureObject.EDITDISTANCE
                feature = featureObject()
#                 feature.setName('content')
                feature.setName('f')
                feature.setTH(90)
                feature.addNode(self)
                feature.setObjectName(self)
                feature.setValue(self.getContent().split()[0])
                feature.setType(ftype)
                self.addFeature(feature)
        if 'tokens' in lAttributes:
            if len(self.getContent()):
                # only tokens longer than 4 characters are informative here
                for token in self.getContent().split():
                    if len(token) > 4:
                        ftype= featureObject.EDITDISTANCE
                        feature = featureObject()
                        feature.setName('token')
                        feature.setTH(TH)
                        feature.addNode(self)
                        feature.setObjectName(self)
                        feature.setValue(token.lower())
                        feature.setType(ftype)
                        self.addFeature(feature)
        if 'xc' in lAttributes:
            ftype= featureObject.NUMERICAL
            feature = featureObject()
#             feature.setName('xc')
            feature.setName('xc')
            feature.setTH(TH)
            feature.addNode(self)
            feature.setObjectName(self)
            feature.setValue(round(self.getX()+self.getWidth()/2))
            feature.setType(ftype)
            self.addFeature(feature)
        #
        if 'virtual' in lAttributes:
            ftype= featureObject.BOOLEAN
            feature = featureObject()
            feature.setName('f')
            feature.setTH(TH)
            feature.addNode(self)
            feature.setObjectName(self)
            feature.setValue(self.getAttribute('virtual'))
            feature.setType(ftype)
            self.addFeature(feature)
        if 'bl' in lAttributes:
            # vertical distance between this line's baseline and each
            # following line's baseline
            for inext in self.next:
                ftype= featureObject.NUMERICAL
                feature = featureObject()
                baseline = self.getBaseline()
                nbl = inext.getBaseline()
                if baseline and nbl:
                    feature.setName('bl')
                    feature.setTH(TH)
                    feature.addNode(self)
                    feature.setObjectName(self)
                    # avg of baseline?
                    avg1= baseline.getY() +(baseline.getY2() -baseline.getY())/2
                    avg2= nbl.getY() +(nbl.getY2()-nbl.getY())/2
                    feature.setValue(round(abs(avg2-avg1)))
                    feature.setType(ftype)
                    self.addFeature(feature)
        if 'linegrid' in lAttributes:
            #lgridlist.append((ystart,rowH, y1,yoverlap))
            for ystart,rowh,_,_ in self.lgridlist:
                ftype= featureObject.BOOLEAN
                feature = featureObject()
                feature.setName('linegrid%s'%rowh)
                feature.setTH(TH)
                feature.addNode(self)
                feature.setObjectName(self)
                feature.setValue(ystart)
                feature.setType(ftype)
                self.addFeature(feature)
        return self.getSetofFeatures()
    def getSetOfMutliValuedFeatures(self,TH,lMyFeatures,myObject):
        """
        define a multivalued features
        """
        from spm.feature import multiValueFeatureObject
        #reinit
        self._lBasicFeatures = None
        mv =multiValueFeatureObject()
        name= "multi" #'|'.join(i.getName() for i in lMyFeatures)
        mv.setName(name)
        mv.addNode(self)
        mv.setObjectName(self)
        mv.setTH(TH)
        # NOTE(review): setObjectName is called twice; and under py3 map()
        # returns a lazy iterator — presumably a py2 idiom; verify consumers.
        mv.setObjectName(self)
        mv.setValue(map(lambda x:x,lMyFeatures))
        mv.setType(multiValueFeatureObject.COMPLEX)
        self.addFeature(mv)
        return self._lBasicFeatures
|
Transkribus/TranskribusDU
|
TranskribusDU/ObjectModel/XMLDSTEXTClass.py
|
Python
|
bsd-3-clause
| 12,221
| 0.013504
|
def add(x, y):
    """Return the sum of *x* and *y*."""
    total = x + y
    return total
x = 0
# Drop into the interactive debugger; execution pauses here until the user
# continues — demonstrating pdb is the whole point of this sample script.
import pdb; pdb.set_trace()
x = add(1, 2)
|
fengbohello/practice
|
python/pdb/sample.py
|
Python
|
lgpl-3.0
| 81
| 0.037037
|
# This script has to run using the Python executable found in:
# /opt/mgmtworker/env/bin/python in order to properly load the manager
# blueprints utils.py module.
import argparse
import logging
import utils
class CtxWithLogger(object):
    """Minimal stand-in for a Cloudify ctx object exposing only a logger."""
    _LOGGER_NAME = 'internal-ssl-certs-logger'
    logger = logging.getLogger(_LOGGER_NAME)
# Inject a minimal ctx carrying a logger so utils' helpers can log when run
# outside a real Cloudify operation context.
utils.ctx = CtxWithLogger()
parser = argparse.ArgumentParser()
parser.add_argument('--metadata', default=utils.CERT_METADATA_FILE_PATH,
                    help='File containing the cert metadata. It should be a '
                         'JSON file containing an object with the '
                         '"internal_rest_host" and "networks" fields.')
parser.add_argument('manager_ip', default=None, nargs='?',
                    help='The IP of this machine on the default network')
if __name__ == '__main__':
    args = parser.parse_args()
    # Load the recorded cert metadata; an explicit manager_ip argument
    # overrides the stored internal REST host.
    cert_metadata = utils.load_cert_metadata(filename=args.metadata)
    internal_rest_host = args.manager_ip or cert_metadata['internal_rest_host']
    networks = cert_metadata.get('networks', {})
    networks['default'] = internal_rest_host
    # The cert must cover the manager's IP on every known network
    cert_ips = [internal_rest_host] + list(networks.values())
    utils.generate_internal_ssl_cert(ips=cert_ips, name=internal_rest_host)
    # Persist the (possibly updated) host/network mapping for the next run
    utils.store_cert_metadata(internal_rest_host, networks,
                              filename=args.metadata)
|
cloudify-cosmo/cloudify-manager-blueprints
|
components/manager-ip-setter/scripts/create-internal-ssl-certs.py
|
Python
|
apache-2.0
| 1,346
| 0
|
"""
Copyright (C) 2017 Open Source Robotics Foundation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import math
import numpy as np
import latlon
import ecef
class Enu(object):
    """A vector in local East-North-Up (ENU) coordinates."""

    def __init__(self, e, n, u):
        self.e = e
        self.n = n
        self.u = u

    def __eq__(self, other):
        # BUG FIX: comparing against a non-Enu object used to raise
        # AttributeError; returning NotImplemented lets Python fall back to
        # its default comparison (identity -> False) instead.
        if not isinstance(other, Enu):
            return NotImplemented
        return self.e == other.e and self.n == other.n and self.u == other.u

    def __hash__(self):
        return hash((self.e, self.n, self.u))

    def to_ecef(self, origin):
        """Convert this ENU vector to ECEF using `origin` (a lat/lon point)
        as the local tangent-plane origin.
        """
        # this doesn't work at the poles because longitude is not uniquely defined there
        sin_lon = origin._sin_lon()
        sin_lat = origin._sin_lat()
        cos_lon = origin._cos_lon()
        cos_lat = origin._cos_lat()
        # Rotation matrix from the local ENU frame into the ECEF frame
        global_to_ecef_matrix = np.array([[-sin_lon, -cos_lon * sin_lat, cos_lon * cos_lat],
                                          [cos_lon, - sin_lon * sin_lat, sin_lon * cos_lat],
                                          [0, cos_lat, sin_lat]])
        enu_vector = np.array([[self.e], [self.n], [self.u]])
        ecef_vector = np.dot(global_to_ecef_matrix, enu_vector)
        return ecef.Ecef(ecef_vector[0][0], ecef_vector[1][0], ecef_vector[2][0])
|
ekumenlabs/terminus
|
terminus/geometry/enu.py
|
Python
|
apache-2.0
| 1,657
| 0.002414
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
FS Pairtree storage - Reverse lookup
====================================
Conventions used:
From http://www.cdlib.org/inside/diglib/pairtree/pairtreespec.html version 0.1
This is an implementation of a reverse lookup index, using the pairtree path spec to
record the link between local id and the id's that it corresponds to.
eg to denote issn:1234-1234 as being linked to a global id of "uuid:1e4f..."
--> create a file at ROOT_DIR/pairtree_rl/is/sn/+1/23/4-/12/34/uuid+1e4f...
Note that the id it links to is recorded as a filename encoded as per the pairtree spec.
Usage
=====
>>> from pairtree import PairtreeReverseLookup
>>> rl = PairtreeReverseLookup(storage_dir="ROOT")
>>> rl["issn:1234-1234"].append("uuid:1e4f...")
>>> rl["issn:1234-1234"]
["uuid:1e4f"]
>>> rl["issn:1234-1234"] = ["id:1", "uuid:32fad..."]
>>>
Notes
=====
This was created to avoid certain race conditions I had with a pickled dictionary for this index.
A SQLite or similar lookup would also be effective, but this one relies solely on pairtree.
"""
import os
from pairtree.pairtree_path import id_encode, id_decode, id_to_dirpath
PAIRTREE_RL = "pairtree_rl"
class PairtreeReverseLookup_list(object):
    """List-like view of the ids linked to a single local id.

    Each linked id is stored as a pairtree-encoded filename inside the
    directory derived from the local id's pairtree path.
    """
    def __init__(self, rl_dir, id):
        self._rl_dir = rl_dir
        self._id = id
        self._dirpath = id_to_dirpath(self._id, self._rl_dir)

    def _get_ids(self):
        """Return the decoded ids linked to this id, or [] when none exist."""
        if os.path.isdir(self._dirpath):
            ids = []
            for f in os.listdir(self._dirpath):
                ids.append(id_decode(f))
            return ids
        else:
            return []

    def _add_id(self, new_id):
        """Record a link to `new_id`, creating the directory on demand."""
        if not os.path.exists(self._dirpath):
            os.makedirs(self._dirpath)
        enc_id = id_encode(new_id)
        # BUG FIX: the existence test previously checked `enc_id` relative to
        # the process cwd instead of inside the lookup directory, so it never
        # detected an already-recorded link.
        target = os.path.join(self._dirpath, enc_id)
        if not os.path.isfile(target):
            with open(target, "w") as f:
                f.write(new_id)

    def _exists(self, id):
        """True when a link to `id` is already recorded."""
        if os.path.exists(self._dirpath):
            return id_encode(id) in os.listdir(self._dirpath)
        else:
            return False

    def append(self, *args):
        [self._add_id(x) for x in args if not self._exists(x)]

    def __len__(self):
        # Robustness: a missing directory simply means no linked ids, which
        # previously raised OSError.
        if not os.path.isdir(self._dirpath):
            return 0
        return len(os.listdir(self._dirpath))

    def __repr__(self):
        return "ID:'%s' -> ['%s']" % (self._id, "','".join(self._get_ids()))

    def __str__(self):
        return self.__repr__()

    def __iter__(self):
        # BUG FIX: _get_ids() already returns decoded ids; decoding a second
        # time corrupted any id containing pairtree-encoded characters.
        for decoded in self._get_ids():
            yield decoded
class PairtreeReverseLookup(object):
    """Reverse-lookup index mapping a local id to its linked ids, stored
    purely on the filesystem using pairtree paths under `storage_dir`.
    """
    def __init__(self, storage_dir="data"):
        self._storage_dir = storage_dir
        self._rl_dir = os.path.join(storage_dir, PAIRTREE_RL)
        self._init_store()

    def _init_store(self):
        # Create the root storage directory on demand
        if not os.path.isdir(self._storage_dir):
            os.makedirs(self._storage_dir)

    def __getitem__(self, id):
        return PairtreeReverseLookup_list(self._rl_dir, id)

    def __setitem__(self, id, value):
        id_c = PairtreeReverseLookup_list(self._rl_dir, id)
        # BUG FIX: the isinstance() arguments were reversed
        # (`isinstance(list, value)`), which raises TypeError for any
        # non-type value; the intent is to test whether `value` is a list.
        if isinstance(value, list):
            id_c.append(*value)
        else:
            id_c.append(value)

    def __delitem__(self, id):
        dirpath = id_to_dirpath(id, self._rl_dir)
        if os.path.isdir(dirpath):
            for f in os.listdir(dirpath):
                os.remove(os.path.join(dirpath, f))
            os.removedirs(dirpath)  # will throw OSError if the dir cannot be removed.
        self._init_store()  # just in case
|
benosteen/pairtree
|
pairtree/pairtree_revlookup.py
|
Python
|
apache-2.0
| 3,270
| 0.015596
|
# -*- coding: utf-8 -*-
#
# libnacl documentation build configuration file, created by
# sphinx-quickstart on Thu May 29 10:29:25 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# Make the package root importable so autodoc can import libnacl below.
sys.path.insert(0, os.path.abspath('..'))
from libnacl import version
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
# Only autodoc is needed: API pages are generated from docstrings.
extensions = [
    'sphinx.ext.autodoc',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'libnacl'
copyright = u'2020, Thomas S Hatch'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
# The full version, including alpha/beta/rc tags.
# `version` here is the string imported from the libnacl package above.
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
#html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
#htmlhelp_basename = 'libnacl'
# -- Options for LaTeX output ---------------------------------------------
# LaTeX builder overrides; everything is left at the Sphinx defaults, the
# commented keys below only document what could be customised here.
latex_elements = {
    # Paper size: 'letterpaper' or 'a4paper'.
    # 'papersize': 'letterpaper',
    # Base font size: '10pt', '11pt' or '12pt'.
    # 'pointsize': '10pt',
    # Extra LaTeX put into the preamble of every generated document.
    # 'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
# One LaTeX build target per tuple:
# (start document, output file, title, author, document class).
latex_documents = [
    (
        'index',
        'libnacl.tex',
        u'libnacl Documentation',
        u'Thomas S Hatch',
        'manual',
    ),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
# Manual pages to build, one tuple each:
# (start document, page name, description, authors, man section).
man_pages = [
    ('index', 'libnacl', u'libnacl Documentation', [u'Thomas S Hatch'], 1),
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
# Texinfo build targets, one tuple each:
# (start document, target name, title, author, dir entry, description, category).
texinfo_documents = [
    (
        'index',
        'libnacl',
        u'libnacl Documentation',
        u'Thomas S Hatch',
        'libnacl',
        'One line description of project.',
        'Miscellaneous',
    ),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# -- Options for Epub output ----------------------------------------------
# Bibliographic Dublin Core info.
# Dublin Core metadata embedded in the generated EPUB file.
epub_title = u'libnacl'
epub_author = u'Thomas S Hatch'
epub_publisher = u'Thomas S Hatch'
epub_copyright = u'2020, Thomas S Hatch'
# The basename for the epub file. It defaults to the project name.
#epub_basename = u'libnacl'
# The HTML theme for the epub output. Since the default themes are not optimized
# for small screen space, using the same theme for HTML and epub output is
# usually not wise. This defaults to 'epub', a theme designed to save visual
# space.
#epub_theme = 'epub'
# The language of the text. It defaults to the language option
# or en if the language is not set.
#epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#epub_identifier = ''
# A unique identification for the text.
#epub_uid = ''
# A tuple containing the cover image and cover page html template filenames.
#epub_cover = ()
# A sequence of (type, uri, title) tuples for the guide element of content.opf.
#epub_guide = ()
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []
# HTML files shat should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []
# A list of files that should not be packed into the epub file.
#epub_exclude_files = []
# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3
# Allow duplicate toc entries.
#epub_tocdup = True
# Choose between 'default' and 'includehidden'.
#epub_tocscope = 'default'
# Fix unsupported image types using the PIL.
#epub_fix_images = False
# Scale large images.
#epub_max_image_width = 0
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#epub_show_urls = 'inline'
# If false, no index is generated.
#epub_use_index = True
|
saltstack/libnacl
|
doc/conf.py
|
Python
|
apache-2.0
| 10,276
| 0.007201
|
# Generated by Django 2.2.17 on 2021-01-28 01:52
from django.db import migrations, models
class Migration(migrations.Migration):
    # Two schema changes for the discounts app: add a nullable boolean
    # 'applied' flag to RegistrationDiscount, and drop the
    # TemporaryRegistrationDiscount model entirely.
    dependencies = [
        ('discounts', '0010_merge_20191028_1925'),
    ]
    operations = [
        migrations.AddField(
            model_name='registrationdiscount',
            name='applied',
            # null=True so existing rows need no backfill value.
            field=models.BooleanField(null=True, verbose_name='Use finalized'),
        ),
        migrations.DeleteModel(
            name='TemporaryRegistrationDiscount',
        ),
    ]
|
django-danceschool/django-danceschool
|
danceschool/discounts/migrations/0011_auto_20210127_2052.py
|
Python
|
bsd-3-clause
| 525
| 0
|
# Copyright (c) 2015 - present Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import codecs
import datetime
import itertools
import operator
import os
import re
import sys
try:
from lxml import etree
except ImportError:
etree = None
from . import colorize, config, source, utils
# Issue severity levels as they appear in Infer's JSON reports.
ISSUE_KIND_ERROR = 'ERROR'
ISSUE_KIND_WARNING = 'WARNING'
ISSUE_KIND_INFO = 'INFO'
ISSUE_KIND_ADVICE = 'ADVICE'
# field names in rows of json reports
JSON_INDEX_DOTTY = 'dotty'
JSON_INDEX_FILENAME = 'file'
JSON_INDEX_HASH = 'hash'
JSON_INDEX_INFER_SOURCE_LOC = 'infer_source_loc'
# Sub-fields of the optional 'infer_source_loc' record.
JSON_INDEX_ISL_FILE = 'file'
JSON_INDEX_ISL_LNUM = 'lnum'
JSON_INDEX_ISL_CNUM = 'cnum'
JSON_INDEX_ISL_ENUM = 'enum'
JSON_INDEX_KIND = 'kind'
JSON_INDEX_LINE = 'line'
JSON_INDEX_PROCEDURE = 'procedure'
JSON_INDEX_PROCEDURE_ID = 'procedure_id'
JSON_INDEX_QUALIFIER = 'qualifier'
JSON_INDEX_QUALIFIER_TAGS = 'qualifier_tags'
JSON_INDEX_TYPE = 'bug_type'
JSON_INDEX_TRACE = 'bug_trace'
JSON_INDEX_TRACE_LEVEL = 'level'
JSON_INDEX_TRACE_FILENAME = 'filename'
JSON_INDEX_TRACE_LINE = 'line_number'
JSON_INDEX_TRACE_DESCRIPTION = 'description'
JSON_INDEX_VISIBILITY = 'visibility'
# Base URL for per-issue-type documentation; the bug type is appended.
ISSUE_TYPES_URL = 'http://fbinfer.com/docs/infer-issue-types.html#'
def _text_of_infer_loc(loc):
    """Render an Infer source-location record as a short parenthesized tag."""
    fields = (
        loc[JSON_INDEX_ISL_FILE],
        loc[JSON_INDEX_ISL_LNUM],
        loc[JSON_INDEX_ISL_CNUM],
        loc[JSON_INDEX_ISL_ENUM],
    )
    return ' ({}:{}:{}-{}:)'.format(*fields)
def text_of_report(report):
    """Format a single report row as a human-readable message.

    Includes the optional Infer-internal source location when the row
    carries one.
    """
    infer_loc = ''
    if JSON_INDEX_INFER_SOURCE_LOC in report:
        infer_loc = _text_of_infer_loc(report[JSON_INDEX_INFER_SOURCE_LOC])
    return '%s:%d: %s: %s%s\n %s' % (
        report[JSON_INDEX_FILENAME],
        report[JSON_INDEX_LINE],
        report[JSON_INDEX_KIND].lower(),
        report[JSON_INDEX_TYPE],
        infer_loc,
        report[JSON_INDEX_QUALIFIER],
    )
def _text_of_report_list(project_root, reports, bugs_txt_path, limit=None,
                         formatter=colorize.TERMINAL_FORMATTER):
    # Render up to `limit` reports (each with colorized source context),
    # followed by a per-bug-type summary of ALL reports.  Returns the whole
    # thing as one string.
    # NOTE(review): Python 2 only — relies on `unicode`, tuple-parameter
    # lambdas, and list-returning dict.items(); do not run under Python 3.
    n_issues = len(reports)
    if n_issues == 0:
        if formatter == colorize.TERMINAL_FORMATTER:
            out = colorize.color(' No issues found ',
                                 colorize.SUCCESS, formatter)
            return out + '\n'
        else:
            return 'No issues found'
    text_errors_list = []
    # Only the first `limit` reports get the detailed text + source context
    # (reports[:None] slices the whole list when no limit is given).
    for report in reports[:limit]:
        filename = report[JSON_INDEX_FILENAME]
        line = report[JSON_INDEX_LINE]
        source_context = ''
        source_context = source.build_source_context(
            os.path.join(project_root, filename),
            formatter,
            line,
        )
        indenter = source.Indenter() \
            .indent_push() \
            .add(source_context)
        source_context = '\n' + unicode(indenter)
        msg = text_of_report(report)
        # Colorize the message according to its severity.
        if report[JSON_INDEX_KIND] == ISSUE_KIND_ERROR:
            msg = colorize.color(msg, colorize.ERROR, formatter)
        elif report[JSON_INDEX_KIND] == ISSUE_KIND_WARNING:
            msg = colorize.color(msg, colorize.WARNING, formatter)
        elif report[JSON_INDEX_KIND] == ISSUE_KIND_ADVICE:
            msg = colorize.color(msg, colorize.ADVICE, formatter)
        text = '%s%s' % (msg, source_context)
        text_errors_list.append(text)
    # Count occurrences of each bug type across ALL reports (not just the
    # displayed prefix) for the summary section.
    error_types_count = {}
    for report in reports:
        t = report[JSON_INDEX_TYPE]
        # assert failures are not very informative without knowing
        # which assertion failed
        if t == 'Assert_failure' and JSON_INDEX_INFER_SOURCE_LOC in report:
            t += _text_of_infer_loc(report[JSON_INDEX_INFER_SOURCE_LOC])
        if t not in error_types_count:
            error_types_count[t] = 1
        else:
            error_types_count[t] += 1
    # Width used to right-align bug type names in the summary.
    max_type_length = max(map(len, error_types_count.keys())) + 2
    sorted_error_types = error_types_count.items()
    sorted_error_types.sort(key=operator.itemgetter(1), reverse=True)
    types_text_list = map(lambda (t, count): '%s: %d' % (
        t.rjust(max_type_length),
        count,
    ), sorted_error_types)
    text_errors = '\n\n'.join(text_errors_list)
    # When a limit was given (None compares < 0 in Python 2, so this branch
    # is skipped for limit=None) and it was exceeded, point at the full file.
    if limit >= 0 and n_issues > limit:
        text_errors += colorize.color(
            ('\n\n...too many issues to display (limit=%d exceeded), please ' +
             'see %s or run `inferTraceBugs` for the remaining issues.')
            % (limit, bugs_txt_path), colorize.HEADER, formatter)
    issues_found = 'Found {n_issues}'.format(
        n_issues=utils.get_plural('issue', n_issues),
    )
    msg = '{issues_found}\n\n{issues}\n\n{header}\n\n{summary}'.format(
        issues_found=colorize.color(issues_found,
                                    colorize.HEADER,
                                    formatter),
        issues=text_errors,
        header=colorize.color('Summary of the reports',
                              colorize.HEADER, formatter),
        summary='\n'.join(types_text_list),
    )
    return msg
def _is_user_visible(project_root, report):
    """Return True for report kinds worth showing to users.

    Errors, warnings and advice are shown; INFO-level reports are filtered
    out.  ``project_root`` is accepted for signature compatibility with
    callers but is not consulted here.
    """
    visible_kinds = (ISSUE_KIND_ERROR, ISSUE_KIND_WARNING, ISSUE_KIND_ADVICE)
    return report[JSON_INDEX_KIND] in visible_kinds
def print_and_save_errors(infer_out, project_root, json_report, bugs_out,
                          pmd_xml):
    # Load the JSON report, show a colorized excerpt on stdout (first 10
    # issues), write the full plain-text report to `bugs_out`, and
    # optionally emit a PMD-style XML file into `infer_out`.
    errors = utils.load_json_from_path(json_report)
    # Drop INFO-level reports before any rendering.
    errors = [e for e in errors if _is_user_visible(project_root, e)]
    console_out = _text_of_report_list(project_root, errors, bugs_out,
                                       limit=10)
    utils.stdout('\n' + console_out)
    # The file copy is uncolored and unlimited.
    plain_out = _text_of_report_list(project_root, errors, bugs_out,
                                     formatter=colorize.PLAIN_FORMATTER)
    with codecs.open(bugs_out, 'w',
                     encoding=config.CODESET, errors='replace') as file_out:
        file_out.write(plain_out)
    if pmd_xml:
        xml_out = os.path.join(infer_out, config.PMD_XML_FILENAME)
        with codecs.open(xml_out, 'w',
                         encoding=config.CODESET,
                         errors='replace') as file_out:
            file_out.write(_pmd_xml_of_issues(errors))
def merge_reports_from_paths(report_paths):
    """Concatenate the JSON reports found at *report_paths*.

    The merged rows are sorted and de-duplicated before being returned.
    """
    merged = []
    for path in report_paths:
        merged += utils.load_json_from_path(path)
    return _sort_and_uniq_rows(merged)
def _pmd_xml_of_issues(issues):
    # Serialize the issue list as a PMD 5.4.1-compatible XML document
    # (one <file> element per issue, each holding one <violation>).
    # Exits the process if lxml is unavailable.
    if etree is None:
        print('ERROR: "etree" Python package not found.')
        print('ERROR: You need to install it to use Infer with --pmd-xml')
        sys.exit(1)
    root = etree.Element('pmd')
    root.attrib['version'] = '5.4.1'
    root.attrib['date'] = datetime.datetime.now().isoformat()
    for issue in issues:
        # Grab everything before the first '(' of the procedure id; if it
        # matches, the id looks like a dotted, fully-qualified method name.
        fully_qualifed_method_name = re.search('(.*)\(.*',
                                               issue[JSON_INDEX_PROCEDURE_ID])
        class_name = ''
        package = ''
        if fully_qualifed_method_name is not None:
            # probably Java
            # Split package.Class.method into its three parts.
            # NOTE(review): assumes at least two dotted components — a
            # single-component name would raise IndexError below.
            info = fully_qualifed_method_name.groups()[0].split('.')
            class_name = info[-2:-1][0]
            method = info[-1]
            package = '.'.join(info[0:-2])
        else:
            method = issue[JSON_INDEX_PROCEDURE]
        file_node = etree.Element('file')
        file_node.attrib['name'] = issue[JSON_INDEX_FILENAME]
        violation = etree.Element('violation')
        # Infer reports have no column info, so columns are fixed at 0 and
        # the violation is said to span a single line.
        violation.attrib['begincolumn'] = '0'
        violation.attrib['beginline'] = str(issue[JSON_INDEX_LINE])
        violation.attrib['endcolumn'] = '0'
        violation.attrib['endline'] = str(issue[JSON_INDEX_LINE] + 1)
        violation.attrib['class'] = class_name
        violation.attrib['method'] = method
        violation.attrib['package'] = package
        violation.attrib['priority'] = '1'
        violation.attrib['rule'] = issue[JSON_INDEX_TYPE]
        violation.attrib['ruleset'] = 'Infer Rules'
        violation.attrib['externalinfourl'] = (
            ISSUE_TYPES_URL + issue[JSON_INDEX_TYPE])
        violation.text = issue[JSON_INDEX_QUALIFIER]
        file_node.append(violation)
        root.append(file_node)
    return etree.tostring(root, pretty_print=True, encoding=config.CODESET)
def _sort_and_uniq_rows(l):
    # Sort report rows by (file, line, hash, qualifier) and keep only the
    # first row of each duplicate group.  Sorts `l` in place.
    # NOTE(review): Python 2 only — tuple-parameter lambda and iterator
    # .next() are not valid Python 3.
    key = operator.itemgetter(JSON_INDEX_FILENAME,
                              JSON_INDEX_LINE,
                              JSON_INDEX_HASH,
                              JSON_INDEX_QUALIFIER)
    l.sort(key=key)
    groups = itertools.groupby(l, key)
    # guaranteed to be at least one element in each group
    return map(lambda (keys, dups): dups.next(), groups)
|
jsachs/infer
|
infer/lib/python/inferlib/issues.py
|
Python
|
bsd-3-clause
| 9,001
| 0.000222
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class NetworkSecurityGroupsOperations:
"""NetworkSecurityGroupsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2019_04_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
    async def _delete_initial(
        self,
        resource_group_name: str,
        network_security_group_name: str,
        **kwargs: Any
    ) -> None:
        """Send the raw DELETE request for a network security group.

        Long-running-operation polling is handled by :meth:`begin_delete`;
        this method only issues the initial request and validates the
        response status.
        """
        cls = kwargs.pop('cls', None) # type: ClsType[None]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2019-04-01"
        # Construct URL
        url = self._delete_initial.metadata['url'] # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'networkSecurityGroupName': self._serialize.url("network_security_group_name", network_security_group_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {} # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {} # type: Dict[str, Any]
        request = self._client.delete(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        # 200/202/204 are the accepted outcomes; anything else is an error.
        if response.status_code not in [200, 202, 204]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        if cls:
            return cls(pipeline_response, None, {})
    _delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkSecurityGroups/{networkSecurityGroupName}'} # type: ignore
    async def begin_delete(
        self,
        resource_group_name: str,
        network_security_group_name: str,
        **kwargs: Any
    ) -> AsyncLROPoller[None]:
        """Deletes the specified network security group.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param network_security_group_name: The name of the network security group.
        :type network_security_group_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be AsyncARMPolling.
         Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
        :rtype: ~azure.core.polling.AsyncLROPoller[None]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
        cls = kwargs.pop('cls', None) # type: ClsType[None]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
        # Only send the initial request when not resuming from a saved token.
        if cont_token is None:
            raw_result = await self._delete_initial(
                resource_group_name=resource_group_name,
                network_security_group_name=network_security_group_name,
                cls=lambda x,y,z: x,
                **kwargs
            )
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)
        def get_long_running_output(pipeline_response):
            # DELETE has no body to deserialize; only invoke a custom callback.
            if cls:
                return cls(pipeline_response, None, {})
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'networkSecurityGroupName': self._serialize.url("network_security_group_name", network_security_group_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkSecurityGroups/{networkSecurityGroupName}'} # type: ignore
    async def get(
        self,
        resource_group_name: str,
        network_security_group_name: str,
        expand: Optional[str] = None,
        **kwargs: Any
    ) -> "_models.NetworkSecurityGroup":
        """Gets the specified network security group.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param network_security_group_name: The name of the network security group.
        :type network_security_group_name: str
        :param expand: Expands referenced resources.
        :type expand: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: NetworkSecurityGroup, or the result of cls(response)
        :rtype: ~azure.mgmt.network.v2019_04_01.models.NetworkSecurityGroup
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None) # type: ClsType["_models.NetworkSecurityGroup"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2019-04-01"
        accept = "application/json"
        # Construct URL
        url = self.get.metadata['url'] # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'networkSecurityGroupName': self._serialize.url("network_security_group_name", network_security_group_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {} # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # $expand is only sent when the caller asked for it.
        if expand is not None:
            query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
        # Construct headers
        header_parameters = {} # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        deserialized = self._deserialize('NetworkSecurityGroup', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkSecurityGroups/{networkSecurityGroupName}'} # type: ignore
    async def _create_or_update_initial(
        self,
        resource_group_name: str,
        network_security_group_name: str,
        parameters: "_models.NetworkSecurityGroup",
        **kwargs: Any
    ) -> "_models.NetworkSecurityGroup":
        """Send the raw PUT request to create or update a network security group.

        Long-running-operation polling is handled by
        :meth:`begin_create_or_update`.
        """
        cls = kwargs.pop('cls', None) # type: ClsType["_models.NetworkSecurityGroup"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2019-04-01"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"
        # Construct URL
        url = self._create_or_update_initial.metadata['url'] # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'networkSecurityGroupName': self._serialize.url("network_security_group_name", network_security_group_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {} # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {} # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        body_content_kwargs = {} # type: Dict[str, Any]
        body_content = self._serialize.body(parameters, 'NetworkSecurityGroup')
        body_content_kwargs['content'] = body_content
        request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        # 200 = updated existing resource, 201 = created a new one.
        if response.status_code not in [200, 201]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        if response.status_code == 200:
            deserialized = self._deserialize('NetworkSecurityGroup', pipeline_response)
        if response.status_code == 201:
            deserialized = self._deserialize('NetworkSecurityGroup', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    _create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkSecurityGroups/{networkSecurityGroupName}'} # type: ignore
    async def begin_create_or_update(
        self,
        resource_group_name: str,
        network_security_group_name: str,
        parameters: "_models.NetworkSecurityGroup",
        **kwargs: Any
    ) -> AsyncLROPoller["_models.NetworkSecurityGroup"]:
        """Creates or updates a network security group in the specified resource group.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param network_security_group_name: The name of the network security group.
        :type network_security_group_name: str
        :param parameters: Parameters supplied to the create or update network security group
         operation.
        :type parameters: ~azure.mgmt.network.v2019_04_01.models.NetworkSecurityGroup
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be AsyncARMPolling.
         Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either NetworkSecurityGroup or the result of cls(response)
        :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2019_04_01.models.NetworkSecurityGroup]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
        cls = kwargs.pop('cls', None) # type: ClsType["_models.NetworkSecurityGroup"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
        # Only send the initial request when not resuming from a saved token.
        if cont_token is None:
            raw_result = await self._create_or_update_initial(
                resource_group_name=resource_group_name,
                network_security_group_name=network_security_group_name,
                parameters=parameters,
                cls=lambda x,y,z: x,
                **kwargs
            )
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)
        def get_long_running_output(pipeline_response):
            # The final response carries the resulting NetworkSecurityGroup.
            deserialized = self._deserialize('NetworkSecurityGroup', pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'networkSecurityGroupName': self._serialize.url("network_security_group_name", network_security_group_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkSecurityGroups/{networkSecurityGroupName}'} # type: ignore
    async def _update_tags_initial(
        self,
        resource_group_name: str,
        network_security_group_name: str,
        parameters: "_models.TagsObject",
        **kwargs: Any
    ) -> "_models.NetworkSecurityGroup":
        """Send the raw PATCH request to update a network security group's tags.

        Long-running-operation polling is handled by :meth:`begin_update_tags`.
        """
        cls = kwargs.pop('cls', None) # type: ClsType["_models.NetworkSecurityGroup"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2019-04-01"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"
        # Construct URL
        url = self._update_tags_initial.metadata['url'] # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'networkSecurityGroupName': self._serialize.url("network_security_group_name", network_security_group_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {} # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {} # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        body_content_kwargs = {} # type: Dict[str, Any]
        body_content = self._serialize.body(parameters, 'TagsObject')
        body_content_kwargs['content'] = body_content
        request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        deserialized = self._deserialize('NetworkSecurityGroup', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    _update_tags_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkSecurityGroups/{networkSecurityGroupName}'} # type: ignore
    async def begin_update_tags(
        self,
        resource_group_name: str,
        network_security_group_name: str,
        parameters: "_models.TagsObject",
        **kwargs: Any
    ) -> AsyncLROPoller["_models.NetworkSecurityGroup"]:
        """Updates a network security group tags.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param network_security_group_name: The name of the network security group.
        :type network_security_group_name: str
        :param parameters: Parameters supplied to update network security group tags.
        :type parameters: ~azure.mgmt.network.v2019_04_01.models.TagsObject
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be AsyncARMPolling.
        Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either NetworkSecurityGroup or the result of cls(response)
        :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2019_04_01.models.NetworkSecurityGroup]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        # Auto-generated long-running-operation (LRO) wrapper: issue the
        # initial PATCH, then poll until the operation completes.
        polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
        cls = kwargs.pop('cls', None) # type: ClsType["_models.NetworkSecurityGroup"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
        # No saved poller state: make the initial request ourselves.
        if cont_token is None:
            raw_result = await self._update_tags_initial(
                resource_group_name=resource_group_name,
                network_security_group_name=network_security_group_name,
                parameters=parameters,
                cls=lambda x,y,z: x,
                **kwargs
            )
        # These kwargs were consumed by the initial call; drop them so the
        # polling method does not receive them again.
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)
        def get_long_running_output(pipeline_response):
            # Deserialize the final LRO response into the model type.
            deserialized = self._deserialize('NetworkSecurityGroup', pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'networkSecurityGroupName': self._serialize.url("network_security_group_name", network_security_group_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            # Resume a previously started operation from its saved token.
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_update_tags.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkSecurityGroups/{networkSecurityGroupName}'} # type: ignore
    def list_all(
        self,
        **kwargs: Any
    ) -> AsyncIterable["_models.NetworkSecurityGroupListResult"]:
        """Gets all network security groups in a subscription.
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either NetworkSecurityGroupListResult or the result of cls(response)
        :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2019_04_01.models.NetworkSecurityGroupListResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        # Auto-generated paging operation: returns a lazy async iterator
        # that fetches pages on demand via the closures below.
        cls = kwargs.pop('cls', None) # type: ClsType["_models.NetworkSecurityGroupListResult"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2019-04-01"
        accept = "application/json"
        def prepare_request(next_link=None):
            # Build the GET request: first page from the operation URL,
            # subsequent pages from the service-supplied next_link.
            # Construct headers
            header_parameters = {} # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
            if not next_link:
                # Construct URL
                url = self.list_all.metadata['url'] # type: ignore
                path_format_arguments = {
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {} # type: Dict[str, Any]
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
                request = self._client.get(url, query_parameters, header_parameters)
            else:
                # next_link already carries all query parameters.
                url = next_link
                query_parameters = {} # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request
        async def extract_data(pipeline_response):
            # Split one page into (next-page link, items of this page).
            deserialized = self._deserialize('NetworkSecurityGroupListResult', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, AsyncList(list_of_elem)
        async def get_next(next_link=None):
            request = prepare_request(next_link)
            pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)
            return pipeline_response
        return AsyncItemPaged(
            get_next, extract_data
        )
    list_all.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/networkSecurityGroups'} # type: ignore
    def list(
        self,
        resource_group_name: str,
        **kwargs: Any
    ) -> AsyncIterable["_models.NetworkSecurityGroupListResult"]:
        """Gets all network security groups in a resource group.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either NetworkSecurityGroupListResult or the result of cls(response)
        :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2019_04_01.models.NetworkSecurityGroupListResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        # Auto-generated paging operation, scoped to one resource group.
        cls = kwargs.pop('cls', None) # type: ClsType["_models.NetworkSecurityGroupListResult"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2019-04-01"
        accept = "application/json"
        def prepare_request(next_link=None):
            # First page uses the templated operation URL; later pages use
            # the next_link returned by the service verbatim.
            # Construct headers
            header_parameters = {} # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
            if not next_link:
                # Construct URL
                url = self.list.metadata['url'] # type: ignore
                path_format_arguments = {
                    'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {} # type: Dict[str, Any]
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
                request = self._client.get(url, query_parameters, header_parameters)
            else:
                url = next_link
                query_parameters = {} # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request
        async def extract_data(pipeline_response):
            # Returns (link-to-next-page-or-None, async iterable of items).
            deserialized = self._deserialize('NetworkSecurityGroupListResult', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, AsyncList(list_of_elem)
        async def get_next(next_link=None):
            request = prepare_request(next_link)
            pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)
            return pipeline_response
        return AsyncItemPaged(
            get_next, extract_data
        )
    list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkSecurityGroups'} # type: ignore
|
Azure/azure-sdk-for-python
|
sdk/network/azure-mgmt-network/azure/mgmt/network/v2019_04_01/aio/operations/_network_security_groups_operations.py
|
Python
|
mit
| 30,870
| 0.005248
|
#!/usr/bin/env python3
# Checks that every changelog fragment in the CHANGES folder uses one of
# the suffixes understood by the changelog tooling.
import sys
from pathlib import Path
# File-name suffixes permitted for changelog fragments.
ALLOWED_SUFFIXES = ['.feature',
'.bugfix',
'.doc',
'.removal',
'.misc']
def get_root(script_path):
    """Return the git repository root that contains *script_path*.

    Walks upward from the script's directory until a ``.git`` entry is
    found.

    :param script_path: a ``pathlib.Path`` pointing inside the repository
    :raises RuntimeError: if no ``.git`` is found up to the filesystem root
    """
    folder = script_path.absolute().parent
    while not (folder / '.git').exists():
        # BUG FIX: the original compared ``folder`` (a Path) with
        # ``folder.anchor`` (a str), which is never equal, so the loop
        # spun forever at the filesystem root.  ``Path.parent`` of the
        # root is the root itself, so reaching a fixed point means there
        # is no enclosing git repository.
        if folder == folder.parent:
            raise RuntimeError("git repo not found")
        folder = folder.parent
    return folder
def main(argv):
    """Validate suffixes of all changelog fragments in CHANGES.

    Prints progress to stdout and error details to stderr.

    :param argv: ``sys.argv``; argv[0] locates the repository root
    :returns: 0 when all fragments are valid, 1 otherwise
    """
    print('Check "CHANGES" folder... ', end='', flush=True)
    changes_dir = get_root(Path(argv[0])) / 'CHANGES'
    bad_seen = False
    for entry in changes_dir.iterdir():
        # Bookkeeping files that live in CHANGES but are not fragments.
        if entry.name in ('.gitignore', '.TEMPLATE.rst'):
            continue
        if entry.suffix in ALLOWED_SUFFIXES:
            continue
        if not bad_seen:
            # Terminate the progress line before the first error message.
            print('')
        print(entry, 'has illegal suffix', file=sys.stderr)
        bad_seen = True
    if not bad_seen:
        print('OK')
    else:
        print('', file=sys.stderr)
        print('Allowed suffixes are:', ALLOWED_SUFFIXES, file=sys.stderr)
        print('', file=sys.stderr)
    return int(bad_seen)
# Exit status mirrors the check result: 0 = OK, 1 = bad suffix found.
if __name__ == '__main__':
    sys.exit(main(sys.argv))
|
arthurdarcet/aiohttp
|
tools/check_changes.py
|
Python
|
apache-2.0
| 1,224
| 0
|
import string
import random
import logging
import os
from rootpy import asrootpy, log
from rootpy.plotting import Legend, Canvas, Pad, Graph
from rootpy.plotting.base import Color, MarkerStyle
from rootpy.plotting.utils import get_limits
import ROOT
# from external import husl
# suppress some nonsense logging messages when writing to pdfs.
# Also, setup default logger
log["/ROOT.TCanvas.Print"].setLevel(log.WARNING)
logging.basicConfig(level=logging.DEBUG)
# Rebind to the module-local logger; all log.* calls below use "/roofie".
log = log["/roofie"]
def is_plottable(obj):
    """Return True if *obj* is something roofie can draw.

    Only ROOT.TH1 and ROOT.TGraph instances count as plottables.
    """
    plottable_types = (ROOT.TH1, ROOT.TGraph)
    return isinstance(obj, plottable_types)
class Styles(object):
    """Namespace of plot layout presets; assign one to ``Figure.style``."""
    # Define names of plot layouts:
    class _Default_Style(object):
        # Shared defaults; sizes below are given in points (font 43 family
        # uses absolute pixel/point sizes in ROOT).
        pt_per_cm = 28.4527625
        titlefont = 43
        labelfont = 43
        markerSizepx = 4  # number of pixels of the marker
    class Presentation_full(_Default_Style):
        # Full-width slide figure.
        axisTitleSize = 14
        axisLabelSize = 14
        legendSize = 14
        canvasWidth = 340
        canvasHeight = 300
        plot_margins = (.13, .05, .13, .1)  # left, right, bottom, top
        plot_ytitle_offset = 1.15  # factor of the normal offset :P, may lay outside of the canvas
    class Presentation_half(_Default_Style):
        # Half-width slide figure (smaller canvas, wider relative margins).
        axisTitleSize = 10
        axisLabelSize = 10
        legendSize = 10
        canvasWidth = 170
        canvasHeight = 150
        plot_margins = (.3, .08, .2, .1)
        plot_ytitle_offset = 1
    class Public_full(_Default_Style):
        # Publication-quality full-width figure.
        axisTitleSize = 10
        axisLabelSize = 8
        legendSize = 8
        canvasWidth = 340
        canvasHeight = 300
        plot_margins = (.13, .05, .13, .04)
        plot_ytitle_offset = 1.15
def gen_random_name():
    """Return a 25-character random alphanumeric identifier.

    Used to give cloned ROOT objects unique temporary names.
    """
    alphabet = string.ascii_letters + string.digits
    return ''.join(random.choice(alphabet) for _ in range(25))
def get_color_generator(palette='root', ncolors=10):
    """
    Returns a generator for n colors.
    Parameters
    ----------
    palette : string
        name of the color palette which should be used
    ncolors : int
        number of colors this palette should have, it might be ignored by some palettes!
    Returns
    -------
    generator :
        colors which can be digested by _rootpy_
    Raises
    ------
    ValueError :
        if the palette name is unknown (raised on first iteration, since
        this is a generator function)
    """
    # BUG FIX: the original left ``colors`` unassigned for unknown palette
    # names, crashing with UnboundLocalError instead of the intended
    # ValueError (whose message was also misspelled "Unknonw").
    if palette == 'colorblind':
        # generated with sns.palplot(sns.color_palette("colorblind", 10))
        colors = [(0.0, 0.4470588235294118, 0.6980392156862745),
                  (0.0, 0.6196078431372549, 0.45098039215686275),
                  (0.8352941176470589, 0.3686274509803922, 0.0),
                  (0.8, 0.4745098039215686, 0.6549019607843137),
                  (0.9411764705882353, 0.8941176470588236, 0.25882352941176473),
                  (0.33725490196078434, 0.7058823529411765, 0.9137254901960784)]
    elif palette == 'set2':
        colors = [(0.40000000596046448, 0.7607843279838562, 0.64705884456634521),
                  (0.98131487965583808, 0.55538641635109398, 0.38740485135246722),
                  (0.55432528607985565, 0.62711267120697922, 0.79595541393055635),
                  (0.90311419262605563, 0.54185316071790801, 0.76495195557089413),
                  (0.65371782148585622, 0.84708959004458262, 0.32827375098770734),
                  (0.9986312957370983, 0.85096502233954041, 0.18488274134841617),
                  (0.89573241682613591, 0.76784315109252932, 0.58182240093455595),
                  (0.70196080207824707, 0.70196080207824707, 0.70196080207824707)]
    elif palette == 'husl':
        colors = [(0.9677975592919913, 0.44127456009157356, 0.5358103155058701),
                  (0.8616090647292522, 0.536495730113334, 0.19548899031476086),
                  (0.6804189127793346, 0.6151497514677574, 0.19405452111445337),
                  (0.46810256823426105, 0.6699492535792404, 0.1928958739904499),
                  (0.20125317221201128, 0.6907920815379025, 0.47966761189275336),
                  (0.21044753832183283, 0.6773105080456748, 0.6433941168468681),
                  (0.2197995660828324, 0.6625157876850336, 0.7732093159317209),
                  (0.433280341176423, 0.6065273407962815, 0.9585467098271748),
                  (0.8004936186423958, 0.47703363533737203, 0.9579547196007522),
                  (0.962272393509669, 0.3976451968965351, 0.8008274363432775)]
    elif palette == 'root':
        # named colors of the ROOT TColor colorwheel are between 800 and 900, +1 to make them look better
        step = int(100.0 / ncolors)
        colors = [800 + step * i + 1 for i in range(ncolors)]
    else:
        raise ValueError("Unknown palette")
    for color in colors:
        yield color
class Figure(object):
    """Declarative wrapper around a ROOT canvas.

    Collect plottables with ``add_plottable`` (or import them from an
    existing roofie canvas), tweak the ``plot``/``legend``/``style``
    attributes, then render with ``draw_to_canvas`` or one of the save
    helpers.
    """
    def __init__(self):
        # User settable parameters:
        self.title = ''
        self.xtitle = ''
        self.ytitle = ''
        self.plot = self.Plot()
        self.legend = self.Legend()
        # Private:
        self._plottables = []
        self.style = Styles.Presentation_full
    class Plot(object):
        # Per-figure axis/grid/palette options (one instance per Figure).
        logx = False
        logy = False
        gridx = False
        gridy = False
        palette = 'root'
        palette_ncolors = 10
        # Explicit axis limits; None means "derive from the plottables".
        xmin, xmax, ymin, ymax = None, None, None, None
        frame = None
    class Legend(object):
        # ``position`` is 'tl'/'tr'/'bl'/'br' or (sic) 'seperate' for a
        # dedicated legend pad right of the plot.
        title = None
        position = 'tl'
    def _create_legend(self):
        # Build an empty TLegend sized for the plottables that carry a
        # non-empty legend title.
        nentries = len([pdic['legend_title'] for pdic in self._plottables if pdic['legend_title'] != ''])
        leg = Legend(nentries, leftmargin=0, rightmargin=0, entrysep=0.01,
                     textsize=self.style.legendSize, textfont=43, margin=0.1, )
        if self.legend.title:
            leg.SetHeader(self.legend.title)
        leg.SetBorderSize(0)  # no box
        leg.SetFillStyle(0)   # transparent background of legend TPave(!)
        return leg
    def _theme_plottable(self, obj):
        # Apply the current style's fonts/sizes to *obj*'s axes and marker.
        try:
            axes = obj.GetXaxis(), obj.GetYaxis()
            for axis in axes:
                axis.SetLabelSize(self.style.axisLabelSize)
                axis.SetLabelFont(self.style.labelfont)
                axis.SetTitleFont(self.style.titlefont)
                axis.SetTitleSize(self.style.axisTitleSize)
            # yaxis only settings:
            axes[1].SetTitleOffset(self.style.plot_ytitle_offset)
        except AttributeError:
            # obj might not be of the right type
            pass
        # apply styles, this might need to get more fine grained
        # markers are avilable in children of TAttMarker
        if isinstance(obj, ROOT.TAttMarker):
            # marker size 1 == 8 px, and never scales with canvas...
            obj.SetMarkerSize(self.style.markerSizepx / 8.0)
    def add_plottable(self, obj, legend_title='', markerstyle='circle', color=None, use_as_frame=None):
        """
        Add a plottable objet to this figure. This function performs a
        copy of the passed object and assigns it a random name. Once
        commited, these should not be touched any more by the user!!!
        Parameters
        ----------
        obj : Hist1D, Graph, None
            A root plottable object; If none, this object will only show up in the legend
        legend_title : string
            Title for this plottable as shown in the legend
        markerstyle : string
            rootpy marker style name used when drawing
        color :
            rootpy-digestible color; picked from the palette when None
        use_as_frame :
            if truthy, this plottable's clone is used as the axis frame
        """
        # Make a copy if we got a plottable
        if obj is not None:
            p = asrootpy(obj.Clone(gen_random_name()))
        else:
            # Legend-only entry: a TLegendEntry stands in for the object.
            p = ROOT.TLegendEntry()
        if isinstance(p, ROOT.TH1):
            p.SetDirectory(0)  # make sure that the hist is not associated with a file anymore!
        self._plottables.append({'p': p,
                                 'legend_title': legend_title,
                                 'markerstyle': markerstyle,
                                 'color': color,
                                 'use_as_frame': use_as_frame,
                                 })
    def import_plottables_from_canvas(self, canvas):
        """
        Import plottables from a canvas which was previously created with roofie
        Parameters
        ----------
        canvas : Canvas
            A canvas which was created with roofie.
        Raises
        ------
        ValueError :
            The given canvas did not have the internal format as expected from roofie canvases
        """
        pad = canvas.FindObject('plot')
        if pad == None:  # "is None" does not work since TObject is not None, but equal to None...
            raise ValueError("Cannot import canvas, since it is not in roofie format.")
        try:
            legend = [p for p in pad.GetListOfPrimitives() if isinstance(p, ROOT.TLegend)][0]
        except IndexError:
            legend_entries = []
        else:
            legend_entries = [e for e in legend.GetListOfPrimitives()]
        # load the plottables but ignore the frame
        plottables = []
        for p in pad.GetListOfPrimitives():
            if is_plottable(p):
                if p.GetName() != "__frame":
                    plottables.append({'p': asrootpy(p.Clone(gen_random_name()))})
                    # Carry over the legend label attached to this object.
                    for legend_entry in legend_entries:
                        if p == legend_entry.GetObject():
                            plottables[-1]['legend_title'] = legend_entry.GetLabel()
                else:
                    # The frame carries the axis titles.
                    self.xtitle = p.GetXaxis().GetTitle()
                    self.ytitle = p.GetYaxis().GetTitle()
        # set legend title if any
        # NOTE(review): if the pad had no TLegend, ``legend`` is never
        # assigned and the next line raises NameError -- confirm whether
        # canvases without a legend can reach this point.
        if legend.GetHeader():
            self.legend.title = legend.GetHeader()
        self._plottables += plottables
    def draw_to_canvas(self):
        """
        Draw this figure to a canvas, which is then returned.
        """
        if len(self._plottables) == 0:
            raise IndexError("No plottables defined")
        c = Canvas(width=self.style.canvasWidth,
                   height=self.style.canvasHeight,
                   size_includes_decorations=True)
        # Optional dedicated legend pad on the right side of the canvas.
        if self.legend.position == 'seperate':
            legend_width = .2
            pad_legend = Pad(1 - legend_width, 0, 1., 1., name="legend")
            pad_legend.SetLeftMargin(0.0)
            pad_legend.SetFillStyle(0)  # make this pad transparent
            pad_legend.Draw()
        else:
            legend_width = 0
        pad_plot = Pad(0., 0., 1 - legend_width, 1., name="plot", )
        pad_plot.SetMargin(*self.style.plot_margins)
        pad_plot.Draw()
        pad_plot.cd()
        # awkward hack around a bug in get limits where everything fails if one plottable is shitty...
        xmin, xmax, ymin, ymax = None, None, None, None
        for pdic in self._plottables:
            try:
                limits = get_limits(pdic['p'], logx=self.plot.logx, logy=self.plot.logy)
                # Beware: Python 2 evaluates min/max of None in an undefined way with no error! Wow...
                xmin = min([xmin, limits[0]]) if xmin is not None else limits[0]
                xmax = max([xmax, limits[1]]) if xmax is not None else limits[1]
                ymin = min([ymin, limits[2]]) if ymin is not None else limits[2]
                ymax = max([ymax, limits[3]]) if ymax is not None else limits[3]
            except TypeError:
                # some plottables do not work with this rootpy function (eg. graph without points, tf1)
                # TODO: should be fixed upstream
                pass
        # overwrite these ranges if defaults are given
        if self.plot.xmin is not None:
            xmin = self.plot.xmin
        if self.plot.xmax is not None:
            xmax = self.plot.xmax
        if self.plot.ymax is not None:
            ymax = self.plot.ymax
        if self.plot.ymin is not None:
            ymin = self.plot.ymin
        if not all([val is not None for val in [xmin, xmax, ymin, ymax]]):
            raise TypeError("unable to determine plot axes ranges from the given plottables")
        colors = get_color_generator(self.plot.palette, self.plot.palette_ncolors)
        # draw an empty frame within the given ranges;
        frame_from_plottable = [p for p in self._plottables if p.get('use_as_frame')]
        if len(frame_from_plottable) > 0:
            # Clone a user-designated plottable and empty it to act as frame.
            frame = frame_from_plottable[0]['p'].Clone('__frame')
            frame.Reset()
            frame.SetStats(0)
            frame.xaxis.SetRangeUser(xmin, xmax)
            frame.yaxis.SetRangeUser(ymin, ymax)
            frame.GetXaxis().SetTitle(self.xtitle)
            frame.GetYaxis().SetTitle(self.ytitle)
            self._theme_plottable(frame)
            frame.Draw()
        else:
            frame = Graph()
            frame.SetName("__frame")
            # add a silly point in order to have root draw this frame...
            frame.SetPoint(0, 0, 0)
            frame.GetXaxis().SetLimits(xmin, xmax)
            frame.GetYaxis().SetLimits(ymin, ymax)
            frame.SetMinimum(ymin)
            frame.SetMaximum(ymax)
            frame.GetXaxis().SetTitle(self.xtitle)
            frame.GetYaxis().SetTitle(self.ytitle)
            self._theme_plottable(frame)
            # Draw this frame: 'A' should draw the axis, but does not work if nothing else is drawn.
            # L would draw a line between the points but is seems to do nothing if only one point is present
            # P would also draw that silly point but we don't want that!
            frame.Draw("AL")
        xtick_length = frame.GetXaxis().GetTickLength()
        ytick_length = frame.GetYaxis().GetTickLength()
        # Draw every plottable on top of the frame.
        for i, pdic in enumerate(self._plottables):
            obj = pdic['p']
            if isinstance(obj, ROOT.TLegendEntry):
                # Legend-only entry: just style it; nothing is drawn here.
                _root_color = Color(pdic['color'])
                _root_markerstyle = MarkerStyle(pdic['markerstyle'])
                obj.SetMarkerStyle(_root_markerstyle('root'))
                obj.SetMarkerColor(_root_color('root'))
            elif isinstance(obj, (ROOT.TH1, ROOT.TGraph, ROOT.TF1)):
                self._theme_plottable(obj)
                obj.SetMarkerStyle(pdic.get('markerstyle', 'circle'))
                if pdic.get('color', None):
                    obj.color = pdic['color']
                else:
                    # No explicit color: take the next one from the palette.
                    try:
                        color = next(colors)
                    except StopIteration:
                        log.warning("Ran out of colors; defaulting to black")
                        color = 1
                    obj.color = color
                xaxis = obj.GetXaxis()
                yaxis = obj.GetYaxis()
                # Set the title to the given title:
                obj.title = self.title
                # the xaxis depends on the type of the plottable :P
                if isinstance(obj, ROOT.TGraph):
                    # SetLimit on a TH1 is simply messing up the
                    # lables of the axis to screw over the user, presumably...
                    xaxis.SetLimits(xmin, xmax)
                    yaxis.SetLimits(ymin, ymax)  # for unbinned data
                    # 'P' plots the current marker, 'L' would connect the dots with a simple line
                    # see: https://root.cern.ch/doc/master/classTGraphPainter.html for more draw options
                    drawoption = 'Psame'
                elif isinstance(obj, ROOT.TH1):
                    obj.SetStats(0)
                    xaxis.SetRangeUser(xmin, xmax)
                    yaxis.SetRangeUser(ymin, ymax)
                    drawoption = 'same'
                elif isinstance(obj, ROOT.TF1):
                    # xaxis.SetLimits(xmin, xmax)
                    # yaxis.SetLimits(ymin, ymax)  # for unbinned data
                    drawoption = 'same'
                obj.Draw(drawoption)
            # Its ok if obj is non; then we just add it to the legend.
            else:
                raise TypeError("Un-plottable type given.")
        pad_plot.SetTicks()
        pad_plot.SetLogx(self.plot.logx)
        pad_plot.SetLogy(self.plot.logy)
        pad_plot.SetGridx(self.plot.gridx)
        pad_plot.SetGridy(self.plot.gridy)
        # do we have legend titles?
        if any([pdic.get('legend_title') for pdic in self._plottables]):
            leg = self._create_legend()
            longest_label = 0
            for pdic in self._plottables:
                if not pdic.get('legend_title', False):
                    continue
                leg.AddEntry(pdic['p'], pdic['legend_title'])
                if len(pdic['legend_title']) > longest_label:
                    longest_label = len(pdic['legend_title'])
            # Set the legend position
            # vertical:
            if self.legend.position.startswith('t'):
                leg_hight = leg.y2 - leg.y1
                leg.y2 = 1 - pad_plot.GetTopMargin() - ytick_length
                leg.y1 = leg.y2 - leg_hight
            elif self.legend.position.startswith('b'):
                leg_hight = leg.y2 - leg.y1
                leg.y1 = pad_plot.GetBottomMargin() + ytick_length
                leg.y2 = leg.y1 + leg_hight
            # horizontal:
            if self.legend.position[1:].startswith('l'):
                leg_width = 0.3
                leg.x1 = pad_plot.GetLeftMargin() + xtick_length
                leg.x2 = leg.x1 + leg_width
            elif self.legend.position[1:].startswith('r'):
                leg_width = 0.3
                leg.x2 = 1 - pad_plot.GetRightMargin() - xtick_length
                leg.x1 = leg.x2 - leg_width
            if self.legend.position == 'seperate':
                with pad_legend:
                    leg.Draw()
            else:
                leg.Draw()
        if self.plot.logx:
            pad_plot.SetLogx(True)
        if self.plot.logy:
            pad_plot.SetLogy(True)
        pad_plot.Update()  # needed sometimes with import of canvas. maybe because other "plot" pads exist...
        return c
    def delete_plottables(self):
        """
        Delete all plottables in this figure so that it can be filled with
        new ones while keeping the lables.
        """
        self._plottables = []
    def save_to_root_file(self, in_f, name, path=''):
        """
        Save the current figure to the given root file under the given path
        Parameters
        ----------
        in_f : TFile
            Root file object open in writable mode
        name : str
            Name for the canvas in the root file
        path : str
            The path where the figure should be saved within the root file
        Returns
        -------
        TFile :
            The file where the object was written to
        """
        f = asrootpy(in_f)
        c = self.draw_to_canvas()
        c.name = name
        try:
            # mkdir raises if the directory already exists; that is fine.
            f.mkdir(path, recurse=True)
        except ValueError:
            pass
        f.cd(path)
        success = c.Write()
        if success == 0:
            raise ValueError("Could not write to file!")
        return f
    def save_to_file(self, path, name):
        """
        Save the current figure to the given root file under the given path
        Parameters
        ----------
        name : string
            Name of the file including its extension
        path : string
            Path excluding the file name, relative files are interpreted relative to the working dir
        Raises
        ------
        ValueError :
            if *name* has no extension
        NotImplementedError :
            if the extension is anything other than 'pdf'
        """
        # check if the name has the right extension
        if len(name.split('.')) != 2:
            raise ValueError("Filename must be given with extension")
        if name.split('.')[1] != 'pdf':
            raise NotImplementedError("Only PDF export is implemented at the moment")
        # strip of tailing / if any
        # this is not compatible with windows, I guess!
        path = path.rstrip('/')
        try:
            os.makedirs(path)
        except OSError:
            # Directory already exists.
            pass
        # The order of the following is important! First, set paper
        # size, then draw the canvas and then create the pdf Doin
        # pdf.Range(10, 10) is not sufficient. it just does random
        # sh...
        # Be careful to reset the global gStyle when we are finished. Yeah! Globals!
        # Ok, Root does not like that either...
        # paper_width, paper_height = ROOT.Double(), ROOT.Double()
        # ROOT.gStyle.GetPaperSize(paper_width, paper_height)
        ROOT.gStyle.SetPaperSize(self.style.canvasWidth / self.style.pt_per_cm,
                                 self.style.canvasHeight / self.style.pt_per_cm,)
        c = self.draw_to_canvas()
        c.Print("{0}/{1}".format(path, name))
        # reset the page size
        # ROOT.gStyle.SetPaperSize(paper_width, paper_height)
|
aaniin/AliPhysics
|
PWGMM/MC/aligenqa/aligenqa/roofie/figure.py
|
Python
|
bsd-3-clause
| 20,665
| 0.002516
|
from pyramid.httpexceptions import HTTPMovedPermanently
from pyramid.view import view_config
from zeit.redirect.db import Redirect
import json
@view_config(route_name='redirect', renderer='string')
def check_redirect(request):
    """Issue a 301 if a Redirect entry matches the request path.

    Returns an empty string when no redirect is configured for the path.
    """
    match = Redirect.query().filter_by(source=request.path).first()
    if match is None:
        return ''
    # XXX Should we be protocol-relative (https etc.)?
    target_url = 'http://' + request.headers['Host'] + match.target
    raise HTTPMovedPermanently(target_url)
@view_config(route_name='add', renderer='string', request_method='POST')
def add_redirect(request):
    """Create a redirect from a JSON body with 'source' and 'target' keys."""
    payload = json.loads(request.body)
    Redirect.add(payload['source'], payload['target'])
    return '{}'
|
ZeitOnline/zeit.redirect
|
src/zeit/redirect/redirect.py
|
Python
|
bsd-3-clause
| 710
| 0
|
from staffjoy.resource import Resource
from staffjoy.resources.location import Location
from staffjoy.resources.admin import Admin
from staffjoy.resources.organization_worker import OrganizationWorker
class Organization(Resource):
    """Client-side handle for one Staffjoy organization.

    All methods delegate to the child Resource classes (Location, Admin,
    OrganizationWorker) with this organization as the parent scope.
    """
    PATH = "organizations/{organization_id}"
    ID_NAME = "organization_id"
    def get_locations(self, **kwargs):
        # kwargs are forwarded as query parameters.
        return Location.get_all(parent=self, **kwargs)
    def get_location(self, id):
        # NOTE: the parameter shadows the builtin ``id`` but is part of the
        # public interface, so it is kept.
        return Location.get(parent=self, id=id)
    def create_location(self, **kwargs):
        return Location.create(parent=self, **kwargs)
    def get_admins(self):
        return Admin.get_all(parent=self)
    def get_admin(self, id):
        return Admin.get(parent=self, id=id)
    def create_admin(self, **kwargs):
        """Typically just pass email"""
        return Admin.create(parent=self, **kwargs)
    def get_workers(self, **kwargs):
        return OrganizationWorker.get_all(parent=self, **kwargs)
|
Staffjoy/client_python
|
staffjoy/resources/organization.py
|
Python
|
mit
| 959
| 0
|
from setuptools import setup, find_packages
from fccsmap import __version__

# Test-only dependencies are listed one per line in requirements-test.txt.
test_requirements = []
with open('requirements-test.txt') as f:
    # splitlines() already returns a list; the original wrapped it in a
    # redundant copying list comprehension.
    test_requirements = f.read().splitlines()
setup(
    name='fccsmap',
    version=__version__,
    author='Joel Dubowy',
    license='GPLv3+',
    author_email='jdubowy@gmail.com',
    packages=find_packages(),
    scripts=[
        'bin/fccsmap'
    ],
    # Ship the bundled FCCS NetCDF data with the package.
    package_data={
        'fccsmap': ['data/*.nc']
    },
    classifiers=[
        "Development Status :: 3 - Alpha",
        "Intended Audience :: Developers",
        "License :: OSI Approved :: GNU General Public License v3 or later (GPLv3+)",
        "Programming Language :: Python :: 3.8",
        "Operating System :: POSIX",
        "Operating System :: MacOS"
    ],
    url='https://github.com/pnwairfire/fccsmap/',
    description='supports the look-up of FCCS fuelbed information by lat/lng or vector geo spatial data.',
    install_requires=[
        "afscripting>=2.0.0",
        # Note: numpy and gdal must now be installed manually beforehand
        "shapely==1.7.1",
        "pyproj==3.0.0.post1",
        "rasterstats==0.15.0"
    ],
    dependency_links=[
        "https://pypi.airfire.org/simple/afscripting/",
    ],
    tests_require=test_requirements
)
|
pnwairfire/fccsmap
|
setup.py
|
Python
|
gpl-3.0
| 1,292
| 0.001548
|
#!/usr/bin/env python
# Exercise: compare counts of people, cars and trucks and print a decision.
# Fixes two defects in the printed output: the typo "coudl" and the missing
# period after "We should not take the cars".
people = 30
cars = 40
trucks = 15

if cars > people:
    print("We should take the cars.")
elif cars < people:
    print("We should not take the cars.")
else:
    print("We can't decide.")

if trucks > cars:
    print("That's too many trucks.")
elif trucks < cars:
    print("Maybe we could take the trucks.")
else:
    print("We still can't decide.")

if people > trucks:
    print("Alright, let's just take the trucks.")
else:
    print("Fine, let's stay home then.")
|
davvi/Hardway3
|
ex30.py
|
Python
|
mit
| 492
| 0
|
# -*- coding: utf-8 -*-
# Python 2 module (urlparse import, ``unicode`` builtin used below).
from __future__ import unicode_literals
import re
import string
from urlparse import urlparse, parse_qs
from mopidy import backend
from mopidy.models import SearchResult, Track, Album, Artist
import pykka
import pafy
import requests
import unicodedata
from mopidy_youtube import logger
yt_api_endpoint = 'https://www.googleapis.com/youtube/v3/'
# NOTE(review): hard-coded API key committed to source -- consider moving
# it to configuration or an environment variable.
yt_key = 'AIzaSyAl1Xq9DwdE_KD4AtPaE4EJl3WZe2zCqg4'
def resolve_track(track, stream=False):
    """Resolve either a Track model or a 'youtube:video/<name>.<id>' string.

    Track objects carry the video id in their ``comment`` field; plain
    uri strings carry it after the last dot.
    """
    logger.debug("Resolving Youtube for track '%s'", track)
    if hasattr(track, 'uri'):
        video_id = track.comment
    else:
        video_id = track.split('.')[-1]
    return resolve_url(video_id, stream)
def safe_url(uri):
    """ASCII-fold *uri* and keep only filename-safe characters.

    Runs of whitespace collapse to a single space and the result is
    stripped.  (Python 2 only: relies on the ``unicode`` builtin.)
    """
    allowed = "-_.() %s%s" % (string.ascii_letters, string.digits)
    ascii_uri = unicodedata.normalize('NFKD', unicode(uri)).encode('ASCII', 'ignore')
    filtered = ''.join(c for c in ascii_uri if c in allowed)
    return re.sub('\s+', ' ', filtered).strip()
def resolve_url(url, stream=False):
    """Build a mopidy Track for the given YouTube video id/url via pafy.

    When ``stream`` is True the Track uri is the best direct audio (or,
    failing that, video) stream URL; otherwise it is an internal
    'youtube:video/<safe-title>.<id>' uri.  Returns None if no uri could
    be determined.
    """
    video = pafy.new(url)
    if not stream:
        uri = 'youtube:video/%s.%s' % (
            safe_url(video.title), video.videoid
        )
    else:
        uri = video.getbestaudio()
        if not uri:  # get video url
            uri = video.getbest()
        logger.debug('%s - %s %s %s' % (
            video.title, uri.bitrate, uri.mediatype, uri.extension))
        uri = uri.url
    if not uri:
        return
    # A dash in the title is treated as an "artist - title" separator.
    # NOTE(review): with more than one dash only title[1] (the middle
    # chunk) becomes the track name -- confirm this is intended.
    if '-' in video.title:
        title = video.title.split('-')
        track = Track(
            name=title[1].strip(),
            comment=video.videoid,
            length=video.length*1000,  # scaled by 1000 (seconds -> ms, presumably)
            artists=[Artist(name=title[0].strip())],
            album=Album(
                name='Youtube',
                images=[video.bigthumb, video.bigthumbhd]
            ),
            uri=uri
        )
    else:
        track = Track(
            name=video.title,
            comment=video.videoid,
            length=video.length*1000,
            album=Album(
                name='Youtube',
                images=[video.bigthumb, video.bigthumbhd]
            ),
            uri=uri
        )
    return track
def search_youtube(q):
    """Search the YouTube API for *q* and resolve up to 15 video tracks.

    Videos that fail to resolve are logged and skipped.
    """
    query = {
        'part': 'id',
        'maxResults': 15,
        'type': 'video',
        'q': q,
        'key': yt_key
    }
    result = requests.get(yt_api_endpoint+'search', params=query)
    tracks = []
    for item in result.json().get('items'):
        try:
            tracks.append(resolve_url(item.get('id').get('videoId')))
        except Exception as e:
            logger.info(e.message)
    return tracks
def resolve_playlist(url):
    """Resolve every video of a YouTube playlist into Track objects.

    :param url: the playlist id (value of the 'list' query parameter)
    :returns: list of Tracks; videos that fail to resolve are logged
        and skipped
    """
    logger.info("Resolving Youtube for playlist '%s'", url)
    query = {
        'part': 'snippet',
        'maxResults': 50,
        'playlistId': url,
        'fields': 'items/snippet/resourceId',
        'key': yt_key
    }
    # BUG FIX: the YouTube Data API v3 endpoint is 'playlistItems'
    # (plural); the previous 'playlistItem' returned an error payload
    # without an 'items' key, which then crashed the loop below.
    pl = requests.get(yt_api_endpoint+'playlistItems', params=query)
    playlist = []
    for item in pl.json().get('items'):
        try:
            video_id = item.get('snippet').get('resourceId').get('videoId')
            playlist.append(resolve_url(video_id))
        except Exception as e:
            logger.info(e.message)
    return playlist
class YoutubeBackend(pykka.ThreadingActor, backend.Backend):
    """Mopidy backend actor wiring up the YouTube library and playback."""
    def __init__(self, config, audio):
        super(YoutubeBackend, self).__init__()
        self.config = config
        self.library = YoutubeLibraryProvider(backend=self)
        self.playback = YoutubePlaybackProvider(audio=audio, backend=self)
        # URI schemes handled by this backend: long and short form.
        self.uri_schemes = ['youtube', 'yt']
class YoutubeLibraryProvider(backend.LibraryProvider):
    """Library provider resolving YouTube URLs, video ids and searches."""
    def lookup(self, track):
        # Strip the optional short 'yt:' scheme prefix.
        if 'yt:' in track:
            track = track.replace('yt:', '')
        if 'youtube.com' in track:
            url = urlparse(track)
            req = parse_qs(url.query)
            if 'list' in req:
                # A 'list' query parameter means a whole playlist.
                return resolve_playlist(req.get('list')[0])
            else:
                return [resolve_url(track)]
        else:
            return [resolve_url(track)]
    def search(self, query=None, uris=None):
        if not query:
            return
        if 'uri' in query:
            # The query carries a concrete URL: resolve it directly.
            # NOTE(review): if the URL is not a youtube.com one, this
            # branch falls through and returns None -- confirm intended.
            search_query = ''.join(query['uri'])
            url = urlparse(search_query)
            if 'youtube.com' in url.netloc:
                req = parse_qs(url.query)
                if 'list' in req:
                    return SearchResult(
                        uri='youtube:search',
                        tracks=resolve_playlist(req.get('list')[0])
                    )
                else:
                    logger.info(
                        "Resolving Youtube for track '%s'", search_query)
                    return SearchResult(
                        uri='youtube:search',
                        tracks=[resolve_url(search_query)]
                    )
        else:
            # Free-text search across the API.
            # NOTE(review): dict.values()[0] is Python 2 only; Python 3
            # would need list(query.values())[0].
            search_query = '|'.join(query.values()[0])
            logger.info("Searching Youtube for query '%s'", search_query)
            return SearchResult(
                uri='youtube:search',
                tracks=search_youtube(search_query)
            )
class YoutubePlaybackProvider(backend.PlaybackProvider):
    """Playback provider that swaps the track for a freshly resolved stream."""
    def play(self, track):
        # resolve_track(..., True) re-resolves the video to a playable
        # stream URI just before playback starts (stream URLs expire).
        track = resolve_track(track, True)
        return super(YoutubePlaybackProvider, self).play(track)
|
hkariti/mopidy-youtube
|
mopidy_youtube/backend.py
|
Python
|
apache-2.0
| 5,359
| 0.000187
|
import unittest
import pysal
import numpy as np
import random
class Test_Maxp(unittest.TestCase):
    """Regression tests for pysal's Maxp regionalization with seeded RNGs."""

    def setUp(self):
        # Fix both RNGs so the heuristic region assignments are reproducible.
        random.seed(100)
        np.random.seed(100)

    def test_Maxp(self):
        """Maxp on a 10x10 lattice reproduces the known region count/layout."""
        w = pysal.lat2W(10, 10)
        z = np.random.random_sample((w.n, 2))
        p = np.ones((w.n, 1), float)
        floor = 3
        solution = pysal.region.Maxp(
            w, z, floor, floor_variable=p, initial=100)
        # FIX: assertEquals/assertAlmostEquals are deprecated aliases
        # (removed in Python 3.12); use assertEqual/assertAlmostEqual.
        self.assertEqual(solution.p, 29)
        self.assertEqual(solution.regions[0], [4, 14, 5, 24, 3, 25, 15, 23])

    def test_inference(self):
        """Permutation-based inference yields the expected p-value."""
        w = pysal.weights.lat2W(5, 5)
        z = np.random.random_sample((w.n, 2))
        p = np.ones((w.n, 1), float)
        floor = 3
        solution = pysal.region.Maxp(
            w, z, floor, floor_variable=p, initial=100)
        solution.inference(nperm=9)
        self.assertAlmostEqual(solution.pvalue, 0.20000000000000001, 10)

    def test_cinference(self):
        """Constrained inference yields the expected p-value."""
        w = pysal.weights.lat2W(5, 5)
        z = np.random.random_sample((w.n, 2))
        p = np.ones((w.n, 1), float)
        floor = 3
        solution = pysal.region.Maxp(
            w, z, floor, floor_variable=p, initial=100)
        solution.cinference(nperm=9, maxiter=100)
        self.assertAlmostEqual(solution.cpvalue, 0.10000000000000001, 10)

    def test_Maxp_LISA(self):
        """Maxp_LISA variant reproduces the known region count/layout."""
        w = pysal.lat2W(10, 10)
        z = np.random.random_sample((w.n, 2))
        p = np.ones(w.n)
        mpl = pysal.region.Maxp_LISA(w, z, p, floor=3, floor_variable=p)
        self.assertEqual(mpl.p, 31)
        self.assertEqual(mpl.regions[0], [99, 89, 98, 97])
# Build the suite explicitly so external runners can import it as well.
suite = unittest.TestLoader().loadTestsFromTestCase(Test_Maxp)
if __name__ == '__main__':
    runner = unittest.TextTestRunner()
    runner.run(suite)
|
AlanZatarain/pysal
|
pysal/region/tests/test_maxp.py
|
Python
|
bsd-3-clause
| 1,770
| 0
|
class Node(object):
    """A search-tree node for pathfinding.

    Holds the node's position, a link to its parent node, the cost
    accrued from the start, and the heuristic distance to the goal.
    ``totalCost`` is their sum (the A*-style priority).
    """

    def __init__(self, pos, parent, costSoFar, distanceToEnd):
        self.pos = pos
        self.parent = parent
        self.costSoFar = costSoFar
        self.distanceToEnd = distanceToEnd
        # Priority used when ordering nodes for expansion.
        self.totalCost = costSoFar + distanceToEnd
|
helloworldC2/VirtualRobot
|
Node.py
|
Python
|
mit
| 260
| 0.019231
|
from django.views.generic.detail import DetailView
from django.views.generic.edit import UpdateView, DeleteView
from catalog.views.base import GenericListView, GenericCreateView
from catalog.models import Astronaut, CrewedMission
from catalog.forms import AstronautForm
from catalog.filters import AstronautFilter
from django.core.urlresolvers import reverse_lazy
from django.core.urlresolvers import reverse
from django.http import Http404
class AstronautListView(GenericListView):
    """Filterable list of astronauts; extra columns shown via display_data."""
    model = Astronaut
    f = AstronautFilter
    display_data = ('organization', 'nationality', 'birth_date')
class AstronautDetailView(DetailView):
    """Read-only detail page for a single astronaut."""
    model = Astronaut
    template_name = "catalog/astronaut_detail.html"
class AstronautCreateView(GenericCreateView):
    """Create view for astronauts; stamps the creating user on the object."""
    model = Astronaut
    form_class = AstronautForm
    success_url = reverse_lazy("astronaut_list")

    def form_valid(self, form):
        obj = form.save(commit=False)
        obj.creator = self.request.user
        obj.save()
        # BUG FIX: the super() call previously named AstronautUpdateView,
        # which bypasses GenericCreateView's method resolution order; it
        # must reference this class.
        return super(AstronautCreateView, self).form_valid(form)

    def get_success_url(self):
        return reverse("astronaut_detail", args=(self.object.pk,))
class AstronautUpdateView(UpdateView):
    """Edit view for astronauts; stamps the modifying user on the object."""
    model = Astronaut
    form_class = AstronautForm
    template_name = "catalog/generic_update.html"
    initial = {}
    def form_valid(self, form):
        # Record who last changed the record before the generic save.
        obj = form.save(commit=False)
        obj.modifier = self.request.user
        obj.save()
        return super(AstronautUpdateView, self).form_valid(form)
    def get_success_url(self):
        return reverse("astronaut_detail", args=(self.object.pk,))
class AstronautDeleteView(DeleteView):
    """Confirmation + delete view; returns to the list page afterwards."""
    model = Astronaut
    template_name = "catalog/generic_delete.html"
    success_url = reverse_lazy("astronaut_list")
|
Hattivat/hypergolic-django
|
hypergolic/catalog/views/astronaut_views.py
|
Python
|
agpl-3.0
| 1,773
| 0
|
# hello_asyncio.py
import asyncio
import traceback

import tornado.concurrent
import tornado.gen
import tornado.ioloop
import tornado.web
from tornado.httpclient import AsyncHTTPClient
try:
    import aioredis
except ImportError:
    # aioredis is required for this demo; bail out with instructions.
    print("Please install aioredis: pip install aioredis")
    exit(0)
class AsyncRequestHandler(tornado.web.RequestHandler):
    """Base class for request handlers with `asyncio` coroutines support.
    It runs methods on Tornado's ``AsyncIOMainLoop`` instance.
    Subclasses have to implement one of `get_async()`, `post_async()`, etc.
    Asynchronous method should be decorated with `@asyncio.coroutine`.
    Usage example::
        class MyAsyncRequestHandler(AsyncRequestHandler):
            @asyncio.coroutine
            def get_async(self):
                html = yield from self.application.http.get('http://python.org')
                self.write({'html': html})
    You may also just re-define `get()` or `post()` methods and they will be simply run
    synchronously. This may be convinient for draft implementation, i.e. for testing
    new libs or concepts.
    """

    @tornado.gen.coroutine
    def get(self, *args, **kwargs):
        """Handle GET request asyncronously, delegates to
        ``self.get_async()`` coroutine.
        """
        yield self._run_method('get', *args, **kwargs)

    @tornado.gen.coroutine
    def post(self, *args, **kwargs):
        """Handle POST request asyncronously, delegates to
        ``self.post_async()`` coroutine.
        """
        yield self._run_method('post', *args, **kwargs)

    @asyncio.coroutine
    def _run_async(self, coroutine, future_, *args, **kwargs):
        """Perform coroutine and set result to ``Future`` object."""
        try:
            result = yield from coroutine(*args, **kwargs)
            future_.set_result(result)
        except Exception as e:
            future_.set_exception(e)
            # BUG FIX: `traceback` was used here without being imported,
            # so the original error was masked by a NameError.
            print(traceback.format_exc())

    def _run_method(self, method_, *args, **kwargs):
        """Run ``get_async()`` / ``post_async()`` / etc. coroutine
        wrapping result with ``tornado.concurrent.Future`` for
        compatibility with ``gen.coroutine``.

        Raises HTTP 405 when the subclass defines no matching coroutine.
        """
        coroutine = getattr(self, '%s_async' % method_, None)
        if not coroutine:
            raise tornado.web.HTTPError(405)
        future_ = tornado.concurrent.Future()
        # FIX: asyncio.async() was deprecated in Python 3.4.4 and removed
        # in 3.10; ensure_future() is the direct replacement.
        asyncio.ensure_future(
            self._run_async(coroutine, future_, *args, **kwargs)
        )
        return future_
class MainHandler(AsyncRequestHandler):
    """Demo handler: round-trips a value through Redis and echoes it."""
    @asyncio.coroutine
    def get_async(self):
        redis = self.application.redis
        yield from redis.set('my-key', 'OK')
        val = yield from redis.get('my-key')
        self.write('Hello asyncio.coroutine: %s' % val)
class Application(tornado.web.Application):
    """Tornado app configured to run its IOLoop on top of asyncio."""
    def __init__(self):
        # Prepare IOLoop class to run instances on asyncio
        tornado.ioloop.IOLoop.configure('tornado.platform.asyncio.AsyncIOMainLoop')
        handlers = [
            (r"/", MainHandler),
        ]
        super().__init__(handlers, debug=True)
    def init_with_loop(self, loop):
        # Create the Redis connection on *loop* before serving requests;
        # blocks until the connection is established.
        self.redis = loop.run_until_complete(
            aioredis.create_redis(('localhost', 6379), loop=loop)
        )
if __name__ == "__main__":
    print("Run hello_asyncio ... http://127.0.0.1:8888")
    application = Application()
    application.listen(8888)
    # The asyncio loop drives Tornado (AsyncIOMainLoop configured above).
    loop = asyncio.get_event_loop()
    application.init_with_loop(loop)
    loop.run_forever()
|
rudyryk/python-samples
|
hello_tornado/hello_asyncio.py
|
Python
|
cc0-1.0
| 3,463
| 0.001733
|
# -*- coding: utf-8 -*-
#
# Python documentation build configuration file
#
# This file is execfile()d with the current directory set to its containing dir.
#
# The contents of this file are pickled, so don't put values in the namespace
# that aren't pickleable (module imports are okay, they're removed automatically).
import sys, os, time
# Make the local Sphinx extensions (pyspecific, templates) importable.
sys.path.append(os.path.abspath('tools/sphinxext'))
# General configuration
# ---------------------
extensions = ['sphinx.ext.refcounting', 'sphinx.ext.coverage',
              'sphinx.ext.doctest', 'pyspecific']
templates_path = ['tools/sphinxext']
# General substitutions.
project = 'Python'
copyright = '1990-%s, Python Software Foundation' % time.strftime('%Y')
# The default replacements for |version| and |release|.
#
# The short X.Y version.
# version = '2.6'
# The full version, including alpha/beta/rc tags.
# release = '2.6a0'
# We look for the Include/patchlevel.h file in the current Python source tree
# and replace the values accordingly.
import patchlevel
version, release = patchlevel.get_version_info()
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
today = ''
# Else, today_fmt is used as the format for a strftime call.
today_fmt = '%B %d, %Y'
# List of files that shouldn't be included in the build.
unused_docs = [
    'maclib/scrap',
    'library/xmllib',
    'library/xml.etree',
]
# Ignore .rst in Sphinx its self.
exclude_trees = ['tools/sphinx']
# Relative filename of the reference count data file.
refcount_file = 'data/refcounts.dat'
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
add_module_names = True
# Options for HTML output
# -----------------------
html_theme = 'default'
html_theme_options = {'collapsiblesidebar': True}
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
html_use_smartypants = True
# Custom sidebar templates, filenames relative to this file.
html_sidebars = {
    'index': 'indexsidebar.html',
}
# Additional templates that should be rendered to pages.
html_additional_pages = {
    'download': 'download.html',
    'index': 'indexcontent.html',
}
# Output an OpenSearch description file.
html_use_opensearch = 'http://docs.python.org/'
# Additional static files.
html_static_path = ['tools/sphinxext/static']
# Output file base name for HTML help builder.
# e.g. 'python274' for release '2.7.4'.
htmlhelp_basename = 'python' + release.replace('.', '')
# Split the index
html_split_index = True
# Options for LaTeX output
# ------------------------
# The paper size ('letter' or 'a4').
latex_paper_size = 'a4'
# The font size ('10pt', '11pt' or '12pt').
latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, document class [howto/manual]).
_stdauthor = r'Guido van Rossum\\Fred L. Drake, Jr., editor'
latex_documents = [
('c-api/index', 'c-api.tex',
'The Python/C API', _stdauthor, 'manual'),
('distutils/index', 'distutils.tex',
'Distributing Python Modules', _stdauthor, 'manual'),
('extending/index', 'extending.tex',
'Extending and Embedding Python', _stdauthor, 'manual'),
('install/index', 'install.tex',
'Installing Python Modules', _stdauthor, 'manual'),
('library/index', 'library.tex',
'The Python Library Reference', _stdauthor, 'manual'),
('reference/index', 'reference.tex',
'The Python Language Reference', _stdauthor, 'manual'),
('tutorial/index', 'tutorial.tex',
'Python Tutorial', _stdauthor, 'manual'),
('using/index', 'using.tex',
'Python Setup and Usage', _stdauthor, 'manual'),
('faq/index', 'faq.tex',
'Python Frequently Asked Questions', _stdauthor, 'manual'),
('whatsnew/' + version, 'whatsnew.tex',
'What\'s New in Python', 'A. M. Kuchling', 'howto'),
]
# Collect all HOWTOs individually
latex_documents.extend(('howto/' + fn[:-4], 'howto-' + fn[:-4] + '.tex',
'', _stdauthor, 'howto')
for fn in os.listdir('howto')
if fn.endswith('.rst') and fn != 'index.rst')
# Additional stuff for the LaTeX preamble.
latex_preamble = r'''
\authoraddress{
\strong{Python Software Foundation}\\
Email: \email{docs@python.org}
}
\let\Verbatim=\OriginalVerbatim
\let\endVerbatim=\endOriginalVerbatim
'''
# Documents to append as an appendix to all manuals.
latex_appendices = ['glossary', 'about', 'license', 'copyright']
# Get LaTeX to handle Unicode correctly
latex_elements = {'inputenc': r'\usepackage[utf8x]{inputenc}', 'utf8extra': ''}
# Options for the coverage checker
# --------------------------------
# The coverage checker will ignore all modules/functions/classes whose names
# match any of the following regexes (using re.match).
coverage_ignore_modules = [
    # BUG FIX: '|' inside a character class is a literal character, so the
    # old r'[T|t][k|K]' also matched names starting with '|'; the intended
    # pattern is simply upper/lower-case 'Tk'.
    r'[Tt][kK]',
    r'Tix',
    r'distutils.*',
]
coverage_ignore_functions = [
    'test($|_)',
]
coverage_ignore_classes = [
]
# Glob patterns for C source files for C API coverage, relative to this directory.
coverage_c_path = [
    '../Include/*.h',
]
# Regexes to find C items in the source files.
coverage_c_regexes = {
    'cfunction': (r'^PyAPI_FUNC\(.*\)\s+([^_][\w_]+)'),
    'data': (r'^PyAPI_DATA\(.*\)\s+([^_][\w_]+)'),
    'macro': (r'^#define ([^_][\w_]+)\(.*\)[\s|\\]'),
}
# The coverage checker will ignore all C items whose names match these regexes
# (using re.match) -- the keys must be the same as in coverage_c_regexes.
coverage_ignore_c_items = {
    # 'cfunction': [...]
}
|
teeple/pns_server
|
work/install/Python-2.7.4/Doc/conf.py
|
Python
|
gpl-2.0
| 5,857
| 0.001195
|
import zlib, base64, sys
MAX_DEPTH = 50
if __name__ == "__main__":
try:
hashfile = open("hashfile", "r")
except:
print("ERROR: While opening hash file!")
sys.exit(-1)
line_number = 0
depths = [0 for _ in range(MAX_DEPTH)]
for line in hashfile.readlines():
line_number += 1
l = line.strip().split()
if len(l) < 7:
print(
"Bad entry on line " + str(line_number) + " (ignored): " + line.strip()
)
continue
hash = l[0]
depth = int(l[1])
score = int(l[2])
fen = " ".join(l[3:])
depths[depth] += 1
hashfile.close()
print("-- Depths --")
for i in range(MAX_DEPTH):
if not depths[i]:
continue
print("{:2d}: {:8d}".format(i, depths[i]))
print("------------")
|
fredericojordan/fast-chess
|
scripts/hashfileStats.py
|
Python
|
mit
| 900
| 0.004444
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-06-21 04:50
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Adds a 'slug' field to Category and Tag (URL-friendly identifiers).
    # Auto-generated by Django; do not edit the operations by hand.
    dependencies = [
        ('blog', '0011_auto_20170621_1224'),
    ]
    operations = [
        migrations.AddField(
            model_name='category',
            name='slug',
            field=models.SlugField(default=''),
        ),
        migrations.AddField(
            model_name='tag',
            name='slug',
            field=models.SlugField(default=''),
        ),
    ]
|
r26zhao/django_blog
|
blog/migrations/0012_auto_20170621_1250.py
|
Python
|
mit
| 586
| 0
|
from django.conf.urls import patterns, url
from django.core.urlresolvers import reverse
from django.http import Http404
from modeltree.tree import MODELTREE_DEFAULT_ALIAS, trees
from restlib2.params import Parametizer, IntParam, StrParam
from avocado.export import BaseExporter, registry as exporters
from avocado.query import pipeline, utils
from serrano.resources import API_VERSION
from serrano.resources.base import BaseResource
from serrano.resources.processors import EXPORTER_RESULT_PROCESSOR_NAME, \
process_results
# Single list of all registered exporters.
# FIX: wrap zip() in list() — on Python 3 zip() returns a lazy iterator
# that cannot be indexed; on Python 2 this is a no-op copy.
EXPORT_TYPES = list(zip(*exporters.choices))[0]
class ExporterRootResource(BaseResource):
    """Root endpoint that advertises every registered exporter."""

    def get_links(self, request):
        """Build absolute link descriptors for each export type."""
        uri = request.build_absolute_uri
        links = {'self': uri(reverse('serrano:data:exporter'))}
        for export_type in EXPORT_TYPES:
            exporter = exporters.get(export_type)
            links[export_type] = {
                'link': uri(reverse(
                    'serrano:data:exporter',
                    kwargs={'export_type': export_type}
                )),
                'data': {
                    'title': exporter.short_name,
                    'description': exporter.long_name,
                },
            }
        return links

    def get(self, request):
        """Return the endpoint title and API version."""
        return {
            'title': 'Serrano Exporter Endpoints',
            'version': API_VERSION
        }
class ExporterParametizer(Parametizer):
    # Validated query parameters accepted by the exporter endpoints,
    # with their defaults and allowed choices.
    limit = IntParam(50)
    processor = StrParam('default', choices=pipeline.query_processors)
    reader = StrParam('cached', choices=BaseExporter.readers)
    tree = StrParam(MODELTREE_DEFAULT_ALIAS, choices=trees)
class ExporterResource(BaseResource):
    """Runs an export for a given export type and streams the results."""
    cache_max_age = 0
    private_cache = True
    parametizer = ExporterParametizer
    # Per-session query name, used to track/cancel the running export.
    QUERY_NAME_TEMPLATE = '{session_key}:{export_type}'
    def _get_query_name(self, request, export_type):
        # One query slot per (session, export type) pair.
        return self.QUERY_NAME_TEMPLATE.format(
            session_key=request.session.session_key,
            export_type=export_type)
    # Resource is dependent on the available export types
    def is_not_found(self, request, response, export_type, **kwargs):
        return export_type not in EXPORT_TYPES
    def get(self, request, export_type, **kwargs):
        view = self.get_view(request)
        context = self.get_context(request)
        params = self.get_params(request)
        # Configure the query options used for retrieving the results.
        query_options = {
            'export_type': export_type,
            'query_name': self._get_query_name(request, export_type),
        }
        query_options.update(**kwargs)
        query_options.update(params)
        try:
            row_data = utils.get_result_rows(context, view, query_options,
                                             request=request)
        except ValueError:
            # Invalid paging/query options map to a 404.
            raise Http404
        return process_results(
            request, EXPORTER_RESULT_PROCESSOR_NAME, row_data)
    # POST behaves identically to GET (alias, not a copy).
    post = get
    def delete(self, request, export_type, **kwargs):
        # Cancel a running export for this session, if any.
        query_name = self._get_query_name(request, export_type)
        canceled = utils.cancel_query(query_name)
        return self.render(request, {'canceled': canceled})
exporter_resource = ExporterResource()
exporter_root_resource = ExporterRootResource()
# Resource endpoints
# Routes: root listing, per-type export, single page, and page range.
urlpatterns = patterns(
    '',
    url(r'^$', exporter_root_resource, name='exporter'),
    url(r'^(?P<export_type>\w+)/$', exporter_resource, name='exporter'),
    url(r'^(?P<export_type>\w+)/(?P<page>\d+)/$', exporter_resource,
        name='exporter'),
    url(r'^(?P<export_type>\w+)/(?P<page>\d+)\.\.\.(?P<stop_page>\d+)/$',
        exporter_resource, name='exporter'),
)
|
chop-dbhi/serrano
|
serrano/resources/exporter.py
|
Python
|
bsd-2-clause
| 3,758
| 0
|
# Copyright 2013 Nebula Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""Container v1 action implementations"""
import logging
import six
from cliff import command
from cliff import lister
from cliff import show
from openstackclient.common import parseractions
from openstackclient.common import utils
class CreateContainer(lister.Lister):
    """Create new container"""
    log = logging.getLogger(__name__ + '.CreateContainer')
    def get_parser(self, prog_name):
        parser = super(CreateContainer, self).get_parser(prog_name)
        parser.add_argument(
            'containers',
            metavar='<container-name>',
            nargs="+",
            help='New container name(s)',
        )
        return parser
    @utils.log_method(log)
    def take_action(self, parsed_args):
        # Create each requested container and list the creation results.
        results = []
        for container in parsed_args.containers:
            data = self.app.client_manager.object_store.container_create(
                container=container,
            )
            results.append(data)
        columns = ("account", "container", "x-trans-id")
        return (columns,
                (utils.get_dict_properties(
                    s, columns,
                    formatters={},
                ) for s in results))
class DeleteContainer(command.Command):
    """Delete container"""
    log = logging.getLogger(__name__ + '.DeleteContainer')
    def get_parser(self, prog_name):
        parser = super(DeleteContainer, self).get_parser(prog_name)
        parser.add_argument(
            'containers',
            metavar='<container>',
            nargs="+",
            help='Container(s) to delete',
        )
        return parser
    @utils.log_method(log)
    def take_action(self, parsed_args):
        # Delete each named container; produces no output on success.
        for container in parsed_args.containers:
            self.app.client_manager.object_store.container_delete(
                container=container,
            )
class ListContainer(lister.Lister):
    """List containers"""
    log = logging.getLogger(__name__ + '.ListContainer')
    def get_parser(self, prog_name):
        parser = super(ListContainer, self).get_parser(prog_name)
        parser.add_argument(
            "--prefix",
            metavar="<prefix>",
            help="Filter list using <prefix>",
        )
        parser.add_argument(
            "--marker",
            metavar="<marker>",
            help="Anchor for paging",
        )
        parser.add_argument(
            "--end-marker",
            metavar="<end-marker>",
            help="End anchor for paging",
        )
        parser.add_argument(
            "--limit",
            metavar="<limit>",
            type=int,
            help="Limit the number of containers returned",
        )
        parser.add_argument(
            '--long',
            action='store_true',
            default=False,
            help='List additional fields in output',
        )
        parser.add_argument(
            '--all',
            action='store_true',
            default=False,
            help='List all containers (default is 10000)',
        )
        return parser
    @utils.log_method(log)
    def take_action(self, parsed_args):
        # --long adds size/object-count columns to the listing.
        if parsed_args.long:
            columns = ('Name', 'Bytes', 'Count')
        else:
            columns = ('Name',)
        # Forward only the options the user actually supplied.
        kwargs = {}
        if parsed_args.prefix:
            kwargs['prefix'] = parsed_args.prefix
        if parsed_args.marker:
            kwargs['marker'] = parsed_args.marker
        if parsed_args.end_marker:
            kwargs['end_marker'] = parsed_args.end_marker
        if parsed_args.limit:
            kwargs['limit'] = parsed_args.limit
        if parsed_args.all:
            kwargs['full_listing'] = True
        data = self.app.client_manager.object_store.container_list(
            **kwargs
        )
        return (columns,
                (utils.get_dict_properties(
                    s, columns,
                    formatters={},
                ) for s in data))
class SaveContainer(command.Command):
    """Save container contents locally"""
    log = logging.getLogger(__name__ + ".SaveContainer")

    def get_parser(self, prog_name):
        parser = super(SaveContainer, self).get_parser(prog_name)
        parser.add_argument(
            'container',
            metavar='<container>',
            help='Container to save',
        )
        return parser

    # Consistency fix: every sibling command in this module logs entry via
    # the utils.log_method decorator; SaveContainer was the only one using
    # a manual self.log.debug() call instead.
    @utils.log_method(log)
    def take_action(self, parsed_args):
        self.app.client_manager.object_store.container_save(
            container=parsed_args.container,
        )
class SetContainer(command.Command):
    """Set container properties"""
    log = logging.getLogger(__name__ + '.SetContainer')
    def get_parser(self, prog_name):
        parser = super(SetContainer, self).get_parser(prog_name)
        parser.add_argument(
            'container',
            metavar='<container>',
            help='Container to modify',
        )
        parser.add_argument(
            "--property",
            metavar="<key=value>",
            required=True,
            action=parseractions.KeyValueAction,
            help="Set a property on this container "
                 "(repeat option to set multiple properties)"
        )
        return parser
    @utils.log_method(log)
    def take_action(self, parsed_args):
        # parsed_args.property is a dict built by KeyValueAction.
        self.app.client_manager.object_store.container_set(
            parsed_args.container,
            properties=parsed_args.property,
        )
class ShowContainer(show.ShowOne):
    """Display container details"""
    log = logging.getLogger(__name__ + '.ShowContainer')
    def get_parser(self, prog_name):
        parser = super(ShowContainer, self).get_parser(prog_name)
        parser.add_argument(
            'container',
            metavar='<container>',
            help='Container to display',
        )
        return parser
    @utils.log_method(log)
    def take_action(self, parsed_args):
        data = self.app.client_manager.object_store.container_show(
            container=parsed_args.container,
        )
        # Flatten the nested properties dict into a single display string.
        if 'properties' in data:
            data['properties'] = utils.format_dict(data.pop('properties'))
        # Return (keys, values) sorted by key, as ShowOne expects.
        return zip(*sorted(six.iteritems(data)))
class UnsetContainer(command.Command):
    """Unset container properties"""
    log = logging.getLogger(__name__ + '.UnsetContainer')
    def get_parser(self, prog_name):
        parser = super(UnsetContainer, self).get_parser(prog_name)
        parser.add_argument(
            'container',
            metavar='<container>',
            help='Container to modify',
        )
        parser.add_argument(
            '--property',
            metavar='<key>',
            required=True,
            action='append',
            default=[],
            help='Property to remove from container '
                 '(repeat option to remove multiple properties)',
        )
        return parser
    @utils.log_method(log)
    def take_action(self, parsed_args):
        # parsed_args.property is a list of keys to remove.
        self.app.client_manager.object_store.container_unset(
            parsed_args.container,
            properties=parsed_args.property,
        )
|
BjoernT/python-openstackclient
|
openstackclient/object/v1/container.py
|
Python
|
apache-2.0
| 7,719
| 0
|
# coding: utf-8
#
# Copyright 2015 Palantir Technologies, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function
import json
import pytest
import six
import werkzeug.debug
from webtest import TestApp
from werkzeug.exceptions import HTTPException
import typedjsonrpc.errors
from typedjsonrpc.registry import Registry
from typedjsonrpc.server import DebuggedJsonRpcApplication, Response, Server, current_request
if six.PY3:
import unittest.mock as mock
else:
import mock
class TestDebuggedJsonRpcApplication(object):
    """Tests for DebuggedJsonRpcApplication's /debug traceback endpoints."""
    @staticmethod
    def get_app():
        # Fresh (registry, server, debugged wrapper) triple per test.
        registry = Registry()
        server = Server(registry)
        debugged_app = DebuggedJsonRpcApplication(server)
        return registry, server, debugged_app
    def test_handle_debug_no_such_traceback(self):
        """An unknown traceback id yields HTTP 404."""
        registry, server, debugged_app = TestDebuggedJsonRpcApplication.get_app()
        with pytest.raises(HTTPException) as excinfo:
            debugged_app.handle_debug(None, None, -1)
        assert excinfo.value.code == 404
    def test_handle_debug_response_called(self):
        """A registered traceback id renders without raising."""
        registry, server, debugged_app = TestDebuggedJsonRpcApplication.get_app()
        mock_traceback = mock.Mock()
        mock_traceback.render_full = mock.Mock(return_value="")
        mock_traceback.frames = mock.NonCallableMagicMock()
        registry.tracebacks[1234] = mock_traceback
        start_response = mock.Mock()
        environ = {
            "SERVER_NAME": "localhost",
            "SERVER_PORT": "5060",
            "PATH_INFO": "/api",
            "REQUEST_METHOD": "POST",
            "wsgi.url_scheme": "http",
        }
        debugged_app.handle_debug(environ, start_response, 1234)
    @mock.patch("typedjsonrpc.server.DebuggedJsonRpcApplication.handle_debug",
                mock.Mock(return_value=["foo"]))
    def test_debug_application_debug_endpoint(self):
        """/debug/<id> paths are routed to handle_debug."""
        registry, server, debugged_app = TestDebuggedJsonRpcApplication.get_app()
        environ = {
            "SERVER_NAME": "localhost",
            "SERVER_PORT": "5060",
            "PATH_INFO": "/debug/1234",
            "REQUEST_METHOD": "POST",
            "wsgi.url_scheme": "http",
        }
        start_response = mock.Mock()
        assert ["foo"] == debugged_app.debug_application(environ, start_response)
        assert DebuggedJsonRpcApplication.handle_debug.called
    @mock.patch("werkzeug.debug.DebuggedApplication.debug_application",
                mock.Mock(return_value=["foo"]))
    def test_debug_application_normal_endpoint(self):
        """Non-/debug paths fall through to werkzeug's debug_application."""
        registry, server, debugged_app = TestDebuggedJsonRpcApplication.get_app()
        environ = {
            "SERVER_NAME": "localhost",
            "SERVER_PORT": "5060",
            "PATH_INFO": "/api",
            "REQUEST_METHOD": "POST",
            "wsgi.url_scheme": "http",
        }
        start_response = mock.NonCallableMock()
        result = debugged_app.debug_application(environ, start_response)
        assert result == ["foo"]
        assert werkzeug.debug.DebuggedApplication.debug_application.called
class TestServer(object):
    """Tests for Server routing, dispatch and HTTP status code mapping."""
    @staticmethod
    def _create_mock_registry():
        # Registry stub whose dispatch() returns a successful JSON-RPC reply.
        mock_registry = mock.Mock()
        mock_registry.json_encoder = json.JSONEncoder()
        mock_registry.json_decoder = json.JSONDecoder()
        mock_registry.dispatch.return_value = json.dumps({
            "jsonrpc": "2.0",
            "id": "foo",
            "result": "bar"
        })
        return mock_registry
    def test_wsgi_app_invalid_endpoint(self):
        """Requests outside the configured endpoint raise HTTP 404."""
        environ = {
            "SERVER_NAME": "localhost",
            "SERVER_PORT": "5060",
            "PATH_INFO": "/bogus",
            "REQUEST_METHOD": "POST",
            "wsgi.url_scheme": "http",
        }
        mock_registry = self._create_mock_registry()
        server = Server(mock_registry, "/foo")
        with pytest.raises(HTTPException) as excinfo:
            server(environ, None)
        assert excinfo.value.code == 404
    def test_wsgi_app_dispatch(self):
        """A request on the endpoint is dispatched exactly once."""
        environ = {
            "SERVER_NAME": "localhost",
            "SERVER_PORT": "5060",
            "PATH_INFO": "/foo",
            "REQUEST_METHOD": "POST",
            "wsgi.url_scheme": "http",
        }
        mock_registry = self._create_mock_registry()
        server = Server(mock_registry, "/foo")
        mock_start_response = mock.Mock()
        server(environ, mock_start_response)
        mock_registry.dispatch.assert_called_once_with(mock.ANY)
    def test_before_first_request_funcs(self):
        """Registered before-first-request hooks run once across requests."""
        environ = {
            "SERVER_NAME": "localhost",
            "SERVER_PORT": "5060",
            "PATH_INFO": "/foo",
            "REQUEST_METHOD": "POST",
            "wsgi.url_scheme": "http",
        }
        mock_registry = self._create_mock_registry()
        mock_start = mock.Mock()
        # NOTE(review): this *calls* the mock's return_value; it was
        # probably meant to be "mock_start.return_value = None", but it
        # is harmless either way — confirm upstream.
        mock_start.return_value(None)
        server = Server(mock_registry, "/foo")
        server.register_before_first_request(mock_start)
        mock_start_response = mock.Mock()
        server(environ, mock_start_response)
        server(environ, mock_start_response)
        mock_start.assert_called_once_with()
    def test_http_status_code_empty_response(self):
        """A notification (no response body) maps to HTTP 204."""
        mock_registry = self._create_mock_registry()
        mock_registry.dispatch.return_value = None
        server = Server(mock_registry, "/foo")
        app = TestApp(server)
        app.post("/foo", status=204)
    def test_http_status_code_success_response(self):
        """A successful result maps to HTTP 200."""
        mock_registry = self._create_mock_registry()
        server = Server(mock_registry, "/foo")
        app = TestApp(server)
        app.post("/foo", status=200)
    def test_http_status_code_batched_response_half_success(self):
        """A batch with any success still maps to HTTP 200."""
        mock_registry = self._create_mock_registry()
        server = Server(mock_registry, "/foo")
        mock_registry.dispatch.return_value = json.dumps([
            {
                "jsonrpc": "2.0",
                "id": "foo",
                "result": "bar"
            }, {
                "jsonrpc": "2.0",
                "id": "bar",
                "error": typedjsonrpc.errors.MethodNotFoundError().as_error_object()
            }
        ])
        app = TestApp(server)
        app.post("/foo", status=200)
    def test_http_status_code_batched_response_all_failed(self):
        """A batch response always maps to HTTP 200, even if all entries failed."""
        mock_registry = self._create_mock_registry()
        server = Server(mock_registry, "/foo")
        mock_registry.dispatch.return_value = json.dumps([
            {
                "jsonrpc": "2.0",
                "id": "foo",
                "error": typedjsonrpc.errors.MethodNotFoundError().as_error_object()
            }, {
                "jsonrpc": "2.0",
                "id": "bar",
                "error": typedjsonrpc.errors.MethodNotFoundError().as_error_object()
            }
        ])
        app = TestApp(server)
        app.post("/foo", status=200)
    def test_http_status_code_method_not_found(self):
        """MethodNotFoundError maps to HTTP 404."""
        mock_registry = self._create_mock_registry()
        server = Server(mock_registry, "/foo")
        mock_registry.dispatch.return_value = json.dumps({
            "jsonrpc": "2.0",
            "id": "foo",
            "error": typedjsonrpc.errors.MethodNotFoundError().as_error_object()
        })
        app = TestApp(server)
        app.post("/foo", status=404)
    def test_http_status_code_parse_error(self):
        """ParseError maps to HTTP 400."""
        mock_registry = self._create_mock_registry()
        server = Server(mock_registry, "/foo")
        mock_registry.dispatch.return_value = json.dumps({
            "jsonrpc": "2.0",
            "id": "foo",
            "error": typedjsonrpc.errors.ParseError().as_error_object()
        })
        app = TestApp(server)
        app.post("/foo", status=400)
    def test_http_status_code_invalid_request_error(self):
        """InvalidRequestError maps to HTTP 400."""
        mock_registry = self._create_mock_registry()
        server = Server(mock_registry, "/foo")
        mock_registry.dispatch.return_value = json.dumps({
            "jsonrpc": "2.0",
            "id": "foo",
            "error": typedjsonrpc.errors.InvalidRequestError().as_error_object()
        })
        app = TestApp(server)
        app.post("/foo", status=400)
    def test_http_status_code_other_errors(self):
        """All remaining error types map to HTTP 500."""
        other_error_types = [
            typedjsonrpc.errors.InvalidReturnTypeError,
            typedjsonrpc.errors.InvalidParamsError,
            typedjsonrpc.errors.ServerError,
            typedjsonrpc.errors.InternalError,
            typedjsonrpc.errors.Error,
        ]
        mock_registry = self._create_mock_registry()
        server = Server(mock_registry, "/foo")
        for error_type in other_error_types:
            mock_registry.dispatch.return_value = json.dumps({
                "jsonrpc": "2.0",
                "id": "foo",
                "error": error_type().as_error_object()
            })
            app = TestApp(server)
            app.post("/foo", status=500)
class TestCurrentRequest(object):
    """Tests that the current_request proxy is bound during dispatch."""
    def test_current_request_set(self):
        """current_request matches the request inside _dispatch_request."""
        registry = Registry()
        server = Server(registry)
        def fake_dispatch_request(request):
            assert current_request == request
            return Response()
        server._dispatch_request = fake_dispatch_request
        environ = {
            "SERVER_NAME": "localhost",
            "SERVER_PORT": "5060",
            "PATH_INFO": "/foo",
            "REQUEST_METHOD": "POST",
            "wsgi.url_scheme": "http",
        }
        mock_start_response = mock.Mock()
        server(environ, mock_start_response)
    def test_current_request_passed_to_registry(self):
        """current_request matches the request inside registry.dispatch."""
        registry = Registry()
        server = Server(registry)
        def fake_dispatch(request):
            assert current_request == request
            return json.dumps({
                "jsonrpc": "2.0",
                "id": "foo",
                "result": "bar"
            })
        registry.dispatch = fake_dispatch
        environ = {
            "SERVER_NAME": "localhost",
            "SERVER_PORT": "5060",
            "PATH_INFO": "/api",
            "REQUEST_METHOD": "POST",
            "wsgi.url_scheme": "http",
        }
        mock_start_response = mock.Mock()
        server(environ, mock_start_response)
|
palantir/typedjsonrpc
|
tests/test_server.py
|
Python
|
apache-2.0
| 10,750
| 0.001023
|
#!/usr/bin/env python
import unittest
from pycoin.ecdsa import generator_secp256k1, sign, verify, public_pair_for_secret_exponent
class ECDSATestCase(unittest.TestCase):
    """Round-trip sanity checks for secp256k1 sign/verify."""

    def test_sign_verify(self):
        # Upper bound for the signature "s" component: values above half the
        # curve order make the signature malleable, see BIP-62 ("low S").
        LOW_S_MAX = 0x7FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF5D576E7357A4501DDFE92F46681B20A0

        def check_exponent(secret_exponent, values):
            public_point = public_pair_for_secret_exponent(
                generator_secp256k1, secret_exponent)
            for value in values:
                sig = sign(generator_secp256k1, secret_exponent, value)
                ok = verify(generator_secp256k1, public_point, value, sig)
                assert sig[1] <= LOW_S_MAX
                assert ok == True
                # Corrupting "s" must make verification fail.
                bad_sig = (sig[0], sig[1] + 1)
                assert verify(generator_secp256k1, public_point, value, bad_sig) == False

        values = [100, 20000, 30000000, 400000000000,
                  50000000000000000, 60000000000000000000000]
        check_exponent(0x1111111111111111111111111111111111111111111111111111111111111111, values)
        check_exponent(0xdddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddd, values)
        check_exponent(0x47f7616ea6f9b923076625b4488115de1ef1187f760e65f89eb6f4f7ff04b012, values)
# Allow running this module directly as a test script.
if __name__ == '__main__':
    unittest.main()
|
greenaddress/pycoin
|
pycoin/test/ecdsa_test.py
|
Python
|
mit
| 1,478
| 0.012855
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# ------------------------------------------------------------------------
# File Name: utils.py
# Author: Zhao Yanbai
# Thu Oct 30 06:33:24 2014
# Description: none
# ------------------------------------------------------------------------
import logging
import struct
import socket
import web
import MySQLdb
import commands
import json
import time
from mail import SendMail
from weixin import SendWeiXinMsg
def init_logging(path):
    """Configure root logging to append to *path* at INFO level."""
    fmt = '%(levelname)s\t%(asctime)s: %(message)s'
    logging.basicConfig(filename=path, level=logging.INFO, format=fmt)
def SendMsg(title, msg):
    """Deliver *msg* via WeiXin, falling back to e-mail on failure."""
    delivered = SendWeiXinMsg(msg)
    if not delivered:
        SendMail(title, msg)
class Storage(dict):
    """A dict whose items are also reachable as attributes.

    ``s.foo`` is equivalent to ``s['foo']``; missing keys raise
    ``AttributeError`` so ``hasattr``/``getattr`` behave as expected.

    FIX: replaced the Python-2-only ``except KeyError, k`` /
    ``raise AttributeError, k`` syntax with the ``as``/call forms that are
    valid on both Python 2.6+ and Python 3; behavior is unchanged.
    """

    def __getattr__(self, key):
        try:
            return self[key]
        except KeyError as k:
            # Translate to AttributeError so attribute protocol works.
            raise AttributeError(k)

    def __setattr__(self, key, value):
        self[key] = value

    def __delattr__(self, key):
        try:
            del self[key]
        except KeyError as k:
            raise AttributeError(k)

    def __repr__(self):
        return '<Storage ' + dict.__repr__(self) + '>'
def ip2int(ip):
    """Convert dotted-quad *ip* to its 32-bit big-endian integer value."""
    packed = socket.inet_aton(ip)
    (value,) = struct.unpack("!I", packed)
    return value
def int2ip(i):
    """Convert a 32-bit integer to its dotted-quad string form.

    FIX: removed a leftover debug ``print i`` statement (it polluted
    stdout on every call and is invalid Python 3 syntax); the helper is
    now side-effect free.
    """
    return str(socket.inet_ntoa(struct.pack("!I", int(i))))
def INET_ATON(ipstr):
    """SQL-style INET_ATON: dotted quad string -> decimal string."""
    return str(ip2int(ipstr))
def INET_NTOA(ip):
    """SQL-style INET_NTOA: decimal value/string -> dotted quad string."""
    # Mask to 32 bits so oversized inputs cannot make struct.pack fail.
    return int2ip(int(ip) & 0xFFFFFFFF)
def CheckIP(s):
    """Return True when *s* looks like a dotted-quad IPv4 address."""
    try:
        octets = [part for part in s.split('.') if 0 <= int(part) <= 255]
        return len(octets) == 4
    except:
        # Any parse failure (non-numeric part, wrong type, ...) means "no".
        return False
def CheckPort(port):
    """Return True when *port* is a decimal string in the range 1..65535."""
    if not port.isdigit():
        return False
    return 0 < int(port) < 65536
def CheckLogic(logic):
    """Return True when *logic* is a decimal string equal to 0, 1 or 2."""
    return logic.isdigit() and int(logic) in (0, 1, 2)
class PageBase(object):
    """Base class for web.py page controllers.

    Subclasses register per-action handlers; GET/POST read the ``action``
    request parameter, look it up in ``self.ActionMap`` and invoke the
    matching handler (defaulting to ``List``).

    FIXES: removed the unreachable ``return self.Ret`` statements that
    followed the ``return json.dumps(...)`` in ErrMsg/SucMsg (dead code),
    and deduplicated the identical placeholder action handlers.
    """

    def __init__(self):
        self.ActionMap = {}
        self.action = ''
        # Default action routing; subclasses override the handler methods.
        self.SetActionHandler('New', self.New)
        self.SetActionHandler('Add', self.Add)
        self.SetActionHandler('Del', self.Del)
        self.SetActionHandler('Edit', self.Edit)
        self.SetActionHandler('List', self.List)
        self.SetActionHandler('Search', self.Search)
        self.SetActionHandler('UNIMPLEMENTED', self.UNIMPLEMENTED)
        # Canonical JSON reply envelope: Err == 0 means success.
        self.Ret = {
            'Err': -1,
            'Msg': 'Unknown'
        }

    def ErrMsg(self, msg):
        """Return a JSON failure reply carrying *msg*."""
        self.Ret['Err'] = 1
        self.Ret['Msg'] = msg
        return json.dumps(self.Ret, ensure_ascii=False)

    def SucMsg(self, msg):
        """Return a JSON success reply carrying *msg*."""
        self.Ret['Err'] = 0
        self.Ret['Msg'] = msg
        return json.dumps(self.Ret, ensure_ascii=False)

    def SucJsonData(self, data):
        """Return a JSON success reply carrying *data*."""
        self.Ret['Err'] = 0
        self.Ret['Msg'] = 'success'
        self.Ret['Data'] = data
        r = json.dumps(self.Ret, ensure_ascii=False)
        return r

    def AuthorizedUser(self):
        """Authorization hook; subclasses may override.  Default: allow."""
        return True

    def UNIMPLEMENTED(self):
        if len(self.action) == 0:
            return "UNIMPLEMENTED"
        return "UNIMPLEMENTED HANDLER FOR THE ACTION: {0}".format(self.action)

    def REQUEST_HANDLER(self):
        """Route the request to the handler registered for ``action``."""
        self.action = web.input().get('action', '').strip()
        return self.ActionMap.get(self.action, self.List)()

    def GET(self):
        if not self.AuthorizedUser():
            return "UNAUTHORIZED USER"
        return self.REQUEST_HANDLER()

    def POST(self):
        if not self.AuthorizedUser():
            return "UNAUTHORIZED USER"
        return self.REQUEST_HANDLER()

    def SetActionHandler(self, action, handler):
        self.ActionMap[action] = handler

    def _must_implement(self):
        # Shared default body for the overridable action handlers below.
        # (Original message text, including its wording, kept verbatim.)
        return "YOU MUST IMPLEMENTED HANDLER FOR THE ACTION: {0}".format(self.action)

    def New(self):
        return self._must_implement()

    def Add(self):
        return self._must_implement()

    def Del(self):
        return self._must_implement()

    def Edit(self):
        return self._must_implement()

    def List(self):
        return self._must_implement()

    def Update(self):
        return self._must_implement()

    def Search(self):
        return self._must_implement()
class DBBase(object):
    """Thin helper around a web.py database handle.

    Tracks the outcome of the most recent operation in ``self.ret`` and
    offers small wrappers for SELECTs (``Read``) and mutating statements
    (``Modify``).
    """

    def __init__(self, db):
        # db: a web.py-style database object exposing .query(sql)
        self.db = db
        # Result envelope of the last operation; Err == 1 means failure.
        self.ret = {
            "Err": 0,
            "Msg": "No Error",
        }

    def SetSuccMsg(self, msg):
        """Mark the last operation as successful with *msg*."""
        self.ret["Err"] = 0
        self.ret["Msg"] = msg

    def SetFailMsg(self, msg):
        """Mark the last operation as failed with *msg*."""
        self.ret["Err"] = 1
        self.ret["Msg"] = msg

    def IsFail(self):
        """Return True when the last operation failed."""
        return self.ret['Err'] == 1

    def Fail(self, msg='UnSetErrReason'):
        """Mark failure with *msg* and return the result envelope."""
        self.ret['Err'] = 1
        self.ret['Msg'] = msg
        return self.ret

    def Ret(self):
        """Return the raw result envelope of the last operation."""
        return self.ret

    def GetRetMsg(self):
        """Return the human-readable message of the last operation."""
        return self.ret['Msg']

    def Result(self, url=''):
        """On failure return the error text; on success redirect to *url*.

        NOTE(review): the success branch calls ``web.seeother`` and falls
        through, so this method returns None in that case - presumably
        web.py's redirect side effect is what matters; confirm.
        """
        if self.IsFail():
            return self.GetRetMsg()
            #return config.render.ErrMsg(msg=self.GetRetMsg())
        else:
            #return config.render.Msg(msg=self.GetRetMsg(), url = url)
            web.seeother(url)

    def Read(self, sql, sidx="", sord=""):
        """Run a SELECT, optionally ordered by *sidx*/*sord*; return rows.

        *sord* is normalised to ASC unless it is exactly DESC; a MySQL
        programming error yields an empty list instead of raising.
        """
        if sidx != "":
            sord = sord.upper()
            if sord != "ASC" and sord != "DESC":
                sord = "ASC"
            sql = sql + " ORDER BY " + sidx + " " + sord
        try:
            #print sql
            records = list(self.db.query(sql))
        except MySQLdb.ProgrammingError:
            records = []
        return records

    def Modify(self, sql):
        """Run one or more ';'-separated mutating statements.

        Stops at the first statement shorter than 5 characters (treated
        as trailing junk) and at the first failing statement; returns the
        result envelope.
        """
        sqls = sql.split(';')
        for sql in sqls:
            if len(sql) < 5:
                break
            #self.db.query(sql)
            #return
            try:
                #print sql
                self.db.query(sql)
                self.SetSuccMsg(u"操作完成")
            except MySQLdb.ProgrammingError:
                self.SetFailMsg("MySQL Programming Error")
            except MySQLdb.IntegrityError:
                self.SetFailMsg("Duplicate Record")
            except:
                self.SetFailMsg("Unknown Error")
            if self.IsFail():
                break
        return self.ret
def GetSvrOutputLines(cmd):
    """Run *cmd* and return the stripped payload of every '>'-prefixed line.

    Collection stops at the first empty output line; lines without the
    '>' marker are skipped.
    """
    result = []
    output = commands.getoutput(cmd)
    for raw in output.splitlines():
        if not raw:
            break
        if not raw.startswith('>'):
            continue
        result.append(raw[1:].strip())
    return result
def Ts2TmStr(ts=int(time.time())) :
return time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(ts))
|
acevest/monitor
|
utils.py
|
Python
|
gpl-2.0
| 6,848
| 0.017251
|
from __future__ import print_function
from SimpleCV import *
# ---- User warning & dataset download --------------------------------------
print("")
print("This program runs a list of test for machine learning on")
print("the SimpleCV library. Not all scores will be high, this")
print("is just to ensure that the libraries are functioning correctly")
print("on your system")
print("")
print("***** WARNING *****")
print("This program is about to download a large data set to run it's test")
# raw_input is Python 2; an empty answer defaults to "yes".
inp = raw_input("Do you want to continue [Y/n]")
if not (inp == "" or inp.lower() == "y"):
    print("Exiting the program")
    sys.exit()
machine_learning_data_set = "https://github.com/downloads/sightmachine/SimpleCV/machine_learning_dataset.zip"
data_path = download_and_extract(machine_learning_data_set)
# ---- Display and feature-extractor setup ----------------------------------
w = 800   # display width in pixels
h = 600   # display height in pixels
n=50      # images per class used for the train/test subsets
display = Display(resolution = (w,h))
hue = HueHistogramFeatureExtractor(mNBins=16)
edge = EdgeHistogramFeatureExtractor()
bof = BOFFeatureExtractor()
bof.load('../Features/cbdata.txt')
haar = HaarLikeFeatureExtractor(fname="../Features/haar.txt")
morph = MorphologyFeatureExtractor()
# ---- Data-set directory layout (structured vs. unstructured scenes) -------
spath = data_path + "/data/structured/"
upath = data_path + "/data/unstructured/"
ball_path = spath+"ball/"
basket_path = spath+"basket/"
boat_path = spath+"boat/"
cactus_path = spath +"cactus/"
cup_path = spath+"cup/"
duck_path = spath+"duck/"
gb_path = spath+"greenblock/"
match_path = spath+"matches/"
rb_path = spath+"redblock/"
s1_path = spath+"stuffed/"
s2_path = spath+"stuffed2/"
s3_path = spath+"stuffed3/"
arbor_path = upath+"arborgreens/"
football_path = upath+"football/"
sanjuan_path = upath+"sanjuans/"
# ---- SVM with a polynomial kernel (hue features) --------------------------
print('SVMPoly')
#Set up am SVM with a poly kernel
extractors = [hue]
path = [cactus_path,cup_path,basket_path]
classes = ['cactus','cup','basket']
props ={
    'KernelType':'Poly', #default is a RBF Kernel
    'SVMType':'C', #default is C
    'nu':None, # NU for SVM NU
    'c':None, #C for SVM C - the slack variable
    'degree':3, #degree for poly kernels - defaults to 3
    'coef':None, #coef for Poly/Sigmoid defaults to 0
    'gamma':None, #kernel param for poly/rbf/sigma - default is 1/#samples
}
print('Train')
classifierSVMP = SVMClassifier(extractors,props)
data = []
for p in path:
    data.append(ImageSet(p))
classifierSVMP.train(data,classes,disp=display,subset=n) #train
print('Test')
[pos,neg,confuse] = classifierSVMP.test(data,classes,disp=display,subset=n)
# Classify a handful of images from the first class and print the labels.
files = []
for ext in IMAGE_FORMATS:
    files.extend(glob.glob( os.path.join(path[0], ext)))
for i in range(10):
    img = Image(files[i])
    cname = classifierSVMP.classify(img)
    print(files[i]+' -> '+cname)
# Round-trip the trained classifier through pickle serialization.
classifierSVMP.save('PolySVM.pkl')
print('Reloading from file')
testSVM = SVMClassifier.load('PolySVM.pkl')
#testSVM.setFeatureExtractors(extractors)
files = glob.glob( os.path.join(path[0], '*.jpg'))
for i in range(10):
    img = Image(files[i])
    cname = testSVM.classify(img)
    print(files[i]+' -> '+cname)
print('###############################################################################')
# ---- SVM with an RBF kernel (hue + edge features) -------------------------
print('SVMRBF ')
# now try an RBF kernel
extractors = [hue,edge]
path = [cactus_path,cup_path,basket_path]
classes = ['cactus','cup','basket']
props ={
    'KernelType':'RBF', #default is a RBF Kernel
    'SVMType':'NU', #default is C
    'nu':None, # NU for SVM NU
    'c':None, #C for SVM C - the slack variable
    'degree':None, #degree for poly kernels - defaults to 3
    'coef':None, #coef for Poly/Sigmoid defaults to 0
    'gamma':None, #kernel param for poly/rbf/sigma
}
print('Train')
classifierSVMRBF = SVMClassifier(extractors,props)
data = []
for p in path:
    data.append(ImageSet(p))
classifierSVMRBF.train(data,classes,disp=display,subset=n) #train
print('Test')
[pos,neg,confuse] = classifierSVMRBF.test(data,classes,disp=display,subset=n)
files = glob.glob( os.path.join(path[0], '*.jpg'))
for i in range(10):
    img = Image(files[i])
    cname = classifierSVMRBF.classify(img)
    print(files[i]+' -> '+cname)
# Save / reload round trip, then reclassify with the loaded instance.
classifierSVMRBF.save('RBFSVM.pkl')
print('Reloading from file')
testSVMRBF = SVMClassifier.load('RBFSVM.pkl')
#testSVMRBF.setFeatureExtractors(extractors)
files = glob.glob( os.path.join(path[0], '*.jpg'))
for i in range(10):
    img = Image(files[i])
    cname = testSVMRBF.classify(img)
    print(files[i]+' -> '+cname)
print('###############################################################################')
# ---- Naive Bayes on the unstructured scenes (haar features) ---------------
print('Bayes')
extractors = [haar]
classifierBayes = NaiveBayesClassifier(extractors)#
print('Train')
path = [arbor_path,football_path,sanjuan_path]
classes = ['arbor','football','sanjuan']
classifierBayes.train(path,classes,disp=display,subset=n) #train
print('Test')
[pos,neg,confuse] = classifierBayes.test(path,classes,disp=display,subset=n)
files = glob.glob( os.path.join(path[0], '*.jpg'))
for i in range(10):
    img = Image(files[i])
    cname = classifierBayes.classify(img)
    print(files[i]+' -> '+cname)
classifierBayes.save('Bayes.pkl')
print('Reloading from file')
testBayes = NaiveBayesClassifier.load('Bayes.pkl')
testBayes.setFeatureExtractors(extractors)
files = glob.glob( os.path.join(path[0], '*.jpg'))
for i in range(10):
    img = Image(files[i])
    cname = testBayes.classify(img)
    print(files[i]+' -> '+cname)
print('###############################################################################')
print('###############################################################################')
# ---- Random forest on the stuffed-animal sets (morphology features) -------
print('Forest')
extractors = [morph]
classifierForest = TreeClassifier(extractors,flavor='Forest')#
print('Train')
path = [s1_path,s2_path,s3_path]
classes = ['s1','s2','s3']
classifierForest.train(path,classes,disp=display,subset=n) #train
print('Test')
[pos,neg,confuse] = classifierForest.test(path,classes,disp=display,subset=n)
files = glob.glob( os.path.join(path[0], '*.jpg'))
for i in range(10):
    img = Image(files[i])
    cname = classifierForest.classify(img)
    print(files[i]+' -> '+cname)
classifierForest.save('forest.pkl')
print('Reloading from file')
testForest = TreeClassifier.load('forest.pkl')
testForest.setFeatureExtractors(extractors)
files = glob.glob( os.path.join(path[0], '*.jpg'))
for i in range(10):
    img = Image(files[i])
    cname = testForest.classify(img)
    print(files[i]+' -> '+cname)
print('###############################################################################')
# ---- Bagged decision trees (haar features) --------------------------------
print('Bagged Tree')
extractors = [haar]
classifierBagTree = TreeClassifier(extractors,flavor='Bagged')#
print('Train')
path = [s1_path,s2_path,s3_path]
classes = ['s1','s2','s3']
classifierBagTree.train(path,classes,disp=display,subset=n) #train
print('Test')
[pos,neg,confuse] = classifierBagTree.test(path,classes,disp=display,subset=n)
files = glob.glob( os.path.join(path[0], '*.jpg'))
for i in range(10):
    img = Image(files[i])
    cname = classifierBagTree.classify(img)
    print(files[i]+' -> '+cname)
classifierBagTree.save('bagtree.pkl')
print('Reloading from file')
testBagTree = TreeClassifier.load('bagtree.pkl')
testBagTree.setFeatureExtractors(extractors)
files = glob.glob( os.path.join(path[0], '*.jpg'))
for i in range(10):
    img = Image(files[i])
    cname = testBagTree.classify(img)
    print(files[i]+' -> '+cname)
print('###############################################################################')
# ---- Plain decision tree (haar features) ----------------------------------
print('Vanilla Tree')
extractors = [haar]
classifierTree = TreeClassifier(featureExtractors=extractors)
print('Train')
path = [s1_path,s2_path,s3_path]
classes = ['s1','s2','s3']
classifierTree.train(path,classes,disp=display,subset=n) #train
print('Test')
[pos,neg,confuse] = classifierTree.test(path,classes,disp=display,subset=n)
files = glob.glob( os.path.join(path[0], '*.jpg'))
for i in range(10):
    img = Image(files[i])
    cname = classifierTree.classify(img)
    print(files[i]+' -> '+cname)
print('Reloading from file')
classifierTree.save('tree.pkl')
testTree = TreeClassifier.load('tree.pkl')
testTree.setFeatureExtractors(extractors)
for i in range(10):
    img = Image(files[i])
    cname = testTree.classify(img)
    print(files[i]+' -> '+cname)
print('###############################################################################')
# ---- Boosted decision trees (haar features) -------------------------------
print('Boosted Tree')
extractors = [haar]
classifierBTree = TreeClassifier(extractors,flavor='Boosted')#
print('Train')
path = [s1_path,s2_path,s3_path]
classes = ['s1','s2','s3']
classifierBTree.train(path,classes,disp=display,subset=n) #train
print('Test')
[pos,neg,confuse] = classifierBTree.test(path,classes,disp=display,subset=n)
files = glob.glob( os.path.join(path[0], '*.jpg'))
for i in range(10):
    img = Image(files[i])
    cname = classifierBTree.classify(img)
    print(files[i]+' -> '+cname)
classifierBTree.save('btree.pkl')
print('Reloading from file')
testBoostTree = TreeClassifier.load('btree.pkl')
testBoostTree.setFeatureExtractors(extractors)
files = glob.glob( os.path.join(path[0], '*.jpg'))
for i in range(10):
    img = Image(files[i])
    cname = testBoostTree.classify(img)
    print(files[i]+' -> '+cname)
print('###############################################################################')
# ---- k-nearest neighbours (hue + edge features) ---------------------------
print('KNN')
extractors = [hue,edge]
classifierKNN = KNNClassifier(extractors)#
print('Train')
path = [s1_path,s2_path,s3_path]
classes = ['s1','s2','s3']
classifierKNN.train(path,classes,disp=display,subset=n) #train
print('Test')
[pos,neg,confuse] = classifierKNN.test(path,classes,disp=display,subset=n)
files = glob.glob( os.path.join(path[0], '*.jpg'))
for i in range(10):
    img = Image(files[i])
    cname = classifierKNN.classify(img)
    print(files[i]+' -> '+cname)
classifierKNN.save('knn.pkl')
print('Reloading from file')
testKNN = KNNClassifier.load('knn.pkl')
testKNN.setFeatureExtractors(extractors)
files = glob.glob( os.path.join(path[0], '*.jpg'))
for i in range(10):
    img = Image(files[i])
    cname = testKNN.classify(img)
    print(files[i]+' -> '+cname)
print("")
print("All the machine learning test have ran correctly")
|
tpltnt/SimpleCV
|
SimpleCV/MachineLearning/MLTestSuite.py
|
Python
|
bsd-3-clause
| 9,967
| 0.019063
|
# -*- coding: utf-8 -*-
# © 2004-2009 Tiny SPRL (<http://tiny.be>).
# © 2015 Agile Business Group <http://www.agilebg.com>
# © 2016 Grupo ESOC Ingeniería de Servicios, S.L.U. - Jairo Llopis
# License AGPL-3.0 or later (http://www.gnu.org/licenses/gpl.html).
import os
import shutil
import tempfile
import traceback
from contextlib import contextmanager
from datetime import datetime, timedelta
from glob import iglob
from openerp import exceptions, models, fields, api, _, tools
from openerp.service import db
import logging
_logger = logging.getLogger(__name__)
try:
import pysftp
except ImportError:
_logger.warning('Cannot import pysftp')
class DbBackup(models.Model):
    """Scheduled backup configuration for the current database.

    Each record defines a destination (local folder or remote SFTP
    server) plus a retention policy.  Outcomes are logged and, on
    failure, posted to the record's mail thread with the
    ``auto_backup.failure`` subtype.
    """
    _name = 'db.backup'
    _inherit = "mail.thread"

    _sql_constraints = [
        ("name_unique", "UNIQUE(name)", "Cannot duplicate a configuration."),
        ("days_to_keep_positive", "CHECK(days_to_keep >= 0)",
         "I cannot remove backups from the future. Ask Doc for that."),
    ]

    name = fields.Char(
        string="Name",
        compute="_compute_name",
        store=True,
        help="Summary of this backup process",
    )
    folder = fields.Char(
        default=lambda self: self._default_folder(),
        oldname="bkp_dir",
        help='Absolute path for storing the backups',
        required=True
    )
    days_to_keep = fields.Integer(
        oldname="daystokeep",
        required=True,
        default=0,
        help="Backups older than this will be deleted automatically. "
             "Set 0 to disable autodeletion.",
    )
    method = fields.Selection(
        selection=[("local", "Local disk"), ("sftp", "Remote SFTP server")],
        default="local",
        help="Choose the storage method for this backup.",
    )
    sftp_host = fields.Char(
        string='SFTP Server',
        oldname="sftpip",
        help=(
            "The host name or IP address from your remote"
            " server. For example 192.168.0.1"
        )
    )
    sftp_port = fields.Integer(
        string="SFTP Port",
        default=22,
        oldname="sftpport",
        help="The port on the FTP server that accepts SSH/SFTP calls."
    )
    sftp_user = fields.Char(
        string='Username in the SFTP Server',
        oldname="sftpusername",
        help=(
            "The username where the SFTP connection "
            "should be made with. This is the user on the external server."
        )
    )
    sftp_password = fields.Char(
        string="SFTP Password",
        oldname="sftppassword",
        help="The password for the SFTP connection. If you specify a private "
             "key file, then this is the password to decrypt it.",
    )
    sftp_private_key = fields.Char(
        string="Private key location",
        help="Path to the private key file. Only the Odoo user should have "
             "read permissions for that file.",
    )

    @api.model
    def _default_folder(self):
        """Default to ``backups`` folder inside current server datadir."""
        return os.path.join(
            tools.config["data_dir"],
            "backups",
            self.env.cr.dbname)

    @api.multi
    @api.depends("folder", "method", "sftp_host", "sftp_port", "sftp_user")
    def _compute_name(self):
        """Get the right summary for this job."""
        for rec in self:
            if rec.method == "local":
                rec.name = "%s @ localhost" % rec.folder
            elif rec.method == "sftp":
                rec.name = "sftp://%s@%s:%d%s" % (
                    rec.sftp_user, rec.sftp_host, rec.sftp_port, rec.folder)

    @api.constrains("folder", "method")
    @api.multi
    def _check_folder(self):
        """Do not use the filestore or you will backup your backups."""
        for s in self:
            if (s.method == "local" and
                    s.folder.startswith(
                        tools.config.filestore(self.env.cr.dbname))):
                raise exceptions.ValidationError(
                    _("Do not save backups on your filestore, or you will "
                      "backup your backups too!"))

    @api.multi
    def action_sftp_test_connection(self):
        """Check if the SFTP settings are correct."""
        try:
            # Just open and close the connection
            with self.sftp_connection():
                # NOTE(review): success is also reported by raising - in
                # this Odoo version a Warning is how a popup reaches the
                # user; confirm this matches the target Odoo release.
                raise exceptions.Warning(_("Connection Test Succeeded!"))
        except (pysftp.CredentialException, pysftp.ConnectionException):
            _logger.info("Connection Test Failed!", exc_info=True)
            raise exceptions.Warning(_("Connection Test Failed!"))

    @api.multi
    def action_backup(self):
        """Run selected backups."""
        backup = None  # path of the first dump written; reused as a cache
        filename = self.filename(datetime.now())
        successful = self.browse()

        # Start with local storage
        for rec in self.filtered(lambda r: r.method == "local"):
            with rec.backup_log():
                # Directory must exist
                try:
                    os.makedirs(rec.folder)
                except OSError:
                    pass

                with open(os.path.join(rec.folder, filename),
                          'wb') as destiny:
                    # Copy the cached backup
                    if backup:
                        with open(backup) as cached:
                            shutil.copyfileobj(cached, destiny)
                    # Generate new backup
                    else:
                        db.dump_db(self.env.cr.dbname, destiny)
                        backup = backup or destiny.name
                successful |= rec

        # Ensure a local backup exists if we are going to write it remotely
        sftp = self.filtered(lambda r: r.method == "sftp")
        if sftp:
            if backup:
                cached = open(backup)
            else:
                cached = tempfile.TemporaryFile()
                db.dump_db(self.env.cr.dbname, cached)

            with cached:
                for rec in sftp:
                    with rec.backup_log():
                        with rec.sftp_connection() as remote:
                            # Directory must exist
                            try:
                                remote.makedirs(rec.folder)
                            except pysftp.ConnectionException:
                                pass

                            # Copy cached backup to remote server
                            with remote.open(
                                    os.path.join(rec.folder, filename),
                                    "wb") as destiny:
                                shutil.copyfileobj(cached, destiny)
                        successful |= rec

        # Remove old files for successful backups
        successful.cleanup()

    @api.model
    def action_backup_all(self):
        """Run all scheduled backups."""
        return self.search([]).action_backup()

    @api.multi
    @contextmanager
    def backup_log(self):
        """Log a backup result."""
        try:
            _logger.info("Starting database backup: %s", self.name)
            yield
        except:
            _logger.exception("Database backup failed: %s", self.name)
            escaped_tb = tools.html_escape(traceback.format_exc())
            self.message_post(
                "<p>%s</p><pre>%s</pre>" % (
                    _("Database backup failed."),
                    escaped_tb),
                subtype=self.env.ref("auto_backup.failure"))
        else:
            _logger.info("Database backup succeeded: %s", self.name)
            self.message_post(_("Database backup succeeded."))

    @api.multi
    def cleanup(self):
        """Clean up old backups."""
        now = datetime.now()
        for rec in self.filtered("days_to_keep"):
            with rec.cleanup_log():
                # Backup file names sort chronologically, so a plain
                # string comparison against the cutoff name suffices.
                oldest = self.filename(now - timedelta(days=rec.days_to_keep))

                if rec.method == "local":
                    for name in iglob(os.path.join(rec.folder,
                                                   "*.dump.zip")):
                        if os.path.basename(name) < oldest:
                            os.unlink(name)

                elif rec.method == "sftp":
                    with rec.sftp_connection() as remote:
                        for name in remote.listdir(rec.folder):
                            if (name.endswith(".dump.zip") and
                                    os.path.basename(name) < oldest):
                                remote.unlink(name)

    @api.multi
    @contextmanager
    def cleanup_log(self):
        """Log a possible cleanup failure."""
        try:
            _logger.info("Starting cleanup process after database backup: %s",
                         self.name)
            yield
        except:
            # NOTE(review): this "%s" has no argument, so the message is
            # logged literally; presumably self.name was intended.
            _logger.exception("Cleanup of old database backups failed: %s")
            escaped_tb = tools.html_escape(traceback.format_exc())
            self.message_post(
                "<p>%s</p><pre>%s</pre>" % (
                    _("Cleanup of old database backups failed."),
                    escaped_tb),
                subtype=self.env.ref("auto_backup.failure"))
        else:
            _logger.info("Cleanup of old database backups succeeded: %s",
                         self.name)

    @api.model
    def filename(self, when):
        """Generate a file name for a backup.

        :param datetime.datetime when:
            Use this datetime instead of :meth:`datetime.datetime.now`.
        """
        return "{:%Y_%m_%d_%H_%M_%S}.dump.zip".format(when)

    @api.multi
    def sftp_connection(self):
        """Return a new SFTP connection with found parameters."""
        params = {
            "host": self.sftp_host,
            "username": self.sftp_user,
            "port": self.sftp_port,
        }
        _logger.debug(
            "Trying to connect to sftp://%(username)s@%(host)s:%(port)d",
            extra=params)
        if self.sftp_private_key:
            params["private_key"] = self.sftp_private_key
            if self.sftp_password:
                params["private_key_pass"] = self.sftp_password
        else:
            params["password"] = self.sftp_password
        return pysftp.Connection(**params)
|
eezee-it/server-tools
|
auto_backup/models/db_backup.py
|
Python
|
agpl-3.0
| 10,250
| 0.000195
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Generated Fri Dec 2 15:05:18 2011 by generateDS.py version 2.7b.
#
import sys
import getopt
import re as re_
etree_ = None
Verbose_import_ = False
( XMLParser_import_none, XMLParser_import_lxml,
XMLParser_import_elementtree
) = range(3)
XMLParser_import_library = None
try:
# lxml
from lxml import etree as etree_
XMLParser_import_library = XMLParser_import_lxml
if Verbose_import_:
print("running with lxml.etree")
except ImportError:
try:
# cElementTree from Python 2.5+
import xml.etree.cElementTree as etree_
XMLParser_import_library = XMLParser_import_elementtree
if Verbose_import_:
print("running with cElementTree on Python 2.5+")
except ImportError:
try:
# ElementTree from Python 2.5+
import xml.etree.ElementTree as etree_
XMLParser_import_library = XMLParser_import_elementtree
if Verbose_import_:
print("running with ElementTree on Python 2.5+")
except ImportError:
try:
# normal cElementTree install
import cElementTree as etree_
XMLParser_import_library = XMLParser_import_elementtree
if Verbose_import_:
print("running with cElementTree")
except ImportError:
try:
# normal ElementTree install
import elementtree.ElementTree as etree_
XMLParser_import_library = XMLParser_import_elementtree
if Verbose_import_:
print("running with ElementTree")
except ImportError:
raise ImportError("Failed to import ElementTree from any known place")
def parsexml_(*args, **kwargs):
    """Parse an XML document, defaulting lxml to its ElementTree-compatible
    parser so that, e.g., comment nodes are ignored."""
    if XMLParser_import_library == XMLParser_import_lxml and 'parser' not in kwargs:
        kwargs['parser'] = etree_.ETCompatXMLParser()
    return etree_.parse(*args, **kwargs)
#
# User methods
#
# Calls to the methods in these classes are generated by generateDS.py.
# You can replace these methods by re-implementing the following class
# in a module named generatedssuper.py.
# Use the user-supplied override module when present; otherwise fall back
# to this minimal default implementation of the helpers that generated
# classes call for formatting and validation.
try:
    from generatedssuper import GeneratedsSuper
except ImportError, exp:

    class GeneratedsSuper(object):
        """Default formatting/validation helpers used by generated classes."""

        def gds_format_string(self, input_data, input_name=''):
            return input_data
        def gds_validate_string(self, input_data, node, input_name=''):
            return input_data
        def gds_format_integer(self, input_data, input_name=''):
            return '%d' % input_data
        def gds_validate_integer(self, input_data, node, input_name=''):
            return input_data
        def gds_format_integer_list(self, input_data, input_name=''):
            return '%s' % input_data
        def gds_validate_integer_list(self, input_data, node, input_name=''):
            # Each whitespace-separated token must parse as a number.
            values = input_data.split()
            for value in values:
                try:
                    fvalue = float(value)
                except (TypeError, ValueError), exp:
                    raise_parse_error(node, 'Requires sequence of integers')
            return input_data
        def gds_format_float(self, input_data, input_name=''):
            return '%f' % input_data
        def gds_validate_float(self, input_data, node, input_name=''):
            return input_data
        def gds_format_float_list(self, input_data, input_name=''):
            return '%s' % input_data
        def gds_validate_float_list(self, input_data, node, input_name=''):
            values = input_data.split()
            for value in values:
                try:
                    fvalue = float(value)
                except (TypeError, ValueError), exp:
                    raise_parse_error(node, 'Requires sequence of floats')
            return input_data
        def gds_format_double(self, input_data, input_name=''):
            return '%e' % input_data
        def gds_validate_double(self, input_data, node, input_name=''):
            return input_data
        def gds_format_double_list(self, input_data, input_name=''):
            return '%s' % input_data
        def gds_validate_double_list(self, input_data, node, input_name=''):
            values = input_data.split()
            for value in values:
                try:
                    fvalue = float(value)
                except (TypeError, ValueError), exp:
                    raise_parse_error(node, 'Requires sequence of doubles')
            return input_data
        def gds_format_boolean(self, input_data, input_name=''):
            return '%s' % input_data
        def gds_validate_boolean(self, input_data, node, input_name=''):
            return input_data
        def gds_format_boolean_list(self, input_data, input_name=''):
            return '%s' % input_data
        def gds_validate_boolean_list(self, input_data, node, input_name=''):
            values = input_data.split()
            for value in values:
                if value not in ('true', '1', 'false', '0', ):
                    raise_parse_error(node, 'Requires sequence of booleans ("true", "1", "false", "0")')
            return input_data
        def gds_str_lower(self, instring):
            return instring.lower()
        def get_path_(self, node):
            # Build an XPath-like /a/b/c path from the node's ancestry.
            path_list = []
            self.get_path_list_(node, path_list)
            path_list.reverse()
            path = '/'.join(path_list)
            return path
        # Strips the {namespace} prefix from an ElementTree tag.
        Tag_strip_pattern_ = re_.compile(r'\{.*\}')
        def get_path_list_(self, node, path_list):
            # Recursive helper: collect stripped tag names up to the root.
            if node is None:
                return
            tag = GeneratedsSuper.Tag_strip_pattern_.sub('', node.tag)
            if tag:
                path_list.append(tag)
            self.get_path_list_(node.getparent(), path_list)
        def get_class_obj_(self, node, default_class=None):
            # Honour an xsi:type attribute, falling back to default_class.
            class_obj1 = default_class
            if 'xsi' in node.nsmap:
                classname = node.get('{%s}type' % node.nsmap['xsi'])
                if classname is not None:
                    names = classname.split(':')
                    if len(names) == 2:
                        classname = names[1]
                    class_obj2 = globals().get(classname)
                    if class_obj2 is not None:
                        class_obj1 = class_obj2
            return class_obj1
        def gds_build_any(self, node, type_name=None):
            return None
#
# If you have installed IPython you can uncomment and use the following.
# IPython is available from http://ipython.scipy.org/.
#
## from IPython.Shell import IPShellEmbed
## args = ''
## ipshell = IPShellEmbed(args,
## banner = 'Dropping into IPython',
## exit_msg = 'Leaving Interpreter, back to program.')
# Then use the following line where and when you want to drop into the
# IPython shell:
# ipshell('<some message> -- Entering ipshell.\nHit Ctrl-D to exit')
#
# Globals
#
# Encoding used when exporting generated text.
ExternalEncoding = 'ascii'
# Splits an ElementTree tag into optional {namespace} and local-name parts.
Tag_pattern_ = re_.compile(r'({.*})?(.*)')
# Collapses runs of whitespace when cleaning strings.
String_cleanup_pat_ = re_.compile(r"[\n\r\s]+")
# Captures the namespace URI and local name from a '{uri}name' tag.
Namespace_extract_pat_ = re_.compile(r'{(.*)}(.*)')
#
# Support/utility functions.
#
def showIndent(outfile, level):
    """Write *level* four-space indentation units to *outfile*."""
    outfile.write('    ' * level)
def quote_xml(inStr):
    """Escape '&', '<' and '>' for use in XML text content.

    Falsy input (None, '', 0) yields the empty string.  Non-string input
    is first converted with '%s' formatting.

    BUG FIX: the escape replacements had been mangled into no-ops
    (e.g. ``replace('&', '&')``); restored to the standard XML entities.
    """
    if not inStr:
        return ''
    s1 = (isinstance(inStr, basestring) and inStr or
          '%s' % inStr)
    s1 = s1.replace('&', '&amp;')
    s1 = s1.replace('<', '&lt;')
    s1 = s1.replace('>', '&gt;')
    return s1
def quote_attrib(inStr):
    """Escape a value and wrap it in quotes for use as an XML attribute.

    Double quotes are used unless the value contains them; a value with
    both quote kinds falls back to double quotes with '&quot;' escapes.

    BUG FIX: restored the entity replacements that had been mangled into
    no-ops (``replace('&', '&')`` etc.).
    """
    s1 = (isinstance(inStr, basestring) and inStr or
          '%s' % inStr)
    s1 = s1.replace('&', '&amp;')
    s1 = s1.replace('<', '&lt;')
    s1 = s1.replace('>', '&gt;')
    if '"' in s1:
        if "'" in s1:
            s1 = '"%s"' % s1.replace('"', "&quot;")
        else:
            s1 = "'%s'" % s1
    else:
        s1 = '"%s"' % s1
    return s1
def quote_python(inStr):
    """Return *inStr* rendered as a Python source-code string literal.

    Picks single/double/triple quoting so the content needs minimal
    escaping; only embedded double quotes are backslash-escaped.
    """
    text = inStr
    if "'" not in text:
        return ("'''%s'''" if '\n' in text else "'%s'") % text
    if '"' in text:
        text = text.replace('"', '\\"')
    return ('"""%s"""' if '\n' in text else '"%s"') % text
def get_all_text_(node):
    """Concatenate a node's leading text with the tail text of its children."""
    parts = [node.text] if node.text is not None else []
    parts.extend(child.tail for child in node if child.tail is not None)
    return ''.join(parts)
def find_attr_value_(attr_name, node):
    """Look up an attribute on *node*, resolving 'prefix:name' via nsmap.

    Returns None when the attribute is absent, the prefix is unknown, or
    the name has more than one ':' separator.
    """
    attrs = node.attrib
    parts = attr_name.split(':')
    if len(parts) == 1:
        return attrs.get(attr_name)
    if len(parts) == 2:
        prefix, local = parts
        uri = node.nsmap.get(prefix)
        if uri is not None:
            return attrs.get('{%s}%s' % (uri, local))
    return None
class GDSParseError(Exception):
    """Raised for structural errors found while parsing generated XML."""
    pass
def raise_parse_error(node, msg):
    """Raise GDSParseError for *node*, adding the source line when lxml is in use."""
    if XMLParser_import_library == XMLParser_import_lxml:
        detail = '%s (element %s/line %d)' % (msg, node.tag, node.sourceline, )
    else:
        detail = '%s (element %s)' % (msg, node.tag, )
    raise GDSParseError(detail)
class MixedContainer:
    """Holder for one piece of mixed XML content.

    Each instance records a category (text, simple element, or complex
    element), a content type for simple values, the element name, and
    the value itself.  Used by generated classes to round-trip mixed
    content through export()/exportLiteral().
    """
    # Constants for category:
    CategoryNone = 0
    CategoryText = 1
    CategorySimple = 2
    CategoryComplex = 3
    # Constants for content_type:
    TypeNone = 0
    TypeText = 1
    TypeString = 2
    TypeInteger = 3
    TypeFloat = 4
    TypeDecimal = 5
    TypeDouble = 6
    TypeBoolean = 7
    def __init__(self, category, content_type, name, value):
        self.category = category
        self.content_type = content_type
        self.name = name
        self.value = value
    def getCategory(self):
        return self.category
    def getContenttype(self, content_type=None):
        # Fixed: the parameter was required but never used; it now has a
        # default so existing callers keep working and new callers can
        # omit it.
        return self.content_type
    def getValue(self):
        return self.value
    def getName(self):
        return self.name
    def export(self, outfile, level, name, namespace):
        """Write this piece of mixed content as XML to outfile."""
        if self.category == MixedContainer.CategoryText:
            # Prevent exporting empty content as empty lines.
            if self.value.strip():
                outfile.write(self.value)
        elif self.category == MixedContainer.CategorySimple:
            self.exportSimple(outfile, level, name)
        else:    # category == MixedContainer.CategoryComplex
            self.value.export(outfile, level, namespace, name)
    def exportSimple(self, outfile, level, name):
        """Write a simple-typed value as '<name>value</name>'."""
        if self.content_type == MixedContainer.TypeString:
            outfile.write('<%s>%s</%s>' % (self.name, self.value, self.name))
        elif self.content_type == MixedContainer.TypeInteger or \
                self.content_type == MixedContainer.TypeBoolean:
            outfile.write('<%s>%d</%s>' % (self.name, self.value, self.name))
        elif self.content_type == MixedContainer.TypeFloat or \
                self.content_type == MixedContainer.TypeDecimal:
            outfile.write('<%s>%f</%s>' % (self.name, self.value, self.name))
        elif self.content_type == MixedContainer.TypeDouble:
            outfile.write('<%s>%g</%s>' % (self.name, self.value, self.name))
    def exportLiteral(self, outfile, level, name):
        """Write a Python-literal (constructor-style) representation."""
        if self.category == MixedContainer.CategoryText:
            showIndent(outfile, level)
            outfile.write('model_.MixedContainer(%d, %d, "%s", "%s"),\n' % \
                (self.category, self.content_type, self.name, self.value))
        elif self.category == MixedContainer.CategorySimple:
            showIndent(outfile, level)
            outfile.write('model_.MixedContainer(%d, %d, "%s", "%s"),\n' % \
                (self.category, self.content_type, self.name, self.value))
        else:    # category == MixedContainer.CategoryComplex
            showIndent(outfile, level)
            outfile.write('model_.MixedContainer(%d, %d, "%s",\n' % \
                (self.category, self.content_type, self.name,))
            self.value.exportLiteral(outfile, level + 1)
            showIndent(outfile, level)
            outfile.write(')\n')
class MemberSpec_(object):
    """Metadata for one member of a generated class: its name, its XML
    schema data type (a plain string or a chain of strings), and whether
    it is a container (list-valued) member."""
    def __init__(self, name='', data_type='', container=0):
        self.name = name
        self.data_type = data_type
        self.container = container
    def set_name(self, name):
        self.name = name
    def get_name(self):
        return self.name
    def set_data_type(self, data_type):
        self.data_type = data_type
    def get_data_type_chain(self):
        # Returns the raw value, which may be a chain (list) of types.
        return self.data_type
    def get_data_type(self):
        """Return the effective type: the last element of a type chain,
        'xs:string' for an empty chain, or the value itself otherwise."""
        chain = self.data_type
        if not isinstance(chain, list):
            return chain
        return chain[-1] if chain else 'xs:string'
    def set_container(self, container):
        self.container = container
    def get_container(self):
        return self.container
def _cast(typ, value):
if typ is None or value is None:
return value
return typ(value)
#
# Data representation classes.
#
class EnvelopeType(GeneratedsSuper):
    """Root OVF descriptor type.

    Generated binding class: holds one optional References child, a list
    of Section children, one optional Content child, a list of Strings
    children, the xml:lang attribute, and any wildcard attributes.
    """
    # Assign a class to ``subclass`` to have factory() build that class
    # instead of EnvelopeType itself.
    subclass = None
    superclass = None
    def __init__(self, lang='en-US', References=None, Section=None, Content=None, Strings=None):
        self.lang = _cast(None, lang)
        self.References = References
        if Section is None:
            self.Section = []
        else:
            self.Section = Section
        self.Content = Content
        if Strings is None:
            self.Strings = []
        else:
            self.Strings = Strings
        # Wildcard (xs:anyAttribute) attributes captured from the XML node.
        self.anyAttributes_ = {}
    def factory(*args_, **kwargs_):
        if EnvelopeType.subclass:
            return EnvelopeType.subclass(*args_, **kwargs_)
        else:
            return EnvelopeType(*args_, **kwargs_)
    factory = staticmethod(factory)
    # Simple accessors for children and attributes.
    def get_References(self): return self.References
    def set_References(self, References): self.References = References
    def get_Section(self): return self.Section
    def set_Section(self, Section): self.Section = Section
    def add_Section(self, value): self.Section.append(value)
    def insert_Section(self, index, value): self.Section[index] = value
    def get_Content(self): return self.Content
    def set_Content(self, Content): self.Content = Content
    def get_Strings(self): return self.Strings
    def set_Strings(self, Strings): self.Strings = Strings
    def add_Strings(self, value): self.Strings.append(value)
    def insert_Strings(self, index, value): self.Strings[index] = value
    def get_lang(self): return self.lang
    def set_lang(self, lang): self.lang = lang
    def get_anyAttributes_(self): return self.anyAttributes_
    def set_anyAttributes_(self, anyAttributes_): self.anyAttributes_ = anyAttributes_
    def export(self, outfile, level, namespace_='ovf:', name_='EnvelopeType', namespacedef_=''):
        # Serialize this object as an XML element; a self-closing tag is
        # emitted when there are no children.
        showIndent(outfile, level)
        outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
        already_processed = []
        self.exportAttributes(outfile, level, already_processed, namespace_, name_='EnvelopeType')
        if self.hasContent_():
            outfile.write('>\n')
            self.exportChildren(outfile, level + 1, namespace_, name_)
            showIndent(outfile, level)
            outfile.write('</%s%s>\n' % (namespace_, name_))
        else:
            outfile.write('/>\n')
    def exportAttributes(self, outfile, level, already_processed, namespace_='ovf:', name_='EnvelopeType'):
        # Write the wildcard attributes first, re-qualifying namespaced
        # names, then the declared lang attribute.
        unique_counter = 0
        for name, value in self.anyAttributes_.items():
            xsinamespaceprefix = 'xsi'
            xsinamespace1 = 'http://www.w3.org/2001/XMLSchema-instance'
            xsinamespace2 = '{%s}' % (xsinamespace1, )
            if name.startswith(xsinamespace2):
                # Attribute in the xsi namespace: use the conventional
                # 'xsi' prefix.
                name1 = name[len(xsinamespace2):]
                name2 = '%s:%s' % (xsinamespaceprefix, name1, )
                if name2 not in already_processed:
                    already_processed.append(name2)
                    outfile.write(' %s=%s' % (name2, quote_attrib(value), ))
            else:
                mo = re_.match(Namespace_extract_pat_, name)
                if mo is not None:
                    # Attribute in some other namespace: declare a
                    # synthetic xmlns:yyyN prefix for it.
                    namespace, name = mo.group(1, 2)
                    if name not in already_processed:
                        already_processed.append(name)
                        if namespace == 'http://www.w3.org/XML/1998/namespace':
                            outfile.write(' %s=%s' % (name, quote_attrib(value), ))
                        else:
                            unique_counter += 1
                            outfile.write(' xmlns:yyy%d="%s"' % (unique_counter, namespace, ))
                            outfile.write(' yyy%d:%s=%s' % (unique_counter, name, quote_attrib(value), ))
                else:
                    if name not in already_processed:
                        already_processed.append(name)
                        outfile.write(' %s=%s' % (name, quote_attrib(value), ))
        if self.lang is not None and 'lang' not in already_processed:
            already_processed.append('lang')
            outfile.write(' lang=%s' % (self.gds_format_string(quote_attrib(self.lang).encode(ExternalEncoding), input_name='lang'), ))
    def exportChildren(self, outfile, level, namespace_='ovf:', name_='EnvelopeType', fromsubclass_=False):
        if self.References is not None:
            self.References.export(outfile, level, namespace_, name_='References', )
        for Section_ in self.Section:
            Section_.export(outfile, level, namespace_, name_='Section')
        if self.Content is not None:
            self.Content.export(outfile, level, namespace_, name_='Content', )
        for Strings_ in self.Strings:
            Strings_.export(outfile, level, namespace_, name_='Strings')
    def hasContent_(self):
        # True when at least one child element is present.
        if (
            self.References is not None or
            self.Section or
            self.Content is not None or
            self.Strings
            ):
            return True
        else:
            return False
    def exportLiteral(self, outfile, level, name_='EnvelopeType'):
        # Emit a Python-literal (constructor-style) representation.
        level += 1
        self.exportLiteralAttributes(outfile, level, [], name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)
    def exportLiteralAttributes(self, outfile, level, already_processed, name_):
        if self.lang is not None and 'lang' not in already_processed:
            already_processed.append('lang')
            showIndent(outfile, level)
            outfile.write('lang = "%s",\n' % (self.lang,))
        for name, value in self.anyAttributes_.items():
            showIndent(outfile, level)
            outfile.write('%s = "%s",\n' % (name, value,))
    def exportLiteralChildren(self, outfile, level, name_):
        if self.References is not None:
            showIndent(outfile, level)
            outfile.write('References=model_.References_Type(\n')
            self.References.exportLiteral(outfile, level, name_='References')
            showIndent(outfile, level)
            outfile.write('),\n')
        showIndent(outfile, level)
        outfile.write('Section=[\n')
        level += 1
        for Section_ in self.Section:
            showIndent(outfile, level)
            outfile.write('model_.Section(\n')
            Section_.exportLiteral(outfile, level)
            showIndent(outfile, level)
            outfile.write('),\n')
        level -= 1
        showIndent(outfile, level)
        outfile.write('],\n')
        if self.Content is not None:
            showIndent(outfile, level)
            outfile.write('Content=model_.Content(\n')
            self.Content.exportLiteral(outfile, level)
            showIndent(outfile, level)
            outfile.write('),\n')
        showIndent(outfile, level)
        outfile.write('Strings=[\n')
        level += 1
        for Strings_ in self.Strings:
            showIndent(outfile, level)
            outfile.write('model_.Strings_Type(\n')
            Strings_.exportLiteral(outfile, level, name_='Strings_Type')
            showIndent(outfile, level)
            outfile.write('),\n')
        level -= 1
        showIndent(outfile, level)
        outfile.write('],\n')
    def build(self, node):
        # Populate this object from a parsed XML node.
        self.buildAttributes(node, node.attrib, [])
        for child in node:
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_)
    def buildAttributes(self, node, attrs, already_processed):
        value = find_attr_value_('lang', node)
        if value is not None and 'lang' not in already_processed:
            already_processed.append('lang')
            self.lang = value
        # Everything not explicitly handled goes into anyAttributes_.
        self.anyAttributes_ = {}
        for name, value in attrs.items():
            if name not in already_processed:
                self.anyAttributes_[name] = value
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        if nodeName_ == 'References':
            obj_ = References_Type.factory()
            obj_.build(child_)
            self.set_References(obj_)
        elif nodeName_ == 'Section':
            # Section/Content are polymorphic: pick the concrete class
            # via get_class_obj_ (which inspects xsi:type).
            class_obj_ = self.get_class_obj_(child_, Section_Type)
            obj_ = class_obj_.factory()
            obj_.build(child_)
            self.Section.append(obj_)
        elif nodeName_ == 'Content':
            class_obj_ = self.get_class_obj_(child_, Content_Type)
            obj_ = class_obj_.factory()
            obj_.build(child_)
            self.set_Content(obj_)
        elif nodeName_ == 'Strings':
            obj_ = Strings_Type.factory()
            obj_.build(child_)
            self.Strings.append(obj_)
# end class EnvelopeType
class References_Type(GeneratedsSuper):
    """Type for list of external resources.

    Holds the File children of the OVF Envelope's References element,
    plus any wildcard (xs:any) children and wildcard attributes.
    """
    subclass = None
    superclass = None
    def __init__(self, File=None, anytypeobjs_=None):
        if File is None:
            self.File = []
        else:
            self.File = File
        if anytypeobjs_ is None:
            self.anytypeobjs_ = []
        else:
            self.anytypeobjs_ = anytypeobjs_
        self.anyAttributes_ = {}
    def factory(*args_, **kwargs_):
        if References_Type.subclass:
            return References_Type.subclass(*args_, **kwargs_)
        else:
            return References_Type(*args_, **kwargs_)
    factory = staticmethod(factory)
    def get_File(self): return self.File
    def set_File(self, File): self.File = File
    def add_File(self, value): self.File.append(value)
    def insert_File(self, index, value): self.File[index] = value
    def get_anytypeobjs_(self): return self.anytypeobjs_
    def set_anytypeobjs_(self, anytypeobjs_): self.anytypeobjs_ = anytypeobjs_
    def add_anytypeobjs_(self, value): self.anytypeobjs_.append(value)
    # Fixed: previously wrote to nonexistent self._anytypeobjs_, raising
    # AttributeError on every call.
    def insert_anytypeobjs_(self, index, value): self.anytypeobjs_[index] = value
    def get_anyAttributes_(self): return self.anyAttributes_
    def set_anyAttributes_(self, anyAttributes_): self.anyAttributes_ = anyAttributes_
    def export(self, outfile, level, namespace_='ovf:', name_='References_Type', namespacedef_=''):
        # Serialize as XML; self-closing tag when there are no children.
        showIndent(outfile, level)
        outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
        already_processed = []
        self.exportAttributes(outfile, level, already_processed, namespace_, name_='References_Type')
        if self.hasContent_():
            outfile.write('>\n')
            self.exportChildren(outfile, level + 1, namespace_, name_)
            showIndent(outfile, level)
            outfile.write('</%s%s>\n' % (namespace_, name_))
        else:
            outfile.write('/>\n')
    def exportAttributes(self, outfile, level, already_processed, namespace_='ovf:', name_='References_Type'):
        # Write wildcard attributes, re-qualifying namespaced names.
        unique_counter = 0
        for name, value in self.anyAttributes_.items():
            xsinamespaceprefix = 'xsi'
            xsinamespace1 = 'http://www.w3.org/2001/XMLSchema-instance'
            xsinamespace2 = '{%s}' % (xsinamespace1, )
            if name.startswith(xsinamespace2):
                # xsi-namespace attribute: use the conventional prefix.
                name1 = name[len(xsinamespace2):]
                name2 = '%s:%s' % (xsinamespaceprefix, name1, )
                if name2 not in already_processed:
                    already_processed.append(name2)
                    outfile.write(' %s=%s' % (name2, quote_attrib(value), ))
            else:
                mo = re_.match(Namespace_extract_pat_, name)
                if mo is not None:
                    # Other-namespace attribute: declare a synthetic
                    # xmlns:yyyN prefix for it.
                    namespace, name = mo.group(1, 2)
                    if name not in already_processed:
                        already_processed.append(name)
                        if namespace == 'http://www.w3.org/XML/1998/namespace':
                            outfile.write(' %s=%s' % (name, quote_attrib(value), ))
                        else:
                            unique_counter += 1
                            outfile.write(' xmlns:yyy%d="%s"' % (unique_counter, namespace, ))
                            outfile.write(' yyy%d:%s=%s' % (unique_counter, name, quote_attrib(value), ))
                else:
                    if name not in already_processed:
                        already_processed.append(name)
                        outfile.write(' %s=%s' % (name, quote_attrib(value), ))
    def exportChildren(self, outfile, level, namespace_='ovf:', name_='References_Type', fromsubclass_=False):
        for File_ in self.File:
            File_.export(outfile, level, namespace_, name_='File')
        for obj_ in self.anytypeobjs_:
            obj_.export(outfile, level, namespace_)
    def hasContent_(self):
        # True when at least one child element is present.
        if (
            self.File or
            self.anytypeobjs_
            ):
            return True
        else:
            return False
    def exportLiteral(self, outfile, level, name_='References_Type'):
        # Emit a Python-literal (constructor-style) representation.
        level += 1
        self.exportLiteralAttributes(outfile, level, [], name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)
    def exportLiteralAttributes(self, outfile, level, already_processed, name_):
        for name, value in self.anyAttributes_.items():
            showIndent(outfile, level)
            outfile.write('%s = "%s",\n' % (name, value,))
    def exportLiteralChildren(self, outfile, level, name_):
        showIndent(outfile, level)
        outfile.write('File=[\n')
        level += 1
        for File_ in self.File:
            showIndent(outfile, level)
            outfile.write('model_.File_Type(\n')
            File_.exportLiteral(outfile, level, name_='File_Type')
            showIndent(outfile, level)
            outfile.write('),\n')
        level -= 1
        showIndent(outfile, level)
        outfile.write('],\n')
        showIndent(outfile, level)
        outfile.write('anytypeobjs_=[\n')
        level += 1
        for anytypeobjs_ in self.anytypeobjs_:
            anytypeobjs_.exportLiteral(outfile, level)
        level -= 1
        showIndent(outfile, level)
        outfile.write('],\n')
    def build(self, node):
        # Populate this object from a parsed XML node.
        self.buildAttributes(node, node.attrib, [])
        for child in node:
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_)
    def buildAttributes(self, node, attrs, already_processed):
        self.anyAttributes_ = {}
        for name, value in attrs.items():
            if name not in already_processed:
                self.anyAttributes_[name] = value
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        if nodeName_ == 'File':
            obj_ = File_Type.factory()
            obj_.build(child_)
            self.File.append(obj_)
        else:
            # Unknown children go through the xs:any handler.
            obj_ = self.gds_build_any(child_, 'References_Type')
            if obj_ is not None:
                self.add_anytypeobjs_(obj_)
# end class References_Type
class File_Type(GeneratedsSuper):
    """Type for an external reference to a resource.

    Attribute meanings (from the OVF schema): ``id`` is the reference
    key used in other parts of the package; ``href`` is the location of
    the external resource; ``size`` is the size in bytes of the file
    (if known); ``compression`` is the compression type (gzip, bzip2,
    or none if empty or not specified); ``chunkSize`` is the chunk size
    (except for the last chunk).
    """
    subclass = None
    superclass = None
    def __init__(self, compression='', href=None, chunkSize=None, id=None, size=None, anytypeobjs_=None):
        self.compression = _cast(None, compression)
        self.href = _cast(None, href)
        self.chunkSize = _cast(int, chunkSize)
        self.id = _cast(None, id)
        self.size = _cast(int, size)
        if anytypeobjs_ is None:
            self.anytypeobjs_ = []
        else:
            self.anytypeobjs_ = anytypeobjs_
        self.anyAttributes_ = {}
    def factory(*args_, **kwargs_):
        if File_Type.subclass:
            return File_Type.subclass(*args_, **kwargs_)
        else:
            return File_Type(*args_, **kwargs_)
    factory = staticmethod(factory)
    def get_anytypeobjs_(self): return self.anytypeobjs_
    def set_anytypeobjs_(self, anytypeobjs_): self.anytypeobjs_ = anytypeobjs_
    def add_anytypeobjs_(self, value): self.anytypeobjs_.append(value)
    # Fixed: previously wrote to nonexistent self._anytypeobjs_, raising
    # AttributeError on every call.
    def insert_anytypeobjs_(self, index, value): self.anytypeobjs_[index] = value
    def get_compression(self): return self.compression
    def set_compression(self, compression): self.compression = compression
    def get_href(self): return self.href
    def set_href(self, href): self.href = href
    def get_chunkSize(self): return self.chunkSize
    def set_chunkSize(self, chunkSize): self.chunkSize = chunkSize
    def get_id(self): return self.id
    def set_id(self, id): self.id = id
    def get_size(self): return self.size
    def set_size(self, size): self.size = size
    def get_anyAttributes_(self): return self.anyAttributes_
    def set_anyAttributes_(self, anyAttributes_): self.anyAttributes_ = anyAttributes_
    def export(self, outfile, level, namespace_='ovf:', name_='File_Type', namespacedef_=''):
        # Serialize as XML; self-closing tag when there are no children.
        showIndent(outfile, level)
        outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
        already_processed = []
        self.exportAttributes(outfile, level, already_processed, namespace_, name_='File_Type')
        if self.hasContent_():
            outfile.write('>\n')
            self.exportChildren(outfile, level + 1, namespace_, name_)
            showIndent(outfile, level)
            outfile.write('</%s%s>\n' % (namespace_, name_))
        else:
            outfile.write('/>\n')
    def exportAttributes(self, outfile, level, already_processed, namespace_='ovf:', name_='File_Type'):
        # Wildcard attributes first (re-qualifying namespaced names),
        # then the declared attributes.
        unique_counter = 0
        for name, value in self.anyAttributes_.items():
            xsinamespaceprefix = 'xsi'
            xsinamespace1 = 'http://www.w3.org/2001/XMLSchema-instance'
            xsinamespace2 = '{%s}' % (xsinamespace1, )
            if name.startswith(xsinamespace2):
                # xsi-namespace attribute: use the conventional prefix.
                name1 = name[len(xsinamespace2):]
                name2 = '%s:%s' % (xsinamespaceprefix, name1, )
                if name2 not in already_processed:
                    already_processed.append(name2)
                    outfile.write(' %s=%s' % (name2, quote_attrib(value), ))
            else:
                mo = re_.match(Namespace_extract_pat_, name)
                if mo is not None:
                    # Other-namespace attribute: declare a synthetic
                    # xmlns:yyyN prefix for it.
                    namespace, name = mo.group(1, 2)
                    if name not in already_processed:
                        already_processed.append(name)
                        if namespace == 'http://www.w3.org/XML/1998/namespace':
                            outfile.write(' %s=%s' % (name, quote_attrib(value), ))
                        else:
                            unique_counter += 1
                            outfile.write(' xmlns:yyy%d="%s"' % (unique_counter, namespace, ))
                            outfile.write(' yyy%d:%s=%s' % (unique_counter, name, quote_attrib(value), ))
                else:
                    if name not in already_processed:
                        already_processed.append(name)
                        outfile.write(' %s=%s' % (name, quote_attrib(value), ))
        if self.compression is not None and 'compression' not in already_processed:
            already_processed.append('compression')
            outfile.write(' compression=%s' % (self.gds_format_string(quote_attrib(self.compression).encode(ExternalEncoding), input_name='compression'), ))
        if self.href is not None and 'href' not in already_processed:
            already_processed.append('href')
            outfile.write(' href=%s' % (self.gds_format_string(quote_attrib(self.href).encode(ExternalEncoding), input_name='href'), ))
        if self.chunkSize is not None and 'chunkSize' not in already_processed:
            already_processed.append('chunkSize')
            outfile.write(' chunkSize="%s"' % self.gds_format_integer(self.chunkSize, input_name='chunkSize'))
        if self.id is not None and 'id' not in already_processed:
            already_processed.append('id')
            outfile.write(' id=%s' % (self.gds_format_string(quote_attrib(self.id).encode(ExternalEncoding), input_name='id'), ))
        if self.size is not None and 'size' not in already_processed:
            already_processed.append('size')
            outfile.write(' size="%s"' % self.gds_format_integer(self.size, input_name='size'))
    def exportChildren(self, outfile, level, namespace_='ovf:', name_='File_Type', fromsubclass_=False):
        for obj_ in self.anytypeobjs_:
            obj_.export(outfile, level, namespace_)
    def hasContent_(self):
        # Only wildcard children count; all declared data is attributes.
        if (
            self.anytypeobjs_
            ):
            return True
        else:
            return False
    def exportLiteral(self, outfile, level, name_='File_Type'):
        # Emit a Python-literal (constructor-style) representation.
        level += 1
        self.exportLiteralAttributes(outfile, level, [], name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)
    def exportLiteralAttributes(self, outfile, level, already_processed, name_):
        if self.compression is not None and 'compression' not in already_processed:
            already_processed.append('compression')
            showIndent(outfile, level)
            outfile.write('compression = "%s",\n' % (self.compression,))
        if self.href is not None and 'href' not in already_processed:
            already_processed.append('href')
            showIndent(outfile, level)
            outfile.write('href = "%s",\n' % (self.href,))
        if self.chunkSize is not None and 'chunkSize' not in already_processed:
            already_processed.append('chunkSize')
            showIndent(outfile, level)
            outfile.write('chunkSize = %d,\n' % (self.chunkSize,))
        if self.id is not None and 'id' not in already_processed:
            already_processed.append('id')
            showIndent(outfile, level)
            outfile.write('id = "%s",\n' % (self.id,))
        if self.size is not None and 'size' not in already_processed:
            already_processed.append('size')
            showIndent(outfile, level)
            outfile.write('size = %d,\n' % (self.size,))
        for name, value in self.anyAttributes_.items():
            showIndent(outfile, level)
            outfile.write('%s = "%s",\n' % (name, value,))
    def exportLiteralChildren(self, outfile, level, name_):
        showIndent(outfile, level)
        outfile.write('anytypeobjs_=[\n')
        level += 1
        for anytypeobjs_ in self.anytypeobjs_:
            anytypeobjs_.exportLiteral(outfile, level)
        level -= 1
        showIndent(outfile, level)
        outfile.write('],\n')
    def build(self, node):
        # Populate this object from a parsed XML node.
        self.buildAttributes(node, node.attrib, [])
        for child in node:
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_)
    def buildAttributes(self, node, attrs, already_processed):
        value = find_attr_value_('compression', node)
        if value is not None and 'compression' not in already_processed:
            already_processed.append('compression')
            self.compression = value
        value = find_attr_value_('href', node)
        if value is not None and 'href' not in already_processed:
            already_processed.append('href')
            self.href = value
        value = find_attr_value_('chunkSize', node)
        if value is not None and 'chunkSize' not in already_processed:
            already_processed.append('chunkSize')
            try:
                self.chunkSize = int(value)
            # 'except ... as' replaces the Python-2-only comma form; it is
            # valid on Python 2.6+ and required on Python 3.
            except ValueError as exp:
                raise_parse_error(node, 'Bad integer attribute: %s' % exp)
        value = find_attr_value_('id', node)
        if value is not None and 'id' not in already_processed:
            already_processed.append('id')
            self.id = value
        value = find_attr_value_('size', node)
        if value is not None and 'size' not in already_processed:
            already_processed.append('size')
            try:
                self.size = int(value)
            except ValueError as exp:
                raise_parse_error(node, 'Bad integer attribute: %s' % exp)
        self.anyAttributes_ = {}
        for name, value in attrs.items():
            if name not in already_processed:
                self.anyAttributes_[name] = value
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        # All children are wildcard (xs:any) content.
        obj_ = self.gds_build_any(child_, 'File_Type')
        if obj_ is not None:
            self.add_anytypeobjs_(obj_)
# end class File_Type
class Content_Type(GeneratedsSuper):
    """Base class for content.

    Generated binding for the abstract OVF Content element: an ``id``
    attribute, one optional Info and Name child each, a list of Section
    children, wildcard attributes, and an xsi:type extension marker used
    to round-trip the concrete subtype.
    """
    # Assign a class to ``subclass`` to have factory() build that class
    # instead of Content_Type itself.
    subclass = None
    superclass = None
    def __init__(self, id=None, Info=None, Name=None, Section=None, extensiontype_=None):
        self.id = _cast(None, id)
        self.Info = Info
        self.Name = Name
        if Section is None:
            self.Section = []
        else:
            self.Section = Section
        # Wildcard (xs:anyAttribute) attributes captured from the XML node.
        self.anyAttributes_ = {}
        self.extensiontype_ = extensiontype_
    def factory(*args_, **kwargs_):
        if Content_Type.subclass:
            return Content_Type.subclass(*args_, **kwargs_)
        else:
            return Content_Type(*args_, **kwargs_)
    factory = staticmethod(factory)
    # Simple accessors for children and attributes.
    def get_Info(self): return self.Info
    def set_Info(self, Info): self.Info = Info
    def get_Name(self): return self.Name
    def set_Name(self, Name): self.Name = Name
    def get_Section(self): return self.Section
    def set_Section(self, Section): self.Section = Section
    def add_Section(self, value): self.Section.append(value)
    def insert_Section(self, index, value): self.Section[index] = value
    def get_id(self): return self.id
    def set_id(self, id): self.id = id
    def get_anyAttributes_(self): return self.anyAttributes_
    def set_anyAttributes_(self, anyAttributes_): self.anyAttributes_ = anyAttributes_
    def get_extensiontype_(self): return self.extensiontype_
    def set_extensiontype_(self, extensiontype_): self.extensiontype_ = extensiontype_
    def export(self, outfile, level, namespace_='ovf:', name_='Content_Type', namespacedef_=''):
        # Serialize this object as an XML element; a self-closing tag is
        # emitted when there are no children.
        showIndent(outfile, level)
        outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
        already_processed = []
        self.exportAttributes(outfile, level, already_processed, namespace_, name_='Content_Type')
        if self.hasContent_():
            outfile.write('>\n')
            self.exportChildren(outfile, level + 1, namespace_, name_)
            showIndent(outfile, level)
            outfile.write('</%s%s>\n' % (namespace_, name_))
        else:
            outfile.write('/>\n')
    def exportAttributes(self, outfile, level, already_processed, namespace_='ovf:', name_='Content_Type'):
        # Wildcard attributes first, then id, then the xsi:type marker.
        unique_counter = 0
        for name, value in self.anyAttributes_.items():
            xsinamespaceprefix = 'xsi'
            xsinamespace1 = 'http://www.w3.org/2001/XMLSchema-instance'
            xsinamespace2 = '{%s}' % (xsinamespace1, )
            if name.startswith(xsinamespace2):
                # xsi-namespace attribute: use the conventional prefix.
                name1 = name[len(xsinamespace2):]
                name2 = '%s:%s' % (xsinamespaceprefix, name1, )
                if name2 not in already_processed:
                    already_processed.append(name2)
                    outfile.write(' %s=%s' % (name2, quote_attrib(value), ))
            else:
                mo = re_.match(Namespace_extract_pat_, name)
                if mo is not None:
                    # Other-namespace attribute: declare a synthetic
                    # xmlns:yyyN prefix for it.
                    namespace, name = mo.group(1, 2)
                    if name not in already_processed:
                        already_processed.append(name)
                        if namespace == 'http://www.w3.org/XML/1998/namespace':
                            outfile.write(' %s=%s' % (name, quote_attrib(value), ))
                        else:
                            unique_counter += 1
                            outfile.write(' xmlns:yyy%d="%s"' % (unique_counter, namespace, ))
                            outfile.write(' yyy%d:%s=%s' % (unique_counter, name, quote_attrib(value), ))
                else:
                    if name not in already_processed:
                        already_processed.append(name)
                        outfile.write(' %s=%s' % (name, quote_attrib(value), ))
        if self.id is not None and 'id' not in already_processed:
            already_processed.append('id')
            outfile.write(' id=%s' % (self.gds_format_string(quote_attrib(self.id).encode(ExternalEncoding), input_name='id'), ))
        if self.extensiontype_ is not None and 'xsi:type' not in already_processed:
            already_processed.append('xsi:type')
            outfile.write(' xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"')
            outfile.write(' xsi:type="%s"' % self.extensiontype_)
    def exportChildren(self, outfile, level, namespace_='ovf:', name_='Content_Type', fromsubclass_=False):
        if self.Info is not None:
            self.Info.export(outfile, level, namespace_, name_='Info', )
        if self.Name is not None:
            self.Name.export(outfile, level, namespace_, name_='Name')
        for Section_ in self.Section:
            Section_.export(outfile, level, namespace_, name_='Section')
    def hasContent_(self):
        # True when at least one child element is present.
        if (
            self.Info is not None or
            self.Name is not None or
            self.Section
            ):
            return True
        else:
            return False
    def exportLiteral(self, outfile, level, name_='Content_Type'):
        # Emit a Python-literal (constructor-style) representation.
        level += 1
        self.exportLiteralAttributes(outfile, level, [], name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)
    def exportLiteralAttributes(self, outfile, level, already_processed, name_):
        if self.id is not None and 'id' not in already_processed:
            already_processed.append('id')
            showIndent(outfile, level)
            outfile.write('id = "%s",\n' % (self.id,))
        for name, value in self.anyAttributes_.items():
            showIndent(outfile, level)
            outfile.write('%s = "%s",\n' % (name, value,))
    def exportLiteralChildren(self, outfile, level, name_):
        if self.Info is not None:
            showIndent(outfile, level)
            outfile.write('Info=model_.Msg_Type(\n')
            self.Info.exportLiteral(outfile, level, name_='Info')
            showIndent(outfile, level)
            outfile.write('),\n')
        if self.Name is not None:
            showIndent(outfile, level)
            outfile.write('Name=model_.Msg_Type(\n')
            self.Name.exportLiteral(outfile, level, name_='Name')
            showIndent(outfile, level)
            outfile.write('),\n')
        showIndent(outfile, level)
        outfile.write('Section=[\n')
        level += 1
        for Section_ in self.Section:
            showIndent(outfile, level)
            outfile.write('model_.Section(\n')
            Section_.exportLiteral(outfile, level)
            showIndent(outfile, level)
            outfile.write('),\n')
        level -= 1
        showIndent(outfile, level)
        outfile.write('],\n')
    def build(self, node):
        # Populate this object from a parsed XML node.
        self.buildAttributes(node, node.attrib, [])
        for child in node:
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_)
    def buildAttributes(self, node, attrs, already_processed):
        value = find_attr_value_('id', node)
        if value is not None and 'id' not in already_processed:
            already_processed.append('id')
            self.id = value
        self.anyAttributes_ = {}
        for name, value in attrs.items():
            if name not in already_processed:
                self.anyAttributes_[name] = value
        # Remember the declared xsi:type so the concrete subtype can be
        # re-emitted on export.
        value = find_attr_value_('xsi:type', node)
        if value is not None and 'xsi:type' not in already_processed:
            already_processed.append('xsi:type')
            self.extensiontype_ = value
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        if nodeName_ == 'Info':
            obj_ = Msg_Type.factory()
            obj_.build(child_)
            self.set_Info(obj_)
        elif nodeName_ == 'Name':
            obj_ = Msg_Type.factory()
            obj_.build(child_)
            self.set_Name(obj_)
        elif nodeName_ == 'Section':
            # Section is polymorphic: pick the concrete class via
            # get_class_obj_ (which inspects xsi:type).
            class_obj_ = self.get_class_obj_(child_, Section_Type)
            obj_ = class_obj_.factory()
            obj_.build(child_)
            self.Section.append(obj_)
# end class Content_Type
class VirtualSystem_Type(Content_Type):
    """Content describing a virtual system.

    A thin specialization of Content_Type: it adds no members of its own
    and delegates all serialization and parsing to the base class.
    """
    subclass = None
    superclass = Content_Type
    def __init__(self, id=None, Info=None, Name=None, Section=None):
        super(VirtualSystem_Type, self).__init__(id, Info, Name, Section, )
    def factory(*args_, **kwargs_):
        # Route creation through a registered subclass when one is set.
        target = VirtualSystem_Type.subclass or VirtualSystem_Type
        return target(*args_, **kwargs_)
    factory = staticmethod(factory)
    def export(self, outfile, level, namespace_='ovf:', name_='VirtualSystem_Type', namespacedef_=''):
        # Serialize as XML; early-out with a self-closing tag when empty.
        showIndent(outfile, level)
        outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
        already_processed = []
        self.exportAttributes(outfile, level, already_processed, namespace_, name_='VirtualSystem_Type')
        if not self.hasContent_():
            outfile.write('/>\n')
            return
        outfile.write('>\n')
        self.exportChildren(outfile, level + 1, namespace_, name_)
        showIndent(outfile, level)
        outfile.write('</%s%s>\n' % (namespace_, name_))
    def exportAttributes(self, outfile, level, already_processed, namespace_='ovf:', name_='VirtualSystem_Type'):
        super(VirtualSystem_Type, self).exportAttributes(outfile, level, already_processed, namespace_, name_='VirtualSystem_Type')
    def exportChildren(self, outfile, level, namespace_='ovf:', name_='VirtualSystem_Type', fromsubclass_=False):
        super(VirtualSystem_Type, self).exportChildren(outfile, level, namespace_, name_, True)
    def hasContent_(self):
        # No members of its own; content is whatever the base class has.
        return super(VirtualSystem_Type, self).hasContent_()
    def exportLiteral(self, outfile, level, name_='VirtualSystem_Type'):
        level += 1
        self.exportLiteralAttributes(outfile, level, [], name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)
    def exportLiteralAttributes(self, outfile, level, already_processed, name_):
        super(VirtualSystem_Type, self).exportLiteralAttributes(outfile, level, already_processed, name_)
    def exportLiteralChildren(self, outfile, level, name_):
        super(VirtualSystem_Type, self).exportLiteralChildren(outfile, level, name_)
    def build(self, node):
        self.buildAttributes(node, node.attrib, [])
        for child_node in node:
            tag = Tag_pattern_.match(child_node.tag).groups()[-1]
            self.buildChildren(child_node, node, tag)
    def buildAttributes(self, node, attrs, already_processed):
        super(VirtualSystem_Type, self).buildAttributes(node, attrs, already_processed)
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        super(VirtualSystem_Type, self).buildChildren(child_, node, nodeName_, True)
# end class VirtualSystem_Type
class VirtualSystemCollection_Type(Content_Type):
    """A collection of Content."""
    # Generated binding class: wraps a list of nested Content_Type children in
    # self.Content and delegates shared id/Info/Name/Section handling to
    # Content_Type. `subclass` allows callers to register a replacement class
    # that factory() will instantiate instead of this one.
    subclass = None
    superclass = Content_Type
    def __init__(self, id=None, Info=None, Name=None, Section=None, Content=None):
        super(VirtualSystemCollection_Type, self).__init__(id, Info, Name, Section, )
        # NOTE: a caller-supplied list is stored by reference (generator idiom).
        if Content is None:
            self.Content = []
        else:
            self.Content = Content
    def factory(*args_, **kwargs_):
        # Instantiate the registered subclass if one was installed.
        if VirtualSystemCollection_Type.subclass:
            return VirtualSystemCollection_Type.subclass(*args_, **kwargs_)
        else:
            return VirtualSystemCollection_Type(*args_, **kwargs_)
    factory = staticmethod(factory)
    def get_Content(self): return self.Content
    def set_Content(self, Content): self.Content = Content
    def add_Content(self, value): self.Content.append(value)
    def insert_Content(self, index, value): self.Content[index] = value
    def export(self, outfile, level, namespace_='ovf:', name_='VirtualSystemCollection_Type', namespacedef_=''):
        """Serialize this object as XML to `outfile` at the given indent level."""
        showIndent(outfile, level)
        outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
        already_processed = []
        self.exportAttributes(outfile, level, already_processed, namespace_, name_='VirtualSystemCollection_Type')
        if self.hasContent_():
            outfile.write('>\n')
            self.exportChildren(outfile, level + 1, namespace_, name_)
            showIndent(outfile, level)
            outfile.write('</%s%s>\n' % (namespace_, name_))
        else:
            # Self-closing element when there are no children.
            outfile.write('/>\n')
    def exportAttributes(self, outfile, level, already_processed, namespace_='ovf:', name_='VirtualSystemCollection_Type'):
        super(VirtualSystemCollection_Type, self).exportAttributes(outfile, level, already_processed, namespace_, name_='VirtualSystemCollection_Type')
    def exportChildren(self, outfile, level, namespace_='ovf:', name_='VirtualSystemCollection_Type', fromsubclass_=False):
        super(VirtualSystemCollection_Type, self).exportChildren(outfile, level, namespace_, name_, True)
        for Content_ in self.Content:
            Content_.export(outfile, level, namespace_, name_='Content')
    def hasContent_(self):
        if (
            self.Content or
            super(VirtualSystemCollection_Type, self).hasContent_()
            ):
            return True
        else:
            return False
    def exportLiteral(self, outfile, level, name_='VirtualSystemCollection_Type'):
        """Write this object as Python constructor-style literal text."""
        level += 1
        self.exportLiteralAttributes(outfile, level, [], name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)
    def exportLiteralAttributes(self, outfile, level, already_processed, name_):
        super(VirtualSystemCollection_Type, self).exportLiteralAttributes(outfile, level, already_processed, name_)
    def exportLiteralChildren(self, outfile, level, name_):
        super(VirtualSystemCollection_Type, self).exportLiteralChildren(outfile, level, name_)
        showIndent(outfile, level)
        outfile.write('Content=[\n')
        level += 1
        for Content_ in self.Content:
            showIndent(outfile, level)
            outfile.write('model_.Content(\n')
            Content_.exportLiteral(outfile, level)
            showIndent(outfile, level)
            outfile.write('),\n')
        level -= 1
        showIndent(outfile, level)
        outfile.write('],\n')
    def build(self, node):
        """Populate this object from an ElementTree-style XML node."""
        self.buildAttributes(node, node.attrib, [])
        for child in node:
            # Strip any '{namespace}' prefix from the tag to get the local name.
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_)
    def buildAttributes(self, node, attrs, already_processed):
        super(VirtualSystemCollection_Type, self).buildAttributes(node, attrs, already_processed)
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        if nodeName_ == 'Content':
            # Content is polymorphic: resolve the concrete subclass (via
            # xsi:type) before instantiating.
            class_obj_ = self.get_class_obj_(child_, Content_Type)
            obj_ = class_obj_.factory()
            obj_.build(child_)
            self.Content.append(obj_)
        super(VirtualSystemCollection_Type, self).buildChildren(child_, node, nodeName_, True)
# end class VirtualSystemCollection_Type
class Strings_Type(GeneratedsSuper):
    """Type for string resource bundleLocale for this string resource
    bundleReference to external resource bundle"""
    # Generated binding class for an OVF string resource bundle: a list of Msg
    # children plus `lang` (locale) and `fileRef` (external bundle reference)
    # attributes. Unrecognized XML attributes are preserved in anyAttributes_.
    subclass = None
    superclass = None
    def __init__(self, lang=None, fileRef=None, Msg=None):
        self.lang = _cast(None, lang)
        self.fileRef = _cast(None, fileRef)
        # NOTE: a caller-supplied list is stored by reference (generator idiom).
        if Msg is None:
            self.Msg = []
        else:
            self.Msg = Msg
        self.anyAttributes_ = {}
    def factory(*args_, **kwargs_):
        # Instantiate the registered subclass if one was installed.
        if Strings_Type.subclass:
            return Strings_Type.subclass(*args_, **kwargs_)
        else:
            return Strings_Type(*args_, **kwargs_)
    factory = staticmethod(factory)
    def get_Msg(self): return self.Msg
    def set_Msg(self, Msg): self.Msg = Msg
    def add_Msg(self, value): self.Msg.append(value)
    def insert_Msg(self, index, value): self.Msg[index] = value
    def get_lang(self): return self.lang
    def set_lang(self, lang): self.lang = lang
    def get_fileRef(self): return self.fileRef
    def set_fileRef(self, fileRef): self.fileRef = fileRef
    def get_anyAttributes_(self): return self.anyAttributes_
    def set_anyAttributes_(self, anyAttributes_): self.anyAttributes_ = anyAttributes_
    def export(self, outfile, level, namespace_='ovf:', name_='Strings_Type', namespacedef_=''):
        """Serialize this object as XML to `outfile` at the given indent level."""
        showIndent(outfile, level)
        outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
        already_processed = []
        self.exportAttributes(outfile, level, already_processed, namespace_, name_='Strings_Type')
        if self.hasContent_():
            outfile.write('>\n')
            self.exportChildren(outfile, level + 1, namespace_, name_)
            showIndent(outfile, level)
            outfile.write('</%s%s>\n' % (namespace_, name_))
        else:
            # Self-closing element when there are no children.
            outfile.write('/>\n')
    def exportAttributes(self, outfile, level, already_processed, namespace_='ovf:', name_='Strings_Type'):
        # Emit wildcard (xs:anyAttribute) attributes first. xsi-namespaced
        # attributes keep the conventional 'xsi:' prefix; other namespaced
        # attributes get synthetic 'yyyN' prefixes declared inline.
        unique_counter = 0
        for name, value in self.anyAttributes_.items():
            xsinamespaceprefix = 'xsi'
            xsinamespace1 = 'http://www.w3.org/2001/XMLSchema-instance'
            xsinamespace2 = '{%s}' % (xsinamespace1, )
            if name.startswith(xsinamespace2):
                name1 = name[len(xsinamespace2):]
                name2 = '%s:%s' % (xsinamespaceprefix, name1, )
                if name2 not in already_processed:
                    already_processed.append(name2)
                    outfile.write(' %s=%s' % (name2, quote_attrib(value), ))
            else:
                mo = re_.match(Namespace_extract_pat_, name)
                if mo is not None:
                    namespace, name = mo.group(1, 2)
                    if name not in already_processed:
                        already_processed.append(name)
                        if namespace == 'http://www.w3.org/XML/1998/namespace':
                            # xml: namespace attributes need no declaration.
                            outfile.write(' %s=%s' % (name, quote_attrib(value), ))
                        else:
                            unique_counter += 1
                            outfile.write(' xmlns:yyy%d="%s"' % (unique_counter, namespace, ))
                            outfile.write(' yyy%d:%s=%s' % (unique_counter, name, quote_attrib(value), ))
                else:
                    if name not in already_processed:
                        already_processed.append(name)
                        outfile.write(' %s=%s' % (name, quote_attrib(value), ))
        if self.lang is not None and 'lang' not in already_processed:
            already_processed.append('lang')
            outfile.write(' lang=%s' % (self.gds_format_string(quote_attrib(self.lang).encode(ExternalEncoding), input_name='lang'), ))
        if self.fileRef is not None and 'fileRef' not in already_processed:
            already_processed.append('fileRef')
            outfile.write(' fileRef=%s' % (self.gds_format_string(quote_attrib(self.fileRef).encode(ExternalEncoding), input_name='fileRef'), ))
    def exportChildren(self, outfile, level, namespace_='ovf:', name_='Strings_Type', fromsubclass_=False):
        for Msg_ in self.Msg:
            Msg_.export(outfile, level, namespace_, name_='Msg')
    def hasContent_(self):
        if (
            self.Msg
            ):
            return True
        else:
            return False
    def exportLiteral(self, outfile, level, name_='Strings_Type'):
        """Write this object as Python constructor-style literal text."""
        level += 1
        self.exportLiteralAttributes(outfile, level, [], name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)
    def exportLiteralAttributes(self, outfile, level, already_processed, name_):
        if self.lang is not None and 'lang' not in already_processed:
            already_processed.append('lang')
            showIndent(outfile, level)
            outfile.write('lang = "%s",\n' % (self.lang,))
        if self.fileRef is not None and 'fileRef' not in already_processed:
            already_processed.append('fileRef')
            showIndent(outfile, level)
            outfile.write('fileRef = "%s",\n' % (self.fileRef,))
        for name, value in self.anyAttributes_.items():
            showIndent(outfile, level)
            outfile.write('%s = "%s",\n' % (name, value,))
    def exportLiteralChildren(self, outfile, level, name_):
        showIndent(outfile, level)
        outfile.write('Msg=[\n')
        level += 1
        for Msg_ in self.Msg:
            showIndent(outfile, level)
            outfile.write('model_.MsgType(\n')
            Msg_.exportLiteral(outfile, level, name_='MsgType')
            showIndent(outfile, level)
            outfile.write('),\n')
        level -= 1
        showIndent(outfile, level)
        outfile.write('],\n')
    def build(self, node):
        """Populate this object from an ElementTree-style XML node."""
        self.buildAttributes(node, node.attrib, [])
        for child in node:
            # Strip any '{namespace}' prefix from the tag to get the local name.
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_)
    def buildAttributes(self, node, attrs, already_processed):
        value = find_attr_value_('lang', node)
        if value is not None and 'lang' not in already_processed:
            already_processed.append('lang')
            self.lang = value
        value = find_attr_value_('fileRef', node)
        if value is not None and 'fileRef' not in already_processed:
            already_processed.append('fileRef')
            self.fileRef = value
        # Everything not claimed above is kept as a wildcard attribute.
        self.anyAttributes_ = {}
        for name, value in attrs.items():
            if name not in already_processed:
                self.anyAttributes_[name] = value
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        if nodeName_ == 'Msg':
            obj_ = MsgType.factory()
            obj_.build(child_)
            self.Msg.append(obj_)
# end class Strings_Type
class Section_Type(GeneratedsSuper):
    """Base type for Sections, subclassing this is the most common form of
    extensibility. Subtypes define more specific elements."""
    # Generated binding base class for all OVF *Section* elements. Carries the
    # shared `required` attribute, the mandatory Info child, wildcard
    # attributes, and `extensiontype_` (the xsi:type used for polymorphism).
    subclass = None
    superclass = None
    def __init__(self, required=None, Info=None, extensiontype_=None):
        self.required = _cast(None, required)
        self.Info = Info
        self.anyAttributes_ = {}
        self.extensiontype_ = extensiontype_
    def factory(*args_, **kwargs_):
        # Instantiate the registered subclass if one was installed.
        if Section_Type.subclass:
            return Section_Type.subclass(*args_, **kwargs_)
        else:
            return Section_Type(*args_, **kwargs_)
    factory = staticmethod(factory)
    def get_Info(self): return self.Info
    def set_Info(self, Info): self.Info = Info
    def get_required(self): return self.required
    def set_required(self, required): self.required = required
    def get_anyAttributes_(self): return self.anyAttributes_
    def set_anyAttributes_(self, anyAttributes_): self.anyAttributes_ = anyAttributes_
    def get_extensiontype_(self): return self.extensiontype_
    def set_extensiontype_(self, extensiontype_): self.extensiontype_ = extensiontype_
    def export(self, outfile, level, namespace_='ovf:', name_='Section_Type', namespacedef_=''):
        """Serialize this object as XML to `outfile` at the given indent level."""
        showIndent(outfile, level)
        outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
        already_processed = []
        self.exportAttributes(outfile, level, already_processed, namespace_, name_='Section_Type')
        if self.hasContent_():
            outfile.write('>\n')
            self.exportChildren(outfile, level + 1, namespace_, name_)
            showIndent(outfile, level)
            outfile.write('</%s%s>\n' % (namespace_, name_))
        else:
            # Self-closing element when there are no children.
            outfile.write('/>\n')
    def exportAttributes(self, outfile, level, already_processed, namespace_='ovf:', name_='Section_Type'):
        # Emit wildcard (xs:anyAttribute) attributes first. xsi-namespaced
        # attributes keep the conventional 'xsi:' prefix; other namespaced
        # attributes get synthetic 'yyyN' prefixes declared inline.
        unique_counter = 0
        for name, value in self.anyAttributes_.items():
            xsinamespaceprefix = 'xsi'
            xsinamespace1 = 'http://www.w3.org/2001/XMLSchema-instance'
            xsinamespace2 = '{%s}' % (xsinamespace1, )
            if name.startswith(xsinamespace2):
                name1 = name[len(xsinamespace2):]
                name2 = '%s:%s' % (xsinamespaceprefix, name1, )
                if name2 not in already_processed:
                    already_processed.append(name2)
                    outfile.write(' %s=%s' % (name2, quote_attrib(value), ))
            else:
                mo = re_.match(Namespace_extract_pat_, name)
                if mo is not None:
                    namespace, name = mo.group(1, 2)
                    if name not in already_processed:
                        already_processed.append(name)
                        if namespace == 'http://www.w3.org/XML/1998/namespace':
                            # xml: namespace attributes need no declaration.
                            outfile.write(' %s=%s' % (name, quote_attrib(value), ))
                        else:
                            unique_counter += 1
                            outfile.write(' xmlns:yyy%d="%s"' % (unique_counter, namespace, ))
                            outfile.write(' yyy%d:%s=%s' % (unique_counter, name, quote_attrib(value), ))
                else:
                    if name not in already_processed:
                        already_processed.append(name)
                        outfile.write(' %s=%s' % (name, quote_attrib(value), ))
        if self.required is not None and 'required' not in already_processed:
            already_processed.append('required')
            outfile.write(' required=%s' % (self.gds_format_string(quote_attrib(self.required).encode(ExternalEncoding), input_name='required'), ))
        if self.extensiontype_ is not None and 'xsi:type' not in already_processed:
            already_processed.append('xsi:type')
            # Declare xsi and record the concrete subtype for round-tripping.
            outfile.write(' xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"')
            outfile.write(' xsi:type="%s"' % self.extensiontype_)
    def exportChildren(self, outfile, level, namespace_='ovf:', name_='Section_Type', fromsubclass_=False):
        if self.Info is not None:
            self.Info.export(outfile, level, namespace_, name_='Info', )
    def hasContent_(self):
        if (
            self.Info is not None
            ):
            return True
        else:
            return False
    def exportLiteral(self, outfile, level, name_='Section_Type'):
        """Write this object as Python constructor-style literal text."""
        level += 1
        self.exportLiteralAttributes(outfile, level, [], name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)
    def exportLiteralAttributes(self, outfile, level, already_processed, name_):
        if self.required is not None and 'required' not in already_processed:
            already_processed.append('required')
            showIndent(outfile, level)
            outfile.write('required = "%s",\n' % (self.required,))
        for name, value in self.anyAttributes_.items():
            showIndent(outfile, level)
            outfile.write('%s = "%s",\n' % (name, value,))
    def exportLiteralChildren(self, outfile, level, name_):
        if self.Info is not None:
            showIndent(outfile, level)
            outfile.write('Info=model_.Msg_Type(\n')
            self.Info.exportLiteral(outfile, level, name_='Info')
            showIndent(outfile, level)
            outfile.write('),\n')
    def build(self, node):
        """Populate this object from an ElementTree-style XML node."""
        self.buildAttributes(node, node.attrib, [])
        for child in node:
            # Strip any '{namespace}' prefix from the tag to get the local name.
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_)
    def buildAttributes(self, node, attrs, already_processed):
        value = find_attr_value_('required', node)
        if value is not None and 'required' not in already_processed:
            already_processed.append('required')
            self.required = value
        # Everything not claimed above is kept as a wildcard attribute.
        self.anyAttributes_ = {}
        for name, value in attrs.items():
            if name not in already_processed:
                self.anyAttributes_[name] = value
        value = find_attr_value_('xsi:type', node)
        if value is not None and 'xsi:type' not in already_processed:
            already_processed.append('xsi:type')
            self.extensiontype_ = value
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        if nodeName_ == 'Info':
            obj_ = Msg_Type.factory()
            obj_.build(child_)
            self.set_Info(obj_)
# end class Section_Type
class Msg_Type(GeneratedsSuper):
    """Type for localizable stringDefault string valueIdentifier for lookup
    in string resource bundle for alternate locale"""
    # Generated binding class for a simple-content element: the text value is
    # kept in valueOf_, with an optional `msgid` attribute for locale lookup.
    subclass = None
    superclass = None
    def __init__(self, msgid='', valueOf_=None):
        self.msgid = _cast(None, msgid)
        self.valueOf_ = valueOf_
        self.anyAttributes_ = {}
    def factory(*args_, **kwargs_):
        # Instantiate the registered subclass if one was installed.
        if Msg_Type.subclass:
            return Msg_Type.subclass(*args_, **kwargs_)
        else:
            return Msg_Type(*args_, **kwargs_)
    factory = staticmethod(factory)
    def get_msgid(self): return self.msgid
    def set_msgid(self, msgid): self.msgid = msgid
    def get_valueOf_(self): return self.valueOf_
    def set_valueOf_(self, valueOf_): self.valueOf_ = valueOf_
    def get_anyAttributes_(self): return self.anyAttributes_
    def set_anyAttributes_(self, anyAttributes_): self.anyAttributes_ = anyAttributes_
    def export(self, outfile, level, namespace_='ovf:', name_='Msg_Type', namespacedef_=''):
        """Serialize this object as XML to `outfile` at the given indent level."""
        showIndent(outfile, level)
        outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
        already_processed = []
        self.exportAttributes(outfile, level, already_processed, namespace_, name_='Msg_Type')
        if self.hasContent_():
            # Simple content: text is written inline, with no newline before it.
            outfile.write('>')
            outfile.write(str(self.valueOf_).encode(ExternalEncoding))
            self.exportChildren(outfile, level + 1, namespace_, name_)
            outfile.write('</%s%s>\n' % (namespace_, name_))
        else:
            outfile.write('/>\n')
    def exportAttributes(self, outfile, level, already_processed, namespace_='ovf:', name_='Msg_Type'):
        # Emit wildcard (xs:anyAttribute) attributes first. xsi-namespaced
        # attributes keep the conventional 'xsi:' prefix; other namespaced
        # attributes get synthetic 'yyyN' prefixes declared inline.
        unique_counter = 0
        for name, value in self.anyAttributes_.items():
            xsinamespaceprefix = 'xsi'
            xsinamespace1 = 'http://www.w3.org/2001/XMLSchema-instance'
            xsinamespace2 = '{%s}' % (xsinamespace1, )
            if name.startswith(xsinamespace2):
                name1 = name[len(xsinamespace2):]
                name2 = '%s:%s' % (xsinamespaceprefix, name1, )
                if name2 not in already_processed:
                    already_processed.append(name2)
                    outfile.write(' %s=%s' % (name2, quote_attrib(value), ))
            else:
                mo = re_.match(Namespace_extract_pat_, name)
                if mo is not None:
                    namespace, name = mo.group(1, 2)
                    if name not in already_processed:
                        already_processed.append(name)
                        if namespace == 'http://www.w3.org/XML/1998/namespace':
                            # xml: namespace attributes need no declaration.
                            outfile.write(' %s=%s' % (name, quote_attrib(value), ))
                        else:
                            unique_counter += 1
                            outfile.write(' xmlns:yyy%d="%s"' % (unique_counter, namespace, ))
                            outfile.write(' yyy%d:%s=%s' % (unique_counter, name, quote_attrib(value), ))
                else:
                    if name not in already_processed:
                        already_processed.append(name)
                        outfile.write(' %s=%s' % (name, quote_attrib(value), ))
        if self.msgid is not None and 'msgid' not in already_processed:
            already_processed.append('msgid')
            outfile.write(' msgid=%s' % (self.gds_format_string(quote_attrib(self.msgid).encode(ExternalEncoding), input_name='msgid'), ))
    def exportChildren(self, outfile, level, namespace_='ovf:', name_='Msg_Type', fromsubclass_=False):
        # Simple-content type: no element children.
        pass
    def hasContent_(self):
        if (
            self.valueOf_
            ):
            return True
        else:
            return False
    def exportLiteral(self, outfile, level, name_='Msg_Type'):
        """Write this object as Python constructor-style literal text."""
        level += 1
        self.exportLiteralAttributes(outfile, level, [], name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)
        showIndent(outfile, level)
        outfile.write('valueOf_ = """%s""",\n' % (self.valueOf_,))
    def exportLiteralAttributes(self, outfile, level, already_processed, name_):
        if self.msgid is not None and 'msgid' not in already_processed:
            already_processed.append('msgid')
            showIndent(outfile, level)
            outfile.write('msgid = "%s",\n' % (self.msgid,))
        for name, value in self.anyAttributes_.items():
            showIndent(outfile, level)
            outfile.write('%s = "%s",\n' % (name, value,))
    def exportLiteralChildren(self, outfile, level, name_):
        pass
    def build(self, node):
        """Populate this object from an ElementTree-style XML node."""
        self.buildAttributes(node, node.attrib, [])
        # Collect all text content (including tails of any stray children).
        self.valueOf_ = get_all_text_(node)
        for child in node:
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_)
    def buildAttributes(self, node, attrs, already_processed):
        value = find_attr_value_('msgid', node)
        if value is not None and 'msgid' not in already_processed:
            already_processed.append('msgid')
            self.msgid = value
        # Everything not claimed above is kept as a wildcard attribute.
        self.anyAttributes_ = {}
        for name, value in attrs.items():
            if name not in already_processed:
                self.anyAttributes_[name] = value
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        pass
# end class Msg_Type
class AnnotationSection_Type(Section_Type):
    """User defined annotation.

    Generated binding subclass of Section_Type holding one optional
    Annotation (Msg_Type) child plus arbitrary wildcard children kept in
    ``anytypeobjs_``.

    Fix: ``insert_anytypeobjs_`` previously assigned into
    ``self._anytypeobjs_`` (nonexistent, leading underscore), raising
    AttributeError; it now targets ``self.anytypeobjs_``.
    """
    subclass = None
    superclass = Section_Type
    def __init__(self, required=None, Info=None, Annotation=None, anytypeobjs_=None):
        super(AnnotationSection_Type, self).__init__(required, Info, )
        self.Annotation = Annotation
        if anytypeobjs_ is None:
            self.anytypeobjs_ = []
        else:
            self.anytypeobjs_ = anytypeobjs_
    def factory(*args_, **kwargs_):
        # Instantiate the registered subclass if one was installed.
        if AnnotationSection_Type.subclass:
            return AnnotationSection_Type.subclass(*args_, **kwargs_)
        else:
            return AnnotationSection_Type(*args_, **kwargs_)
    factory = staticmethod(factory)
    def get_Annotation(self): return self.Annotation
    def set_Annotation(self, Annotation): self.Annotation = Annotation
    def get_anytypeobjs_(self): return self.anytypeobjs_
    def set_anytypeobjs_(self, anytypeobjs_): self.anytypeobjs_ = anytypeobjs_
    def add_anytypeobjs_(self, value): self.anytypeobjs_.append(value)
    def insert_anytypeobjs_(self, index, value): self.anytypeobjs_[index] = value
    def export(self, outfile, level, namespace_='ovf:', name_='AnnotationSection_Type', namespacedef_=''):
        """Serialize this object as XML to `outfile` at the given indent level."""
        showIndent(outfile, level)
        outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
        already_processed = []
        self.exportAttributes(outfile, level, already_processed, namespace_, name_='AnnotationSection_Type')
        if self.hasContent_():
            outfile.write('>\n')
            self.exportChildren(outfile, level + 1, namespace_, name_)
            showIndent(outfile, level)
            outfile.write('</%s%s>\n' % (namespace_, name_))
        else:
            # Self-closing element when there are no children.
            outfile.write('/>\n')
    def exportAttributes(self, outfile, level, already_processed, namespace_='ovf:', name_='AnnotationSection_Type'):
        super(AnnotationSection_Type, self).exportAttributes(outfile, level, already_processed, namespace_, name_='AnnotationSection_Type')
    def exportChildren(self, outfile, level, namespace_='ovf:', name_='AnnotationSection_Type', fromsubclass_=False):
        super(AnnotationSection_Type, self).exportChildren(outfile, level, namespace_, name_, True)
        if self.Annotation is not None:
            self.Annotation.export(outfile, level, namespace_, name_='Annotation', )
        for obj_ in self.anytypeobjs_:
            obj_.export(outfile, level, namespace_)
    def hasContent_(self):
        if (
            self.Annotation is not None or
            self.anytypeobjs_ or
            super(AnnotationSection_Type, self).hasContent_()
            ):
            return True
        else:
            return False
    def exportLiteral(self, outfile, level, name_='AnnotationSection_Type'):
        """Write this object as Python constructor-style literal text."""
        level += 1
        self.exportLiteralAttributes(outfile, level, [], name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)
    def exportLiteralAttributes(self, outfile, level, already_processed, name_):
        super(AnnotationSection_Type, self).exportLiteralAttributes(outfile, level, already_processed, name_)
    def exportLiteralChildren(self, outfile, level, name_):
        super(AnnotationSection_Type, self).exportLiteralChildren(outfile, level, name_)
        if self.Annotation is not None:
            showIndent(outfile, level)
            outfile.write('Annotation=model_.Msg_Type(\n')
            self.Annotation.exportLiteral(outfile, level, name_='Annotation')
            showIndent(outfile, level)
            outfile.write('),\n')
        showIndent(outfile, level)
        outfile.write('anytypeobjs_=[\n')
        level += 1
        for anytypeobjs_ in self.anytypeobjs_:
            anytypeobjs_.exportLiteral(outfile, level)
        level -= 1
        showIndent(outfile, level)
        outfile.write('],\n')
    def build(self, node):
        """Populate this object from an ElementTree-style XML node."""
        self.buildAttributes(node, node.attrib, [])
        for child in node:
            # Strip any '{namespace}' prefix from the tag to get the local name.
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_)
    def buildAttributes(self, node, attrs, already_processed):
        super(AnnotationSection_Type, self).buildAttributes(node, attrs, already_processed)
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        if nodeName_ == 'Annotation':
            obj_ = Msg_Type.factory()
            obj_.build(child_)
            self.set_Annotation(obj_)
        else:
            # Wildcard (xs:any) child: keep it if the helper can build it.
            obj_ = self.gds_build_any(child_, 'AnnotationSection_Type')
            if obj_ is not None:
                self.add_anytypeobjs_(obj_)
        super(AnnotationSection_Type, self).buildChildren(child_, node, nodeName_, True)
# end class AnnotationSection_Type
class ProductSection_Type(Section_Type):
    """Product information for a virtual appliance.

    Generated binding subclass of Section_Type. Holds product metadata
    children (Product, Vendor, Version, ...), repeated Icon/Category/Property
    children, wildcard children in ``anytypeobjs_``, and the ``instance`` /
    ``class`` attributes (the latter stored as ``classxx`` because ``class``
    is a Python keyword).

    Fix: ``insert_anytypeobjs_`` previously assigned into
    ``self._anytypeobjs_`` (nonexistent, leading underscore), raising
    AttributeError; it now targets ``self.anytypeobjs_``.
    """
    subclass = None
    superclass = Section_Type
    def __init__(self, required=None, Info=None, instance='', classxx='', Product=None, Vendor=None, Version=None, FullVersion=None, ProductUrl=None, VendorUrl=None, AppUrl=None, Icon=None, Category=None, Property=None, anytypeobjs_=None):
        super(ProductSection_Type, self).__init__(required, Info, )
        self.instance = _cast(None, instance)
        self.classxx = _cast(None, classxx)
        self.Product = Product
        self.Vendor = Vendor
        self.Version = Version
        self.FullVersion = FullVersion
        self.ProductUrl = ProductUrl
        self.VendorUrl = VendorUrl
        self.AppUrl = AppUrl
        if Icon is None:
            self.Icon = []
        else:
            self.Icon = Icon
        if Category is None:
            self.Category = []
        else:
            self.Category = Category
        if Property is None:
            self.Property = []
        else:
            self.Property = Property
        if anytypeobjs_ is None:
            self.anytypeobjs_ = []
        else:
            self.anytypeobjs_ = anytypeobjs_
    def factory(*args_, **kwargs_):
        # Instantiate the registered subclass if one was installed.
        if ProductSection_Type.subclass:
            return ProductSection_Type.subclass(*args_, **kwargs_)
        else:
            return ProductSection_Type(*args_, **kwargs_)
    factory = staticmethod(factory)
    def get_Product(self): return self.Product
    def set_Product(self, Product): self.Product = Product
    def get_Vendor(self): return self.Vendor
    def set_Vendor(self, Vendor): self.Vendor = Vendor
    def get_Version(self): return self.Version
    def set_Version(self, Version): self.Version = Version
    def get_FullVersion(self): return self.FullVersion
    def set_FullVersion(self, FullVersion): self.FullVersion = FullVersion
    def get_ProductUrl(self): return self.ProductUrl
    def set_ProductUrl(self, ProductUrl): self.ProductUrl = ProductUrl
    def get_VendorUrl(self): return self.VendorUrl
    def set_VendorUrl(self, VendorUrl): self.VendorUrl = VendorUrl
    def get_AppUrl(self): return self.AppUrl
    def set_AppUrl(self, AppUrl): self.AppUrl = AppUrl
    def get_Icon(self): return self.Icon
    def set_Icon(self, Icon): self.Icon = Icon
    def add_Icon(self, value): self.Icon.append(value)
    def insert_Icon(self, index, value): self.Icon[index] = value
    def get_Category(self): return self.Category
    def set_Category(self, Category): self.Category = Category
    def add_Category(self, value): self.Category.append(value)
    def insert_Category(self, index, value): self.Category[index] = value
    def get_Property(self): return self.Property
    def set_Property(self, Property): self.Property = Property
    def add_Property(self, value): self.Property.append(value)
    def insert_Property(self, index, value): self.Property[index] = value
    def get_anytypeobjs_(self): return self.anytypeobjs_
    def set_anytypeobjs_(self, anytypeobjs_): self.anytypeobjs_ = anytypeobjs_
    def add_anytypeobjs_(self, value): self.anytypeobjs_.append(value)
    def insert_anytypeobjs_(self, index, value): self.anytypeobjs_[index] = value
    def get_instance(self): return self.instance
    def set_instance(self, instance): self.instance = instance
    def get_class(self): return self.classxx
    def set_class(self, classxx): self.classxx = classxx
    def export(self, outfile, level, namespace_='ovf:', name_='ProductSection_Type', namespacedef_=''):
        """Serialize this object as XML to `outfile` at the given indent level."""
        showIndent(outfile, level)
        outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
        already_processed = []
        self.exportAttributes(outfile, level, already_processed, namespace_, name_='ProductSection_Type')
        if self.hasContent_():
            outfile.write('>\n')
            self.exportChildren(outfile, level + 1, namespace_, name_)
            showIndent(outfile, level)
            outfile.write('</%s%s>\n' % (namespace_, name_))
        else:
            # Self-closing element when there are no children.
            outfile.write('/>\n')
    def exportAttributes(self, outfile, level, already_processed, namespace_='ovf:', name_='ProductSection_Type'):
        super(ProductSection_Type, self).exportAttributes(outfile, level, already_processed, namespace_, name_='ProductSection_Type')
        if self.instance is not None and 'instance' not in already_processed:
            already_processed.append('instance')
            outfile.write(' instance=%s' % (self.gds_format_string(quote_attrib(self.instance).encode(ExternalEncoding), input_name='instance'), ))
        if self.classxx is not None and 'classxx' not in already_processed:
            already_processed.append('classxx')
            # Written to XML as 'class' (Python keyword, hence 'classxx').
            outfile.write(' class=%s' % (self.gds_format_string(quote_attrib(self.classxx).encode(ExternalEncoding), input_name='class'), ))
    def exportChildren(self, outfile, level, namespace_='ovf:', name_='ProductSection_Type', fromsubclass_=False):
        super(ProductSection_Type, self).exportChildren(outfile, level, namespace_, name_, True)
        if self.Product is not None:
            self.Product.export(outfile, level, namespace_, name_='Product')
        if self.Vendor is not None:
            self.Vendor.export(outfile, level, namespace_, name_='Vendor')
        if self.Version is not None:
            self.Version.export(outfile, level, namespace_, name_='Version')
        if self.FullVersion is not None:
            self.FullVersion.export(outfile, level, namespace_, name_='FullVersion')
        if self.ProductUrl is not None:
            self.ProductUrl.export(outfile, level, namespace_, name_='ProductUrl')
        if self.VendorUrl is not None:
            self.VendorUrl.export(outfile, level, namespace_, name_='VendorUrl')
        if self.AppUrl is not None:
            self.AppUrl.export(outfile, level, namespace_, name_='AppUrl')
        for Icon_ in self.Icon:
            Icon_.export(outfile, level, namespace_, name_='Icon')
        for Category_ in self.Category:
            Category_.export(outfile, level, namespace_, name_='Category')
        for Property_ in self.Property:
            Property_.export(outfile, level, namespace_, name_='Property')
        for obj_ in self.anytypeobjs_:
            obj_.export(outfile, level, namespace_)
    def hasContent_(self):
        if (
            self.Product is not None or
            self.Vendor is not None or
            self.Version is not None or
            self.FullVersion is not None or
            self.ProductUrl is not None or
            self.VendorUrl is not None or
            self.AppUrl is not None or
            self.Icon or
            self.Category or
            self.Property or
            self.anytypeobjs_ or
            super(ProductSection_Type, self).hasContent_()
            ):
            return True
        else:
            return False
    def exportLiteral(self, outfile, level, name_='ProductSection_Type'):
        """Write this object as Python constructor-style literal text."""
        level += 1
        self.exportLiteralAttributes(outfile, level, [], name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)
    def exportLiteralAttributes(self, outfile, level, already_processed, name_):
        if self.instance is not None and 'instance' not in already_processed:
            already_processed.append('instance')
            showIndent(outfile, level)
            outfile.write('instance = "%s",\n' % (self.instance,))
        if self.classxx is not None and 'classxx' not in already_processed:
            already_processed.append('classxx')
            showIndent(outfile, level)
            outfile.write('classxx = "%s",\n' % (self.classxx,))
        super(ProductSection_Type, self).exportLiteralAttributes(outfile, level, already_processed, name_)
    def exportLiteralChildren(self, outfile, level, name_):
        super(ProductSection_Type, self).exportLiteralChildren(outfile, level, name_)
        if self.Product is not None:
            showIndent(outfile, level)
            outfile.write('Product=model_.Msg_Type(\n')
            self.Product.exportLiteral(outfile, level, name_='Product')
            showIndent(outfile, level)
            outfile.write('),\n')
        if self.Vendor is not None:
            showIndent(outfile, level)
            outfile.write('Vendor=model_.Msg_Type(\n')
            self.Vendor.exportLiteral(outfile, level, name_='Vendor')
            showIndent(outfile, level)
            outfile.write('),\n')
        if self.Version is not None:
            showIndent(outfile, level)
            outfile.write('Version=model_.cimString(\n')
            self.Version.exportLiteral(outfile, level, name_='Version')
            showIndent(outfile, level)
            outfile.write('),\n')
        if self.FullVersion is not None:
            showIndent(outfile, level)
            outfile.write('FullVersion=model_.cimString(\n')
            self.FullVersion.exportLiteral(outfile, level, name_='FullVersion')
            showIndent(outfile, level)
            outfile.write('),\n')
        if self.ProductUrl is not None:
            showIndent(outfile, level)
            outfile.write('ProductUrl=model_.cimString(\n')
            self.ProductUrl.exportLiteral(outfile, level, name_='ProductUrl')
            showIndent(outfile, level)
            outfile.write('),\n')
        if self.VendorUrl is not None:
            showIndent(outfile, level)
            outfile.write('VendorUrl=model_.cimString(\n')
            self.VendorUrl.exportLiteral(outfile, level, name_='VendorUrl')
            showIndent(outfile, level)
            outfile.write('),\n')
        if self.AppUrl is not None:
            showIndent(outfile, level)
            outfile.write('AppUrl=model_.cimString(\n')
            self.AppUrl.exportLiteral(outfile, level, name_='AppUrl')
            showIndent(outfile, level)
            outfile.write('),\n')
        showIndent(outfile, level)
        outfile.write('Icon=[\n')
        level += 1
        for Icon_ in self.Icon:
            showIndent(outfile, level)
            outfile.write('model_.IconType(\n')
            Icon_.exportLiteral(outfile, level, name_='IconType')
            showIndent(outfile, level)
            outfile.write('),\n')
        level -= 1
        showIndent(outfile, level)
        outfile.write('],\n')
        showIndent(outfile, level)
        outfile.write('Category=[\n')
        level += 1
        for Category_ in self.Category:
            showIndent(outfile, level)
            outfile.write('model_.Msg_Type(\n')
            Category_.exportLiteral(outfile, level, name_='Msg_Type')
            showIndent(outfile, level)
            outfile.write('),\n')
        level -= 1
        showIndent(outfile, level)
        outfile.write('],\n')
        showIndent(outfile, level)
        outfile.write('Property=[\n')
        level += 1
        for Property_ in self.Property:
            showIndent(outfile, level)
            outfile.write('model_.PropertyType(\n')
            Property_.exportLiteral(outfile, level, name_='PropertyType')
            showIndent(outfile, level)
            outfile.write('),\n')
        level -= 1
        showIndent(outfile, level)
        outfile.write('],\n')
        showIndent(outfile, level)
        outfile.write('anytypeobjs_=[\n')
        level += 1
        for anytypeobjs_ in self.anytypeobjs_:
            anytypeobjs_.exportLiteral(outfile, level)
        level -= 1
        showIndent(outfile, level)
        outfile.write('],\n')
    def build(self, node):
        """Populate this object from an ElementTree-style XML node."""
        self.buildAttributes(node, node.attrib, [])
        for child in node:
            # Strip any '{namespace}' prefix from the tag to get the local name.
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_)
    def buildAttributes(self, node, attrs, already_processed):
        value = find_attr_value_('instance', node)
        if value is not None and 'instance' not in already_processed:
            already_processed.append('instance')
            self.instance = value
        value = find_attr_value_('class', node)
        if value is not None and 'class' not in already_processed:
            already_processed.append('class')
            self.classxx = value
        super(ProductSection_Type, self).buildAttributes(node, attrs, already_processed)
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        if nodeName_ == 'Product':
            obj_ = Msg_Type.factory()
            obj_.build(child_)
            self.set_Product(obj_)
        elif nodeName_ == 'Vendor':
            obj_ = Msg_Type.factory()
            obj_.build(child_)
            self.set_Vendor(obj_)
        elif nodeName_ == 'Version':
            # cimString children are polymorphic: resolve the concrete
            # subclass (via xsi:type) before instantiating.
            class_obj_ = self.get_class_obj_(child_, cimString)
            obj_ = class_obj_.factory()
            obj_.build(child_)
            self.set_Version(obj_)
        elif nodeName_ == 'FullVersion':
            class_obj_ = self.get_class_obj_(child_, cimString)
            obj_ = class_obj_.factory()
            obj_.build(child_)
            self.set_FullVersion(obj_)
        elif nodeName_ == 'ProductUrl':
            class_obj_ = self.get_class_obj_(child_, cimString)
            obj_ = class_obj_.factory()
            obj_.build(child_)
            self.set_ProductUrl(obj_)
        elif nodeName_ == 'VendorUrl':
            class_obj_ = self.get_class_obj_(child_, cimString)
            obj_ = class_obj_.factory()
            obj_.build(child_)
            self.set_VendorUrl(obj_)
        elif nodeName_ == 'AppUrl':
            class_obj_ = self.get_class_obj_(child_, cimString)
            obj_ = class_obj_.factory()
            obj_.build(child_)
            self.set_AppUrl(obj_)
        elif nodeName_ == 'Icon':
            obj_ = IconType.factory()
            obj_.build(child_)
            self.Icon.append(obj_)
        elif nodeName_ == 'Category':
            obj_ = Msg_Type.factory()
            obj_.build(child_)
            self.Category.append(obj_)
        elif nodeName_ == 'Property':
            obj_ = PropertyType.factory()
            obj_.build(child_)
            self.Property.append(obj_)
        else:
            # Wildcard (xs:any) child: keep it if the helper can build it.
            obj_ = self.gds_build_any(child_, 'ProductSection_Type')
            if obj_ is not None:
                self.add_anytypeobjs_(obj_)
        super(ProductSection_Type, self).buildChildren(child_, node, nodeName_, True)
# end class ProductSection_Type
class PropertyConfigurationValue_Type(GeneratedsSuper):
    """Type for alternative default values for properties when
    DeploymentOptionSection is usedAlternative default property
    valueConfiguration from DeploymentOptionSection in which this
    value is default"""
    subclass = None
    superclass = None
    def __init__(self, configuration=None, value=None, anytypeobjs_=None):
        # XML attributes of the element.
        self.configuration = _cast(None, configuration)
        self.value = _cast(None, value)
        # Wildcard (xs:any) child elements.
        if anytypeobjs_ is None:
            self.anytypeobjs_ = []
        else:
            self.anytypeobjs_ = anytypeobjs_
        # Wildcard (xs:anyAttribute) attributes, keyed by attribute name.
        self.anyAttributes_ = {}
    def factory(*args_, **kwargs_):
        if PropertyConfigurationValue_Type.subclass:
            return PropertyConfigurationValue_Type.subclass(*args_, **kwargs_)
        else:
            return PropertyConfigurationValue_Type(*args_, **kwargs_)
    factory = staticmethod(factory)
    def get_anytypeobjs_(self): return self.anytypeobjs_
    def set_anytypeobjs_(self, anytypeobjs_): self.anytypeobjs_ = anytypeobjs_
    def add_anytypeobjs_(self, value): self.anytypeobjs_.append(value)
    # Fixed: previously assigned to "self._anytypeobjs_" (stray leading
    # underscore), which raised AttributeError; the attribute is
    # "anytypeobjs_", matching the other insert_* methods in this module.
    def insert_anytypeobjs_(self, index, value): self.anytypeobjs_[index] = value
    def get_configuration(self): return self.configuration
    def set_configuration(self, configuration): self.configuration = configuration
    def get_value(self): return self.value
    def set_value(self, value): self.value = value
    def get_anyAttributes_(self): return self.anyAttributes_
    def set_anyAttributes_(self, anyAttributes_): self.anyAttributes_ = anyAttributes_
    def export(self, outfile, level, namespace_='ovf:', name_='PropertyConfigurationValue_Type', namespacedef_=''):
        # Serialize this object as an XML element; self-close when empty.
        showIndent(outfile, level)
        outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
        already_processed = []
        self.exportAttributes(outfile, level, already_processed, namespace_, name_='PropertyConfigurationValue_Type')
        if self.hasContent_():
            outfile.write('>\n')
            self.exportChildren(outfile, level + 1, namespace_, name_)
            showIndent(outfile, level)
            outfile.write('</%s%s>\n' % (namespace_, name_))
        else:
            outfile.write('/>\n')
    def exportAttributes(self, outfile, level, already_processed, namespace_='ovf:', name_='PropertyConfigurationValue_Type'):
        unique_counter = 0
        # Emit wildcard attributes first, synthesizing xmlns declarations
        # (xmlns:yyyN) for namespaces other than xsi and the xml namespace.
        for name, value in self.anyAttributes_.items():
            xsinamespaceprefix = 'xsi'
            xsinamespace1 = 'http://www.w3.org/2001/XMLSchema-instance'
            xsinamespace2 = '{%s}' % (xsinamespace1, )
            if name.startswith(xsinamespace2):
                name1 = name[len(xsinamespace2):]
                name2 = '%s:%s' % (xsinamespaceprefix, name1, )
                if name2 not in already_processed:
                    already_processed.append(name2)
                    outfile.write(' %s=%s' % (name2, quote_attrib(value), ))
            else:
                mo = re_.match(Namespace_extract_pat_, name)
                if mo is not None:
                    namespace, name = mo.group(1, 2)
                    if name not in already_processed:
                        already_processed.append(name)
                        if namespace == 'http://www.w3.org/XML/1998/namespace':
                            outfile.write(' %s=%s' % (name, quote_attrib(value), ))
                        else:
                            unique_counter += 1
                            outfile.write(' xmlns:yyy%d="%s"' % (unique_counter, namespace, ))
                            outfile.write(' yyy%d:%s=%s' % (unique_counter, name, quote_attrib(value), ))
                else:
                    if name not in already_processed:
                        already_processed.append(name)
                        outfile.write(' %s=%s' % (name, quote_attrib(value), ))
        if self.configuration is not None and 'configuration' not in already_processed:
            already_processed.append('configuration')
            outfile.write(' configuration=%s' % (self.gds_format_string(quote_attrib(self.configuration).encode(ExternalEncoding), input_name='configuration'), ))
        if self.value is not None and 'value' not in already_processed:
            already_processed.append('value')
            outfile.write(' value=%s' % (self.gds_format_string(quote_attrib(self.value).encode(ExternalEncoding), input_name='value'), ))
    def exportChildren(self, outfile, level, namespace_='ovf:', name_='PropertyConfigurationValue_Type', fromsubclass_=False):
        for obj_ in self.anytypeobjs_:
            obj_.export(outfile, level, namespace_)
    def hasContent_(self):
        # True when there is at least one wildcard child to serialize.
        if (
            self.anytypeobjs_
            ):
            return True
        else:
            return False
    def exportLiteral(self, outfile, level, name_='PropertyConfigurationValue_Type'):
        # Write this object as Python constructor-literal source.
        level += 1
        self.exportLiteralAttributes(outfile, level, [], name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)
    def exportLiteralAttributes(self, outfile, level, already_processed, name_):
        if self.configuration is not None and 'configuration' not in already_processed:
            already_processed.append('configuration')
            showIndent(outfile, level)
            outfile.write('configuration = "%s",\n' % (self.configuration,))
        if self.value is not None and 'value' not in already_processed:
            already_processed.append('value')
            showIndent(outfile, level)
            outfile.write('value = "%s",\n' % (self.value,))
        for name, value in self.anyAttributes_.items():
            showIndent(outfile, level)
            outfile.write('%s = "%s",\n' % (name, value,))
    def exportLiteralChildren(self, outfile, level, name_):
        showIndent(outfile, level)
        outfile.write('anytypeobjs_=[\n')
        level += 1
        for anytypeobjs_ in self.anytypeobjs_:
            anytypeobjs_.exportLiteral(outfile, level)
        level -= 1
        showIndent(outfile, level)
        outfile.write('],\n')
    def build(self, node):
        # Populate this instance from an ElementTree element.
        self.buildAttributes(node, node.attrib, [])
        for child in node:
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_)
    def buildAttributes(self, node, attrs, already_processed):
        value = find_attr_value_('configuration', node)
        if value is not None and 'configuration' not in already_processed:
            already_processed.append('configuration')
            self.configuration = value
        value = find_attr_value_('value', node)
        if value is not None and 'value' not in already_processed:
            already_processed.append('value')
            self.value = value
        # Any remaining attributes are captured as wildcard attributes.
        self.anyAttributes_ = {}
        for name, value in attrs.items():
            if name not in already_processed:
                self.anyAttributes_[name] = value
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        # All children of this type are wildcard (xs:any) content.
        obj_ = self.gds_build_any(child_, 'PropertyConfigurationValue_Type')
        if obj_ is not None:
            self.add_anytypeobjs_(obj_)
# end class PropertyConfigurationValue_Type
class NetworkSection_Type(Section_Type):
    """Descriptions of logical networks used within the package"""
    subclass = None
    superclass = Section_Type
    def __init__(self, required=None, Info=None, Network=None, anytypeobjs_=None):
        super(NetworkSection_Type, self).__init__(required, Info, )
        # Repeatable Network child elements (NetworkType instances).
        if Network is None:
            self.Network = []
        else:
            self.Network = Network
        # Wildcard (xs:any) child elements.
        if anytypeobjs_ is None:
            self.anytypeobjs_ = []
        else:
            self.anytypeobjs_ = anytypeobjs_
    def factory(*args_, **kwargs_):
        if NetworkSection_Type.subclass:
            return NetworkSection_Type.subclass(*args_, **kwargs_)
        else:
            return NetworkSection_Type(*args_, **kwargs_)
    factory = staticmethod(factory)
    def get_Network(self): return self.Network
    def set_Network(self, Network): self.Network = Network
    def add_Network(self, value): self.Network.append(value)
    def insert_Network(self, index, value): self.Network[index] = value
    def get_anytypeobjs_(self): return self.anytypeobjs_
    def set_anytypeobjs_(self, anytypeobjs_): self.anytypeobjs_ = anytypeobjs_
    def add_anytypeobjs_(self, value): self.anytypeobjs_.append(value)
    # Fixed: previously assigned to "self._anytypeobjs_" (stray leading
    # underscore), which raised AttributeError; the attribute is
    # "anytypeobjs_", matching insert_Network above.
    def insert_anytypeobjs_(self, index, value): self.anytypeobjs_[index] = value
    def export(self, outfile, level, namespace_='ovf:', name_='NetworkSection_Type', namespacedef_=''):
        # Serialize this object as an XML element; self-close when empty.
        showIndent(outfile, level)
        outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
        already_processed = []
        self.exportAttributes(outfile, level, already_processed, namespace_, name_='NetworkSection_Type')
        if self.hasContent_():
            outfile.write('>\n')
            self.exportChildren(outfile, level + 1, namespace_, name_)
            showIndent(outfile, level)
            outfile.write('</%s%s>\n' % (namespace_, name_))
        else:
            outfile.write('/>\n')
    def exportAttributes(self, outfile, level, already_processed, namespace_='ovf:', name_='NetworkSection_Type'):
        super(NetworkSection_Type, self).exportAttributes(outfile, level, already_processed, namespace_, name_='NetworkSection_Type')
    def exportChildren(self, outfile, level, namespace_='ovf:', name_='NetworkSection_Type', fromsubclass_=False):
        super(NetworkSection_Type, self).exportChildren(outfile, level, namespace_, name_, True)
        for Network_ in self.Network:
            Network_.export(outfile, level, namespace_, name_='Network')
        for obj_ in self.anytypeobjs_:
            obj_.export(outfile, level, namespace_)
    def hasContent_(self):
        if (
            self.Network or
            self.anytypeobjs_ or
            super(NetworkSection_Type, self).hasContent_()
            ):
            return True
        else:
            return False
    def exportLiteral(self, outfile, level, name_='NetworkSection_Type'):
        # Write this object as Python constructor-literal source.
        level += 1
        self.exportLiteralAttributes(outfile, level, [], name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)
    def exportLiteralAttributes(self, outfile, level, already_processed, name_):
        super(NetworkSection_Type, self).exportLiteralAttributes(outfile, level, already_processed, name_)
    def exportLiteralChildren(self, outfile, level, name_):
        super(NetworkSection_Type, self).exportLiteralChildren(outfile, level, name_)
        showIndent(outfile, level)
        outfile.write('Network=[\n')
        level += 1
        for Network_ in self.Network:
            showIndent(outfile, level)
            outfile.write('model_.NetworkType(\n')
            Network_.exportLiteral(outfile, level, name_='NetworkType')
            showIndent(outfile, level)
            outfile.write('),\n')
        level -= 1
        showIndent(outfile, level)
        outfile.write('],\n')
        showIndent(outfile, level)
        outfile.write('anytypeobjs_=[\n')
        level += 1
        for anytypeobjs_ in self.anytypeobjs_:
            anytypeobjs_.exportLiteral(outfile, level)
        level -= 1
        showIndent(outfile, level)
        outfile.write('],\n')
    def build(self, node):
        # Populate this instance from an ElementTree element.
        self.buildAttributes(node, node.attrib, [])
        for child in node:
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_)
    def buildAttributes(self, node, attrs, already_processed):
        super(NetworkSection_Type, self).buildAttributes(node, attrs, already_processed)
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        if nodeName_ == 'Network':
            obj_ = NetworkType.factory()
            obj_.build(child_)
            self.Network.append(obj_)
        else:
            # Unknown children are collected as wildcard (xs:any) content.
            obj_ = self.gds_build_any(child_, 'NetworkSection_Type')
            if obj_ is not None:
                self.add_anytypeobjs_(obj_)
        super(NetworkSection_Type, self).buildChildren(child_, node, nodeName_, True)
# end class NetworkSection_Type
class DiskSection_Type(Section_Type):
    """Descriptions of virtual disks used within the package"""
    subclass = None
    superclass = Section_Type
    def __init__(self, required=None, Info=None, Disk=None, anytypeobjs_=None):
        super(DiskSection_Type, self).__init__(required, Info, )
        # Repeatable Disk child elements (VirtualDiskDesc_Type instances).
        if Disk is None:
            self.Disk = []
        else:
            self.Disk = Disk
        # Wildcard (xs:any) child elements.
        if anytypeobjs_ is None:
            self.anytypeobjs_ = []
        else:
            self.anytypeobjs_ = anytypeobjs_
    def factory(*args_, **kwargs_):
        if DiskSection_Type.subclass:
            return DiskSection_Type.subclass(*args_, **kwargs_)
        else:
            return DiskSection_Type(*args_, **kwargs_)
    factory = staticmethod(factory)
    def get_Disk(self): return self.Disk
    def set_Disk(self, Disk): self.Disk = Disk
    def add_Disk(self, value): self.Disk.append(value)
    def insert_Disk(self, index, value): self.Disk[index] = value
    def get_anytypeobjs_(self): return self.anytypeobjs_
    def set_anytypeobjs_(self, anytypeobjs_): self.anytypeobjs_ = anytypeobjs_
    def add_anytypeobjs_(self, value): self.anytypeobjs_.append(value)
    # Fixed: previously assigned to "self._anytypeobjs_" (stray leading
    # underscore), which raised AttributeError; the attribute is
    # "anytypeobjs_", matching insert_Disk above.
    def insert_anytypeobjs_(self, index, value): self.anytypeobjs_[index] = value
    def export(self, outfile, level, namespace_='ovf:', name_='DiskSection_Type', namespacedef_=''):
        # Serialize this object as an XML element; self-close when empty.
        showIndent(outfile, level)
        outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
        already_processed = []
        self.exportAttributes(outfile, level, already_processed, namespace_, name_='DiskSection_Type')
        if self.hasContent_():
            outfile.write('>\n')
            self.exportChildren(outfile, level + 1, namespace_, name_)
            showIndent(outfile, level)
            outfile.write('</%s%s>\n' % (namespace_, name_))
        else:
            outfile.write('/>\n')
    def exportAttributes(self, outfile, level, already_processed, namespace_='ovf:', name_='DiskSection_Type'):
        super(DiskSection_Type, self).exportAttributes(outfile, level, already_processed, namespace_, name_='DiskSection_Type')
    def exportChildren(self, outfile, level, namespace_='ovf:', name_='DiskSection_Type', fromsubclass_=False):
        super(DiskSection_Type, self).exportChildren(outfile, level, namespace_, name_, True)
        for Disk_ in self.Disk:
            Disk_.export(outfile, level, namespace_, name_='Disk')
        for obj_ in self.anytypeobjs_:
            obj_.export(outfile, level, namespace_)
    def hasContent_(self):
        if (
            self.Disk or
            self.anytypeobjs_ or
            super(DiskSection_Type, self).hasContent_()
            ):
            return True
        else:
            return False
    def exportLiteral(self, outfile, level, name_='DiskSection_Type'):
        # Write this object as Python constructor-literal source.
        level += 1
        self.exportLiteralAttributes(outfile, level, [], name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)
    def exportLiteralAttributes(self, outfile, level, already_processed, name_):
        super(DiskSection_Type, self).exportLiteralAttributes(outfile, level, already_processed, name_)
    def exportLiteralChildren(self, outfile, level, name_):
        super(DiskSection_Type, self).exportLiteralChildren(outfile, level, name_)
        showIndent(outfile, level)
        outfile.write('Disk=[\n')
        level += 1
        for Disk_ in self.Disk:
            showIndent(outfile, level)
            outfile.write('model_.VirtualDiskDesc_Type(\n')
            Disk_.exportLiteral(outfile, level, name_='VirtualDiskDesc_Type')
            showIndent(outfile, level)
            outfile.write('),\n')
        level -= 1
        showIndent(outfile, level)
        outfile.write('],\n')
        showIndent(outfile, level)
        outfile.write('anytypeobjs_=[\n')
        level += 1
        for anytypeobjs_ in self.anytypeobjs_:
            anytypeobjs_.exportLiteral(outfile, level)
        level -= 1
        showIndent(outfile, level)
        outfile.write('],\n')
    def build(self, node):
        # Populate this instance from an ElementTree element.
        self.buildAttributes(node, node.attrib, [])
        for child in node:
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_)
    def buildAttributes(self, node, attrs, already_processed):
        super(DiskSection_Type, self).buildAttributes(node, attrs, already_processed)
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        if nodeName_ == 'Disk':
            obj_ = VirtualDiskDesc_Type.factory()
            obj_.build(child_)
            self.Disk.append(obj_)
        else:
            # Unknown children are collected as wildcard (xs:any) content.
            obj_ = self.gds_build_any(child_, 'DiskSection_Type')
            if obj_ is not None:
                self.add_anytypeobjs_(obj_)
        super(DiskSection_Type, self).buildChildren(child_, node, nodeName_, True)
# end class DiskSection_Type
class VirtualDiskDesc_Type(GeneratedsSuper):
    """Type for virtual disk descriptorIdentifier for virtual diskReference
    to virtual disk content. If not specified a blank virtual disk
    is created of size given by capacity attributeVirtual disk
    capacity, can be specified as either an xs:long size or as a
    reference to a property using ${property_name}. Unit of
    allocation for ovf:capacity. If not specified default value is
    bytes. Value shall match a recognized value for the UNITS
    qualifier in DSP0004.Format of virtual disk given as a URI that
    identifies the disk typeEstimated populated size of disk in
    bytesReference to potential parent disk"""
    subclass = None
    superclass = None
    def __init__(self, capacityAllocationUnits='byte', capacity=None, format=None, parentRef=None, fileRef=None, populatedSize=None, diskId=None, anytypeobjs_=None):
        # XML attributes of the Disk element; populatedSize is the only
        # integer-typed attribute.
        self.capacityAllocationUnits = _cast(None, capacityAllocationUnits)
        self.capacity = _cast(None, capacity)
        self.format = _cast(None, format)
        self.parentRef = _cast(None, parentRef)
        self.fileRef = _cast(None, fileRef)
        self.populatedSize = _cast(int, populatedSize)
        self.diskId = _cast(None, diskId)
        # Wildcard (xs:any) child elements.
        if anytypeobjs_ is None:
            self.anytypeobjs_ = []
        else:
            self.anytypeobjs_ = anytypeobjs_
        # Wildcard (xs:anyAttribute) attributes, keyed by attribute name.
        self.anyAttributes_ = {}
    def factory(*args_, **kwargs_):
        if VirtualDiskDesc_Type.subclass:
            return VirtualDiskDesc_Type.subclass(*args_, **kwargs_)
        else:
            return VirtualDiskDesc_Type(*args_, **kwargs_)
    factory = staticmethod(factory)
    def get_anytypeobjs_(self): return self.anytypeobjs_
    def set_anytypeobjs_(self, anytypeobjs_): self.anytypeobjs_ = anytypeobjs_
    def add_anytypeobjs_(self, value): self.anytypeobjs_.append(value)
    # Fixed: previously assigned to "self._anytypeobjs_" (stray leading
    # underscore), which raised AttributeError; the attribute is
    # "anytypeobjs_", matching the other insert_* methods in this module.
    def insert_anytypeobjs_(self, index, value): self.anytypeobjs_[index] = value
    def get_capacityAllocationUnits(self): return self.capacityAllocationUnits
    def set_capacityAllocationUnits(self, capacityAllocationUnits): self.capacityAllocationUnits = capacityAllocationUnits
    def get_capacity(self): return self.capacity
    def set_capacity(self, capacity): self.capacity = capacity
    def get_format(self): return self.format
    def set_format(self, format): self.format = format
    def get_parentRef(self): return self.parentRef
    def set_parentRef(self, parentRef): self.parentRef = parentRef
    def get_fileRef(self): return self.fileRef
    def set_fileRef(self, fileRef): self.fileRef = fileRef
    def get_populatedSize(self): return self.populatedSize
    def set_populatedSize(self, populatedSize): self.populatedSize = populatedSize
    def get_diskId(self): return self.diskId
    def set_diskId(self, diskId): self.diskId = diskId
    def get_anyAttributes_(self): return self.anyAttributes_
    def set_anyAttributes_(self, anyAttributes_): self.anyAttributes_ = anyAttributes_
    def export(self, outfile, level, namespace_='ovf:', name_='VirtualDiskDesc_Type', namespacedef_=''):
        # Serialize this object as an XML element; self-close when empty.
        showIndent(outfile, level)
        outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
        already_processed = []
        self.exportAttributes(outfile, level, already_processed, namespace_, name_='VirtualDiskDesc_Type')
        if self.hasContent_():
            outfile.write('>\n')
            self.exportChildren(outfile, level + 1, namespace_, name_)
            showIndent(outfile, level)
            outfile.write('</%s%s>\n' % (namespace_, name_))
        else:
            outfile.write('/>\n')
    def exportAttributes(self, outfile, level, already_processed, namespace_='ovf:', name_='VirtualDiskDesc_Type'):
        unique_counter = 0
        # Emit wildcard attributes first, synthesizing xmlns declarations
        # (xmlns:yyyN) for namespaces other than xsi and the xml namespace.
        for name, value in self.anyAttributes_.items():
            xsinamespaceprefix = 'xsi'
            xsinamespace1 = 'http://www.w3.org/2001/XMLSchema-instance'
            xsinamespace2 = '{%s}' % (xsinamespace1, )
            if name.startswith(xsinamespace2):
                name1 = name[len(xsinamespace2):]
                name2 = '%s:%s' % (xsinamespaceprefix, name1, )
                if name2 not in already_processed:
                    already_processed.append(name2)
                    outfile.write(' %s=%s' % (name2, quote_attrib(value), ))
            else:
                mo = re_.match(Namespace_extract_pat_, name)
                if mo is not None:
                    namespace, name = mo.group(1, 2)
                    if name not in already_processed:
                        already_processed.append(name)
                        if namespace == 'http://www.w3.org/XML/1998/namespace':
                            outfile.write(' %s=%s' % (name, quote_attrib(value), ))
                        else:
                            unique_counter += 1
                            outfile.write(' xmlns:yyy%d="%s"' % (unique_counter, namespace, ))
                            outfile.write(' yyy%d:%s=%s' % (unique_counter, name, quote_attrib(value), ))
                else:
                    if name not in already_processed:
                        already_processed.append(name)
                        outfile.write(' %s=%s' % (name, quote_attrib(value), ))
        if self.capacityAllocationUnits is not None and 'capacityAllocationUnits' not in already_processed:
            already_processed.append('capacityAllocationUnits')
            outfile.write(' capacityAllocationUnits=%s' % (self.gds_format_string(quote_attrib(self.capacityAllocationUnits).encode(ExternalEncoding), input_name='capacityAllocationUnits'), ))
        if self.capacity is not None and 'capacity' not in already_processed:
            already_processed.append('capacity')
            outfile.write(' capacity=%s' % (self.gds_format_string(quote_attrib(self.capacity).encode(ExternalEncoding), input_name='capacity'), ))
        if self.format is not None and 'format' not in already_processed:
            already_processed.append('format')
            outfile.write(' format=%s' % (self.gds_format_string(quote_attrib(self.format).encode(ExternalEncoding), input_name='format'), ))
        if self.parentRef is not None and 'parentRef' not in already_processed:
            already_processed.append('parentRef')
            outfile.write(' parentRef=%s' % (self.gds_format_string(quote_attrib(self.parentRef).encode(ExternalEncoding), input_name='parentRef'), ))
        if self.fileRef is not None and 'fileRef' not in already_processed:
            already_processed.append('fileRef')
            outfile.write(' fileRef=%s' % (self.gds_format_string(quote_attrib(self.fileRef).encode(ExternalEncoding), input_name='fileRef'), ))
        if self.populatedSize is not None and 'populatedSize' not in already_processed:
            already_processed.append('populatedSize')
            outfile.write(' populatedSize="%s"' % self.gds_format_integer(self.populatedSize, input_name='populatedSize'))
        if self.diskId is not None and 'diskId' not in already_processed:
            already_processed.append('diskId')
            outfile.write(' diskId=%s' % (self.gds_format_string(quote_attrib(self.diskId).encode(ExternalEncoding), input_name='diskId'), ))
    def exportChildren(self, outfile, level, namespace_='ovf:', name_='VirtualDiskDesc_Type', fromsubclass_=False):
        for obj_ in self.anytypeobjs_:
            obj_.export(outfile, level, namespace_)
    def hasContent_(self):
        # True when there is at least one wildcard child to serialize.
        if (
            self.anytypeobjs_
            ):
            return True
        else:
            return False
    def exportLiteral(self, outfile, level, name_='VirtualDiskDesc_Type'):
        # Write this object as Python constructor-literal source.
        level += 1
        self.exportLiteralAttributes(outfile, level, [], name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)
    def exportLiteralAttributes(self, outfile, level, already_processed, name_):
        if self.capacityAllocationUnits is not None and 'capacityAllocationUnits' not in already_processed:
            already_processed.append('capacityAllocationUnits')
            showIndent(outfile, level)
            outfile.write('capacityAllocationUnits = "%s",\n' % (self.capacityAllocationUnits,))
        if self.capacity is not None and 'capacity' not in already_processed:
            already_processed.append('capacity')
            showIndent(outfile, level)
            outfile.write('capacity = "%s",\n' % (self.capacity,))
        if self.format is not None and 'format' not in already_processed:
            already_processed.append('format')
            showIndent(outfile, level)
            outfile.write('format = "%s",\n' % (self.format,))
        if self.parentRef is not None and 'parentRef' not in already_processed:
            already_processed.append('parentRef')
            showIndent(outfile, level)
            outfile.write('parentRef = "%s",\n' % (self.parentRef,))
        if self.fileRef is not None and 'fileRef' not in already_processed:
            already_processed.append('fileRef')
            showIndent(outfile, level)
            outfile.write('fileRef = "%s",\n' % (self.fileRef,))
        if self.populatedSize is not None and 'populatedSize' not in already_processed:
            already_processed.append('populatedSize')
            showIndent(outfile, level)
            outfile.write('populatedSize = %d,\n' % (self.populatedSize,))
        if self.diskId is not None and 'diskId' not in already_processed:
            already_processed.append('diskId')
            showIndent(outfile, level)
            outfile.write('diskId = "%s",\n' % (self.diskId,))
        for name, value in self.anyAttributes_.items():
            showIndent(outfile, level)
            outfile.write('%s = "%s",\n' % (name, value,))
    def exportLiteralChildren(self, outfile, level, name_):
        showIndent(outfile, level)
        outfile.write('anytypeobjs_=[\n')
        level += 1
        for anytypeobjs_ in self.anytypeobjs_:
            anytypeobjs_.exportLiteral(outfile, level)
        level -= 1
        showIndent(outfile, level)
        outfile.write('],\n')
    def build(self, node):
        # Populate this instance from an ElementTree element.
        self.buildAttributes(node, node.attrib, [])
        for child in node:
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_)
    def buildAttributes(self, node, attrs, already_processed):
        value = find_attr_value_('capacityAllocationUnits', node)
        if value is not None and 'capacityAllocationUnits' not in already_processed:
            already_processed.append('capacityAllocationUnits')
            self.capacityAllocationUnits = value
        value = find_attr_value_('capacity', node)
        if value is not None and 'capacity' not in already_processed:
            already_processed.append('capacity')
            self.capacity = value
        value = find_attr_value_('format', node)
        if value is not None and 'format' not in already_processed:
            already_processed.append('format')
            self.format = value
        value = find_attr_value_('parentRef', node)
        if value is not None and 'parentRef' not in already_processed:
            already_processed.append('parentRef')
            self.parentRef = value
        value = find_attr_value_('fileRef', node)
        if value is not None and 'fileRef' not in already_processed:
            already_processed.append('fileRef')
            self.fileRef = value
        value = find_attr_value_('populatedSize', node)
        if value is not None and 'populatedSize' not in already_processed:
            already_processed.append('populatedSize')
            try:
                self.populatedSize = int(value)
            # "except E as e" is valid on Python 2.6+ and required on
            # Python 3 (replaces the old "except E, e" form).
            except ValueError as exp:
                raise_parse_error(node, 'Bad integer attribute: %s' % exp)
        value = find_attr_value_('diskId', node)
        if value is not None and 'diskId' not in already_processed:
            already_processed.append('diskId')
            self.diskId = value
        # Any remaining attributes are captured as wildcard attributes.
        self.anyAttributes_ = {}
        for name, value in attrs.items():
            if name not in already_processed:
                self.anyAttributes_[name] = value
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        # All children of this type are wildcard (xs:any) content.
        obj_ = self.gds_build_any(child_, 'VirtualDiskDesc_Type')
        if obj_ is not None:
            self.add_anytypeobjs_(obj_)
# end class VirtualDiskDesc_Type
class OperatingSystemSection_Type(Section_Type):
    """Specification of the operating system installed in the
    guestIdentifier defined by the CIM_OperatingSystem.OsType
    enumerationVersion defined by the CIM_OperatingSystem.Version
    field"""
    subclass = None
    superclass = Section_Type
    def __init__(self, required=None, Info=None, version=None, id=None, Description=None, anytypeobjs_=None):
        super(OperatingSystemSection_Type, self).__init__(required, Info, )
        # XML attributes: version (string) and id (integer).
        self.version = _cast(None, version)
        self.id = _cast(int, id)
        # Optional Description child element (Msg_Type).
        self.Description = Description
        # Wildcard (xs:any) child elements.
        if anytypeobjs_ is None:
            self.anytypeobjs_ = []
        else:
            self.anytypeobjs_ = anytypeobjs_
    def factory(*args_, **kwargs_):
        if OperatingSystemSection_Type.subclass:
            return OperatingSystemSection_Type.subclass(*args_, **kwargs_)
        else:
            return OperatingSystemSection_Type(*args_, **kwargs_)
    factory = staticmethod(factory)
    def get_Description(self): return self.Description
    def set_Description(self, Description): self.Description = Description
    def get_anytypeobjs_(self): return self.anytypeobjs_
    def set_anytypeobjs_(self, anytypeobjs_): self.anytypeobjs_ = anytypeobjs_
    def add_anytypeobjs_(self, value): self.anytypeobjs_.append(value)
    # Fixed: previously assigned to "self._anytypeobjs_" (stray leading
    # underscore), which raised AttributeError; the attribute is
    # "anytypeobjs_", matching the other insert_* methods in this module.
    def insert_anytypeobjs_(self, index, value): self.anytypeobjs_[index] = value
    def get_version(self): return self.version
    def set_version(self, version): self.version = version
    def get_id(self): return self.id
    def set_id(self, id): self.id = id
    def export(self, outfile, level, namespace_='ovf:', name_='OperatingSystemSection_Type', namespacedef_=''):
        # Serialize this object as an XML element; self-close when empty.
        showIndent(outfile, level)
        outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
        already_processed = []
        self.exportAttributes(outfile, level, already_processed, namespace_, name_='OperatingSystemSection_Type')
        if self.hasContent_():
            outfile.write('>\n')
            self.exportChildren(outfile, level + 1, namespace_, name_)
            showIndent(outfile, level)
            outfile.write('</%s%s>\n' % (namespace_, name_))
        else:
            outfile.write('/>\n')
    def exportAttributes(self, outfile, level, already_processed, namespace_='ovf:', name_='OperatingSystemSection_Type'):
        super(OperatingSystemSection_Type, self).exportAttributes(outfile, level, already_processed, namespace_, name_='OperatingSystemSection_Type')
        if self.version is not None and 'version' not in already_processed:
            already_processed.append('version')
            outfile.write(' version=%s' % (self.gds_format_string(quote_attrib(self.version).encode(ExternalEncoding), input_name='version'), ))
        if self.id is not None and 'id' not in already_processed:
            already_processed.append('id')
            outfile.write(' id="%s"' % self.gds_format_integer(self.id, input_name='id'))
    def exportChildren(self, outfile, level, namespace_='ovf:', name_='OperatingSystemSection_Type', fromsubclass_=False):
        super(OperatingSystemSection_Type, self).exportChildren(outfile, level, namespace_, name_, True)
        if self.Description is not None:
            self.Description.export(outfile, level, namespace_, name_='Description')
        for obj_ in self.anytypeobjs_:
            obj_.export(outfile, level, namespace_)
    def hasContent_(self):
        if (
            self.Description is not None or
            self.anytypeobjs_ or
            super(OperatingSystemSection_Type, self).hasContent_()
            ):
            return True
        else:
            return False
    def exportLiteral(self, outfile, level, name_='OperatingSystemSection_Type'):
        # Write this object as Python constructor-literal source.
        level += 1
        self.exportLiteralAttributes(outfile, level, [], name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)
    def exportLiteralAttributes(self, outfile, level, already_processed, name_):
        if self.version is not None and 'version' not in already_processed:
            already_processed.append('version')
            showIndent(outfile, level)
            outfile.write('version = "%s",\n' % (self.version,))
        if self.id is not None and 'id' not in already_processed:
            already_processed.append('id')
            showIndent(outfile, level)
            outfile.write('id = %d,\n' % (self.id,))
        super(OperatingSystemSection_Type, self).exportLiteralAttributes(outfile, level, already_processed, name_)
    def exportLiteralChildren(self, outfile, level, name_):
        super(OperatingSystemSection_Type, self).exportLiteralChildren(outfile, level, name_)
        if self.Description is not None:
            showIndent(outfile, level)
            outfile.write('Description=model_.Msg_Type(\n')
            self.Description.exportLiteral(outfile, level, name_='Description')
            showIndent(outfile, level)
            outfile.write('),\n')
        showIndent(outfile, level)
        outfile.write('anytypeobjs_=[\n')
        level += 1
        for anytypeobjs_ in self.anytypeobjs_:
            anytypeobjs_.exportLiteral(outfile, level)
        level -= 1
        showIndent(outfile, level)
        outfile.write('],\n')
    def build(self, node):
        # Populate this instance from an ElementTree element.
        self.buildAttributes(node, node.attrib, [])
        for child in node:
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_)
    def buildAttributes(self, node, attrs, already_processed):
        value = find_attr_value_('version', node)
        if value is not None and 'version' not in already_processed:
            already_processed.append('version')
            self.version = value
        value = find_attr_value_('id', node)
        if value is not None and 'id' not in already_processed:
            already_processed.append('id')
            try:
                self.id = int(value)
            # "except E as e" is valid on Python 2.6+ and required on
            # Python 3 (replaces the old "except E, e" form).
            except ValueError as exp:
                raise_parse_error(node, 'Bad integer attribute: %s' % exp)
        super(OperatingSystemSection_Type, self).buildAttributes(node, attrs, already_processed)
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        if nodeName_ == 'Description':
            obj_ = Msg_Type.factory()
            obj_.build(child_)
            self.set_Description(obj_)
        else:
            # Unknown children are collected as wildcard (xs:any) content.
            obj_ = self.gds_build_any(child_, 'OperatingSystemSection_Type')
            if obj_ is not None:
                self.add_anytypeobjs_(obj_)
        super(OperatingSystemSection_Type, self).buildChildren(child_, node, nodeName_, True)
# end class OperatingSystemSection_Type
class EulaSection_Type(Section_Type):
    """End-User License Agreement.

    Holds an optional localizable License message (Msg_Type) plus any
    number of wildcard (xs:any) extension elements in ``anytypeobjs_``.
    """
    subclass = None
    superclass = Section_Type
    def __init__(self, required=None, Info=None, License=None, anytypeobjs_=None):
        super(EulaSection_Type, self).__init__(required, Info, )
        self.License = License
        if anytypeobjs_ is None:
            self.anytypeobjs_ = []
        else:
            self.anytypeobjs_ = anytypeobjs_
    def factory(*args_, **kwargs_):
        # Instantiate the registered subclass when one is installed,
        # otherwise this class itself.
        if EulaSection_Type.subclass:
            return EulaSection_Type.subclass(*args_, **kwargs_)
        else:
            return EulaSection_Type(*args_, **kwargs_)
    factory = staticmethod(factory)
    def get_License(self): return self.License
    def set_License(self, License): self.License = License
    def get_anytypeobjs_(self): return self.anytypeobjs_
    def set_anytypeobjs_(self, anytypeobjs_): self.anytypeobjs_ = anytypeobjs_
    def add_anytypeobjs_(self, value): self.anytypeobjs_.append(value)
    # Bug fix: the generated code referenced self._anytypeobjs_ (an
    # attribute that is never created), so calling insert raised
    # AttributeError instead of replacing the element at `index`.
    def insert_anytypeobjs_(self, index, value): self.anytypeobjs_[index] = value
    def export(self, outfile, level, namespace_='ovf:', name_='EulaSection_Type', namespacedef_=''):
        # Serialize this section as XML to outfile at the given indent level.
        showIndent(outfile, level)
        outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
        already_processed = []
        self.exportAttributes(outfile, level, already_processed, namespace_, name_='EulaSection_Type')
        if self.hasContent_():
            outfile.write('>\n')
            self.exportChildren(outfile, level + 1, namespace_, name_)
            showIndent(outfile, level)
            outfile.write('</%s%s>\n' % (namespace_, name_))
        else:
            outfile.write('/>\n')
    def exportAttributes(self, outfile, level, already_processed, namespace_='ovf:', name_='EulaSection_Type'):
        super(EulaSection_Type, self).exportAttributes(outfile, level, already_processed, namespace_, name_='EulaSection_Type')
    def exportChildren(self, outfile, level, namespace_='ovf:', name_='EulaSection_Type', fromsubclass_=False):
        super(EulaSection_Type, self).exportChildren(outfile, level, namespace_, name_, True)
        if self.License is not None:
            self.License.export(outfile, level, namespace_, name_='License', )
        for obj_ in self.anytypeobjs_:
            obj_.export(outfile, level, namespace_)
    def hasContent_(self):
        # True when anything must be serialized between the element tags.
        if (
            self.License is not None or
            self.anytypeobjs_ or
            super(EulaSection_Type, self).hasContent_()
            ):
            return True
        else:
            return False
    def exportLiteral(self, outfile, level, name_='EulaSection_Type'):
        level += 1
        self.exportLiteralAttributes(outfile, level, [], name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)
    def exportLiteralAttributes(self, outfile, level, already_processed, name_):
        super(EulaSection_Type, self).exportLiteralAttributes(outfile, level, already_processed, name_)
    def exportLiteralChildren(self, outfile, level, name_):
        super(EulaSection_Type, self).exportLiteralChildren(outfile, level, name_)
        if self.License is not None:
            showIndent(outfile, level)
            outfile.write('License=model_.Msg_Type(\n')
            self.License.exportLiteral(outfile, level, name_='License')
            showIndent(outfile, level)
            outfile.write('),\n')
        showIndent(outfile, level)
        outfile.write('anytypeobjs_=[\n')
        level += 1
        for anytypeobjs_ in self.anytypeobjs_:
            anytypeobjs_.exportLiteral(outfile, level)
        level -= 1
        showIndent(outfile, level)
        outfile.write('],\n')
    def build(self, node):
        # Populate this instance from a parsed ElementTree node.
        self.buildAttributes(node, node.attrib, [])
        for child in node:
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_)
    def buildAttributes(self, node, attrs, already_processed):
        super(EulaSection_Type, self).buildAttributes(node, attrs, already_processed)
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        # 'License' children become Msg_Type; everything else goes through
        # the xs:any wildcard handler.
        if nodeName_ == 'License':
            obj_ = Msg_Type.factory()
            obj_.build(child_)
            self.set_License(obj_)
        else:
            obj_ = self.gds_build_any(child_, 'EulaSection_Type')
            if obj_ is not None:
                self.add_anytypeobjs_(obj_)
        super(EulaSection_Type, self).buildChildren(child_, node, nodeName_, True)
# end class EulaSection_Type
class VirtualHardwareSection_Type(Section_Type):
    """Specifies virtual hardware requirements for a virtual machine.

    The ``id`` attribute is the unique identifier of this
    VirtualHardwareSection (within a VirtualSystem); ``transport`` is an
    additional string attribute.  Children: an optional VSSD_Type System
    element, a list of RASD_Type Item elements, and wildcard (xs:any)
    extension elements in ``anytypeobjs_``.
    """
    subclass = None
    superclass = Section_Type
    def __init__(self, required=None, Info=None, id='', transport=None, System=None, Item=None, anytypeobjs_=None):
        super(VirtualHardwareSection_Type, self).__init__(required, Info, )
        self.id = _cast(None, id)
        self.transport = _cast(None, transport)
        self.System = System
        if Item is None:
            self.Item = []
        else:
            self.Item = Item
        if anytypeobjs_ is None:
            self.anytypeobjs_ = []
        else:
            self.anytypeobjs_ = anytypeobjs_
    def factory(*args_, **kwargs_):
        # Instantiate the registered subclass when one is installed,
        # otherwise this class itself.
        if VirtualHardwareSection_Type.subclass:
            return VirtualHardwareSection_Type.subclass(*args_, **kwargs_)
        else:
            return VirtualHardwareSection_Type(*args_, **kwargs_)
    factory = staticmethod(factory)
    def get_System(self): return self.System
    def set_System(self, System): self.System = System
    def get_Item(self): return self.Item
    def set_Item(self, Item): self.Item = Item
    def add_Item(self, value): self.Item.append(value)
    def insert_Item(self, index, value): self.Item[index] = value
    def get_anytypeobjs_(self): return self.anytypeobjs_
    def set_anytypeobjs_(self, anytypeobjs_): self.anytypeobjs_ = anytypeobjs_
    def add_anytypeobjs_(self, value): self.anytypeobjs_.append(value)
    # Bug fix: the generated code referenced self._anytypeobjs_ (an
    # attribute that is never created), so calling insert raised
    # AttributeError; now mirrors insert_Item on the real list.
    def insert_anytypeobjs_(self, index, value): self.anytypeobjs_[index] = value
    def get_id(self): return self.id
    def set_id(self, id): self.id = id
    def get_transport(self): return self.transport
    def set_transport(self, transport): self.transport = transport
    def export(self, outfile, level, namespace_='ovf:', name_='VirtualHardwareSection_Type', namespacedef_=''):
        # Serialize this section as XML to outfile at the given indent level.
        showIndent(outfile, level)
        outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
        already_processed = []
        self.exportAttributes(outfile, level, already_processed, namespace_, name_='VirtualHardwareSection_Type')
        if self.hasContent_():
            outfile.write('>\n')
            self.exportChildren(outfile, level + 1, namespace_, name_)
            showIndent(outfile, level)
            outfile.write('</%s%s>\n' % (namespace_, name_))
        else:
            outfile.write('/>\n')
    def exportAttributes(self, outfile, level, already_processed, namespace_='ovf:', name_='VirtualHardwareSection_Type'):
        super(VirtualHardwareSection_Type, self).exportAttributes(outfile, level, already_processed, namespace_, name_='VirtualHardwareSection_Type')
        if self.id is not None and 'id' not in already_processed:
            already_processed.append('id')
            outfile.write(' id=%s' % (self.gds_format_string(quote_attrib(self.id).encode(ExternalEncoding), input_name='id'), ))
        if self.transport is not None and 'transport' not in already_processed:
            already_processed.append('transport')
            outfile.write(' transport=%s' % (self.gds_format_string(quote_attrib(self.transport).encode(ExternalEncoding), input_name='transport'), ))
    def exportChildren(self, outfile, level, namespace_='ovf:', name_='VirtualHardwareSection_Type', fromsubclass_=False):
        super(VirtualHardwareSection_Type, self).exportChildren(outfile, level, namespace_, name_, True)
        if self.System is not None:
            self.System.export(outfile, level, namespace_, name_='System')
        for Item_ in self.Item:
            Item_.export(outfile, level, namespace_, name_='Item')
        for obj_ in self.anytypeobjs_:
            obj_.export(outfile, level, namespace_)
    def hasContent_(self):
        # True when anything must be serialized between the element tags.
        if (
            self.System is not None or
            self.Item or
            self.anytypeobjs_ or
            super(VirtualHardwareSection_Type, self).hasContent_()
            ):
            return True
        else:
            return False
    def exportLiteral(self, outfile, level, name_='VirtualHardwareSection_Type'):
        level += 1
        self.exportLiteralAttributes(outfile, level, [], name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)
    def exportLiteralAttributes(self, outfile, level, already_processed, name_):
        if self.id is not None and 'id' not in already_processed:
            already_processed.append('id')
            showIndent(outfile, level)
            outfile.write('id = "%s",\n' % (self.id,))
        if self.transport is not None and 'transport' not in already_processed:
            already_processed.append('transport')
            showIndent(outfile, level)
            outfile.write('transport = "%s",\n' % (self.transport,))
        super(VirtualHardwareSection_Type, self).exportLiteralAttributes(outfile, level, already_processed, name_)
    def exportLiteralChildren(self, outfile, level, name_):
        super(VirtualHardwareSection_Type, self).exportLiteralChildren(outfile, level, name_)
        if self.System is not None:
            showIndent(outfile, level)
            outfile.write('System=model_.VSSD_Type(\n')
            self.System.exportLiteral(outfile, level, name_='System')
            showIndent(outfile, level)
            outfile.write('),\n')
        showIndent(outfile, level)
        outfile.write('Item=[\n')
        level += 1
        for Item_ in self.Item:
            showIndent(outfile, level)
            outfile.write('model_.RASD_Type(\n')
            Item_.exportLiteral(outfile, level, name_='RASD_Type')
            showIndent(outfile, level)
            outfile.write('),\n')
        level -= 1
        showIndent(outfile, level)
        outfile.write('],\n')
        showIndent(outfile, level)
        outfile.write('anytypeobjs_=[\n')
        level += 1
        for anytypeobjs_ in self.anytypeobjs_:
            anytypeobjs_.exportLiteral(outfile, level)
        level -= 1
        showIndent(outfile, level)
        outfile.write('],\n')
    def build(self, node):
        # Populate this instance from a parsed ElementTree node.
        self.buildAttributes(node, node.attrib, [])
        for child in node:
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_)
    def buildAttributes(self, node, attrs, already_processed):
        value = find_attr_value_('id', node)
        if value is not None and 'id' not in already_processed:
            already_processed.append('id')
            self.id = value
        value = find_attr_value_('transport', node)
        if value is not None and 'transport' not in already_processed:
            already_processed.append('transport')
            self.transport = value
        super(VirtualHardwareSection_Type, self).buildAttributes(node, attrs, already_processed)
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        # 'System' children become VSSD_Type, 'Item' children become
        # RASD_Type; everything else goes through the xs:any wildcard handler.
        if nodeName_ == 'System':
            obj_ = VSSD_Type.factory()
            obj_.build(child_)
            self.set_System(obj_)
        elif nodeName_ == 'Item':
            obj_ = RASD_Type.factory()
            obj_.build(child_)
            self.Item.append(obj_)
        else:
            obj_ = self.gds_build_any(child_, 'VirtualHardwareSection_Type')
            if obj_ is not None:
                self.add_anytypeobjs_(obj_)
        super(VirtualHardwareSection_Type, self).buildChildren(child_, node, nodeName_, True)
# end class VirtualHardwareSection_Type
class ResourceAllocationSection_Type(Section_Type):
    """Resource constraints on a VirtualSystemCollection.

    Contains a list of RASD_Type Item elements plus wildcard (xs:any)
    extension elements in ``anytypeobjs_``.
    """
    subclass = None
    superclass = Section_Type
    def __init__(self, required=None, Info=None, Item=None, anytypeobjs_=None):
        super(ResourceAllocationSection_Type, self).__init__(required, Info, )
        if Item is None:
            self.Item = []
        else:
            self.Item = Item
        if anytypeobjs_ is None:
            self.anytypeobjs_ = []
        else:
            self.anytypeobjs_ = anytypeobjs_
    def factory(*args_, **kwargs_):
        # Instantiate the registered subclass when one is installed,
        # otherwise this class itself.
        if ResourceAllocationSection_Type.subclass:
            return ResourceAllocationSection_Type.subclass(*args_, **kwargs_)
        else:
            return ResourceAllocationSection_Type(*args_, **kwargs_)
    factory = staticmethod(factory)
    def get_Item(self): return self.Item
    def set_Item(self, Item): self.Item = Item
    def add_Item(self, value): self.Item.append(value)
    def insert_Item(self, index, value): self.Item[index] = value
    def get_anytypeobjs_(self): return self.anytypeobjs_
    def set_anytypeobjs_(self, anytypeobjs_): self.anytypeobjs_ = anytypeobjs_
    def add_anytypeobjs_(self, value): self.anytypeobjs_.append(value)
    # Bug fix: the generated code referenced self._anytypeobjs_ (an
    # attribute that is never created), so calling insert raised
    # AttributeError; now mirrors insert_Item on the real list.
    def insert_anytypeobjs_(self, index, value): self.anytypeobjs_[index] = value
    def export(self, outfile, level, namespace_='ovf:', name_='ResourceAllocationSection_Type', namespacedef_=''):
        # Serialize this section as XML to outfile at the given indent level.
        showIndent(outfile, level)
        outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
        already_processed = []
        self.exportAttributes(outfile, level, already_processed, namespace_, name_='ResourceAllocationSection_Type')
        if self.hasContent_():
            outfile.write('>\n')
            self.exportChildren(outfile, level + 1, namespace_, name_)
            showIndent(outfile, level)
            outfile.write('</%s%s>\n' % (namespace_, name_))
        else:
            outfile.write('/>\n')
    def exportAttributes(self, outfile, level, already_processed, namespace_='ovf:', name_='ResourceAllocationSection_Type'):
        super(ResourceAllocationSection_Type, self).exportAttributes(outfile, level, already_processed, namespace_, name_='ResourceAllocationSection_Type')
    def exportChildren(self, outfile, level, namespace_='ovf:', name_='ResourceAllocationSection_Type', fromsubclass_=False):
        super(ResourceAllocationSection_Type, self).exportChildren(outfile, level, namespace_, name_, True)
        for Item_ in self.Item:
            Item_.export(outfile, level, namespace_, name_='Item')
        for obj_ in self.anytypeobjs_:
            obj_.export(outfile, level, namespace_)
    def hasContent_(self):
        # True when anything must be serialized between the element tags.
        if (
            self.Item or
            self.anytypeobjs_ or
            super(ResourceAllocationSection_Type, self).hasContent_()
            ):
            return True
        else:
            return False
    def exportLiteral(self, outfile, level, name_='ResourceAllocationSection_Type'):
        level += 1
        self.exportLiteralAttributes(outfile, level, [], name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)
    def exportLiteralAttributes(self, outfile, level, already_processed, name_):
        super(ResourceAllocationSection_Type, self).exportLiteralAttributes(outfile, level, already_processed, name_)
    def exportLiteralChildren(self, outfile, level, name_):
        super(ResourceAllocationSection_Type, self).exportLiteralChildren(outfile, level, name_)
        showIndent(outfile, level)
        outfile.write('Item=[\n')
        level += 1
        for Item_ in self.Item:
            showIndent(outfile, level)
            outfile.write('model_.RASD_Type(\n')
            Item_.exportLiteral(outfile, level, name_='RASD_Type')
            showIndent(outfile, level)
            outfile.write('),\n')
        level -= 1
        showIndent(outfile, level)
        outfile.write('],\n')
        showIndent(outfile, level)
        outfile.write('anytypeobjs_=[\n')
        level += 1
        for anytypeobjs_ in self.anytypeobjs_:
            anytypeobjs_.exportLiteral(outfile, level)
        level -= 1
        showIndent(outfile, level)
        outfile.write('],\n')
    def build(self, node):
        # Populate this instance from a parsed ElementTree node.
        self.buildAttributes(node, node.attrib, [])
        for child in node:
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_)
    def buildAttributes(self, node, attrs, already_processed):
        super(ResourceAllocationSection_Type, self).buildAttributes(node, attrs, already_processed)
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        # 'Item' children become RASD_Type; everything else goes through
        # the xs:any wildcard handler.
        if nodeName_ == 'Item':
            obj_ = RASD_Type.factory()
            obj_.build(child_)
            self.Item.append(obj_)
        else:
            obj_ = self.gds_build_any(child_, 'ResourceAllocationSection_Type')
            if obj_ is not None:
                self.add_anytypeobjs_(obj_)
        super(ResourceAllocationSection_Type, self).buildChildren(child_, node, nodeName_, True)
# end class ResourceAllocationSection_Type
class InstallSection_Type(Section_Type):
"""If present indicates that the virtual machine needs to be initially
booted to install and configure the softwareDelay in seconds to
wait for power off to complete after initial boot"""
subclass = None
superclass = Section_Type
def __init__(self, required=None, Info=None, initialBootStopDelay=0, anytypeobjs_=None):
super(InstallSection_Type, self).__init__(required, Info, )
self.initialBootStopDelay = _cast(int, initialBootStopDelay)
if anytypeobjs_ is None:
self.anytypeobjs_ = []
else:
self.anytypeobjs_ = anytypeobjs_
def factory(*args_, **kwargs_):
if InstallSection_Type.subclass:
return InstallSection_Type.subclass(*args_, **kwargs_)
else:
return InstallSection_Type(*args_, **kwargs_)
factory = staticmethod(factory)
def get_anytypeobjs_(self): return self.anytypeobjs_
def set_anytypeobjs_(self, anytypeobjs_): self.anytypeobjs_ = anytypeobjs_
def add_anytypeobjs_(self, value): self.anytypeobjs_.append(value)
def insert_anytypeobjs_(self, index, value): self._anytypeobjs_[index] = value
def get_initialBootStopDelay(self): return self.initialBootStopDelay
def set_initialBootStopDelay(self, initialBootStopDelay): self.initialBootStopDelay = initialBootStopDelay
def export(self, outfile, level, namespace_='ovf:', name_='InstallSection_Type', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = []
self.exportAttributes(outfile, level, already_processed, namespace_, name_='InstallSection_Type')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
showIndent(outfile, level)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='ovf:', name_='InstallSection_Type'):
super(InstallSection_Type, self).exportAttributes(outfile, level, already_processed, namespace_, name_='InstallSection_Type')
if self.initialBootStopDelay is not None and 'initialBootStopDelay' not in already_processed:
already_processed.append('initialBootStopDelay')
outfile.write(' initialBootStopDelay="%s"' % self.gds_format_integer(self.initialBootStopDelay, input_name='initialBootStopDelay'))
def exportChildren(self, outfile, level, namespace_='ovf:', name_='InstallSection_Type', fromsubclass_=False):
super(InstallSection_Type, self).exportChildren(outfile, level, namespace_, name_, True)
for obj_ in self.anytypeobjs_:
obj_.export(outfile, level, namespace_)
def hasContent_(self):
if (
self.anytypeobjs_ or
super(InstallSection_Type, self).hasContent_()
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='InstallSection_Type'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
if self.initialBootStopDelay is not None and 'initialBootStopDelay' not in already_processed:
already_processed.append('initialBootStopDelay')
showIndent(outfile, level)
outfile.write('initialBootStopDelay = %d,\n' % (self.initialBootStopDelay,))
super(InstallSection_Type, self).exportLiteralAttributes(outfile, level, already_processed, name_)
def exportLiteralChildren(self, outfile, level, name_):
super(InstallSection_Type, self).exportLiteralChildren(outfile, level, name_)
showIndent(outfile, level)
outfile.write('anytypeobjs_=[\n')
level += 1
for anytypeobjs_ in self.anytypeobjs_:
anytypeobjs_.exportLiteral(outfile, level)
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
def build(self, node):
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('initialBootStopDelay', node)
if value is not None and 'initialBootStopDelay' not in already_processed:
already_processed.append('initialBootStopDelay')
try:
self.initialBootStopDelay = int(value)
except ValueError, exp:
raise_parse_error(node, 'Bad integer attribute: %s' % exp)
super(InstallSection_Type, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
obj_ = self.gds_build_any(child_, 'InstallSection_Type')
if obj_ is not None:
self.add_anytypeobjs_(obj_)
super(InstallSection_Type, self).buildChildren(child_, node, nodeName_, True)
# end class InstallSection_Type
class StartupSection_Type(Section_Type):
    """Specifies the order in which entities in a VirtualSystemCollection
    are powered on and shut down.

    Contains a list of ItemType elements plus wildcard (xs:any)
    extension elements in ``anytypeobjs_``.
    """
    subclass = None
    superclass = Section_Type
    def __init__(self, required=None, Info=None, Item=None, anytypeobjs_=None):
        super(StartupSection_Type, self).__init__(required, Info, )
        if Item is None:
            self.Item = []
        else:
            self.Item = Item
        if anytypeobjs_ is None:
            self.anytypeobjs_ = []
        else:
            self.anytypeobjs_ = anytypeobjs_
    def factory(*args_, **kwargs_):
        # Instantiate the registered subclass when one is installed,
        # otherwise this class itself.
        if StartupSection_Type.subclass:
            return StartupSection_Type.subclass(*args_, **kwargs_)
        else:
            return StartupSection_Type(*args_, **kwargs_)
    factory = staticmethod(factory)
    def get_Item(self): return self.Item
    def set_Item(self, Item): self.Item = Item
    def add_Item(self, value): self.Item.append(value)
    def insert_Item(self, index, value): self.Item[index] = value
    def get_anytypeobjs_(self): return self.anytypeobjs_
    def set_anytypeobjs_(self, anytypeobjs_): self.anytypeobjs_ = anytypeobjs_
    def add_anytypeobjs_(self, value): self.anytypeobjs_.append(value)
    # Bug fix: the generated code referenced self._anytypeobjs_ (an
    # attribute that is never created), so calling insert raised
    # AttributeError; now mirrors insert_Item on the real list.
    def insert_anytypeobjs_(self, index, value): self.anytypeobjs_[index] = value
    def export(self, outfile, level, namespace_='ovf:', name_='StartupSection_Type', namespacedef_=''):
        # Serialize this section as XML to outfile at the given indent level.
        showIndent(outfile, level)
        outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
        already_processed = []
        self.exportAttributes(outfile, level, already_processed, namespace_, name_='StartupSection_Type')
        if self.hasContent_():
            outfile.write('>\n')
            self.exportChildren(outfile, level + 1, namespace_, name_)
            showIndent(outfile, level)
            outfile.write('</%s%s>\n' % (namespace_, name_))
        else:
            outfile.write('/>\n')
    def exportAttributes(self, outfile, level, already_processed, namespace_='ovf:', name_='StartupSection_Type'):
        super(StartupSection_Type, self).exportAttributes(outfile, level, already_processed, namespace_, name_='StartupSection_Type')
    def exportChildren(self, outfile, level, namespace_='ovf:', name_='StartupSection_Type', fromsubclass_=False):
        super(StartupSection_Type, self).exportChildren(outfile, level, namespace_, name_, True)
        for Item_ in self.Item:
            Item_.export(outfile, level, namespace_, name_='Item')
        for obj_ in self.anytypeobjs_:
            obj_.export(outfile, level, namespace_)
    def hasContent_(self):
        # True when anything must be serialized between the element tags.
        if (
            self.Item or
            self.anytypeobjs_ or
            super(StartupSection_Type, self).hasContent_()
            ):
            return True
        else:
            return False
    def exportLiteral(self, outfile, level, name_='StartupSection_Type'):
        level += 1
        self.exportLiteralAttributes(outfile, level, [], name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)
    def exportLiteralAttributes(self, outfile, level, already_processed, name_):
        super(StartupSection_Type, self).exportLiteralAttributes(outfile, level, already_processed, name_)
    def exportLiteralChildren(self, outfile, level, name_):
        super(StartupSection_Type, self).exportLiteralChildren(outfile, level, name_)
        showIndent(outfile, level)
        outfile.write('Item=[\n')
        level += 1
        for Item_ in self.Item:
            showIndent(outfile, level)
            outfile.write('model_.ItemType(\n')
            Item_.exportLiteral(outfile, level, name_='ItemType')
            showIndent(outfile, level)
            outfile.write('),\n')
        level -= 1
        showIndent(outfile, level)
        outfile.write('],\n')
        showIndent(outfile, level)
        outfile.write('anytypeobjs_=[\n')
        level += 1
        for anytypeobjs_ in self.anytypeobjs_:
            anytypeobjs_.exportLiteral(outfile, level)
        level -= 1
        showIndent(outfile, level)
        outfile.write('],\n')
    def build(self, node):
        # Populate this instance from a parsed ElementTree node.
        self.buildAttributes(node, node.attrib, [])
        for child in node:
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_)
    def buildAttributes(self, node, attrs, already_processed):
        super(StartupSection_Type, self).buildAttributes(node, attrs, already_processed)
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        # 'Item' children become ItemType; everything else goes through
        # the xs:any wildcard handler.
        if nodeName_ == 'Item':
            obj_ = ItemType.factory()
            obj_.build(child_)
            self.Item.append(obj_)
        else:
            obj_ = self.gds_build_any(child_, 'StartupSection_Type')
            if obj_ is not None:
                self.add_anytypeobjs_(obj_)
        super(StartupSection_Type, self).buildChildren(child_, node, nodeName_, True)
# end class StartupSection_Type
class DeploymentOptionSection_Type(Section_Type):
    """Enumeration of discrete deployment options.

    Contains a list of ConfigurationType elements plus wildcard (xs:any)
    extension elements in ``anytypeobjs_``.
    """
    subclass = None
    superclass = Section_Type
    def __init__(self, required=None, Info=None, Configuration=None, anytypeobjs_=None):
        super(DeploymentOptionSection_Type, self).__init__(required, Info, )
        if Configuration is None:
            self.Configuration = []
        else:
            self.Configuration = Configuration
        if anytypeobjs_ is None:
            self.anytypeobjs_ = []
        else:
            self.anytypeobjs_ = anytypeobjs_
    def factory(*args_, **kwargs_):
        # Instantiate the registered subclass when one is installed,
        # otherwise this class itself.
        if DeploymentOptionSection_Type.subclass:
            return DeploymentOptionSection_Type.subclass(*args_, **kwargs_)
        else:
            return DeploymentOptionSection_Type(*args_, **kwargs_)
    factory = staticmethod(factory)
    def get_Configuration(self): return self.Configuration
    def set_Configuration(self, Configuration): self.Configuration = Configuration
    def add_Configuration(self, value): self.Configuration.append(value)
    def insert_Configuration(self, index, value): self.Configuration[index] = value
    def get_anytypeobjs_(self): return self.anytypeobjs_
    def set_anytypeobjs_(self, anytypeobjs_): self.anytypeobjs_ = anytypeobjs_
    def add_anytypeobjs_(self, value): self.anytypeobjs_.append(value)
    # Bug fix: the generated code referenced self._anytypeobjs_ (an
    # attribute that is never created), so calling insert raised
    # AttributeError; now mirrors insert_Configuration on the real list.
    def insert_anytypeobjs_(self, index, value): self.anytypeobjs_[index] = value
    def export(self, outfile, level, namespace_='ovf:', name_='DeploymentOptionSection_Type', namespacedef_=''):
        # Serialize this section as XML to outfile at the given indent level.
        showIndent(outfile, level)
        outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
        already_processed = []
        self.exportAttributes(outfile, level, already_processed, namespace_, name_='DeploymentOptionSection_Type')
        if self.hasContent_():
            outfile.write('>\n')
            self.exportChildren(outfile, level + 1, namespace_, name_)
            showIndent(outfile, level)
            outfile.write('</%s%s>\n' % (namespace_, name_))
        else:
            outfile.write('/>\n')
    def exportAttributes(self, outfile, level, already_processed, namespace_='ovf:', name_='DeploymentOptionSection_Type'):
        super(DeploymentOptionSection_Type, self).exportAttributes(outfile, level, already_processed, namespace_, name_='DeploymentOptionSection_Type')
    def exportChildren(self, outfile, level, namespace_='ovf:', name_='DeploymentOptionSection_Type', fromsubclass_=False):
        super(DeploymentOptionSection_Type, self).exportChildren(outfile, level, namespace_, name_, True)
        for Configuration_ in self.Configuration:
            Configuration_.export(outfile, level, namespace_, name_='Configuration')
        for obj_ in self.anytypeobjs_:
            obj_.export(outfile, level, namespace_)
    def hasContent_(self):
        # True when anything must be serialized between the element tags.
        if (
            self.Configuration or
            self.anytypeobjs_ or
            super(DeploymentOptionSection_Type, self).hasContent_()
            ):
            return True
        else:
            return False
    def exportLiteral(self, outfile, level, name_='DeploymentOptionSection_Type'):
        level += 1
        self.exportLiteralAttributes(outfile, level, [], name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)
    def exportLiteralAttributes(self, outfile, level, already_processed, name_):
        super(DeploymentOptionSection_Type, self).exportLiteralAttributes(outfile, level, already_processed, name_)
    def exportLiteralChildren(self, outfile, level, name_):
        super(DeploymentOptionSection_Type, self).exportLiteralChildren(outfile, level, name_)
        showIndent(outfile, level)
        outfile.write('Configuration=[\n')
        level += 1
        for Configuration_ in self.Configuration:
            showIndent(outfile, level)
            outfile.write('model_.ConfigurationType(\n')
            Configuration_.exportLiteral(outfile, level, name_='ConfigurationType')
            showIndent(outfile, level)
            outfile.write('),\n')
        level -= 1
        showIndent(outfile, level)
        outfile.write('],\n')
        showIndent(outfile, level)
        outfile.write('anytypeobjs_=[\n')
        level += 1
        for anytypeobjs_ in self.anytypeobjs_:
            anytypeobjs_.exportLiteral(outfile, level)
        level -= 1
        showIndent(outfile, level)
        outfile.write('],\n')
    def build(self, node):
        # Populate this instance from a parsed ElementTree node.
        self.buildAttributes(node, node.attrib, [])
        for child in node:
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_)
    def buildAttributes(self, node, attrs, already_processed):
        super(DeploymentOptionSection_Type, self).buildAttributes(node, attrs, already_processed)
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        # 'Configuration' children become ConfigurationType; everything else
        # goes through the xs:any wildcard handler.
        if nodeName_ == 'Configuration':
            obj_ = ConfigurationType.factory()
            obj_.build(child_)
            self.Configuration.append(obj_)
        else:
            obj_ = self.gds_build_any(child_, 'DeploymentOptionSection_Type')
            if obj_ is not None:
                self.add_anytypeobjs_(obj_)
        super(DeploymentOptionSection_Type, self).buildChildren(child_, node, nodeName_, True)
# end class DeploymentOptionSection_Type
class cimDateTime(GeneratedsSuper):
subclass = None
superclass = None
    def __init__(self, CIM_DateTime=None, Interval=None, Date=None, Time=None, Datetime=None):
        # Each optional child value defaults to None; buildChildren fills in
        # whichever one appears in the parsed element.
        self.CIM_DateTime = CIM_DateTime
        self.Interval = Interval
        self.Date = Date
        self.Time = Time
        self.Datetime = Datetime
        # Wildcard XML attributes (xs:anyAttribute), keyed by (possibly
        # Clark-notation-qualified) attribute name.
        self.anyAttributes_ = {}
    def factory(*args_, **kwargs_):
        # Instantiate the registered subclass when one is installed,
        # otherwise this class itself.
        if cimDateTime.subclass:
            return cimDateTime.subclass(*args_, **kwargs_)
        else:
            return cimDateTime(*args_, **kwargs_)
    factory = staticmethod(factory)
    # Generated accessors for each child value and the wildcard attributes.
    def get_CIM_DateTime(self): return self.CIM_DateTime
    def set_CIM_DateTime(self, CIM_DateTime): self.CIM_DateTime = CIM_DateTime
    def get_Interval(self): return self.Interval
    def set_Interval(self, Interval): self.Interval = Interval
    def get_Date(self): return self.Date
    def set_Date(self, Date): self.Date = Date
    def get_Time(self): return self.Time
    def set_Time(self, Time): self.Time = Time
    def get_Datetime(self): return self.Datetime
    def set_Datetime(self, Datetime): self.Datetime = Datetime
    def get_anyAttributes_(self): return self.anyAttributes_
    def set_anyAttributes_(self, anyAttributes_): self.anyAttributes_ = anyAttributes_
    def export(self, outfile, level, namespace_='ovf:', name_='cimDateTime', namespacedef_=''):
        # Serialize this element as XML: open tag with attributes, then
        # either children plus a closing tag, or a self-closing tag when
        # there is no content.
        showIndent(outfile, level)
        outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
        already_processed = []
        self.exportAttributes(outfile, level, already_processed, namespace_, name_='cimDateTime')
        if self.hasContent_():
            outfile.write('>\n')
            self.exportChildren(outfile, level + 1, namespace_, name_)
            showIndent(outfile, level)
            outfile.write('</%s%s>\n' % (namespace_, name_))
        else:
            outfile.write('/>\n')
    def exportAttributes(self, outfile, level, already_processed, namespace_='ovf:', name_='cimDateTime'):
        # Re-emit the wildcard attributes captured at parse time.  Names may
        # arrive in ElementTree Clark notation ('{uri}local'); each handled
        # name is recorded in already_processed to avoid duplicates.
        unique_counter = 0
        for name, value in self.anyAttributes_.items():
            xsinamespaceprefix = 'xsi'
            xsinamespace1 = 'http://www.w3.org/2001/XMLSchema-instance'
            xsinamespace2 = '{%s}' % (xsinamespace1, )
            if name.startswith(xsinamespace2):
                # XSI attribute: rewrite the Clark prefix as 'xsi:<local>'.
                name1 = name[len(xsinamespace2):]
                name2 = '%s:%s' % (xsinamespaceprefix, name1, )
                if name2 not in already_processed:
                    already_processed.append(name2)
                    outfile.write(' %s=%s' % (name2, quote_attrib(value), ))
            else:
                mo = re_.match(Namespace_extract_pat_, name)
                if mo is not None:
                    # Attribute qualified by some other namespace URI.
                    namespace, name = mo.group(1, 2)
                    if name not in already_processed:
                        already_processed.append(name)
                        if namespace == 'http://www.w3.org/XML/1998/namespace':
                            # The 'xml' namespace is predeclared; no xmlns
                            # declaration is needed.
                            outfile.write(' %s=%s' % (name, quote_attrib(value), ))
                        else:
                            # Declare a synthetic prefix (yyy1, yyy2, ...)
                            # for each foreign-namespace attribute.
                            unique_counter += 1
                            outfile.write(' xmlns:yyy%d="%s"' % (unique_counter, namespace, ))
                            outfile.write(' yyy%d:%s=%s' % (unique_counter, name, quote_attrib(value), ))
                else:
                    # Unqualified attribute: emit as-is.
                    if name not in already_processed:
                        already_processed.append(name)
                        outfile.write(' %s=%s' % (name, quote_attrib(value), ))
        pass
    def exportChildren(self, outfile, level, namespace_='ovf:', name_='cimDateTime', fromsubclass_=False):
        # Emit each non-None child as a simple text element, XML-escaped
        # and encoded per ExternalEncoding.
        if self.CIM_DateTime is not None:
            showIndent(outfile, level)
            outfile.write('<%sCIM_DateTime>%s</%sCIM_DateTime>\n' % (namespace_, self.gds_format_string(quote_xml(self.CIM_DateTime).encode(ExternalEncoding), input_name='CIM_DateTime'), namespace_))
        if self.Interval is not None:
            showIndent(outfile, level)
            outfile.write('<%sInterval>%s</%sInterval>\n' % (namespace_, self.gds_format_string(quote_xml(self.Interval).encode(ExternalEncoding), input_name='Interval'), namespace_))
        if self.Date is not None:
            showIndent(outfile, level)
            outfile.write('<%sDate>%s</%sDate>\n' % (namespace_, self.gds_format_string(quote_xml(self.Date).encode(ExternalEncoding), input_name='Date'), namespace_))
        if self.Time is not None:
            showIndent(outfile, level)
            outfile.write('<%sTime>%s</%sTime>\n' % (namespace_, self.gds_format_string(quote_xml(self.Time).encode(ExternalEncoding), input_name='Time'), namespace_))
        if self.Datetime is not None:
            showIndent(outfile, level)
            outfile.write('<%sDatetime>%s</%sDatetime>\n' % (namespace_, self.gds_format_string(quote_xml(self.Datetime).encode(ExternalEncoding), input_name='Datetime'), namespace_))
    def hasContent_(self):
        # True when at least one child value would be serialized.
        if (
            self.CIM_DateTime is not None or
            self.Interval is not None or
            self.Date is not None or
            self.Time is not None or
            self.Datetime is not None
            ):
            return True
        else:
            return False
    def exportLiteral(self, outfile, level, name_='cimDateTime'):
        # Write this instance as Python-literal (constructor keyword) source.
        level += 1
        self.exportLiteralAttributes(outfile, level, [], name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)
    def exportLiteralAttributes(self, outfile, level, already_processed, name_):
        # Wildcard attributes become simple keyword arguments.
        for name, value in self.anyAttributes_.items():
            showIndent(outfile, level)
            outfile.write('%s = "%s",\n' % (name, value,))
    def exportLiteralChildren(self, outfile, level, name_):
        # Each non-None child becomes a quoted-string keyword argument.
        if self.CIM_DateTime is not None:
            showIndent(outfile, level)
            outfile.write('CIM_DateTime=%s,\n' % quote_python(self.CIM_DateTime).encode(ExternalEncoding))
        if self.Interval is not None:
            showIndent(outfile, level)
            outfile.write('Interval=%s,\n' % quote_python(self.Interval).encode(ExternalEncoding))
        if self.Date is not None:
            showIndent(outfile, level)
            outfile.write('Date=%s,\n' % quote_python(self.Date).encode(ExternalEncoding))
        if self.Time is not None:
            showIndent(outfile, level)
            outfile.write('Time=%s,\n' % quote_python(self.Time).encode(ExternalEncoding))
        if self.Datetime is not None:
            showIndent(outfile, level)
            outfile.write('Datetime=%s,\n' % quote_python(self.Datetime).encode(ExternalEncoding))
    def build(self, node):
        # Populate this instance from a parsed ElementTree node.
        self.buildAttributes(node, node.attrib, [])
        for child in node:
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
self.anyAttributes_ = {}
for name, value in attrs.items():
if name not in already_processed:
self.anyAttributes_[name] = value
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'CIM_DateTime':
CIM_DateTime_ = child_.text
CIM_DateTime_ = self.gds_validate_string(CIM_DateTime_, node, 'CIM_DateTime')
self.CIM_DateTime = CIM_DateTime_
elif nodeName_ == 'Interval':
Interval_ = child_.text
Interval_ = self.gds_validate_string(Interval_, node, 'Interval')
self.Interval = Interval_
elif nodeName_ == 'Date':
Date_ = child_.text
Date_ = self.gds_validate_string(Date_, node, 'Date')
self.Date = Date_
elif nodeName_ == 'Time':
Time_ = child_.text
Time_ = self.gds_validate_string(Time_, node, 'Time')
self.Time = Time_
elif nodeName_ == 'Datetime':
Datetime_ = child_.text
Datetime_ = self.gds_validate_string(Datetime_, node, 'Datetime')
self.Datetime = Datetime_
# end class cimDateTime
class cimUnsignedByte(GeneratedsSuper):
    """Generated binding for the CIM ``unsignedByte`` simple type:
    element text content plus a wildcard bag of XML attributes."""
    subclass = None
    superclass = None
    def __init__(self, valueOf_=None):
        self.valueOf_ = valueOf_    # element text content
        self.anyAttributes_ = {}    # wildcard attributes (Clark-notation keys)
    def factory(*args_, **kwargs_):
        # Route construction through the registered subclass, if any.
        cls_ = cimUnsignedByte.subclass or cimUnsignedByte
        return cls_(*args_, **kwargs_)
    factory = staticmethod(factory)
    def get_valueOf_(self): return self.valueOf_
    def set_valueOf_(self, valueOf_): self.valueOf_ = valueOf_
    def get_anyAttributes_(self): return self.anyAttributes_
    def set_anyAttributes_(self, anyAttributes_): self.anyAttributes_ = anyAttributes_
    def export(self, outfile, level, namespace_='ovf:', name_='cimUnsignedByte', namespacedef_=''):
        # Serialize as XML; collapses to a self-closing tag when empty.
        showIndent(outfile, level)
        outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
        already_processed = []
        self.exportAttributes(outfile, level, already_processed, namespace_, name_='cimUnsignedByte')
        if not self.hasContent_():
            outfile.write('/>\n')
            return
        outfile.write('>')
        outfile.write(str(self.valueOf_).encode(ExternalEncoding))
        self.exportChildren(outfile, level + 1, namespace_, name_)
        outfile.write('</%s%s>\n' % (namespace_, name_))
    def exportAttributes(self, outfile, level, already_processed, namespace_='ovf:', name_='cimUnsignedByte'):
        # Emit wildcard attributes, mapping Clark-notation names back to
        # prefixed form (xsi:, xml:, or synthetic yyyN: prefixes).
        unique_counter = 0
        xsi_clark = '{%s}' % ('http://www.w3.org/2001/XMLSchema-instance', )
        for attr_name, attr_value in self.anyAttributes_.items():
            if attr_name.startswith(xsi_clark):
                prefixed = 'xsi:%s' % (attr_name[len(xsi_clark):], )
                if prefixed not in already_processed:
                    already_processed.append(prefixed)
                    outfile.write(' %s=%s' % (prefixed, quote_attrib(attr_value), ))
                continue
            mo = re_.match(Namespace_extract_pat_, attr_name)
            if mo is None:
                if attr_name not in already_processed:
                    already_processed.append(attr_name)
                    outfile.write(' %s=%s' % (attr_name, quote_attrib(attr_value), ))
                continue
            uri, local = mo.group(1, 2)
            if local in already_processed:
                continue
            already_processed.append(local)
            if uri == 'http://www.w3.org/XML/1998/namespace':
                # xml: namespace needs no declaration.
                outfile.write(' %s=%s' % (local, quote_attrib(attr_value), ))
            else:
                unique_counter += 1
                outfile.write(' xmlns:yyy%d="%s"' % (unique_counter, uri, ))
                outfile.write(' yyy%d:%s=%s' % (unique_counter, local, quote_attrib(attr_value), ))
    def exportChildren(self, outfile, level, namespace_='ovf:', name_='cimUnsignedByte', fromsubclass_=False):
        # Simple type: no child elements.
        pass
    def hasContent_(self):
        # Truthy text content counts as content.
        return bool(self.valueOf_)
    def exportLiteral(self, outfile, level, name_='cimUnsignedByte'):
        # Render this instance as nested Python-literal text.
        level += 1
        self.exportLiteralAttributes(outfile, level, [], name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)
            showIndent(outfile, level)
            outfile.write('valueOf_ = """%s""",\n' % (self.valueOf_,))
    def exportLiteralAttributes(self, outfile, level, already_processed, name_):
        for attr_name, attr_value in self.anyAttributes_.items():
            showIndent(outfile, level)
            outfile.write('%s = "%s",\n' % (attr_name, attr_value,))
    def exportLiteralChildren(self, outfile, level, name_):
        pass
    def build(self, node):
        # Populate this instance from an ElementTree node.
        self.buildAttributes(node, node.attrib, [])
        self.valueOf_ = get_all_text_(node)
        for child_ in node:
            localName_ = Tag_pattern_.match(child_.tag).groups()[-1]
            self.buildChildren(child_, node, localName_)
    def buildAttributes(self, node, attrs, already_processed):
        # Keep every attribute the caller has not already consumed.
        self.anyAttributes_ = dict(
            (attr_name, attr_value)
            for attr_name, attr_value in attrs.items()
            if attr_name not in already_processed)
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        pass
# end class cimUnsignedByte
class cimByte(GeneratedsSuper):
    """Generated binding for the CIM ``byte`` simple type:
    element text content plus a wildcard bag of XML attributes."""
    subclass = None
    superclass = None
    def __init__(self, valueOf_=None):
        self.valueOf_ = valueOf_    # element text content
        self.anyAttributes_ = {}    # wildcard attributes (Clark-notation keys)
    def factory(*args_, **kwargs_):
        # Route construction through the registered subclass, if any.
        cls_ = cimByte.subclass or cimByte
        return cls_(*args_, **kwargs_)
    factory = staticmethod(factory)
    def get_valueOf_(self): return self.valueOf_
    def set_valueOf_(self, valueOf_): self.valueOf_ = valueOf_
    def get_anyAttributes_(self): return self.anyAttributes_
    def set_anyAttributes_(self, anyAttributes_): self.anyAttributes_ = anyAttributes_
    def export(self, outfile, level, namespace_='ovf:', name_='cimByte', namespacedef_=''):
        # Serialize as XML; collapses to a self-closing tag when empty.
        showIndent(outfile, level)
        outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
        already_processed = []
        self.exportAttributes(outfile, level, already_processed, namespace_, name_='cimByte')
        if not self.hasContent_():
            outfile.write('/>\n')
            return
        outfile.write('>')
        outfile.write(str(self.valueOf_).encode(ExternalEncoding))
        self.exportChildren(outfile, level + 1, namespace_, name_)
        outfile.write('</%s%s>\n' % (namespace_, name_))
    def exportAttributes(self, outfile, level, already_processed, namespace_='ovf:', name_='cimByte'):
        # Emit wildcard attributes, mapping Clark-notation names back to
        # prefixed form (xsi:, xml:, or synthetic yyyN: prefixes).
        unique_counter = 0
        xsi_clark = '{%s}' % ('http://www.w3.org/2001/XMLSchema-instance', )
        for attr_name, attr_value in self.anyAttributes_.items():
            if attr_name.startswith(xsi_clark):
                prefixed = 'xsi:%s' % (attr_name[len(xsi_clark):], )
                if prefixed not in already_processed:
                    already_processed.append(prefixed)
                    outfile.write(' %s=%s' % (prefixed, quote_attrib(attr_value), ))
                continue
            mo = re_.match(Namespace_extract_pat_, attr_name)
            if mo is None:
                if attr_name not in already_processed:
                    already_processed.append(attr_name)
                    outfile.write(' %s=%s' % (attr_name, quote_attrib(attr_value), ))
                continue
            uri, local = mo.group(1, 2)
            if local in already_processed:
                continue
            already_processed.append(local)
            if uri == 'http://www.w3.org/XML/1998/namespace':
                # xml: namespace needs no declaration.
                outfile.write(' %s=%s' % (local, quote_attrib(attr_value), ))
            else:
                unique_counter += 1
                outfile.write(' xmlns:yyy%d="%s"' % (unique_counter, uri, ))
                outfile.write(' yyy%d:%s=%s' % (unique_counter, local, quote_attrib(attr_value), ))
    def exportChildren(self, outfile, level, namespace_='ovf:', name_='cimByte', fromsubclass_=False):
        # Simple type: no child elements.
        pass
    def hasContent_(self):
        # Truthy text content counts as content.
        return bool(self.valueOf_)
    def exportLiteral(self, outfile, level, name_='cimByte'):
        # Render this instance as nested Python-literal text.
        level += 1
        self.exportLiteralAttributes(outfile, level, [], name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)
            showIndent(outfile, level)
            outfile.write('valueOf_ = """%s""",\n' % (self.valueOf_,))
    def exportLiteralAttributes(self, outfile, level, already_processed, name_):
        for attr_name, attr_value in self.anyAttributes_.items():
            showIndent(outfile, level)
            outfile.write('%s = "%s",\n' % (attr_name, attr_value,))
    def exportLiteralChildren(self, outfile, level, name_):
        pass
    def build(self, node):
        # Populate this instance from an ElementTree node.
        self.buildAttributes(node, node.attrib, [])
        self.valueOf_ = get_all_text_(node)
        for child_ in node:
            localName_ = Tag_pattern_.match(child_.tag).groups()[-1]
            self.buildChildren(child_, node, localName_)
    def buildAttributes(self, node, attrs, already_processed):
        # Keep every attribute the caller has not already consumed.
        self.anyAttributes_ = dict(
            (attr_name, attr_value)
            for attr_name, attr_value in attrs.items()
            if attr_name not in already_processed)
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        pass
# end class cimByte
class cimUnsignedShort(GeneratedsSuper):
    """Generated binding for the CIM ``unsignedShort`` simple type:
    element text content plus a wildcard bag of XML attributes."""
    subclass = None
    superclass = None
    def __init__(self, valueOf_=None):
        self.valueOf_ = valueOf_    # element text content
        self.anyAttributes_ = {}    # wildcard attributes (Clark-notation keys)
    def factory(*args_, **kwargs_):
        # Route construction through the registered subclass, if any.
        cls_ = cimUnsignedShort.subclass or cimUnsignedShort
        return cls_(*args_, **kwargs_)
    factory = staticmethod(factory)
    def get_valueOf_(self): return self.valueOf_
    def set_valueOf_(self, valueOf_): self.valueOf_ = valueOf_
    def get_anyAttributes_(self): return self.anyAttributes_
    def set_anyAttributes_(self, anyAttributes_): self.anyAttributes_ = anyAttributes_
    def export(self, outfile, level, namespace_='ovf:', name_='cimUnsignedShort', namespacedef_=''):
        # Serialize as XML; collapses to a self-closing tag when empty.
        showIndent(outfile, level)
        outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
        already_processed = []
        self.exportAttributes(outfile, level, already_processed, namespace_, name_='cimUnsignedShort')
        if not self.hasContent_():
            outfile.write('/>\n')
            return
        outfile.write('>')
        outfile.write(str(self.valueOf_).encode(ExternalEncoding))
        self.exportChildren(outfile, level + 1, namespace_, name_)
        outfile.write('</%s%s>\n' % (namespace_, name_))
    def exportAttributes(self, outfile, level, already_processed, namespace_='ovf:', name_='cimUnsignedShort'):
        # Emit wildcard attributes, mapping Clark-notation names back to
        # prefixed form (xsi:, xml:, or synthetic yyyN: prefixes).
        unique_counter = 0
        xsi_clark = '{%s}' % ('http://www.w3.org/2001/XMLSchema-instance', )
        for attr_name, attr_value in self.anyAttributes_.items():
            if attr_name.startswith(xsi_clark):
                prefixed = 'xsi:%s' % (attr_name[len(xsi_clark):], )
                if prefixed not in already_processed:
                    already_processed.append(prefixed)
                    outfile.write(' %s=%s' % (prefixed, quote_attrib(attr_value), ))
                continue
            mo = re_.match(Namespace_extract_pat_, attr_name)
            if mo is None:
                if attr_name not in already_processed:
                    already_processed.append(attr_name)
                    outfile.write(' %s=%s' % (attr_name, quote_attrib(attr_value), ))
                continue
            uri, local = mo.group(1, 2)
            if local in already_processed:
                continue
            already_processed.append(local)
            if uri == 'http://www.w3.org/XML/1998/namespace':
                # xml: namespace needs no declaration.
                outfile.write(' %s=%s' % (local, quote_attrib(attr_value), ))
            else:
                unique_counter += 1
                outfile.write(' xmlns:yyy%d="%s"' % (unique_counter, uri, ))
                outfile.write(' yyy%d:%s=%s' % (unique_counter, local, quote_attrib(attr_value), ))
    def exportChildren(self, outfile, level, namespace_='ovf:', name_='cimUnsignedShort', fromsubclass_=False):
        # Simple type: no child elements.
        pass
    def hasContent_(self):
        # Truthy text content counts as content.
        return bool(self.valueOf_)
    def exportLiteral(self, outfile, level, name_='cimUnsignedShort'):
        # Render this instance as nested Python-literal text.
        level += 1
        self.exportLiteralAttributes(outfile, level, [], name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)
            showIndent(outfile, level)
            outfile.write('valueOf_ = """%s""",\n' % (self.valueOf_,))
    def exportLiteralAttributes(self, outfile, level, already_processed, name_):
        for attr_name, attr_value in self.anyAttributes_.items():
            showIndent(outfile, level)
            outfile.write('%s = "%s",\n' % (attr_name, attr_value,))
    def exportLiteralChildren(self, outfile, level, name_):
        pass
    def build(self, node):
        # Populate this instance from an ElementTree node.
        self.buildAttributes(node, node.attrib, [])
        self.valueOf_ = get_all_text_(node)
        for child_ in node:
            localName_ = Tag_pattern_.match(child_.tag).groups()[-1]
            self.buildChildren(child_, node, localName_)
    def buildAttributes(self, node, attrs, already_processed):
        # Keep every attribute the caller has not already consumed.
        self.anyAttributes_ = dict(
            (attr_name, attr_value)
            for attr_name, attr_value in attrs.items()
            if attr_name not in already_processed)
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        pass
# end class cimUnsignedShort
class cimShort(GeneratedsSuper):
    """Generated binding for the CIM ``short`` simple type:
    element text content plus a wildcard bag of XML attributes."""
    subclass = None
    superclass = None
    def __init__(self, valueOf_=None):
        self.valueOf_ = valueOf_    # element text content
        self.anyAttributes_ = {}    # wildcard attributes (Clark-notation keys)
    def factory(*args_, **kwargs_):
        # Route construction through the registered subclass, if any.
        cls_ = cimShort.subclass or cimShort
        return cls_(*args_, **kwargs_)
    factory = staticmethod(factory)
    def get_valueOf_(self): return self.valueOf_
    def set_valueOf_(self, valueOf_): self.valueOf_ = valueOf_
    def get_anyAttributes_(self): return self.anyAttributes_
    def set_anyAttributes_(self, anyAttributes_): self.anyAttributes_ = anyAttributes_
    def export(self, outfile, level, namespace_='ovf:', name_='cimShort', namespacedef_=''):
        # Serialize as XML; collapses to a self-closing tag when empty.
        showIndent(outfile, level)
        outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
        already_processed = []
        self.exportAttributes(outfile, level, already_processed, namespace_, name_='cimShort')
        if not self.hasContent_():
            outfile.write('/>\n')
            return
        outfile.write('>')
        outfile.write(str(self.valueOf_).encode(ExternalEncoding))
        self.exportChildren(outfile, level + 1, namespace_, name_)
        outfile.write('</%s%s>\n' % (namespace_, name_))
    def exportAttributes(self, outfile, level, already_processed, namespace_='ovf:', name_='cimShort'):
        # Emit wildcard attributes, mapping Clark-notation names back to
        # prefixed form (xsi:, xml:, or synthetic yyyN: prefixes).
        unique_counter = 0
        xsi_clark = '{%s}' % ('http://www.w3.org/2001/XMLSchema-instance', )
        for attr_name, attr_value in self.anyAttributes_.items():
            if attr_name.startswith(xsi_clark):
                prefixed = 'xsi:%s' % (attr_name[len(xsi_clark):], )
                if prefixed not in already_processed:
                    already_processed.append(prefixed)
                    outfile.write(' %s=%s' % (prefixed, quote_attrib(attr_value), ))
                continue
            mo = re_.match(Namespace_extract_pat_, attr_name)
            if mo is None:
                if attr_name not in already_processed:
                    already_processed.append(attr_name)
                    outfile.write(' %s=%s' % (attr_name, quote_attrib(attr_value), ))
                continue
            uri, local = mo.group(1, 2)
            if local in already_processed:
                continue
            already_processed.append(local)
            if uri == 'http://www.w3.org/XML/1998/namespace':
                # xml: namespace needs no declaration.
                outfile.write(' %s=%s' % (local, quote_attrib(attr_value), ))
            else:
                unique_counter += 1
                outfile.write(' xmlns:yyy%d="%s"' % (unique_counter, uri, ))
                outfile.write(' yyy%d:%s=%s' % (unique_counter, local, quote_attrib(attr_value), ))
    def exportChildren(self, outfile, level, namespace_='ovf:', name_='cimShort', fromsubclass_=False):
        # Simple type: no child elements.
        pass
    def hasContent_(self):
        # Truthy text content counts as content.
        return bool(self.valueOf_)
    def exportLiteral(self, outfile, level, name_='cimShort'):
        # Render this instance as nested Python-literal text.
        level += 1
        self.exportLiteralAttributes(outfile, level, [], name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)
            showIndent(outfile, level)
            outfile.write('valueOf_ = """%s""",\n' % (self.valueOf_,))
    def exportLiteralAttributes(self, outfile, level, already_processed, name_):
        for attr_name, attr_value in self.anyAttributes_.items():
            showIndent(outfile, level)
            outfile.write('%s = "%s",\n' % (attr_name, attr_value,))
    def exportLiteralChildren(self, outfile, level, name_):
        pass
    def build(self, node):
        # Populate this instance from an ElementTree node.
        self.buildAttributes(node, node.attrib, [])
        self.valueOf_ = get_all_text_(node)
        for child_ in node:
            localName_ = Tag_pattern_.match(child_.tag).groups()[-1]
            self.buildChildren(child_, node, localName_)
    def buildAttributes(self, node, attrs, already_processed):
        # Keep every attribute the caller has not already consumed.
        self.anyAttributes_ = dict(
            (attr_name, attr_value)
            for attr_name, attr_value in attrs.items()
            if attr_name not in already_processed)
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        pass
# end class cimShort
class cimUnsignedInt(GeneratedsSuper):
    """Generated binding for the CIM ``unsignedInt`` simple type:
    element text content, wildcard attributes, and xsi:type extension
    support (this type can be extended in the schema)."""
    subclass = None
    superclass = None
    def __init__(self, valueOf_=None, extensiontype_=None):
        self.valueOf_ = valueOf_              # element text content
        self.anyAttributes_ = {}              # wildcard attributes (Clark-notation keys)
        self.extensiontype_ = extensiontype_  # xsi:type value, if extended
    def factory(*args_, **kwargs_):
        # Route construction through the registered subclass, if any.
        cls_ = cimUnsignedInt.subclass or cimUnsignedInt
        return cls_(*args_, **kwargs_)
    factory = staticmethod(factory)
    def get_valueOf_(self): return self.valueOf_
    def set_valueOf_(self, valueOf_): self.valueOf_ = valueOf_
    def get_anyAttributes_(self): return self.anyAttributes_
    def set_anyAttributes_(self, anyAttributes_): self.anyAttributes_ = anyAttributes_
    def get_extensiontype_(self): return self.extensiontype_
    def set_extensiontype_(self, extensiontype_): self.extensiontype_ = extensiontype_
    def export(self, outfile, level, namespace_='ovf:', name_='cimUnsignedInt', namespacedef_=''):
        # Serialize as XML; collapses to a self-closing tag when empty.
        showIndent(outfile, level)
        outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
        already_processed = []
        self.exportAttributes(outfile, level, already_processed, namespace_, name_='cimUnsignedInt')
        if not self.hasContent_():
            outfile.write('/>\n')
            return
        outfile.write('>')
        outfile.write(str(self.valueOf_).encode(ExternalEncoding))
        self.exportChildren(outfile, level + 1, namespace_, name_)
        outfile.write('</%s%s>\n' % (namespace_, name_))
    def exportAttributes(self, outfile, level, already_processed, namespace_='ovf:', name_='cimUnsignedInt'):
        # Emit wildcard attributes, mapping Clark-notation names back to
        # prefixed form (xsi:, xml:, or synthetic yyyN: prefixes), then
        # an xsi:type attribute when this instance carries an extension type.
        unique_counter = 0
        xsi_clark = '{%s}' % ('http://www.w3.org/2001/XMLSchema-instance', )
        for attr_name, attr_value in self.anyAttributes_.items():
            if attr_name.startswith(xsi_clark):
                prefixed = 'xsi:%s' % (attr_name[len(xsi_clark):], )
                if prefixed not in already_processed:
                    already_processed.append(prefixed)
                    outfile.write(' %s=%s' % (prefixed, quote_attrib(attr_value), ))
                continue
            mo = re_.match(Namespace_extract_pat_, attr_name)
            if mo is None:
                if attr_name not in already_processed:
                    already_processed.append(attr_name)
                    outfile.write(' %s=%s' % (attr_name, quote_attrib(attr_value), ))
                continue
            uri, local = mo.group(1, 2)
            if local in already_processed:
                continue
            already_processed.append(local)
            if uri == 'http://www.w3.org/XML/1998/namespace':
                # xml: namespace needs no declaration.
                outfile.write(' %s=%s' % (local, quote_attrib(attr_value), ))
            else:
                unique_counter += 1
                outfile.write(' xmlns:yyy%d="%s"' % (unique_counter, uri, ))
                outfile.write(' yyy%d:%s=%s' % (unique_counter, local, quote_attrib(attr_value), ))
        if self.extensiontype_ is not None and 'xsi:type' not in already_processed:
            already_processed.append('xsi:type')
            outfile.write(' xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"')
            outfile.write(' xsi:type="%s"' % self.extensiontype_)
    def exportChildren(self, outfile, level, namespace_='ovf:', name_='cimUnsignedInt', fromsubclass_=False):
        # Simple type: no child elements.
        pass
    def hasContent_(self):
        # Truthy text content counts as content.
        return bool(self.valueOf_)
    def exportLiteral(self, outfile, level, name_='cimUnsignedInt'):
        # Render this instance as nested Python-literal text.
        level += 1
        self.exportLiteralAttributes(outfile, level, [], name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)
            showIndent(outfile, level)
            outfile.write('valueOf_ = """%s""",\n' % (self.valueOf_,))
    def exportLiteralAttributes(self, outfile, level, already_processed, name_):
        for attr_name, attr_value in self.anyAttributes_.items():
            showIndent(outfile, level)
            outfile.write('%s = "%s",\n' % (attr_name, attr_value,))
    def exportLiteralChildren(self, outfile, level, name_):
        pass
    def build(self, node):
        # Populate this instance from an ElementTree node.
        self.buildAttributes(node, node.attrib, [])
        self.valueOf_ = get_all_text_(node)
        for child_ in node:
            localName_ = Tag_pattern_.match(child_.tag).groups()[-1]
            self.buildChildren(child_, node, localName_)
    def buildAttributes(self, node, attrs, already_processed):
        # Keep every attribute the caller has not already consumed,
        # then capture the xsi:type extension marker separately.
        self.anyAttributes_ = dict(
            (attr_name, attr_value)
            for attr_name, attr_value in attrs.items()
            if attr_name not in already_processed)
        value = find_attr_value_('xsi:type', node)
        if value is not None and 'xsi:type' not in already_processed:
            already_processed.append('xsi:type')
            self.extensiontype_ = value
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        pass
# end class cimUnsignedInt
class cimInt(GeneratedsSuper):
    """Generated binding for the CIM ``int`` simple type:
    element text content plus a wildcard bag of XML attributes."""
    subclass = None
    superclass = None
    def __init__(self, valueOf_=None):
        self.valueOf_ = valueOf_    # element text content
        self.anyAttributes_ = {}    # wildcard attributes (Clark-notation keys)
    def factory(*args_, **kwargs_):
        # Route construction through the registered subclass, if any.
        cls_ = cimInt.subclass or cimInt
        return cls_(*args_, **kwargs_)
    factory = staticmethod(factory)
    def get_valueOf_(self): return self.valueOf_
    def set_valueOf_(self, valueOf_): self.valueOf_ = valueOf_
    def get_anyAttributes_(self): return self.anyAttributes_
    def set_anyAttributes_(self, anyAttributes_): self.anyAttributes_ = anyAttributes_
    def export(self, outfile, level, namespace_='ovf:', name_='cimInt', namespacedef_=''):
        # Serialize as XML; collapses to a self-closing tag when empty.
        showIndent(outfile, level)
        outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
        already_processed = []
        self.exportAttributes(outfile, level, already_processed, namespace_, name_='cimInt')
        if not self.hasContent_():
            outfile.write('/>\n')
            return
        outfile.write('>')
        outfile.write(str(self.valueOf_).encode(ExternalEncoding))
        self.exportChildren(outfile, level + 1, namespace_, name_)
        outfile.write('</%s%s>\n' % (namespace_, name_))
    def exportAttributes(self, outfile, level, already_processed, namespace_='ovf:', name_='cimInt'):
        # Emit wildcard attributes, mapping Clark-notation names back to
        # prefixed form (xsi:, xml:, or synthetic yyyN: prefixes).
        unique_counter = 0
        xsi_clark = '{%s}' % ('http://www.w3.org/2001/XMLSchema-instance', )
        for attr_name, attr_value in self.anyAttributes_.items():
            if attr_name.startswith(xsi_clark):
                prefixed = 'xsi:%s' % (attr_name[len(xsi_clark):], )
                if prefixed not in already_processed:
                    already_processed.append(prefixed)
                    outfile.write(' %s=%s' % (prefixed, quote_attrib(attr_value), ))
                continue
            mo = re_.match(Namespace_extract_pat_, attr_name)
            if mo is None:
                if attr_name not in already_processed:
                    already_processed.append(attr_name)
                    outfile.write(' %s=%s' % (attr_name, quote_attrib(attr_value), ))
                continue
            uri, local = mo.group(1, 2)
            if local in already_processed:
                continue
            already_processed.append(local)
            if uri == 'http://www.w3.org/XML/1998/namespace':
                # xml: namespace needs no declaration.
                outfile.write(' %s=%s' % (local, quote_attrib(attr_value), ))
            else:
                unique_counter += 1
                outfile.write(' xmlns:yyy%d="%s"' % (unique_counter, uri, ))
                outfile.write(' yyy%d:%s=%s' % (unique_counter, local, quote_attrib(attr_value), ))
    def exportChildren(self, outfile, level, namespace_='ovf:', name_='cimInt', fromsubclass_=False):
        # Simple type: no child elements.
        pass
    def hasContent_(self):
        # Truthy text content counts as content.
        return bool(self.valueOf_)
    def exportLiteral(self, outfile, level, name_='cimInt'):
        # Render this instance as nested Python-literal text.
        level += 1
        self.exportLiteralAttributes(outfile, level, [], name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)
            showIndent(outfile, level)
            outfile.write('valueOf_ = """%s""",\n' % (self.valueOf_,))
    def exportLiteralAttributes(self, outfile, level, already_processed, name_):
        for attr_name, attr_value in self.anyAttributes_.items():
            showIndent(outfile, level)
            outfile.write('%s = "%s",\n' % (attr_name, attr_value,))
    def exportLiteralChildren(self, outfile, level, name_):
        pass
    def build(self, node):
        # Populate this instance from an ElementTree node.
        self.buildAttributes(node, node.attrib, [])
        self.valueOf_ = get_all_text_(node)
        for child_ in node:
            localName_ = Tag_pattern_.match(child_.tag).groups()[-1]
            self.buildChildren(child_, node, localName_)
    def buildAttributes(self, node, attrs, already_processed):
        # Keep every attribute the caller has not already consumed.
        self.anyAttributes_ = dict(
            (attr_name, attr_value)
            for attr_name, attr_value in attrs.items()
            if attr_name not in already_processed)
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        pass
# end class cimInt
class cimUnsignedLong(GeneratedsSuper):
    """Generated binding for the CIM ``unsignedLong`` simple type:
    element text content plus a wildcard bag of XML attributes."""
    subclass = None
    superclass = None
    def __init__(self, valueOf_=None):
        self.valueOf_ = valueOf_    # element text content
        self.anyAttributes_ = {}    # wildcard attributes (Clark-notation keys)
    def factory(*args_, **kwargs_):
        # Route construction through the registered subclass, if any.
        cls_ = cimUnsignedLong.subclass or cimUnsignedLong
        return cls_(*args_, **kwargs_)
    factory = staticmethod(factory)
    def get_valueOf_(self): return self.valueOf_
    def set_valueOf_(self, valueOf_): self.valueOf_ = valueOf_
    def get_anyAttributes_(self): return self.anyAttributes_
    def set_anyAttributes_(self, anyAttributes_): self.anyAttributes_ = anyAttributes_
    def export(self, outfile, level, namespace_='ovf:', name_='cimUnsignedLong', namespacedef_=''):
        # Serialize as XML; collapses to a self-closing tag when empty.
        showIndent(outfile, level)
        outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
        already_processed = []
        self.exportAttributes(outfile, level, already_processed, namespace_, name_='cimUnsignedLong')
        if not self.hasContent_():
            outfile.write('/>\n')
            return
        outfile.write('>')
        outfile.write(str(self.valueOf_).encode(ExternalEncoding))
        self.exportChildren(outfile, level + 1, namespace_, name_)
        outfile.write('</%s%s>\n' % (namespace_, name_))
    def exportAttributes(self, outfile, level, already_processed, namespace_='ovf:', name_='cimUnsignedLong'):
        # Emit wildcard attributes, mapping Clark-notation names back to
        # prefixed form (xsi:, xml:, or synthetic yyyN: prefixes).
        unique_counter = 0
        xsi_clark = '{%s}' % ('http://www.w3.org/2001/XMLSchema-instance', )
        for attr_name, attr_value in self.anyAttributes_.items():
            if attr_name.startswith(xsi_clark):
                prefixed = 'xsi:%s' % (attr_name[len(xsi_clark):], )
                if prefixed not in already_processed:
                    already_processed.append(prefixed)
                    outfile.write(' %s=%s' % (prefixed, quote_attrib(attr_value), ))
                continue
            mo = re_.match(Namespace_extract_pat_, attr_name)
            if mo is None:
                if attr_name not in already_processed:
                    already_processed.append(attr_name)
                    outfile.write(' %s=%s' % (attr_name, quote_attrib(attr_value), ))
                continue
            uri, local = mo.group(1, 2)
            if local in already_processed:
                continue
            already_processed.append(local)
            if uri == 'http://www.w3.org/XML/1998/namespace':
                # xml: namespace needs no declaration.
                outfile.write(' %s=%s' % (local, quote_attrib(attr_value), ))
            else:
                unique_counter += 1
                outfile.write(' xmlns:yyy%d="%s"' % (unique_counter, uri, ))
                outfile.write(' yyy%d:%s=%s' % (unique_counter, local, quote_attrib(attr_value), ))
    def exportChildren(self, outfile, level, namespace_='ovf:', name_='cimUnsignedLong', fromsubclass_=False):
        # Simple type: no child elements.
        pass
    def hasContent_(self):
        # Truthy text content counts as content.
        return bool(self.valueOf_)
    def exportLiteral(self, outfile, level, name_='cimUnsignedLong'):
        # Render this instance as nested Python-literal text.
        level += 1
        self.exportLiteralAttributes(outfile, level, [], name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)
            showIndent(outfile, level)
            outfile.write('valueOf_ = """%s""",\n' % (self.valueOf_,))
    def exportLiteralAttributes(self, outfile, level, already_processed, name_):
        for attr_name, attr_value in self.anyAttributes_.items():
            showIndent(outfile, level)
            outfile.write('%s = "%s",\n' % (attr_name, attr_value,))
    def exportLiteralChildren(self, outfile, level, name_):
        pass
    def build(self, node):
        # Populate this instance from an ElementTree node.
        self.buildAttributes(node, node.attrib, [])
        self.valueOf_ = get_all_text_(node)
        for child_ in node:
            localName_ = Tag_pattern_.match(child_.tag).groups()[-1]
            self.buildChildren(child_, node, localName_)
    def buildAttributes(self, node, attrs, already_processed):
        # Keep every attribute the caller has not already consumed.
        self.anyAttributes_ = dict(
            (attr_name, attr_value)
            for attr_name, attr_value in attrs.items()
            if attr_name not in already_processed)
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        pass
# end class cimUnsignedLong
class cimLong(GeneratedsSuper):
    """Generated binding for the CIM ``long`` simple type:
    element text content, wildcard attributes, and xsi:type extension
    support (this type can be extended in the schema)."""
    subclass = None
    superclass = None
    def __init__(self, valueOf_=None, extensiontype_=None):
        self.valueOf_ = valueOf_              # element text content
        self.anyAttributes_ = {}              # wildcard attributes (Clark-notation keys)
        self.extensiontype_ = extensiontype_  # xsi:type value, if extended
    def factory(*args_, **kwargs_):
        # Route construction through the registered subclass, if any.
        cls_ = cimLong.subclass or cimLong
        return cls_(*args_, **kwargs_)
    factory = staticmethod(factory)
    def get_valueOf_(self): return self.valueOf_
    def set_valueOf_(self, valueOf_): self.valueOf_ = valueOf_
    def get_anyAttributes_(self): return self.anyAttributes_
    def set_anyAttributes_(self, anyAttributes_): self.anyAttributes_ = anyAttributes_
    def get_extensiontype_(self): return self.extensiontype_
    def set_extensiontype_(self, extensiontype_): self.extensiontype_ = extensiontype_
    def export(self, outfile, level, namespace_='ovf:', name_='cimLong', namespacedef_=''):
        # Serialize as XML; collapses to a self-closing tag when empty.
        showIndent(outfile, level)
        outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
        already_processed = []
        self.exportAttributes(outfile, level, already_processed, namespace_, name_='cimLong')
        if not self.hasContent_():
            outfile.write('/>\n')
            return
        outfile.write('>')
        outfile.write(str(self.valueOf_).encode(ExternalEncoding))
        self.exportChildren(outfile, level + 1, namespace_, name_)
        outfile.write('</%s%s>\n' % (namespace_, name_))
    def exportAttributes(self, outfile, level, already_processed, namespace_='ovf:', name_='cimLong'):
        # Emit wildcard attributes, mapping Clark-notation names back to
        # prefixed form (xsi:, xml:, or synthetic yyyN: prefixes), then
        # an xsi:type attribute when this instance carries an extension type.
        unique_counter = 0
        xsi_clark = '{%s}' % ('http://www.w3.org/2001/XMLSchema-instance', )
        for attr_name, attr_value in self.anyAttributes_.items():
            if attr_name.startswith(xsi_clark):
                prefixed = 'xsi:%s' % (attr_name[len(xsi_clark):], )
                if prefixed not in already_processed:
                    already_processed.append(prefixed)
                    outfile.write(' %s=%s' % (prefixed, quote_attrib(attr_value), ))
                continue
            mo = re_.match(Namespace_extract_pat_, attr_name)
            if mo is None:
                if attr_name not in already_processed:
                    already_processed.append(attr_name)
                    outfile.write(' %s=%s' % (attr_name, quote_attrib(attr_value), ))
                continue
            uri, local = mo.group(1, 2)
            if local in already_processed:
                continue
            already_processed.append(local)
            if uri == 'http://www.w3.org/XML/1998/namespace':
                # xml: namespace needs no declaration.
                outfile.write(' %s=%s' % (local, quote_attrib(attr_value), ))
            else:
                unique_counter += 1
                outfile.write(' xmlns:yyy%d="%s"' % (unique_counter, uri, ))
                outfile.write(' yyy%d:%s=%s' % (unique_counter, local, quote_attrib(attr_value), ))
        if self.extensiontype_ is not None and 'xsi:type' not in already_processed:
            already_processed.append('xsi:type')
            outfile.write(' xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"')
            outfile.write(' xsi:type="%s"' % self.extensiontype_)
    def exportChildren(self, outfile, level, namespace_='ovf:', name_='cimLong', fromsubclass_=False):
        # Simple type: no child elements.
        pass
    def hasContent_(self):
        # Truthy text content counts as content.
        return bool(self.valueOf_)
    def exportLiteral(self, outfile, level, name_='cimLong'):
        # Render this instance as nested Python-literal text.
        level += 1
        self.exportLiteralAttributes(outfile, level, [], name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)
            showIndent(outfile, level)
            outfile.write('valueOf_ = """%s""",\n' % (self.valueOf_,))
    def exportLiteralAttributes(self, outfile, level, already_processed, name_):
        for attr_name, attr_value in self.anyAttributes_.items():
            showIndent(outfile, level)
            outfile.write('%s = "%s",\n' % (attr_name, attr_value,))
    def exportLiteralChildren(self, outfile, level, name_):
        pass
    def build(self, node):
        # Populate this instance from an ElementTree node.
        self.buildAttributes(node, node.attrib, [])
        self.valueOf_ = get_all_text_(node)
        for child_ in node:
            localName_ = Tag_pattern_.match(child_.tag).groups()[-1]
            self.buildChildren(child_, node, localName_)
    def buildAttributes(self, node, attrs, already_processed):
        # Keep every attribute the caller has not already consumed,
        # then capture the xsi:type extension marker separately.
        self.anyAttributes_ = dict(
            (attr_name, attr_value)
            for attr_name, attr_value in attrs.items()
            if attr_name not in already_processed)
        value = find_attr_value_('xsi:type', node)
        if value is not None and 'xsi:type' not in already_processed:
            already_processed.append('xsi:type')
            self.extensiontype_ = value
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        pass
# end class cimLong
class cimString(GeneratedsSuper):
    """Generated binding for the CIM ``cimString`` simple type.

    Carries a single text value (``valueOf_``), a dict of wildcard XML
    attributes (``anyAttributes_``), and an optional ``xsi:type`` marker
    (``extensiontype_``) used when a derived type is serialized.
    """
    subclass = None
    superclass = None

    def __init__(self, valueOf_=None, extensiontype_=None):
        self.valueOf_ = valueOf_
        self.anyAttributes_ = {}
        self.extensiontype_ = extensiontype_

    def factory(*args_, **kwargs_):
        # Build the registered subclass when one has been installed.
        target = cimString.subclass or cimString
        return target(*args_, **kwargs_)
    factory = staticmethod(factory)

    def get_valueOf_(self):
        return self.valueOf_

    def set_valueOf_(self, valueOf_):
        self.valueOf_ = valueOf_

    def get_anyAttributes_(self):
        return self.anyAttributes_

    def set_anyAttributes_(self, anyAttributes_):
        self.anyAttributes_ = anyAttributes_

    def get_extensiontype_(self):
        return self.extensiontype_

    def set_extensiontype_(self, extensiontype_):
        self.extensiontype_ = extensiontype_

    def export(self, outfile, level, namespace_='ovf:', name_='cimString', namespacedef_=''):
        """Serialize this element as XML to *outfile* at indent *level*."""
        showIndent(outfile, level)
        outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
        already_processed = []
        self.exportAttributes(outfile, level, already_processed, namespace_, name_='cimString')
        if not self.hasContent_():
            outfile.write('/>\n')
            return
        outfile.write('>')
        outfile.write(str(self.valueOf_).encode(ExternalEncoding))
        self.exportChildren(outfile, level + 1, namespace_, name_)
        outfile.write('</%s%s>\n' % (namespace_, name_))

    def exportAttributes(self, outfile, level, already_processed, namespace_='ovf:', name_='cimString'):
        """Emit wildcard attributes, restoring prefixes for Clark-notation names."""
        unique_counter = 0
        xsinamespaceprefix = 'xsi'
        xsinamespace1 = 'http://www.w3.org/2001/XMLSchema-instance'
        xsinamespace2 = '{%s}' % (xsinamespace1, )
        for attr_name, attr_value in self.anyAttributes_.items():
            if attr_name.startswith(xsinamespace2):
                # XMLSchema-instance attribute: rewrite as an xsi:-prefixed name.
                prefixed = '%s:%s' % (xsinamespaceprefix, attr_name[len(xsinamespace2):], )
                if prefixed not in already_processed:
                    already_processed.append(prefixed)
                    outfile.write(' %s=%s' % (prefixed, quote_attrib(attr_value), ))
                continue
            mo = re_.match(Namespace_extract_pat_, attr_name)
            if mo is None:
                # Plain, unqualified attribute.
                if attr_name not in already_processed:
                    already_processed.append(attr_name)
                    outfile.write(' %s=%s' % (attr_name, quote_attrib(attr_value), ))
                continue
            namespace, attr_name = mo.group(1, 2)
            if attr_name not in already_processed:
                already_processed.append(attr_name)
                if namespace == 'http://www.w3.org/XML/1998/namespace':
                    outfile.write(' %s=%s' % (attr_name, quote_attrib(attr_value), ))
                else:
                    # Unknown namespace: declare a throwaway yyyN prefix inline.
                    unique_counter += 1
                    outfile.write(' xmlns:yyy%d="%s"' % (unique_counter, namespace, ))
                    outfile.write(' yyy%d:%s=%s' % (unique_counter, attr_name, quote_attrib(attr_value), ))
        if self.extensiontype_ is not None and 'xsi:type' not in already_processed:
            already_processed.append('xsi:type')
            outfile.write(' xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"')
            outfile.write(' xsi:type="%s"' % self.extensiontype_)

    def exportChildren(self, outfile, level, namespace_='ovf:', name_='cimString', fromsubclass_=False):
        # Simple content only: no child elements to emit.
        pass

    def hasContent_(self):
        # Truthy text content is the only payload this type can carry.
        return bool(self.valueOf_)

    def exportLiteral(self, outfile, level, name_='cimString'):
        """Write a Python-literal rendering of this object to *outfile*."""
        level += 1
        self.exportLiteralAttributes(outfile, level, [], name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)
        showIndent(outfile, level)
        outfile.write('valueOf_ = """%s""",\n' % (self.valueOf_,))

    def exportLiteralAttributes(self, outfile, level, already_processed, name_):
        for attr_name, attr_value in self.anyAttributes_.items():
            showIndent(outfile, level)
            outfile.write('%s = "%s",\n' % (attr_name, attr_value,))

    def exportLiteralChildren(self, outfile, level, name_):
        pass

    def build(self, node):
        """Populate this instance from an ElementTree element."""
        self.buildAttributes(node, node.attrib, [])
        self.valueOf_ = get_all_text_(node)
        for child_ in node:
            child_name = Tag_pattern_.match(child_.tag).groups()[-1]
            self.buildChildren(child_, node, child_name)

    def buildAttributes(self, node, attrs, already_processed):
        # Collect wildcard attributes, then capture any xsi:type marker.
        self.anyAttributes_ = {}
        for attr_name, attr_value in attrs.items():
            if attr_name not in already_processed:
                self.anyAttributes_[attr_name] = attr_value
        xsi_type = find_attr_value_('xsi:type', node)
        if xsi_type is not None and 'xsi:type' not in already_processed:
            already_processed.append('xsi:type')
            self.extensiontype_ = xsi_type

    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        pass
# end class cimString
class cimBoolean(GeneratedsSuper):
    """Generated binding for the CIM ``cimBoolean`` simple type.

    Carries a single text value (``valueOf_``), a dict of wildcard XML
    attributes (``anyAttributes_``), and an optional ``xsi:type`` marker
    (``extensiontype_``) used when a derived type is serialized.
    """
    subclass = None
    superclass = None

    def __init__(self, valueOf_=None, extensiontype_=None):
        self.valueOf_ = valueOf_
        self.anyAttributes_ = {}
        self.extensiontype_ = extensiontype_

    def factory(*args_, **kwargs_):
        # Build the registered subclass when one has been installed.
        target = cimBoolean.subclass or cimBoolean
        return target(*args_, **kwargs_)
    factory = staticmethod(factory)

    def get_valueOf_(self):
        return self.valueOf_

    def set_valueOf_(self, valueOf_):
        self.valueOf_ = valueOf_

    def get_anyAttributes_(self):
        return self.anyAttributes_

    def set_anyAttributes_(self, anyAttributes_):
        self.anyAttributes_ = anyAttributes_

    def get_extensiontype_(self):
        return self.extensiontype_

    def set_extensiontype_(self, extensiontype_):
        self.extensiontype_ = extensiontype_

    def export(self, outfile, level, namespace_='ovf:', name_='cimBoolean', namespacedef_=''):
        """Serialize this element as XML to *outfile* at indent *level*."""
        showIndent(outfile, level)
        outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
        already_processed = []
        self.exportAttributes(outfile, level, already_processed, namespace_, name_='cimBoolean')
        if not self.hasContent_():
            outfile.write('/>\n')
            return
        outfile.write('>')
        outfile.write(str(self.valueOf_).encode(ExternalEncoding))
        self.exportChildren(outfile, level + 1, namespace_, name_)
        outfile.write('</%s%s>\n' % (namespace_, name_))

    def exportAttributes(self, outfile, level, already_processed, namespace_='ovf:', name_='cimBoolean'):
        """Emit wildcard attributes, restoring prefixes for Clark-notation names."""
        unique_counter = 0
        xsinamespaceprefix = 'xsi'
        xsinamespace1 = 'http://www.w3.org/2001/XMLSchema-instance'
        xsinamespace2 = '{%s}' % (xsinamespace1, )
        for attr_name, attr_value in self.anyAttributes_.items():
            if attr_name.startswith(xsinamespace2):
                # XMLSchema-instance attribute: rewrite as an xsi:-prefixed name.
                prefixed = '%s:%s' % (xsinamespaceprefix, attr_name[len(xsinamespace2):], )
                if prefixed not in already_processed:
                    already_processed.append(prefixed)
                    outfile.write(' %s=%s' % (prefixed, quote_attrib(attr_value), ))
                continue
            mo = re_.match(Namespace_extract_pat_, attr_name)
            if mo is None:
                # Plain, unqualified attribute.
                if attr_name not in already_processed:
                    already_processed.append(attr_name)
                    outfile.write(' %s=%s' % (attr_name, quote_attrib(attr_value), ))
                continue
            namespace, attr_name = mo.group(1, 2)
            if attr_name not in already_processed:
                already_processed.append(attr_name)
                if namespace == 'http://www.w3.org/XML/1998/namespace':
                    outfile.write(' %s=%s' % (attr_name, quote_attrib(attr_value), ))
                else:
                    # Unknown namespace: declare a throwaway yyyN prefix inline.
                    unique_counter += 1
                    outfile.write(' xmlns:yyy%d="%s"' % (unique_counter, namespace, ))
                    outfile.write(' yyy%d:%s=%s' % (unique_counter, attr_name, quote_attrib(attr_value), ))
        if self.extensiontype_ is not None and 'xsi:type' not in already_processed:
            already_processed.append('xsi:type')
            outfile.write(' xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"')
            outfile.write(' xsi:type="%s"' % self.extensiontype_)

    def exportChildren(self, outfile, level, namespace_='ovf:', name_='cimBoolean', fromsubclass_=False):
        # Simple content only: no child elements to emit.
        pass

    def hasContent_(self):
        # Truthy text content is the only payload this type can carry.
        return bool(self.valueOf_)

    def exportLiteral(self, outfile, level, name_='cimBoolean'):
        """Write a Python-literal rendering of this object to *outfile*."""
        level += 1
        self.exportLiteralAttributes(outfile, level, [], name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)
        showIndent(outfile, level)
        outfile.write('valueOf_ = """%s""",\n' % (self.valueOf_,))

    def exportLiteralAttributes(self, outfile, level, already_processed, name_):
        for attr_name, attr_value in self.anyAttributes_.items():
            showIndent(outfile, level)
            outfile.write('%s = "%s",\n' % (attr_name, attr_value,))

    def exportLiteralChildren(self, outfile, level, name_):
        pass

    def build(self, node):
        """Populate this instance from an ElementTree element."""
        self.buildAttributes(node, node.attrib, [])
        self.valueOf_ = get_all_text_(node)
        for child_ in node:
            child_name = Tag_pattern_.match(child_.tag).groups()[-1]
            self.buildChildren(child_, node, child_name)

    def buildAttributes(self, node, attrs, already_processed):
        # Collect wildcard attributes, then capture any xsi:type marker.
        self.anyAttributes_ = {}
        for attr_name, attr_value in attrs.items():
            if attr_name not in already_processed:
                self.anyAttributes_[attr_name] = attr_value
        xsi_type = find_attr_value_('xsi:type', node)
        if xsi_type is not None and 'xsi:type' not in already_processed:
            already_processed.append('xsi:type')
            self.extensiontype_ = xsi_type

    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        pass
# end class cimBoolean
class cimFloat(GeneratedsSuper):
    """Generated binding for the CIM ``cimFloat`` simple type.

    Carries a single text value (``valueOf_``) and a dict of wildcard
    XML attributes (``anyAttributes_``).
    """
    subclass = None
    superclass = None

    def __init__(self, valueOf_=None):
        self.valueOf_ = valueOf_
        self.anyAttributes_ = {}

    def factory(*args_, **kwargs_):
        # Build the registered subclass when one has been installed.
        target = cimFloat.subclass or cimFloat
        return target(*args_, **kwargs_)
    factory = staticmethod(factory)

    def get_valueOf_(self):
        return self.valueOf_

    def set_valueOf_(self, valueOf_):
        self.valueOf_ = valueOf_

    def get_anyAttributes_(self):
        return self.anyAttributes_

    def set_anyAttributes_(self, anyAttributes_):
        self.anyAttributes_ = anyAttributes_

    def export(self, outfile, level, namespace_='ovf:', name_='cimFloat', namespacedef_=''):
        """Serialize this element as XML to *outfile* at indent *level*."""
        showIndent(outfile, level)
        outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
        already_processed = []
        self.exportAttributes(outfile, level, already_processed, namespace_, name_='cimFloat')
        if not self.hasContent_():
            outfile.write('/>\n')
            return
        outfile.write('>')
        outfile.write(str(self.valueOf_).encode(ExternalEncoding))
        self.exportChildren(outfile, level + 1, namespace_, name_)
        outfile.write('</%s%s>\n' % (namespace_, name_))

    def exportAttributes(self, outfile, level, already_processed, namespace_='ovf:', name_='cimFloat'):
        """Emit wildcard attributes, restoring prefixes for Clark-notation names."""
        unique_counter = 0
        xsinamespaceprefix = 'xsi'
        xsinamespace1 = 'http://www.w3.org/2001/XMLSchema-instance'
        xsinamespace2 = '{%s}' % (xsinamespace1, )
        for attr_name, attr_value in self.anyAttributes_.items():
            if attr_name.startswith(xsinamespace2):
                # XMLSchema-instance attribute: rewrite as an xsi:-prefixed name.
                prefixed = '%s:%s' % (xsinamespaceprefix, attr_name[len(xsinamespace2):], )
                if prefixed not in already_processed:
                    already_processed.append(prefixed)
                    outfile.write(' %s=%s' % (prefixed, quote_attrib(attr_value), ))
                continue
            mo = re_.match(Namespace_extract_pat_, attr_name)
            if mo is None:
                # Plain, unqualified attribute.
                if attr_name not in already_processed:
                    already_processed.append(attr_name)
                    outfile.write(' %s=%s' % (attr_name, quote_attrib(attr_value), ))
                continue
            namespace, attr_name = mo.group(1, 2)
            if attr_name not in already_processed:
                already_processed.append(attr_name)
                if namespace == 'http://www.w3.org/XML/1998/namespace':
                    outfile.write(' %s=%s' % (attr_name, quote_attrib(attr_value), ))
                else:
                    # Unknown namespace: declare a throwaway yyyN prefix inline.
                    unique_counter += 1
                    outfile.write(' xmlns:yyy%d="%s"' % (unique_counter, namespace, ))
                    outfile.write(' yyy%d:%s=%s' % (unique_counter, attr_name, quote_attrib(attr_value), ))

    def exportChildren(self, outfile, level, namespace_='ovf:', name_='cimFloat', fromsubclass_=False):
        # Simple content only: no child elements to emit.
        pass

    def hasContent_(self):
        # Truthy text content is the only payload this type can carry.
        return bool(self.valueOf_)

    def exportLiteral(self, outfile, level, name_='cimFloat'):
        """Write a Python-literal rendering of this object to *outfile*."""
        level += 1
        self.exportLiteralAttributes(outfile, level, [], name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)
        showIndent(outfile, level)
        outfile.write('valueOf_ = """%s""",\n' % (self.valueOf_,))

    def exportLiteralAttributes(self, outfile, level, already_processed, name_):
        for attr_name, attr_value in self.anyAttributes_.items():
            showIndent(outfile, level)
            outfile.write('%s = "%s",\n' % (attr_name, attr_value,))

    def exportLiteralChildren(self, outfile, level, name_):
        pass

    def build(self, node):
        """Populate this instance from an ElementTree element."""
        self.buildAttributes(node, node.attrib, [])
        self.valueOf_ = get_all_text_(node)
        for child_ in node:
            child_name = Tag_pattern_.match(child_.tag).groups()[-1]
            self.buildChildren(child_, node, child_name)

    def buildAttributes(self, node, attrs, already_processed):
        # Collect wildcard attributes not already handled by the caller.
        self.anyAttributes_ = {}
        for attr_name, attr_value in attrs.items():
            if attr_name not in already_processed:
                self.anyAttributes_[attr_name] = attr_value

    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        pass
# end class cimFloat
class cimDouble(GeneratedsSuper):
    """Generated binding for the CIM ``cimDouble`` simple type.

    Carries a single text value (``valueOf_``) and a dict of wildcard
    XML attributes (``anyAttributes_``).
    """
    subclass = None
    superclass = None

    def __init__(self, valueOf_=None):
        self.valueOf_ = valueOf_
        self.anyAttributes_ = {}

    def factory(*args_, **kwargs_):
        # Build the registered subclass when one has been installed.
        target = cimDouble.subclass or cimDouble
        return target(*args_, **kwargs_)
    factory = staticmethod(factory)

    def get_valueOf_(self):
        return self.valueOf_

    def set_valueOf_(self, valueOf_):
        self.valueOf_ = valueOf_

    def get_anyAttributes_(self):
        return self.anyAttributes_

    def set_anyAttributes_(self, anyAttributes_):
        self.anyAttributes_ = anyAttributes_

    def export(self, outfile, level, namespace_='ovf:', name_='cimDouble', namespacedef_=''):
        """Serialize this element as XML to *outfile* at indent *level*."""
        showIndent(outfile, level)
        outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
        already_processed = []
        self.exportAttributes(outfile, level, already_processed, namespace_, name_='cimDouble')
        if not self.hasContent_():
            outfile.write('/>\n')
            return
        outfile.write('>')
        outfile.write(str(self.valueOf_).encode(ExternalEncoding))
        self.exportChildren(outfile, level + 1, namespace_, name_)
        outfile.write('</%s%s>\n' % (namespace_, name_))

    def exportAttributes(self, outfile, level, already_processed, namespace_='ovf:', name_='cimDouble'):
        """Emit wildcard attributes, restoring prefixes for Clark-notation names."""
        unique_counter = 0
        xsinamespaceprefix = 'xsi'
        xsinamespace1 = 'http://www.w3.org/2001/XMLSchema-instance'
        xsinamespace2 = '{%s}' % (xsinamespace1, )
        for attr_name, attr_value in self.anyAttributes_.items():
            if attr_name.startswith(xsinamespace2):
                # XMLSchema-instance attribute: rewrite as an xsi:-prefixed name.
                prefixed = '%s:%s' % (xsinamespaceprefix, attr_name[len(xsinamespace2):], )
                if prefixed not in already_processed:
                    already_processed.append(prefixed)
                    outfile.write(' %s=%s' % (prefixed, quote_attrib(attr_value), ))
                continue
            mo = re_.match(Namespace_extract_pat_, attr_name)
            if mo is None:
                # Plain, unqualified attribute.
                if attr_name not in already_processed:
                    already_processed.append(attr_name)
                    outfile.write(' %s=%s' % (attr_name, quote_attrib(attr_value), ))
                continue
            namespace, attr_name = mo.group(1, 2)
            if attr_name not in already_processed:
                already_processed.append(attr_name)
                if namespace == 'http://www.w3.org/XML/1998/namespace':
                    outfile.write(' %s=%s' % (attr_name, quote_attrib(attr_value), ))
                else:
                    # Unknown namespace: declare a throwaway yyyN prefix inline.
                    unique_counter += 1
                    outfile.write(' xmlns:yyy%d="%s"' % (unique_counter, namespace, ))
                    outfile.write(' yyy%d:%s=%s' % (unique_counter, attr_name, quote_attrib(attr_value), ))

    def exportChildren(self, outfile, level, namespace_='ovf:', name_='cimDouble', fromsubclass_=False):
        # Simple content only: no child elements to emit.
        pass

    def hasContent_(self):
        # Truthy text content is the only payload this type can carry.
        return bool(self.valueOf_)

    def exportLiteral(self, outfile, level, name_='cimDouble'):
        """Write a Python-literal rendering of this object to *outfile*."""
        level += 1
        self.exportLiteralAttributes(outfile, level, [], name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)
        showIndent(outfile, level)
        outfile.write('valueOf_ = """%s""",\n' % (self.valueOf_,))

    def exportLiteralAttributes(self, outfile, level, already_processed, name_):
        for attr_name, attr_value in self.anyAttributes_.items():
            showIndent(outfile, level)
            outfile.write('%s = "%s",\n' % (attr_name, attr_value,))

    def exportLiteralChildren(self, outfile, level, name_):
        pass

    def build(self, node):
        """Populate this instance from an ElementTree element."""
        self.buildAttributes(node, node.attrib, [])
        self.valueOf_ = get_all_text_(node)
        for child_ in node:
            child_name = Tag_pattern_.match(child_.tag).groups()[-1]
            self.buildChildren(child_, node, child_name)

    def buildAttributes(self, node, attrs, already_processed):
        # Collect wildcard attributes not already handled by the caller.
        self.anyAttributes_ = {}
        for attr_name, attr_value in attrs.items():
            if attr_name not in already_processed:
                self.anyAttributes_[attr_name] = attr_value

    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        pass
# end class cimDouble
class cimChar16(cimString):
    """Generated binding for the CIM ``cimChar16`` type, derived from cimString."""
    subclass = None
    superclass = cimString

    def __init__(self, valueOf_=None):
        super(cimChar16, self).__init__(valueOf_, )
        self.valueOf_ = valueOf_
        self.anyAttributes_ = {}

    def factory(*args_, **kwargs_):
        # Build the registered subclass when one has been installed.
        target = cimChar16.subclass or cimChar16
        return target(*args_, **kwargs_)
    factory = staticmethod(factory)

    def get_valueOf_(self):
        return self.valueOf_

    def set_valueOf_(self, valueOf_):
        self.valueOf_ = valueOf_

    def get_anyAttributes_(self):
        return self.anyAttributes_

    def set_anyAttributes_(self, anyAttributes_):
        self.anyAttributes_ = anyAttributes_

    def export(self, outfile, level, namespace_='ovf:', name_='cimChar16', namespacedef_=''):
        """Serialize this element as XML to *outfile* at indent *level*."""
        showIndent(outfile, level)
        outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
        already_processed = []
        self.exportAttributes(outfile, level, already_processed, namespace_, name_='cimChar16')
        if not self.hasContent_():
            outfile.write('/>\n')
            return
        outfile.write('>')
        outfile.write(str(self.valueOf_).encode(ExternalEncoding))
        self.exportChildren(outfile, level + 1, namespace_, name_)
        outfile.write('</%s%s>\n' % (namespace_, name_))

    def exportAttributes(self, outfile, level, already_processed, namespace_='ovf:', name_='cimChar16'):
        """Emit wildcard attributes, then let the cimString base add its own."""
        unique_counter = 0
        xsinamespaceprefix = 'xsi'
        xsinamespace1 = 'http://www.w3.org/2001/XMLSchema-instance'
        xsinamespace2 = '{%s}' % (xsinamespace1, )
        for attr_name, attr_value in self.anyAttributes_.items():
            if attr_name.startswith(xsinamespace2):
                # XMLSchema-instance attribute: rewrite as an xsi:-prefixed name.
                prefixed = '%s:%s' % (xsinamespaceprefix, attr_name[len(xsinamespace2):], )
                if prefixed not in already_processed:
                    already_processed.append(prefixed)
                    outfile.write(' %s=%s' % (prefixed, quote_attrib(attr_value), ))
                continue
            mo = re_.match(Namespace_extract_pat_, attr_name)
            if mo is None:
                # Plain, unqualified attribute.
                if attr_name not in already_processed:
                    already_processed.append(attr_name)
                    outfile.write(' %s=%s' % (attr_name, quote_attrib(attr_value), ))
                continue
            namespace, attr_name = mo.group(1, 2)
            if attr_name not in already_processed:
                already_processed.append(attr_name)
                if namespace == 'http://www.w3.org/XML/1998/namespace':
                    outfile.write(' %s=%s' % (attr_name, quote_attrib(attr_value), ))
                else:
                    # Unknown namespace: declare a throwaway yyyN prefix inline.
                    unique_counter += 1
                    outfile.write(' xmlns:yyy%d="%s"' % (unique_counter, namespace, ))
                    outfile.write(' yyy%d:%s=%s' % (unique_counter, attr_name, quote_attrib(attr_value), ))
        super(cimChar16, self).exportAttributes(outfile, level, already_processed, namespace_, name_='cimChar16')

    def exportChildren(self, outfile, level, namespace_='ovf:', name_='cimChar16', fromsubclass_=False):
        super(cimChar16, self).exportChildren(outfile, level, namespace_, name_, True)

    def hasContent_(self):
        # Own text content, or anything the cimString base considers content.
        return bool(self.valueOf_ or super(cimChar16, self).hasContent_())

    def exportLiteral(self, outfile, level, name_='cimChar16'):
        """Write a Python-literal rendering of this object to *outfile*."""
        level += 1
        self.exportLiteralAttributes(outfile, level, [], name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)
        showIndent(outfile, level)
        outfile.write('valueOf_ = """%s""",\n' % (self.valueOf_,))

    def exportLiteralAttributes(self, outfile, level, already_processed, name_):
        for attr_name, attr_value in self.anyAttributes_.items():
            showIndent(outfile, level)
            outfile.write('%s = "%s",\n' % (attr_name, attr_value,))
        super(cimChar16, self).exportLiteralAttributes(outfile, level, already_processed, name_)

    def exportLiteralChildren(self, outfile, level, name_):
        super(cimChar16, self).exportLiteralChildren(outfile, level, name_)

    def build(self, node):
        """Populate this instance from an ElementTree element."""
        self.buildAttributes(node, node.attrib, [])
        self.valueOf_ = get_all_text_(node)
        for child_ in node:
            child_name = Tag_pattern_.match(child_.tag).groups()[-1]
            self.buildChildren(child_, node, child_name)

    def buildAttributes(self, node, attrs, already_processed):
        # Collect wildcard attributes, then let the cimString base run too.
        self.anyAttributes_ = {}
        for attr_name, attr_value in attrs.items():
            if attr_name not in already_processed:
                self.anyAttributes_[attr_name] = attr_value
        super(cimChar16, self).buildAttributes(node, attrs, already_processed)

    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        pass
# end class cimChar16
class cimBase64Binary(GeneratedsSuper):
    """Generated binding for the CIM ``cimBase64Binary`` simple type.

    Carries a single text value (``valueOf_``) and a dict of wildcard
    XML attributes (``anyAttributes_``).
    """
    subclass = None
    superclass = None

    def __init__(self, valueOf_=None):
        self.valueOf_ = valueOf_
        self.anyAttributes_ = {}

    def factory(*args_, **kwargs_):
        # Build the registered subclass when one has been installed.
        target = cimBase64Binary.subclass or cimBase64Binary
        return target(*args_, **kwargs_)
    factory = staticmethod(factory)

    def get_valueOf_(self):
        return self.valueOf_

    def set_valueOf_(self, valueOf_):
        self.valueOf_ = valueOf_

    def get_anyAttributes_(self):
        return self.anyAttributes_

    def set_anyAttributes_(self, anyAttributes_):
        self.anyAttributes_ = anyAttributes_

    def export(self, outfile, level, namespace_='ovf:', name_='cimBase64Binary', namespacedef_=''):
        """Serialize this element as XML to *outfile* at indent *level*."""
        showIndent(outfile, level)
        outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
        already_processed = []
        self.exportAttributes(outfile, level, already_processed, namespace_, name_='cimBase64Binary')
        if not self.hasContent_():
            outfile.write('/>\n')
            return
        outfile.write('>')
        outfile.write(str(self.valueOf_).encode(ExternalEncoding))
        self.exportChildren(outfile, level + 1, namespace_, name_)
        outfile.write('</%s%s>\n' % (namespace_, name_))

    def exportAttributes(self, outfile, level, already_processed, namespace_='ovf:', name_='cimBase64Binary'):
        """Emit wildcard attributes, restoring prefixes for Clark-notation names."""
        unique_counter = 0
        xsinamespaceprefix = 'xsi'
        xsinamespace1 = 'http://www.w3.org/2001/XMLSchema-instance'
        xsinamespace2 = '{%s}' % (xsinamespace1, )
        for attr_name, attr_value in self.anyAttributes_.items():
            if attr_name.startswith(xsinamespace2):
                # XMLSchema-instance attribute: rewrite as an xsi:-prefixed name.
                prefixed = '%s:%s' % (xsinamespaceprefix, attr_name[len(xsinamespace2):], )
                if prefixed not in already_processed:
                    already_processed.append(prefixed)
                    outfile.write(' %s=%s' % (prefixed, quote_attrib(attr_value), ))
                continue
            mo = re_.match(Namespace_extract_pat_, attr_name)
            if mo is None:
                # Plain, unqualified attribute.
                if attr_name not in already_processed:
                    already_processed.append(attr_name)
                    outfile.write(' %s=%s' % (attr_name, quote_attrib(attr_value), ))
                continue
            namespace, attr_name = mo.group(1, 2)
            if attr_name not in already_processed:
                already_processed.append(attr_name)
                if namespace == 'http://www.w3.org/XML/1998/namespace':
                    outfile.write(' %s=%s' % (attr_name, quote_attrib(attr_value), ))
                else:
                    # Unknown namespace: declare a throwaway yyyN prefix inline.
                    unique_counter += 1
                    outfile.write(' xmlns:yyy%d="%s"' % (unique_counter, namespace, ))
                    outfile.write(' yyy%d:%s=%s' % (unique_counter, attr_name, quote_attrib(attr_value), ))

    def exportChildren(self, outfile, level, namespace_='ovf:', name_='cimBase64Binary', fromsubclass_=False):
        # Simple content only: no child elements to emit.
        pass

    def hasContent_(self):
        # Truthy text content is the only payload this type can carry.
        return bool(self.valueOf_)

    def exportLiteral(self, outfile, level, name_='cimBase64Binary'):
        """Write a Python-literal rendering of this object to *outfile*."""
        level += 1
        self.exportLiteralAttributes(outfile, level, [], name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)
        showIndent(outfile, level)
        outfile.write('valueOf_ = """%s""",\n' % (self.valueOf_,))

    def exportLiteralAttributes(self, outfile, level, already_processed, name_):
        for attr_name, attr_value in self.anyAttributes_.items():
            showIndent(outfile, level)
            outfile.write('%s = "%s",\n' % (attr_name, attr_value,))

    def exportLiteralChildren(self, outfile, level, name_):
        pass

    def build(self, node):
        """Populate this instance from an ElementTree element."""
        self.buildAttributes(node, node.attrib, [])
        self.valueOf_ = get_all_text_(node)
        for child_ in node:
            child_name = Tag_pattern_.match(child_.tag).groups()[-1]
            self.buildChildren(child_, node, child_name)

    def buildAttributes(self, node, attrs, already_processed):
        # Collect wildcard attributes not already handled by the caller.
        self.anyAttributes_ = {}
        for attr_name, attr_value in attrs.items():
            if attr_name not in already_processed:
                self.anyAttributes_[attr_name] = attr_value

    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        pass
# end class cimBase64Binary
class cimReference(GeneratedsSuper):
    """Generated binding for the CIM ``cimReference`` type.

    Holds an ordered list of arbitrary child objects (``anytypeobjs_``,
    an xs:any wildcard) plus a dict of wildcard XML attributes
    (``anyAttributes_``).
    """
    subclass = None
    superclass = None
    def __init__(self, anytypeobjs_=None):
        if anytypeobjs_ is None:
            self.anytypeobjs_ = []
        else:
            self.anytypeobjs_ = anytypeobjs_
        self.anyAttributes_ = {}
    def factory(*args_, **kwargs_):
        # Delegate construction to a registered subclass when present.
        if cimReference.subclass:
            return cimReference.subclass(*args_, **kwargs_)
        else:
            return cimReference(*args_, **kwargs_)
    factory = staticmethod(factory)
    def get_anytypeobjs_(self): return self.anytypeobjs_
    def set_anytypeobjs_(self, anytypeobjs_): self.anytypeobjs_ = anytypeobjs_
    def add_anytypeobjs_(self, value): self.anytypeobjs_.append(value)
    def insert_anytypeobjs_(self, index, value):
        # Bug fix: the generated code assigned into the non-existent
        # attribute ``self._anytypeobjs_`` (AttributeError at runtime) and
        # used item assignment, which would replace rather than insert.
        self.anytypeobjs_.insert(index, value)
    def get_anyAttributes_(self): return self.anyAttributes_
    def set_anyAttributes_(self, anyAttributes_): self.anyAttributes_ = anyAttributes_
    def export(self, outfile, level, namespace_='ovf:', name_='cimReference', namespacedef_=''):
        """Serialize this element as XML to *outfile* at indent *level*."""
        showIndent(outfile, level)
        outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
        already_processed = []
        self.exportAttributes(outfile, level, already_processed, namespace_, name_='cimReference')
        if self.hasContent_():
            outfile.write('>\n')
            self.exportChildren(outfile, level + 1, namespace_, name_)
            showIndent(outfile, level)
            outfile.write('</%s%s>\n' % (namespace_, name_))
        else:
            outfile.write('/>\n')
    def exportAttributes(self, outfile, level, already_processed, namespace_='ovf:', name_='cimReference'):
        """Emit wildcard attributes, restoring prefixes for Clark-notation names."""
        unique_counter = 0
        for name, value in self.anyAttributes_.items():
            xsinamespaceprefix = 'xsi'
            xsinamespace1 = 'http://www.w3.org/2001/XMLSchema-instance'
            xsinamespace2 = '{%s}' % (xsinamespace1, )
            if name.startswith(xsinamespace2):
                # XMLSchema-instance attribute: rewrite as an xsi:-prefixed name.
                name1 = name[len(xsinamespace2):]
                name2 = '%s:%s' % (xsinamespaceprefix, name1, )
                if name2 not in already_processed:
                    already_processed.append(name2)
                    outfile.write(' %s=%s' % (name2, quote_attrib(value), ))
            else:
                mo = re_.match(Namespace_extract_pat_, name)
                if mo is not None:
                    namespace, name = mo.group(1, 2)
                    if name not in already_processed:
                        already_processed.append(name)
                        if namespace == 'http://www.w3.org/XML/1998/namespace':
                            outfile.write(' %s=%s' % (name, quote_attrib(value), ))
                        else:
                            # Unknown namespace: declare a throwaway yyyN prefix inline.
                            unique_counter += 1
                            outfile.write(' xmlns:yyy%d="%s"' % (unique_counter, namespace, ))
                            outfile.write(' yyy%d:%s=%s' % (unique_counter, name, quote_attrib(value), ))
                else:
                    # Plain, unqualified attribute.
                    if name not in already_processed:
                        already_processed.append(name)
                        outfile.write(' %s=%s' % (name, quote_attrib(value), ))
    def exportChildren(self, outfile, level, namespace_='ovf:', name_='cimReference', fromsubclass_=False):
        # Delegate serialization to each wildcard child object.
        for obj_ in self.anytypeobjs_:
            obj_.export(outfile, level, namespace_)
    def hasContent_(self):
        # The wildcard child list is the only possible payload.
        return bool(self.anytypeobjs_)
    def exportLiteral(self, outfile, level, name_='cimReference'):
        """Write a Python-literal rendering of this object to *outfile*."""
        level += 1
        self.exportLiteralAttributes(outfile, level, [], name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)
    def exportLiteralAttributes(self, outfile, level, already_processed, name_):
        for name, value in self.anyAttributes_.items():
            showIndent(outfile, level)
            outfile.write('%s = "%s",\n' % (name, value,))
    def exportLiteralChildren(self, outfile, level, name_):
        showIndent(outfile, level)
        outfile.write('anytypeobjs_=[\n')
        level += 1
        for anytypeobjs_ in self.anytypeobjs_:
            anytypeobjs_.exportLiteral(outfile, level)
        level -= 1
        showIndent(outfile, level)
        outfile.write('],\n')
    def build(self, node):
        """Populate this instance from an ElementTree element."""
        self.buildAttributes(node, node.attrib, [])
        for child in node:
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_)
    def buildAttributes(self, node, attrs, already_processed):
        # Collect wildcard attributes not already handled by the caller.
        self.anyAttributes_ = {}
        for name, value in attrs.items():
            if name not in already_processed:
                self.anyAttributes_[name] = value
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        # xs:any wildcard: build whatever child arrives and append it.
        obj_ = self.gds_build_any(child_, 'cimReference')
        if obj_ is not None:
            self.add_anytypeobjs_(obj_)
# end class cimReference
class cimHexBinary(GeneratedsSuper):
    """Generated binding for the CIM ``cimHexBinary`` simple type.

    Carries a single text value (``valueOf_``) and a dict of wildcard
    XML attributes (``anyAttributes_``).
    """
    subclass = None
    superclass = None

    def __init__(self, valueOf_=None):
        self.valueOf_ = valueOf_
        self.anyAttributes_ = {}

    def factory(*args_, **kwargs_):
        # Build the registered subclass when one has been installed.
        target = cimHexBinary.subclass or cimHexBinary
        return target(*args_, **kwargs_)
    factory = staticmethod(factory)

    def get_valueOf_(self):
        return self.valueOf_

    def set_valueOf_(self, valueOf_):
        self.valueOf_ = valueOf_

    def get_anyAttributes_(self):
        return self.anyAttributes_

    def set_anyAttributes_(self, anyAttributes_):
        self.anyAttributes_ = anyAttributes_

    def export(self, outfile, level, namespace_='ovf:', name_='cimHexBinary', namespacedef_=''):
        """Serialize this element as XML to *outfile* at indent *level*."""
        showIndent(outfile, level)
        outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
        already_processed = []
        self.exportAttributes(outfile, level, already_processed, namespace_, name_='cimHexBinary')
        if not self.hasContent_():
            outfile.write('/>\n')
            return
        outfile.write('>')
        outfile.write(str(self.valueOf_).encode(ExternalEncoding))
        self.exportChildren(outfile, level + 1, namespace_, name_)
        outfile.write('</%s%s>\n' % (namespace_, name_))

    def exportAttributes(self, outfile, level, already_processed, namespace_='ovf:', name_='cimHexBinary'):
        """Emit wildcard attributes, restoring prefixes for Clark-notation names."""
        unique_counter = 0
        xsinamespaceprefix = 'xsi'
        xsinamespace1 = 'http://www.w3.org/2001/XMLSchema-instance'
        xsinamespace2 = '{%s}' % (xsinamespace1, )
        for attr_name, attr_value in self.anyAttributes_.items():
            if attr_name.startswith(xsinamespace2):
                # XMLSchema-instance attribute: rewrite as an xsi:-prefixed name.
                prefixed = '%s:%s' % (xsinamespaceprefix, attr_name[len(xsinamespace2):], )
                if prefixed not in already_processed:
                    already_processed.append(prefixed)
                    outfile.write(' %s=%s' % (prefixed, quote_attrib(attr_value), ))
                continue
            mo = re_.match(Namespace_extract_pat_, attr_name)
            if mo is None:
                # Plain, unqualified attribute.
                if attr_name not in already_processed:
                    already_processed.append(attr_name)
                    outfile.write(' %s=%s' % (attr_name, quote_attrib(attr_value), ))
                continue
            namespace, attr_name = mo.group(1, 2)
            if attr_name not in already_processed:
                already_processed.append(attr_name)
                if namespace == 'http://www.w3.org/XML/1998/namespace':
                    outfile.write(' %s=%s' % (attr_name, quote_attrib(attr_value), ))
                else:
                    # Unknown namespace: declare a throwaway yyyN prefix inline.
                    unique_counter += 1
                    outfile.write(' xmlns:yyy%d="%s"' % (unique_counter, namespace, ))
                    outfile.write(' yyy%d:%s=%s' % (unique_counter, attr_name, quote_attrib(attr_value), ))

    def exportChildren(self, outfile, level, namespace_='ovf:', name_='cimHexBinary', fromsubclass_=False):
        # Simple content only: no child elements to emit.
        pass

    def hasContent_(self):
        # Truthy text content is the only payload this type can carry.
        return bool(self.valueOf_)

    def exportLiteral(self, outfile, level, name_='cimHexBinary'):
        """Write a Python-literal rendering of this object to *outfile*."""
        level += 1
        self.exportLiteralAttributes(outfile, level, [], name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)
        showIndent(outfile, level)
        outfile.write('valueOf_ = """%s""",\n' % (self.valueOf_,))

    def exportLiteralAttributes(self, outfile, level, already_processed, name_):
        for attr_name, attr_value in self.anyAttributes_.items():
            showIndent(outfile, level)
            outfile.write('%s = "%s",\n' % (attr_name, attr_value,))

    def exportLiteralChildren(self, outfile, level, name_):
        pass

    def build(self, node):
        """Populate this instance from an ElementTree element."""
        self.buildAttributes(node, node.attrib, [])
        self.valueOf_ = get_all_text_(node)
        for child_ in node:
            child_name = Tag_pattern_.match(child_.tag).groups()[-1]
            self.buildChildren(child_, node, child_name)

    def buildAttributes(self, node, attrs, already_processed):
        # Collect wildcard attributes not already handled by the caller.
        self.anyAttributes_ = {}
        for attr_name, attr_value in attrs.items():
            if attr_name not in already_processed:
                self.anyAttributes_[attr_name] = attr_value

    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        pass
# end class cimHexBinary
class cimAnySimpleType(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, valueOf_=None):
self.valueOf_ = valueOf_
self.anyAttributes_ = {}
def factory(*args_, **kwargs_):
if cimAnySimpleType.subclass:
return cimAnySimpleType.subclass(*args_, **kwargs_)
else:
return cimAnySimpleType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_valueOf_(self): return self.valueOf_
def set_valueOf_(self, valueOf_): self.valueOf_ = valueOf_
def get_anyAttributes_(self): return self.anyAttributes_
def set_anyAttributes_(self, anyAttributes_): self.anyAttributes_ = anyAttributes_
def export(self, outfile, level, namespace_='ovf:', name_='cimAnySimpleType', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = []
self.exportAttributes(outfile, level, already_processed, namespace_, name_='cimAnySimpleType')
if self.hasContent_():
outfile.write('>')
outfile.write(str(self.valueOf_).encode(ExternalEncoding))
self.exportChildren(outfile, level + 1, namespace_, name_)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='ovf:', name_='cimAnySimpleType'):
unique_counter = 0
for name, value in self.anyAttributes_.items():
xsinamespaceprefix = 'xsi'
xsinamespace1 = 'http://www.w3.org/2001/XMLSchema-instance'
xsinamespace2 = '{%s}' % (xsinamespace1, )
if name.startswith(xsinamespace2):
name1 = name[len(xsinamespace2):]
name2 = '%s:%s' % (xsinamespaceprefix, name1, )
if name2 not in already_processed:
already_processed.append(name2)
outfile.write(' %s=%s' % (name2, quote_attrib(value), ))
else:
mo = re_.match(Namespace_extract_pat_, name)
if mo is not None:
namespace, name = mo.group(1, 2)
if name not in already_processed:
already_processed.append(name)
if namespace == 'http://www.w3.org/XML/1998/namespace':
outfile.write(' %s=%s' % (name, quote_attrib(value), ))
else:
unique_counter += 1
outfile.write(' xmlns:yyy%d="%s"' % (unique_counter, namespace, ))
outfile.write(' yyy%d:%s=%s' % (unique_counter, name, quote_attrib(value), ))
else:
if name not in already_processed:
already_processed.append(name)
outfile.write(' %s=%s' % (name, quote_attrib(value), ))
pass
def exportChildren(self, outfile, level, namespace_='ovf:', name_='cimAnySimpleType', fromsubclass_=False):
pass
def hasContent_(self):
if (
self.valueOf_
):
return True
else:
return False
    def exportLiteral(self, outfile, level, name_='cimAnySimpleType'):
        """Write this element as Python constructor-style literal text."""
        level += 1
        self.exportLiteralAttributes(outfile, level, [], name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)
        showIndent(outfile, level)
        outfile.write('valueOf_ = """%s""",\n' % (self.valueOf_,))
    def exportLiteralAttributes(self, outfile, level, already_processed, name_):
        """Write each wildcard attribute as a 'name = "value",' literal line."""
        for name, value in self.anyAttributes_.items():
            showIndent(outfile, level)
            outfile.write('%s = "%s",\n' % (name, value,))
    def exportLiteralChildren(self, outfile, level, name_):
        # Simple type: no child elements to write.
        pass
    def build(self, node):
        """Populate this instance from an ElementTree element *node*."""
        self.buildAttributes(node, node.attrib, [])
        self.valueOf_ = get_all_text_(node)
        for child in node:
            # Tag_pattern_ strips any '{uri}' prefix to the local tag name.
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_)
    def buildAttributes(self, node, attrs, already_processed):
        """Collect all XML attributes not claimed elsewhere into anyAttributes_."""
        self.anyAttributes_ = {}
        for name, value in attrs.items():
            if name not in already_processed:
                self.anyAttributes_[name] = value
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        # Simple type: no child elements to parse.
        pass
# end class cimAnySimpleType
class qualifierString(cimString):
    """Generated binding: a cimString with an optional 'qualifier' XML
    attribute and xsi:type extension support.
    """
    subclass = None
    superclass = cimString
    def __init__(self, qualifier=None, valueOf_=None, extensiontype_=None):
        super(qualifierString, self).__init__(valueOf_, extensiontype_, )
        self.qualifier = _cast(None, qualifier)
        # NOTE(review): valueOf_/extensiontype_ re-assigned after the super()
        # call -- redundant but harmless generated code.
        self.valueOf_ = valueOf_
        self.extensiontype_ = extensiontype_
    def factory(*args_, **kwargs_):
        # Instantiate the registered subclass override, if any.
        if qualifierString.subclass:
            return qualifierString.subclass(*args_, **kwargs_)
        else:
            return qualifierString(*args_, **kwargs_)
    factory = staticmethod(factory)
    def get_qualifier(self): return self.qualifier
    def set_qualifier(self, qualifier): self.qualifier = qualifier
    def get_valueOf_(self): return self.valueOf_
    def set_valueOf_(self, valueOf_): self.valueOf_ = valueOf_
    def get_extensiontype_(self): return self.extensiontype_
    def set_extensiontype_(self, extensiontype_): self.extensiontype_ = extensiontype_
    def export(self, outfile, level, namespace_='ovf:', name_='qualifierString', namespacedef_=''):
        """Serialize as XML; self-closing tag when there is no content."""
        showIndent(outfile, level)
        outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
        already_processed = []
        self.exportAttributes(outfile, level, already_processed, namespace_, name_='qualifierString')
        if self.hasContent_():
            outfile.write('>')
            outfile.write(str(self.valueOf_).encode(ExternalEncoding))
            self.exportChildren(outfile, level + 1, namespace_, name_)
            outfile.write('</%s%s>\n' % (namespace_, name_))
        else:
            outfile.write('/>\n')
    def exportAttributes(self, outfile, level, already_processed, namespace_='ovf:', name_='qualifierString'):
        """Write the 'qualifier' attribute and an xsi:type when extended."""
        super(qualifierString, self).exportAttributes(outfile, level, already_processed, namespace_, name_='qualifierString')
        if self.qualifier is not None and 'qualifier' not in already_processed:
            already_processed.append('qualifier')
            outfile.write(' qualifier=%s' % (self.gds_format_string(quote_attrib(self.qualifier).encode(ExternalEncoding), input_name='qualifier'), ))
        if self.extensiontype_ is not None and 'xsi:type' not in already_processed:
            already_processed.append('xsi:type')
            outfile.write(' xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"')
            outfile.write(' xsi:type="%s"' % self.extensiontype_)
    def exportChildren(self, outfile, level, namespace_='ovf:', name_='qualifierString', fromsubclass_=False):
        super(qualifierString, self).exportChildren(outfile, level, namespace_, name_, True)
        pass
    def hasContent_(self):
        # Content is either a local text value or inherited content.
        if (
            self.valueOf_ or
            super(qualifierString, self).hasContent_()
            ):
            return True
        else:
            return False
    def exportLiteral(self, outfile, level, name_='qualifierString'):
        """Write this element as Python constructor-style literal text."""
        level += 1
        self.exportLiteralAttributes(outfile, level, [], name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)
        showIndent(outfile, level)
        outfile.write('valueOf_ = """%s""",\n' % (self.valueOf_,))
    def exportLiteralAttributes(self, outfile, level, already_processed, name_):
        if self.qualifier is not None and 'qualifier' not in already_processed:
            already_processed.append('qualifier')
            showIndent(outfile, level)
            outfile.write('qualifier = "%s",\n' % (self.qualifier,))
        super(qualifierString, self).exportLiteralAttributes(outfile, level, already_processed, name_)
    def exportLiteralChildren(self, outfile, level, name_):
        super(qualifierString, self).exportLiteralChildren(outfile, level, name_)
        pass
    def build(self, node):
        """Populate this instance from an ElementTree element *node*."""
        self.buildAttributes(node, node.attrib, [])
        self.valueOf_ = get_all_text_(node)
        for child in node:
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_)
    def buildAttributes(self, node, attrs, already_processed):
        """Read 'qualifier' and xsi:type, then delegate the rest upward."""
        value = find_attr_value_('qualifier', node)
        if value is not None and 'qualifier' not in already_processed:
            already_processed.append('qualifier')
            self.qualifier = value
        value = find_attr_value_('xsi:type', node)
        if value is not None and 'xsi:type' not in already_processed:
            already_processed.append('xsi:type')
            self.extensiontype_ = value
        super(qualifierString, self).buildAttributes(node, attrs, already_processed)
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        # Simple type: no child elements to parse.
        pass
class qualifierBoolean(cimBoolean):
    """Generated binding: a cimBoolean with an optional 'qualifier' XML
    attribute.
    """
    subclass = None
    superclass = cimBoolean
    def __init__(self, qualifier=None, valueOf_=None):
        super(qualifierBoolean, self).__init__(valueOf_, )
        self.qualifier = _cast(None, qualifier)
        # NOTE(review): valueOf_ re-assigned after super() -- redundant
        # but harmless generated code.
        self.valueOf_ = valueOf_
    def factory(*args_, **kwargs_):
        # Instantiate the registered subclass override, if any.
        if qualifierBoolean.subclass:
            return qualifierBoolean.subclass(*args_, **kwargs_)
        else:
            return qualifierBoolean(*args_, **kwargs_)
    factory = staticmethod(factory)
    def get_qualifier(self): return self.qualifier
    def set_qualifier(self, qualifier): self.qualifier = qualifier
    def get_valueOf_(self): return self.valueOf_
    def set_valueOf_(self, valueOf_): self.valueOf_ = valueOf_
    def export(self, outfile, level, namespace_='ovf:', name_='qualifierBoolean', namespacedef_=''):
        """Serialize as XML; self-closing tag when there is no content."""
        showIndent(outfile, level)
        outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
        already_processed = []
        self.exportAttributes(outfile, level, already_processed, namespace_, name_='qualifierBoolean')
        if self.hasContent_():
            outfile.write('>')
            outfile.write(str(self.valueOf_).encode(ExternalEncoding))
            self.exportChildren(outfile, level + 1, namespace_, name_)
            outfile.write('</%s%s>\n' % (namespace_, name_))
        else:
            outfile.write('/>\n')
    def exportAttributes(self, outfile, level, already_processed, namespace_='ovf:', name_='qualifierBoolean'):
        """Write the 'qualifier' attribute after inherited attributes."""
        super(qualifierBoolean, self).exportAttributes(outfile, level, already_processed, namespace_, name_='qualifierBoolean')
        if self.qualifier is not None and 'qualifier' not in already_processed:
            already_processed.append('qualifier')
            outfile.write(' qualifier=%s' % (self.gds_format_string(quote_attrib(self.qualifier).encode(ExternalEncoding), input_name='qualifier'), ))
    def exportChildren(self, outfile, level, namespace_='ovf:', name_='qualifierBoolean', fromsubclass_=False):
        super(qualifierBoolean, self).exportChildren(outfile, level, namespace_, name_, True)
        pass
    def hasContent_(self):
        # Content is either a local text value or inherited content.
        if (
            self.valueOf_ or
            super(qualifierBoolean, self).hasContent_()
            ):
            return True
        else:
            return False
    def exportLiteral(self, outfile, level, name_='qualifierBoolean'):
        """Write this element as Python constructor-style literal text."""
        level += 1
        self.exportLiteralAttributes(outfile, level, [], name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)
        showIndent(outfile, level)
        outfile.write('valueOf_ = """%s""",\n' % (self.valueOf_,))
    def exportLiteralAttributes(self, outfile, level, already_processed, name_):
        if self.qualifier is not None and 'qualifier' not in already_processed:
            already_processed.append('qualifier')
            showIndent(outfile, level)
            outfile.write('qualifier = "%s",\n' % (self.qualifier,))
        super(qualifierBoolean, self).exportLiteralAttributes(outfile, level, already_processed, name_)
    def exportLiteralChildren(self, outfile, level, name_):
        super(qualifierBoolean, self).exportLiteralChildren(outfile, level, name_)
        pass
    def build(self, node):
        """Populate this instance from an ElementTree element *node*."""
        self.buildAttributes(node, node.attrib, [])
        self.valueOf_ = get_all_text_(node)
        for child in node:
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_)
    def buildAttributes(self, node, attrs, already_processed):
        """Read 'qualifier', then delegate remaining attributes upward."""
        value = find_attr_value_('qualifier', node)
        if value is not None and 'qualifier' not in already_processed:
            already_processed.append('qualifier')
            self.qualifier = value
        super(qualifierBoolean, self).buildAttributes(node, attrs, already_processed)
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        # Simple type: no child elements to parse.
        pass
class qualifierUInt32(cimUnsignedInt):
    """Generated binding: a cimUnsignedInt with an optional 'qualifier'
    XML attribute.
    """
    subclass = None
    superclass = cimUnsignedInt
    def __init__(self, qualifier=None, valueOf_=None):
        super(qualifierUInt32, self).__init__(valueOf_, )
        self.qualifier = _cast(None, qualifier)
        # NOTE(review): valueOf_ re-assigned after super() -- redundant
        # but harmless generated code.
        self.valueOf_ = valueOf_
    def factory(*args_, **kwargs_):
        # Instantiate the registered subclass override, if any.
        if qualifierUInt32.subclass:
            return qualifierUInt32.subclass(*args_, **kwargs_)
        else:
            return qualifierUInt32(*args_, **kwargs_)
    factory = staticmethod(factory)
    def get_qualifier(self): return self.qualifier
    def set_qualifier(self, qualifier): self.qualifier = qualifier
    def get_valueOf_(self): return self.valueOf_
    def set_valueOf_(self, valueOf_): self.valueOf_ = valueOf_
    def export(self, outfile, level, namespace_='ovf:', name_='qualifierUInt32', namespacedef_=''):
        """Serialize as XML; self-closing tag when there is no content."""
        showIndent(outfile, level)
        outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
        already_processed = []
        self.exportAttributes(outfile, level, already_processed, namespace_, name_='qualifierUInt32')
        if self.hasContent_():
            outfile.write('>')
            outfile.write(str(self.valueOf_).encode(ExternalEncoding))
            self.exportChildren(outfile, level + 1, namespace_, name_)
            outfile.write('</%s%s>\n' % (namespace_, name_))
        else:
            outfile.write('/>\n')
    def exportAttributes(self, outfile, level, already_processed, namespace_='ovf:', name_='qualifierUInt32'):
        """Write the 'qualifier' attribute after inherited attributes."""
        super(qualifierUInt32, self).exportAttributes(outfile, level, already_processed, namespace_, name_='qualifierUInt32')
        if self.qualifier is not None and 'qualifier' not in already_processed:
            already_processed.append('qualifier')
            outfile.write(' qualifier=%s' % (self.gds_format_string(quote_attrib(self.qualifier).encode(ExternalEncoding), input_name='qualifier'), ))
    def exportChildren(self, outfile, level, namespace_='ovf:', name_='qualifierUInt32', fromsubclass_=False):
        super(qualifierUInt32, self).exportChildren(outfile, level, namespace_, name_, True)
        pass
    def hasContent_(self):
        # Content is either a local text value or inherited content.
        if (
            self.valueOf_ or
            super(qualifierUInt32, self).hasContent_()
            ):
            return True
        else:
            return False
    def exportLiteral(self, outfile, level, name_='qualifierUInt32'):
        """Write this element as Python constructor-style literal text."""
        level += 1
        self.exportLiteralAttributes(outfile, level, [], name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)
        showIndent(outfile, level)
        outfile.write('valueOf_ = """%s""",\n' % (self.valueOf_,))
    def exportLiteralAttributes(self, outfile, level, already_processed, name_):
        if self.qualifier is not None and 'qualifier' not in already_processed:
            already_processed.append('qualifier')
            showIndent(outfile, level)
            outfile.write('qualifier = "%s",\n' % (self.qualifier,))
        super(qualifierUInt32, self).exportLiteralAttributes(outfile, level, already_processed, name_)
    def exportLiteralChildren(self, outfile, level, name_):
        super(qualifierUInt32, self).exportLiteralChildren(outfile, level, name_)
        pass
    def build(self, node):
        """Populate this instance from an ElementTree element *node*."""
        self.buildAttributes(node, node.attrib, [])
        self.valueOf_ = get_all_text_(node)
        for child in node:
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_)
    def buildAttributes(self, node, attrs, already_processed):
        """Read 'qualifier', then delegate remaining attributes upward."""
        value = find_attr_value_('qualifier', node)
        if value is not None and 'qualifier' not in already_processed:
            already_processed.append('qualifier')
            self.qualifier = value
        super(qualifierUInt32, self).buildAttributes(node, attrs, already_processed)
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        # Simple type: no child elements to parse.
        pass
class qualifierSInt64(cimLong):
    """Generated binding: a cimLong with an optional 'qualifier' XML
    attribute.
    """
    subclass = None
    superclass = cimLong
    def __init__(self, qualifier=None, valueOf_=None):
        super(qualifierSInt64, self).__init__(valueOf_, )
        self.qualifier = _cast(None, qualifier)
        # NOTE(review): valueOf_ re-assigned after super() -- redundant
        # but harmless generated code.
        self.valueOf_ = valueOf_
    def factory(*args_, **kwargs_):
        # Instantiate the registered subclass override, if any.
        if qualifierSInt64.subclass:
            return qualifierSInt64.subclass(*args_, **kwargs_)
        else:
            return qualifierSInt64(*args_, **kwargs_)
    factory = staticmethod(factory)
    def get_qualifier(self): return self.qualifier
    def set_qualifier(self, qualifier): self.qualifier = qualifier
    def get_valueOf_(self): return self.valueOf_
    def set_valueOf_(self, valueOf_): self.valueOf_ = valueOf_
    def export(self, outfile, level, namespace_='ovf:', name_='qualifierSInt64', namespacedef_=''):
        """Serialize as XML; self-closing tag when there is no content."""
        showIndent(outfile, level)
        outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
        already_processed = []
        self.exportAttributes(outfile, level, already_processed, namespace_, name_='qualifierSInt64')
        if self.hasContent_():
            outfile.write('>')
            outfile.write(str(self.valueOf_).encode(ExternalEncoding))
            self.exportChildren(outfile, level + 1, namespace_, name_)
            outfile.write('</%s%s>\n' % (namespace_, name_))
        else:
            outfile.write('/>\n')
    def exportAttributes(self, outfile, level, already_processed, namespace_='ovf:', name_='qualifierSInt64'):
        """Write the 'qualifier' attribute after inherited attributes."""
        super(qualifierSInt64, self).exportAttributes(outfile, level, already_processed, namespace_, name_='qualifierSInt64')
        if self.qualifier is not None and 'qualifier' not in already_processed:
            already_processed.append('qualifier')
            outfile.write(' qualifier=%s' % (self.gds_format_string(quote_attrib(self.qualifier).encode(ExternalEncoding), input_name='qualifier'), ))
    def exportChildren(self, outfile, level, namespace_='ovf:', name_='qualifierSInt64', fromsubclass_=False):
        super(qualifierSInt64, self).exportChildren(outfile, level, namespace_, name_, True)
        pass
    def hasContent_(self):
        # Content is either a local text value or inherited content.
        if (
            self.valueOf_ or
            super(qualifierSInt64, self).hasContent_()
            ):
            return True
        else:
            return False
    def exportLiteral(self, outfile, level, name_='qualifierSInt64'):
        """Write this element as Python constructor-style literal text."""
        level += 1
        self.exportLiteralAttributes(outfile, level, [], name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)
        showIndent(outfile, level)
        outfile.write('valueOf_ = """%s""",\n' % (self.valueOf_,))
    def exportLiteralAttributes(self, outfile, level, already_processed, name_):
        if self.qualifier is not None and 'qualifier' not in already_processed:
            already_processed.append('qualifier')
            showIndent(outfile, level)
            outfile.write('qualifier = "%s",\n' % (self.qualifier,))
        super(qualifierSInt64, self).exportLiteralAttributes(outfile, level, already_processed, name_)
    def exportLiteralChildren(self, outfile, level, name_):
        super(qualifierSInt64, self).exportLiteralChildren(outfile, level, name_)
        pass
    def build(self, node):
        """Populate this instance from an ElementTree element *node*."""
        self.buildAttributes(node, node.attrib, [])
        self.valueOf_ = get_all_text_(node)
        for child in node:
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_)
    def buildAttributes(self, node, attrs, already_processed):
        """Read 'qualifier', then delegate remaining attributes upward."""
        value = find_attr_value_('qualifier', node)
        if value is not None and 'qualifier' not in already_processed:
            already_processed.append('qualifier')
            self.qualifier = value
        super(qualifierSInt64, self).buildAttributes(node, attrs, already_processed)
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        # Simple type: no child elements to parse.
        pass
class qualifierSArray(qualifierString):
subclass = None
superclass = qualifierString
def __init__(self, qualifier=None):
super(qualifierSArray, self).__init__(qualifier, )
pass
def factory(*args_, **kwargs_):
if qualifierSArray.subclass:
return qualifierSArray.subclass(*args_, **kwargs_)
else:
return qualifierSArray(*args_, **kwargs_)
factory = staticmethod(factory)
def export(self, outfile, level, namespace_='ovf:', name_='qualifierSArray', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = []
self.exportAttributes(outfile, level, already_processed, namespace_, name_='qualifierSArray')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='ovf:', name_='qualifierSArray'):
super(qualifierSArray, self).exportAttributes(outfile, level, already_processed, namespace_, name_='qualifierSArray')
def exportChildren(self, outfile, level, namespace_='ovf:', name_='qualifierSArray', fromsubclass_=False):
super(qualifierSArray, self).exportChildren(outfile, level, namespace_, name_, True)
pass
def hasContent_(self):
if (
super(qualifierSArray, self).hasContent_()
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='qualifierSArray'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
super(qualifierSArray, self).exportLiteralAttributes(outfile, level, already_processed, name_)
def exportLiteralChildren(self, outfile, level, name_):
super(qualifierSArray, self).exportLiteralChildren(outfile, level, name_)
pass
def build(self, node):
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
super(qualifierSArray, self).buildAttributes(node, attrs, already_processed)
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
super(qualifierSArray, self).buildChildren(child_, node, nodeName_, True)
pass
# end class qualifierSArray
class Caption(cimString):
    """Generated binding: a cimString that also collects wildcard
    (xs:anyAttribute) attributes.
    """
    subclass = None
    superclass = cimString
    def __init__(self, valueOf_=None):
        super(Caption, self).__init__(valueOf_, )
        # NOTE(review): valueOf_ re-assigned after super() -- redundant
        # but harmless generated code.
        self.valueOf_ = valueOf_
        self.anyAttributes_ = {}
    def factory(*args_, **kwargs_):
        # Instantiate the registered subclass override, if any.
        if Caption.subclass:
            return Caption.subclass(*args_, **kwargs_)
        else:
            return Caption(*args_, **kwargs_)
    factory = staticmethod(factory)
    def get_valueOf_(self): return self.valueOf_
    def set_valueOf_(self, valueOf_): self.valueOf_ = valueOf_
    def get_anyAttributes_(self): return self.anyAttributes_
    def set_anyAttributes_(self, anyAttributes_): self.anyAttributes_ = anyAttributes_
    def export(self, outfile, level, namespace_='ovf:', name_='Caption', namespacedef_=''):
        """Serialize as XML; self-closing tag when there is no content."""
        showIndent(outfile, level)
        outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
        already_processed = []
        self.exportAttributes(outfile, level, already_processed, namespace_, name_='Caption')
        if self.hasContent_():
            outfile.write('>')
            outfile.write(str(self.valueOf_).encode(ExternalEncoding))
            self.exportChildren(outfile, level + 1, namespace_, name_)
            outfile.write('</%s%s>\n' % (namespace_, name_))
        else:
            outfile.write('/>\n')
    def exportAttributes(self, outfile, level, already_processed, namespace_='ovf:', name_='Caption'):
        """Write the wildcard attributes, then the inherited attributes.

        Namespaced '{uri}local' names are rewritten: XSI gets the 'xsi:'
        prefix, the XML namespace is emitted bare, other namespaces get a
        generated 'yyyN' prefix declared inline.
        """
        unique_counter = 0  # counts generated 'yyyN' namespace prefixes
        for name, value in self.anyAttributes_.items():
            xsinamespaceprefix = 'xsi'
            xsinamespace1 = 'http://www.w3.org/2001/XMLSchema-instance'
            xsinamespace2 = '{%s}' % (xsinamespace1, )
            if name.startswith(xsinamespace2):
                name1 = name[len(xsinamespace2):]
                name2 = '%s:%s' % (xsinamespaceprefix, name1, )
                if name2 not in already_processed:
                    already_processed.append(name2)
                    outfile.write(' %s=%s' % (name2, quote_attrib(value), ))
            else:
                mo = re_.match(Namespace_extract_pat_, name)
                if mo is not None:
                    namespace, name = mo.group(1, 2)
                    if name not in already_processed:
                        already_processed.append(name)
                        if namespace == 'http://www.w3.org/XML/1998/namespace':
                            outfile.write(' %s=%s' % (name, quote_attrib(value), ))
                        else:
                            unique_counter += 1
                            outfile.write(' xmlns:yyy%d="%s"' % (unique_counter, namespace, ))
                            outfile.write(' yyy%d:%s=%s' % (unique_counter, name, quote_attrib(value), ))
                else:
                    if name not in already_processed:
                        already_processed.append(name)
                        outfile.write(' %s=%s' % (name, quote_attrib(value), ))
        super(Caption, self).exportAttributes(outfile, level, already_processed, namespace_, name_='Caption')
    def exportChildren(self, outfile, level, namespace_='ovf:', name_='Caption', fromsubclass_=False):
        super(Caption, self).exportChildren(outfile, level, namespace_, name_, True)
        pass
    def hasContent_(self):
        # Content is either a local text value or inherited content.
        if (
            self.valueOf_ or
            super(Caption, self).hasContent_()
            ):
            return True
        else:
            return False
    def exportLiteral(self, outfile, level, name_='Caption'):
        """Write this element as Python constructor-style literal text."""
        level += 1
        self.exportLiteralAttributes(outfile, level, [], name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)
        showIndent(outfile, level)
        outfile.write('valueOf_ = """%s""",\n' % (self.valueOf_,))
    def exportLiteralAttributes(self, outfile, level, already_processed, name_):
        for name, value in self.anyAttributes_.items():
            showIndent(outfile, level)
            outfile.write('%s = "%s",\n' % (name, value,))
        super(Caption, self).exportLiteralAttributes(outfile, level, already_processed, name_)
    def exportLiteralChildren(self, outfile, level, name_):
        super(Caption, self).exportLiteralChildren(outfile, level, name_)
        pass
    def build(self, node):
        """Populate this instance from an ElementTree element *node*."""
        self.buildAttributes(node, node.attrib, [])
        self.valueOf_ = get_all_text_(node)
        for child in node:
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_)
    def buildAttributes(self, node, attrs, already_processed):
        """Collect unclaimed attributes, then delegate upward."""
        self.anyAttributes_ = {}
        for name, value in attrs.items():
            if name not in already_processed:
                self.anyAttributes_[name] = value
        super(Caption, self).buildAttributes(node, attrs, already_processed)
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        # Simple type: no child elements to parse.
        pass
class CIM_VirtualSystemSettingData_Type(GeneratedsSuper):
subclass = None
superclass = None
    def __init__(self, AutomaticRecoveryAction=None, AutomaticShutdownAction=None, AutomaticStartupAction=None, AutomaticStartupActionDelay=None, AutomaticStartupActionSequenceNumber=None, Caption=None, ConfigurationDataRoot=None, ConfigurationFile=None, ConfigurationID=None, CreationTime=None, Description=None, ElementName=None, InstanceID=None, LogDataRoot=None, Notes=None, RecoveryFile=None, SnapshotDataRoot=None, SuspendDataRoot=None, SwapFileDataRoot=None, VirtualSystemIdentifier=None, VirtualSystemType=None, anytypeobjs_=None, extensiontype_=None):
        """Initialize all CIM_VirtualSystemSettingData members.

        Scalar members are stored as-is (None means "absent"); the
        repeatable members Notes and anytypeobjs_ default to fresh lists
        (the None check avoids the mutable-default-argument pitfall).
        """
        self.AutomaticRecoveryAction = AutomaticRecoveryAction
        self.AutomaticShutdownAction = AutomaticShutdownAction
        self.AutomaticStartupAction = AutomaticStartupAction
        self.AutomaticStartupActionDelay = AutomaticStartupActionDelay
        self.AutomaticStartupActionSequenceNumber = AutomaticStartupActionSequenceNumber
        self.Caption = Caption
        self.ConfigurationDataRoot = ConfigurationDataRoot
        self.ConfigurationFile = ConfigurationFile
        self.ConfigurationID = ConfigurationID
        self.CreationTime = CreationTime
        self.Description = Description
        self.ElementName = ElementName
        self.InstanceID = InstanceID
        self.LogDataRoot = LogDataRoot
        if Notes is None:
            self.Notes = []
        else:
            self.Notes = Notes
        self.RecoveryFile = RecoveryFile
        self.SnapshotDataRoot = SnapshotDataRoot
        self.SuspendDataRoot = SuspendDataRoot
        self.SwapFileDataRoot = SwapFileDataRoot
        self.VirtualSystemIdentifier = VirtualSystemIdentifier
        self.VirtualSystemType = VirtualSystemType
        if anytypeobjs_ is None:
            self.anytypeobjs_ = []
        else:
            self.anytypeobjs_ = anytypeobjs_
        self.anyAttributes_ = {}  # wildcard (xs:anyAttribute) attributes
        self.extensiontype_ = extensiontype_
    def factory(*args_, **kwargs_):
        # Instantiate the registered subclass override, if any.
        if CIM_VirtualSystemSettingData_Type.subclass:
            return CIM_VirtualSystemSettingData_Type.subclass(*args_, **kwargs_)
        else:
            return CIM_VirtualSystemSettingData_Type(*args_, **kwargs_)
    factory = staticmethod(factory)
    # --- generated accessors; validate_* hooks are intentionally empty ---
    def get_AutomaticRecoveryAction(self): return self.AutomaticRecoveryAction
    def set_AutomaticRecoveryAction(self, AutomaticRecoveryAction): self.AutomaticRecoveryAction = AutomaticRecoveryAction
    def validate_AutomaticRecoveryAction(self, value):
        # Validate type AutomaticRecoveryAction, a restriction on xs:unsignedShort.
        pass
    def get_AutomaticShutdownAction(self): return self.AutomaticShutdownAction
    def set_AutomaticShutdownAction(self, AutomaticShutdownAction): self.AutomaticShutdownAction = AutomaticShutdownAction
    def validate_AutomaticShutdownAction(self, value):
        # Validate type AutomaticShutdownAction, a restriction on xs:unsignedShort.
        pass
    def get_AutomaticStartupAction(self): return self.AutomaticStartupAction
    def set_AutomaticStartupAction(self, AutomaticStartupAction): self.AutomaticStartupAction = AutomaticStartupAction
    def validate_AutomaticStartupAction(self, value):
        # Validate type AutomaticStartupAction, a restriction on xs:unsignedShort.
        pass
    def get_AutomaticStartupActionDelay(self): return self.AutomaticStartupActionDelay
    def set_AutomaticStartupActionDelay(self, AutomaticStartupActionDelay): self.AutomaticStartupActionDelay = AutomaticStartupActionDelay
    def get_AutomaticStartupActionSequenceNumber(self): return self.AutomaticStartupActionSequenceNumber
    def set_AutomaticStartupActionSequenceNumber(self, AutomaticStartupActionSequenceNumber): self.AutomaticStartupActionSequenceNumber = AutomaticStartupActionSequenceNumber
    def get_Caption(self): return self.Caption
    def set_Caption(self, Caption): self.Caption = Caption
    def get_ConfigurationDataRoot(self): return self.ConfigurationDataRoot
    def set_ConfigurationDataRoot(self, ConfigurationDataRoot): self.ConfigurationDataRoot = ConfigurationDataRoot
    def get_ConfigurationFile(self): return self.ConfigurationFile
    def set_ConfigurationFile(self, ConfigurationFile): self.ConfigurationFile = ConfigurationFile
    def get_ConfigurationID(self): return self.ConfigurationID
    def set_ConfigurationID(self, ConfigurationID): self.ConfigurationID = ConfigurationID
    def get_CreationTime(self): return self.CreationTime
    def set_CreationTime(self, CreationTime): self.CreationTime = CreationTime
    def get_Description(self): return self.Description
    def set_Description(self, Description): self.Description = Description
    def get_ElementName(self): return self.ElementName
    def set_ElementName(self, ElementName): self.ElementName = ElementName
    def get_InstanceID(self): return self.InstanceID
    def set_InstanceID(self, InstanceID): self.InstanceID = InstanceID
    def get_LogDataRoot(self): return self.LogDataRoot
    def set_LogDataRoot(self, LogDataRoot): self.LogDataRoot = LogDataRoot
    def get_Notes(self): return self.Notes
    def set_Notes(self, Notes): self.Notes = Notes
    def add_Notes(self, value): self.Notes.append(value)
    # NOTE(review): generated "insert" replaces the item at *index*; it does
    # not shift elements like list.insert().
    def insert_Notes(self, index, value): self.Notes[index] = value
    def get_RecoveryFile(self): return self.RecoveryFile
    def set_RecoveryFile(self, RecoveryFile): self.RecoveryFile = RecoveryFile
    def get_SnapshotDataRoot(self): return self.SnapshotDataRoot
    def set_SnapshotDataRoot(self, SnapshotDataRoot): self.SnapshotDataRoot = SnapshotDataRoot
    def get_SuspendDataRoot(self): return self.SuspendDataRoot
    def set_SuspendDataRoot(self, SuspendDataRoot): self.SuspendDataRoot = SuspendDataRoot
    def get_SwapFileDataRoot(self): return self.SwapFileDataRoot
    def set_SwapFileDataRoot(self, SwapFileDataRoot): self.SwapFileDataRoot = SwapFileDataRoot
    def get_VirtualSystemIdentifier(self): return self.VirtualSystemIdentifier
    def set_VirtualSystemIdentifier(self, VirtualSystemIdentifier): self.VirtualSystemIdentifier = VirtualSystemIdentifier
    def get_VirtualSystemType(self): return self.VirtualSystemType
    def set_VirtualSystemType(self, VirtualSystemType): self.VirtualSystemType = VirtualSystemType
    def get_anytypeobjs_(self): return self.anytypeobjs_
    def set_anytypeobjs_(self, anytypeobjs_): self.anytypeobjs_ = anytypeobjs_
    def add_anytypeobjs_(self, value): self.anytypeobjs_.append(value)
def insert_anytypeobjs_(self, index, value): self._anytypeobjs_[index] = value
def get_anyAttributes_(self): return self.anyAttributes_
def set_anyAttributes_(self, anyAttributes_): self.anyAttributes_ = anyAttributes_
def get_extensiontype_(self): return self.extensiontype_
def set_extensiontype_(self, extensiontype_): self.extensiontype_ = extensiontype_
    def export(self, outfile, level, namespace_='ovf:', name_='CIM_VirtualSystemSettingData_Type', namespacedef_=''):
        """Serialize this element and its children as indented XML."""
        showIndent(outfile, level)
        outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
        already_processed = []
        self.exportAttributes(outfile, level, already_processed, namespace_, name_='CIM_VirtualSystemSettingData_Type')
        if self.hasContent_():
            outfile.write('>\n')
            self.exportChildren(outfile, level + 1, namespace_, name_)
            showIndent(outfile, level)
            outfile.write('</%s%s>\n' % (namespace_, name_))
        else:
            outfile.write('/>\n')
    def exportAttributes(self, outfile, level, already_processed, namespace_='ovf:', name_='CIM_VirtualSystemSettingData_Type'):
        """Write wildcard attributes and an xsi:type when extended.

        Namespaced '{uri}local' names are rewritten: XSI gets the 'xsi:'
        prefix, the XML namespace is emitted bare, other namespaces get a
        generated 'yyyN' prefix declared inline.
        """
        unique_counter = 0  # counts generated 'yyyN' namespace prefixes
        for name, value in self.anyAttributes_.items():
            xsinamespaceprefix = 'xsi'
            xsinamespace1 = 'http://www.w3.org/2001/XMLSchema-instance'
            xsinamespace2 = '{%s}' % (xsinamespace1, )
            if name.startswith(xsinamespace2):
                name1 = name[len(xsinamespace2):]
                name2 = '%s:%s' % (xsinamespaceprefix, name1, )
                if name2 not in already_processed:
                    already_processed.append(name2)
                    outfile.write(' %s=%s' % (name2, quote_attrib(value), ))
            else:
                mo = re_.match(Namespace_extract_pat_, name)
                if mo is not None:
                    namespace, name = mo.group(1, 2)
                    if name not in already_processed:
                        already_processed.append(name)
                        if namespace == 'http://www.w3.org/XML/1998/namespace':
                            outfile.write(' %s=%s' % (name, quote_attrib(value), ))
                        else:
                            unique_counter += 1
                            outfile.write(' xmlns:yyy%d="%s"' % (unique_counter, namespace, ))
                            outfile.write(' yyy%d:%s=%s' % (unique_counter, name, quote_attrib(value), ))
                else:
                    if name not in already_processed:
                        already_processed.append(name)
                        outfile.write(' %s=%s' % (name, quote_attrib(value), ))
        if self.extensiontype_ is not None and 'xsi:type' not in already_processed:
            already_processed.append('xsi:type')
            outfile.write(' xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"')
            outfile.write(' xsi:type="%s"' % self.extensiontype_)
        pass
    def exportChildren(self, outfile, level, namespace_='ovf:', name_='CIM_VirtualSystemSettingData_Type', fromsubclass_=False):
        """Write each non-None child element in schema order.

        The first three members are written inline as integers; the rest
        are complex types that serialize themselves via their own export().
        """
        if self.AutomaticRecoveryAction is not None:
            showIndent(outfile, level)
            outfile.write('<%sAutomaticRecoveryAction>%s</%sAutomaticRecoveryAction>\n' % (namespace_, self.gds_format_integer(self.AutomaticRecoveryAction, input_name='AutomaticRecoveryAction'), namespace_))
        if self.AutomaticShutdownAction is not None:
            showIndent(outfile, level)
            outfile.write('<%sAutomaticShutdownAction>%s</%sAutomaticShutdownAction>\n' % (namespace_, self.gds_format_integer(self.AutomaticShutdownAction, input_name='AutomaticShutdownAction'), namespace_))
        if self.AutomaticStartupAction is not None:
            showIndent(outfile, level)
            outfile.write('<%sAutomaticStartupAction>%s</%sAutomaticStartupAction>\n' % (namespace_, self.gds_format_integer(self.AutomaticStartupAction, input_name='AutomaticStartupAction'), namespace_))
        if self.AutomaticStartupActionDelay is not None:
            self.AutomaticStartupActionDelay.export(outfile, level, namespace_, name_='AutomaticStartupActionDelay')
        if self.AutomaticStartupActionSequenceNumber is not None:
            self.AutomaticStartupActionSequenceNumber.export(outfile, level, namespace_, name_='AutomaticStartupActionSequenceNumber')
        if self.Caption is not None:
            self.Caption.export(outfile, level, namespace_, name_='Caption')
        if self.ConfigurationDataRoot is not None:
            self.ConfigurationDataRoot.export(outfile, level, namespace_, name_='ConfigurationDataRoot')
        if self.ConfigurationFile is not None:
            self.ConfigurationFile.export(outfile, level, namespace_, name_='ConfigurationFile')
        if self.ConfigurationID is not None:
            self.ConfigurationID.export(outfile, level, namespace_, name_='ConfigurationID')
        if self.CreationTime is not None:
            self.CreationTime.export(outfile, level, namespace_, name_='CreationTime')
        if self.Description is not None:
            self.Description.export(outfile, level, namespace_, name_='Description')
        if self.ElementName is not None:
            self.ElementName.export(outfile, level, namespace_, name_='ElementName', )
        if self.InstanceID is not None:
            self.InstanceID.export(outfile, level, namespace_, name_='InstanceID', )
        if self.LogDataRoot is not None:
            self.LogDataRoot.export(outfile, level, namespace_, name_='LogDataRoot')
        for Notes_ in self.Notes:
            Notes_.export(outfile, level, namespace_, name_='Notes')
        if self.RecoveryFile is not None:
            self.RecoveryFile.export(outfile, level, namespace_, name_='RecoveryFile')
        if self.SnapshotDataRoot is not None:
            self.SnapshotDataRoot.export(outfile, level, namespace_, name_='SnapshotDataRoot')
        if self.SuspendDataRoot is not None:
            self.SuspendDataRoot.export(outfile, level, namespace_, name_='SuspendDataRoot')
        if self.SwapFileDataRoot is not None:
            self.SwapFileDataRoot.export(outfile, level, namespace_, name_='SwapFileDataRoot')
        if self.VirtualSystemIdentifier is not None:
            self.VirtualSystemIdentifier.export(outfile, level, namespace_, name_='VirtualSystemIdentifier')
        if self.VirtualSystemType is not None:
            self.VirtualSystemType.export(outfile, level, namespace_, name_='VirtualSystemType')
        # Wildcard (xs:any) children go last.
        for obj_ in self.anytypeobjs_:
            obj_.export(outfile, level, namespace_)
def hasContent_(self):
if (
self.AutomaticRecoveryAction is not None or
self.AutomaticShutdownAction is not None or
self.AutomaticStartupAction is not None or
self.AutomaticStartupActionDelay is not None or
self.AutomaticStartupActionSequenceNumber is not None or
self.Caption is not None or
self.ConfigurationDataRoot is not None or
self.ConfigurationFile is not None or
self.ConfigurationID is not None or
self.CreationTime is not None or
self.Description is not None or
self.ElementName is not None or
self.InstanceID is not None or
self.LogDataRoot is not None or
self.Notes or
self.RecoveryFile is not None or
self.SnapshotDataRoot is not None or
self.SuspendDataRoot is not None or
self.SwapFileDataRoot is not None or
self.VirtualSystemIdentifier is not None or
self.VirtualSystemType is not None or
self.anytypeobjs_
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='CIM_VirtualSystemSettingData_Type'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
for name, value in self.anyAttributes_.items():
showIndent(outfile, level)
outfile.write('%s = "%s",\n' % (name, value,))
    def exportLiteralChildren(self, outfile, level, name_):
        """Write each populated child element in Python-literal form, in
        schema order: plain-integer children inline, complex children as
        nested ``model_.<Name>(...)`` constructor text via exportLiteral().
        """
        if self.AutomaticRecoveryAction is not None:
            showIndent(outfile, level)
            outfile.write('AutomaticRecoveryAction=%d,\n' % self.AutomaticRecoveryAction)
        if self.AutomaticShutdownAction is not None:
            showIndent(outfile, level)
            outfile.write('AutomaticShutdownAction=%d,\n' % self.AutomaticShutdownAction)
        if self.AutomaticStartupAction is not None:
            showIndent(outfile, level)
            outfile.write('AutomaticStartupAction=%d,\n' % self.AutomaticStartupAction)
        if self.AutomaticStartupActionDelay is not None:
            showIndent(outfile, level)
            outfile.write('AutomaticStartupActionDelay=model_.AutomaticStartupActionDelay(\n')
            self.AutomaticStartupActionDelay.exportLiteral(outfile, level)
            showIndent(outfile, level)
            outfile.write('),\n')
        if self.AutomaticStartupActionSequenceNumber is not None:
            showIndent(outfile, level)
            outfile.write('AutomaticStartupActionSequenceNumber=model_.AutomaticStartupActionSequenceNumber(\n')
            self.AutomaticStartupActionSequenceNumber.exportLiteral(outfile, level)
            showIndent(outfile, level)
            outfile.write('),\n')
        if self.Caption is not None:
            showIndent(outfile, level)
            outfile.write('Caption=model_.Caption(\n')
            self.Caption.exportLiteral(outfile, level)
            showIndent(outfile, level)
            outfile.write('),\n')
        if self.ConfigurationDataRoot is not None:
            showIndent(outfile, level)
            outfile.write('ConfigurationDataRoot=model_.ConfigurationDataRoot(\n')
            self.ConfigurationDataRoot.exportLiteral(outfile, level)
            showIndent(outfile, level)
            outfile.write('),\n')
        if self.ConfigurationFile is not None:
            showIndent(outfile, level)
            outfile.write('ConfigurationFile=model_.ConfigurationFile(\n')
            self.ConfigurationFile.exportLiteral(outfile, level)
            showIndent(outfile, level)
            outfile.write('),\n')
        if self.ConfigurationID is not None:
            showIndent(outfile, level)
            outfile.write('ConfigurationID=model_.ConfigurationID(\n')
            self.ConfigurationID.exportLiteral(outfile, level)
            showIndent(outfile, level)
            outfile.write('),\n')
        if self.CreationTime is not None:
            showIndent(outfile, level)
            outfile.write('CreationTime=model_.CreationTime(\n')
            self.CreationTime.exportLiteral(outfile, level)
            showIndent(outfile, level)
            outfile.write('),\n')
        if self.Description is not None:
            showIndent(outfile, level)
            outfile.write('Description=model_.Description(\n')
            self.Description.exportLiteral(outfile, level)
            showIndent(outfile, level)
            outfile.write('),\n')
        if self.ElementName is not None:
            showIndent(outfile, level)
            outfile.write('ElementName=model_.ElementName(\n')
            self.ElementName.exportLiteral(outfile, level)
            showIndent(outfile, level)
            outfile.write('),\n')
        if self.InstanceID is not None:
            showIndent(outfile, level)
            outfile.write('InstanceID=model_.InstanceID(\n')
            self.InstanceID.exportLiteral(outfile, level)
            showIndent(outfile, level)
            outfile.write('),\n')
        if self.LogDataRoot is not None:
            showIndent(outfile, level)
            outfile.write('LogDataRoot=model_.LogDataRoot(\n')
            self.LogDataRoot.exportLiteral(outfile, level)
            showIndent(outfile, level)
            outfile.write('),\n')
        # Notes is list-valued: the list wrapper is always written, even when empty.
        showIndent(outfile, level)
        outfile.write('Notes=[\n')
        level += 1
        for Notes_ in self.Notes:
            showIndent(outfile, level)
            outfile.write('model_.Notes(\n')
            Notes_.exportLiteral(outfile, level)
            showIndent(outfile, level)
            outfile.write('),\n')
        level -= 1
        showIndent(outfile, level)
        outfile.write('],\n')
        if self.RecoveryFile is not None:
            showIndent(outfile, level)
            outfile.write('RecoveryFile=model_.RecoveryFile(\n')
            self.RecoveryFile.exportLiteral(outfile, level)
            showIndent(outfile, level)
            outfile.write('),\n')
        if self.SnapshotDataRoot is not None:
            showIndent(outfile, level)
            outfile.write('SnapshotDataRoot=model_.SnapshotDataRoot(\n')
            self.SnapshotDataRoot.exportLiteral(outfile, level)
            showIndent(outfile, level)
            outfile.write('),\n')
        if self.SuspendDataRoot is not None:
            showIndent(outfile, level)
            outfile.write('SuspendDataRoot=model_.SuspendDataRoot(\n')
            self.SuspendDataRoot.exportLiteral(outfile, level)
            showIndent(outfile, level)
            outfile.write('),\n')
        if self.SwapFileDataRoot is not None:
            showIndent(outfile, level)
            outfile.write('SwapFileDataRoot=model_.SwapFileDataRoot(\n')
            self.SwapFileDataRoot.exportLiteral(outfile, level)
            showIndent(outfile, level)
            outfile.write('),\n')
        if self.VirtualSystemIdentifier is not None:
            showIndent(outfile, level)
            outfile.write('VirtualSystemIdentifier=model_.VirtualSystemIdentifier(\n')
            self.VirtualSystemIdentifier.exportLiteral(outfile, level)
            showIndent(outfile, level)
            outfile.write('),\n')
        if self.VirtualSystemType is not None:
            showIndent(outfile, level)
            outfile.write('VirtualSystemType=model_.VirtualSystemType(\n')
            self.VirtualSystemType.exportLiteral(outfile, level)
            showIndent(outfile, level)
            outfile.write('),\n')
        # Wildcard (xs:any) children are likewise always written as a list.
        showIndent(outfile, level)
        outfile.write('anytypeobjs_=[\n')
        level += 1
        for anytypeobjs_ in self.anytypeobjs_:
            anytypeobjs_.exportLiteral(outfile, level)
        level -= 1
        showIndent(outfile, level)
        outfile.write('],\n')
def build(self, node):
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
self.anyAttributes_ = {}
for name, value in attrs.items():
if name not in already_processed:
self.anyAttributes_[name] = value
value = find_attr_value_('xsi:type', node)
if value is not None and 'xsi:type' not in already_processed:
already_processed.append('xsi:type')
self.extensiontype_ = value
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        """Dispatch on the child element's local tag name and store the
        parsed value on the matching attribute of this instance.
        Unrecognized children are collected via gds_build_any into
        anytypeobjs_.
        """
        if nodeName_ == 'AutomaticRecoveryAction':
            # Integer-valued child: parse, range-check, then type-validate.
            sval_ = child_.text
            try:
                ival_ = int(sval_)
            except (TypeError, ValueError), exp:
                raise_parse_error(child_, 'requires integer: %s' % exp)
            ival_ = self.gds_validate_integer(ival_, node, 'AutomaticRecoveryAction')
            self.AutomaticRecoveryAction = ival_
            self.validate_AutomaticRecoveryAction(self.AutomaticRecoveryAction) # validate type AutomaticRecoveryAction
        elif nodeName_ == 'AutomaticShutdownAction':
            sval_ = child_.text
            try:
                ival_ = int(sval_)
            except (TypeError, ValueError), exp:
                raise_parse_error(child_, 'requires integer: %s' % exp)
            ival_ = self.gds_validate_integer(ival_, node, 'AutomaticShutdownAction')
            self.AutomaticShutdownAction = ival_
            self.validate_AutomaticShutdownAction(self.AutomaticShutdownAction) # validate type AutomaticShutdownAction
        elif nodeName_ == 'AutomaticStartupAction':
            sval_ = child_.text
            try:
                ival_ = int(sval_)
            except (TypeError, ValueError), exp:
                raise_parse_error(child_, 'requires integer: %s' % exp)
            ival_ = self.gds_validate_integer(ival_, node, 'AutomaticStartupAction')
            self.AutomaticStartupAction = ival_
            self.validate_AutomaticStartupAction(self.AutomaticStartupAction) # validate type AutomaticStartupAction
        elif nodeName_ == 'AutomaticStartupActionDelay':
            obj_ = cimDateTime.factory()
            obj_.build(child_)
            self.set_AutomaticStartupActionDelay(obj_)
        elif nodeName_ == 'AutomaticStartupActionSequenceNumber':
            obj_ = cimUnsignedShort.factory()
            obj_.build(child_)
            self.set_AutomaticStartupActionSequenceNumber(obj_)
        elif nodeName_ == 'Caption':
            obj_ = Caption.factory()
            obj_.build(child_)
            self.set_Caption(obj_)
        elif nodeName_ == 'ConfigurationDataRoot':
            # get_class_obj_ honors an xsi:type override, defaulting to cimString.
            class_obj_ = self.get_class_obj_(child_, cimString)
            obj_ = class_obj_.factory()
            obj_.build(child_)
            self.set_ConfigurationDataRoot(obj_)
        elif nodeName_ == 'ConfigurationFile':
            class_obj_ = self.get_class_obj_(child_, cimString)
            obj_ = class_obj_.factory()
            obj_.build(child_)
            self.set_ConfigurationFile(obj_)
        elif nodeName_ == 'ConfigurationID':
            class_obj_ = self.get_class_obj_(child_, cimString)
            obj_ = class_obj_.factory()
            obj_.build(child_)
            self.set_ConfigurationID(obj_)
        elif nodeName_ == 'CreationTime':
            obj_ = cimDateTime.factory()
            obj_.build(child_)
            self.set_CreationTime(obj_)
        elif nodeName_ == 'Description':
            class_obj_ = self.get_class_obj_(child_, cimString)
            obj_ = class_obj_.factory()
            obj_.build(child_)
            self.set_Description(obj_)
        elif nodeName_ == 'ElementName':
            class_obj_ = self.get_class_obj_(child_, cimString)
            obj_ = class_obj_.factory()
            obj_.build(child_)
            self.set_ElementName(obj_)
        elif nodeName_ == 'InstanceID':
            class_obj_ = self.get_class_obj_(child_, cimString)
            obj_ = class_obj_.factory()
            obj_.build(child_)
            self.set_InstanceID(obj_)
        elif nodeName_ == 'LogDataRoot':
            class_obj_ = self.get_class_obj_(child_, cimString)
            obj_ = class_obj_.factory()
            obj_.build(child_)
            self.set_LogDataRoot(obj_)
        elif nodeName_ == 'Notes':
            # Notes may repeat: append rather than set.
            class_obj_ = self.get_class_obj_(child_, cimString)
            obj_ = class_obj_.factory()
            obj_.build(child_)
            self.Notes.append(obj_)
        elif nodeName_ == 'RecoveryFile':
            class_obj_ = self.get_class_obj_(child_, cimString)
            obj_ = class_obj_.factory()
            obj_.build(child_)
            self.set_RecoveryFile(obj_)
        elif nodeName_ == 'SnapshotDataRoot':
            class_obj_ = self.get_class_obj_(child_, cimString)
            obj_ = class_obj_.factory()
            obj_.build(child_)
            self.set_SnapshotDataRoot(obj_)
        elif nodeName_ == 'SuspendDataRoot':
            class_obj_ = self.get_class_obj_(child_, cimString)
            obj_ = class_obj_.factory()
            obj_.build(child_)
            self.set_SuspendDataRoot(obj_)
        elif nodeName_ == 'SwapFileDataRoot':
            class_obj_ = self.get_class_obj_(child_, cimString)
            obj_ = class_obj_.factory()
            obj_.build(child_)
            self.set_SwapFileDataRoot(obj_)
        elif nodeName_ == 'VirtualSystemIdentifier':
            class_obj_ = self.get_class_obj_(child_, cimString)
            obj_ = class_obj_.factory()
            obj_.build(child_)
            self.set_VirtualSystemIdentifier(obj_)
        elif nodeName_ == 'VirtualSystemType':
            class_obj_ = self.get_class_obj_(child_, cimString)
            obj_ = class_obj_.factory()
            obj_.build(child_)
            self.set_VirtualSystemType(obj_)
        else:
            # xs:any wildcard child.
            obj_ = self.gds_build_any(child_, 'CIM_VirtualSystemSettingData_Type')
            if obj_ is not None:
                self.add_anytypeobjs_(obj_)
# end class CIM_VirtualSystemSettingData_Type
class CIM_ResourceAllocationSettingData_Type(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, Address=None, AddressOnParent=None, AllocationUnits=None, AutomaticAllocation=None, AutomaticDeallocation=None, Caption=None, Connection=None, ConsumerVisibility=None, Description=None, ElementName=None, HostResource=None, InstanceID=None, Limit=None, MappingBehavior=None, OtherResourceType=None, Parent=None, PoolID=None, Reservation=None, ResourceSubType=None, ResourceType=None, VirtualQuantity=None, VirtualQuantityUnits=None, Weight=None, anytypeobjs_=None, extensiontype_=None):
self.Address = Address
self.AddressOnParent = AddressOnParent
self.AllocationUnits = AllocationUnits
self.AutomaticAllocation = AutomaticAllocation
self.AutomaticDeallocation = AutomaticDeallocation
self.Caption = Caption
if Connection is None:
self.Connection = []
else:
self.Connection = Connection
self.ConsumerVisibility = ConsumerVisibility
self.Description = Description
self.ElementName = ElementName
if HostResource is None:
self.HostResource = []
else:
self.HostResource = HostResource
self.InstanceID = InstanceID
self.Limit = Limit
self.MappingBehavior = MappingBehavior
self.OtherResourceType = OtherResourceType
self.Parent = Parent
self.PoolID = PoolID
self.Reservation = Reservation
self.ResourceSubType = ResourceSubType
self.ResourceType = ResourceType
self.VirtualQuantity = VirtualQuantity
self.VirtualQuantityUnits = VirtualQuantityUnits
self.Weight = Weight
if anytypeobjs_ is None:
self.anytypeobjs_ = []
else:
self.anytypeobjs_ = anytypeobjs_
self.anyAttributes_ = {}
self.extensiontype_ = extensiontype_
def factory(*args_, **kwargs_):
if CIM_ResourceAllocationSettingData_Type.subclass:
return CIM_ResourceAllocationSettingData_Type.subclass(*args_, **kwargs_)
else:
return CIM_ResourceAllocationSettingData_Type(*args_, **kwargs_)
factory = staticmethod(factory)
def get_Address(self): return self.Address
def set_Address(self, Address): self.Address = Address
def get_AddressOnParent(self): return self.AddressOnParent
def set_AddressOnParent(self, AddressOnParent): self.AddressOnParent = AddressOnParent
def get_AllocationUnits(self): return self.AllocationUnits
def set_AllocationUnits(self, AllocationUnits): self.AllocationUnits = AllocationUnits
def get_AutomaticAllocation(self): return self.AutomaticAllocation
def set_AutomaticAllocation(self, AutomaticAllocation): self.AutomaticAllocation = AutomaticAllocation
def get_AutomaticDeallocation(self): return self.AutomaticDeallocation
def set_AutomaticDeallocation(self, AutomaticDeallocation): self.AutomaticDeallocation = AutomaticDeallocation
def get_Caption(self): return self.Caption
def set_Caption(self, Caption): self.Caption = Caption
def get_Connection(self): return self.Connection
def set_Connection(self, Connection): self.Connection = Connection
def add_Connection(self, value): self.Connection.append(value)
def insert_Connection(self, index, value): self.Connection[index] = value
def get_ConsumerVisibility(self): return self.ConsumerVisibility
def set_ConsumerVisibility(self, ConsumerVisibility): self.ConsumerVisibility = ConsumerVisibility
def validate_ConsumerVisibility(self, value):
# Validate type ConsumerVisibility, a restriction on xs:unsignedShort.
pass
def get_Description(self): return self.Description
def set_Description(self, Description): self.Description = Description
def get_ElementName(self): return self.ElementName
def set_ElementName(self, ElementName): self.ElementName = ElementName
def get_HostResource(self): return self.HostResource
def set_HostResource(self, HostResource): self.HostResource = HostResource
def add_HostResource(self, value): self.HostResource.append(value)
def insert_HostResource(self, index, value): self.HostResource[index] = value
def get_InstanceID(self): return self.InstanceID
def set_InstanceID(self, InstanceID): self.InstanceID = InstanceID
def get_Limit(self): return self.Limit
def set_Limit(self, Limit): self.Limit = Limit
def get_MappingBehavior(self): return self.MappingBehavior
def set_MappingBehavior(self, MappingBehavior): self.MappingBehavior = MappingBehavior
def validate_MappingBehavior(self, value):
# Validate type MappingBehavior, a restriction on xs:unsignedShort.
pass
def get_OtherResourceType(self): return self.OtherResourceType
def set_OtherResourceType(self, OtherResourceType): self.OtherResourceType = OtherResourceType
def get_Parent(self): return self.Parent
def set_Parent(self, Parent): self.Parent = Parent
def get_PoolID(self): return self.PoolID
def set_PoolID(self, PoolID): self.PoolID = PoolID
def get_Reservation(self): return self.Reservation
def set_Reservation(self, Reservation): self.Reservation = Reservation
def get_ResourceSubType(self): return self.ResourceSubType
def set_ResourceSubType(self, ResourceSubType): self.ResourceSubType = ResourceSubType
def get_ResourceType(self): return self.ResourceType
def set_ResourceType(self, ResourceType): self.ResourceType = ResourceType
def validate_ResourceType(self, value):
# Validate type ResourceType, a restriction on xs:unsignedShort.
pass
def get_VirtualQuantity(self): return self.VirtualQuantity
def set_VirtualQuantity(self, VirtualQuantity): self.VirtualQuantity = VirtualQuantity
def get_VirtualQuantityUnits(self): return self.VirtualQuantityUnits
def set_VirtualQuantityUnits(self, VirtualQuantityUnits): self.VirtualQuantityUnits = VirtualQuantityUnits
def get_Weight(self): return self.Weight
def set_Weight(self, Weight): self.Weight = Weight
def get_anytypeobjs_(self): return self.anytypeobjs_
def set_anytypeobjs_(self, anytypeobjs_): self.anytypeobjs_ = anytypeobjs_
def add_anytypeobjs_(self, value): self.anytypeobjs_.append(value)
def insert_anytypeobjs_(self, index, value): self._anytypeobjs_[index] = value
def get_anyAttributes_(self): return self.anyAttributes_
def set_anyAttributes_(self, anyAttributes_): self.anyAttributes_ = anyAttributes_
def get_extensiontype_(self): return self.extensiontype_
def set_extensiontype_(self, extensiontype_): self.extensiontype_ = extensiontype_
def export(self, outfile, level, namespace_='ovf:', name_='CIM_ResourceAllocationSettingData_Type', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = []
self.exportAttributes(outfile, level, already_processed, namespace_, name_='CIM_ResourceAllocationSettingData_Type')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
showIndent(outfile, level)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='ovf:', name_='CIM_ResourceAllocationSettingData_Type'):
unique_counter = 0
for name, value in self.anyAttributes_.items():
xsinamespaceprefix = 'xsi'
xsinamespace1 = 'http://www.w3.org/2001/XMLSchema-instance'
xsinamespace2 = '{%s}' % (xsinamespace1, )
if name.startswith(xsinamespace2):
name1 = name[len(xsinamespace2):]
name2 = '%s:%s' % (xsinamespaceprefix, name1, )
if name2 not in already_processed:
already_processed.append(name2)
outfile.write(' %s=%s' % (name2, quote_attrib(value), ))
else:
mo = re_.match(Namespace_extract_pat_, name)
if mo is not None:
namespace, name = mo.group(1, 2)
if name not in already_processed:
already_processed.append(name)
if namespace == 'http://www.w3.org/XML/1998/namespace':
outfile.write(' %s=%s' % (name, quote_attrib(value), ))
else:
unique_counter += 1
outfile.write(' xmlns:yyy%d="%s"' % (unique_counter, namespace, ))
outfile.write(' yyy%d:%s=%s' % (unique_counter, name, quote_attrib(value), ))
else:
if name not in already_processed:
already_processed.append(name)
outfile.write(' %s=%s' % (name, quote_attrib(value), ))
if self.extensiontype_ is not None and 'xsi:type' not in already_processed:
already_processed.append('xsi:type')
outfile.write(' xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"')
outfile.write(' xsi:type="%s"' % self.extensiontype_)
pass
def exportChildren(self, outfile, level, namespace_='ovf:', name_='CIM_ResourceAllocationSettingData_Type', fromsubclass_=False):
if self.Address is not None:
self.Address.export(outfile, level, namespace_, name_='Address')
if self.AddressOnParent is not None:
self.AddressOnParent.export(outfile, level, namespace_, name_='AddressOnParent')
if self.AllocationUnits is not None:
self.AllocationUnits.export(outfile, level, namespace_, name_='AllocationUnits')
if self.AutomaticAllocation is not None:
self.AutomaticAllocation.export(outfile, level, namespace_, name_='AutomaticAllocation')
if self.AutomaticDeallocation is not None:
self.AutomaticDeallocation.export(outfile, level, namespace_, name_='AutomaticDeallocation')
if self.Caption is not None:
self.Caption.export(outfile, level, namespace_, name_='Caption')
for Connection_ in self.Connection:
Connection_.export(outfile, level, namespace_, name_='Connection')
if self.ConsumerVisibility is not None:
showIndent(outfile, level)
outfile.write('<%sConsumerVisibility>%s</%sConsumerVisibility>\n' % (namespace_, self.gds_format_integer(self.ConsumerVisibility, input_name='ConsumerVisibility'), namespace_))
if self.Description is not None:
self.Description.export(outfile, level, namespace_, name_='Description')
if self.ElementName is not None:
self.ElementName.export(outfile, level, namespace_, name_='ElementName', )
for HostResource_ in self.HostResource:
HostResource_.export(outfile, level, namespace_, name_='HostResource')
if self.InstanceID is not None:
self.InstanceID.export(outfile, level, namespace_, name_='InstanceID', )
if self.Limit is not None:
self.Limit.export(outfile, level, namespace_, name_='Limit')
if self.MappingBehavior is not None:
showIndent(outfile, level)
outfile.write('<%sMappingBehavior>%s</%sMappingBehavior>\n' % (namespace_, self.gds_format_integer(self.MappingBehavior, input_name='MappingBehavior'), namespace_))
if self.OtherResourceType is not None:
self.OtherResourceType.export(outfile, level, namespace_, name_='OtherResourceType')
if self.Parent is not None:
self.Parent.export(outfile, level, namespace_, name_='Parent')
if self.PoolID is not None:
self.PoolID.export(outfile, level, namespace_, name_='PoolID')
if self.Reservation is not None:
self.Reservation.export(outfile, level, namespace_, name_='Reservation')
if self.ResourceSubType is not None:
self.ResourceSubType.export(outfile, level, namespace_, name_='ResourceSubType')
if self.ResourceType is not None:
showIndent(outfile, level)
outfile.write('<%sResourceType>%s</%sResourceType>\n' % (namespace_, self.gds_format_integer(self.ResourceType, input_name='ResourceType'), namespace_))
if self.VirtualQuantity is not None:
self.VirtualQuantity.export(outfile, level, namespace_, name_='VirtualQuantity')
if self.VirtualQuantityUnits is not None:
self.VirtualQuantityUnits.export(outfile, level, namespace_, name_='VirtualQuantityUnits')
if self.Weight is not None:
self.Weight.export(outfile, level, namespace_, name_='Weight')
for obj_ in self.anytypeobjs_:
obj_.export(outfile, level, namespace_)
def hasContent_(self):
if (
self.Address is not None or
self.AddressOnParent is not None or
self.AllocationUnits is not None or
self.AutomaticAllocation is not None or
self.AutomaticDeallocation is not None or
self.Caption is not None or
self.Connection or
self.ConsumerVisibility is not None or
self.Description is not None or
self.ElementName is not None or
self.HostResource or
self.InstanceID is not None or
self.Limit is not None or
self.MappingBehavior is not None or
self.OtherResourceType is not None or
self.Parent is not None or
self.PoolID is not None or
self.Reservation is not None or
self.ResourceSubType is not None or
self.ResourceType is not None or
self.VirtualQuantity is not None or
self.VirtualQuantityUnits is not None or
self.Weight is not None or
self.anytypeobjs_
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='CIM_ResourceAllocationSettingData_Type'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
for name, value in self.anyAttributes_.items():
showIndent(outfile, level)
outfile.write('%s = "%s",\n' % (name, value,))
def exportLiteralChildren(self, outfile, level, name_):
if self.Address is not None:
showIndent(outfile, level)
outfile.write('Address=model_.Address(\n')
self.Address.exportLiteral(outfile, level)
showIndent(outfile, level)
outfile.write('),\n')
if self.AddressOnParent is not None:
showIndent(outfile, level)
outfile.write('AddressOnParent=model_.AddressOnParent(\n')
self.AddressOnParent.exportLiteral(outfile, level)
showIndent(outfile, level)
outfile.write('),\n')
if self.AllocationUnits is not None:
showIndent(outfile, level)
outfile.write('AllocationUnits=model_.AllocationUnits(\n')
self.AllocationUnits.exportLiteral(outfile, level)
showIndent(outfile, level)
outfile.write('),\n')
if self.AutomaticAllocation is not None:
showIndent(outfile, level)
outfile.write('AutomaticAllocation=model_.AutomaticAllocation(\n')
self.AutomaticAllocation.exportLiteral(outfile, level)
showIndent(outfile, level)
outfile.write('),\n')
if self.AutomaticDeallocation is not None:
showIndent(outfile, level)
outfile.write('AutomaticDeallocation=model_.AutomaticDeallocation(\n')
self.AutomaticDeallocation.exportLiteral(outfile, level)
showIndent(outfile, level)
outfile.write('),\n')
if self.Caption is not None:
showIndent(outfile, level)
outfile.write('Caption=model_.Caption(\n')
self.Caption.exportLiteral(outfile, level)
showIndent(outfile, level)
outfile.write('),\n')
showIndent(outfile, level)
outfile.write('Connection=[\n')
level += 1
for Connection_ in self.Connection:
showIndent(outfile, level)
outfile.write('model_.Connection(\n')
Connection_.exportLiteral(outfile, level)
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
if self.ConsumerVisibility is not None:
showIndent(outfile, level)
outfile.write('ConsumerVisibility=%d,\n' % self.ConsumerVisibility)
if self.Description is not None:
showIndent(outfile, level)
outfile.write('Description=model_.Description(\n')
self.Description.exportLiteral(outfile, level)
showIndent(outfile, level)
outfile.write('),\n')
if self.ElementName is not None:
showIndent(outfile, level)
outfile.write('ElementName=model_.ElementName(\n')
self.ElementName.exportLiteral(outfile, level)
showIndent(outfile, level)
outfile.write('),\n')
showIndent(outfile, level)
outfile.write('HostResource=[\n')
level += 1
for HostResource_ in self.HostResource:
showIndent(outfile, level)
outfile.write('model_.HostResource(\n')
HostResource_.exportLiteral(outfile, level)
showIndent(outfile, level)
outfile.write('),\n')
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
if self.InstanceID is not None:
showIndent(outfile, level)
outfile.write('InstanceID=model_.InstanceID(\n')
self.InstanceID.exportLiteral(outfile, level)
showIndent(outfile, level)
outfile.write('),\n')
if self.Limit is not None:
showIndent(outfile, level)
outfile.write('Limit=model_.Limit(\n')
self.Limit.exportLiteral(outfile, level)
showIndent(outfile, level)
outfile.write('),\n')
if self.MappingBehavior is not None:
showIndent(outfile, level)
outfile.write('MappingBehavior=%d,\n' % self.MappingBehavior)
if self.OtherResourceType is not None:
showIndent(outfile, level)
outfile.write('OtherResourceType=model_.OtherResourceType(\n')
self.OtherResourceType.exportLiteral(outfile, level)
showIndent(outfile, level)
outfile.write('),\n')
if self.Parent is not None:
showIndent(outfile, level)
outfile.write('Parent=model_.Parent(\n')
self.Parent.exportLiteral(outfile, level)
showIndent(outfile, level)
outfile.write('),\n')
if self.PoolID is not None:
showIndent(outfile, level)
outfile.write('PoolID=model_.PoolID(\n')
self.PoolID.exportLiteral(outfile, level)
showIndent(outfile, level)
outfile.write('),\n')
if self.Reservation is not None:
showIndent(outfile, level)
outfile.write('Reservation=model_.Reservation(\n')
self.Reservation.exportLiteral(outfile, level)
showIndent(outfile, level)
outfile.write('),\n')
if self.ResourceSubType is not None:
showIndent(outfile, level)
outfile.write('ResourceSubType=model_.ResourceSubType(\n')
self.ResourceSubType.exportLiteral(outfile, level)
showIndent(outfile, level)
outfile.write('),\n')
if self.ResourceType is not None:
showIndent(outfile, level)
outfile.write('ResourceType=%d,\n' % self.ResourceType)
if self.VirtualQuantity is not None:
showIndent(outfile, level)
outfile.write('VirtualQuantity=model_.VirtualQuantity(\n')
self.VirtualQuantity.exportLiteral(outfile, level)
showIndent(outfile, level)
outfile.write('),\n')
if self.VirtualQuantityUnits is not None:
showIndent(outfile, level)
outfile.write('VirtualQuantityUnits=model_.VirtualQuantityUnits(\n')
self.VirtualQuantityUnits.exportLiteral(outfile, level)
showIndent(outfile, level)
outfile.write('),\n')
if self.Weight is not None:
showIndent(outfile, level)
outfile.write('Weight=model_.Weight(\n')
self.Weight.exportLiteral(outfile, level)
showIndent(outfile, level)
outfile.write('),\n')
showIndent(outfile, level)
outfile.write('anytypeobjs_=[\n')
level += 1
for anytypeobjs_ in self.anytypeobjs_:
anytypeobjs_.exportLiteral(outfile, level)
level -= 1
showIndent(outfile, level)
outfile.write('],\n')
def build(self, node):
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
def buildAttributes(self, node, attrs, already_processed):
self.anyAttributes_ = {}
for name, value in attrs.items():
if name not in already_processed:
self.anyAttributes_[name] = value
value = find_attr_value_('xsi:type', node)
if value is not None and 'xsi:type' not in already_processed:
already_processed.append('xsi:type')
self.extensiontype_ = value
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'Address':
class_obj_ = self.get_class_obj_(child_, cimString)
obj_ = class_obj_.factory()
obj_.build(child_)
self.set_Address(obj_)
elif nodeName_ == 'AddressOnParent':
class_obj_ = self.get_class_obj_(child_, cimString)
obj_ = class_obj_.factory()
obj_.build(child_)
self.set_AddressOnParent(obj_)
elif nodeName_ == 'AllocationUnits':
class_obj_ = self.get_class_obj_(child_, cimString)
obj_ = class_obj_.factory()
obj_.build(child_)
self.set_AllocationUnits(obj_)
elif nodeName_ == 'AutomaticAllocation':
class_obj_ = self.get_class_obj_(child_, cimBoolean)
obj_ = class_obj_.factory()
obj_.build(child_)
self.set_AutomaticAllocation(obj_)
elif nodeName_ == 'AutomaticDeallocation':
class_obj_ = self.get_class_obj_(child_, cimBoolean)
obj_ = class_obj_.factory()
obj_.build(child_)
self.set_AutomaticDeallocation(obj_)
elif nodeName_ == 'Caption':
obj_ = Caption.factory()
obj_.build(child_)
self.set_Caption(obj_)
elif nodeName_ == 'Connection':
class_obj_ = self.get_class_obj_(child_, cimString)
obj_ = class_obj_.factory()
obj_.build(child_)
self.Connection.append(obj_)
elif nodeName_ == 'ConsumerVisibility':
sval_ = child_.text
try:
ival_ = int(sval_)
except (TypeError, ValueError), exp:
raise_parse_error(child_, 'requires integer: %s' % exp)
ival_ = self.gds_validate_integer(ival_, node, 'ConsumerVisibility')
self.ConsumerVisibility = ival_
self.validate_ConsumerVisibility(self.ConsumerVisibility) # validate type ConsumerVisibility
elif nodeName_ == 'Description':
class_obj_ = self.get_class_obj_(child_, cimString)
obj_ = class_obj_.factory()
obj_.build(child_)
self.set_Description(obj_)
elif nodeName_ == 'ElementName':
class_obj_ = self.get_class_obj_(child_, cimString)
obj_ = class_obj_.factory()
obj_.build(child_)
self.set_ElementName(obj_)
elif nodeName_ == 'HostResource':
class_obj_ = self.get_class_obj_(child_, cimString)
obj_ = class_obj_.factory()
obj_.build(child_)
self.HostResource.append(obj_)
elif nodeName_ == 'InstanceID':
class_obj_ = self.get_class_obj_(child_, cimString)
obj_ = class_obj_.factory()
obj_.build(child_)
self.set_InstanceID(obj_)
elif nodeName_ == 'Limit':
obj_ = cimUnsignedLong.factory()
obj_.build(child_)
self.set_Limit(obj_)
elif nodeName_ == 'MappingBehavior':
sval_ = child_.text
try:
ival_ = int(sval_)
except (TypeError, ValueError), exp:
raise_parse_error(child_, 'requires integer: %s' % exp)
ival_ = self.gds_validate_integer(ival_, node, 'MappingBehavior')
self.MappingBehavior = ival_
self.validate_MappingBehavior(self.MappingBehavior) # validate type MappingBehavior
elif nodeName_ == 'OtherResourceType':
class_obj_ = self.get_class_obj_(child_, cimString)
obj_ = class_obj_.factory()
obj_.build(child_)
self.set_OtherResourceType(obj_)
elif nodeName_ == 'Parent':
class_obj_ = self.get_class_obj_(child_, cimString)
obj_ = class_obj_.factory()
obj_.build(child_)
self.set_Parent(obj_)
elif nodeName_ == 'PoolID':
class_obj_ = self.get_class_obj_(child_, cimString)
obj_ = class_obj_.factory()
obj_.build(child_)
self.set_PoolID(obj_)
elif nodeName_ == 'Reservation':
obj_ = cimUnsignedLong.factory()
obj_.build(child_)
self.set_Reservation(obj_)
elif nodeName_ == 'ResourceSubType':
class_obj_ = self.get_class_obj_(child_, cimString)
obj_ = class_obj_.factory()
obj_.build(child_)
self.set_ResourceSubType(obj_)
elif nodeName_ == 'ResourceType':
sval_ = child_.text
try:
ival_ = int(sval_)
except (TypeError, ValueError), exp:
raise_parse_error(child_, 'requires integer: %s' % exp)
ival_ = self.gds_validate_integer(ival_, node, 'ResourceType')
self.ResourceType = ival_
self.validate_ResourceType(self.ResourceType) # validate type ResourceType
elif nodeName_ == 'VirtualQuantity':
obj_ = cimUnsignedLong.factory()
obj_.build(child_)
self.set_VirtualQuantity(obj_)
elif nodeName_ == 'VirtualQuantityUnits':
class_obj_ = self.get_class_obj_(child_, cimString)
obj_ = class_obj_.factory()
obj_.build(child_)
self.set_VirtualQuantityUnits(obj_)
elif nodeName_ == 'Weight':
class_obj_ = self.get_class_obj_(child_, cimUnsignedInt)
obj_ = class_obj_.factory()
obj_.build(child_)
self.set_Weight(obj_)
else:
obj_ = self.gds_build_any(child_, 'CIM_ResourceAllocationSettingData_Type')
if obj_ is not None:
self.add_anytypeobjs_(obj_)
# end class CIM_ResourceAllocationSettingData_Type
class MsgType(GeneratedsSuper):
    """Localizable message element.

    Carries the element's text content in ``valueOf_`` plus an optional
    ``msgid`` attribute identifying the string; any additional,
    schema-undeclared attributes are kept in ``anyAttributes_``.
    (Generated binding class -- do not hand-edit the logic.)
    """
    # Set ``subclass`` to override instantiation via factory() without
    # editing this generated class.
    subclass = None
    superclass = None
    def __init__(self, msgid=None, valueOf_=None):
        self.msgid = _cast(None, msgid)
        self.valueOf_ = valueOf_
        self.anyAttributes_ = {}  # wildcard attributes not declared in the schema
    def factory(*args_, **kwargs_):
        # Instantiate the registered subclass, if any, else this class.
        if MsgType.subclass:
            return MsgType.subclass(*args_, **kwargs_)
        else:
            return MsgType(*args_, **kwargs_)
    factory = staticmethod(factory)
    # -- trivial accessors (generated convention) --
    def get_msgid(self): return self.msgid
    def set_msgid(self, msgid): self.msgid = msgid
    def get_valueOf_(self): return self.valueOf_
    def set_valueOf_(self, valueOf_): self.valueOf_ = valueOf_
    def get_anyAttributes_(self): return self.anyAttributes_
    def set_anyAttributes_(self, anyAttributes_): self.anyAttributes_ = anyAttributes_
    def export(self, outfile, level, namespace_='ovf:', name_='MsgType', namespacedef_=''):
        """Write this element as XML to outfile, text content inline."""
        showIndent(outfile, level)
        outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
        already_processed = []
        self.exportAttributes(outfile, level, already_processed, namespace_, name_='MsgType')
        if self.hasContent_():
            outfile.write('>')
            # Text content is written directly after '>' with no newline or
            # indent, so whitespace inside the element is preserved.
            # NOTE: Python 2 semantics -- str(...).encode(ExternalEncoding)
            # yields a byte string suitable for outfile.write().
            outfile.write(str(self.valueOf_).encode(ExternalEncoding))
            self.exportChildren(outfile, level + 1, namespace_, name_)
            outfile.write('</%s%s>\n' % (namespace_, name_))
        else:
            outfile.write('/>\n')
    def exportAttributes(self, outfile, level, already_processed, namespace_='ovf:', name_='MsgType'):
        """Write wildcard attributes first, then the declared ``msgid``."""
        unique_counter = 0
        for name, value in self.anyAttributes_.items():
            xsinamespaceprefix = 'xsi'
            xsinamespace1 = 'http://www.w3.org/2001/XMLSchema-instance'
            xsinamespace2 = '{%s}' % (xsinamespace1, )
            if name.startswith(xsinamespace2):
                # Clark-notation {uri}local attribute in the XML Schema
                # instance namespace: emit with the conventional xsi: prefix.
                name1 = name[len(xsinamespace2):]
                name2 = '%s:%s' % (xsinamespaceprefix, name1, )
                if name2 not in already_processed:
                    already_processed.append(name2)
                    outfile.write(' %s=%s' % (name2, quote_attrib(value), ))
            else:
                mo = re_.match(Namespace_extract_pat_, name)
                if mo is not None:
                    namespace, name = mo.group(1, 2)
                    if name not in already_processed:
                        already_processed.append(name)
                        if namespace == 'http://www.w3.org/XML/1998/namespace':
                            # xml: namespace attributes need no declaration.
                            outfile.write(' %s=%s' % (name, quote_attrib(value), ))
                        else:
                            # Declare a synthetic yyyN prefix for this
                            # namespace, then emit the attribute with it.
                            unique_counter += 1
                            outfile.write(' xmlns:yyy%d="%s"' % (unique_counter, namespace, ))
                            outfile.write(' yyy%d:%s=%s' % (unique_counter, name, quote_attrib(value), ))
                else:
                    # Plain (unqualified) attribute.
                    if name not in already_processed:
                        already_processed.append(name)
                        outfile.write(' %s=%s' % (name, quote_attrib(value), ))
        if self.msgid is not None and 'msgid' not in already_processed:
            already_processed.append('msgid')
            outfile.write(' msgid=%s' % (self.gds_format_string(quote_attrib(self.msgid).encode(ExternalEncoding), input_name='msgid'), ))
    def exportChildren(self, outfile, level, namespace_='ovf:', name_='MsgType', fromsubclass_=False):
        # No child elements are declared for this type.
        pass
    def hasContent_(self):
        # True when the element has (truthy) text content.
        if (
            self.valueOf_
            ):
            return True
        else:
            return False
    def exportLiteral(self, outfile, level, name_='MsgType'):
        """Write a Python-literal (constructor-style) rendering."""
        level += 1
        self.exportLiteralAttributes(outfile, level, [], name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)
        showIndent(outfile, level)
        outfile.write('valueOf_ = """%s""",\n' % (self.valueOf_,))
    def exportLiteralAttributes(self, outfile, level, already_processed, name_):
        if self.msgid is not None and 'msgid' not in already_processed:
            already_processed.append('msgid')
            showIndent(outfile, level)
            outfile.write('msgid = "%s",\n' % (self.msgid,))
        for name, value in self.anyAttributes_.items():
            showIndent(outfile, level)
            outfile.write('%s = "%s",\n' % (name, value,))
    def exportLiteralChildren(self, outfile, level, name_):
        pass
    def build(self, node):
        """Populate this object from an ElementTree node."""
        self.buildAttributes(node, node.attrib, [])
        self.valueOf_ = get_all_text_(node)
        for child in node:
            # Strip any namespace prefix from the tag to get the local name.
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_)
    def buildAttributes(self, node, attrs, already_processed):
        value = find_attr_value_('msgid', node)
        if value is not None and 'msgid' not in already_processed:
            already_processed.append('msgid')
            self.msgid = value
        # Everything not consumed above is kept as a wildcard attribute.
        self.anyAttributes_ = {}
        for name, value in attrs.items():
            if name not in already_processed:
                self.anyAttributes_[name] = value
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        # No child elements are declared for this type.
        pass
# end class MsgType
class IconType(GeneratedsSuper):
    """Icon descriptor element.

    Attribute-only type: ``mimeType`` and ``fileRef`` are strings,
    ``width`` and ``height`` are integers; schema-undeclared attributes
    go to ``anyAttributes_``. (Generated binding class.)
    """
    # Set ``subclass`` to override instantiation via factory().
    subclass = None
    superclass = None
    def __init__(self, mimeType=None, width=None, fileRef=None, height=None):
        self.mimeType = _cast(None, mimeType)
        self.width = _cast(int, width)
        self.fileRef = _cast(None, fileRef)
        self.height = _cast(int, height)
        self.anyAttributes_ = {}  # wildcard attributes not declared in the schema
    def factory(*args_, **kwargs_):
        # Instantiate the registered subclass, if any, else this class.
        if IconType.subclass:
            return IconType.subclass(*args_, **kwargs_)
        else:
            return IconType(*args_, **kwargs_)
    factory = staticmethod(factory)
    # -- trivial accessors (generated convention) --
    def get_mimeType(self): return self.mimeType
    def set_mimeType(self, mimeType): self.mimeType = mimeType
    def get_width(self): return self.width
    def set_width(self, width): self.width = width
    def get_fileRef(self): return self.fileRef
    def set_fileRef(self, fileRef): self.fileRef = fileRef
    def get_height(self): return self.height
    def set_height(self, height): self.height = height
    def get_anyAttributes_(self): return self.anyAttributes_
    def set_anyAttributes_(self, anyAttributes_): self.anyAttributes_ = anyAttributes_
    def export(self, outfile, level, namespace_='ovf:', name_='IconType', namespacedef_=''):
        """Write this element as XML to outfile (self-closing in practice,
        since hasContent_() is always False for this type)."""
        showIndent(outfile, level)
        outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
        already_processed = []
        self.exportAttributes(outfile, level, already_processed, namespace_, name_='IconType')
        if self.hasContent_():
            outfile.write('>\n')
            self.exportChildren(outfile, level + 1, namespace_, name_)
            outfile.write('</%s%s>\n' % (namespace_, name_))
        else:
            outfile.write('/>\n')
    def exportAttributes(self, outfile, level, already_processed, namespace_='ovf:', name_='IconType'):
        """Write wildcard attributes first, then the declared attributes."""
        unique_counter = 0
        for name, value in self.anyAttributes_.items():
            xsinamespaceprefix = 'xsi'
            xsinamespace1 = 'http://www.w3.org/2001/XMLSchema-instance'
            xsinamespace2 = '{%s}' % (xsinamespace1, )
            if name.startswith(xsinamespace2):
                # Clark-notation {uri}local attribute in the XML Schema
                # instance namespace: emit with the conventional xsi: prefix.
                name1 = name[len(xsinamespace2):]
                name2 = '%s:%s' % (xsinamespaceprefix, name1, )
                if name2 not in already_processed:
                    already_processed.append(name2)
                    outfile.write(' %s=%s' % (name2, quote_attrib(value), ))
            else:
                mo = re_.match(Namespace_extract_pat_, name)
                if mo is not None:
                    namespace, name = mo.group(1, 2)
                    if name not in already_processed:
                        already_processed.append(name)
                        if namespace == 'http://www.w3.org/XML/1998/namespace':
                            # xml: namespace attributes need no declaration.
                            outfile.write(' %s=%s' % (name, quote_attrib(value), ))
                        else:
                            # Declare a synthetic yyyN prefix, then emit.
                            unique_counter += 1
                            outfile.write(' xmlns:yyy%d="%s"' % (unique_counter, namespace, ))
                            outfile.write(' yyy%d:%s=%s' % (unique_counter, name, quote_attrib(value), ))
                else:
                    # Plain (unqualified) attribute.
                    if name not in already_processed:
                        already_processed.append(name)
                        outfile.write(' %s=%s' % (name, quote_attrib(value), ))
        if self.mimeType is not None and 'mimeType' not in already_processed:
            already_processed.append('mimeType')
            outfile.write(' mimeType=%s' % (self.gds_format_string(quote_attrib(self.mimeType).encode(ExternalEncoding), input_name='mimeType'), ))
        if self.width is not None and 'width' not in already_processed:
            already_processed.append('width')
            outfile.write(' width="%s"' % self.gds_format_integer(self.width, input_name='width'))
        if self.fileRef is not None and 'fileRef' not in already_processed:
            already_processed.append('fileRef')
            outfile.write(' fileRef=%s' % (self.gds_format_string(quote_attrib(self.fileRef).encode(ExternalEncoding), input_name='fileRef'), ))
        if self.height is not None and 'height' not in already_processed:
            already_processed.append('height')
            outfile.write(' height="%s"' % self.gds_format_integer(self.height, input_name='height'))
    def exportChildren(self, outfile, level, namespace_='ovf:', name_='IconType', fromsubclass_=False):
        # No child elements are declared for this type.
        pass
    def hasContent_(self):
        # Generated empty membership test: no child elements or text content
        # are declared, so the parenthesized expression below is the empty
        # tuple () and this method always returns False.
        if (
            ):
            return True
        else:
            return False
    def exportLiteral(self, outfile, level, name_='IconType'):
        """Write a Python-literal (constructor-style) rendering."""
        level += 1
        self.exportLiteralAttributes(outfile, level, [], name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)
    def exportLiteralAttributes(self, outfile, level, already_processed, name_):
        if self.mimeType is not None and 'mimeType' not in already_processed:
            already_processed.append('mimeType')
            showIndent(outfile, level)
            outfile.write('mimeType = "%s",\n' % (self.mimeType,))
        if self.width is not None and 'width' not in already_processed:
            already_processed.append('width')
            showIndent(outfile, level)
            outfile.write('width = %d,\n' % (self.width,))
        if self.fileRef is not None and 'fileRef' not in already_processed:
            already_processed.append('fileRef')
            showIndent(outfile, level)
            outfile.write('fileRef = "%s",\n' % (self.fileRef,))
        if self.height is not None and 'height' not in already_processed:
            already_processed.append('height')
            showIndent(outfile, level)
            outfile.write('height = %d,\n' % (self.height,))
        for name, value in self.anyAttributes_.items():
            showIndent(outfile, level)
            outfile.write('%s = "%s",\n' % (name, value,))
    def exportLiteralChildren(self, outfile, level, name_):
        pass
    def build(self, node):
        """Populate this object from an ElementTree node."""
        self.buildAttributes(node, node.attrib, [])
        for child in node:
            # Strip any namespace prefix from the tag to get the local name.
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_)
    def buildAttributes(self, node, attrs, already_processed):
        value = find_attr_value_('mimeType', node)
        if value is not None and 'mimeType' not in already_processed:
            already_processed.append('mimeType')
            self.mimeType = value
        value = find_attr_value_('width', node)
        if value is not None and 'width' not in already_processed:
            already_processed.append('width')
            try:
                self.width = int(value)
            except ValueError, exp:
                raise_parse_error(node, 'Bad integer attribute: %s' % exp)
        value = find_attr_value_('fileRef', node)
        if value is not None and 'fileRef' not in already_processed:
            already_processed.append('fileRef')
            self.fileRef = value
        value = find_attr_value_('height', node)
        if value is not None and 'height' not in already_processed:
            already_processed.append('height')
            try:
                self.height = int(value)
            except ValueError, exp:
                raise_parse_error(node, 'Bad integer attribute: %s' % exp)
        # Everything not consumed above is kept as a wildcard attribute.
        self.anyAttributes_ = {}
        for name, value in attrs.items():
            if name not in already_processed:
                self.anyAttributes_[name] = value
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        # No child elements are declared for this type.
        pass
# end class IconType
class PropertyType(GeneratedsSuper):
    """Deployment property element.

    Attributes: ``key`` -- property identifier; ``type_`` -- property
    type (written as XML attribute ``type``); ``qualifiers`` -- a
    comma-separated set of type qualifiers; ``userConfigurable`` --
    whether the value is configurable during installation; ``value`` --
    default value for the property; ``password`` -- whether the value
    should be obscured during deployment.
    Children: optional Label and Description messages plus a list of
    Value (PropertyConfigurationValue_Type) elements.
    (Generated binding class.)
    """
    # Set ``subclass`` to override instantiation via factory().
    subclass = None
    superclass = None
    def __init__(self, userConfigurable=False, value='', key=None, password=False, type_=None, qualifiers=None, Label=None, Description=None, Value=None):
        self.userConfigurable = _cast(bool, userConfigurable)
        self.value = _cast(None, value)
        self.key = _cast(None, key)
        self.password = _cast(bool, password)
        self.type_ = _cast(None, type_)
        self.qualifiers = _cast(None, qualifiers)
        self.Label = Label
        self.Description = Description
        # Avoid the shared-mutable-default pitfall: a fresh list per instance.
        if Value is None:
            self.Value = []
        else:
            self.Value = Value
        self.anyAttributes_ = {}  # wildcard attributes not declared in the schema
    def factory(*args_, **kwargs_):
        # Instantiate the registered subclass, if any, else this class.
        if PropertyType.subclass:
            return PropertyType.subclass(*args_, **kwargs_)
        else:
            return PropertyType(*args_, **kwargs_)
    factory = staticmethod(factory)
    # -- trivial accessors (generated convention) --
    def get_Label(self): return self.Label
    def set_Label(self, Label): self.Label = Label
    def get_Description(self): return self.Description
    def set_Description(self, Description): self.Description = Description
    def get_Value(self): return self.Value
    def set_Value(self, Value): self.Value = Value
    def add_Value(self, value): self.Value.append(value)
    def insert_Value(self, index, value): self.Value[index] = value
    def get_userConfigurable(self): return self.userConfigurable
    def set_userConfigurable(self, userConfigurable): self.userConfigurable = userConfigurable
    def get_value(self): return self.value
    def set_value(self, value): self.value = value
    def get_key(self): return self.key
    def set_key(self, key): self.key = key
    def get_password(self): return self.password
    def set_password(self, password): self.password = password
    def get_type(self): return self.type_
    def set_type(self, type_): self.type_ = type_
    def get_qualifiers(self): return self.qualifiers
    def set_qualifiers(self, qualifiers): self.qualifiers = qualifiers
    def get_anyAttributes_(self): return self.anyAttributes_
    def set_anyAttributes_(self, anyAttributes_): self.anyAttributes_ = anyAttributes_
    def export(self, outfile, level, namespace_='ovf:', name_='PropertyType', namespacedef_=''):
        """Write this element (and any children) as XML to outfile."""
        showIndent(outfile, level)
        outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
        already_processed = []
        self.exportAttributes(outfile, level, already_processed, namespace_, name_='PropertyType')
        if self.hasContent_():
            outfile.write('>\n')
            self.exportChildren(outfile, level + 1, namespace_, name_)
            showIndent(outfile, level)
            outfile.write('</%s%s>\n' % (namespace_, name_))
        else:
            outfile.write('/>\n')
    def exportAttributes(self, outfile, level, already_processed, namespace_='ovf:', name_='PropertyType'):
        """Write wildcard attributes first, then the declared attributes."""
        unique_counter = 0
        for name, value in self.anyAttributes_.items():
            xsinamespaceprefix = 'xsi'
            xsinamespace1 = 'http://www.w3.org/2001/XMLSchema-instance'
            xsinamespace2 = '{%s}' % (xsinamespace1, )
            if name.startswith(xsinamespace2):
                # Clark-notation {uri}local attribute in the XML Schema
                # instance namespace: emit with the conventional xsi: prefix.
                name1 = name[len(xsinamespace2):]
                name2 = '%s:%s' % (xsinamespaceprefix, name1, )
                if name2 not in already_processed:
                    already_processed.append(name2)
                    outfile.write(' %s=%s' % (name2, quote_attrib(value), ))
            else:
                mo = re_.match(Namespace_extract_pat_, name)
                if mo is not None:
                    namespace, name = mo.group(1, 2)
                    if name not in already_processed:
                        already_processed.append(name)
                        if namespace == 'http://www.w3.org/XML/1998/namespace':
                            # xml: namespace attributes need no declaration.
                            outfile.write(' %s=%s' % (name, quote_attrib(value), ))
                        else:
                            # Declare a synthetic yyyN prefix, then emit.
                            unique_counter += 1
                            outfile.write(' xmlns:yyy%d="%s"' % (unique_counter, namespace, ))
                            outfile.write(' yyy%d:%s=%s' % (unique_counter, name, quote_attrib(value), ))
                else:
                    # Plain (unqualified) attribute.
                    if name not in already_processed:
                        already_processed.append(name)
                        outfile.write(' %s=%s' % (name, quote_attrib(value), ))
        if self.userConfigurable is not None and 'userConfigurable' not in already_processed:
            already_processed.append('userConfigurable')
            outfile.write(' userConfigurable="%s"' % self.gds_format_boolean(self.gds_str_lower(str(self.userConfigurable)), input_name='userConfigurable'))
        if self.value is not None and 'value' not in already_processed:
            already_processed.append('value')
            outfile.write(' value=%s' % (self.gds_format_string(quote_attrib(self.value).encode(ExternalEncoding), input_name='value'), ))
        if self.key is not None and 'key' not in already_processed:
            already_processed.append('key')
            outfile.write(' key=%s' % (self.gds_format_string(quote_attrib(self.key).encode(ExternalEncoding), input_name='key'), ))
        if self.password is not None and 'password' not in already_processed:
            already_processed.append('password')
            outfile.write(' password="%s"' % self.gds_format_boolean(self.gds_str_lower(str(self.password)), input_name='password'))
        # NOTE(review): the dedup key here is 'type_' while buildAttributes
        # records 'type'; the XML attribute written is 'type'. This
        # generated asymmetry is intentional-looking but unverified here.
        if self.type_ is not None and 'type_' not in already_processed:
            already_processed.append('type_')
            outfile.write(' type=%s' % (self.gds_format_string(quote_attrib(self.type_).encode(ExternalEncoding), input_name='type'), ))
        if self.qualifiers is not None and 'qualifiers' not in already_processed:
            already_processed.append('qualifiers')
            outfile.write(' qualifiers=%s' % (self.gds_format_string(quote_attrib(self.qualifiers).encode(ExternalEncoding), input_name='qualifiers'), ))
    def exportChildren(self, outfile, level, namespace_='ovf:', name_='PropertyType', fromsubclass_=False):
        # Fixed child order: Label, Description, then every Value element.
        if self.Label is not None:
            self.Label.export(outfile, level, namespace_, name_='Label')
        if self.Description is not None:
            self.Description.export(outfile, level, namespace_, name_='Description')
        for Value_ in self.Value:
            Value_.export(outfile, level, namespace_, name_='Value')
    def hasContent_(self):
        # True when any child element is present.
        if (
            self.Label is not None or
            self.Description is not None or
            self.Value
            ):
            return True
        else:
            return False
    def exportLiteral(self, outfile, level, name_='PropertyType'):
        """Write a Python-literal (constructor-style) rendering."""
        level += 1
        self.exportLiteralAttributes(outfile, level, [], name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)
    def exportLiteralAttributes(self, outfile, level, already_processed, name_):
        if self.userConfigurable is not None and 'userConfigurable' not in already_processed:
            already_processed.append('userConfigurable')
            showIndent(outfile, level)
            outfile.write('userConfigurable = %s,\n' % (self.userConfigurable,))
        if self.value is not None and 'value' not in already_processed:
            already_processed.append('value')
            showIndent(outfile, level)
            outfile.write('value = "%s",\n' % (self.value,))
        if self.key is not None and 'key' not in already_processed:
            already_processed.append('key')
            showIndent(outfile, level)
            outfile.write('key = "%s",\n' % (self.key,))
        if self.password is not None and 'password' not in already_processed:
            already_processed.append('password')
            showIndent(outfile, level)
            outfile.write('password = %s,\n' % (self.password,))
        if self.type_ is not None and 'type_' not in already_processed:
            already_processed.append('type_')
            showIndent(outfile, level)
            outfile.write('type_ = "%s",\n' % (self.type_,))
        if self.qualifiers is not None and 'qualifiers' not in already_processed:
            already_processed.append('qualifiers')
            showIndent(outfile, level)
            outfile.write('qualifiers = "%s",\n' % (self.qualifiers,))
        for name, value in self.anyAttributes_.items():
            showIndent(outfile, level)
            outfile.write('%s = "%s",\n' % (name, value,))
    def exportLiteralChildren(self, outfile, level, name_):
        if self.Label is not None:
            showIndent(outfile, level)
            outfile.write('Label=model_.Msg_Type(\n')
            self.Label.exportLiteral(outfile, level, name_='Label')
            showIndent(outfile, level)
            outfile.write('),\n')
        if self.Description is not None:
            showIndent(outfile, level)
            outfile.write('Description=model_.Msg_Type(\n')
            self.Description.exportLiteral(outfile, level, name_='Description')
            showIndent(outfile, level)
            outfile.write('),\n')
        showIndent(outfile, level)
        outfile.write('Value=[\n')
        level += 1
        for Value_ in self.Value:
            showIndent(outfile, level)
            outfile.write('model_.PropertyConfigurationValue_Type(\n')
            Value_.exportLiteral(outfile, level, name_='PropertyConfigurationValue_Type')
            showIndent(outfile, level)
            outfile.write('),\n')
        level -= 1
        showIndent(outfile, level)
        outfile.write('],\n')
    def build(self, node):
        """Populate this object from an ElementTree node."""
        self.buildAttributes(node, node.attrib, [])
        for child in node:
            # Strip any namespace prefix from the tag to get the local name.
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_)
    def buildAttributes(self, node, attrs, already_processed):
        value = find_attr_value_('userConfigurable', node)
        if value is not None and 'userConfigurable' not in already_processed:
            already_processed.append('userConfigurable')
            # Only the literal schema booleans are accepted.
            if value in ('true', '1'):
                self.userConfigurable = True
            elif value in ('false', '0'):
                self.userConfigurable = False
            else:
                raise_parse_error(node, 'Bad boolean attribute')
        value = find_attr_value_('value', node)
        if value is not None and 'value' not in already_processed:
            already_processed.append('value')
            self.value = value
        value = find_attr_value_('key', node)
        if value is not None and 'key' not in already_processed:
            already_processed.append('key')
            self.key = value
        value = find_attr_value_('password', node)
        if value is not None and 'password' not in already_processed:
            already_processed.append('password')
            if value in ('true', '1'):
                self.password = True
            elif value in ('false', '0'):
                self.password = False
            else:
                raise_parse_error(node, 'Bad boolean attribute')
        value = find_attr_value_('type', node)
        if value is not None and 'type' not in already_processed:
            already_processed.append('type')
            self.type_ = value
        value = find_attr_value_('qualifiers', node)
        if value is not None and 'qualifiers' not in already_processed:
            already_processed.append('qualifiers')
            self.qualifiers = value
        # Everything not consumed above is kept as a wildcard attribute.
        self.anyAttributes_ = {}
        for name, value in attrs.items():
            if name not in already_processed:
                self.anyAttributes_[name] = value
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        if nodeName_ == 'Label':
            obj_ = Msg_Type.factory()
            obj_.build(child_)
            self.set_Label(obj_)
        elif nodeName_ == 'Description':
            obj_ = Msg_Type.factory()
            obj_.build(child_)
            self.set_Description(obj_)
        elif nodeName_ == 'Value':
            obj_ = PropertyConfigurationValue_Type.factory()
            obj_.build(child_)
            self.Value.append(obj_)
# end class PropertyType
class NetworkType(GeneratedsSuper):
    """Logical network declaration.

    Holds a ``name`` attribute, an optional Description child element,
    and any schema-undeclared attributes in ``anyAttributes_``.
    (Generated binding class.)
    """
    # Assign a class to ``subclass`` to have factory() build that instead.
    subclass = None
    superclass = None
    def __init__(self, name=None, Description=None):
        self.name = _cast(None, name)
        self.Description = Description
        self.anyAttributes_ = {}  # wildcard attributes collected during build
    def factory(*args_, **kwargs_):
        # Honor a registered subclass, falling back to this class.
        target = NetworkType.subclass if NetworkType.subclass else NetworkType
        return target(*args_, **kwargs_)
    factory = staticmethod(factory)
    # -- trivial accessors (generated convention) --
    def get_Description(self):
        return self.Description
    def set_Description(self, Description):
        self.Description = Description
    def get_name(self):
        return self.name
    def set_name(self, name):
        self.name = name
    def get_anyAttributes_(self):
        return self.anyAttributes_
    def set_anyAttributes_(self, anyAttributes_):
        self.anyAttributes_ = anyAttributes_
    def export(self, outfile, level, namespace_='ovf:', name_='NetworkType', namespacedef_=''):
        """Write this element (and its Description child) as XML."""
        showIndent(outfile, level)
        if namespacedef_:
            outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_))
        else:
            outfile.write('<%s%s' % (namespace_, name_))
        already_processed = []
        self.exportAttributes(outfile, level, already_processed, namespace_, name_='NetworkType')
        if not self.hasContent_():
            outfile.write('/>\n')
            return
        outfile.write('>\n')
        self.exportChildren(outfile, level + 1, namespace_, name_)
        showIndent(outfile, level)
        outfile.write('</%s%s>\n' % (namespace_, name_))
    def exportAttributes(self, outfile, level, already_processed, namespace_='ovf:', name_='NetworkType'):
        """Write wildcard attributes first, then the declared ``name``."""
        xsi_uri = 'http://www.w3.org/2001/XMLSchema-instance'
        xsi_clark = '{%s}' % (xsi_uri, )
        generated_prefixes = 0
        for attr_name, attr_value in self.anyAttributes_.items():
            if attr_name.startswith(xsi_clark):
                # Clark-notation attribute in the XML Schema instance
                # namespace: emit with the conventional xsi: prefix.
                prefixed = 'xsi:%s' % (attr_name[len(xsi_clark):], )
                if prefixed not in already_processed:
                    already_processed.append(prefixed)
                    outfile.write(' %s=%s' % (prefixed, quote_attrib(attr_value), ))
                continue
            mo = re_.match(Namespace_extract_pat_, attr_name)
            if mo is None:
                # Plain (unqualified) attribute.
                if attr_name not in already_processed:
                    already_processed.append(attr_name)
                    outfile.write(' %s=%s' % (attr_name, quote_attrib(attr_value), ))
                continue
            ns_uri, local_name = mo.group(1, 2)
            if local_name in already_processed:
                continue
            already_processed.append(local_name)
            if ns_uri == 'http://www.w3.org/XML/1998/namespace':
                # Attributes in the xml: namespace need no declaration.
                outfile.write(' %s=%s' % (local_name, quote_attrib(attr_value), ))
            else:
                # Declare a synthetic yyyN prefix for this namespace, then
                # emit the attribute qualified with it.
                generated_prefixes += 1
                outfile.write(' xmlns:yyy%d="%s"' % (generated_prefixes, ns_uri, ))
                outfile.write(' yyy%d:%s=%s' % (generated_prefixes, local_name, quote_attrib(attr_value), ))
        if self.name is not None and 'name' not in already_processed:
            already_processed.append('name')
            outfile.write(' name=%s' % (self.gds_format_string(quote_attrib(self.name).encode(ExternalEncoding), input_name='name'), ))
    def exportChildren(self, outfile, level, namespace_='ovf:', name_='NetworkType', fromsubclass_=False):
        if self.Description is not None:
            self.Description.export(outfile, level, namespace_, name_='Description')
    def hasContent_(self):
        # The only possible content is the Description child.
        return self.Description is not None
    def exportLiteral(self, outfile, level, name_='NetworkType'):
        """Write a Python-literal (constructor-style) rendering."""
        level += 1
        self.exportLiteralAttributes(outfile, level, [], name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)
    def exportLiteralAttributes(self, outfile, level, already_processed, name_):
        if self.name is not None and 'name' not in already_processed:
            already_processed.append('name')
            showIndent(outfile, level)
            outfile.write('name = "%s",\n' % (self.name, ))
        for attr_name, attr_value in self.anyAttributes_.items():
            showIndent(outfile, level)
            outfile.write('%s = "%s",\n' % (attr_name, attr_value, ))
    def exportLiteralChildren(self, outfile, level, name_):
        if self.Description is not None:
            showIndent(outfile, level)
            outfile.write('Description=model_.Msg_Type(\n')
            self.Description.exportLiteral(outfile, level, name_='Description')
            showIndent(outfile, level)
            outfile.write('),\n')
    def build(self, node):
        """Populate this object from an ElementTree node."""
        self.buildAttributes(node, node.attrib, [])
        for child in node:
            # Strip any namespace prefix to recover the local tag name.
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_)
    def buildAttributes(self, node, attrs, already_processed):
        value = find_attr_value_('name', node)
        if value is not None and 'name' not in already_processed:
            already_processed.append('name')
            self.name = value
        # Everything not consumed above is kept as a wildcard attribute.
        self.anyAttributes_ = {}
        for attr_name, attr_value in attrs.items():
            if attr_name not in already_processed:
                self.anyAttributes_[attr_name] = attr_value
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        if nodeName_ == 'Description':
            obj_ = Msg_Type.factory()
            obj_.build(child_)
            self.set_Description(obj_)
# end class NetworkType
class ItemType(GeneratedsSuper):
"""Unique identifier of the content (within a VirtualSystemCollection)
Startup order. Entities are started up starting with lower-
numbers first, starting from 0. Items with same order identifier
may be started up concurrently or in any order. The order is
reversed for shutdown.Delay in seconds to wait for power on to
completeResumes power-on sequence if guest software reports
okDelay in seconds to wait for power off to completeStart action
to use, valid values are: 'powerOn', 'none' Stop action to use,
valid values are: ''powerOff' , 'guestShutdown', 'none'"""
subclass = None
superclass = None
def __init__(self, stopDelay=0, order=None, startAction='powerOn', startDelay=0, waitingForGuest=False, stopAction='powerOff', id=None):
self.stopDelay = _cast(int, stopDelay)
self.order = _cast(int, order)
self.startAction = _cast(None, startAction)
self.startDelay = _cast(int, startDelay)
self.waitingForGuest = _cast(bool, waitingForGuest)
self.stopAction = _cast(None, stopAction)
self.id = _cast(None, id)
self.anyAttributes_ = {}
def factory(*args_, **kwargs_):
if ItemType.subclass:
return ItemType.subclass(*args_, **kwargs_)
else:
return ItemType(*args_, **kwargs_)
factory = staticmethod(factory)
def get_stopDelay(self): return self.stopDelay
def set_stopDelay(self, stopDelay): self.stopDelay = stopDelay
def get_order(self): return self.order
def set_order(self, order): self.order = order
def get_startAction(self): return self.startAction
def set_startAction(self, startAction): self.startAction = startAction
def get_startDelay(self): return self.startDelay
def set_startDelay(self, startDelay): self.startDelay = startDelay
def get_waitingForGuest(self): return self.waitingForGuest
def set_waitingForGuest(self, waitingForGuest): self.waitingForGuest = waitingForGuest
def get_stopAction(self): return self.stopAction
def set_stopAction(self, stopAction): self.stopAction = stopAction
def get_id(self): return self.id
def set_id(self, id): self.id = id
def get_anyAttributes_(self): return self.anyAttributes_
def set_anyAttributes_(self, anyAttributes_): self.anyAttributes_ = anyAttributes_
def export(self, outfile, level, namespace_='ovf:', name_='ItemType', namespacedef_=''):
showIndent(outfile, level)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = []
self.exportAttributes(outfile, level, already_processed, namespace_, name_='ItemType')
if self.hasContent_():
outfile.write('>\n')
self.exportChildren(outfile, level + 1, namespace_, name_)
outfile.write('</%s%s>\n' % (namespace_, name_))
else:
outfile.write('/>\n')
def exportAttributes(self, outfile, level, already_processed, namespace_='ovf:', name_='ItemType'):
unique_counter = 0
for name, value in self.anyAttributes_.items():
xsinamespaceprefix = 'xsi'
xsinamespace1 = 'http://www.w3.org/2001/XMLSchema-instance'
xsinamespace2 = '{%s}' % (xsinamespace1, )
if name.startswith(xsinamespace2):
name1 = name[len(xsinamespace2):]
name2 = '%s:%s' % (xsinamespaceprefix, name1, )
if name2 not in already_processed:
already_processed.append(name2)
outfile.write(' %s=%s' % (name2, quote_attrib(value), ))
else:
mo = re_.match(Namespace_extract_pat_, name)
if mo is not None:
namespace, name = mo.group(1, 2)
if name not in already_processed:
already_processed.append(name)
if namespace == 'http://www.w3.org/XML/1998/namespace':
outfile.write(' %s=%s' % (name, quote_attrib(value), ))
else:
unique_counter += 1
outfile.write(' xmlns:yyy%d="%s"' % (unique_counter, namespace, ))
outfile.write(' yyy%d:%s=%s' % (unique_counter, name, quote_attrib(value), ))
else:
if name not in already_processed:
already_processed.append(name)
outfile.write(' %s=%s' % (name, quote_attrib(value), ))
if self.stopDelay is not None and 'stopDelay' not in already_processed:
already_processed.append('stopDelay')
outfile.write(' stopDelay="%s"' % self.gds_format_integer(self.stopDelay, input_name='stopDelay'))
if self.order is not None and 'order' not in already_processed:
already_processed.append('order')
outfile.write(' order="%s"' % self.gds_format_integer(self.order, input_name='order'))
if self.startAction is not None and 'startAction' not in already_processed:
already_processed.append('startAction')
outfile.write(' startAction=%s' % (self.gds_format_string(quote_attrib(self.startAction).encode(ExternalEncoding), input_name='startAction'), ))
if self.startDelay is not None and 'startDelay' not in already_processed:
already_processed.append('startDelay')
outfile.write(' startDelay="%s"' % self.gds_format_integer(self.startDelay, input_name='startDelay'))
if self.waitingForGuest is not None and 'waitingForGuest' not in already_processed:
already_processed.append('waitingForGuest')
outfile.write(' waitingForGuest="%s"' % self.gds_format_boolean(self.gds_str_lower(str(self.waitingForGuest)), input_name='waitingForGuest'))
if self.stopAction is not None and 'stopAction' not in already_processed:
already_processed.append('stopAction')
outfile.write(' stopAction=%s' % (self.gds_format_string(quote_attrib(self.stopAction).encode(ExternalEncoding), input_name='stopAction'), ))
if self.id is not None and 'id' not in already_processed:
already_processed.append('id')
outfile.write(' id=%s' % (self.gds_format_string(quote_attrib(self.id).encode(ExternalEncoding), input_name='id'), ))
def exportChildren(self, outfile, level, namespace_='ovf:', name_='ItemType', fromsubclass_=False):
pass
def hasContent_(self):
if (
):
return True
else:
return False
def exportLiteral(self, outfile, level, name_='ItemType'):
level += 1
self.exportLiteralAttributes(outfile, level, [], name_)
if self.hasContent_():
self.exportLiteralChildren(outfile, level, name_)
def exportLiteralAttributes(self, outfile, level, already_processed, name_):
if self.stopDelay is not None and 'stopDelay' not in already_processed:
already_processed.append('stopDelay')
showIndent(outfile, level)
outfile.write('stopDelay = %d,\n' % (self.stopDelay,))
if self.order is not None and 'order' not in already_processed:
already_processed.append('order')
showIndent(outfile, level)
outfile.write('order = %d,\n' % (self.order,))
if self.startAction is not None and 'startAction' not in already_processed:
already_processed.append('startAction')
showIndent(outfile, level)
outfile.write('startAction = "%s",\n' % (self.startAction,))
if self.startDelay is not None and 'startDelay' not in already_processed:
already_processed.append('startDelay')
showIndent(outfile, level)
outfile.write('startDelay = %d,\n' % (self.startDelay,))
if self.waitingForGuest is not None and 'waitingForGuest' not in already_processed:
already_processed.append('waitingForGuest')
showIndent(outfile, level)
outfile.write('waitingForGuest = %s,\n' % (self.waitingForGuest,))
if self.stopAction is not None and 'stopAction' not in already_processed:
already_processed.append('stopAction')
showIndent(outfile, level)
outfile.write('stopAction = "%s",\n' % (self.stopAction,))
if self.id is not None and 'id' not in already_processed:
already_processed.append('id')
showIndent(outfile, level)
outfile.write('id = "%s",\n' % (self.id,))
for name, value in self.anyAttributes_.items():
showIndent(outfile, level)
outfile.write('%s = "%s",\n' % (name, value,))
    def exportLiteralChildren(self, outfile, level, name_):
        # ItemType has no child elements; nothing to write.
        pass
def build(self, node):
self.buildAttributes(node, node.attrib, [])
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
    def buildAttributes(self, node, attrs, already_processed):
        """Populate attribute fields of this object from a parsed XML *node*.

        Integer and boolean attributes are validated and converted; malformed
        values are reported through ``raise_parse_error``.  Unrecognized
        attributes are collected into ``self.anyAttributes_``.
        NOTE: uses Python 2 ``except ValueError, exp`` syntax, consistent
        with the rest of this generated module.
        """
        value = find_attr_value_('stopDelay', node)
        if value is not None and 'stopDelay' not in already_processed:
            already_processed.append('stopDelay')
            try:
                self.stopDelay = int(value)
            except ValueError, exp:
                raise_parse_error(node, 'Bad integer attribute: %s' % exp)
        value = find_attr_value_('order', node)
        if value is not None and 'order' not in already_processed:
            already_processed.append('order')
            try:
                self.order = int(value)
            except ValueError, exp:
                raise_parse_error(node, 'Bad integer attribute: %s' % exp)
        value = find_attr_value_('startAction', node)
        if value is not None and 'startAction' not in already_processed:
            already_processed.append('startAction')
            self.startAction = value
        value = find_attr_value_('startDelay', node)
        if value is not None and 'startDelay' not in already_processed:
            already_processed.append('startDelay')
            try:
                self.startDelay = int(value)
            except ValueError, exp:
                raise_parse_error(node, 'Bad integer attribute: %s' % exp)
        value = find_attr_value_('waitingForGuest', node)
        if value is not None and 'waitingForGuest' not in already_processed:
            already_processed.append('waitingForGuest')
            # Only the canonical XML Schema boolean spellings are accepted.
            if value in ('true', '1'):
                self.waitingForGuest = True
            elif value in ('false', '0'):
                self.waitingForGuest = False
            else:
                raise_parse_error(node, 'Bad boolean attribute')
        value = find_attr_value_('stopAction', node)
        if value is not None and 'stopAction' not in already_processed:
            already_processed.append('stopAction')
            self.stopAction = value
        value = find_attr_value_('id', node)
        if value is not None and 'id' not in already_processed:
            already_processed.append('id')
            self.id = value
        # Anything not consumed above is kept as a wildcard attribute.
        self.anyAttributes_ = {}
        for name, value in attrs.items():
            if name not in already_processed:
                self.anyAttributes_[name] = value
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        # ItemType defines no child elements; nothing to build.
        pass
# end class ItemType
class ConfigurationType(GeneratedsSuper):
    """Generated binding for the OVF ``Configuration`` element of a
    DeploymentOptionSection: an identified deployment profile with a
    human-readable ``Label`` and ``Description`` (both Msg_Type children).
    Serialization order and formatting are fixed by the generator.
    """
    subclass = None
    superclass = None
    def __init__(self, default=False, id=None, Label=None, Description=None):
        self.default = _cast(bool, default)
        self.id = _cast(None, id)
        self.Label = Label
        self.Description = Description
        # Wildcard (xs:anyAttribute) attributes captured during parsing.
        self.anyAttributes_ = {}
    def factory(*args_, **kwargs_):
        # Indirect constructor: honors an installed subclass override.
        if ConfigurationType.subclass:
            return ConfigurationType.subclass(*args_, **kwargs_)
        else:
            return ConfigurationType(*args_, **kwargs_)
    factory = staticmethod(factory)
    def get_Label(self): return self.Label
    def set_Label(self, Label): self.Label = Label
    def get_Description(self): return self.Description
    def set_Description(self, Description): self.Description = Description
    def get_default(self): return self.default
    def set_default(self, default): self.default = default
    def get_id(self): return self.id
    def set_id(self, id): self.id = id
    def get_anyAttributes_(self): return self.anyAttributes_
    def set_anyAttributes_(self, anyAttributes_): self.anyAttributes_ = anyAttributes_
    def export(self, outfile, level, namespace_='ovf:', name_='ConfigurationType', namespacedef_=''):
        """Write this element (and children, if any) as XML to *outfile*."""
        showIndent(outfile, level)
        outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
        already_processed = []
        self.exportAttributes(outfile, level, already_processed, namespace_, name_='ConfigurationType')
        if self.hasContent_():
            outfile.write('>\n')
            self.exportChildren(outfile, level + 1, namespace_, name_)
            showIndent(outfile, level)
            outfile.write('</%s%s>\n' % (namespace_, name_))
        else:
            # Self-closing form when there is nothing inside the element.
            outfile.write('/>\n')
    def exportAttributes(self, outfile, level, already_processed, namespace_='ovf:', name_='ConfigurationType'):
        """Write XML attributes, re-declaring foreign namespaces as needed."""
        unique_counter = 0
        for name, value in self.anyAttributes_.items():
            xsinamespaceprefix = 'xsi'
            xsinamespace1 = 'http://www.w3.org/2001/XMLSchema-instance'
            xsinamespace2 = '{%s}' % (xsinamespace1, )
            if name.startswith(xsinamespace2):
                # Re-emit xsi-namespaced attributes under the xsi: prefix.
                name1 = name[len(xsinamespace2):]
                name2 = '%s:%s' % (xsinamespaceprefix, name1, )
                if name2 not in already_processed:
                    already_processed.append(name2)
                    outfile.write(' %s=%s' % (name2, quote_attrib(value), ))
            else:
                mo = re_.match(Namespace_extract_pat_, name)
                if mo is not None:
                    namespace, name = mo.group(1, 2)
                    if name not in already_processed:
                        already_processed.append(name)
                        if namespace == 'http://www.w3.org/XML/1998/namespace':
                            outfile.write(' %s=%s' % (name, quote_attrib(value), ))
                        else:
                            # Unknown namespace: declare a synthetic prefix.
                            unique_counter += 1
                            outfile.write(' xmlns:yyy%d="%s"' % (unique_counter, namespace, ))
                            outfile.write(' yyy%d:%s=%s' % (unique_counter, name, quote_attrib(value), ))
                else:
                    if name not in already_processed:
                        already_processed.append(name)
                        outfile.write(' %s=%s' % (name, quote_attrib(value), ))
        if self.default is not None and 'default' not in already_processed:
            already_processed.append('default')
            outfile.write(' default="%s"' % self.gds_format_boolean(self.gds_str_lower(str(self.default)), input_name='default'))
        if self.id is not None and 'id' not in already_processed:
            already_processed.append('id')
            outfile.write(' id=%s' % (self.gds_format_string(quote_attrib(self.id).encode(ExternalEncoding), input_name='id'), ))
    def exportChildren(self, outfile, level, namespace_='ovf:', name_='ConfigurationType', fromsubclass_=False):
        if self.Label is not None:
            self.Label.export(outfile, level, namespace_, name_='Label', )
        if self.Description is not None:
            self.Description.export(outfile, level, namespace_, name_='Description', )
    def hasContent_(self):
        if (
            self.Label is not None or
            self.Description is not None
            ):
            return True
        else:
            return False
    def exportLiteral(self, outfile, level, name_='ConfigurationType'):
        """Write this element as a Python-literal fragment to *outfile*."""
        level += 1
        self.exportLiteralAttributes(outfile, level, [], name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)
    def exportLiteralAttributes(self, outfile, level, already_processed, name_):
        if self.default is not None and 'default' not in already_processed:
            already_processed.append('default')
            showIndent(outfile, level)
            outfile.write('default = %s,\n' % (self.default,))
        if self.id is not None and 'id' not in already_processed:
            already_processed.append('id')
            showIndent(outfile, level)
            outfile.write('id = "%s",\n' % (self.id,))
        for name, value in self.anyAttributes_.items():
            showIndent(outfile, level)
            outfile.write('%s = "%s",\n' % (name, value,))
    def exportLiteralChildren(self, outfile, level, name_):
        if self.Label is not None:
            showIndent(outfile, level)
            outfile.write('Label=model_.Msg_Type(\n')
            self.Label.exportLiteral(outfile, level, name_='Label')
            showIndent(outfile, level)
            outfile.write('),\n')
        if self.Description is not None:
            showIndent(outfile, level)
            outfile.write('Description=model_.Msg_Type(\n')
            self.Description.exportLiteral(outfile, level, name_='Description')
            showIndent(outfile, level)
            outfile.write('),\n')
    def build(self, node):
        """Populate this object from an ElementTree *node*."""
        self.buildAttributes(node, node.attrib, [])
        for child in node:
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_)
    def buildAttributes(self, node, attrs, already_processed):
        value = find_attr_value_('default', node)
        if value is not None and 'default' not in already_processed:
            already_processed.append('default')
            # Only canonical XML Schema boolean spellings are accepted.
            if value in ('true', '1'):
                self.default = True
            elif value in ('false', '0'):
                self.default = False
            else:
                raise_parse_error(node, 'Bad boolean attribute')
        value = find_attr_value_('id', node)
        if value is not None and 'id' not in already_processed:
            already_processed.append('id')
            self.id = value
        self.anyAttributes_ = {}
        for name, value in attrs.items():
            if name not in already_processed:
                self.anyAttributes_[name] = value
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        if nodeName_ == 'Label':
            obj_ = Msg_Type.factory()
            obj_.build(child_)
            self.set_Label(obj_)
        elif nodeName_ == 'Description':
            obj_ = Msg_Type.factory()
            obj_.build(child_)
            self.set_Description(obj_)
# end class ConfigurationType
class RASD_Type(CIM_ResourceAllocationSettingData_Type):
    """Wrapper for CIM_ResourceAllocationSettingData_Type.

    Adds the OVF attributes:
      required      -- determines whether import should fail if the entry
                       is not understood
      configuration -- configuration from a DeploymentOptionSection this
                       entry is valid for
      bound         -- states that this entry is a range marker
    """
    subclass = None
    superclass = CIM_ResourceAllocationSettingData_Type
    def __init__(self, Address=None, AddressOnParent=None, AllocationUnits=None, AutomaticAllocation=None, AutomaticDeallocation=None, Caption=None, Connection=None, ConsumerVisibility=None, Description=None, ElementName=None, HostResource=None, InstanceID=None, Limit=None, MappingBehavior=None, OtherResourceType=None, Parent=None, PoolID=None, Reservation=None, ResourceSubType=None, ResourceType=None, VirtualQuantity=None, VirtualQuantityUnits=None, Weight=None, anytypeobjs_=None, required=True, bound=None, configuration=None):
        super(RASD_Type, self).__init__(Address, AddressOnParent, AllocationUnits, AutomaticAllocation, AutomaticDeallocation, Caption, Connection, ConsumerVisibility, Description, ElementName, HostResource, InstanceID, Limit, MappingBehavior, OtherResourceType, Parent, PoolID, Reservation, ResourceSubType, ResourceType, VirtualQuantity, VirtualQuantityUnits, Weight, anytypeobjs_, )
        self.required = _cast(bool, required)
        self.bound = _cast(None, bound)
        self.configuration = _cast(None, configuration)
        # Wildcard (xs:anyAttribute) attributes captured during parsing.
        self.anyAttributes_ = {}
    def factory(*args_, **kwargs_):
        # Indirect constructor: honors an installed subclass override.
        if RASD_Type.subclass:
            return RASD_Type.subclass(*args_, **kwargs_)
        else:
            return RASD_Type(*args_, **kwargs_)
    factory = staticmethod(factory)
    def get_required(self): return self.required
    def set_required(self, required): self.required = required
    def get_bound(self): return self.bound
    def set_bound(self, bound): self.bound = bound
    def get_configuration(self): return self.configuration
    def set_configuration(self, configuration): self.configuration = configuration
    def get_anyAttributes_(self): return self.anyAttributes_
    def set_anyAttributes_(self, anyAttributes_): self.anyAttributes_ = anyAttributes_
    def export(self, outfile, level, namespace_='ovf:', name_='RASD_Type', namespacedef_=''):
        """Write this element (and inherited children) as XML to *outfile*."""
        showIndent(outfile, level)
        outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
        already_processed = []
        self.exportAttributes(outfile, level, already_processed, namespace_, name_='RASD_Type')
        if self.hasContent_():
            outfile.write('>\n')
            self.exportChildren(outfile, level + 1, namespace_, name_)
            showIndent(outfile, level)
            outfile.write('</%s%s>\n' % (namespace_, name_))
        else:
            outfile.write('/>\n')
    def exportAttributes(self, outfile, level, already_processed, namespace_='ovf:', name_='RASD_Type'):
        """Write wildcard, inherited, and local XML attributes."""
        unique_counter = 0
        for name, value in self.anyAttributes_.items():
            xsinamespaceprefix = 'xsi'
            xsinamespace1 = 'http://www.w3.org/2001/XMLSchema-instance'
            xsinamespace2 = '{%s}' % (xsinamespace1, )
            if name.startswith(xsinamespace2):
                # Re-emit xsi-namespaced attributes under the xsi: prefix.
                name1 = name[len(xsinamespace2):]
                name2 = '%s:%s' % (xsinamespaceprefix, name1, )
                if name2 not in already_processed:
                    already_processed.append(name2)
                    outfile.write(' %s=%s' % (name2, quote_attrib(value), ))
            else:
                mo = re_.match(Namespace_extract_pat_, name)
                if mo is not None:
                    namespace, name = mo.group(1, 2)
                    if name not in already_processed:
                        already_processed.append(name)
                        if namespace == 'http://www.w3.org/XML/1998/namespace':
                            outfile.write(' %s=%s' % (name, quote_attrib(value), ))
                        else:
                            # Unknown namespace: declare a synthetic prefix.
                            unique_counter += 1
                            outfile.write(' xmlns:yyy%d="%s"' % (unique_counter, namespace, ))
                            outfile.write(' yyy%d:%s=%s' % (unique_counter, name, quote_attrib(value), ))
                else:
                    if name not in already_processed:
                        already_processed.append(name)
                        outfile.write(' %s=%s' % (name, quote_attrib(value), ))
        super(RASD_Type, self).exportAttributes(outfile, level, already_processed, namespace_, name_='RASD_Type')
        if self.required is not None and 'required' not in already_processed:
            already_processed.append('required')
            outfile.write(' required="%s"' % self.gds_format_boolean(self.gds_str_lower(str(self.required)), input_name='required'))
        if self.bound is not None and 'bound' not in already_processed:
            already_processed.append('bound')
            outfile.write(' bound=%s' % (self.gds_format_string(quote_attrib(self.bound).encode(ExternalEncoding), input_name='bound'), ))
        if self.configuration is not None and 'configuration' not in already_processed:
            already_processed.append('configuration')
            outfile.write(' configuration=%s' % (self.gds_format_string(quote_attrib(self.configuration).encode(ExternalEncoding), input_name='configuration'), ))
    def exportChildren(self, outfile, level, namespace_='ovf:', name_='RASD_Type', fromsubclass_=False):
        # All child elements come from the CIM superclass.
        super(RASD_Type, self).exportChildren(outfile, level, namespace_, name_, True)
    def hasContent_(self):
        if (
            super(RASD_Type, self).hasContent_()
            ):
            return True
        else:
            return False
    def exportLiteral(self, outfile, level, name_='RASD_Type'):
        """Write this element as a Python-literal fragment to *outfile*."""
        level += 1
        self.exportLiteralAttributes(outfile, level, [], name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)
    def exportLiteralAttributes(self, outfile, level, already_processed, name_):
        if self.required is not None and 'required' not in already_processed:
            already_processed.append('required')
            showIndent(outfile, level)
            outfile.write('required = %s,\n' % (self.required,))
        if self.bound is not None and 'bound' not in already_processed:
            already_processed.append('bound')
            showIndent(outfile, level)
            outfile.write('bound = "%s",\n' % (self.bound,))
        if self.configuration is not None and 'configuration' not in already_processed:
            already_processed.append('configuration')
            showIndent(outfile, level)
            outfile.write('configuration = "%s",\n' % (self.configuration,))
        for name, value in self.anyAttributes_.items():
            showIndent(outfile, level)
            outfile.write('%s = "%s",\n' % (name, value,))
        super(RASD_Type, self).exportLiteralAttributes(outfile, level, already_processed, name_)
    def exportLiteralChildren(self, outfile, level, name_):
        super(RASD_Type, self).exportLiteralChildren(outfile, level, name_)
    def build(self, node):
        """Populate this object from an ElementTree *node*."""
        self.buildAttributes(node, node.attrib, [])
        for child in node:
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_)
    def buildAttributes(self, node, attrs, already_processed):
        value = find_attr_value_('required', node)
        if value is not None and 'required' not in already_processed:
            already_processed.append('required')
            # Only canonical XML Schema boolean spellings are accepted.
            if value in ('true', '1'):
                self.required = True
            elif value in ('false', '0'):
                self.required = False
            else:
                raise_parse_error(node, 'Bad boolean attribute')
        value = find_attr_value_('bound', node)
        if value is not None and 'bound' not in already_processed:
            already_processed.append('bound')
            self.bound = value
        value = find_attr_value_('configuration', node)
        if value is not None and 'configuration' not in already_processed:
            already_processed.append('configuration')
            self.configuration = value
        self.anyAttributes_ = {}
        for name, value in attrs.items():
            if name not in already_processed:
                self.anyAttributes_[name] = value
        super(RASD_Type, self).buildAttributes(node, attrs, already_processed)
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        # Child elements are handled entirely by the CIM superclass.
        super(RASD_Type, self).buildChildren(child_, node, nodeName_, True)
        pass
# end class RASD_Type
class VSSD_Type(CIM_VirtualSystemSettingData_Type):
    """Wrapper for CIM_VirtualSystemSettingData_Type.

    Adds only wildcard-attribute handling on top of the CIM superclass;
    all declared attributes and child elements are inherited.
    """
    subclass = None
    superclass = CIM_VirtualSystemSettingData_Type
    def __init__(self, AutomaticRecoveryAction=None, AutomaticShutdownAction=None, AutomaticStartupAction=None, AutomaticStartupActionDelay=None, AutomaticStartupActionSequenceNumber=None, Caption=None, ConfigurationDataRoot=None, ConfigurationFile=None, ConfigurationID=None, CreationTime=None, Description=None, ElementName=None, InstanceID=None, LogDataRoot=None, Notes=None, RecoveryFile=None, SnapshotDataRoot=None, SuspendDataRoot=None, SwapFileDataRoot=None, VirtualSystemIdentifier=None, VirtualSystemType=None, anytypeobjs_=None):
        super(VSSD_Type, self).__init__(AutomaticRecoveryAction, AutomaticShutdownAction, AutomaticStartupAction, AutomaticStartupActionDelay, AutomaticStartupActionSequenceNumber, Caption, ConfigurationDataRoot, ConfigurationFile, ConfigurationID, CreationTime, Description, ElementName, InstanceID, LogDataRoot, Notes, RecoveryFile, SnapshotDataRoot, SuspendDataRoot, SwapFileDataRoot, VirtualSystemIdentifier, VirtualSystemType, anytypeobjs_, )
        # Wildcard (xs:anyAttribute) attributes captured during parsing.
        self.anyAttributes_ = {}
    def factory(*args_, **kwargs_):
        # Indirect constructor: honors an installed subclass override.
        if VSSD_Type.subclass:
            return VSSD_Type.subclass(*args_, **kwargs_)
        else:
            return VSSD_Type(*args_, **kwargs_)
    factory = staticmethod(factory)
    def get_anyAttributes_(self): return self.anyAttributes_
    def set_anyAttributes_(self, anyAttributes_): self.anyAttributes_ = anyAttributes_
    def export(self, outfile, level, namespace_='ovf:', name_='VSSD_Type', namespacedef_=''):
        """Write this element (and inherited children) as XML to *outfile*."""
        showIndent(outfile, level)
        outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
        already_processed = []
        self.exportAttributes(outfile, level, already_processed, namespace_, name_='VSSD_Type')
        if self.hasContent_():
            outfile.write('>\n')
            self.exportChildren(outfile, level + 1, namespace_, name_)
            showIndent(outfile, level)
            outfile.write('</%s%s>\n' % (namespace_, name_))
        else:
            outfile.write('/>\n')
    def exportAttributes(self, outfile, level, already_processed, namespace_='ovf:', name_='VSSD_Type'):
        """Write wildcard attributes, then delegate to the CIM superclass."""
        unique_counter = 0
        for name, value in self.anyAttributes_.items():
            xsinamespaceprefix = 'xsi'
            xsinamespace1 = 'http://www.w3.org/2001/XMLSchema-instance'
            xsinamespace2 = '{%s}' % (xsinamespace1, )
            if name.startswith(xsinamespace2):
                # Re-emit xsi-namespaced attributes under the xsi: prefix.
                name1 = name[len(xsinamespace2):]
                name2 = '%s:%s' % (xsinamespaceprefix, name1, )
                if name2 not in already_processed:
                    already_processed.append(name2)
                    outfile.write(' %s=%s' % (name2, quote_attrib(value), ))
            else:
                mo = re_.match(Namespace_extract_pat_, name)
                if mo is not None:
                    namespace, name = mo.group(1, 2)
                    if name not in already_processed:
                        already_processed.append(name)
                        if namespace == 'http://www.w3.org/XML/1998/namespace':
                            outfile.write(' %s=%s' % (name, quote_attrib(value), ))
                        else:
                            # Unknown namespace: declare a synthetic prefix.
                            unique_counter += 1
                            outfile.write(' xmlns:yyy%d="%s"' % (unique_counter, namespace, ))
                            outfile.write(' yyy%d:%s=%s' % (unique_counter, name, quote_attrib(value), ))
                else:
                    if name not in already_processed:
                        already_processed.append(name)
                        outfile.write(' %s=%s' % (name, quote_attrib(value), ))
        super(VSSD_Type, self).exportAttributes(outfile, level, already_processed, namespace_, name_='VSSD_Type')
    def exportChildren(self, outfile, level, namespace_='ovf:', name_='VSSD_Type', fromsubclass_=False):
        super(VSSD_Type, self).exportChildren(outfile, level, namespace_, name_, True)
    def hasContent_(self):
        if (
            super(VSSD_Type, self).hasContent_()
            ):
            return True
        else:
            return False
    def exportLiteral(self, outfile, level, name_='VSSD_Type'):
        """Write this element as a Python-literal fragment to *outfile*."""
        level += 1
        self.exportLiteralAttributes(outfile, level, [], name_)
        if self.hasContent_():
            self.exportLiteralChildren(outfile, level, name_)
    def exportLiteralAttributes(self, outfile, level, already_processed, name_):
        for name, value in self.anyAttributes_.items():
            showIndent(outfile, level)
            outfile.write('%s = "%s",\n' % (name, value,))
        super(VSSD_Type, self).exportLiteralAttributes(outfile, level, already_processed, name_)
    def exportLiteralChildren(self, outfile, level, name_):
        super(VSSD_Type, self).exportLiteralChildren(outfile, level, name_)
    def build(self, node):
        """Populate this object from an ElementTree *node*."""
        self.buildAttributes(node, node.attrib, [])
        for child in node:
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_)
    def buildAttributes(self, node, attrs, already_processed):
        # Collect wildcard attributes, then let the superclass consume its own.
        self.anyAttributes_ = {}
        for name, value in attrs.items():
            if name not in already_processed:
                self.anyAttributes_[name] = value
        super(VSSD_Type, self).buildAttributes(node, attrs, already_processed)
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
        # Child elements are handled entirely by the CIM superclass.
        super(VSSD_Type, self).buildChildren(child_, node, nodeName_, True)
        pass
# end class VSSD_Type
# Usage banner printed when the script is invoked with wrong arguments.
USAGE_TEXT = """
Usage: python <Parser>.py [ -s ] <in_xml_file>
"""
def usage():
    """Print the usage banner and abort with exit status 1.

    FIX: uses the parenthesized form of print, which behaves identically
    as a Python 2 print statement (single parenthesized expression) and
    as a Python 3 function call.
    """
    print(USAGE_TEXT)
    sys.exit(1)
def get_root_tag(node):
    """Map an ElementTree *node* to ``(local tag name, binding class or None)``.

    The class is looked up by name in this module's globals.
    """
    local_name = Tag_pattern_.match(node.tag).groups()[-1]
    return local_name, globals().get(local_name)
def parse(inFileName):
    """Parse *inFileName* into a tree of binding objects and return the root.

    Falls back to EnvelopeType when the document's root tag has no
    matching binding class in this module.
    """
    document = parsexml_(inFileName)
    root_node = document.getroot()
    root_tag, root_class = get_root_tag(root_node)
    if root_class is None:
        root_tag = 'Envelope'
        root_class = EnvelopeType
    root_obj = root_class.factory()
    root_obj.build(root_node)
    # Drop the DOM reference so its memory can be reclaimed.
    document = None
    # sys.stdout.write('<?xml version="1.0" ?>\n')
    # root_obj.export(sys.stdout, 0, name_=root_tag,
    #     namespacedef_='')
    return root_obj
def parseString(inString):
    """Parse an XML document given as a string and return the root object.

    Unlike parse(), this variant also echoes the parsed tree back to
    stdout as XML.  NOTE: imports the Python 2 ``StringIO`` module,
    consistent with the rest of this generated module.
    """
    from StringIO import StringIO
    doc = parsexml_(StringIO(inString))
    rootNode = doc.getroot()
    rootTag, rootClass = get_root_tag(rootNode)
    if rootClass is None:
        rootTag = 'Envelope'
        rootClass = EnvelopeType
    rootObj = rootClass.factory()
    rootObj.build(rootNode)
    # Enable Python to collect the space used by the DOM.
    doc = None
    sys.stdout.write('<?xml version="1.0" ?>\n')
    rootObj.export(sys.stdout, 0, name_="Envelope",
        namespacedef_='')
    return rootObj
def parseLiteral(inFileName):
    """Parse *inFileName* and emit the tree as executable Python-literal code.

    The generated code is written to stdout; the root binding object is
    returned.
    """
    doc = parsexml_(inFileName)
    rootNode = doc.getroot()
    rootTag, rootClass = get_root_tag(rootNode)
    if rootClass is None:
        rootTag = 'Envelope'
        rootClass = EnvelopeType
    rootObj = rootClass.factory()
    rootObj.build(rootNode)
    # Enable Python to collect the space used by the DOM.
    doc = None
    sys.stdout.write('#from ovfenvelope import *\n\n')
    sys.stdout.write('import ovfenvelope as model_\n\n')
    # BUG FIX: the original wrote the literal text 'model_.rootTag(',
    # which names no class; interpolate the actual root tag so the
    # emitted code is runnable.
    sys.stdout.write('rootObj = model_.%s(\n' % rootTag)
    rootObj.exportLiteral(sys.stdout, 0, name_=rootTag)
    sys.stdout.write(')\n')
    return rootObj
def main():
    """Command-line entry point: parse the single input file or show usage."""
    cli_args = sys.argv[1:]
    if len(cli_args) != 1:
        usage()
    else:
        parse(cli_args[0])
if __name__ == '__main__':
    # Allow running this generated module directly as a parser script.
    #import pdb; pdb.set_trace()
    main()
# Public API of this generated module: every binding class that maps to a
# complex type in the OVF envelope schema.
__all__ = [
    "AnnotationSection_Type",
    "CIM_ResourceAllocationSettingData_Type",
    "CIM_VirtualSystemSettingData_Type",
    "Caption",
    "ConfigurationType",
    "Content_Type",
    "DeploymentOptionSection_Type",
    "DiskSection_Type",
    "EnvelopeType",
    "EulaSection_Type",
    "File_Type",
    "IconType",
    "InstallSection_Type",
    "ItemType",
    "MsgType",
    "Msg_Type",
    "NetworkSection_Type",
    "NetworkType",
    "OperatingSystemSection_Type",
    "ProductSection_Type",
    "PropertyConfigurationValue_Type",
    "PropertyType",
    "RASD_Type",
    "References_Type",
    "ResourceAllocationSection_Type",
    "Section_Type",
    "StartupSection_Type",
    "Strings_Type",
    "VSSD_Type",
    "VirtualDiskDesc_Type",
    "VirtualHardwareSection_Type",
    "VirtualSystemCollection_Type",
    "VirtualSystem_Type",
    "cimAnySimpleType",
    "cimBase64Binary",
    "cimBoolean",
    "cimByte",
    "cimChar16",
    "cimDateTime",
    "cimDouble",
    "cimFloat",
    "cimHexBinary",
    "cimInt",
    "cimLong",
    "cimReference",
    "cimShort",
    "cimString",
    "cimUnsignedByte",
    "cimUnsignedInt",
    "cimUnsignedLong",
    "cimUnsignedShort",
    "qualifierBoolean",
    "qualifierSArray",
    "qualifierSInt64",
    "qualifierString",
    "qualifierUInt32"
    ]
|
Dhandapani/gluster-ovirt
|
backend/manager/tools/engine-image-uploader/src/ovf/ovfenvelope.py
|
Python
|
apache-2.0
| 398,478
| 0.004236
|
'''
pyttsx setup script.
Copyright (c) 2009, 2013 Peter Parente
Permission to use, copy, modify, and distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED 'AS IS' AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
'''
from setuptools import setup, find_packages
# Distribution metadata for PyPI; installs the pyttsx package together
# with its platform-specific driver subpackage.
setup(name='pyttsx',
      version='1.2',
      description='pyttsx - cross platform text-to-speech',
      long_description='pyttsx is a Python package supporting common text-to-speech engines on Mac OS X, Windows, and Linux.',
      author='Peter Parente',
      author_email='parente@cs.unc.edu',
      url='https://github.com/parente/pyttsx',
      download_url='http://pypi.python.org/pypi/pyttsx',
      license='BSD License',
      packages=['pyttsx', 'pyttsx.drivers']
      )
|
valdecar/Murka
|
pyttsx-master/setup.py
|
Python
|
gpl-3.0
| 1,287
| 0.002331
|
#!/usr/bin/env python
# encoding: utf-8
"""
Generic stock functions
"""
class Stock(object):
    """Generic stock information record.

    Attributes:
        symbol: ticker symbol identifying the stock
        name: company name
        sector: market sector the company belongs to
    """

    def __init__(self, symbol, name, sector):
        super(Stock, self).__init__()
        # Plain data holder; no validation is performed.
        self.symbol, self.name, self.sector = symbol, name, sector
|
kevinkirkup/hedge
|
python/hedge/stock.py
|
Python
|
gpl-3.0
| 309
| 0.003236
|
from __future__ import print_function
import sys
import subprocess
class AutoInstall(object):
    """PEP 302 meta-path hook that pip-installs missing top-level modules.

    WARNING: runs ``sudo pip install <name>`` for any absent module --
    demo code only; never enable it for untrusted module names.
    """
    # Names already attempted, to avoid retry loops on failed installs.
    _loaded = set()

    @classmethod
    def find_module(cls, name, path, target=None):
        """Attempt installation on first sight of a missing top-level import.

        Always returns None so the import machinery keeps searching and
        picks up the freshly installed module via the normal finders.
        """
        # Only react to top-level imports (path is None) and only once per name.
        if path is None and name not in cls._loaded:
            cls._loaded.add(name)
            print("Installing", name)
            try:
                out = subprocess.check_output(['sudo', sys.executable, '-m', 'pip', 'install', name])
                print(out)
            except Exception as e:
                # BUG FIX: exceptions have no `.message` attribute in
                # Python 3 (raised AttributeError here); use str(e).
                print("Failed" + str(e))
        return None
# Register the hook last on the finder chain so normal imports are tried first.
sys.meta_path.append(AutoInstall)
|
Liuchang0812/slides
|
pycon2015cn/ex6_auto_install/autoinstall.py
|
Python
|
mit
| 590
| 0.00339
|
# ----------------------------------------------------------------------------
# Copyright (c) 2017-, labman development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
from os import environ
from os.path import expanduser, exists
from datetime import datetime
from configparser import ConfigParser
class ConfigurationManager(object):
    """Holds the labman configuration
    Parameters
    ----------
    conf_fp: str, optional
        Filepath to the configuration file. Default: config_test.cfg
    Attributes
    ----------
    test_environment : bool
        If true, we are in a test environment.
    database : str
        The postgres database to connect to
    user : str
        The postgres user
    password : str
        The postgres password for the previous user
    admin_user : str
        The administrator user, which can be used to create/drop environments
    admin_password : str
        The postgres password for the admin_user
    host : str
        The host where the database lives
    port : int
        The port used to connect to the postgres database in the previous host
    qiita_server_cert : str
        If qiita enabled, the qiita server certificate
    Raises
    ------
    RuntimeError
        When an option is no longer available.
    """
    @staticmethod
    def create(config_fp, test_env, db_host, db_port, db_name, db_user,
               db_password, db_admin_user, db_admin_password, log_dir,
               qiita_server_cert):
        """Creates a new labman configuration file
        Parameters
        ----------
        config_fp : str
            Path to the configuration file
        test_env : bool
            If true, a config file for a test environment will be created
        db_host : str
            The host where the database lives
        db_port : int
            The port used to connect to the postgres database in the previous
            host
        db_name : str
            The postgres database to connect to
        db_user : str
            The postgres user
        db_password : str
            The postgres password for the previous user
        db_admin_user : str
            The administrator user, which can be used to create/drop
            environments
        db_admin_password : str
            The postgres password for the admin_user
        log_dir : str
            Path to the log directory
        qiita_server_cert : str
            The qiita server certificate (for testing)
        """
        # Render the module-level template with the provided values.
        with open(config_fp, 'w') as f:
            f.write(CONFIG_TEMPLATE % {
                'test': test_env,
                'date': str(datetime.now()),
                'user': db_user,
                'admin_user': db_admin_user,
                'password': db_password,
                'admin_password': db_admin_password,
                'database': db_name,
                'host': db_host,
                'port': db_port,
                'logdir': log_dir,
                'qiita_cert': qiita_server_cert})

    def __init__(self):
        # If LABMAN_CONFIG_FP is unset, fall back to the default location.
        try:
            self.conf_fp = environ['LABMAN_CONFIG_FP']
        except KeyError:
            self.conf_fp = expanduser('~/.labman.cfg')
            if not exists(self.conf_fp):
                raise RuntimeError(
                    'Please, configure labman using `labman config`. If the '
                    'config file is not in `~/.labman.cfg`, please set the '
                    '`LABMAN_CONFIG_FP` environment variable to the '
                    'configuration file')

        # Parse the configuration file.
        config = ConfigParser()
        # BUG FIX: mode 'U' was removed in Python 3.11 and
        # ConfigParser.readfp() was removed in Python 3.12.  Universal
        # newlines are the default for text-mode files, and read_file()
        # is the direct replacement for readfp().
        with open(self.conf_fp) as conf_file:
            config.read_file(conf_file)

        _required_sections = {'postgres'}
        if not _required_sections.issubset(set(config.sections())):
            missing = _required_sections - set(config.sections())
            raise RuntimeError(', '.join(missing))

        self._get_main(config)
        self._get_postgres(config)
        self._get_qiita(config)

    def _get_main(self, config):
        """Get the main configuration"""
        self.test_environment = config.getboolean('main', 'TEST_ENVIRONMENT')
        self.log_dir = config.get('main', 'LOG_DIR')

    def _get_postgres(self, config):
        """Get the configuration of the postgres section"""
        self.user = config.get('postgres', 'USER')
        # Empty strings are normalized to None for the optional credentials.
        self.admin_user = config.get('postgres', 'ADMIN_USER') or None
        self.password = config.get('postgres', 'PASSWORD')
        if not self.password:
            self.password = None
        self.admin_password = config.get('postgres', 'ADMIN_PASSWORD')
        if not self.admin_password:
            self.admin_password = None
        self.database = config.get('postgres', 'DATABASE')
        self.host = config.get('postgres', 'HOST')
        self.port = config.getint('postgres', 'PORT')

    def _get_qiita(self, config):
        """Get the qiita server certificate from the qiita section"""
        self.qiita_server_cert = config.get('qiita', 'SERVER_CERT')
# %-style template rendered by ConfigurationManager.create(); keys must
# match the dict passed there exactly.
CONFIG_TEMPLATE = """# Configuration file generated by labman on %(date)s
# ------------------------- MAIN SETTINGS ----------------------------------
[main]
TEST_ENVIRONMENT=%(test)s
LOG_DIR=%(logdir)s
# ----------------------- POSTGRES SETTINGS --------------------------------
[postgres]
USER=%(user)s
PASSWORD=%(password)s
ADMIN_USER=%(admin_user)s
ADMIN_PASSWORD=%(admin_password)s
DATABASE=%(database)s
HOST=%(host)s
PORT=%(port)s
# ------------------------- QIITA SETTINGS ----------------------------------
[qiita]
SERVER_CERT=%(qiita_cert)s
"""
|
josenavas/labman
|
labman/db/configuration_manager.py
|
Python
|
bsd-3-clause
| 5,785
| 0
|
# coding: utf-8
#
# Copyright © 2010—2014 Andrey Mikhaylenko and contributors
#
# This file is part of Argh.
#
# Argh is free software under terms of the GNU Lesser
# General Public License version 3 (LGPLv3) as published by the Free
# Software Foundation. See the file README.rst for copying conditions.
#
"""
Command decorators
~~~~~~~~~~~~~~~~~~
"""
from mitmflib.argh.constants import (ATTR_ALIASES, ATTR_ARGS, ATTR_NAME,
ATTR_WRAPPED_EXCEPTIONS,
ATTR_WRAPPED_EXCEPTIONS_PROCESSOR,
ATTR_EXPECTS_NAMESPACE_OBJECT)
# Public decorator API of this module.
__all__ = ['aliases', 'named', 'arg', 'wrap_errors', 'expects_obj']
def named(new_name):
    """
    Sets given string as command name instead of the function name.
    The string is used verbatim without further processing.
    Usage::
        @named('load')
        def do_load_some_stuff_and_keep_the_original_function_name(args):
            ...
    The resulting command will be available only as ``load``. To add aliases
    without renaming the command, check :func:`aliases`.
    .. versionadded:: 0.19
    """
    def decorator(function):
        # Store the override name on the function for the assembler to read.
        setattr(function, ATTR_NAME, new_name)
        return function
    return decorator
def aliases(*names):
    """
    Defines alternative command name(s) for given function (along with its
    original name). Usage::
        @aliases('co', 'check')
        def checkout(args):
            ...
    The resulting command will be available as ``checkout``, ``check`` and ``co``.
    .. note::
        This decorator only works with a recent version of argparse (see `Python
        issue 9324`_ and `Python rev 4c0426`_). Such version ships with
        **Python 3.2+** and may be available in other environments as a separate
        package. Argh does not issue warnings and simply ignores aliases if
        they are not supported. See :attr:`~argh.assembling.SUPPORTS_ALIASES`.
    .. _Python issue 9324: http://bugs.python.org/issue9324
    .. _Python rev 4c0426: http://hg.python.org/cpython/rev/4c0426261148/
    .. versionadded:: 0.19
    """
    def decorator(function):
        # Store the alias tuple on the function for the assembler to read.
        setattr(function, ATTR_ALIASES, names)
        return function
    return decorator
def arg(*args, **kwargs):
    """
    Declares an argument for given function. Does not register the function
    anywhere, nor does it modify the function in any way. The signature is
    exactly the same as that of :meth:`argparse.ArgumentParser.add_argument`,
    only some keywords are not required if they can be easily guessed.
    Usage::
        @arg('path')
        @arg('--format', choices=['yaml','json'], default='json')
        @arg('--dry-run', default=False)
        @arg('-v', '--verbosity', choices=range(0,3), default=1)
        def load(args):
            loaders = {'json': json.load, 'yaml': yaml.load}
            loader = loaders[args.format]
            data = loader(args.path)
            if not args.dry_run:
                if 1 < verbosity:
                    print('saving to the database')
                put_to_database(data)
    Note that:
    * you didn't have to specify ``action="store_true"`` for ``--dry-run``;
    * you didn't have to specify ``type=int`` for ``--verbosity``.
    """
    def decorator(function):
        spec = dict(option_strings=args, **kwargs)
        declared = getattr(function, ATTR_ARGS, [])
        # Decorators apply innermost-first, so prepend: the outermost @arg
        # ends up first, preserving the expected positional-argument order.
        declared.insert(0, spec)
        setattr(function, ATTR_ARGS, declared)
        return function
    return decorator
def wrap_errors(errors=None, processor=None, *args):
    """
    Decorator wrapping the given exception classes into
    :class:`~argh.exceptions.CommandError`. Usage::

        @wrap_errors([AssertionError])
        def foo(x=None, y=None):
            assert x or y, 'x or y must be specified'

    A failed assertion is then reported via its message with the traceback
    hidden, avoiding boilerplate error handling.

    :param errors:
        A list of exception classes to catch.
    :param processor:
        A callable that expects the exception object and returns a string.
        For example, to render every wrapped error in red::

            from termcolor import colored
            def failure(err):
                return colored(str(err), 'red')
            @wrap_errors(processor=failure)
            def my_command(...):
                ...
    """
    def decorator(function):
        # Only attach attributes that were actually supplied, so the
        # dispatcher's defaults remain in effect otherwise.
        if errors:
            setattr(function, ATTR_WRAPPED_EXCEPTIONS, errors)
        if processor:
            setattr(function, ATTR_WRAPPED_EXCEPTIONS_PROCESSOR, processor)
        return function
    return decorator
def expects_obj(func):
    """
    Marks given function as expecting a namespace object.
    Usage::
        @arg('bar')
        @arg('--quux', default=123)
        @expects_obj
        def foo(args):
            yield args.bar, args.quux
    This is equivalent to::
        def foo(bar, quux=123):
            yield bar, quux
    In most cases you don't need this decorator.
    """
    # Flag read by the dispatcher: pass the parsed argparse namespace object
    # itself rather than expanding it into individual arguments.
    setattr(func, ATTR_EXPECTS_NAMESPACE_OBJECT, True)
    return func
|
CiuffysHub/MITMf
|
mitmflib-0.18.4/mitmflib/argh/decorators.py
|
Python
|
gpl-3.0
| 5,296
| 0.000756
|
#
# Copyright (c) 2001 - 2015 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Options/EnumOption.py rel_2.3.5:3329:275e75118ad4 2015/06/20 11:18:26 bdbaddog"
__doc__ = """Place-holder for the old SCons.Options module hierarchy
This is for backwards compatibility. The new equivalent is the Variables/
class hierarchy. These will have deprecation warnings added (some day),
and will then be removed entirely (some day).
"""
import SCons.Variables
import SCons.Warnings
# Emit the deprecation warning only once per process.
warned = False
def EnumOption(*args, **kw):
    """Deprecated compatibility shim; forwards to ``EnumVariable``."""
    global warned
    if not warned:
        SCons.Warnings.warn(
            SCons.Warnings.DeprecatedOptionsWarning,
            "The EnumOption() function is deprecated; use the EnumVariable() function instead.")
        warned = True
    return SCons.Variables.EnumVariable(*args, **kw)
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
IljaGrebel/OpenWrt-SDK-imx6_HummingBoard
|
staging_dir/host/lib/scons-2.3.5/SCons/Options/EnumOption.py
|
Python
|
gpl-2.0
| 1,980
| 0.001515
|
import logging
from pyvisdk.exceptions import InvalidArgumentError
########################################
# Automatically generated, do not edit.
########################################
log = logging.getLogger(__name__)
def ExtensionFaultTypeInfo(vim, *args, **kwargs):
    '''This data object type describes fault types defined by the extension.

    :param vim: connected service instance; its ``client.factory`` builds the
        underlying SOAP object.
    :param args: positional property values, matched against the required
        then optional property names in order.
    :param kwargs: property values by name; each key must be a known
        required/optional property.
    :raises IndexError: if no property value is supplied at all
        (``faultID`` is required).
    :raises InvalidArgumentError: if a keyword does not name a known property.
    '''
    obj = vim.client.factory.create('ns0:ExtensionFaultTypeInfo')

    # do some validation checking...
    # Bug fix: the message previously claimed "at least 2" arguments (the
    # check requires 1) and reported len(args) only, ignoring kwargs.
    supplied = len(args) + len(kwargs)
    if supplied < 1:
        raise IndexError('Expected at least 1 arguments got: %d' % supplied)

    required = ['faultID']
    optional = ['dynamicProperty', 'dynamicType']

    # Positional values fill properties in declaration order.
    for name, arg in zip(required + optional, args):
        setattr(obj, name, arg)

    # Keyword values must name a known property.
    for name, value in kwargs.items():
        if name in required + optional:
            setattr(obj, name, value)
        else:
            raise InvalidArgumentError("Invalid argument: %s. Expected one of %s" % (name, ", ".join(required + optional)))

    return obj
|
xuru/pyvisdk
|
pyvisdk/do/extension_fault_type_info.py
|
Python
|
mit
| 1,025
| 0.00878
|
import unittest
import json
import forgi.threedee.utilities._dssr as ftud
import forgi.threedee.model.coarse_grain as ftmc
import forgi.graph.residue as fgr
class TestHelperFunctions(unittest.TestCase):
    """Tests for helper functions in forgi.threedee.utilities._dssr."""

    def test_dssr_to_pdb_atom_id_validIds(self):
        # dssr_to_pdb_resid maps a DSSR residue identifier to a
        # (chain, (insertion-flag, number, insertion-code)) pair.
        self.assertEqual(ftud.dssr_to_pdb_resid(
            "B.C24"), ("B", (" ", 24, " ")))
        # An optional "model:" prefix is accepted and dropped.
        self.assertEqual(ftud.dssr_to_pdb_resid(
            "1:B.C24"), ("B", (" ", 24, " ")))
        # No chain part at all yields chain None.
        self.assertEqual(ftud.dssr_to_pdb_resid(
            "LYS124"), (None, (" ", 124, " ")))
        # Multi-character chain names are supported.
        self.assertEqual(ftud.dssr_to_pdb_resid(
            "Z12.U13"), ("Z12", (" ", 13, " ")))
        # Modified residue names (e.g. 5BU) are handled.
        self.assertEqual(ftud.dssr_to_pdb_resid(
            "A.5BU36"), ("A", (" ", 36, " ")))
        # A "^X" suffix becomes the PDB insertion code.
        self.assertEqual(ftud.dssr_to_pdb_resid(
            "C.C47^M"), ("C", (" ", 47, "M")))
        self.assertEqual(ftud.dssr_to_pdb_resid(
            "C.5BU47^M"), ("C", (" ", 47, "M")))
        self.assertEqual(ftud.dssr_to_pdb_resid(u'A.C1'), ("A", (" ", 1, " ")))
        # Negative residue numbers are preserved.
        self.assertEqual(ftud.dssr_to_pdb_resid(
            u'B.U-1'), ("B", (" ", -1, " ")))
        self.assertEqual(ftud.dssr_to_pdb_resid(
            u'A.A-2'), ("A", (" ", -2, " ")))
class TestCoaxialStacks(unittest.TestCase):
    """Tests for DSSR-based coaxial-stack annotation against the 1J1U data."""

    def setUp(self):
        # Load the coarse-grained RNA and the matching DSSR JSON output.
        cg = ftmc.CoarseGrainRNA.from_bg_file("test/forgi/threedee/data/1J1U.cg")
        with open("test/forgi/threedee/data/1J1U.json") as f:
            j = json.load(f)
        self.dssr = ftud.DSSRAnnotation(j, cg)

    def test_coaxial_stacks(self):
        # Sorted comparison: the order of stack pairs is not guaranteed.
        self.assertEqual(sorted(self.dssr.coaxial_stacks()),
                         sorted([["s2", "s1"], ["s0", "s3"]]))

    @unittest.skip("Currently not working. TODO")
    def test_compare_coaxial_stacks(self):
        # Compares forgi's own stack detection with DSSR's annotation.
        forgi, dssr = self.dssr.compare_coaxial_stack_annotation()
        self.assertEqual(len(dssr), 2)
        self.assertGreaterEqual(len(forgi), 1)
        self.assertGreaterEqual(len(forgi & dssr), 1)
        self.assertIn(("s0", "s5"), (x.stems for x in forgi))
        for x in forgi:
            self.assertEqual(x.forgi, "stacking")
        for x in dssr:
            self.assertEqual(x.dssr, "stacking")

    def test_stacking_nts(self):
        # Stacking pairs are reported as ordered residue-id tuples.
        stacks = self.dssr.stacking_nts()
        self.assertIn((fgr.RESID("B:544"), fgr.RESID("B:545")), stacks)
        self.assertNotIn((fgr.RESID("B:549"), fgr.RESID("B:544")), stacks)
|
ViennaRNA/forgi
|
test/forgi/threedee/utilities/test_dssr.py
|
Python
|
gpl-3.0
| 2,377
| 0.000841
|
'''
pysplat is free software: you can redistribute it and/or
modify it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
pysplat is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with PySplat. If not, see < http://www.gnu.org/licenses/ >.
(C) 2016 by Thomas Pointhuber, <thomas.pointhuber@gmx.at>
'''
|
pointhi/PySplat
|
PySplat/util/__init__.py
|
Python
|
gpl-3.0
| 682
| 0.004399
|
# -*- coding: utf-8 -*-
from tornado.options import define
# Whether to run Tornado in debug mode.
define('debug', default=False, type=bool)
# Port that Tornado listens on.
define('port', default=8888, type=int)
# Whoosh Search: path to the search index directory.
define('whoosh_ix_path', default='/Users/liushuai/Desktop/index', type=str)
# MongoDB connection settings.
define('mongo_addr', default='127.0.0.1', type=str)
define('mongo_port', default=27017, type=int)
|
liushuaikobe/GitArchiveUtils
|
gitradar/config.py
|
Python
|
gpl-2.0
| 375
| 0
|
import unittest
import os
import numpy as np
from tools.sampling import read_log_file
from tools.walk_trees import walk_trees_with_data
from tools.game_tree.nodes import ActionNode, BoardCardsNode, HoleCardsNode
LEDUC_POKER_GAME_FILE_PATH = 'games/leduc.limit.2p.game'
class SamplingTests(unittest.TestCase):
    """Tests for building per-player sample trees from poker log files."""

    def test_log_parsing_to_sample_trees(self):
        # Parse a small two-player Leduc log, then walk each player's tree
        # along the hand actually played (hole cards 43/47, board card 50,
        # action index 1) checking the recorded decision counts.
        players = read_log_file(
            LEDUC_POKER_GAME_FILE_PATH,
            'test/sample_log.log',
            ['player_1', 'player_2'])
        callback_was_called_at_least_once = False

        def node_callback(data, node):
            nonlocal callback_was_called_at_least_once
            if isinstance(node, ActionNode):
                callback_was_called_at_least_once = True
                # `data` is True only on the path the logged hand followed.
                if data:
                    self.assertTrue(np.all(node.action_decision_counts == [0, 1, 0]))
                else:
                    self.assertTrue(np.all(node.action_decision_counts == [0, 0, 0]))
                return [data if action == 1 else False for action in node.children]
            elif isinstance(node, HoleCardsNode):
                return [cards == (43,) or cards == (47,) for cards in node.children]
            elif isinstance(node, BoardCardsNode):
                return [data if cards == (50,) else False for cards in node.children]
            else:
                return [data for _ in node.children]

        for name in players:
            player_tree = players[name]
            walk_trees_with_data(node_callback, True, player_tree)
        self.assertTrue(callback_was_called_at_least_once)

    def test_log_parsing_to_sample_trees_performance(self):
        # Larger log: the total decision count summed over both players'
        # trees must equal 50000.
        players = read_log_file(
            LEDUC_POKER_GAME_FILE_PATH,
            'test/sample_log-large.log',
            ['CFR_trained', 'Random_1'])
        visits_sum = 0
        for name in players:
            player_tree = players[name]
            for _, root_action_node in player_tree.children.items():
                visits_sum += np.sum(root_action_node.action_decision_counts)
        self.assertEqual(visits_sum, 50000)
|
JakubPetriska/poker-cfr
|
test/sampling_tests.py
|
Python
|
mit
| 2,099
| 0.003335
|
# Copyright (c) 2008, Aldo Cortesi. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import division
try:
import tracemalloc
except ImportError:
tracemalloc = None
from libqtile.dgroups import DGroups
from xcffib.xproto import EventMask, WindowError, AccessError, DrawableError
import logging
import os
import pickle
import shlex
import signal
import sys
import traceback
import xcffib
import xcffib.xinerama
import xcffib.xproto
import six
from . import asyncio
from .config import Drag, Click, Screen, Match, Rule
from .group import _Group
from .log_utils import logger
from .state import QtileState
from .utils import QtileError, get_cache_dir
from .widget.base import _Widget
from . import command
from . import hook
from . import utils
from . import window
from . import xcbq
# Python 2/3 compatibility shim: load a module by name from a directory.
# Python 3.3+ uses importlib's SourceFileLoader; older interpreters fall
# back to the deprecated imp module.
if sys.version_info >= (3, 3):
    def _import_module(module_name, dir_path):
        """Load and return module *module_name* from *dir_path*."""
        import importlib
        file_name = os.path.join(dir_path, module_name) + '.py'
        f = importlib.machinery.SourceFileLoader(module_name, file_name)
        module = f.load_module()
        return module
else:
    def _import_module(module_name, dir_path):
        """Load and return module *module_name* from *dir_path*."""
        import imp
        fp = None
        try:
            fp, pathname, description = imp.find_module(module_name, [dir_path])
            module = imp.load_module(module_name, fp, pathname, description)
        finally:
            # find_module may return an open file handle; always close it.
            if fp:
                fp.close()
        return module
class Qtile(command.CommandObject):
"""
This object is the __root__ of the command graph.
"""
    def __init__(self, config, displayName=None, fname=None, no_spawn=False, state=None):
        """Connect to the X server and build the initial window-manager state.

        :param config: the user's configuration object.
        :param displayName: X display to connect to; defaults to ``$DISPLAY``.
        :param fname: path of the command-server socket; derived from the
            display name when not given.
        :param no_spawn: True when restarting, so "startup_once" hooks are
            not fired a second time.
        :param state: optional pickled state string to restore from.
        """
        self.no_spawn = no_spawn
        self._eventloop = None
        self._finalize = False
        if not displayName:
            displayName = os.environ.get("DISPLAY")
            if not displayName:
                raise QtileError("No DISPLAY set.")
        if not fname:
            # Dots might appear in the host part of the display name
            # during remote X sessions. Let's strip the host part first.
            displayNum = displayName.partition(":")[2]
            if "." not in displayNum:
                displayName += ".0"
            fname = command.find_sockfile(displayName)
        self.conn = xcbq.Connection(displayName)
        self.config = config
        self.fname = fname
        hook.init(self)
        # wid -> managed window; name -> widget; name -> group.
        self.windowMap = {}
        self.widgetMap = {}
        self.groupMap = {}
        self.groups = []
        self.keyMap = {}
        # Find the modifier mask for the numlock key, if there is one:
        nc = self.conn.keysym_to_keycode(xcbq.keysyms["Num_Lock"])
        self.numlockMask = xcbq.ModMasks[self.conn.get_modifier(nc)]
        self.validMask = ~(self.numlockMask | xcbq.ModMasks["lock"])
        # Because we only do Xinerama multi-screening,
        # we can assume that the first
        # screen's root is _the_ root.
        self.root = self.conn.default_screen.root
        self.root.set_attribute(
            eventmask=(
                EventMask.StructureNotify |
                EventMask.SubstructureNotify |
                EventMask.SubstructureRedirect |
                EventMask.EnterWindow |
                EventMask.LeaveWindow
            )
        )
        # Advertise EWMH support on the root window.
        self.root.set_property(
            '_NET_SUPPORTED',
            [self.conn.atoms[x] for x in xcbq.SUPPORTED_ATOMS]
        )
        self.supporting_wm_check_window = self.conn.create_window(-1, -1, 1, 1)
        self.root.set_property(
            '_NET_SUPPORTING_WM_CHECK',
            self.supporting_wm_check_window.wid
        )
        # setup the default cursor
        self.root.set_cursor('left_ptr')
        wmname = getattr(self.config, "wmname", "qtile")
        self.supporting_wm_check_window.set_property('_NET_WM_NAME', wmname)
        self.supporting_wm_check_window.set_property(
            '_NET_SUPPORTING_WM_CHECK',
            self.supporting_wm_check_window.wid
        )
        # Optional user hook run before groups/screens are set up.
        if config.main:
            config.main(self)
        self.dgroups = None
        if self.config.groups:
            key_binder = None
            if hasattr(self.config, 'dgroups_key_binder'):
                key_binder = self.config.dgroups_key_binder
            self.dgroups = DGroups(self, self.config.groups, key_binder)
        if hasattr(config, "widget_defaults") and config.widget_defaults:
            _Widget.global_defaults = config.widget_defaults
        else:
            _Widget.global_defaults = {}
        for i in self.groups:
            self.groupMap[i.name] = i
        self.setup_eventloop()
        self.server = command._Server(self.fname, self, config, self._eventloop)
        self.currentScreen = None
        self.screens = []
        self._process_screens()
        self.currentScreen = self.screens[0]
        self._drag = None
        # Event classes we receive but deliberately do not handle.
        self.ignoreEvents = set([
            xcffib.xproto.KeyReleaseEvent,
            xcffib.xproto.ReparentNotifyEvent,
            xcffib.xproto.CreateNotifyEvent,
            # DWM handles this to help "broken focusing windows".
            xcffib.xproto.MapNotifyEvent,
            xcffib.xproto.LeaveNotifyEvent,
            xcffib.xproto.FocusOutEvent,
            xcffib.xproto.FocusInEvent,
            xcffib.xproto.NoExposureEvent
        ])
        self.conn.flush()
        self.conn.xsync()
        self._xpoll()
        # Map and Grab keys
        for key in self.config.keys:
            self.mapKey(key)
        # It fixes problems with focus when clicking windows of some specific clients like xterm
        def noop(qtile):
            pass
        self.config.mouse += (Click([], "Button1", command.lazy.function(noop), focus="after"),)
        # button code -> list of Click/Drag bindings sharing that button.
        self.mouseMap = {}
        for i in self.config.mouse:
            if self.mouseMap.get(i.button_code) is None:
                self.mouseMap[i.button_code] = []
            self.mouseMap[i.button_code].append(i)
        self.grabMouse()
        # no_spawn is set when we are restarting; we only want to run the
        # startup hook once.
        if not no_spawn:
            hook.fire("startup_once")
        hook.fire("startup")
        self.scan()
        self.update_net_desktops()
        hook.subscribe.setgroup(self.update_net_desktops)
        if state:
            st = pickle.load(six.BytesIO(state.encode()))
            try:
                st.apply(self)
            except:
                logger.exception("failed restoring state")
        self.selection = {
            "PRIMARY": {"owner": None, "selection": ""},
            "CLIPBOARD": {"owner": None, "selection": ""}
        }
        self.setup_selection()
    def setup_selection(self):
        """Create a hidden window and start tracking PRIMARY/CLIPBOARD."""
        PRIMARY = self.conn.atoms["PRIMARY"]
        CLIPBOARD = self.conn.atoms["CLIPBOARD"]
        self.selection_window = self.conn.create_window(-1, -1, 1, 1)
        self.selection_window.set_attribute(eventmask=EventMask.PropertyChange)
        # XFixes notifies us whenever selection ownership changes.
        self.conn.xfixes.select_selection_input(self.selection_window,
                                                "PRIMARY")
        self.conn.xfixes.select_selection_input(self.selection_window,
                                                "CLIPBOARD")
        # Record the current owners so state is correct from the start.
        r = self.conn.conn.core.GetSelectionOwner(PRIMARY).reply()
        self.selection["PRIMARY"]["owner"] = r.owner
        r = self.conn.conn.core.GetSelectionOwner(CLIPBOARD).reply()
        self.selection["CLIPBOARD"]["owner"] = r.owner
        # Ask for the selection contents on startup.
        self.convert_selection(PRIMARY)
        self.convert_selection(CLIPBOARD)
    def setup_eventloop(self):
        """Create the asyncio loop, wire signals and the X file descriptor."""
        self._eventloop = asyncio.new_event_loop()
        # SIGINT/SIGTERM shut the loop down cleanly.
        self._eventloop.add_signal_handler(signal.SIGINT, self.stop)
        self._eventloop.add_signal_handler(signal.SIGTERM, self.stop)
        self._eventloop.set_exception_handler(
            lambda x, y: logger.exception("Got an exception in poll loop")
        )
        logger.info('Adding io watch')
        # Poll X events whenever the connection's fd becomes readable.
        fd = self.conn.conn.get_file_descriptor()
        self._eventloop.add_reader(fd, self._xpoll)
        self.setup_python_dbus()
    def setup_python_dbus(self):
        """Spin up the GLib main-loop thread that python-dbus depends on."""
        # This is a little strange. python-dbus internally depends on gobject,
        # so gobject's threads need to be running, and a gobject "main loop
        # thread" needs to be spawned, but we try to let it only interact with
        # us via calls to asyncio's call_soon_threadsafe.
        try:
            # We import dbus here to thrown an ImportError if it isn't
            # available. Since the only reason we're running this thread is
            # because of dbus, if dbus isn't around there's no need to run
            # this thread.
            import dbus  # noqa
            from gi.repository import GLib

            def gobject_thread():
                # Iterate the default GLib context until finalize() flips
                # the flag; exceptions are logged, not fatal.
                ctx = GLib.main_context_default()
                while not self._finalize:
                    try:
                        ctx.iteration(True)
                    except Exception:
                        logger.exception("got exception from gobject")
            self._glib_loop = self.run_in_executor(gobject_thread)
        except ImportError:
            logger.warning("importing dbus/gobject failed, dbus will not work.")
            self._glib_loop = None
    def finalize(self):
        """Tear down widgets, layouts, bars, the X connection and the loop."""
        self._finalize = True
        self._eventloop.remove_signal_handler(signal.SIGINT)
        self._eventloop.remove_signal_handler(signal.SIGTERM)
        self._eventloop.set_exception_handler(None)
        try:
            # Nudge the GLib context so gobject_thread notices _finalize,
            # then wait for it to exit.
            from gi.repository import GLib
            GLib.idle_add(lambda: None)
            self._eventloop.run_until_complete(self._glib_loop)
        except ImportError:
            pass
        try:
            for w in self.widgetMap.values():
                w.finalize()
            for l in self.config.layouts:
                l.finalize()
            for screen in self.screens:
                for bar in [screen.top, screen.bottom, screen.left, screen.right]:
                    if bar is not None:
                        bar.finalize()
            logger.info('Removing io watch')
            fd = self.conn.conn.get_file_descriptor()
            self._eventloop.remove_reader(fd)
            self.conn.finalize()
            self.server.close()
        except:
            logger.exception('exception during finalize')
        finally:
            # Always close the loop, even if individual teardown steps failed.
            self._eventloop.close()
            self._eventloop = None
    def _process_fake_screens(self):
        """
        Since Xephyr, Xnest don't really support offset screens,
        we'll fake it here for testing, (or if you want to partition
        a physical monitor into separate screens)
        """
        for i, s in enumerate(self.config.fake_screens):
            # should have x,y, width and height set
            s._configure(self, i, s.x, s.y, s.width, s.height, self.groups[i])
            # The first configured screen becomes the current one.
            if not self.currentScreen:
                self.currentScreen = s
            self.screens.append(s)
    def _process_screens(self):
        """Build self.screens from fake screens or Xinerama pseudo-screens."""
        if hasattr(self.config, 'fake_screens'):
            self._process_fake_screens()
            return
        # What's going on here is a little funny. What we really want is only
        # screens that don't overlap here; overlapping screens should see the
        # same parts of the root window (i.e. for people doing xrandr
        # --same-as). However, the order that X gives us pseudo screens in is
        # important, because it indicates what people have chosen via xrandr
        # --primary or whatever. So we need to alias screens that should be
        # aliased, but preserve order as well. See #383.
        xywh = {}
        screenpos = []
        for s in self.conn.pseudoscreens:
            pos = (s.x, s.y)
            (w, h) = xywh.get(pos, (0, 0))
            if pos not in xywh:
                screenpos.append(pos)
            # Aliased screens at the same origin keep the largest extent.
            xywh[pos] = (max(w, s.width), max(h, s.height))
        for i, (x, y) in enumerate(screenpos):
            (w, h) = xywh[(x, y)]
            # Use the user's Screen config if there is one; else a default.
            if i + 1 > len(self.config.screens):
                scr = Screen()
            else:
                scr = self.config.screens[i]
            if not self.currentScreen:
                self.currentScreen = scr
            scr._configure(
                self,
                i,
                x,
                y,
                w,
                h,
                self.groups[i],
            )
            self.screens.append(scr)
        if not self.screens:
            # No Xinerama info at all: fall back to one full-size screen.
            if self.config.screens:
                s = self.config.screens[0]
            else:
                s = Screen()
            self.currentScreen = s
            s._configure(
                self,
                0, 0, 0,
                self.conn.default_screen.width_in_pixels,
                self.conn.default_screen.height_in_pixels,
                self.groups[0],
            )
            self.screens.append(s)
def mapKey(self, key):
self.keyMap[(key.keysym, key.modmask & self.validMask)] = key
code = self.conn.keysym_to_keycode(key.keysym)
self.root.grab_key(
code,
key.modmask,
True,
xcffib.xproto.GrabMode.Async,
xcffib.xproto.GrabMode.Async,
)
if self.numlockMask:
self.root.grab_key(
code,
key.modmask | self.numlockMask,
True,
xcffib.xproto.GrabMode.Async,
xcffib.xproto.GrabMode.Async,
)
self.root.grab_key(
code,
key.modmask | self.numlockMask | xcbq.ModMasks["lock"],
True,
xcffib.xproto.GrabMode.Async,
xcffib.xproto.GrabMode.Async,
)
def unmapKey(self, key):
key_index = (key.keysym, key.modmask & self.validMask)
if key_index not in self.keyMap:
return
code = self.conn.keysym_to_keycode(key.keysym)
self.root.ungrab_key(code, key.modmask)
if self.numlockMask:
self.root.ungrab_key(code, key.modmask | self.numlockMask)
self.root.ungrab_key(
code,
key.modmask | self.numlockMask | xcbq.ModMasks["lock"]
)
del(self.keyMap[key_index])
    def update_net_desktops(self):
        """Publish EWMH desktop count/names/current index on the root window."""
        try:
            index = self.groups.index(self.currentGroup)
        # TODO: we should really only except ValueError here, AttributeError is
        # an annoying chicken and egg because we're accessing currentScreen
        # (via currentGroup), and when we set up the initial groups, there
        # aren't any screens yet. This can probably be changed when #475 is
        # fixed.
        except (ValueError, AttributeError):
            index = 0
        self.root.set_property("_NET_NUMBER_OF_DESKTOPS", len(self.groups))
        self.root.set_property(
            "_NET_DESKTOP_NAMES", "\0".join([i.name for i in self.groups])
        )
        self.root.set_property("_NET_CURRENT_DESKTOP", index)
def addGroup(self, name, layout=None, layouts=None):
if name not in self.groupMap.keys():
g = _Group(name, layout)
self.groups.append(g)
if not layouts:
layouts = self.config.layouts
g._configure(layouts, self.config.floating_layout, self)
self.groupMap[name] = g
hook.fire("addgroup", self, name)
hook.fire("changegroup")
self.update_net_desktops()
return True
return False
    def delGroup(self, name):
        """Delete group *name*, moving its windows to another group.

        :raises ValueError: when deleting would leave fewer groups than
            screens.
        """
        # one group per screen is needed
        if len(self.groups) == len(self.screens):
            raise ValueError("Can't delete all groups.")
        if name in self.groupMap.keys():
            group = self.groupMap[name]
            # Prefer the screen's previous group as the new home for the
            # doomed group's windows; otherwise walk backwards.
            if group.screen and group.screen.previous_group:
                target = group.screen.previous_group
            else:
                target = group.prevGroup()
            # Find a group that's not currently on a screen to bring to the
            # front. This will terminate because of our check above.
            while target.screen:
                target = target.prevGroup()
            for i in list(group.windows):
                i.togroup(target.name)
            if self.currentGroup.name == name:
                self.currentScreen.setGroup(target, save_prev=False)
            self.groups.remove(group)
            del(self.groupMap[name])
            hook.fire("delgroup", self, name)
            hook.fire("changegroup")
            self.update_net_desktops()
def registerWidget(self, w):
"""
Register a bar widget. If a widget with the same name already
exists, this will silently ignore that widget. However, this is
not necessarily a bug. By default a widget's name is just
self.__class__.lower(), so putting multiple widgets of the same
class will alias and one will be inaccessible. Since more than one
groupbox widget is useful when you have more than one screen, this
is a not uncommon occurrence. If you want to use the debug
info for widgets with the same name, set the name yourself.
"""
if w.name:
if w.name in self.widgetMap:
return
self.widgetMap[w.name] = w
    @utils.LRUCache(200)
    def colorPixel(self, name):
        """Allocate (and cache) the X pixel value for color *name*."""
        return self.conn.screens[0].default_colormap.alloc_color(name).pixel
    @property
    def currentLayout(self):
        """The layout of the currently focused group."""
        return self.currentGroup.layout
    @property
    def currentGroup(self):
        """The group shown on the current screen."""
        return self.currentScreen.group
    @property
    def currentWindow(self):
        """The focused window of the current screen's group."""
        return self.currentScreen.group.currentWindow
    def scan(self):
        """Adopt pre-existing client windows found under the root window."""
        _, _, children = self.root.query_tree()
        for item in children:
            try:
                attrs = item.get_attributes()
                state = item.get_wm_state()
            except (xcffib.xproto.WindowError, xcffib.xproto.AccessError):
                # Window vanished between query_tree and inspection.
                continue
            # Skip unmapped and withdrawn windows.
            if attrs and attrs.map_state == xcffib.xproto.MapState.Unmapped:
                continue
            if state and state[0] == window.WithdrawnState:
                continue
            self.manage(item)
    def unmanage(self, win):
        """Forget the client associated with window id *win*, if any."""
        c = self.windowMap.get(win)
        if c:
            hook.fire("client_killed", c)
            # Give back any screen space its strut reserved.
            self.reset_gaps(c)
            if getattr(c, "group", None):
                c.group.remove(c)
            del self.windowMap[win]
            self.update_client_list()
def reset_gaps(self, c):
if c.strut:
self.update_gaps((0, 0, 0, 0), c.strut)
    def update_gaps(self, strut, old_strut=None):
        """Adjust the current screen's gaps from a window strut.

        *strut* is a ``_NET_WM_STRUT``-style sequence; only the first four
        values (left, right, top, bottom) are used. *old_strut*, if given,
        is the strut being replaced, whose now-empty side is cleared first.
        """
        from libqtile.bar import Gap
        (left, right, top, bottom) = strut[:4]
        if old_strut:
            (old_left, old_right, old_top, old_bottom) = old_strut[:4]
            # NOTE(review): this elif chain clears at most one side per call;
            # a strut that vanished on several sides only resets the first
            # matching one. Confirm whether that is intentional.
            if not left and old_left:
                self.currentScreen.left = None
            elif not right and old_right:
                self.currentScreen.right = None
            elif not top and old_top:
                self.currentScreen.top = None
            elif not bottom and old_bottom:
                self.currentScreen.bottom = None
        # Likewise only the first non-zero side gains a Gap here.
        if top:
            self.currentScreen.top = Gap(top)
        elif bottom:
            self.currentScreen.bottom = Gap(bottom)
        elif left:
            self.currentScreen.left = Gap(left)
        elif right:
            self.currentScreen.right = Gap(right)
        self.currentScreen.resize()
    def manage(self, w):
        """Start managing window *w*; return the client object or None.

        Returns the existing client when *w* is already managed, None when
        the window cannot or should not be managed (override-redirect,
        vanished, or made static/defunct by a hook).
        """
        try:
            attrs = w.get_attributes()
            internal = w.get_property("QTILE_INTERNAL")
        except (xcffib.xproto.WindowError, xcffib.xproto.AccessError):
            return
        # Override-redirect windows manage themselves (menus, tooltips...).
        if attrs and attrs.override_redirect:
            return
        if w.wid not in self.windowMap:
            if internal:
                try:
                    c = window.Internal(w, self)
                except (xcffib.xproto.WindowError, xcffib.xproto.AccessError):
                    return
                self.windowMap[w.wid] = c
            else:
                try:
                    c = window.Window(w, self)
                except (xcffib.xproto.WindowError, xcffib.xproto.AccessError):
                    return
                # Docks and strut-reserving windows become static.
                if w.get_wm_type() == "dock" or c.strut:
                    c.static(self.currentScreen.index)
                else:
                    hook.fire("client_new", c)
                    # Window may be defunct because
                    # it's been declared static in hook.
                    if c.defunct:
                        return
                    self.windowMap[w.wid] = c
                    # Window may have been bound to a group in the hook.
                    if not c.group:
                        self.currentScreen.group.add(c, focus=c.can_steal_focus())
                    self.update_client_list()
                    hook.fire("client_managed", c)
            return c
        else:
            return self.windowMap[w.wid]
def update_client_list(self):
"""
Updates the client stack list
this is needed for third party tasklists
and drag and drop of tabs in chrome
"""
windows = [wid for wid, c in self.windowMap.items() if c.group]
self.root.set_property("_NET_CLIENT_LIST", windows)
# TODO: check stack order
self.root.set_property("_NET_CLIENT_LIST_STACKING", windows)
    def grabMouse(self):
        """Release existing button grabs and grab every configured mouse
        binding on the root window (with modifier variants)."""
        self.root.ungrab_button(None, None)
        for i in self.config.mouse:
            if isinstance(i, Click) and i.focus:
                # Make a freezing grab on mouse button to gain focus
                # Event will propagate to target window
                grabmode = xcffib.xproto.GrabMode.Sync
            else:
                grabmode = xcffib.xproto.GrabMode.Async
            eventmask = EventMask.ButtonPress
            if isinstance(i, Drag):
                # Drags also need the release to know when to stop.
                eventmask |= EventMask.ButtonRelease
            self.root.grab_button(
                i.button_code,
                i.modmask,
                True,
                eventmask,
                grabmode,
                xcffib.xproto.GrabMode.Async,
            )
            if self.numlockMask:
                # Also grab with Num Lock and Num Lock + Caps Lock engaged.
                self.root.grab_button(
                    i.button_code,
                    i.modmask | self.numlockMask,
                    True,
                    eventmask,
                    grabmode,
                    xcffib.xproto.GrabMode.Async,
                )
                self.root.grab_button(
                    i.button_code,
                    i.modmask | self.numlockMask | xcbq.ModMasks["lock"],
                    True,
                    eventmask,
                    grabmode,
                    xcffib.xproto.GrabMode.Async,
                )
def grabKeys(self):
self.root.ungrab_key(None, None)
for key in self.keyMap.values():
self.mapKey(key)
    def get_target_chain(self, ename, e):
        """
        Returns a chain of targets that can handle this event. The event
        will be passed to each target in turn for handling, until one of
        the handlers returns False or the end of the chain is reached.
        """
        chain = []
        handler = "handle_%s" % ename
        # Certain events expose the affected window id as an "event" attribute.
        eventEvents = [
            "EnterNotify",
            "ButtonPress",
            "ButtonRelease",
            "KeyPress",
        ]
        # Resolve the client the event refers to, trying each attribute the
        # various event types use to carry the window id.
        c = None
        if hasattr(e, "window"):
            c = self.windowMap.get(e.window)
        elif hasattr(e, "drawable"):
            c = self.windowMap.get(e.drawable)
        elif ename in eventEvents:
            c = self.windowMap.get(e.event)
        # Client handler first, then our own method of the same name.
        if c and hasattr(c, handler):
            chain.append(getattr(c, handler))
        if hasattr(self, handler):
            chain.append(getattr(self, handler))
        if not chain:
            logger.info("Unknown event: %r" % ename)
        return chain
    def _xpoll(self):
        """Drain and dispatch all pending X events; called when the X
        connection's file descriptor becomes readable."""
        while True:
            try:
                e = self.conn.conn.poll_for_event()
                if not e:
                    break
                # "ConfigureNotifyEvent" -> "ConfigureNotify" etc.
                ename = e.__class__.__name__
                if ename.endswith("Event"):
                    ename = ename[:-5]
                if e.__class__ not in self.ignoreEvents:
                    logger.debug(ename)
                    for h in self.get_target_chain(ename, e):
                        logger.info("Handling: %s" % ename)
                        r = h(e)
                        # A falsy return stops propagation down the chain.
                        if not r:
                            break
            # Catch some bad X exceptions. Since X is event based, race
            # conditions can occur almost anywhere in the code. For
            # example, if a window is created and then immediately
            # destroyed (before the event handler is evoked), when the
            # event handler tries to examine the window properties, it
            # will throw a WindowError exception. We can essentially
            # ignore it, since the window is already dead and we've got
            # another event in the queue notifying us to clean it up.
            except (WindowError, AccessError, DrawableError):
                pass
            except Exception as e:
                # A broken connection is fatal; anything else is logged and
                # polling continues.
                error_code = self.conn.conn.has_error()
                if error_code:
                    error_string = xcbq.XCB_CONN_ERRORS[error_code]
                    logger.exception("Shutting down due to X connection error %s (%s)" %
                                     (error_string, error_code))
                    self.stop()
                    break
                logger.exception("Got an exception in poll loop")
        self.conn.flush()
    def stop(self):
        """Ask the event loop to stop; loop() then finalizes and returns."""
        logger.info('Stopping eventloop')
        self._eventloop.stop()
    def loop(self):
        """Start the command server and run the event loop until stop() is
        called, then tear everything down."""
        self.server.start()
        try:
            self._eventloop.run_forever()
        finally:
            self.finalize()
def find_screen(self, x, y):
"""
Find a screen based on the x and y offset.
"""
result = []
for i in self.screens:
if i.x <= x <= i.x + i.width and \
i.y <= y <= i.y + i.height:
result.append(i)
if len(result) == 1:
return result[0]
return None
def find_closest_screen(self, x, y):
"""
If find_screen returns None, then this basically extends a
screen vertically and horizontally and see if x,y lies in the
band.
Only works if it can find a SINGLE closest screen, else we
revert to _find_closest_closest.
Useful when dragging a window out of a screen onto another but
having leftmost corner above viewport.
"""
normal = self.find_screen(x, y)
if normal is not None:
return normal
x_match = []
y_match = []
for i in self.screens:
if i.x <= x <= i.x + i.width:
x_match.append(i)
if i.y <= y <= i.y + i.height:
y_match.append(i)
if len(x_match) == 1:
return x_match[0]
if len(y_match) == 1:
return y_match[0]
return self._find_closest_closest(x, y, x_match + y_match)
def _find_closest_closest(self, x, y, candidate_screens):
"""
if find_closest_screen can't determine one, we've got multiple
screens, so figure out who is closer. We'll calculate using
the square of the distance from the center of a screen.
Note that this could return None if x, y is right/below all
screens (shouldn't happen but we don't do anything about it
here other than returning None)
"""
closest_distance = None
closest_screen = None
if not candidate_screens:
# try all screens
candidate_screens = self.screens
# if left corner is below and right of screen
# it can't really be a candidate
candidate_screens = [
s for s in candidate_screens
if x < s.x + s.width and y < s.y + s.height
]
for s in candidate_screens:
middle_x = s.x + s.width / 2
middle_y = s.y + s.height / 2
distance = (x - middle_x) ** 2 + (y - middle_y) ** 2
if closest_distance is None or distance < closest_distance:
closest_distance = distance
closest_screen = s
return closest_screen
def handle_SelectionNotify(self, e):
    """Track ownership changes of X selections (e.g. PRIMARY/CLIPBOARD)."""
    if not getattr(e, "owner", None):
        return
    name = self.conn.atoms.get_name(e.selection)
    self.selection[name]["owner"] = e.owner
    self.selection[name]["selection"] = ""
    # Ask the new owner to convert the selection; the actual value arrives
    # later via a PropertyNotify on our selection window.
    self.convert_selection(e.selection)
    hook.fire("selection_notify", name, self.selection[name])
def convert_selection(self, selection, _type="UTF8_STRING"):
    """Request *selection* converted to *_type*, targeting our selection
    window; the result is delivered as a PropertyNotify event."""
    TYPE = self.conn.atoms[_type]
    self.conn.conn.core.ConvertSelection(self.selection_window.wid,
                                         selection,
                                         TYPE, selection,
                                         xcffib.CurrentTime)
def handle_PropertyNotify(self, e):
    """Receive converted selection contents on our selection window and
    re-fire them as the ``selection_change`` hook."""
    name = self.conn.atoms.get_name(e.atom)
    # it's the selection property
    if name in ("PRIMARY", "CLIPBOARD"):
        assert e.window == self.selection_window.wid
        prop = self.selection_window.get_property(e.atom, "UTF8_STRING")
        # If the selection property is None, it is unset, which means the
        # clipboard is empty.
        value = prop and prop.value.to_utf8() or six.u("")
        self.selection[name]["selection"] = value
        hook.fire("selection_change", name, self.selection[name])
def handle_EnterNotify(self, e):
    """Follow the pointer across screens (focus-follows-mouse at the
    screen level)."""
    if e.event in self.windowMap:
        # Managed windows handle their own enter events; returning True
        # marks the event as handled.
        return True
    s = self.find_screen(e.root_x, e.root_y)
    if s:
        self.toScreen(s.index, warp=False)
def handle_ClientMessage(self, event):
    """Handle EWMH client messages; currently only _NET_CURRENT_DESKTOP
    (a pager asking to switch groups)."""
    atoms = self.conn.atoms
    opcode = event.type
    data = event.data
    # handle change of desktop
    if atoms["_NET_CURRENT_DESKTOP"] == opcode:
        index = data.data32[0]
        try:
            self.currentScreen.setGroup(self.groups[index])
        except IndexError:
            logger.info("Invalid Desktop Index: %s" % index)
def handle_KeyPress(self, e):
    """Dispatch a grabbed key press to the matching Key binding's
    commands via the command server."""
    keysym = self.conn.code_to_syms[e.detail][0]
    state = e.state
    if self.numlockMask:
        # Fold NumLock into the modifier state so bindings fire regardless
        # of the NumLock LED state.
        state = e.state | self.numlockMask
    k = self.keyMap.get((keysym, state & self.validMask))
    if not k:
        logger.info("Ignoring unknown keysym: %s" % keysym)
        return
    for i in k.commands:
        if i.check(self):
            status, val = self.server.call(
                (i.selectors, i.name, i.args, i.kwargs)
            )
            if status in (command.ERROR, command.EXCEPTION):
                logger.error("KB command error %s: %s" % (i.name, val))
        else:
            # A failing check() aborts the whole binding.
            return
def cmd_focus_by_click(self, e):
    """Focus (and optionally raise) the window under a pointer click."""
    wnd = e.child or e.root
    # Additional option for config.py
    # Brings clicked window to front
    if self.config.bring_front_click:
        self.conn.conn.core.ConfigureWindow(
            wnd,
            xcffib.xproto.ConfigWindow.StackMode,
            [xcffib.xproto.StackMode.Above]
        )
    if self.windowMap.get(wnd):
        self.currentGroup.focus(self.windowMap.get(wnd), False)
        self.windowMap.get(wnd).focus(False)
    # Replay the click to the client so our pointer grab doesn't swallow it.
    self.conn.conn.core.AllowEvents(xcffib.xproto.Allow.ReplayPointer, e.time)
    self.conn.conn.flush()
def handle_ButtonPress(self, e):
    """Dispatch a grabbed mouse button press to Click/Drag bindings.

    Click bindings run their commands immediately; Drag bindings start a
    drag (state stashed in self._drag) driven by handle_MotionNotify.
    """
    button_code = e.detail
    state = e.state
    if self.numlockMask:
        # Fold NumLock into the state so bindings fire regardless of the LED.
        state = e.state | self.numlockMask
    k = self.mouseMap.get(button_code)
    # BUG FIX: mouseMap.get() returns None for an unbound button; the
    # original code then crashed with "TypeError: 'NoneType' object is
    # not iterable" in the for-loop below.  Bail out early instead.
    if not k:
        logger.info("Ignoring unknown button: %s" % button_code)
        return
    for m in k:
        if not m or m.modmask & self.validMask != state & self.validMask:
            logger.info("Ignoring unknown button: %s" % button_code)
            continue
        if isinstance(m, Click):
            for i in m.commands:
                if i.check(self):
                    if m.focus == "before":
                        self.cmd_focus_by_click(e)
                    status, val = self.server.call(
                        (i.selectors, i.name, i.args, i.kwargs))
                    if m.focus == "after":
                        self.cmd_focus_by_click(e)
                    if status in (command.ERROR, command.EXCEPTION):
                        logger.error(
                            "Mouse command error %s: %s" % (i.name, val)
                        )
        elif isinstance(m, Drag):
            x = e.event_x
            y = e.event_y
            if m.start:
                # The start command returns the drag origin (e.g. the
                # window's current position) used as the base for deltas.
                i = m.start
                if m.focus == "before":
                    self.cmd_focus_by_click(e)
                status, val = self.server.call(
                    (i.selectors, i.name, i.args, i.kwargs))
                if status in (command.ERROR, command.EXCEPTION):
                    logger.error(
                        "Mouse command error %s: %s" % (i.name, val)
                    )
                    continue
            else:
                val = (0, 0)
            if m.focus == "after":
                self.cmd_focus_by_click(e)
            self._drag = (x, y, val[0], val[1], m.commands)
            # Grab the pointer so motion/release events reach us even
            # outside the clicked window.
            self.root.grab_pointer(
                True,
                xcbq.ButtonMotionMask |
                xcbq.AllButtonsMask |
                xcbq.ButtonReleaseMask,
                xcffib.xproto.GrabMode.Async,
                xcffib.xproto.GrabMode.Async,
            )
def handle_ButtonRelease(self, e):
    """Finish any in-progress Drag binding when its button is released."""
    button_code = e.detail
    # Strip button bits: the released button itself is still set in e.state.
    state = e.state & ~xcbq.AllButtonsMask
    if self.numlockMask:
        state = state | self.numlockMask
    k = self.mouseMap.get(button_code)
    # BUG FIX: mouseMap.get() returns None for an unbound button; iterating
    # None raised TypeError in the original.  Bail out early instead.
    if not k:
        logger.info(
            "Ignoring unknown button release: %s" % button_code
        )
        return
    for m in k:
        if not m:
            logger.info(
                "Ignoring unknown button release: %s" % button_code
            )
            continue
        if isinstance(m, Drag):
            # End the drag and release the pointer grab taken at press time.
            self._drag = None
            self.root.ungrab_pointer()
def handle_MotionNotify(self, e):
    """Drive an active Drag binding: run its commands with the drag
    origin plus the pointer delta appended to their args."""
    if self._drag is None:
        return
    # (press x, press y, start-value x, start-value y, commands)
    ox, oy, rx, ry, cmd = self._drag
    dx = e.event_x - ox
    dy = e.event_y - oy
    if dx or dy:
        for i in cmd:
            if i.check(self):
                status, val = self.server.call((
                    i.selectors,
                    i.name,
                    i.args + (rx + dx, ry + dy, e.event_x, e.event_y),
                    i.kwargs
                ))
                if status in (command.ERROR, command.EXCEPTION):
                    logger.error(
                        "Mouse command error %s: %s" % (i.name, val)
                    )
def handle_ConfigureNotify(self, e):
    """
    Handle xrandr events.
    """
    # Only reacts to a resize of the root window, and only resizes the
    # *current* screen to the new root geometry.
    screen = self.currentScreen
    if e.window == self.root.wid and \
            e.width != screen.width and \
            e.height != screen.height:
        screen.resize(0, 0, e.width, e.height)
def handle_ConfigureRequest(self, e):
    # It's not managed, or not mapped, so we just obey it.
    # Translate the request's value_mask into keyword args, clamping
    # everything to be non-negative, and forward to the window.
    cw = xcffib.xproto.ConfigWindow
    args = {}
    if e.value_mask & cw.X:
        args["x"] = max(e.x, 0)
    if e.value_mask & cw.Y:
        args["y"] = max(e.y, 0)
    if e.value_mask & cw.Height:
        args["height"] = max(e.height, 0)
    if e.value_mask & cw.Width:
        args["width"] = max(e.width, 0)
    if e.value_mask & cw.BorderWidth:
        args["borderwidth"] = max(e.border_width, 0)
    w = xcbq.Window(self.conn, e.window)
    w.configure(**args)
def handle_MappingNotify(self, e):
    """Reload the keymap; if the keyboard mapping changed, re-grab all
    key bindings so they match the new keycodes."""
    self.conn.refresh_keymap()
    if e.request == xcffib.xproto.Mapping.Keyboard:
        self.grabKeys()
def handle_MapRequest(self, e):
    """Start managing a window that asks to be mapped; only actually map
    it when its group is currently visible on a screen."""
    w = xcbq.Window(self.conn, e.window)
    c = self.manage(w)
    if c and (not c.group or not c.group.screen):
        # Belongs to an off-screen group — keep it unmapped for now.
        return
    w.map()
def handle_DestroyNotify(self, e):
    """Stop managing a window that has been destroyed."""
    self.unmanage(e.window)
def handle_UnmapNotify(self, e):
    """Withdraw and stop managing a window that has been unmapped."""
    # Ignore unmap events reported on the root window itself.
    if e.event != self.root.wid:
        c = self.windowMap.get(e.window)
        if c and getattr(c, "group", None):
            try:
                c.window.unmap()
                c.state = window.WithdrawnState
            except xcffib.xproto.WindowError:
                # This means that the window has probably been destroyed,
                # but we haven't yet seen the DestroyNotify (it is likely
                # next in the queue). So, we just let these errors pass
                # since the window is dead.
                pass
        self.unmanage(e.window)
def handle_ScreenChangeNotify(self, e):
    """Forward RandR screen-change events to the ``screen_change`` hook."""
    hook.fire("screen_change", self, e)
def toScreen(self, n, warp=True):
    """
    Have Qtile move to screen and put focus there
    """
    # Out-of-range (too large) indices are silently ignored.
    if n >= len(self.screens):
        return
    # NOTE(review): negative n is not rejected and would index from the
    # end of self.screens — confirm callers never pass one.
    old = self.currentScreen
    self.currentScreen = self.screens[n]
    if old != self.currentScreen:
        hook.fire("current_screen_change")
        self.currentGroup.focus(self.currentWindow, warp)
def moveToGroup(self, group):
    """
    Send the current window to *group*, creating the group first if it
    does not exist yet.  No-op without a current window or group name.
    """
    if not (self.currentWindow and group):
        return
    self.addGroup(group)
    self.currentWindow.togroup(group)
def _items(self, name):
if name == "group":
return True, list(self.groupMap.keys())
elif name == "layout":
return True, list(range(len(self.currentGroup.layouts)))
elif name == "widget":
return False, list(self.widgetMap.keys())
elif name == "bar":
return False, [x.position for x in self.currentScreen.gaps]
elif name == "window":
return True, self.listWID()
elif name == "screen":
return True, list(range(len(self.screens)))
def _select(self, name, sel):
if name == "group":
if sel is None:
return self.currentGroup
else:
return self.groupMap.get(sel)
elif name == "layout":
if sel is None:
return self.currentGroup.layout
else:
return utils.lget(self.currentGroup.layouts, sel)
elif name == "widget":
return self.widgetMap.get(sel)
elif name == "bar":
return getattr(self.currentScreen, sel)
elif name == "window":
if sel is None:
return self.currentWindow
else:
return self.clientFromWID(sel)
elif name == "screen":
if sel is None:
return self.currentScreen
else:
return utils.lget(self.screens, sel)
def listWID(self):
    """Return the X window ids of every managed client."""
    wids = []
    for client in self.windowMap.values():
        wids.append(client.window.wid)
    return wids
def clientFromWID(self, wid):
    """Return the managed client whose X window id equals *wid*, else None."""
    matches = (c for c in self.windowMap.values() if c.window.wid == wid)
    return next(matches, None)
def call_soon(self, func, *args):
    """ A wrapper for the event loop's call_soon which also flushes the X
    event queue to the server after func is called. """
    def f():
        func(*args)
        # Flush so any X requests made by func reach the server promptly.
        self.conn.flush()
    self._eventloop.call_soon(f)
def call_soon_threadsafe(self, func, *args):
    """ Another event loop proxy, see `call_soon`. """
    def f():
        func(*args)
        self.conn.flush()
    self._eventloop.call_soon_threadsafe(f)
def call_later(self, delay, func, *args):
    """ Another event loop proxy, see `call_soon`. """
    def f():
        func(*args)
        self.conn.flush()
    self._eventloop.call_later(delay, f)
def run_in_executor(self, func, *args):
    """ A wrapper for running a function in the event loop's default
    executor. """
    # Returns a future; no X flush here since func runs off-loop.
    return self._eventloop.run_in_executor(None, func, *args)
def cmd_debug(self):
    """Set log level to DEBUG"""
    # Changes the qtile logger threshold at runtime.
    logger.setLevel(logging.DEBUG)
    logger.debug('Switching to DEBUG threshold')
def cmd_info(self):
    """Set log level to INFO"""
    logger.setLevel(logging.INFO)
    logger.info('Switching to INFO threshold')
def cmd_warning(self):
    """Set log level to WARNING"""
    logger.setLevel(logging.WARNING)
    logger.warning('Switching to WARNING threshold')
def cmd_error(self):
    """Set log level to ERROR"""
    logger.setLevel(logging.ERROR)
    logger.error('Switching to ERROR threshold')
def cmd_critical(self):
    """Set log level to CRITICAL"""
    logger.setLevel(logging.CRITICAL)
    logger.critical('Switching to CRITICAL threshold')
def cmd_pause(self):
    """Drops into pdb"""
    # Blocks the event loop until the debugger session ends.
    import pdb
    pdb.set_trace()
def cmd_groups(self):
    """
    Return a dictionary containing information for all groups.
    Example:
        groups()
    """
    info = {}
    for group in self.groups:
        info[group.name] = group.info()
    return info
def cmd_get_info(self):
    """Return a dict mapping group name -> group info.

    CONSISTENCY: this duplicated cmd_groups() line for line; delegate to
    it instead so the two commands cannot drift apart.
    """
    return self.cmd_groups()
def cmd_list_widgets(self):
    """
    List of all addressible widget names.
    """
    return [name for name in self.widgetMap]
def cmd_to_layout_index(self, index, group=None):
    """
    Switch to the layout with the given index in self.layouts.
    :index Index of the layout in the list of layouts.
    :group Group name. If not specified, the current group is assumed.
    """
    target = self.groupMap.get(group) if group else self.currentGroup
    target.toLayoutIndex(index)
def cmd_next_layout(self, group=None):
    """
    Switch to the next layout.
    :group Group name. If not specified, the current group is assumed.
    """
    target = self.groupMap.get(group) if group else self.currentGroup
    target.nextLayout()
def cmd_prev_layout(self, group=None):
    """
    Switch to the prev layout.
    :group Group name. If not specified, the current group is assumed.
    """
    target = self.groupMap.get(group) if group else self.currentGroup
    target.prevLayout()
def cmd_screens(self):
    """
    Return a list of dictionaries providing information on all screens.
    """
    def _geometry(bar):
        # A missing gap/bar is reported as None.
        return bar.geometry() if bar else None

    return [
        dict(
            index=scr.index,
            group=scr.group.name if scr.group is not None else None,
            x=scr.x,
            y=scr.y,
            width=scr.width,
            height=scr.height,
            gaps=dict(
                top=_geometry(scr.top),
                bottom=_geometry(scr.bottom),
                left=_geometry(scr.left),
                right=_geometry(scr.right),
            ),
        )
        for scr in self.screens
    ]
def cmd_simulate_keypress(self, modifiers, key):
    """
    Simulates a keypress on the focused window.
    :modifiers A list of modifier specification strings. Modifiers can
    be one of "shift", "lock", "control" and "mod1" - "mod5".
    :key Key specification.
    Examples:
        simulate_keypress(["control", "mod2"], "k")
    """
    # FIXME: This needs to be done with sendevent, once we have that fixed.
    keysym = xcbq.keysyms.get(key)
    if keysym is None:
        raise command.CommandError("Unknown key: %s" % key)
    keycode = self.conn.first_sym_to_code[keysym]

    # Fake just enough of an X KeyPress event for handle_KeyPress.
    class DummyEv(object):
        pass

    d = DummyEv()
    d.detail = keycode
    try:
        d.state = utils.translateMasks(modifiers)
    except KeyError as v:
        # Unknown modifier name: report it back to the caller.
        return v.args[0]
    self.handle_KeyPress(d)
def cmd_execute(self, cmd, args):
    """
    Executes the specified command, replacing the current process.
    """
    self.stop()
    # os.execv never returns on success — this process becomes `cmd`.
    os.execv(cmd, args)
def cmd_restart(self):
    """
    Restart qtile using the execute command.

    Serializes the current QtileState and passes it to the new process
    via --with-state so groups/screens survive the restart.
    """
    argv = [sys.executable] + sys.argv
    if '--no-spawn' not in argv:
        argv.append('--no-spawn')
    buf = six.BytesIO()
    try:
        pickle.dump(QtileState(self), buf, protocol=0)
    except Exception:
        # BUG FIX: was a bare ``except:`` which also swallowed
        # SystemExit/KeyboardInterrupt.  A failed pickle is logged and the
        # restart proceeds (possibly with empty state), as before.
        logger.error("Unable to pickle qtile state")
    argv = [s for s in argv if not s.startswith('--with-state')]
    argv.append('--with-state=' + buf.getvalue().decode())
    self.cmd_execute(sys.executable, argv)
def cmd_spawn(self, cmd):
    """
    Run cmd in a shell.
    cmd may be a string, which is parsed by shlex.split, or
    a list (similar to subprocess.Popen).

    Returns the spawned process's pid (read back from the intermediate
    child through a pipe), or the failed fork's return value.

    Example:
        spawn("firefox")
        spawn(["xterm", "-T", "Temporary terminal"])
    """
    if isinstance(cmd, six.string_types):
        args = shlex.split(cmd)
    else:
        args = list(cmd)
    # Double-fork so the spawned process is reparented to init and qtile
    # never has to reap it; the pipe carries the grandchild's pid back.
    r, w = os.pipe()
    pid = os.fork()
    if pid < 0:
        os.close(r)
        os.close(w)
        return pid
    if pid == 0:
        os.close(r)
        # close qtile's stdin, stdout, stderr so the called process doesn't
        # pollute our xsession-errors.
        os.close(0)
        os.close(1)
        os.close(2)
        pid2 = os.fork()
        if pid2 == 0:
            os.close(w)
            # Open /dev/null as stdin, stdout, stderr
            try:
                fd = os.open(os.devnull, os.O_RDWR)
            except OSError:
                # This shouldn't happen, catch it just in case
                pass
            else:
                # For Python >=3.4, need to set file descriptor to inheritable
                try:
                    os.set_inheritable(fd, True)
                except AttributeError:
                    pass
                # Again, this shouldn't happen, but we should just check
                if fd > 0:
                    os.dup2(fd, 0)
                    os.dup2(fd, 1)
                    os.dup2(fd, 2)
            try:
                os.execvp(args[0], args)
            except OSError as e:
                logger.error("failed spawn: \"{0}\"\n{1}".format(cmd, e))
            os._exit(1)
        else:
            # Here it doesn't matter if fork failed or not, we just write
            # its return code and exit.
            os.write(w, str(pid2).encode())
            os.close(w)
            # sys.exit raises SystemExit, which will then be caught by our
            # top level catchall and we'll end up with two qtiles; os._exit
            # actually calls exit.
            os._exit(0)
    else:
        os.close(w)
        os.waitpid(pid, 0)
        # 1024 bytes should be enough for any pid. :)
        pid = os.read(r, 1024)
        os.close(r)
        return int(pid)
def cmd_status(self):
    """Liveness probe: return "OK" if Qtile is running."""
    return "OK"
def cmd_sync(self):
    """
    Sync the X display. Should only be used for development.
    """
    self.conn.flush()
def cmd_to_screen(self, n):
    """
    Warp focus to screen n, where n is a 0-based screen number.
    Example:
        to_screen(0)
    """
    return self.toScreen(n)
def cmd_next_screen(self):
    """
    Move to next screen
    """
    current = self.screens.index(self.currentScreen)
    # Wrap around past the last screen.
    return self.toScreen((current + 1) % len(self.screens))
def cmd_prev_screen(self):
    """
    Move to the previous screen
    """
    current = self.screens.index(self.currentScreen)
    # Python's modulo keeps the result non-negative, so this wraps from
    # screen 0 back to the last screen.
    return self.toScreen((current - 1) % len(self.screens))
def cmd_windows(self):
    """
    Return info for each client window.
    """
    # Internal windows (bars etc.) are excluded; see cmd_internal_windows.
    return [
        i.info() for i in self.windowMap.values()
        if not isinstance(i, window.Internal)
    ]
def cmd_internal_windows(self):
    """
    Return info for each internal window (bars, for example).
    """
    return [
        i.info() for i in self.windowMap.values()
        if isinstance(i, window.Internal)
    ]
def cmd_qtile_info(self):
    """
    Returns a dictionary of info on the Qtile instance.
    """
    return {"socketname": self.fname}
def cmd_shutdown(self):
    """
    Quit Qtile.
    """
    self.stop()
def cmd_switch_groups(self, groupa, groupb):
    """
    Switch position of groupa to groupb
    """
    # Silently ignore unknown group names.
    if groupa not in self.groupMap or groupb not in self.groupMap:
        return
    indexa = self.groups.index(self.groupMap[groupa])
    indexb = self.groups.index(self.groupMap[groupb])
    self.groups[indexa], self.groups[indexb] = \
        self.groups[indexb], self.groups[indexa]
    hook.fire("setgroup")
    # update window _NET_WM_DESKTOP
    for group in (self.groups[indexa], self.groups[indexb]):
        for w in group.windows:
            w.group = group
def find_window(self, wid):
    """Focus the managed client with X window id *wid*, switching the
    current screen to its group first if that group is off screen.

    CONSISTENCY FIX: the local variable was named ``window``, shadowing
    the ``window`` module this file uses elsewhere (e.g. in
    handle_UnmapNotify); renamed to ``client``.
    """
    client = self.windowMap.get(wid)
    if client:
        if not client.group.screen:
            self.currentScreen.setGroup(client.group)
        client.group.focus(client, False)
def cmd_findwindow(self, prompt="window", widget="prompt"):
    """Prompt (via the named prompt widget) for a window and focus it."""
    mb = self.widgetMap.get(widget)
    if not mb:
        logger.error("No widget named '%s' present." % widget)
        return
    mb.startInput(
        prompt,
        self.find_window,
        "window",
        strict_completer=True
    )
def cmd_next_urgent(self):
    """Jump to the first window with the urgency hint set, bringing its
    group to the current screen."""
    try:
        nxt = [w for w in self.windowMap.values() if w.urgent][0]
        nxt.group.cmd_toscreen()
        nxt.group.focus(nxt)
    except IndexError:
        pass  # no window had urgent set
def cmd_togroup(self, prompt="group", widget="prompt"):
    """
    Move current window to the selected group in a propmt widget
    prompt: Text with which to prompt user.
    widget: Name of the prompt widget (default: "prompt").
    """
    if not self.currentWindow:
        logger.warning("No window to move")
        return
    mb = self.widgetMap.get(widget)
    if not mb:
        logger.error("No widget named '%s' present." % widget)
        return
    # moveToGroup creates the group if it does not exist yet.
    mb.startInput(prompt, self.moveToGroup, "group", strict_completer=True)
def cmd_switchgroup(self, prompt="group", widget="prompt"):
    """Prompt for a group name and bring that group to the current screen."""
    def f(group):
        if group:
            try:
                self.groupMap[group].cmd_toscreen()
            except KeyError:
                logger.info("No group named '%s' present." % group)
                pass
    mb = self.widgetMap.get(widget)
    if not mb:
        logger.warning("No widget named '%s' present." % widget)
        return
    mb.startInput(prompt, f, "group", strict_completer=True)
def cmd_spawncmd(self, prompt="spawn", widget="prompt",
                 command="%s", complete="cmd"):
    """
    Spawn a command using a prompt widget, with tab-completion.
    prompt: Text with which to prompt user (default: "spawn: ").
    widget: Name of the prompt widget (default: "prompt").
    command: command template (default: "%s").
    complete: Tab completion function (default: "cmd")
    """
    def f(args):
        if args:
            # The user's input is substituted into the command template.
            self.cmd_spawn(command % args)
    try:
        mb = self.widgetMap[widget]
        mb.startInput(prompt, f, complete)
    except KeyError:
        logger.error("No widget named '%s' present." % widget)
def cmd_qtilecmd(self, prompt="command",
                 widget="prompt", messenger="xmessage"):
    """
    Execute a Qtile command using the client syntax.
    Tab completion aids navigation of the command tree.
    prompt: Text to display at the prompt (default: "command: ").
    widget: Name of the prompt widget (default: "prompt").
    messenger: command to display output (default: "xmessage").
        Set this to None to disable.
    """
    def f(cmd):
        if cmd:
            # c here is used in eval() below
            c = command.CommandRoot(self)  # noqa
            try:
                cmd_arg = str(cmd).split(' ')
            except AttributeError:
                return
            cmd_len = len(cmd_arg)
            if cmd_len == 0:
                logger.info('No command entered.')
                return
            try:
                # SECURITY NOTE: eval() of user-typed input.  Input only
                # comes from the local prompt widget, but anything typed
                # here executes as Python in the qtile process.
                result = eval('c.%s' % (cmd))
            except (
                    command.CommandError,
                    command.CommandException,
                    AttributeError) as err:
                logger.error(err)
                result = None
            if result is not None:
                from pprint import pformat
                message = pformat(result)
                if messenger:
                    self.cmd_spawn('%s "%s"' % (messenger, message))
                logger.info(result)

    # BUG FIX: the original used ``self.widgetMap[widget]``, which raises
    # KeyError for an unknown widget name and made the ``if not mb`` error
    # path below unreachable.  Use .get() so the error is logged instead.
    mb = self.widgetMap.get(widget)
    if not mb:
        logger.error("No widget named %s present." % widget)
        return
    mb.startInput(prompt, f, "qsh")
def cmd_addgroup(self, group):
    """Add a group with the given name; delegates to addGroup()."""
    return self.addGroup(group)
def cmd_delgroup(self, group):
    """Delete the group with the given name; delegates to delGroup()."""
    return self.delGroup(group)
def cmd_add_rule(self, match_args, rule_args, min_priorty=False):
    """
    Add a dgroup rule, returns rule_id needed to remove it
    param: match_args (config.Match arguments)
    param: rule_args (config.Rule arguments)
    param: min_priorty if the rule is added with minimun prioriry(last)
    """
    # NOTE: the misspelled keyword ``min_priorty`` is part of the public
    # command interface and cannot be renamed without breaking callers.
    if not self.dgroups:
        logger.warning('No dgroups created')
        return
    match = Match(**match_args)
    rule = Rule(match, **rule_args)
    return self.dgroups.add_rule(rule, min_priorty)
def cmd_remove_rule(self, rule_id):
    """Remove a dgroup rule previously added with cmd_add_rule()."""
    self.dgroups.remove_rule(rule_id)
def cmd_run_external(self, full_path):
    """Import the Python file at *full_path* and call its ``main(qtile)``,
    capturing anything it prints to stdout.

    Returns the captured stdout plus any formatted error text.
    """
    def format_error(path, e):
        # One-line summary of why main() could not be called.
        s = """Can't call "main" from "{path}"\n\t{err_name}: {err}"""
        return s.format(path=path, err_name=e.__class__.__name__, err=e)

    module_name = os.path.splitext(os.path.basename(full_path))[0]
    dir_path = os.path.dirname(full_path)
    err_str = ""
    local_stdout = six.BytesIO()
    old_stdout = sys.stdout
    sys.stdout = local_stdout
    # COMPAT FIX: sys.exc_clear() only exists on Python 2; calling it
    # unconditionally raises AttributeError on Python 3.
    if hasattr(sys, "exc_clear"):
        sys.exc_clear()
    try:
        module = _import_module(module_name, dir_path)
        module.main(self)
    except ImportError as e:
        err_str += format_error(full_path, e)
    except Exception:
        # Was a bare ``except:``; narrowed so SystemExit/KeyboardInterrupt
        # still propagate.  Any other failure is reported to the caller.
        (exc_type, exc_value, exc_traceback) = sys.exc_info()
        err_str += traceback.format_exc()
        err_str += format_error(full_path, exc_type(exc_value))
    finally:
        if hasattr(sys, "exc_clear"):
            sys.exc_clear()
        sys.stdout = old_stdout
        # BUG FIX: read the captured output *before* closing the buffer —
        # the original called getvalue() after close(), which raises
        # "ValueError: I/O operation on closed file".
        captured = local_stdout.getvalue()
        local_stdout.close()
    return captured + err_str
def cmd_hide_show_bar(self, position="all"):
    """
    param: position one of: "top", "bottom", "left", "right" or "all"
    """
    if position == "all":
        screen = self.currentScreen
        new_state = None
        # All bars follow the toggled state of the first bar found.
        for bar in (screen.left, screen.right, screen.top, screen.bottom):
            if not bar:
                continue
            if new_state is None:
                new_state = not bar.is_show()
            bar.show(new_state)
        if new_state is not None:
            self.currentGroup.layoutAll()
        else:
            logger.warning("Not found bar for hide/show.")
        return
    if position in ("top", "bottom", "left", "right"):
        bar = getattr(self.currentScreen, position)
        if bar:
            bar.show(not bar.is_show())
            self.currentGroup.layoutAll()
        else:
            logger.warning(
                "Not found bar in position '%s' for hide/show." % position)
        return
    logger.error("Invalid position value:%s" % position)
def cmd_get_state(self):
    """Return the current QtileState pickled (protocol 0) as a string."""
    buf = six.BytesIO()
    pickle.dump(QtileState(self), buf, protocol=0)
    state = buf.getvalue().decode()
    logger.info('State = ')
    # Collapse newlines so the state fits on one log line.
    logger.info(''.join(state.split('\n')))
    return state
def cmd_tracemalloc_toggle(self):
    """Toggle tracemalloc memory tracing on or off."""
    if tracemalloc.is_tracing():
        tracemalloc.stop()
    else:
        tracemalloc.start()
def cmd_tracemalloc_dump(self):
    """Dump a tracemalloc snapshot into the cache directory.

    Returns [True, dump_path] on success, [False, reason] when tracing
    is not active; raises CommandError if tracemalloc is unavailable.
    """
    # ``tracemalloc`` is presumably None when the module failed to import
    # (e.g. on Python 2) — hence the truthiness check before use.
    if not tracemalloc:
        logger.warning('No tracemalloc module')
        raise command.CommandError("No tracemalloc module")
    if not tracemalloc.is_tracing():
        return [False, "Trace not started"]
    cache_directory = get_cache_dir()
    malloc_dump = os.path.join(cache_directory, "qtile_tracemalloc.dump")
    tracemalloc.take_snapshot().dump(malloc_dump)
    return [True, malloc_dump]
|
himaaaatti/qtile
|
libqtile/manager.py
|
Python
|
mit
| 59,989
| 0.000267
|
# Copyright (C) 2007, Eduardo Silva <edsiper@gmail.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
from gi.repository import Gdk
from sugar3.graphics import style
from sugar3.graphics.palette import WidgetInvoker
def _get_screen_area():
    """Return the screen rectangle inset by the frame thickness, i.e. the
    area where palettes may be placed without overlapping the frame.

    NOTE(review): only a single frame thickness is subtracted from
    width/height (with x/y already offset) — confirm the intended inset.
    """
    thickness = style.GRID_CELL_SIZE
    area = Gdk.Rectangle()
    area.x = thickness
    area.y = thickness
    area.width = Gdk.Screen.width() - thickness
    area.height = Gdk.Screen.height() - thickness
    return area
class FrameWidgetInvoker(WidgetInvoker):
    """Palette invoker for frame widgets: anchors the palette to the
    widget and constrains it to the screen area inside the frame."""
    def __init__(self, widget):
        WidgetInvoker.__init__(self, widget, widget.get_child())
        # Anchor the palette to the widget instead of the pointer position.
        self._position_hint = self.ANCHORED
        # Keep palettes within the area not covered by the frame.
        self._screen_area = _get_screen_area()
|
godiard/sugar
|
src/jarabe/frame/frameinvoker.py
|
Python
|
gpl-2.0
| 1,411
| 0
|
#!/usr/bin/env python
NAME = 'F5 Trafficshield'
def is_waf(self):
    """Detect F5 TrafficShield via its ASINFO cookie or Server header."""
    for hv in [['cookie', '^ASINFO='], ['server', 'F5-TrafficShield']]:
        r = self.matchheader(hv)
        if r is None:
            # A None from matchheader means the response/headers could not
            # be inspected at all — abort detection (implicitly None).
            return
        elif r:
            return r
    # the following based on nmap's http-waf-fingerprint.nse
    # NOTE(review): this re-checks the same Server header as the loop
    # above and so appears redundant; kept as-is for parity with the
    # nmap fingerprint it was ported from.
    if self.matchheader(('server', 'F5-TrafficShield')):
        return True
    return False
|
thinksabin/wafw00f
|
wafw00f/plugins/f5trafficshield.py
|
Python
|
bsd-3-clause
| 408
| 0
|
"""Implementation of :class:`SymPyRealDomain` class. """
from sympy.polys.domains.realdomain import RealDomain
from sympy.polys.domains.groundtypes import SymPyRealType
class SymPyRealDomain(RealDomain):
    """Domain for real numbers based on SymPy Float type. """
    # Element type and canonical constants of the domain.
    dtype = SymPyRealType
    zero = dtype(0)
    one = dtype(1)
    alias = 'RR_sympy'
    def __init__(self):
        pass
    # Conversion protocol: ``K1`` is the target domain (this one), ``K0``
    # the source domain, ``a`` the element being converted.
    def from_ZZ_python(K1, a, K0):
        """Convert a Python `int` object to `dtype`. """
        return SymPyRealType(a)
    def from_QQ_python(K1, a, K0):
        """Convert a Python `Fraction` object to `dtype`. """
        return SymPyRealType(a.numerator) / a.denominator
    def from_ZZ_sympy(K1, a, K0):
        """Convert a SymPy `Integer` object to `dtype`. """
        return SymPyRealType(a.p)
    def from_QQ_sympy(K1, a, K0):
        """Convert a SymPy `Rational` object to `dtype`. """
        return SymPyRealType(a.p) / a.q
    def from_ZZ_gmpy(K1, a, K0):
        """Convert a GMPY `mpz` object to `dtype`. """
        return SymPyRealType(int(a))
    def from_QQ_gmpy(K1, a, K0):
        """Convert a GMPY `mpq` object to `dtype`. """
        return SymPyRealType(int(a.numer())) / int(a.denom())
    def from_RR_sympy(K1, a, K0):
        """Convert a SymPy `Float` object to `dtype`. """
        return a
    def from_RR_mpmath(K1, a, K0):
        """Convert a mpmath `mpf` object to `dtype`. """
        return SymPyRealType(a)
|
devs1991/test_edx_docmode
|
venv/lib/python2.7/site-packages/sympy/polys/domains/sympyrealdomain.py
|
Python
|
agpl-3.0
| 1,456
| 0.00206
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
from authentic2.compat import user_model_label
class Migration(SchemaMigration):
def forwards(self, orm):
    """Widen the SAML model columns (session keys, provider/assertion ids)."""
    # Changing field 'LibertySession.django_session_key'
    db.alter_column(u'saml_libertysession', 'django_session_key', self.gf('django.db.models.fields.CharField')(max_length=128))
    # Changing field 'LibertyArtifact.provider_id'
    db.alter_column(u'saml_libertyartifact', 'provider_id', self.gf('django.db.models.fields.CharField')(max_length=256))
    # Changing field 'LibertyArtifact.artifact'
    db.alter_column(u'saml_libertyartifact', 'artifact', self.gf('django.db.models.fields.CharField')(max_length=128, primary_key=True))
    # Changing field 'LibertyArtifact.django_session_key'
    db.alter_column(u'saml_libertyartifact', 'django_session_key', self.gf('django.db.models.fields.CharField')(max_length=128))
    # Changing field 'LibertyManageDump.django_session_key'
    db.alter_column(u'saml_libertymanagedump', 'django_session_key', self.gf('django.db.models.fields.CharField')(max_length=128))
    # Changing field 'LibertySessionSP.django_session_key'
    db.alter_column(u'saml_libertysessionsp', 'django_session_key', self.gf('django.db.models.fields.CharField')(max_length=128))
    # Changing field 'LibertyAssertion.provider_id'
    db.alter_column(u'saml_libertyassertion', 'provider_id', self.gf('django.db.models.fields.CharField')(max_length=256))
    # Changing field 'LibertyAssertion.assertion_id'
    db.alter_column(u'saml_libertyassertion', 'assertion_id', self.gf('django.db.models.fields.CharField')(max_length=128))
    # Changing field 'LibertyAssertion.session_index'
    db.alter_column(u'saml_libertyassertion', 'session_index', self.gf('django.db.models.fields.CharField')(max_length=128))
    # Changing field 'LibertySessionDump.django_session_key'
    db.alter_column(u'saml_libertysessiondump', 'django_session_key', self.gf('django.db.models.fields.CharField')(max_length=128))
def backwards(self, orm):
# Changing field 'LibertySession.django_session_key'
db.alter_column(u'saml_libertysession', 'django_session_key', self.gf('django.db.models.fields.CharField')(max_length=40))
# Changing field 'LibertyArtifact.provider_id'
db.alter_column(u'saml_libertyartifact', 'provider_id', self.gf('django.db.models.fields.CharField')(max_length=80))
# Changing field 'LibertyArtifact.artifact'
db.alter_column(u'saml_libertyartifact', 'artifact', self.gf('django.db.models.fields.CharField')(max_length=40, primary_key=True))
# Changing field 'LibertyArtifact.django_session_key'
db.alter_column(u'saml_libertyartifact', 'django_session_key', self.gf('django.db.models.fields.CharField')(max_length=40))
# Changing field 'LibertyManageDump.django_session_key'
db.alter_column(u'saml_libertymanagedump', 'django_session_key', self.gf('django.db.models.fields.CharField')(max_length=40))
# Changing field 'LibertySessionSP.django_session_key'
db.alter_column(u'saml_libertysessionsp', 'django_session_key', self.gf('django.db.models.fields.CharField')(max_length=40))
# Changing field 'LibertyAssertion.provider_id'
db.alter_column(u'saml_libertyassertion', 'provider_id', self.gf('django.db.models.fields.CharField')(max_length=80))
# Changing field 'LibertyAssertion.assertion_id'
db.alter_column(u'saml_libertyassertion', 'assertion_id', self.gf('django.db.models.fields.CharField')(max_length=50))
# Changing field 'LibertyAssertion.session_index'
db.alter_column(u'saml_libertyassertion', 'session_index', self.gf('django.db.models.fields.CharField')(max_length=80))
# Changing field 'LibertySessionDump.django_session_key'
db.alter_column(u'saml_libertysessiondump', 'django_session_key', self.gf('django.db.models.fields.CharField')(max_length=40))
models = {
u'attribute_aggregator.attributesource': {
'Meta': {'object_name': 'AttributeSource'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '200'}),
'namespace': ('django.db.models.fields.CharField', [], {'default': "('Default', 'Default')", 'max_length': '100'})
},
user_model_label: {
'Meta': {'object_name': user_model_label.split('.')[-1]},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
},
u'idp.attributeitem': {
'Meta': {'object_name': 'AttributeItem'},
'attribute_name': ('django.db.models.fields.CharField', [], {'default': "('OpenLDAProotDSE', 'OpenLDAProotDSE')", 'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'output_name_format': ('django.db.models.fields.CharField', [], {'default': "('urn:oasis:names:tc:SAML:2.0:attrname-format:uri', 'SAMLv2 URI')", 'max_length': '100'}),
'output_namespace': ('django.db.models.fields.CharField', [], {'default': "('Default', 'Default')", 'max_length': '100'}),
'required': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'source': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['attribute_aggregator.AttributeSource']", 'null': 'True', 'blank': 'True'})
},
u'idp.attributelist': {
'Meta': {'object_name': 'AttributeList'},
'attributes': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'attributes of the list'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['idp.AttributeItem']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'})
},
u'idp.attributepolicy': {
'Meta': {'object_name': 'AttributePolicy'},
'allow_attributes_selection': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'ask_consent_attributes': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'attribute_filter_for_sso_from_push_sources': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'filter attributes of push sources with list'", 'null': 'True', 'to': u"orm['idp.AttributeList']"}),
'attribute_list_for_sso_from_pull_sources': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'attributes from pull sources'", 'null': 'True', 'to': u"orm['idp.AttributeList']"}),
'enabled': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'filter_source_of_filtered_attributes': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'forward_attributes_from_push_sources': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'map_attributes_from_push_sources': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'map_attributes_of_filtered_attributes': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'output_name_format': ('django.db.models.fields.CharField', [], {'default': "('urn:oasis:names:tc:SAML:2.0:attrname-format:uri', 'SAMLv2 URI')", 'max_length': '100'}),
'output_namespace': ('django.db.models.fields.CharField', [], {'default': "('Default', 'Default')", 'max_length': '100'}),
'send_error_and_no_attrs_if_missing_required_attrs': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'source_filter_for_sso_from_push_sources': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'filter attributes of push sources with sources'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['attribute_aggregator.AttributeSource']"})
},
u'saml.authorizationattributemap': {
'Meta': {'object_name': 'AuthorizationAttributeMap'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '40'})
},
u'saml.authorizationattributemapping': {
'Meta': {'object_name': 'AuthorizationAttributeMapping'},
'attribute_name': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'attribute_value': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'attribute_value_format': ('django.db.models.fields.CharField', [], {'max_length': '40', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'map': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['saml.AuthorizationAttributeMap']"}),
'source_attribute_name': ('django.db.models.fields.CharField', [], {'max_length': '40', 'blank': 'True'})
},
u'saml.authorizationsppolicy': {
'Meta': {'object_name': 'AuthorizationSPPolicy'},
'attribute_map': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'authorization_attributes'", 'null': 'True', 'to': u"orm['saml.AuthorizationAttributeMap']"}),
'default_denial_message': ('django.db.models.fields.CharField', [], {'default': "u'You are not authorized to access the service.'", 'max_length': '80'}),
'enabled': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'})
},
u'saml.idpoptionssppolicy': {
'Meta': {'object_name': 'IdPOptionsSPPolicy'},
'accept_slo': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'allow_create': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'back_url': ('django.db.models.fields.CharField', [], {'default': "'/'", 'max_length': '200'}),
'binding_for_sso_response': ('django.db.models.fields.CharField', [], {'default': "'urn:oasis:names:tc:SAML:2.0:bindings:HTTP-Artifact'", 'max_length': '200'}),
'enable_binding_for_sso_response': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'enable_http_method_for_defederation_request': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'enable_http_method_for_slo_request': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'enabled': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'force_user_consent': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'forward_slo': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'handle_persistent': ('django.db.models.fields.CharField', [], {'default': "'AUTHSAML2_UNAUTH_PERSISTENT_ACCOUNT_LINKING_BY_AUTH'", 'max_length': '200'}),
'handle_transient': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '200'}),
'http_method_for_defederation_request': ('django.db.models.fields.IntegerField', [], {'default': '5', 'max_length': '200'}),
'http_method_for_slo_request': ('django.db.models.fields.IntegerField', [], {'default': '4', 'max_length': '200'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '200'}),
'no_nameid_policy': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'requested_name_id_format': ('django.db.models.fields.CharField', [], {'default': "'none'", 'max_length': '200'}),
'transient_is_persistent': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'want_authn_request_signed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'want_force_authn_request': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'want_is_passive_authn_request': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
u'saml.keyvalue': {
'Meta': {'object_name': 'KeyValue'},
'key': ('django.db.models.fields.CharField', [], {'max_length': '128', 'primary_key': 'True'}),
'value': ('authentic2.saml.fields.PickledObjectField', [], {})
},
u'saml.libertyartifact': {
'Meta': {'object_name': 'LibertyArtifact'},
'artifact': ('django.db.models.fields.CharField', [], {'max_length': '128', 'primary_key': 'True'}),
'content': ('django.db.models.fields.TextField', [], {}),
'creation': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'django_session_key': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'provider_id': ('django.db.models.fields.CharField', [], {'max_length': '256'})
},
u'saml.libertyassertion': {
'Meta': {'object_name': 'LibertyAssertion'},
'assertion': ('django.db.models.fields.TextField', [], {}),
'assertion_id': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'creation': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'provider_id': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'session_index': ('django.db.models.fields.CharField', [], {'max_length': '128'})
},
u'saml.libertyfederation': {
'Meta': {'unique_together': "(('name_id_qualifier', 'name_id_format', 'name_id_content', 'name_id_sp_name_qualifier'),)", 'object_name': 'LibertyFederation'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'idp_id': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'name_id_content': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name_id_format': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'name_id_qualifier': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'name_id_sp_name_qualifier': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'name_id_sp_provided_id': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'sp_id': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['%s']" % user_model_label})
},
u'saml.libertyidentitydump': {
'Meta': {'object_name': 'LibertyIdentityDump'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'identity_dump': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['%s']" % user_model_label, 'unique': 'True'})
},
u'saml.libertyidentityprovider': {
'Meta': {'object_name': 'LibertyIdentityProvider'},
'authorization_policy': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'authorization_policy'", 'null': 'True', 'to': u"orm['saml.AuthorizationSPPolicy']"}),
'enable_following_authorization_policy': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'enable_following_idp_options_policy': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'enabled': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'idp_options_policy': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'idp_options_policy'", 'null': 'True', 'to': u"orm['saml.IdPOptionsSPPolicy']"}),
'liberty_provider': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'identity_provider'", 'unique': 'True', 'primary_key': 'True', 'to': u"orm['saml.LibertyProvider']"})
},
u'saml.libertymanagedump': {
'Meta': {'object_name': 'LibertyManageDump'},
'django_session_key': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'manage_dump': ('django.db.models.fields.TextField', [], {'blank': 'True'})
},
u'saml.libertyprovider': {
'Meta': {'object_name': 'LibertyProvider'},
'ca_cert_chain': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'entity_id': ('django.db.models.fields.URLField', [], {'unique': 'True', 'max_length': '200'}),
'entity_id_sha1': ('django.db.models.fields.CharField', [], {'max_length': '40', 'blank': 'True'}),
'federation_source': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'metadata': ('django.db.models.fields.TextField', [], {}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '140', 'blank': 'True'}),
'protocol_conformance': ('django.db.models.fields.IntegerField', [], {'max_length': '10'}),
'public_key': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'ssl_certificate': ('django.db.models.fields.TextField', [], {'blank': 'True'})
},
u'saml.libertyproviderpolicy': {
'Meta': {'object_name': 'LibertyProviderPolicy'},
'authn_request_signature_check_hint': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '64'})
},
u'saml.libertyserviceprovider': {
'Meta': {'object_name': 'LibertyServiceProvider'},
'attribute_policy': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'attribute_policy'", 'null': 'True', 'to': u"orm['idp.AttributePolicy']"}),
'enable_following_attribute_policy': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'enable_following_sp_options_policy': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'enabled': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'liberty_provider': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'service_provider'", 'unique': 'True', 'primary_key': 'True', 'to': u"orm['saml.LibertyProvider']"}),
'policy': ('django.db.models.fields.related.ForeignKey', [], {'default': '1', 'to': u"orm['saml.LibertyProviderPolicy']", 'null': 'True'}),
'sp_options_policy': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'sp_options_policy'", 'null': 'True', 'to': u"orm['saml.SPOptionsIdPPolicy']"})
},
u'saml.libertysession': {
'Meta': {'object_name': 'LibertySession'},
'assertion': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['saml.LibertyAssertion']", 'null': 'True'}),
'creation': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'django_session_key': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'federation': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['saml.LibertyFederation']", 'null': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name_id_content': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name_id_format': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True'}),
'name_id_qualifier': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True'}),
'name_id_sp_name_qualifier': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True'}),
'provider_id': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'session_index': ('django.db.models.fields.CharField', [], {'max_length': '80'})
},
u'saml.libertysessiondump': {
'Meta': {'object_name': 'LibertySessionDump'},
'django_session_key': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'kind': ('django.db.models.fields.IntegerField', [], {}),
'session_dump': ('django.db.models.fields.TextField', [], {'blank': 'True'})
},
u'saml.libertysessionsp': {
'Meta': {'object_name': 'LibertySessionSP'},
'django_session_key': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'federation': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['saml.LibertyFederation']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'session_index': ('django.db.models.fields.CharField', [], {'max_length': '80'})
},
u'saml.spoptionsidppolicy': {
'Meta': {'object_name': 'SPOptionsIdPPolicy'},
'accept_slo': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'accepted_name_id_format': ('authentic2.saml.fields.MultiSelectField', [], {'max_length': '31', 'blank': 'True'}),
'ask_user_consent': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'authn_request_signed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'default_name_id_format': ('django.db.models.fields.CharField', [], {'default': "'none'", 'max_length': '200'}),
'enabled': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'encrypt_assertion': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'encrypt_nameid': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'forward_slo': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'idp_initiated_sso': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'prefered_assertion_consumer_binding': ('django.db.models.fields.CharField', [], {'default': "'meta'", 'max_length': '4'})
}
}
complete_apps = ['saml']
|
adieu/authentic2
|
authentic2/saml/migrations/0022_auto__chg_field_libertysession_django_session_key__chg_field_libertyar.py
|
Python
|
agpl-3.0
| 24,009
| 0.006872
|
import time
import json
import logging
import re
import requests
from telegram import InlineQueryResultArticle, InputTextMessageContent, ParseMode
from telegram.ext import ConversationHandler
from keybaseproofbot.models import Proof
from keybaseproofbot.proof_handler import check_proof_message, lookup_proof, store_proof, check_key
from keybaseproofbot.utils import fix_dashes
from keybaseproofbot.wrapperfilter import filter_group, filter_private
@filter_group
def proof_message_handle(bot, update):
    """Handle a message posted in the proofs group.

    Expects exactly two 'pre'/'code' entities (the proof JSON object and
    the signed PGP block).  Verifies and stores the proof on success;
    otherwise replies with the reason and deletes the offending message.
    """
    if update.message.from_user.username == '':
        logging.info("User (%s) without username sent message.",
                     update.message.from_user.first_name)
        return
    entities = [
        entity for entity in update.message.entities
        if entity.type == 'pre' or entity.type == 'code'
    ]
    if len(entities) != 2:
        logging.warning(
            "Message with message id %s from sender %s does not have two pre blocks.",
            update.message.message_id, update.message.from_user.username)
        bot.delete_message(chat_id=update.message.chat_id,
                           message_id=update.message.message_id)
        return
    succes, proof = check_proof_message(bot, update, entities)
    if succes:
        # The second entity is the signed block; slice it out of the raw text.
        signed_block = update.message.text[entities[1].offset:entities[1]
                                           .offset + entities[1].length]
        store_proof(proof, signed_block, update)
        bot.send_message(
            chat_id=update.message.chat_id,
            text="Your proof was succesfully stored!",
            reply_to_message_id=update.message.message_id)
    elif proof == 'invalid_sign':
        bot.send_message(
            chat_id=update.message.chat_id,
            text="Your proof is not valid. Paging @pingiun to take a look at it.",
            reply_to_message_id=update.message.message_id)
        # Bug fix: telegram Message exposes `message_id`, not `id`; the old
        # `update.message.id` raised AttributeError, so invalid proofs were
        # never deleted.  Also use the snake_case `delete_message` alias
        # consistently with the branch above.
        bot.delete_message(chat_id=update.message.chat_id,
                           message_id=update.message.message_id)
    elif proof == 'notimplemented':
        bot.send_message(
            chat_id=update.message.chat_id,
            text="Using other hosts than keybase.io is not supported yet.")
        # Same `message_id` fix as above.
        bot.delete_message(chat_id=update.message.chat_id,
                           message_id=update.message.message_id)
@filter_group
def other_message_handle(bot, update):
    """Delete any non-proof message posted in the proofs group.

    Bug fix: telegram Message exposes `message_id`, not `id`; the old
    `update.message.id` raised AttributeError so nothing was ever deleted.
    """
    bot.delete_message(
        chat_id=update.message.chat_id,
        message_id=update.message.message_id)
def inline_handler(bot, update):
    """Answer an inline query with cached proofs whose Telegram username
    contains the query string."""
    search = update.inline_query.query
    matches = Proof.query.filter(
        Proof.telegram_username.like("%{}%".format(search))).all()
    answers = []
    for entry in matches:
        content = InputTextMessageContent(
            "✅ https://keybase.io/{} is @{} on Telegram (cached). You can talk to @KeybaseProofBot for current information, or check out @KeybaseProofs.".format(
                entry.keybase_username, entry.telegram_username))
        answers.append(InlineQueryResultArticle(
            id=entry.telegram_username,
            title=entry.telegram_username,
            input_message_content=content))
    update.inline_query.answer(answers)
@filter_private
def start(bot, update):
    """Send the welcome/help text listing all available bot commands."""
    welcome = (
        "Hello, welcome to the (unofficial) Keybase Telegram Proving Bot. "
        "I can help you search for Telegram user proofs.\n"
        "*Please keep in mind that this bot is unofficial, which means that your telegram proof "
        "is not included in your signature chain. Revocations are also not implemented (yet).*\n\n"
        "You can control me with these commands:\n\n"
        "/newproof - build a proof message to post in @KeybaseProofs\n"
        "/lookup - check if a user has proved their identity on Telegram\n"
        "/forwardproof - the bot forwards the proof message for a certain Telegram user\n"
        "/cancel - cancel the current command")
    bot.send_message(chat_id=update.message.chat_id,
                     text=welcome,
                     parse_mode=ParseMode.MARKDOWN)
@filter_private
def notusername(bot, update):
    """Prompt for a well-formed Telegram username (conversation fallback)."""
    prompt = ("Please enter a username like @pingiun, or /cancel to cancel "
              "the current command.")
    bot.send_message(chat_id=update.message.chat_id, text=prompt)
@filter_private
def notkbusername(bot, update):
    """Tell the user their Keybase username input was not understood."""
    bot.send_message(chat_id=update.message.chat_id,
                     text="Please enter a correct input.")
@filter_private
def cancel(bot, update):
    """Abort the running conversation and confirm to the user."""
    bot.send_message(chat_id=update.message.chat_id,
                     text="Canceled current command.")
    return ConversationHandler.END
@filter_private
def newproof(bot, update, args):
    """Entry point for /newproof: start building a proof for the caller.

    With one argument the keybase username is taken from the command;
    otherwise the conversation asks for it ('enter_kbusername' state).
    """
    if not update.message.from_user.username:
        bot.send_message(
            chat_id=update.message.chat_id,
            text="You need to have a username to prove it!")
        # Bug fix: without a Telegram username there is nothing to prove.
        # The old code fell through and prompted for a keybase username
        # anyway; end the conversation instead.
        return ConversationHandler.END
    if len(args) == 1:
        update.message.text = args[0]
        return make_json(bot, update)
    bot.send_message(
        chat_id=update.message.chat_id,
        text="Please enter a keybase username to connect to your Telegram account."
    )
    return 'enter_kbusername'
# Per-chat staging area for proof JSON objects between the /newproof step
# and the signed-block check in check_block (keyed by chat id).
temp_proof_data = {}


@filter_private
def make_json(bot, update):
    """Build the JSON object the user must PGP-sign to prove their identity.

    Looks the keybase username up via the Keybase HTTP API, assembles a
    web_service_binding object, stashes it in ``temp_proof_data`` and sends
    the user the signing commands.  Returns the next conversation state
    ('sign_block') on success.
    """
    # Accept "user", "@user", "keybase.io/user" or "https://keybase.io/user".
    # Bug fixes: the character class now includes digits (valid in Keybase
    # usernames) and the dots in "keybase.io" are escaped.
    match = re.match(r'^(?:(?:(?:https://)?keybase\.io/)|@)?([A-Za-z0-9_]+)$',
                     update.message.text)
    if not match:
        return notkbusername(bot, update)
    # Bug fix: group(0) is the whole match including any "@"/"keybase.io/"
    # prefix; group(1) is the bare username the API lookup needs.
    username = match.group(1)
    r = requests.get(
        'https://keybase.io/_/api/1.0/user/lookup.json?usernames={}&fields=basics,public_keys'.format(username))
    try:
        keybase = r.json()
    except json.decoder.JSONDecodeError as e:
        logging.exception(e)
        bot.send_message(
            chat_id=update.message.chat_id,
            text="Something went wrong while looking up your username.")
        return ConversationHandler.END
    try:
        fingerprint = keybase['them'][0]['public_keys']['primary'][
            'key_fingerprint']
        host = 'keybase.io'
        # NOTE(review): a PGP long key id is conventionally the *last* 16 hex
        # chars of the fingerprint; fingerprint[:-16] keeps the original
        # behaviour so already-stored proofs stay verifiable -- confirm.
        key_id = fingerprint[:-16]
        kid = keybase['them'][0]['public_keys']['primary']['kid']
        uid = keybase['them'][0]['id']
        username = keybase['them'][0]['basics']['username']
    except KeyError:
        bot.send_message(
            chat_id=update.message.chat_id,
            text="Your username was not found on Keybase!")
        return
    try:
        data = {
            'body': {
                'key': {
                    'fingerprint': fingerprint,
                    'host': host,
                    'key_id': key_id,
                    'kid': kid,
                    'uid': uid,
                    'username': username,
                },
                'service': {
                    'name': 'telegram',
                    'username': update.message.from_user.username,
                },
                'type': 'web_service_binding',
                'version': 1,
            },
            'ctime': int(time.time()),
            'expire_in': 60 * 60 * 24 * 365 * 1,  # Expire in 1 year
            'tag': 'signature'
        }
        temp_proof_data[update.message.chat_id] = data
        json_block = json.dumps(data, indent=4)
    except Exception as e:
        logging.exception(e)
        bot.send_message(
            chat_id=update.message.chat_id, text="Something went wrong!")
        return
    bot.send_message(chat_id=update.message.chat_id,
                     text="Okay, please paste the following into your terminal (where you can use the keybase cli client) and paste the output here.")
    bot.send_message(
        chat_id=update.message.chat_id,
        text="```\nkeybase pgp sign --message \"{}\"\n```".format(json_block.replace(r'"', r'\"')),
        parse_mode=ParseMode.MARKDOWN)
    bot.send_message(chat_id=update.message.chat_id,
                     text="If want to use gpg(2) you can copy and paste this command instead:")
    bot.send_message(chat_id=update.message.chat_id,
                     text="```\necho \"{}\" | gpg -a --sign\n```".format(json_block.replace(r'"', r'\"')),
                     parse_mode=ParseMode.MARKDOWN)
    return 'sign_block'
@filter_private
def check_block(bot, update):
    """Verify the signed PGP block the user pasted and, if it is valid,
    hand them the complete proof message to post in @KeybaseProofs."""
    if update.message.text.startswith('/cancel'):
        # Bug fix: cancel() takes (bot, update); calling it without
        # arguments raised TypeError instead of ending the conversation.
        return cancel(bot, update)
    lines = update.message.text.split('\n')
    if len(lines) > 1 and not ("BEGIN PGP MESSAGE" in lines[0] and "END PGP MESSAGE" in lines[-1]):
        bot.send_message(
            chat_id=update.message.chat_id,
            text="Your message is not a valid gpg message.")
        return ConversationHandler.END
    del lines
    pgp_content = fix_dashes(update.message.text)
    proof_data = temp_proof_data[update.message.chat_id]
    # See mom, i clean up after myself:
    del temp_proof_data[update.message.chat_id]
    # Re-group the fingerprint into space-separated blocks of four hex chars.
    fingerprint = ' '.join([
        proof_data['body']['key']['fingerprint'][i:i + 4].upper()
        for i in range(0, len(proof_data['body']['key']['fingerprint']), 4)
    ])
    succes, proof = check_key(bot, proof_data, pgp_content,
                              update.message.from_user.username,
                              update.message.chat_id)
    if succes:
        bot.send_message(
            chat_id=update.message.chat_id,
            text="Your signed block is valid. You can now copy and paste the following "
            "message to @KeybaseProofs.")
        bot.send_message(chat_id=update.message.chat_id,
                         text="Keybase proof\n\n"
                         "I hereby claim:\n\n"
                         "- I am @{} on telegram.\n"
                         "- I am {} on keybase.\n"
                         "- I have a public key whose fingerprint is {}\n\n"
                         "To claim this, I am signing this object:\n"
                         "```\n{}\n```\n"
                         "with the key from above, yielding:\n"
                         "```\n{}\n```\n"
                         "Finally, I am proving my Telegram account by posting it in @KeybaseProofs"
                         .format(update.message.from_user.username,
                                 proof_data['body']['key']['username'],
                                 fingerprint,
                                 json.dumps(
                                     proof_data, sort_keys=True, indent=4),
                                 pgp_content))
    elif proof == 'invalid_sign':
        bot.send_message(
            chat_id=update.message.chat_id,
            text="Your signed block is not valid.",
            reply_to_message_id=update.message.message_id)
    elif proof == 'notimplemented':
        bot.send_message(
            chat_id=update.message.chat_id,
            text="Using other hosts than keybase.io is not supported yet.")
    else:
        logging.error("Unhandled check_proof result: " + proof)
@filter_private
def lookup_start(bot, update, args):
    """Entry point for /lookup: run the query directly or ask for one."""
    if args:
        update.message.text = ' '.join(args)
        return lookup_username(bot, update)
    bot.send_message(chat_id=update.message.chat_id,
                     text="Please enter a query to search for.")
    return 'enter_username'
@filter_private
def lookup_username(bot, update):
    """Look up a stored proof matching the user's query and re-verify it.

    Reports the keybase identity, key fingerprint and the verification
    verdict (valid / no expiry / various warning cases) back to the chat.
    Always ends the conversation.
    """
    bot.send_chat_action(chat_id=update.message.chat_id, action='typing')
    info = lookup_proof(bot, query=update.message.text)
    if info:
        proof_object = json.loads(info.proof_object)
        # Re-group the fingerprint into space-separated blocks of four hex chars.
        fingerprint = ' '.join([
            proof_object['body']['key']['fingerprint'][i:i + 4].upper()
            for i in range(0,
                           len(proof_object['body']['key']['fingerprint']), 4)
        ])
        bot.send_message(
            chat_id=update.message.chat_id,
            text="▶ Identifying https://keybase.io/{}".format(info.keybase_username))
        bot.send_message(
            chat_id=update.message.chat_id,
            text="✅ public key fingerprint: " +
            fingerprint)
        bot.send_chat_action(chat_id=update.message.chat_id, action='typing')
        # Re-run the full signature check against the stored proof.
        succes, proof = check_key(bot,
                                  json.loads(info.proof_object), info.signed_block,
                                  info.telegram_username, info.user_id)
        # Note: 'no_expiry' is a truthy sentinel, so it must be tested
        # before the generic truthiness check below.
        if succes == 'no_expiry':
            bot.send_message(chat_id=update.message.chat_id,
                             text="😕 \"@{}\" on telegram. "
                             "But the proof has no expiry set, so be careful.".format(info.telegram_username))
        elif succes:
            bot.send_message(
                chat_id=update.message.chat_id,
                text="✅ \"@{}\" on telegram".format(info.telegram_username))
        else:
            # Verification failed; `proof` carries the failure reason.
            if proof == 'not_username':
                bot.send_message(chat_id=update.message.chat_id,
                                 text="❌ WARNING: \"{}\" on telegram may have deleted their account, or changed their username. "
                                 "The user may not be who they claim they are!".format(info.telegram_username))
            elif proof == 'invalid_sign':
                bot.send_message(chat_id=update.message.chat_id,
                                 text="❌ WARNING: \"{}\" on telegram has not signed their proof correctly. "
                                 "The user may not be who they claim they are!".format(info.telegram_username))
            elif proof == 'malformed':
                bot.send_message(chat_id=update.message.chat_id,
                                 text="❌ WARNING: \"{}\" has a malformed proof, it could not be verified.".format(info.telegram_username))
            elif proof == 'expired':
                bot.send_message(chat_id=update.message.chat_id,
                                 text="❌ WARNING: \"{}\" has let their proof expire. It cannot be trusted anymore. "
                                 "The user may not be who they claim they are!".format(info.telegram_username))
            else:
                bot.send_message(chat_id=update.message.chat_id,
                                 text="Could not verify Telegram username, you are advised to check for yourself. (Internal error)")
                logging.error("Check proof failed for lookup. Return message: %s", proof)
        bot.send_message(
            chat_id=update.message.chat_id,
            text="▶ If you want to check the proof message yourself, use the /forwardproof command."
        )
    else:
        bot.send_message(chat_id=update.message.chat_id, text="No proof found for your query.")
    return ConversationHandler.END
@filter_private
def forward_proof_start(bot, update, args):
    """Entry point for /forwardproof: forward directly or ask for a username."""
    if args:
        update.message.text = ' '.join(args)
        return forward_proof(bot, update)
    bot.send_message(chat_id=update.message.chat_id,
                     text="Please enter a username to search for.")
    return 'enter_username'
@filter_private
def forward_proof(bot, update):
    """Forward the original stored proof message for the given username."""
    # Bug fixes: the character class now includes digits (valid in Telegram
    # usernames), and group(1) is used below instead of group(0), which
    # included the "@" prefix and so never matched a stored username.
    match = re.match(r'(?:@)?([A-Za-z0-9_]+)', update.message.text)
    if match:
        info = lookup_proof(bot, telegram_username=match.group(1))
        if info:
            bot.send_message(chat_id=update.message.chat_id, text="This is the proof message for that user:")
            bot.forwardMessage(chat_id=update.message.chat_id, from_chat_id=info.chat_id, message_id=info.message_id)
        else:
            bot.send_message(chat_id=update.message.chat_id, text="No proof found for your query.")
        return ConversationHandler.END
    else:
        bot.send_message(chat_id=update.message.chat_id,
                         text="That is not a valid Telegram username, try again.")
|
pingiun/keybaseproofbot
|
keybaseproofbot/handlers.py
|
Python
|
mit
| 15,489
| 0.003168
|
# -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import unittest
from base64 import b64encode
from datetime import datetime
from airflow import configuration
from airflow import models
from airflow.contrib.operators.sftp_operator import SFTPOperator, SFTPOperation
from airflow.contrib.operators.ssh_operator import SSHOperator
from airflow.models import DAG, TaskInstance
from airflow.settings import Session
TEST_DAG_ID = 'unit_tests'
DEFAULT_DATE = datetime(2017, 1, 1)
def reset(dag_id=TEST_DAG_ID):
    """Delete every TaskInstance row belonging to *dag_id*."""
    session = Session()
    session.query(models.TaskInstance).filter_by(dag_id=dag_id).delete()
    session.commit()
    session.close()
reset()
class SFTPOperatorTest(unittest.TestCase):
    """Integration tests for SFTPOperator.

    Transfers files between the local machine and a remote host (over the
    default SSH connection) in both directions, with XCom pickling both
    enabled and disabled, and verifies the transferred content.
    """

    def setUp(self):
        """Build the SSH hook, a run-once DAG and the shared file paths."""
        configuration.load_test_config()
        from airflow.contrib.hooks.ssh_hook import SSHHook
        hook = SSHHook(ssh_conn_id='ssh_default')
        # Tests run against an ad-hoc server; skip host key verification.
        hook.no_host_key_check = True
        args = {
            'owner': 'airflow',
            'start_date': DEFAULT_DATE,
            'provide_context': True
        }
        dag = DAG(TEST_DAG_ID + 'test_schedule_dag_once', default_args=args)
        dag.schedule_interval = '@once'
        self.hook = hook
        self.dag = dag
        self.test_dir = "/tmp"
        self.test_local_filename = 'test_local_file'
        self.test_remote_filename = 'test_remote_file'
        self.test_local_filepath = '{0}/{1}'.format(self.test_dir,
                                                    self.test_local_filename)
        self.test_remote_filepath = '{0}/{1}'.format(self.test_dir,
                                                     self.test_remote_filename)

    def test_pickle_file_transfer_put(self):
        """PUT a local file to the remote host and verify its content via an
        SSH `cat`; with pickling enabled the bytes come back verbatim."""
        configuration.set("core", "enable_xcom_pickling", "True")
        test_local_file_content = \
            b"This is local file content \n which is multiline " \
            b"continuing....with other character\nanother line here \n this is last line"
        # create a test file locally
        with open(self.test_local_filepath, 'wb') as f:
            f.write(test_local_file_content)
        # put test file to remote
        put_test_task = SFTPOperator(
            task_id="test_sftp",
            ssh_hook=self.hook,
            local_filepath=self.test_local_filepath,
            remote_filepath=self.test_remote_filepath,
            operation=SFTPOperation.PUT,
            dag=self.dag
        )
        self.assertIsNotNone(put_test_task)
        ti2 = TaskInstance(task=put_test_task, execution_date=datetime.now())
        ti2.run()
        # check the remote file content
        check_file_task = SSHOperator(
            task_id="test_check_file",
            ssh_hook=self.hook,
            command="cat {0}".format(self.test_remote_filepath),
            do_xcom_push=True,
            dag=self.dag
        )
        self.assertIsNotNone(check_file_task)
        ti3 = TaskInstance(task=check_file_task, execution_date=datetime.now())
        ti3.run()
        self.assertEqual(
            ti3.xcom_pull(task_ids='test_check_file', key='return_value').strip(),
            test_local_file_content)

    def test_json_file_transfer_put(self):
        """Same as the pickle PUT test, but with pickling disabled the XCom
        value is the base64-encoded content."""
        configuration.set("core", "enable_xcom_pickling", "False")
        test_local_file_content = \
            b"This is local file content \n which is multiline " \
            b"continuing....with other character\nanother line here \n this is last line"
        # create a test file locally
        with open(self.test_local_filepath, 'wb') as f:
            f.write(test_local_file_content)
        # put test file to remote
        put_test_task = SFTPOperator(
            task_id="test_sftp",
            ssh_hook=self.hook,
            local_filepath=self.test_local_filepath,
            remote_filepath=self.test_remote_filepath,
            operation=SFTPOperation.PUT,
            dag=self.dag
        )
        self.assertIsNotNone(put_test_task)
        ti2 = TaskInstance(task=put_test_task, execution_date=datetime.now())
        ti2.run()
        # check the remote file content
        check_file_task = SSHOperator(
            task_id="test_check_file",
            ssh_hook=self.hook,
            command="cat {0}".format(self.test_remote_filepath),
            do_xcom_push=True,
            dag=self.dag
        )
        self.assertIsNotNone(check_file_task)
        ti3 = TaskInstance(task=check_file_task, execution_date=datetime.now())
        ti3.run()
        self.assertEqual(
            ti3.xcom_pull(task_ids='test_check_file', key='return_value').strip(),
            b64encode(test_local_file_content).decode('utf-8'))

    def test_pickle_file_transfer_get(self):
        """GET a remote file (created via SSH `echo`) and compare the local
        copy with the expected content."""
        configuration.set("core", "enable_xcom_pickling", "True")
        test_remote_file_content = \
            "This is remote file content \n which is also multiline " \
            "another line here \n this is last line. EOF"
        # create a test file remotely
        create_file_task = SSHOperator(
            task_id="test_create_file",
            ssh_hook=self.hook,
            command="echo '{0}' > {1}".format(test_remote_file_content,
                                              self.test_remote_filepath),
            do_xcom_push=True,
            dag=self.dag
        )
        self.assertIsNotNone(create_file_task)
        ti1 = TaskInstance(task=create_file_task, execution_date=datetime.now())
        ti1.run()
        # get remote file to local
        get_test_task = SFTPOperator(
            task_id="test_sftp",
            ssh_hook=self.hook,
            local_filepath=self.test_local_filepath,
            remote_filepath=self.test_remote_filepath,
            operation=SFTPOperation.GET,
            dag=self.dag
        )
        self.assertIsNotNone(get_test_task)
        ti2 = TaskInstance(task=get_test_task, execution_date=datetime.now())
        ti2.run()
        # test the received content
        content_received = None
        with open(self.test_local_filepath, 'r') as f:
            content_received = f.read()
        self.assertEqual(content_received.strip(), test_remote_file_content)

    def test_json_file_transfer_get(self):
        """Same as the pickle GET test, with pickling disabled; the file on
        disk is unaffected by the XCom serialization mode."""
        configuration.set("core", "enable_xcom_pickling", "False")
        test_remote_file_content = \
            "This is remote file content \n which is also multiline " \
            "another line here \n this is last line. EOF"
        # create a test file remotely
        create_file_task = SSHOperator(
            task_id="test_create_file",
            ssh_hook=self.hook,
            command="echo '{0}' > {1}".format(test_remote_file_content,
                                              self.test_remote_filepath),
            do_xcom_push=True,
            dag=self.dag
        )
        self.assertIsNotNone(create_file_task)
        ti1 = TaskInstance(task=create_file_task, execution_date=datetime.now())
        ti1.run()
        # get remote file to local
        get_test_task = SFTPOperator(
            task_id="test_sftp",
            ssh_hook=self.hook,
            local_filepath=self.test_local_filepath,
            remote_filepath=self.test_remote_filepath,
            operation=SFTPOperation.GET,
            dag=self.dag
        )
        self.assertIsNotNone(get_test_task)
        ti2 = TaskInstance(task=get_test_task, execution_date=datetime.now())
        ti2.run()
        # test the received content
        content_received = None
        with open(self.test_local_filepath, 'r') as f:
            content_received = f.read()
        # The original compared against .encode('utf-8').decode('utf-8'),
        # which is a no-op round-trip; compare directly instead.
        self.assertEqual(content_received.strip(), test_remote_file_content)

    def delete_local_resource(self):
        """Remove the local test file if it exists."""
        if os.path.exists(self.test_local_filepath):
            os.remove(self.test_local_filepath)

    def delete_remote_resource(self):
        """Remove the remote test file via an SSH `rm` task."""
        remove_file_task = SSHOperator(
            task_id="test_check_file",
            ssh_hook=self.hook,
            command="rm {0}".format(self.test_remote_filepath),
            do_xcom_push=True,
            dag=self.dag
        )
        self.assertIsNotNone(remove_file_task)
        ti3 = TaskInstance(task=remove_file_task, execution_date=datetime.now())
        ti3.run()

    def tearDown(self):
        """Clean up both the local and the remote test file.

        Bug fix: the original used ``a() and b()``; both methods return
        None, so the ``and`` short-circuited and the remote resource was
        never deleted. Call both unconditionally instead.
        """
        self.delete_local_resource()
        self.delete_remote_resource()
# Allow running this test module directly with the Python interpreter.
if __name__ == '__main__':
    unittest.main()
|
MetrodataTeam/incubator-airflow
|
tests/contrib/operators/test_sftp_operator.py
|
Python
|
apache-2.0
| 9,161
| 0.001092
|
#! /usr/bin/python3
# -*- coding:utf-8 -*-
# Funciones y parametros arbitrarios
def funcion(**nombres):
    """Demonstrate arbitrary keyword parameters.

    Prints the type of the collected mapping, one line per student, and
    returns the mapping itself.
    """
    print(type(nombres))
    for alumno, edad in nombres.items():
        print("%s es alumno y tiene %d años" % (alumno, edad))
    return nombres
#diccionario = {"Adrian":25, "Niño":25, "Roberto":23, "Celina":23}
# Demo call: the keyword arguments arrive in funcion() as a dict, which is
# printed per entry and then returned (and printed here).
print (funcion(Adrian = 25, Nino = 25, Roberto = 23, Celina = 23))
|
IntelBUAP/Python3
|
codigo27.py
|
Python
|
gpl-2.0
| 388
| 0.033679
|
# Topydo - A todo.txt client written in Python.
# Copyright (C) 2014 - 2015 Bram Schoenmakers <bram@topydo.org>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
""" Provides the AddCommand class that implements the 'add' subcommand. """
import codecs
import re
from datetime import date
from os.path import expanduser
from sys import stdin
from topydo.lib.Config import config
from topydo.lib.prettyprinters.Numbers import PrettyPrinterNumbers
from topydo.lib.WriteCommand import WriteCommand
class AddCommand(WriteCommand):
    """ Implements the 'add' subcommand that adds one or more todo items. """

    def __init__(self, p_args, p_todolist, # pragma: no branch
                 p_out=lambda a: None,
                 p_err=lambda a: None,
                 p_prompt=lambda a: None):
        super().__init__(
            p_args, p_todolist, p_out, p_err, p_prompt)
        # Free-form todo text from the command line (may be empty).
        self.text = ' '.join(p_args)
        # Path passed with -f ('-' means stdin); None when -f is not used.
        self.from_file = None

    def _process_flags(self):
        """ Parses the -f flag and stores the user-expanded file name. """
        opts, args = self.getopt('f:')
        for opt, value in opts:
            if opt == '-f':
                self.from_file = expanduser(value)
        self.args = args

    def get_todos_from_file(self):
        """
        Returns the todo lines read from the file given with -f, or from
        standard input when the file name is '-'.
        """
        if self.from_file == '-':
            return stdin.read().splitlines()
        # Bug fix: use a context manager so the file handle is closed
        # deterministically instead of being leaked.
        with codecs.open(self.from_file, 'r', encoding='utf-8') as f:
            return f.read().splitlines()

    def _add_todo(self, p_todo_text):
        """ Adds a single todo item and prints its numbered representation. """
        def _preprocess_input_todo(p_todo_text):
            """
            Pre-processes user input when adding a task.
            It detects a priority mid-sentence and puts it at the start.
            """
            todo_text = re.sub(r'^(.+) (\([A-Z]\))(.*)$', r'\2 \1\3',
                               p_todo_text)
            return todo_text

        todo_text = _preprocess_input_todo(p_todo_text)
        todo = self.todolist.add(todo_text)
        self.postprocess_input_todo(todo)
        if config().auto_creation_date():
            todo.set_creation_date(date.today())
        self.out(self.printer.print_todo(todo))

    def execute(self):
        """ Adds a todo item to the list. """
        if not super().execute():
            return False
        self.printer.add_filter(PrettyPrinterNumbers(self.todolist))
        self._process_flags()
        if self.from_file:
            try:
                new_todos = self.get_todos_from_file()
                for todo in new_todos:
                    self._add_todo(todo)
            except (IOError, OSError):
                self.error('File not found: ' + self.from_file)
        else:
            if self.text:
                self._add_todo(self.text)
            else:
                self.error(self.usage())

    def usage(self):
        return """Synopsis:
add <TEXT>
add -f <FILE> | -"""

    def help(self):
        return """\
This subcommand automatically adds the creation date to the added item.
TEXT may contain:
* Priorities mid-sentence. Example: add "Water flowers (C)"
* Dependencies using before, after, partof, parents-of and children-of tags.
These are translated to the corresponding 'id' and 'p' tags. The values of
these tags correspond to the todo number (not the dependency number).
Example: add "Subtask partof:1"
-f : Add todo items from specified FILE or from standard input.\
"""
|
bram85/topydo
|
topydo/commands/AddCommand.py
|
Python
|
gpl-3.0
| 3,836
| 0.000261
|
##
# Copyright 2012-2016 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be),
# Flemish Research Foundation (FWO) (http://www.fwo.be/en)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# http://github.com/hpcugent/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
##
"""
EasyBuild support for iompi compiler toolchain (includes Intel compilers (icc, ifort) and OpenMPI.
@author: Stijn De Weirdt (Ghent University)
@author: Kenneth Hoste (Ghent University)
"""
from easybuild.toolchains.iccifort import IccIfort
from easybuild.toolchains.mpi.openmpi import OpenMPI
class Iompi(IccIfort, OpenMPI):
    """
    Compiler toolchain with Intel compilers (icc/ifort) and OpenMPI.
    """
    # Toolchain name under which EasyBuild registers this class.
    NAME = 'iompi'
    # iompi extends the compiler-only IccIfort toolchain with OpenMPI.
    SUBTOOLCHAIN = IccIfort.NAME
|
hpcleuven/easybuild-framework
|
easybuild/toolchains/iompi.py
|
Python
|
gpl-2.0
| 1,514
| 0.001982
|
"""
Django settings for myclass project.
Generated by 'django-admin startproject' using Django 1.9.5.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control; rotate it and load
# it from the environment before any production deployment.
SECRET_KEY = 'n^qif^$w3ooxd1m5&6ir7m^fy%3oq@s+d&pxyut32upkgzbg&4'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'myquiz',
]
MIDDLEWARE_CLASSES = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'myclass.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'myclass.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_URL = '/static/'
|
wasit7/tutorials
|
django/Pieng/myclass/myclass/settings.py
|
Python
|
mit
| 3,183
| 0.001257
|
from fabric.api import env
from fabric.context_managers import cd
from fabric.operations import run, local, put
env.shell = '/bin/bash -l -c'
env.user = 'd'
env.roledefs.update({
'staging': ['staging.solebtc.com'],
'production': ['solebtc.com']
})
# Heaven will execute fab -R staging deploy:branch_name=master
def deploy(branch_name):
    """Deploy *branch_name* to the server selected by the active fabric role.

    Dispatches to the production routine when the first configured role is
    'production', to staging otherwise.
    """
    # Idiom fix: a plain if/else statement instead of a conditional
    # expression used only for its side effects.
    if env.roles[0] == 'production':
        deployProduction(branch_name)
    else:
        deployStaging(branch_name)
def deployStaging(branch_name):
    """Deploy *branch_name* to the staging host.

    Ships a git archive of the branch, rebuilds the Go binary, publishes
    the API doc, migrates the database and restarts the service. The
    remote steps run over the active fabric connection.
    """
    printMessage("staging")
    codedir = '$GOPATH/src/github.com/solefaucet/solebtc'
    # Start from a clean checkout directory on the remote host.
    run('rm -rf %s' % codedir)
    run('mkdir -p %s' % codedir)
    # Archive the branch locally and ship it to the remote home directory.
    local('git archive --format=tar --output=/tmp/archive.tar %s' % branch_name)
    local('ls /tmp')
    put('/tmp/archive.tar', '~/')
    local('rm /tmp/archive.tar')
    run('mv archive.tar %s' % codedir)
    with cd(codedir):
        run('tar xf archive.tar')
        run('go build -o solebtc')
        # mv doc to nginx root
        run('mv apidoc/v1.json /usr/share/nginx/html/doc')
        # database version control
        run("mysql -e 'create database if not exists solebtc_prod';")
        run('go get bitbucket.org/liamstask/goose/cmd/goose')
        run('goose -env production up')
        # restart solebtc service with supervisorctl
        run('supervisorctl restart solebtc')
def deployProduction(branch_name):
    """Deploy *branch_name* to the production host (not implemented yet)."""
    printMessage("production")
    # TODO
    # scp executable file from staging to production, database up, restart service
    # mark current timestamp or commit as version number so we can rollback easily
def printMessage(server):
    """Announce which server is being deployed to, and as which user."""
    message = "Deploying to %s server at %s as %s" % (server, env.host, env.user)
    print(message)
|
solefaucet/sole-server
|
fabfile.py
|
Python
|
mit
| 1,674
| 0.005376
|
from game import models
from game.method.in_game import thread_fields, Thread_field
from game.tool.room_tool import *
from game.tool.tools import to_json
# Get real-time room info; expects POST field room_id
def get_room_info(request):
    """Return live room information as JSON; expects POST field room_id.

    The payload contains the room status, the owner id and, for every user
    in the room that still exists in the database, the name, win/fail
    counts and the ready flag.
    """
    room_id = int(request.POST['room_id'])
    room = get_room_by_id(room_id)
    # NOTE(review): debug prints left in place; consider logging instead.
    print(room_id)
    print(room.users_status)
    users_array = []
    for u_id in room.users:
        find_user = models.User.objects.filter(id=u_id)
        if find_user:
            find_user = find_user[0]
            u_dict = {
                'user_id': find_user.id,
                'user_name': find_user.username,
                'win': find_user.win,
                'fail': find_user.fail,
                'user_status': room.users_status[u_id]
            }
            users_array.append(u_dict)
    # Result payload
    response = {
        'status': room.status,
        'owner': room.owner,
        'users': users_array
    }
    print(response)
    return to_json(response)
# Toggle a user's ready status; expects POST fields room_id and user_id
def change_user_status(request):
    """Toggle a user's ready flag in a room.

    Expects POST fields user_id and room_id; returns the new flag as JSON.
    """
    uid = int(request.POST['user_id'])
    rid = int(request.POST['room_id'])
    room = get_room_by_id(rid)
    new_status = not room.users_status[uid]
    room.users_status[uid] = new_status
    return to_json({'response_code': 1, 'user_status': new_status})
# Room owner starts the game; expects POST fields user_id and room_id
def begin_game(request):
    """Start the game on behalf of the room owner.

    Expects POST fields user_id and room_id. Succeeds (response_code 1)
    only when the caller owns the room and every other player is ready;
    on success the room is marked started and the board-computation
    thread for the room is launched. Otherwise returns response_code -1.
    """
    user_id = int(request.POST['user_id'])
    room_id = int(request.POST['room_id'])
    room = get_room_by_id(room_id)
    # Guard clauses: only the owner may start, and only when everyone
    # else has set their ready flag.
    if user_id != room.owner:
        return to_json({'response_code': -1})
    others_ready = all(
        room.users_status[u_id]
        for u_id in room.users if u_id != room.owner)
    if not others_ready:
        return to_json({'response_code': -1})
    room.users_status[user_id] = True
    room.status = True
    # Launch the layout-computation thread and keep a handle to it.
    thread_fields[room_id] = Thread_field(room.users, room_id)
    thread_fields[room_id].start()
    return to_json({'response_code': 1})
# # 用户准备 user_id room_id
# def user_ready(request):
# user_id = request.POST.get('user_id')
# room_id = request.POST.get('room_id')
# # 找到此房间
# room = get_room_by_id(room_id)
# room.users_status[user_id] = True
#
#
# # 用户取消准备 user_id room_id
# def user_cancel_ready(request):
# user_id = request.POST.get('user_id')
# room_id = request.POST.get('room_id')
# # 找到此房间
# room = get_room_by_id(room_id)
# room.users_status[user_id] = False
#
#
# # 开始游戏 owner_id room_id
# def owner_begin(request):
# owner_id = request.POST.get('owner_id')
# room_id = request.POST.get('room_id')
# # 找到此房间
# room = get_room_by_id(room_id)
# room.users_status[owner_id] = True
# all_ready = True
# if room_id == room.owner:
# for u in room.users:
# if not room.users_status[u]:
# all_ready = False
# break
# if all_ready:
# # 全部准备好
# room.status = True
# return 0
# else:
# # 有人没有准备好
# return 0
# else:
# # 这个人不是房主
# return 0
#
#
# # 检查是否开始游戏了 room_id
# def check_room_status(request):
# room_id = request.POST.get('room_id')
# # 找到此房间
# room = get_room_by_id(room_id)
# if room.status:
# # 已经开始了
# return 0
# else:
# # 还没有开始
# return 0
|
sdu14SoftwareEngineering/GameOfLife_WEB
|
game/method/ready_game.py
|
Python
|
apache-2.0
| 3,728
| 0
|
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Unit tests for gclient.py.
See gclient_smoketest.py for integration tests.
"""
import Queue
import copy
import logging
import os
import sys
import unittest
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import gclient
import gclient_utils
import gclient_scm
from testing_support import trial_dir
def write(filename, content):
    """Write *content* to *filename*, creating parent directories as needed."""
    target = os.path.abspath(filename)
    parent = os.path.dirname(target)
    if not os.path.isdir(parent):
        os.makedirs(parent)
    with open(target, 'w') as handle:
        handle.write(content)
class SCMMock(object):
    """Test double for an SCM wrapper.

    Records every RunCommand invocation on the owning test case's queue
    instead of touching a real checkout.
    """

    def __init__(self, unit_test, name, url):
        self.unit_test = unit_test
        self.name = name
        self.url = url

    def RunCommand(self, command, options, args, file_list):
        # The tests always dispatch the pseudo-command 'None'.
        self.unit_test.assertEquals('None', command)
        self.unit_test.processed.put((self.name, self.url))

    def FullUrlForRelativeUrl(self, url):
        return self.url + url

    # pylint: disable=no-self-use
    def DoesRemoteURLMatch(self, _):
        return True

    def GetActualRemoteURL(self, _):
        return self.url
class GclientTest(trial_dir.TestCase):
def setUp(self):
super(GclientTest, self).setUp()
self.processed = Queue.Queue()
self.previous_dir = os.getcwd()
os.chdir(self.root_dir)
# Manual mocks.
self._old_createscm = gclient.Dependency.CreateSCM
gclient.Dependency.CreateSCM = self._createscm
self._old_sys_stdout = sys.stdout
sys.stdout = gclient.gclient_utils.MakeFileAutoFlush(sys.stdout)
sys.stdout = gclient.gclient_utils.MakeFileAnnotated(sys.stdout)
def tearDown(self):
self.assertEquals([], self._get_processed())
gclient.Dependency.CreateSCM = self._old_createscm
sys.stdout = self._old_sys_stdout
os.chdir(self.previous_dir)
super(GclientTest, self).tearDown()
def _createscm(self, parsed_url, root_dir, name, out_fh=None, out_cb=None):
self.assertTrue(parsed_url.startswith('svn://example.com/'), parsed_url)
self.assertTrue(root_dir.startswith(self.root_dir), root_dir)
return SCMMock(self, name, parsed_url)
def testDependencies(self):
self._dependencies('1')
def testDependenciesJobs(self):
self._dependencies('1000')
def _dependencies(self, jobs):
"""Verifies that dependencies are processed in the right order.
e.g. if there is a dependency 'src' and another 'src/third_party/bar', that
bar isn't fetched until 'src' is done.
Args:
|jobs| is the number of parallel jobs simulated.
"""
parser = gclient.OptionParser()
options, args = parser.parse_args(['--jobs', jobs])
write(
'.gclient',
'solutions = [\n'
' { "name": "foo", "url": "svn://example.com/foo" },\n'
' { "name": "bar", "url": "svn://example.com/bar" },\n'
' { "name": "bar/empty", "url": "svn://example.com/bar_empty" },\n'
']')
write(
os.path.join('foo', 'DEPS'),
'deps = {\n'
' "foo/dir1": "/dir1",\n'
# This one will depend on dir1/dir2 in bar.
' "foo/dir1/dir2/dir3": "/dir1/dir2/dir3",\n'
' "foo/dir1/dir2/dir3/dir4": "/dir1/dir2/dir3/dir4",\n'
'}')
write(
os.path.join('bar', 'DEPS'),
'deps = {\n'
# There is two foo/dir1/dir2. This one is fetched as bar/dir1/dir2.
' "foo/dir1/dir2": "/dir1/dir2",\n'
'}')
write(
os.path.join('bar/empty', 'DEPS'),
'deps = {\n'
'}')
obj = gclient.GClient.LoadCurrentConfig(options)
self._check_requirements(obj.dependencies[0], {})
self._check_requirements(obj.dependencies[1], {})
obj.RunOnDeps('None', args)
actual = self._get_processed()
first_3 = [
('bar', 'svn://example.com/bar'),
('bar/empty', 'svn://example.com/bar_empty'),
('foo', 'svn://example.com/foo'),
]
if jobs != 1:
# We don't care of the ordering of these items except that bar must be
# before bar/empty.
self.assertTrue(
actual.index(('bar', 'svn://example.com/bar')) <
actual.index(('bar/empty', 'svn://example.com/bar_empty')))
self.assertEquals(first_3, sorted(actual[0:3]))
else:
self.assertEquals(first_3, actual[0:3])
self.assertEquals(
[
('foo/dir1', 'svn://example.com/foo/dir1'),
('foo/dir1/dir2', 'svn://example.com/bar/dir1/dir2'),
('foo/dir1/dir2/dir3', 'svn://example.com/foo/dir1/dir2/dir3'),
('foo/dir1/dir2/dir3/dir4',
'svn://example.com/foo/dir1/dir2/dir3/dir4'),
],
actual[3:])
self.assertEquals(3, len(obj.dependencies))
self.assertEquals('foo', obj.dependencies[0].name)
self.assertEquals('bar', obj.dependencies[1].name)
self.assertEquals('bar/empty', obj.dependencies[2].name)
self._check_requirements(
obj.dependencies[0],
{
'foo/dir1': ['bar', 'bar/empty', 'foo'],
'foo/dir1/dir2/dir3':
['bar', 'bar/empty', 'foo', 'foo/dir1', 'foo/dir1/dir2'],
'foo/dir1/dir2/dir3/dir4':
[ 'bar', 'bar/empty', 'foo', 'foo/dir1', 'foo/dir1/dir2',
'foo/dir1/dir2/dir3'],
})
self._check_requirements(
obj.dependencies[1],
{
'foo/dir1/dir2': ['bar', 'bar/empty', 'foo', 'foo/dir1'],
})
self._check_requirements(
obj.dependencies[2],
{})
self._check_requirements(
obj,
{
'foo': [],
'bar': [],
'bar/empty': ['bar'],
})
def _check_requirements(self, solution, expected):
for dependency in solution.dependencies:
e = expected.pop(dependency.name)
a = sorted(dependency.requirements)
self.assertEquals(e, a, (dependency.name, e, a))
self.assertEquals({}, expected)
def _get_processed(self):
"""Retrieves the item in the order they were processed."""
items = []
try:
while True:
items.append(self.processed.get_nowait())
except Queue.Empty:
pass
return items
def testAutofix(self):
# Invalid urls causes pain when specifying requirements. Make sure it's
# auto-fixed.
url = 'proto://host/path/@revision'
d = gclient.Dependency(
None, 'name', url, url, None, None, None,
None, '', True, False, None, True)
self.assertEquals('proto://host/path@revision', d.url)
def testStr(self):
parser = gclient.OptionParser()
options, _ = parser.parse_args([])
obj = gclient.GClient('foo', options)
obj.add_dependencies_and_close(
[
gclient.Dependency(
obj, 'foo', 'raw_url', 'url', None, None, None, None, 'DEPS', True,
False, None, True),
gclient.Dependency(
obj, 'bar', 'raw_url', 'url', None, None, None, None, 'DEPS', True,
False, None, True),
],
[])
obj.dependencies[0].add_dependencies_and_close(
[
gclient.Dependency(
obj.dependencies[0], 'foo/dir1', 'raw_url', 'url', None, None, None,
None, 'DEPS', True, False, None, True),
],
[])
# Make sure __str__() works fine.
# pylint: disable=protected-access
obj.dependencies[0]._file_list.append('foo')
str_obj = str(obj)
self.assertEquals(263, len(str_obj), '%d\n%s' % (len(str_obj), str_obj))
def testHooks(self):
topdir = self.root_dir
gclient_fn = os.path.join(topdir, '.gclient')
fh = open(gclient_fn, 'w')
print >> fh, 'solutions = [{"name":"top","url":"svn://example.com/top"}]'
fh.close()
subdir_fn = os.path.join(topdir, 'top')
os.mkdir(subdir_fn)
deps_fn = os.path.join(subdir_fn, 'DEPS')
fh = open(deps_fn, 'w')
hooks = [{'pattern':'.', 'action':['cmd1', 'arg1', 'arg2']}]
print >> fh, 'hooks = %s' % repr(hooks)
fh.close()
fh = open(os.path.join(subdir_fn, 'fake.txt'), 'w')
print >> fh, 'bogus content'
fh.close()
os.chdir(topdir)
parser = gclient.OptionParser()
options, _ = parser.parse_args([])
options.force = True
client = gclient.GClient.LoadCurrentConfig(options)
work_queue = gclient_utils.ExecutionQueue(options.jobs, None, False)
for s in client.dependencies:
work_queue.enqueue(s)
work_queue.flush({}, None, [], options=options, patch_refs={})
self.assertEqual(
[h.action for h in client.GetHooks(options)],
[tuple(x['action']) for x in hooks])
def testCustomHooks(self):
topdir = self.root_dir
gclient_fn = os.path.join(topdir, '.gclient')
fh = open(gclient_fn, 'w')
extra_hooks = [{'name': 'append', 'pattern':'.', 'action':['supercmd']}]
print >> fh, ('solutions = [{"name":"top","url":"svn://example.com/top",'
'"custom_hooks": %s},' ) % repr(extra_hooks + [{'name': 'skip'}])
print >> fh, '{"name":"bottom","url":"svn://example.com/bottom"}]'
fh.close()
subdir_fn = os.path.join(topdir, 'top')
os.mkdir(subdir_fn)
deps_fn = os.path.join(subdir_fn, 'DEPS')
fh = open(deps_fn, 'w')
hooks = [{'pattern':'.', 'action':['cmd1', 'arg1', 'arg2']}]
hooks.append({'pattern':'.', 'action':['cmd2', 'arg1', 'arg2']})
skip_hooks = [
{'name': 'skip', 'pattern':'.', 'action':['cmd3', 'arg1', 'arg2']}]
skip_hooks.append(
{'name': 'skip', 'pattern':'.', 'action':['cmd4', 'arg1', 'arg2']})
print >> fh, 'hooks = %s' % repr(hooks + skip_hooks)
fh.close()
# Make sure the custom hooks for that project don't affect the next one.
subdir_fn = os.path.join(topdir, 'bottom')
os.mkdir(subdir_fn)
deps_fn = os.path.join(subdir_fn, 'DEPS')
fh = open(deps_fn, 'w')
sub_hooks = [{'pattern':'.', 'action':['response1', 'yes1', 'yes2']}]
sub_hooks.append(
{'name': 'skip', 'pattern':'.', 'action':['response2', 'yes', 'sir']})
print >> fh, 'hooks = %s' % repr(sub_hooks)
fh.close()
fh = open(os.path.join(subdir_fn, 'fake.txt'), 'w')
print >> fh, 'bogus content'
fh.close()
os.chdir(topdir)
parser = gclient.OptionParser()
options, _ = parser.parse_args([])
options.force = True
client = gclient.GClient.LoadCurrentConfig(options)
work_queue = gclient_utils.ExecutionQueue(options.jobs, None, False)
for s in client.dependencies:
work_queue.enqueue(s)
work_queue.flush({}, None, [], options=options, patch_refs={})
self.assertEqual(
[h.action for h in client.GetHooks(options)],
[tuple(x['action']) for x in hooks + extra_hooks + sub_hooks])
def testTargetOS(self):
"""Verifies that specifying a target_os pulls in all relevant dependencies.
The target_os variable allows specifying the name of an additional OS which
should be considered when selecting dependencies from a DEPS' deps_os. The
value will be appended to the _enforced_os tuple.
"""
write(
'.gclient',
'solutions = [\n'
' { "name": "foo",\n'
' "url": "svn://example.com/foo",\n'
' }]\n'
'target_os = ["baz"]')
write(
os.path.join('foo', 'DEPS'),
'deps = {\n'
' "foo/dir1": "/dir1",'
'}\n'
'deps_os = {\n'
' "unix": { "foo/dir2": "/dir2", },\n'
' "baz": { "foo/dir3": "/dir3", },\n'
'}')
parser = gclient.OptionParser()
options, _ = parser.parse_args(['--jobs', '1'])
options.deps_os = "unix"
obj = gclient.GClient.LoadCurrentConfig(options)
self.assertEqual(['baz', 'unix'], sorted(obj.enforced_os))
def testTargetOsWithTargetOsOnly(self):
"""Verifies that specifying a target_os and target_os_only pulls in only
the relevant dependencies.
The target_os variable allows specifying the name of an additional OS which
should be considered when selecting dependencies from a DEPS' deps_os. With
target_os_only also set, the _enforced_os tuple will be set to only the
target_os value.
"""
write(
'.gclient',
'solutions = [\n'
' { "name": "foo",\n'
' "url": "svn://example.com/foo",\n'
' }]\n'
'target_os = ["baz"]\n'
'target_os_only = True')
write(
os.path.join('foo', 'DEPS'),
'deps = {\n'
' "foo/dir1": "/dir1",'
'}\n'
'deps_os = {\n'
' "unix": { "foo/dir2": "/dir2", },\n'
' "baz": { "foo/dir3": "/dir3", },\n'
'}')
parser = gclient.OptionParser()
options, _ = parser.parse_args(['--jobs', '1'])
options.deps_os = "unix"
obj = gclient.GClient.LoadCurrentConfig(options)
self.assertEqual(['baz'], sorted(obj.enforced_os))
def testTargetOsOnlyWithoutTargetOs(self):
"""Verifies that specifying a target_os_only without target_os_only raises
an exception.
"""
write(
'.gclient',
'solutions = [\n'
' { "name": "foo",\n'
' "url": "svn://example.com/foo",\n'
' }]\n'
'target_os_only = True')
write(
os.path.join('foo', 'DEPS'),
'deps = {\n'
' "foo/dir1": "/dir1",'
'}\n'
'deps_os = {\n'
' "unix": { "foo/dir2": "/dir2", },\n'
'}')
parser = gclient.OptionParser()
options, _ = parser.parse_args(['--jobs', '1'])
options.deps_os = "unix"
exception_raised = False
try:
gclient.GClient.LoadCurrentConfig(options)
except gclient_utils.Error:
exception_raised = True
self.assertTrue(exception_raised)
def testTargetOsInDepsFile(self):
"""Verifies that specifying a target_os value in a DEPS file pulls in all
relevant dependencies.
The target_os variable in a DEPS file allows specifying the name of an
additional OS which should be considered when selecting dependencies from a
DEPS' deps_os. The value will be appended to the _enforced_os tuple.
"""
write(
'.gclient',
'solutions = [\n'
' { "name": "foo",\n'
' "url": "svn://example.com/foo",\n'
' },\n'
' { "name": "bar",\n'
' "url": "svn://example.com/bar",\n'
' }]\n')
write(
os.path.join('foo', 'DEPS'),
'target_os = ["baz"]\n'
'deps_os = {\n'
' "unix": { "foo/unix": "/unix", },\n'
' "baz": { "foo/baz": "/baz", },\n'
' "jaz": { "foo/jaz": "/jaz", },\n'
'}')
write(
os.path.join('bar', 'DEPS'),
'deps_os = {\n'
' "unix": { "bar/unix": "/unix", },\n'
' "baz": { "bar/baz": "/baz", },\n'
' "jaz": { "bar/jaz": "/jaz", },\n'
'}')
parser = gclient.OptionParser()
options, _ = parser.parse_args(['--jobs', '1'])
options.deps_os = 'unix'
obj = gclient.GClient.LoadCurrentConfig(options)
obj.RunOnDeps('None', [])
self.assertEqual(['unix'], sorted(obj.enforced_os))
self.assertEquals(
[
('bar', 'svn://example.com/bar'),
('bar/unix', 'svn://example.com/bar/unix'),
('foo', 'svn://example.com/foo'),
('foo/baz', 'svn://example.com/foo/baz'),
('foo/unix', 'svn://example.com/foo/unix'),
],
sorted(self._get_processed()))
def testTargetOsForHooksInDepsFile(self):
  """Verifies that specifying a target_os value in a DEPS file runs the right
  entries in hooks_os.
  """
  # One unconditional hook ("a") plus one hook ("b") gated on OS "blorp".
  write(
      'DEPS',
      'hooks = [\n'
      ' {\n'
      ' "name": "a",\n'
      ' "pattern": ".",\n'
      ' "action": [ "python", "do_a" ],\n'
      ' },\n'
      ']\n'
      '\n'
      'hooks_os = {\n'
      ' "blorp": ['
      ' {\n'
      ' "name": "b",\n'
      ' "pattern": ".",\n'
      ' "action": [ "python", "do_b" ],\n'
      ' },\n'
      ' ],\n'
      '}\n')
  write(
      '.gclient',
      'solutions = [\n'
      ' { "name": ".",\n'
      ' "url": "svn://example.com/",\n'
      ' }]\n')
  # Test for an OS not in hooks_os.
  parser = gclient.OptionParser()
  options, args = parser.parse_args(['--jobs', '1'])
  options.deps_os = 'zippy'
  obj = gclient.GClient.LoadCurrentConfig(options)
  obj.RunOnDeps('None', args)
  self.assertEqual(['zippy'], sorted(obj.enforced_os))
  all_hooks = [h.action for h in obj.GetHooks(options)]
  self.assertEquals(
      [('.', 'svn://example.com/'),],
      sorted(self._get_processed()))
  # Only the unconditional hook runs for an OS missing from hooks_os.
  self.assertEquals(all_hooks,
                    [('python', 'do_a')])
  # Test for OS that has extra hooks in hooks_os.
  parser = gclient.OptionParser()
  options, args = parser.parse_args(['--jobs', '1'])
  options.deps_os = 'blorp'
  obj = gclient.GClient.LoadCurrentConfig(options)
  obj.RunOnDeps('None', args)
  self.assertEqual(['blorp'], sorted(obj.enforced_os))
  all_hooks = [h.action for h in obj.GetHooks(options)]
  self.assertEquals(
      [('.', 'svn://example.com/'),],
      sorted(self._get_processed()))
  # Both the unconditional hook and the "blorp" hook run.
  self.assertEquals(all_hooks,
                    [('python', 'do_a'),
                     ('python', 'do_b')])
def testUpdateWithOsDeps(self):
  """Verifies that complicated deps_os constructs result in the
  correct data also with multiple operating systems. Also see
  testDepsOsOverrideDepsInDepsFile."""
  test_data = [
      # Tuples of deps, deps_os, os_list and expected_deps.
      (
          # OS with no overrides at all.
          {'foo': 'default_foo'},
          {'os1': { 'foo': None } },
          ['os2'],
          {'foo': 'default_foo'}
          ),
      (
          # One OS wants to add a module.
          {'foo': 'default_foo'},
          {'os1': { 'bar': 'os1_bar' }},
          ['os1'],
          {'foo': 'default_foo',
           'bar': {'should_process': True, 'url': 'os1_bar'}}
          ),
      (
          # One OS wants to add a module. One doesn't care.
          {'foo': 'default_foo'},
          {'os1': { 'bar': 'os1_bar' }},
          ['os1', 'os2'],
          {'foo': 'default_foo',
           'bar': {'should_process': True, 'url': 'os1_bar'}}
          ),
      (
          # Two OSes want to add a module with the same definition.
          {'foo': 'default_foo'},
          {'os1': { 'bar': 'os12_bar' },
           'os2': { 'bar': 'os12_bar' }},
          ['os1', 'os2'],
          {'foo': 'default_foo',
           'bar': {'should_process': True, 'url': 'os12_bar'}}
          ),
      (
          # One OS doesn't need module, one OS wants the default.
          {'foo': 'default_foo'},
          {'os1': { 'foo': None },
           'os2': {}},
          ['os1', 'os2'],
          {'foo': 'default_foo'}
          ),
      (
          # OS doesn't need module.
          {'foo': 'default_foo'},
          {'os1': { 'foo': None } },
          ['os1'],
          {'foo': 'default_foo'}
          ),
      (
          # No-op override. Regression test for http://crbug.com/735418 .
          {'foo': 'default_foo'},
          {'os1': { 'foo': 'default_foo' } },
          [],
          {'foo': {'should_process': True, 'url': 'default_foo'}}
          ),
      ]
  for deps, deps_os, target_os_list, expected_deps in test_data:
    orig_deps = copy.deepcopy(deps)
    result = gclient.Dependency.MergeWithOsDeps(
        deps, deps_os, target_os_list, False)
    self.assertEqual(result, expected_deps)
    # MergeWithOsDeps must not mutate its 'deps' argument.
    self.assertEqual(deps, orig_deps)
def testUpdateWithOsDepsInvalid(self):
  """Verifies MergeWithOsDeps rejects deps_os entries that conflict with
  an existing 'deps' entry."""
  # Tuples of deps, deps_os, os_list; every case must raise.
  invalid_cases = [
      (
          # OS wants a different version of module.
          {'foo': 'default_foo'},
          {'os1': { 'foo': 'os1_foo'} },
          ['os1'],
          ),
      (
          # One OS doesn't need module, another OS wants a special version.
          {'foo': 'default_foo'},
          {'os1': { 'foo': None },
           'os2': { 'foo': 'os2_foo'}},
          ['os1', 'os2'],
          ),
      ]
  for deps, deps_os, target_os_list in invalid_cases:
    with self.assertRaises(gclient_utils.Error):
      gclient.Dependency.MergeWithOsDeps(deps, deps_os, target_os_list, False)
def testLateOverride(self):
  """Verifies expected behavior of LateOverride."""
  # A plain git URL must pass through LateOverride unchanged.
  original_url = "git@github.com:dart-lang/spark.git"
  dep = gclient.Dependency(
      None, 'name', 'raw_url', 'url',
      None, None, None, None, '', True, False, None, True)
  self.assertEquals(original_url, dep.LateOverride(original_url))
def testDepsOsOverrideDepsInDepsFile(self):
  """Verifies that a 'deps_os' path cannot override a 'deps' path. Also
  see testUpdateWithOsDeps above.
  """
  write(
      '.gclient',
      'solutions = [\n'
      ' { "name": "foo",\n'
      ' "url": "svn://example.com/foo",\n'
      ' },]\n')
  write(
      os.path.join('foo', 'DEPS'),
      'target_os = ["baz"]\n'
      'deps = {\n'
      ' "foo/src": "/src",\n' # This path is to be overridden by similar path
                              # in deps_os['unix'].
      '}\n'
      'deps_os = {\n'
      ' "unix": { "foo/unix": "/unix",'
      ' "foo/src": "/src_unix"},\n'
      ' "baz": { "foo/baz": "/baz",\n'
      ' "foo/src": None},\n'
      ' "jaz": { "foo/jaz": "/jaz", },\n'
      '}')
  parser = gclient.OptionParser()
  options, _ = parser.parse_args(['--jobs', '1'])
  options.deps_os = 'unix'
  obj = gclient.GClient.LoadCurrentConfig(options)
  # The conflicting "foo/src" override must abort the run.
  with self.assertRaises(gclient_utils.Error):
    obj.RunOnDeps('None', [])
  self.assertEqual(['unix'], sorted(obj.enforced_os))
  # Only the solution itself was processed before the error fired.
  self.assertEquals(
      [
        ('foo', 'svn://example.com/foo'),
      ],
      sorted(self._get_processed()))
def testRecursionOverride(self):
  """Verifies gclient respects the |recursion| var syntax.

  We check several things here:
  - |recursion| = 3 sets recursion on the foo dep to exactly 3
    (we pull /fizz, but not /fuzz)
  - pulling foo/bar at recursion level 1 (in .gclient) is overridden by
    a later pull of foo/bar at recursion level 2 (in the dep tree)
  """
  write(
      '.gclient',
      'solutions = [\n'
      ' { "name": "foo", "url": "svn://example.com/foo" },\n'
      ' { "name": "foo/bar", "url": "svn://example.com/bar" },\n'
      ']')
  write(
      os.path.join('foo', 'DEPS'),
      'deps = {\n'
      ' "bar": "/bar",\n'
      '}\n'
      'recursion = 3')
  write(
      os.path.join('bar', 'DEPS'),
      'deps = {\n'
      ' "baz": "/baz",\n'
      '}')
  write(
      os.path.join('baz', 'DEPS'),
      'deps = {\n'
      ' "fizz": "/fizz",\n'
      '}')
  write(
      os.path.join('fizz', 'DEPS'),
      'deps = {\n'
      ' "fuzz": "/fuzz",\n'
      '}')
  options, _ = gclient.OptionParser().parse_args([])
  obj = gclient.GClient.LoadCurrentConfig(options)
  obj.RunOnDeps('None', [])
  # fizz is at depth 3 and is pulled; fuzz (depth 4) is not.
  self.assertEquals(
      [
        ('foo', 'svn://example.com/foo'),
        ('foo/bar', 'svn://example.com/bar'),
        ('bar', 'svn://example.com/foo/bar'),
        ('baz', 'svn://example.com/foo/bar/baz'),
        ('fizz', 'svn://example.com/foo/bar/baz/fizz'),
      ],
      self._get_processed())
def testRecursedepsOverride(self):
  """Verifies gclient respects the |recursedeps| var syntax.

  This is what we mean to check here:
  - |recursedeps| = [...] on 2 levels means we pull exactly 3 deps
    (up to /fizz, but not /fuzz)
  - pulling foo/bar with no recursion (in .gclient) is overridden by
    a later pull of foo/bar with recursion (in the dep tree)
  - pulling foo/tar with no recursion (in .gclient) is not recursively
    pulled (taz is left out)
  """
  write(
      '.gclient',
      'solutions = [\n'
      ' { "name": "foo", "url": "svn://example.com/foo" },\n'
      ' { "name": "foo/bar", "url": "svn://example.com/bar" },\n'
      ' { "name": "foo/tar", "url": "svn://example.com/tar" },\n'
      ']')
  write(
      os.path.join('foo', 'DEPS'),
      'deps = {\n'
      ' "bar": "/bar",\n'
      '}\n'
      'recursedeps = ["bar"]')
  write(
      os.path.join('bar', 'DEPS'),
      'deps = {\n'
      ' "baz": "/baz",\n'
      '}\n'
      'recursedeps = ["baz"]')
  write(
      os.path.join('baz', 'DEPS'),
      'deps = {\n'
      ' "fizz": "/fizz",\n'
      '}')
  write(
      os.path.join('fizz', 'DEPS'),
      'deps = {\n'
      ' "fuzz": "/fuzz",\n'
      '}')
  write(
      os.path.join('tar', 'DEPS'),
      'deps = {\n'
      ' "taz": "/taz",\n'
      '}')
  options, _ = gclient.OptionParser().parse_args([])
  obj = gclient.GClient.LoadCurrentConfig(options)
  obj.RunOnDeps('None', [])
  # Neither fuzz (baz did not list fizz... in recursedeps) nor taz
  # (foo/tar is not recursed into) appear below.
  self.assertEquals(
      [
        ('bar', 'svn://example.com/foo/bar'),
        ('baz', 'svn://example.com/foo/bar/baz'),
        ('fizz', 'svn://example.com/foo/bar/baz/fizz'),
        ('foo', 'svn://example.com/foo'),
        ('foo/bar', 'svn://example.com/bar'),
        ('foo/tar', 'svn://example.com/tar'),
      ],
      sorted(self._get_processed()))
def testRecursedepsOverrideWithRelativePaths(self):
  """Verifies gclient respects |recursedeps| with relative paths."""
  write(
      '.gclient',
      'solutions = [\n'
      ' { "name": "foo", "url": "svn://example.com/foo" },\n'
      ']')
  write(
      os.path.join('foo', 'DEPS'),
      'use_relative_paths = True\n'
      'deps = {\n'
      ' "bar": "/bar",\n'
      '}\n'
      'recursedeps = ["bar"]')
  write(
      os.path.join('foo/bar', 'DEPS'),
      'deps = {\n'
      ' "baz": "/baz",\n'
      '}')
  write(
      os.path.join('baz', 'DEPS'),
      'deps = {\n'
      ' "fizz": "/fizz",\n'
      '}')
  options, _ = gclient.OptionParser().parse_args([])
  obj = gclient.GClient.LoadCurrentConfig(options)
  obj.RunOnDeps('None', [])
  # "bar" is checked out under foo/ because of use_relative_paths; baz
  # lands under foo/ too since foo/bar's DEPS has no such flag itself.
  self.assertEquals(
      [
        ('foo', 'svn://example.com/foo'),
        ('foo/bar', 'svn://example.com/foo/bar'),
        ('foo/baz', 'svn://example.com/foo/bar/baz'),
      ],
      self._get_processed())

def testRelativeRecursion(self):
  """Verifies that nested use_relative_paths is always respected."""
  write(
      '.gclient',
      'solutions = [\n'
      ' { "name": "foo", "url": "svn://example.com/foo" },\n'
      ']')
  write(
      os.path.join('foo', 'DEPS'),
      'use_relative_paths = True\n'
      'deps = {\n'
      ' "bar": "/bar",\n'
      '}\n'
      'recursedeps = ["bar"]')
  write(
      os.path.join('foo/bar', 'DEPS'),
      'use_relative_paths = True\n'
      'deps = {\n'
      ' "baz": "/baz",\n'
      '}')
  write(
      os.path.join('baz', 'DEPS'),
      'deps = {\n'
      ' "fizz": "/fizz",\n'
      '}')
  options, _ = gclient.OptionParser().parse_args([])
  obj = gclient.GClient.LoadCurrentConfig(options)
  obj.RunOnDeps('None', [])
  # Unlike the test above, foo/bar also sets use_relative_paths, so baz
  # nests one level deeper at foo/bar/baz.
  self.assertEquals(
      [
        ('foo', 'svn://example.com/foo'),
        ('foo/bar', 'svn://example.com/foo/bar'),
        ('foo/bar/baz', 'svn://example.com/foo/bar/baz'),
      ],
      self._get_processed())
def testRecursionOverridesRecursedeps(self):
  """Verifies gclient respects |recursion| over |recursedeps|.

  |recursion| is set in a top-level DEPS file. That value is meant
  to affect how many subdeps are parsed via recursion.

  |recursedeps| is set in each DEPS file to control whether or not
  to recurse into the immediate next subdep.

  This test verifies that if both syntaxes are mixed in a DEPS file,
  we disable |recursedeps| support and only obey |recursion|.

  Since this setting is evaluated per DEPS file, recursed DEPS
  files will each be re-evaluated according to the per DEPS rules.
  So a DEPS that only contains |recursedeps| could then override any
  previous |recursion| setting. There is extra processing to ensure
  this does not happen.

  For this test to work correctly, we need to use a DEPS chain that
  only contains recursion controls in the top DEPS file.

  In foo, |recursion| and |recursedeps| are specified. When we see
  |recursion|, we stop trying to use |recursedeps|.

  There are 2 constructions of DEPS here that are key to this test:

  (1) In foo, if we used |recursedeps| instead of |recursion|, we
      would also pull in bar. Since bar's DEPS doesn't contain any
      recursion statements, we would stop processing at bar.
  (2) In fizz, if we used |recursedeps| at all, we should pull in
      fuzz.

  We expect to keep going past bar (satisfying 1) and we don't
  expect to pull in fuzz (satisfying 2).
  """
  write(
      '.gclient',
      'solutions = [\n'
      ' { "name": "foo", "url": "svn://example.com/foo" },\n'
      ' { "name": "foo/bar", "url": "svn://example.com/bar" },\n'
      ']')
  write(
      os.path.join('foo', 'DEPS'),
      'deps = {\n'
      ' "bar": "/bar",\n'
      '}\n'
      'recursion = 3\n'
      'recursedeps = ["bar"]')
  write(
      os.path.join('bar', 'DEPS'),
      'deps = {\n'
      ' "baz": "/baz",\n'
      '}')
  write(
      os.path.join('baz', 'DEPS'),
      'deps = {\n'
      ' "fizz": "/fizz",\n'
      '}')
  write(
      os.path.join('fizz', 'DEPS'),
      'deps = {\n'
      ' "fuzz": "/fuzz",\n'
      '}\n'
      'recursedeps = ["fuzz"]')
  write(
      os.path.join('fuzz', 'DEPS'),
      'deps = {\n'
      ' "tar": "/tar",\n'
      '}')
  options, _ = gclient.OptionParser().parse_args([])
  obj = gclient.GClient.LoadCurrentConfig(options)
  obj.RunOnDeps('None', [])
  self.assertEquals(
      [
        ('foo', 'svn://example.com/foo'),
        ('foo/bar', 'svn://example.com/bar'),
        ('bar', 'svn://example.com/foo/bar'),
        # Deps after this would have been skipped if we were obeying
        # |recursedeps|.
        ('baz', 'svn://example.com/foo/bar/baz'),
        ('fizz', 'svn://example.com/foo/bar/baz/fizz'),
        # And this dep would have been picked up if we were obeying
        # |recursedeps|.
        # 'svn://example.com/foo/bar/baz/fuzz',
      ],
      self._get_processed())
def testRecursedepsAltfile(self):
  """Verifies gclient respects the |recursedeps| var syntax with overridden
  target DEPS file.

  This is what we mean to check here:
  - Naming an alternate DEPS file in recursedeps pulls from that one.
  """
  write(
      '.gclient',
      'solutions = [\n'
      ' { "name": "foo", "url": "svn://example.com/foo" },\n'
      ']')
  write(
      os.path.join('foo', 'DEPS'),
      'deps = {\n'
      ' "bar": "/bar",\n'
      '}\n'
      'recursedeps = [("bar", "DEPS.alt")]')
  # bar's regular DEPS is intentionally unparseable: the test only passes
  # if gclient reads DEPS.alt instead.
  write(os.path.join('bar', 'DEPS'), 'ERROR ERROR ERROR')
  write(
      os.path.join('bar', 'DEPS.alt'),
      'deps = {\n'
      ' "baz": "/baz",\n'
      '}')
  options, _ = gclient.OptionParser().parse_args([])
  obj = gclient.GClient.LoadCurrentConfig(options)
  obj.RunOnDeps('None', [])
  self.assertEquals(
      [
        ('foo', 'svn://example.com/foo'),
        ('bar', 'svn://example.com/foo/bar'),
        ('baz', 'svn://example.com/foo/bar/baz'),
      ],
      self._get_processed())
def testGitDeps(self):
  """Verifies gclient respects a .DEPS.git deps file.

  Along the way, we also test that if both DEPS and .DEPS.git are present,
  that gclient does not read the DEPS file. This will reliably catch bugs
  where gclient is always hitting the wrong file (DEPS).
  """
  write(
      '.gclient',
      'solutions = [\n'
      ' { "name": "foo", "url": "svn://example.com/foo",\n'
      ' "deps_file" : ".DEPS.git",\n'
      ' },\n'
      ']')
  # .DEPS.git pulls "bar"; the decoy DEPS pulls "baz".
  write(
      os.path.join('foo', '.DEPS.git'),
      'deps = {\n'
      ' "bar": "/bar",\n'
      '}')
  write(
      os.path.join('foo', 'DEPS'),
      'deps = {\n'
      ' "baz": "/baz",\n'
      '}')
  options, _ = gclient.OptionParser().parse_args([])
  obj = gclient.GClient.LoadCurrentConfig(options)
  obj.RunOnDeps('None', [])
  # Only bar appears: DEPS was correctly ignored.
  self.assertEquals(
      [
        ('foo', 'svn://example.com/foo'),
        ('bar', 'svn://example.com/foo/bar'),
      ],
      self._get_processed())

def testGitDepsFallback(self):
  """Verifies gclient respects fallback to DEPS upon missing deps file."""
  write(
      '.gclient',
      'solutions = [\n'
      ' { "name": "foo", "url": "svn://example.com/foo",\n'
      ' "deps_file" : ".DEPS.git",\n'
      ' },\n'
      ']')
  # No .DEPS.git exists, so gclient must fall back to plain DEPS.
  write(
      os.path.join('foo', 'DEPS'),
      'deps = {\n'
      ' "bar": "/bar",\n'
      '}')
  options, _ = gclient.OptionParser().parse_args([])
  obj = gclient.GClient.LoadCurrentConfig(options)
  obj.RunOnDeps('None', [])
  self.assertEquals(
      [
        ('foo', 'svn://example.com/foo'),
        ('bar', 'svn://example.com/foo/bar'),
      ],
      self._get_processed())
def testDepsFromNotAllowedHostsUnspecified(self):
  """Verifies gclient works fine with DEPS without allowed_hosts."""
  write(
      '.gclient',
      'solutions = [\n'
      ' { "name": "foo", "url": "svn://example.com/foo",\n'
      ' "deps_file" : ".DEPS.git",\n'
      ' },\n'
      ']')
  write(
      os.path.join('foo', 'DEPS'),
      'deps = {\n'
      ' "bar": "/bar",\n'
      '}')
  options, _ = gclient.OptionParser().parse_args([])
  obj = gclient.GClient.LoadCurrentConfig(options)
  obj.RunOnDeps('None', [])
  dep = obj.dependencies[0]
  # With no allowed_hosts declared, nothing is flagged and the set is empty.
  self.assertEquals([], dep.findDepsFromNotAllowedHosts())
  self.assertEquals(frozenset(), dep.allowed_hosts)
  # Drain the processed queue so tearDown-style checks stay clean.
  self._get_processed()

def testDepsFromNotAllowedHostsOK(self):
  """Verifies gclient works fine with DEPS with proper allowed_hosts."""
  write(
      '.gclient',
      'solutions = [\n'
      ' { "name": "foo", "url": "svn://example.com/foo",\n'
      ' "deps_file" : ".DEPS.git",\n'
      ' },\n'
      ']')
  write(
      os.path.join('foo', '.DEPS.git'),
      'allowed_hosts = ["example.com"]\n'
      'deps = {\n'
      ' "bar": "svn://example.com/bar",\n'
      '}')
  options, _ = gclient.OptionParser().parse_args([])
  obj = gclient.GClient.LoadCurrentConfig(options)
  obj.RunOnDeps('None', [])
  dep = obj.dependencies[0]
  # The dep's host matches allowed_hosts, so nothing is flagged.
  self.assertEquals([], dep.findDepsFromNotAllowedHosts())
  self.assertEquals(frozenset(['example.com']), dep.allowed_hosts)
  self._get_processed()
def testDepsFromNotAllowedHostsBad(self):
  """Verifies gclient flags deps whose host is not in allowed_hosts."""
  write(
      '.gclient',
      'solutions = [\n'
      ' { "name": "foo", "url": "svn://example.com/foo",\n'
      ' "deps_file" : ".DEPS.git",\n'
      ' },\n'
      ']')
  # allowed_hosts permits only other.com, but the dep lives on example.com.
  write(
      os.path.join('foo', '.DEPS.git'),
      'allowed_hosts = ["other.com"]\n'
      'deps = {\n'
      ' "bar": "svn://example.com/bar",\n'
      '}')
  options, _ = gclient.OptionParser().parse_args([])
  obj = gclient.GClient.LoadCurrentConfig(options)
  obj.RunOnDeps('None', [])
  dep = obj.dependencies[0]
  self.assertEquals(frozenset(['other.com']), dep.allowed_hosts)
  # The mismatched dep is reported.
  self.assertEquals([dep.dependencies[0]], dep.findDepsFromNotAllowedHosts())
  self._get_processed()
def testDepsParseFailureWithEmptyAllowedHosts(self):
  """Verifies gclient fails with defined but empty allowed_hosts."""
  write(
      '.gclient',
      'solutions = [\n'
      ' { "name": "foo", "url": "svn://example.com/foo",\n'
      ' "deps_file" : ".DEPS.git",\n'
      ' },\n'
      ']')
  write(
      os.path.join('foo', 'DEPS'),
      'allowed_hosts = []\n'
      'deps = {\n'
      ' "bar": "/bar",\n'
      '}')
  options, _ = gclient.OptionParser().parse_args([])
  obj = gclient.GClient.LoadCurrentConfig(options)
  try:
    obj.RunOnDeps('None', [])
    self.fail()
  except gclient_utils.Error, e:
    # The message, not just the type, is asserted so a different parse
    # error cannot pass the test by accident.
    self.assertIn('allowed_hosts must be', str(e))
  finally:
    self._get_processed()

def testDepsParseFailureWithNonIterableAllowedHosts(self):
  """Verifies gclient fails with defined but non-iterable allowed_hosts."""
  write(
      '.gclient',
      'solutions = [\n'
      ' { "name": "foo", "url": "svn://example.com/foo",\n'
      ' "deps_file" : ".DEPS.git",\n'
      ' },\n'
      ']')
  write(
      os.path.join('foo', 'DEPS'),
      'allowed_hosts = None\n'
      'deps = {\n'
      ' "bar": "/bar",\n'
      '}')
  options, _ = gclient.OptionParser().parse_args([])
  obj = gclient.GClient.LoadCurrentConfig(options)
  try:
    obj.RunOnDeps('None', [])
    self.fail()
  except gclient_utils.Error, e:
    self.assertIn('allowed_hosts must be', str(e))
  finally:
    self._get_processed()
def testCreatesCipdDependencies(self):
  """Verifies a DEPS entry with dep_type "cipd" is parsed into a
  CipdDependency whose URL points at the resolved CIPD package."""
  write(
      '.gclient',
      'solutions = [\n'
      ' { "name": "foo", "url": "svn://example.com/foo",\n'
      ' "deps_file" : ".DEPS.git",\n'
      ' },\n'
      ']')
  write(
      os.path.join('foo', 'DEPS'),
      'vars = {\n'
      ' "lemur_version": "version:1234",\n'
      '}\n'
      'deps = {\n'
      ' "bar": {\n'
      ' "packages": [{\n'
      ' "package": "lemur",\n'
      ' "version": Var("lemur_version"),\n'
      ' }],\n'
      ' "dep_type": "cipd",\n'
      ' }\n'
      '}')
  options, _ = gclient.OptionParser().parse_args([])
  options.validate_syntax = True
  obj = gclient.GClient.LoadCurrentConfig(options)
  self.assertEquals(1, len(obj.dependencies))
  sol = obj.dependencies[0]
  sol._condition = 'some_condition'
  sol.ParseDepsFile()
  self.assertEquals(1, len(sol.dependencies))
  dep = sol.dependencies[0]
  self.assertIsInstance(dep, gclient.CipdDependency)
  # Var("lemur_version") must have been substituted into the package URL.
  self.assertEquals(
      'https://chrome-infra-packages.appspot.com/lemur@version:1234',
      dep.url)
def testSameDirAllowMultipleCipdDeps(self):
  """Verifies gclient allow multiple cipd deps under same directory."""
  parser = gclient.OptionParser()
  options, _ = parser.parse_args([])
  obj = gclient.GClient('foo', options)
  cipd_root = gclient_scm.CipdRoot(
      os.path.join(self.root_dir, 'dir1'), 'https://example.com')
  obj.add_dependencies_and_close(
      [
        gclient.Dependency(
            obj, 'foo', 'raw_url', 'url', None, None, None, None, 'DEPS', True,
            False, None, True),
      ],
      [])
  # Two CIPD packages registered under the same name/directory ('foo').
  obj.dependencies[0].add_dependencies_and_close(
      [
        gclient.CipdDependency(obj.dependencies[0], 'foo',
                               {'package': 'foo_package',
                                'version': 'foo_version'},
                               cipd_root, None, True, False,
                               'fake_condition', True),
        gclient.CipdDependency(obj.dependencies[0], 'foo',
                               {'package': 'bar_package',
                                'version': 'bar_version'},
                               cipd_root, None, True, False,
                               'fake_condition', True),
      ],
      [])
  dep0 = obj.dependencies[0].dependencies[0]
  dep1 = obj.dependencies[0].dependencies[1]
  # Both packages resolve to distinct URLs under the shared root.
  self.assertEquals('https://example.com/foo_package@foo_version', dep0.url)
  self.assertEquals('https://example.com/bar_package@bar_version', dep1.url)
if __name__ == '__main__':
  # Wrap stdout/stderr so test output is flushed eagerly and annotated
  # with per-thread prefixes (matches gclient's own runtime behavior).
  sys.stdout = gclient_utils.MakeFileAutoFlush(sys.stdout)
  sys.stdout = gclient_utils.MakeFileAnnotated(sys.stdout, include_zero=True)
  sys.stderr = gclient_utils.MakeFileAutoFlush(sys.stderr)
  sys.stderr = gclient_utils.MakeFileAnnotated(sys.stderr, include_zero=True)
  # Each -v on the command line raises verbosity one level, capped at DEBUG.
  logging.basicConfig(
      level=[logging.ERROR, logging.WARNING, logging.INFO, logging.DEBUG][
          min(sys.argv.count('-v'), 3)],
      format='%(relativeCreated)4d %(levelname)5s %(module)13s('
             '%(lineno)d) %(message)s')
  unittest.main()
|
Shouqun/node-gn
|
tools/depot_tools/tests/gclient_test.py
|
Python
|
mit
| 40,396
| 0.003688
|
from os import system
import xmlrpclib
from SimpleXMLRPCServer import SimpleXMLRPCServer
def clear_buffer_cache():
    """Drop the kernel page/buffer cache on this host and return True.

    Exposed over XML-RPC so remote benchmark drivers can get cold-cache
    runs. Requires passwordless sudo for sed on /proc/sys/vm/drop_caches.
    """
    # Show memory usage before the drop (for the server log).
    system('free -g')
    # Flush dirty pages first so dropping caches cannot discard data.
    system('sync')
    # Write "3" into drop_caches via sed's 'w' command run under sudo:
    # a plain `sudo echo 3 > file` would perform the redirection as the
    # unprivileged user and fail.
    system("sudo sed -n 's/0/3/w /proc/sys/vm/drop_caches' /proc/sys/vm/drop_caches")
    system('sync')
    # Reset the knob back to "0" (no further dropping).
    system("sudo sed -n 's/3/0/w /proc/sys/vm/drop_caches' /proc/sys/vm/drop_caches")
    # Show memory usage after the drop.
    system('free -g')
    # XML-RPC cannot marshal None; always report success explicitly.
    return True
def is_even(n):
    """Return True when the integer ``n`` is even, else False."""
    return (n & 1) == 0
# NOTE(review): binding 0.0.0.0 exposes the cache-dropping endpoint to the
# whole network with no authentication -- confirm this host is firewalled.
server = SimpleXMLRPCServer(('0.0.0.0', 8888))
print 'Listening on port 8888...'
server.register_function(clear_buffer_cache, 'clear_buffer_cache')
# is_even is registered as a trivial liveness/round-trip check.
server.register_function(is_even, 'is_even')
server.serve_forever()
|
limingzju/ClearCacheServer
|
server.py
|
Python
|
bsd-2-clause
| 617
| 0.024311
|
# Standard imports
import unittest
import json
import logging
from datetime import datetime, timedelta
# Our imports
from emission.clients.gamified import gamified
from emission.core.get_database import get_db, get_mode_db, get_section_db
from emission.core.wrapper.user import User
from emission.core.wrapper.client import Client
import emission.tests.common
# Emit debug-level logs for the whole test module.
logging.basicConfig(level=logging.DEBUG)
class TestGamified(unittest.TestCase):
    """Exercises the gamified client's scoring pipeline against a fixture
    of trip sections loaded into the Stage databases."""

    def setUp(self):
        import emission.tests.common
        from copy import copy
        self.testUsers = ["test@example.com", "best@example.com", "fest@example.com",
                          "rest@example.com", "nest@example.com"]
        self.serverName = 'localhost'

        # Sometimes, we may have entries left behind in the database if one of the tests failed
        # or threw an exception, so let us start by cleaning up all entries
        emission.tests.common.dropAllCollections(get_db())
        self.ModesColl = get_mode_db()
        self.assertEquals(self.ModesColl.find().count(), 0)

        self.setupUserAndClient()

        emission.tests.common.loadTable(self.serverName, "Stage_Modes", "emission/tests/data/modes.json")
        emission.tests.common.loadTable(self.serverName, "Stage_Sections", "emission/tests/data/testCarbonFile")
        self.SectionsColl = get_section_db()

        # Expected distances (presumably meters) and per-mode carbon
        # intensities used by the score assertions below.
        self.walkExpect = 1057.2524056424411
        self.busExpect = 2162.668467546699
        self.busCarbon = 267.0/1609
        self.airCarbon = 217.0/1609
        self.driveCarbon = 278.0/1609
        self.busOptimalCarbon = 92.0/1609

        self.allDriveExpect = (self.busExpect * self.driveCarbon + self.walkExpect * self.driveCarbon)/1000
        self.myFootprintExpect = float(self.busExpect * self.busCarbon)/1000
        self.sb375GoalExpect = 40.142892/7

        self.mineMinusOptimalExpect = 0
        self.allDriveMinusMineExpect = float(self.allDriveExpect - self.myFootprintExpect)/self.allDriveExpect
        self.sb375DailyGoalMinusMineExpect = float(self.sb375GoalExpect - self.myFootprintExpect)/self.sb375GoalExpect

        self.now = datetime.now()
        self.twodaysago = self.now - timedelta(days=2)
        self.weekago = self.now - timedelta(weeks = 1)

        for section in self.SectionsColl.find():
            # Rewrite every fixture section to fall inside the query window.
            section['section_start_datetime'] = self.twodaysago
            section['section_end_datetime'] = self.twodaysago + timedelta(hours = 1)
            section['predicted_mode'] = {'walking': 1.0}
            # NOTE(review): the relative nesting of the two `if` blocks below
            # was reconstructed from whitespace-mangled source -- confirm
            # against upstream that they are siblings, not nested.
            if section['user_id'] == 'fest@example.com':
                logging.debug("Setting user_id for section %s, %s = %s" %
                              (section['trip_id'], section['section_id'], self.user.uuid))
                section['user_id'] = self.user.uuid
            if section['confirmed_mode'] == 5:
                # Clone mode-5 sections twice: once re-labelled as mode 9
                # ("air") and once left unconfirmed, to exercise the
                # classification fraction in the score.
                airSection = copy(section)
                airSection['confirmed_mode'] = 9
                airSection['_id'] = section['_id'] + "_air"
                self.SectionsColl.insert(airSection)
                airSection['confirmed_mode'] = ''
                airSection['_id'] = section['_id'] + "_unconf"
                self.SectionsColl.insert(airSection)

            # print("Section start = %s, section end = %s" %
            #   (section['section_start_datetime'], section['section_end_datetime']))
            self.SectionsColl.save(section)

    def setupUserAndClient(self):
        # At this point, the more important test is to execute the query and see
        # how well it works
        fakeEmail = "fest@example.com"

        client = Client("gamified")
        client.update(createKey = False)
        emission.tests.common.makeValid(client)

        (resultPre, resultReg) = client.preRegister("this_is_the_super_secret_id", fakeEmail)
        studyList = Client.getPendingClientRegs(fakeEmail)
        self.assertEqual(studyList, ["gamified"])

        user = User.register("fest@example.com")
        self.assertEqual(user.getFirstStudy(), 'gamified')
        self.user = user

    def testGetScoreComponents(self):
        components = gamified.getScoreComponents(self.user.uuid, self.weekago, self.now)
        # components[0]: fraction of sections with a confirmed mode (3 of 4).
        self.assertEqual(components[0], 0.75)
        # bus_short disappears in optimal, air_short disappears as long motorized, so optimal = 0
        # self.assertEqual(components[1], (self.busExpect * self.busCarbon) / 1000)
        # TODO: Figure out what we should do when optimal == 0. Currently, we
        # return 0, which seems sub-optimal (pun intended)
        self.assertEqual(components[1], 0.0)
        # air_short disappears as long motorized, but we need to consider walking
        self.assertAlmostEqual(components[2], self.allDriveMinusMineExpect, places=4)
        # air_short disappears as long motorized, so only bus_short is left
        self.assertAlmostEqual(components[3], self.sb375DailyGoalMinusMineExpect, places = 4)

    # Checks both calcScore and updateScore, since we calculate the score before we update it
    def testUpdateScore(self):
        self.assertEqual(gamified.getStoredScore(self.user), (0, 0))
        components = gamified.updateScore(self.user.uuid)
        print "self.allDriveMinusMineExpect = %s, self.sb375DailyGoalMinusMineExpect = %s" % \
            (self.allDriveMinusMineExpect, self.sb375DailyGoalMinusMineExpect)
        # Weighted sum of the four score components (weights 50/30/20/10).
        expectedScore = 0.75 * 50 + 30 * self.allDriveMinusMineExpect + 20 * 0.0 + \
            10 * self.sb375DailyGoalMinusMineExpect
        storedScore = gamified.getStoredScore(self.user)
        self.assertEqual(storedScore[0], 0)
        self.assertAlmostEqual(storedScore[1], expectedScore, 6)

    def testGetLevel(self):
        # (level, sub-level) thresholds: level 2 starts at 100, level 3 at
        # 1000; both level and sub-level are capped at (3, 5).
        self.assertEqual(gamified.getLevel(0), (1, 1))
        self.assertEqual(gamified.getLevel(11.0), (1, 1))
        self.assertEqual(gamified.getLevel(21.0), (1, 2))
        self.assertEqual(gamified.getLevel(100), (2, 1))
        self.assertEqual(gamified.getLevel(199.0), (2, 1))
        self.assertEqual(gamified.getLevel(200), (2, 2))
        self.assertEqual(gamified.getLevel(201.0), (2, 2))
        self.assertEqual(gamified.getLevel(999), (2, 5))
        self.assertEqual(gamified.getLevel(1000), (3, 1))
        self.assertEqual(gamified.getLevel(9999.0), (3, 5))
        self.assertEqual(gamified.getLevel(10000), (3, 5))
        self.assertEqual(gamified.getLevel(100000), (3, 5))

    def testGetFileName(self):
        # Fractional inputs are truncated to integers for the file name.
        self.assertEqual(gamified.getFileName(1, 1), "level_1_1.png")
        self.assertEqual(gamified.getFileName(1.0, 2.0), "level_1_2.png")
        self.assertEqual(gamified.getFileName(1.055, 2), "level_1_2.png")

    def testRunBackgroundTasksForDay(self):
        # runBackgroundTasks must produce the same stored score as a direct
        # updateScore call (see testUpdateScore).
        self.assertEqual(gamified.getStoredScore(self.user), (0, 0))
        components = gamified.runBackgroundTasks(self.user.uuid)
        expectedScore = 0.75 * 50 + 30 * self.allDriveMinusMineExpect + 20 * 0.0 + \
            10 * self.sb375DailyGoalMinusMineExpect
        storedScore = gamified.getStoredScore(self.user)
        self.assertEqual(storedScore[0], 0)
        self.assertAlmostEqual(storedScore[1], expectedScore, 6)
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
|
joshzarrabi/e-mission-server
|
emission/tests/client_tests/TestGamified.py
|
Python
|
bsd-3-clause
| 7,135
| 0.004905
|
from flask import Flask
# Flask application object shared by the rest of the package.
server = Flask(__name__)
server.config['SERVER_NAME'] = '127.0.0.1:5001'

# Imported at the bottom of the module, presumably so that `endpoints` can
# `from app import server` without a circular-import failure -- confirm
# against the endpoints module.
from app import endpoints
|
buckbaskin/Insight
|
service1/app/__init__.py
|
Python
|
apache-2.0
| 125
| 0.008
|
from __future__ import absolute_import, division, print_function
from nfldb.db import _upsert
class Entity (object):
    """
    This is an abstract base class that handles most of the SQL
    plumbing for entities in `nfldb`. Its interface is meant to be
    declarative: specify the schema and let the methods defined here
    do the SQL generation work. However, it is possible to override
    methods (like `nfldb.Entity._sql_field`) when more customization
    is desired.

    Note that many of the methods defined here take an `aliases`
    argument. This should be a dictionary mapping table name (defined
    in `nfldb.Entity._sql_tables`) to some custom prefix. If it
    isn't provided, then the table name itself is used.
    """

    # This class doesn't introduce any instance variables, but we need
    # to declare as such, otherwise all subclasses will wind up with a
    # `__dict__`. (Thereby negating the benefit of using __slots__.)
    __slots__ = []

    # Empty here by design; every concrete entity subclass overrides this
    # with its own schema, described by the docstring below.
    _sql_tables = {}
    """
    A dictionary with four keys: `primary`, `tables`, `managed` and
    `derived`.

    The `primary` key should map to a list of primary key
    fields that correspond to a shared minimal subset of primary keys
    in all tables that represent this entity. (i.e., It should be the
    foreign key that joins all tables in the representation together.)

    The `tables` key should map to an association list of table names
    that map to lists of fields for that table. The lists of fields for
    every table should be *disjoint*: no two tables may share a field
    name in common (outside of the primary key).

    The `managed` key should be a list of tables that are managed
    directly by `nfldb`. `INSERT`, `UPDATE` and `DELETE` queries
    will be generated appropriately. (Tables not in this list are
    assumed to be maintained by the database itself, e.g., they are
    actually views or materialized views maintained by triggers.)

    The `derived` key should map to a list of *computed* fields. These
    are fields that aren't directly stored in the table, but can be
    computed from combining columns in the table (like `offense_tds` or
    `points`). This API will expose such fields as regular SQL columns
    in the API, and will handle writing them for you in `WHERE` and
    `ORDER BY` statements. The actual implementation of each computed
    field should be in an entity's `_sql_field` method (overriding the
    one defined on `nfldb.Entity`). The derived fields must be listed
    here so that the SQL generation code is aware of them.
    """
@classmethod
def _sql_columns(cls):
    """
    Returns all columns defined for this entity. Every field
    corresponds to a single column in a table.

    The first `N` columns returned correspond to this entity's
    primary key, where `N` is the number of columns in the
    primary key.
    """
    # Primary-key columns come first; copy so the schema list is not mutated.
    columns = list(cls._sql_tables['primary'])
    for _, table_fields in cls._sql_tables['tables']:
        columns.extend(table_fields)
    return columns
@classmethod
def sql_fields(cls):
    """
    Returns a list of all SQL fields across all tables for this
    entity, including derived fields. This method can be used
    in conjunction with `nfldb.Entity.from_row_tuple` to quickly
    create new `nfldb` objects without every constructing a dict.

    The result is computed once per class and cached.
    """
    # Check cls.__dict__ directly: `hasattr` would also find a cache
    # created on a *parent* class, causing a subclass to silently reuse
    # the parent's field list instead of computing its own.
    if '_cached_sql_fields' not in cls.__dict__:
        cls._cached_sql_fields = cls._sql_columns()
        cls._cached_sql_fields += cls._sql_tables['derived']
    return cls._cached_sql_fields
@classmethod
def from_row_dict(cls, db, row):
    """
    Introduces a new entity object from a full SQL row result from
    the entity's tables. (i.e., `row` is a dictionary mapping
    column to value.) Note that the column names must be of the
    form '{entity_name}_{column_name}'. For example, in the `game`
    table, the `gsis_id` column must be named `game_gsis_id` in
    `row`.
    """
    obj = cls(db)
    prefix = cls._sql_primary_table() + '_'
    strip = len(prefix)
    # Columns belonging to other entities (different prefix) are ignored.
    for column, value in row.items():
        if column.startswith(prefix):
            setattr(obj, column[strip:], value)
    return obj
@classmethod
def from_row_tuple(cls, db, t):
    """
    Given a tuple `t` corresponding to a result from a SELECT query,
    this will construct a new instance for this entity. Note that
    the tuple `t` must be in *exact* correspondence with the columns
    returned by `nfldb.Entity.sql_fields`.
    """
    obj = cls(db)
    # Positional correspondence with sql_fields() is the caller's contract.
    for field, value in zip(cls.sql_fields(), t):
        setattr(obj, field, value)
    return obj
@classmethod
def _sql_from(cls, aliases=None):
    """
    Return a valid SQL `FROM table AS alias [LEFT JOIN extra_table
    ...]` string for this entity.
    """
    # This is a little hokey. Pick the first table as the 'FROM' table.
    # Subsequent tables are joined.
    from_table = cls._sql_primary_table()
    as_from_table = cls._sql_table_alias(from_table, aliases)
    extra_tables = ''
    # NOTE(review): `_sql_join_to` is called with `cls` passed explicitly
    # as its first argument, which suggests it is not a plain classmethod
    # here -- confirm against its definition elsewhere in the file.
    for table, _ in cls._sql_tables['tables'][1:]:
        extra_tables += cls._sql_join_to(cls,
                                         from_table=from_table,
                                         to_table=table,
                                         from_aliases=aliases,
                                         to_aliases=aliases)
    return '''
        FROM {from_table} AS {as_from_table}
        {extra_tables}
    '''.format(from_table=from_table, as_from_table=as_from_table,
               extra_tables=extra_tables)
@classmethod
def _sql_select_fields(cls, fields, wrap=None, aliases=None):
    """
    Returns correctly qualified SELECT expressions for each
    field in `fields` (namely, a field may be a derived field).
    If `wrap` is a not `None`, then it is applied to the result
    of calling `cls._sql_field` on each element in `fields`.

    All resulting fields are aliased with `AS` to correspond to
    the name given in `fields`. Namely, this makes table aliases
    opaque to the resulting query, but this also disallows
    selecting columns of the same name from multiple tables.
    """
    entity_prefix = cls._sql_primary_table()
    expressions = []
    for field in fields:
        expr = cls._sql_field(field, aliases=aliases)
        if wrap is not None:
            expr = wrap(expr)
        # Alias as '{entity}_{field}' so callers can address the column
        # without knowing which underlying table it came from.
        expressions.append('%s AS %s_%s' % (expr, entity_prefix, field))
    return expressions
@classmethod
def _sql_relation_distance(cls_from, cls_to):
primf = set(cls_from._sql_tables['primary'])
primt = set(cls_to._sql_tables['primary'])
if len(primf.intersection(primt)) == 0:
return None
outsiders = primf.difference(primt).union(primt.difference(primf))
if len(primf) > len(primt):
return -len(outsiders)
else:
return len(outsiders)
@classmethod
def _sql_join_all(cls_from, cls_tos):
"""
Given a list of sub classes `cls_tos` of `nfldb.Entity`,
produce as many SQL `LEFT JOIN` clauses as is necessary so
that all fields in all entity types given are available for
filtering.
Unlike the other join functions, this one has no alias support
or support for controlling particular tables.
The key contribution of this function is that it knows how to
connect a group of tables correctly. e.g., If the group of
tables is `game`, `play` and `play_player`, then `game` and
`play` will be joined and `play` and `play_player` will be
joined. (Instead of `game` and `play_player` or some other
erronoeous combination.)
In essence, each table is joined with the least general table
in the group.
"""
assert cls_from not in cls_tos, \
'cannot join %s with itself with `sql_join_all`' % cls_from
def dist(f, t):
return f._sql_relation_distance(t)
def relation_dists(froms, tos):
return filter(lambda (f, t, d): d is not None,
((f, t, dist(f, t)) for f in froms for t in tos))
def more_general(froms, tos):
return filter(lambda (f, t, d): d < 0, relation_dists(froms, tos))
def more_specific(froms, tos):
return filter(lambda (f, t, d): d > 0, relation_dists(froms, tos))
joins = ''
froms, tos = set([cls_from]), set(cls_tos)
while len(tos) > 0:
general = more_general(froms, tos)
specific = more_specific(froms, tos)
assert len(general) > 0 or len(specific) > 0, \
'Cannot compute distances between sets. From: %s, To: %s' \
% (froms, tos)
def add_join(f, t):
tos.discard(t)
froms.add(t)
return f._sql_join_to_all(t)
if general:
f, t, _ = max(general, key=lambda (f, t, d): d)
joins += add_join(f, t)
if specific:
f, t, _ = min(specific, key=lambda (f, t, d): d)
joins += add_join(f, t)
return joins
@classmethod
def _sql_join_to_all(cls_from, cls_to, from_table=None,
from_aliases=None, to_aliases=None):
"""
Given a **sub class** `cls_to` of `nfldb.Entity`, produce
as many SQL `LEFT JOIN` clauses as is necessary so that all
fields in `cls_to.sql_fields()` are available for filtering.
See the documentation for `nfldb.Entity._sql_join_to` for
information on the parameters.
"""
to_primary = cls_to._sql_primary_table()
joins = cls_from._sql_join_to(cls_to,
from_table=from_table,
to_table=to_primary,
from_aliases=from_aliases,
to_aliases=to_aliases)
for table, _ in cls_to._sql_tables['tables'][1:]:
joins += cls_to._sql_join_to(cls_to,
from_table=to_primary,
to_table=table,
from_aliases=to_aliases,
to_aliases=to_aliases)
return joins
@classmethod
def _sql_join_to(cls_from, cls_to,
from_table=None, to_table=None,
from_aliases=None, to_aliases=None):
"""
Given a **sub class** `cls_to` of `nfldb.Entity`, produce
a SQL `LEFT JOIN` clause.
If the primary keys in `cls_from` and `cls_to` have an empty
intersection, then an assertion error is raised.
Note that the first table defined for each of `cls_from` and
`cls_to` is used to join them if `from_table` or `to_table`
are `None`.
`from_aliases` are only applied to the `from` tables and
`to_aliases` are only applied to the `to` tables. This allows
one to do self joins.
"""
if from_table is None:
from_table = cls_from._sql_primary_table()
if to_table is None:
to_table = cls_to._sql_primary_table()
from_table = cls_from._sql_table_alias(from_table,
aliases=from_aliases)
as_to_table = cls_to._sql_table_alias(to_table, aliases=to_aliases)
from_pkey = cls_from._sql_tables['primary']
to_pkey = cls_to._sql_tables['primary']
# Avoiding set.intersection so we can preserve order.
common = [k for k in from_pkey if k in to_pkey]
assert len(common) > 0, \
"Cannot join %s to %s with non-overlapping primary keys." \
% (cls_from.__name__, cls_to.__name__)
fkey = [qualified_field(from_table, f) for f in common]
tkey = [qualified_field(as_to_table, f) for f in common]
return '''
LEFT JOIN {to_table} AS {as_to_table}
ON ({fkey}) = ({tkey})
'''.format(to_table=to_table, as_to_table=as_to_table,
fkey=', '.join(fkey), tkey=', '.join(tkey))
@classmethod
def _sql_primary_key(cls, table, aliases=None):
t = cls._sql_table_alias(table, aliases)
return [qualified_field(t, f)
for f in cls._sql_tables['primary']]
@classmethod
def _sql_primary_table(cls):
return cls._sql_tables['tables'][0][0]
@classmethod
def _sql_column_to_table(cls, name):
"""
Returns the table in `cls._sql_tables` containing the
field `name`.
If `name` corresponds to a primary key column, then
the primary table (first table) is returned.
If a table could not be found, a `exceptions.KeyError` is
raised.
"""
if name in cls._sql_tables['primary']:
return cls._sql_primary_table()
for table_name, fields in cls._sql_tables['tables']:
if name in fields:
return table_name
raise KeyError("Could not find table for %s" % name)
@classmethod
def _sql_table_alias(cls, table_name, aliases):
if aliases is None or table_name not in aliases:
return table_name
else:
return aliases[table_name]
@classmethod
def _sql_field(cls, name, aliases=None):
"""
Returns a SQL expression corresponding to the field `name`.
The default implementation returns `table_for_name`.`name`.
Entities can override this for special computed fields.
"""
prefix = cls._sql_table_alias(cls._sql_column_to_table(name), aliases)
return qualified_field(prefix, name)
def _save(self, cursor):
"""
Does an upsert for each managed table specified in
`nfldb.Entity._sql_tables`. The data is drawn from
`self`.
"""
for table, prim, vals in self._rows:
_upsert(cursor, table, vals, prim)
@property
def _rows(self):
prim = self._sql_tables['primary'][:]
for table, table_fields in self._sql_tables['tables']:
if table in self._sql_tables['managed']:
r = _as_row(prim + table_fields, self)
yield table, r[0:len(prim)], r
def _as_row(fields, obj):
"""
Given a list of fields in a SQL table and a Python object, return
an association list where the keys are from `fields` and the values
are the result of `getattr(obj, fields[i], None)` for some `i`.
Note that the `time_inserted` and `time_updated` fields are always
omitted.
"""
exclude = ('time_inserted', 'time_updated')
return [(f, getattr(obj, f, None)) for f in fields if f not in exclude]
def ands(*exprs):
    """
    Joins the given SQL expressions with `AND`, wrapping each in
    parentheses and skipping empty expressions. Returns the SQL
    literal `true` when no non-empty expression is given.
    """
    parts = ['(%s)' % e for e in exprs if e]
    if not parts:
        return 'true'
    return ' AND '.join(parts)
def qualified_field(alias, field):
    """
    Qualifies the SQL `field` with `alias`. If `alias` is empty,
    then no qualification is used. (Just `field` is returned.)
    """
    return '%s.%s' % (alias, field) if alias else field
|
verdimrc/nfldb
|
nfldb/sql.py
|
Python
|
unlicense
| 15,501
| 0.000129
|
from queues import queue_opencl_devices
from queues import queue_opencl_contexts
from queues import queue_opencl_buffers
from queues import queue_opencl_programs
from queues import queue_opencl_kernels
from queues import queue_opencl_command_queues
from queues import queue_opencl_notify
import sys
from oslo.config import cfg
import PyOpenCLInterface
from kombu.mixins import ConsumerMixin
import binascii
def DispatchDevices(method, args):
    """
    Dispatch a device-related RPC `method` with arguments `args` to
    PyOpenCLInterface. Return shapes mirror the original wire protocol
    (kept as-is for client compatibility); -128 signals failure or an
    unknown method.
    """
    if method == 'ListDevices':
        nErr = 0
        # Initialize `result` so a failure inside ListDevices() does
        # not raise a NameError on the return below (original bug).
        result = []
        try:
            result = PyOpenCLInterface.ListDevices()
        except Exception:
            nErr = -128
        return result
    if method == 'GetDeviceProperties':
        nid = int(args['id'])
        try:
            result = PyOpenCLInterface.GetDeviceProperties(nid)
        except Exception:
            nErr = -128
            DeviceProperties = {}
            return (DeviceProperties, nErr)
        return result
    return -128
def DispatchContexts(method, args):
    """
    Dispatch a context-related RPC `method` with arguments `args` to
    PyOpenCLInterface. Return shapes mirror the original wire protocol;
    -128 signals failure or an unknown method.
    """
    if method == 'ListContexts':
        nErr = 0
        result = []  # avoid NameError if ListContexts() raises
        try:
            result = PyOpenCLInterface.ListContexts()
        except Exception:
            nErr = -128
        return (result, nErr)
    if method == 'GetContextProperties':
        nid = int(args['id'])
        try:
            result = PyOpenCLInterface.GetContextProperties(nid)
        except Exception:
            nErr = -128
            ContextProperties = {}
            return (ContextProperties, nErr)
        return result
    if method == 'CreateContext':
        try:
            listDevices = args['Devices']
            properties = args['Properties']
            result = PyOpenCLInterface.CreateContext(listDevices, properties)
        except Exception:
            return -128
        return result
    if method == 'ReleaseContext':
        nid = int(args['id'])
        try:
            result = PyOpenCLInterface.ReleaseContext(nid)
        except Exception:
            return -128
        return result
    if method == 'RetainContext':
        nid = int(args['id'])
        try:
            result = PyOpenCLInterface.RetainContext(nid)
        except Exception:
            return -128
        return result
    return -128
def DispatchBuffers(method, args):
    """
    Dispatch a buffer-related RPC `method` with arguments `args` to
    PyOpenCLInterface. Return shapes mirror the original wire protocol;
    -128 signals failure or an unknown method.

    print statements were converted to the parenthesized single-argument
    form, which behaves identically under Python 2 and also parses under
    Python 3.
    """
    if method == 'ListBuffers':
        nErr = 0
        result = []  # avoid NameError if ListBuffers() raises
        try:
            result = PyOpenCLInterface.ListBuffers()
        except Exception:
            print("DISPATCHBUFFERS : Exception caught ListBuffers")
            nErr = -128
        return (result, nErr)
    if method == 'GetBufferProperties':
        nid = int(args['id'])
        try:
            result = PyOpenCLInterface.GetBufferProperties(nid)
        except Exception:
            nErr = -128
            BufferProperties = {}
            return (BufferProperties, nErr)
        return result
    if method == 'CreateBuffer':
        try:
            context = int(args['Context'])
            size = int(args['Size'])
            properties = args['Properties']
            result = PyOpenCLInterface.CreateBuffer(context, size, properties)
        except Exception:
            print("DISPATCHBUFFERS.CreateBuffer Exception Caught : %s" % sys.exc_info()[0])
            return -128
        return result
    if method == 'ReleaseBuffer':
        nid = int(args['id'])
        try:
            result = PyOpenCLInterface.ReleaseBuffer(nid)
        except Exception:
            return -128
        return result
    if method == 'RetainBuffer':
        nid = int(args['id'])
        try:
            result = PyOpenCLInterface.RetainBuffer(nid)
        except Exception:
            return -128
        return result
    print("DISPATCHBUFFERS : Unknown Method")
    return -128
def DispatchPrograms(method, args):
    """
    Dispatch a program-related RPC `method` with arguments `args` to
    PyOpenCLInterface. Return shapes mirror the original wire protocol;
    -128 signals failure or an unknown method.
    """
    if method == 'ListPrograms':
        nErr = 0
        result = []  # avoid NameError if ListPrograms() raises
        try:
            result = PyOpenCLInterface.ListPrograms()
        except Exception:
            nErr = -128
        return (result, nErr)
    if method == 'GetProgramProperties':
        try:
            nid = int(args['id'])
            result = PyOpenCLInterface.GetProgramProperties(nid)
        except Exception:
            print("Exception caught in DispatchPrograms.%s " % method)
            print("Exception info %s " % sys.exc_info()[0])
            nErr = -128
            Properties = {}
            return (Properties, nErr)
        return result
    if method == 'CreateProgram':
        try:
            context = int(args['Context'])
            programStringsList = args['ProgramStrings']
            # normalize each program source to a plain str for the C layer
            programStrings = [str(stru) for stru in programStringsList]
            result = PyOpenCLInterface.CreateProgram(context, programStrings)
        except Exception:
            print("Exception caught in DispatchPrograms.%s " % method)
            print("Exception info %s " % sys.exc_info()[0])
            return -128
        return result
    if method == 'ReleaseProgram':
        try:
            nid = int(args['id'])
            result = PyOpenCLInterface.ReleaseProgram(nid)
        except Exception:
            print("Exception caught in DispatchPrograms.%s " % method)
            print("Exception info %s " % sys.exc_info()[0])
            return -128
        return result
    if method == 'RetainProgram':
        try:
            nid = int(args['id'])
            result = PyOpenCLInterface.RetainProgram(nid)
        except Exception:
            print("Exception caught in DispatchPrograms.%s " % method)
            print("Exception info %s " % sys.exc_info()[0])
            return -128
        return result
    if method == 'BuildProgram':
        try:
            nid = int(args['id'])
            listDevices = args['Devices']
            buildOptions = args['Options']
            result = PyOpenCLInterface.BuildProgram(nid, listDevices, buildOptions)
        except Exception:
            print("Exception caught in DispatchPrograms.%s " % method)
            print("Exception info %s " % sys.exc_info()[0])
            return -128
        return result
    if method == 'GetProgramBuildInfo':
        try:
            nid = int(args['id'])
            device = int(args['Device'])
            buildInfo = args['BuildInfo']
            result = PyOpenCLInterface.GetProgramBuildInfo(nid, device, buildInfo)
        except Exception:
            print("Exception caught in DispatchPrograms.%s " % method)
            print("Exception info %s " % sys.exc_info()[0])
            return -128
        return result
    print("DISPATCHPROGRAMS : Unknown Method")
    return -128
def DispatchKernels(method, args):
    """
    Dispatch a kernel-related RPC `method` with arguments `args` to
    PyOpenCLInterface. Return shapes mirror the original wire protocol;
    -128 signals failure or an unknown method.
    """
    if method == 'ListKernels':
        nErr = 0
        result = []  # avoid NameError if ListKernels() raises
        try:
            result = PyOpenCLInterface.ListKernels()
        except Exception:
            nErr = -128
        return (result, nErr)
    if method == 'GetKernelProperties':
        try:
            nid = int(args['id'])
            result = PyOpenCLInterface.GetKernelProperties(nid)
        except Exception:
            print("Exception caught in DispatchKernels.%s " % method)
            print("Exception info %s " % sys.exc_info()[0])
            nErr = -128
            Properties = {}
            return (Properties, nErr)
        return result
    if method == 'CreateKernel':
        try:
            program = int(args['Program'])
            kernel_name = str(args['KernelName'])
            result = PyOpenCLInterface.CreateKernel(program, kernel_name)
        except Exception:
            print("Exception caught in DispatchKernels.%s " % method)
            print("Exception info %s " % sys.exc_info()[0])
            return -128
        return result
    if method == 'ReleaseKernel':
        try:
            nid = int(args['id'])
            result = PyOpenCLInterface.ReleaseKernel(nid)
        except Exception:
            print("Exception caught in DispatchKernels.%s " % method)
            print("Exception info %s " % sys.exc_info()[0])
            return -128
        return result
    if method == 'RetainKernel':
        try:
            nid = int(args['id'])
            result = PyOpenCLInterface.RetainKernel(nid)
        except Exception:
            print("Exception caught in DispatchKernels.%s " % method)
            print("Exception info %s " % sys.exc_info()[0])
            return -128
        return result
    if method == 'KernelSetArgument':
        try:
            nid = int(args['id'])
            paramIndex = int(args['ParamIndex'])
            body = args['ParamDict']
            # Exactly one of three argument encodings is expected; when
            # several are present, later checks win (as in the original).
            # `in` replaces the Python-2-only dict.has_key().
            paramDict = {}
            if 'LocalMemory' in body:
                paramDict = {'LocalMemory': int(body['LocalMemory'])}
            if 'DeviceMemoryObject' in body:
                paramDict = {'DeviceMemoryObject': int(body['DeviceMemoryObject'])}
            if 'HostValue' in body:
                # host values travel base64-encoded over the wire
                base64string = str(body['HostValue'])
                binArray = bytearray(binascii.a2b_base64(base64string))
                paramDict = {'HostValue': binArray}
            result = PyOpenCLInterface.KernelSetArgument(nid, paramIndex, paramDict)
        except Exception:
            print("Exception caught in DispatchKernels.%s " % method)
            print("Exception info %s " % sys.exc_info()[0])
            return -128
        return result
    # label fixed: the original printed DISPATCHPROGRAMS here (copy/paste)
    print("DISPATCHKERNELS : Unknown Method")
    return -128
def DispatchCommandQueues(method, args):
    """
    Dispatch a command-queue RPC `method` with arguments `args` to
    PyOpenCLInterface. Buffer payloads cross the wire base64-encoded.
    Return shapes mirror the original wire protocol; -128 signals
    failure or an unknown method.
    """
    if method == 'ListCommandQueues':
        nErr = 0
        result = []  # avoid NameError if ListQueues() raises
        try:
            result = PyOpenCLInterface.ListQueues()
        except Exception:
            nErr = -128
        return (result, nErr)
    if method == 'GetCommandQueueProperties':
        try:
            nid = int(args['id'])
            result = PyOpenCLInterface.GetQueueProperties(nid)
        except Exception:
            print("Exception caught in DispatchCommandQueues.%s " % method)
            print("Exception info %s " % sys.exc_info()[0])
            nErr = -128
            Properties = {}
            return (Properties, nErr)
        return result
    if method == 'CreateCommandQueue':
        try:
            context = int(args['Context'])
            device = int(args['Device'])
            createFlags = args['Properties']
            result = PyOpenCLInterface.CreateQueue(context, device, createFlags)
        except Exception:
            print("Exception caught in DispatchCommandQueues.%s " % method)
            print("Exception info %s " % sys.exc_info()[0])
            return -128
        return result
    if method == 'ReleaseCommandQueue':
        try:
            nid = int(args['id'])
            result = PyOpenCLInterface.ReleaseQueue(nid)
        except Exception:
            print("Exception caught in DispatchQueue.%s " % method)
            print("Exception info %s " % sys.exc_info()[0])
            return -128
        return result
    if method == 'RetainCommandQueue':
        try:
            nid = int(args['id'])
            result = PyOpenCLInterface.RetainQueue(nid)
        except Exception:
            print("Exception caught in DispatchQueues.%s " % method)
            print("Exception info %s " % sys.exc_info()[0])
            return -128
        return result
    if method == 'EnqueueReadBuffer':
        try:
            nid = int(args['id'])
            membuffer = int(args['MemBuffer'])
            bytecount = int(args['ByteCount'])
            offset = int(args['Offset'])
            RawData, RetErr = PyOpenCLInterface.EnqueueReadBuffer(nid, membuffer, bytecount, offset)
            # RawData is a byte array of length ByteCount; encode it in
            # 57-byte slices so each base64 line stays at 76 characters.
            Data = ""
            StartPosition = 0
            while StartPosition < bytecount:
                EndPosition = min(StartPosition + 57, bytecount)
                Data2Convert = bytearray(RawData[StartPosition : EndPosition])
                StartPosition = EndPosition
                Data = Data + binascii.b2a_base64(Data2Convert)
        except Exception:
            print("Exception caught in DispatchCommandQueues.%s " % method)
            print("Exception info %s " % sys.exc_info()[0])
            return -128
        return (Data, RetErr)
    if method == 'EnqueueWriteBuffer':
        try:
            nid = int(args['id'])
            membuffer = int(args['MemBuffer'])
            bytecount = int(args['ByteCount'])
            offset = int(args['Offset'])
            # unpack the base64-encoded payload
            data = bytearray(binascii.a2b_base64(str(args['Data'])))
            result = PyOpenCLInterface.EnqueueWriteBuffer(nid, membuffer, bytecount, offset, data)
        except Exception:
            print("Exception caught in DispatchCommandQueues.%s " % method)
            print("Exception info %s " % sys.exc_info()[0])
            return -128
        return result
    if method == 'EnqueueCopyBuffer':
        try:
            nid = int(args['id'])
            sourcebuffer = int(args['SourceBuffer'])
            destinationbuffer = int(args['DestinationBuffer'])
            bytecount = int(args['ByteCount'])
            sourceoffset = int(args['SourceOffset'])
            destinationoffset = int(args['DestinationOffset'])
            result = PyOpenCLInterface.EnqueueCopyBuffer(nid, sourcebuffer, destinationbuffer,
                                                         sourceoffset, destinationoffset, bytecount)
        except Exception:
            print("Exception caught in DispatchCommandQueues.%s " % method)
            print("Exception info %s " % sys.exc_info()[0])
            return -128
        return result
    if method == 'EnqueueNDRangeKernel':
        try:
            nid = int(args['id'])
            kernel = int(args['Kernel'])
            gwo = args['GWO']  # global work offset
            gws = args['GWS']  # global work size
            lws = args['LWS']  # local work size
            result = PyOpenCLInterface.EnqueueNDRangeKernel(nid, kernel, gwo, gws, lws)
        except Exception:
            print("Exception caught in DispatchCommandQueues.%s " % method)
            print("Exception info %s " % sys.exc_info()[0])
            return -128
        return result
    if method == 'EnqueueTask':
        try:
            nid = int(args['id'])
            kernel = int(args['Kernel'])
            result = PyOpenCLInterface.EnqueueTask(nid, kernel)
        except Exception:
            print("Exception caught in DispatchCommandQueues.%s " % method)
            print("Exception info %s " % sys.exc_info()[0])
            return -128
        return result
    if method == 'EnqueueBarrier':
        try:
            nid = int(args['id'])
            result = PyOpenCLInterface.EnqueueBarrier(nid)
        except Exception:
            print("Exception caught in DispatchCommandQueues.%s " % method)
            print("Exception info %s " % sys.exc_info()[0])
            return -128
        return result
    if method == 'Finish':
        try:
            nid = int(args['id'])
            result = PyOpenCLInterface.Finish(nid)
        except Exception:
            print("Exception caught in DispatchCommandQueues.%s " % method)
            print("Exception info %s " % sys.exc_info()[0])
            return -128
        return result
    # label fixed: the original printed DISPATCHPROGRAMS here (copy/paste)
    print("DISPATCHCOMMANDQUEUES : Unknown Method")
    return -128
class C(ConsumerMixin):
    """
    Kombu consumer that services the OpenCL RPC queues.

    Each queue-specific callback acknowledges the message, logs it and
    forwards the request to the matching Dispatch* function; the result
    is published on the reply queue named in the request. The six
    callbacks previously duplicated the reply logic verbatim; it now
    lives in `_dispatch_and_reply`.

    NOTE(review): `BrokerConnection` is only imported in the __main__
    block below, so instances of this class only work when the module
    is run as a script -- confirm before importing it elsewhere.
    """
    def __init__(self, connection):
        self.connection = connection

    def get_consumers(self, Consumer, channel):
        return [Consumer( queue_opencl_devices, accept = ['json'], callbacks = [self.on_devices_message]),
                Consumer( queue_opencl_contexts, accept = ['json'], callbacks = [self.on_contexts_message]),
                Consumer( queue_opencl_programs, accept = ['json'], callbacks = [self.on_programs_message]),
                Consumer( queue_opencl_kernels, accept = ['json'], callbacks = [self.on_kernels_message]),
                Consumer( queue_opencl_buffers, accept = ['json'], callbacks = [self.on_buffers_message]),
                Consumer( queue_opencl_command_queues, accept = ['json'], callbacks = [self.on_command_queues_message]),
                Consumer( queue_opencl_notify, accept = ['json'], callbacks = [self.on_message])]

    def _dispatch_and_reply(self, body, dispatcher):
        """
        Common request handling shared by every queue callback:
        extract the reply coordinates from `body`, run `dispatcher`
        and publish its result on the requested reply queue.
        """
        try:
            respTarget = body['Source']
            respQueue = body['RespQueue']
            method = body['Method']
            args = body['args']
            # create the response connection
            resp_connection = BrokerConnection(respTarget)
            resp_queue = resp_connection.SimpleQueue(respQueue,
                            queue_opts = {'durable': False, 'auto_delete': True},
                            exchange_opts = {'delivery_mode' : 1,
                                             'auto_delete' : True,
                                             'durable' : False})
            payload = {"Result": dispatcher(method, args)}
            resp_queue.put(payload, serializer='json')
            resp_queue.close()
        except Exception:
            print("Exception caught : %s" % sys.exc_info()[0])

    def on_message(self, body, message):
        # Notification queue: nothing to do besides acknowledging.
        message.ack()

    def on_command_queues_message(self, body, message):
        message.ack()
        print ("notify: RECEIVED COMMAND QUEUES MSG - body: %r" % (body,))
        print ("notify: RECEIVED COMMAND QUEUES MSG - message: %r" % (message,))
        self._dispatch_and_reply(body, DispatchCommandQueues)

    def on_devices_message(self, body, message):
        message.ack()
        print ("notify: RECEIVED DEVICES MSG - body: %r" % (body,))
        print ("notify: RECEIVED DEVICES MSG - message: %r" % (message,))
        self._dispatch_and_reply(body, DispatchDevices)

    def on_contexts_message(self, body, message):
        print ("notify: RECEIVED CONTEXTS MSG - body: %r" % (body,))
        print ("notify: RECEIVED CONTEXTS MSG - message: %r" % (message,))
        message.ack()
        self._dispatch_and_reply(body, DispatchContexts)

    def on_buffers_message(self, body, message):
        print ("notify: RECEIVED BUFFERS MSG - body: %r" % (body,))
        print ("notify: RECEIVED BUFFERS MSG - message: %r" % (message,))
        message.ack()
        self._dispatch_and_reply(body, DispatchBuffers)

    def on_programs_message(self, body, message):
        print ("notify: RECEIVED PROGRAMS MSG - body: %r" % (body,))
        print ("notify: RECEIVED PROGRAMS MSG - message: %r" % (message,))
        message.ack()
        self._dispatch_and_reply(body, DispatchPrograms)

    def on_kernels_message(self, body, message):
        print ("notify: RECEIVED KERNELS MSG - body: %r" % (body,))
        print ("notify: RECEIVED KERNELS MSG - message: %r" % (message,))
        message.ack()
        self._dispatch_and_reply(body, DispatchKernels)
if __name__ == "__main__":
    from kombu import BrokerConnection
    from kombu.utils.debug import setup_logging
    setup_logging(loglevel="DEBUG")
    # Broker coordinates come from oslo.config command line options.
    configs = cfg.ConfigOpts()
    options = [ cfg.StrOpt('rabbit_host', default = 'localhost'),
                # `required` expects a boolean; the original passed the
                # (truthy) string 'true', which behaved the same.
                cfg.StrOpt('rabbit_password', required = True),
                cfg.StrOpt('rabbit_user', default = 'guest')]
    configs.register_opts( options )
    configs(sys.argv[1:])
    rh = configs.rabbit_host
    rp = configs.rabbit_password
    ru = configs.rabbit_user
    strBroker = "amqp://" + ru + ":" + rp + "@" + rh + ":5672//"
    retErr = PyOpenCLInterface.Initialize("GPU")
    if retErr != 0:
        print("Error could not initialize OpenCL interface")
    else:
        with BrokerConnection(strBroker) as connection:
            try:
                C(connection).run()
            except KeyboardInterrupt:
                print("bye bye")
    print("Exiting...")
|
apatriciu/OpenStackOpenCL
|
computeOpenCL/nova/contrib/OpenCLServer/OpenCLServer.py
|
Python
|
apache-2.0
| 23,459
| 0.01006
|
#!/usr/bin/env python3
# Copyright (c) 2016-2017 Bitcoin Core Developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
# This script will locally construct a merge commit for a pull request on a
# github repository, inspect it, sign it and optionally push it.
# The following temporary branches are created/overwritten and deleted:
# * pull/$PULL/base (the current master we're merging onto)
# * pull/$PULL/head (the current state of the remote pull request)
# * pull/$PULL/merge (github's merge)
# * pull/$PULL/local-merge (our merge)
# In case of a clean merge that is accepted by the user, the local branch with
# name $BRANCH is overwritten with the merged result, and optionally pushed.
from __future__ import division,print_function,unicode_literals
import os
from sys import stdin,stdout,stderr
import argparse
import hashlib
import subprocess
import sys
import json,codecs
try:
from urllib.request import Request,urlopen
except:
from urllib2 import Request,urlopen
# External tools (can be overridden using environment)
GIT = os.getenv('GIT','git')
BASH = os.getenv('BASH','bash')

# OS specific configuration for terminal attributes
ATTR_RESET = ''  # escape sequence that resets terminal attributes
ATTR_PR = ''     # escape sequence used to highlight the PR number
COMMIT_FORMAT = '%h %s (%an)%d'  # plain `git log` pretty-format (no color)
if os.name == 'posix': # if posix, assume we can use basic terminal escapes
    ATTR_RESET = '\033[0m'
    ATTR_PR = '\033[1;36m'  # bold cyan
    COMMIT_FORMAT = '%C(bold blue)%h%Creset %s %C(cyan)(%an)%Creset%C(green)%d%Creset'
def git_config_get(option, default=None):
    '''
    Get named configuration option from git repository.
    Returns `default` when the option is not set.
    '''
    try:
        value = subprocess.check_output([GIT, 'config', '--get', option])
    except subprocess.CalledProcessError:
        return default
    return value.rstrip().decode('utf-8')
def retrieve_pr_info(repo,pull):
    '''
    Retrieve pull request information from github.
    Return None if no title can be found, or an error happens.
    '''
    try:
        url = "https://api.github.com/repos/"+repo+"/pulls/"+pull
        response = urlopen(Request(url))
        reader = codecs.getreader('utf-8')
        return json.load(reader(response))
    except Exception as e:
        print('Warning: unable to retrieve pull information from github: %s' % e)
        return None
def ask_prompt(text):
    '''Show *text* on stderr and return one stripped line read from stdin.'''
    print(text, end=" ", file=stderr)
    stderr.flush()
    answer = stdin.readline().rstrip()
    print("", file=stderr)
    return answer
def get_symlink_files():
    '''Return the paths of all symlinks in the HEAD tree.'''
    out = subprocess.check_output([GIT, 'ls-tree', '--full-tree', '-r', 'HEAD'])
    links = []
    for entry in sorted(out.splitlines()):
        text = entry.decode('utf-8')
        mode = int(text.split(" ")[0], 8)
        # S_IFLNK: mode bits 0o120000 mark a symbolic link.
        if (mode & 0o170000) == 0o120000:
            links.append(text.split("\t")[1])
    return links
def tree_sha512sum(commit='HEAD'):
    '''
    Return a hex SHA512 digest covering the full contents of the tree
    at *commit*: for every file (sorted by name), the SHA512 of its
    blob data is folded into an overall hash together with the file
    name, so any content or filename change alters the result.

    Raises IOError when git cat-file misbehaves or exits non-zero.
    '''
    # request metadata for entire tree, recursively
    files = []
    blob_by_name = {}
    for line in subprocess.check_output([GIT, 'ls-tree', '--full-tree', '-r', commit]).splitlines():
        name_sep = line.index(b'\t')
        metadata = line[:name_sep].split() # perms, 'blob', blobid
        assert(metadata[1] == b'blob')
        name = line[name_sep+1:]
        files.append(name)
        blob_by_name[name] = metadata[2]

    # sort for a deterministic traversal order (names are bytes)
    files.sort()
    # open connection to git-cat-file in batch mode to request data for all blobs
    # this is much faster than launching it per file
    p = subprocess.Popen([GIT, 'cat-file', '--batch'], stdout=subprocess.PIPE, stdin=subprocess.PIPE)
    overall = hashlib.sha512()
    for f in files:
        blob = blob_by_name[f]
        # request blob
        p.stdin.write(blob + b'\n')
        p.stdin.flush()
        # read header: blob, "blob", size
        reply = p.stdout.readline().split()
        assert(reply[0] == blob and reply[1] == b'blob')
        size = int(reply[2])
        # hash the blob data, reading in 64 KiB chunks
        intern = hashlib.sha512()
        ptr = 0
        while ptr < size:
            bs = min(65536, size - ptr)
            piece = p.stdout.read(bs)
            if len(piece) == bs:
                intern.update(piece)
            else:
                raise IOError('Premature EOF reading git cat-file output')
            ptr += bs
        dig = intern.hexdigest()
        assert(p.stdout.read(1) == b'\n') # ignore LF that follows blob data
        # update overall hash with file hash
        overall.update(dig.encode("utf-8"))
        overall.update(" ".encode("utf-8"))
        overall.update(f)
        overall.update("\n".encode("utf-8"))
    p.stdin.close()
    if p.wait():
        raise IOError('Non-zero return value executing git cat-file')
    return overall.hexdigest()
def print_merge_details(pull, title, branch, base_branch, head_branch):
    '''Print a highlighted summary line and the commit graph for the pull being merged.'''
    print('%s#%s%s %s %sinto %s%s' % (ATTR_RESET+ATTR_PR,pull,ATTR_RESET,title,ATTR_RESET+ATTR_PR,branch,ATTR_RESET))
    subprocess.check_call([GIT,'log','--graph','--topo-order','--pretty=format:'+COMMIT_FORMAT,base_branch+'..'+head_branch])
def parse_arguments(argv=None):
    '''
    Parse command line arguments.

    `argv` defaults to `sys.argv[1:]` when None; an explicit list may
    be passed instead (which also makes this function testable).
    '''
    epilog = '''
        In addition, you can set the following git configuration variables:
        githubmerge.repository (mandatory),
        user.signingkey (mandatory),
        githubmerge.host (default: git@github.com),
        githubmerge.branch (no default),
        githubmerge.testcmd (default: none).
    '''
    parser = argparse.ArgumentParser(description='Utility to merge, sign and push github pull requests',
            epilog=epilog)
    parser.add_argument('pull', metavar='PULL', type=int, nargs=1,
        help='Pull request ID to merge')
    parser.add_argument('branch', metavar='BRANCH', type=str, nargs='?',
        default=None, help='Branch to merge against (default: githubmerge.branch setting, or base branch for pull, or \'master\')')
    return parser.parse_args(argv)
def main():
    """Interactively merge, test, sign and push a github pull request.

    Workflow: read configuration from git, fetch the PR refs from github,
    redo the merge locally, verify the tree hash, optionally run a test
    command or drop into a shell, GPG-sign the merge commit, and finally
    offer to push the result.  Exits with a distinct non-zero status for
    each failure class (1 config/abort, 3 git setup, 4 merge, 5 tests,
    6 github mismatch, 8 tree-hash mismatch).
    """
    # Extract settings from git repo
    repo = git_config_get('githubmerge.repository')
    host = git_config_get('githubmerge.host','git@github.com')
    opt_branch = git_config_get('githubmerge.branch',None)
    testcmd = git_config_get('githubmerge.testcmd')
    signingkey = git_config_get('user.signingkey')
    if repo is None:
        print("ERROR: No repository configured. Use this command to set:", file=stderr)
        print("git config githubmerge.repository <owner>/<repo>", file=stderr)
        sys.exit(1)
    if signingkey is None:
        print("ERROR: No GPG signing key set. Set one using:",file=stderr)
        print("git config --global user.signingkey <key>",file=stderr)
        sys.exit(1)
    host_repo = host+":"+repo # shortcut for push/pull target
    # Extract settings from command line
    args = parse_arguments()
    pull = str(args.pull[0])
    # Receive pull information from github
    info = retrieve_pr_info(repo,pull)
    if info is None:
        sys.exit(1)
    title = info['title'].strip()
    body = info['body'].strip()
    # precedence order for destination branch argument:
    # - command line argument
    # - githubmerge.branch setting
    # - base branch for pull (as retrieved from github)
    # - 'master'
    branch = args.branch or opt_branch or info['base']['ref'] or 'master'
    # Initialize source branches
    head_branch = 'pull/'+pull+'/head'
    base_branch = 'pull/'+pull+'/base'
    merge_branch = 'pull/'+pull+'/merge'
    local_merge_branch = 'pull/'+pull+'/local-merge'
    devnull = open(os.devnull,'w')
    try:
        subprocess.check_call([GIT,'checkout','-q',branch])
    except subprocess.CalledProcessError as e:
        print("ERROR: Cannot check out branch %s." % (branch), file=stderr)
        sys.exit(3)
    try:
        # Fetch all refs for this PR (head, merge) into local pull/<id>/* branches.
        subprocess.check_call([GIT,'fetch','-q',host_repo,'+refs/pull/'+pull+'/*:refs/heads/pull/'+pull+'/*'])
    except subprocess.CalledProcessError as e:
        print("ERROR: Cannot find pull request #%s on %s." % (pull,host_repo), file=stderr)
        sys.exit(3)
    try:
        # Probe that the fetched head ref actually exists.
        subprocess.check_call([GIT,'log','-q','-1','refs/heads/'+head_branch], stdout=devnull, stderr=stdout)
    except subprocess.CalledProcessError as e:
        print("ERROR: Cannot find head of pull request #%s on %s." % (pull,host_repo), file=stderr)
        sys.exit(3)
    try:
        # Probe that github's own merge commit for the PR exists (compared later).
        subprocess.check_call([GIT,'log','-q','-1','refs/heads/'+merge_branch], stdout=devnull, stderr=stdout)
    except subprocess.CalledProcessError as e:
        print("ERROR: Cannot find merge of pull request #%s on %s." % (pull,host_repo), file=stderr)
        sys.exit(3)
    try:
        subprocess.check_call([GIT,'fetch','-q',host_repo,'+refs/heads/'+branch+':refs/heads/'+base_branch])
    except subprocess.CalledProcessError as e:
        print("ERROR: Cannot find branch %s on %s." % (branch,host_repo), file=stderr)
        sys.exit(3)
    subprocess.check_call([GIT,'checkout','-q',base_branch])
    subprocess.call([GIT,'branch','-q','-D',local_merge_branch], stderr=devnull)
    subprocess.check_call([GIT,'checkout','-q','-b',local_merge_branch])

    try:
        # Go up to the repository's root.
        toplevel = subprocess.check_output([GIT,'rev-parse','--show-toplevel']).strip()
        os.chdir(toplevel)
        # Create unsigned merge commit.
        if title:
            firstline = 'Merge #%s: %s' % (pull,title)
        else:
            firstline = 'Merge #%s' % (pull,)
        message = firstline + '\n\n'
        message += subprocess.check_output([GIT,'log','--no-merges','--topo-order','--pretty=format:%h %s (%an)',base_branch+'..'+head_branch]).decode('utf-8')
        message += '\n\nPull request description:\n\n ' + body.replace('\n', '\n ') + '\n'
        try:
            subprocess.check_call([GIT,'merge','-q','--commit','--no-edit','--no-ff','-m',message.encode('utf-8'),head_branch])
        except subprocess.CalledProcessError as e:
            print("ERROR: Cannot be merged cleanly.",file=stderr)
            subprocess.check_call([GIT,'merge','--abort'])
            sys.exit(4)
        # If HEAD's subject is not our merge subject, the merge fast-forwarded
        # (i.e. there was nothing to merge).
        logmsg = subprocess.check_output([GIT,'log','--pretty=format:%s','-n','1']).decode('utf-8')
        if logmsg.rstrip() != firstline.rstrip():
            print("ERROR: Creating merge failed (already merged?).",file=stderr)
            sys.exit(4)
        # Refuse symlinks in the merged tree (tree_sha512sum only hashes blobs).
        symlink_files = get_symlink_files()
        for f in symlink_files:
            print("ERROR: File %s was a symlink" % f)
        if len(symlink_files) > 0:
            sys.exit(4)
        # Put tree SHA512 into the message
        try:
            first_sha512 = tree_sha512sum()
            message += '\n\nTree-SHA512: ' + first_sha512
        except subprocess.CalledProcessError as e:
            print("ERROR: Unable to compute tree hash")
            sys.exit(4)
        try:
            subprocess.check_call([GIT,'commit','--amend','-m',message.encode('utf-8')])
        except subprocess.CalledProcessError as e:
            print("ERROR: Cannot update message.", file=stderr)
            sys.exit(4)

        print_merge_details(pull, title, branch, base_branch, head_branch)
        print()

        # Run test command if configured.
        if testcmd:
            if subprocess.call(testcmd,shell=True):
                print("ERROR: Running %s failed." % testcmd,file=stderr)
                sys.exit(5)
            # Show the created merge.
            diff = subprocess.check_output([GIT,'diff',merge_branch+'..'+local_merge_branch])
            subprocess.check_call([GIT,'diff',base_branch+'..'+local_merge_branch])
            if diff:
                print("WARNING: merge differs from github!",file=stderr)
                reply = ask_prompt("Type 'ignore' to continue.")
                if reply.lower() == 'ignore':
                    print("Difference with github ignored.",file=stderr)
                else:
                    sys.exit(6)
        else:
            # Verify the result manually.
            print("Dropping you on a shell so you can try building/testing the merged source.",file=stderr)
            print("Run 'git diff HEAD~' to show the changes being merged.",file=stderr)
            print("Type 'exit' when done.",file=stderr)
            if os.path.isfile('/etc/debian_version'): # Show pull number on Debian default prompt
                os.putenv('debian_chroot',pull)
            subprocess.call([BASH,'-i'])

        # Re-hash the tree: the shell/test step must not have modified sources.
        second_sha512 = tree_sha512sum()
        if first_sha512 != second_sha512:
            print("ERROR: Tree hash changed unexpectedly",file=stderr)
            sys.exit(8)

        # Sign the merge commit.
        print_merge_details(pull, title, branch, base_branch, head_branch)
        while True:
            reply = ask_prompt("Type 's' to sign off on the above merge, or 'x' to reject and exit.").lower()
            if reply == 's':
                try:
                    subprocess.check_call([GIT,'commit','-q','--gpg-sign','--amend','--no-edit'])
                    break
                except subprocess.CalledProcessError as e:
                    print("Error while signing, asking again.",file=stderr)
            elif reply == 'x':
                print("Not signing off on merge, exiting.",file=stderr)
                sys.exit(1)

        # Put the result in branch.
        subprocess.check_call([GIT,'checkout','-q',branch])
        subprocess.check_call([GIT,'reset','-q','--hard',local_merge_branch])
    finally:
        # Clean up temporary branches.
        subprocess.call([GIT,'checkout','-q',branch])
        subprocess.call([GIT,'branch','-q','-D',head_branch],stderr=devnull)
        subprocess.call([GIT,'branch','-q','-D',base_branch],stderr=devnull)
        subprocess.call([GIT,'branch','-q','-D',merge_branch],stderr=devnull)
        subprocess.call([GIT,'branch','-q','-D',local_merge_branch],stderr=devnull)

    # Push the result.
    while True:
        reply = ask_prompt("Type 'push' to push the result to %s, branch %s, or 'x' to exit without pushing." % (host_repo,branch)).lower()
        if reply == 'push':
            subprocess.check_call([GIT,'push',host_repo,'refs/heads/'+branch])
            break
        elif reply == 'x':
            sys.exit(1)
if __name__ == '__main__':
    # Script entry point: run the interactive merge workflow.
    main()
|
destenson/bitcoin--bitcoin
|
contrib/devtools/github-merge.py
|
Python
|
mit
| 14,113
| 0.014667
|
# This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
from __future__ import absolute_import, division, print_function
import six
from cryptography import utils
from cryptography.x509.oid import ObjectIdentifier
class NameAttribute(object):
    """A single X.509 name attribute: an (oid, value) pair.

    ``oid`` must be an :class:`ObjectIdentifier` and ``value`` a text
    string.  Instances are read-only, comparable and hashable.
    """

    def __init__(self, oid, value):
        # Validate argument types up front so that equality and hashing
        # semantics stay well-defined.
        if not isinstance(oid, ObjectIdentifier):
            raise TypeError(
                "oid argument must be an ObjectIdentifier instance."
            )
        if not isinstance(value, six.text_type):
            raise TypeError(
                "value argument must be a text type."
            )
        self._oid = oid
        self._value = value

    # Expose the stored pair through read-only accessors.
    oid = utils.read_only_property("_oid")
    value = utils.read_only_property("_value")

    def __eq__(self, other):
        if not isinstance(other, NameAttribute):
            return NotImplemented
        return self.oid == other.oid and self.value == other.value

    def __ne__(self, other):
        return not self == other

    def __hash__(self):
        return hash((self.oid, self.value))

    def __repr__(self):
        return "<NameAttribute(oid={0.oid}, value={0.value!r})>".format(self)
class Name(object):
    """An ordered collection of name attributes (an X.509 Name)."""

    def __init__(self, attributes):
        self._attributes = attributes

    def get_attributes_for_oid(self, oid):
        """Return every contained attribute whose OID equals *oid*, in order."""
        return [attr for attr in self if attr.oid == oid]

    def __eq__(self, other):
        if not isinstance(other, Name):
            return NotImplemented
        return self._attributes == other._attributes

    def __ne__(self, other):
        return not self == other

    def __hash__(self):
        # TODO: relatively expensive -- optimize if profiling shows this
        # to be a bottleneck.
        return hash(tuple(self._attributes))

    def __iter__(self):
        return iter(self._attributes)

    def __len__(self):
        return len(self._attributes)

    def __repr__(self):
        return "<Name({0!r})>".format(self._attributes)
|
aliyun/oss-ftp
|
python27/win32/Lib/site-packages/cryptography/x509/name.py
|
Python
|
mit
| 2,116
| 0
|
# *****************************************************************
# Copyright (c) 2013 Massachusetts Institute of Technology
#
# Developed exclusively at US Government expense under US Air Force contract
# FA8721-05-C-002. The rights of the United States Government to use, modify,
# reproduce, release, perform, display or disclose this computer software and
# computer software documentation in whole or in part, in any manner and for
# any purpose whatsoever, and to have or authorize others to do so, are
# Unrestricted and Unlimited.
#
# Licensed for use under the BSD License as described in the BSD-LICENSE.txt
# file in the root directory of this release.
#
# Project: SPAR
# Authors: SY
# Description: IBM TA2 wire class
#
# Modifications:
# Date Name Modification
# ---- ---- ------------
# 22 Oct 2012 SY Original Version
# *****************************************************************
import ibm_circuit_object as ico
class IBMInputWire(ico.IBMCircuitObject):
    """A single input wire of an IBM circuit."""

    def __init__(self, displayname, circuit):
        """Create the input wire *displayname* belonging to *circuit*.

        NOTE(review): the 0.0 / 0 arguments passed to the base constructor
        presumably denote cost/depth and level for an input -- confirm
        against IBMCircuitObject.
        """
        ico.IBMCircuitObject.__init__(self, displayname, 0.0, 0, circuit)
|
y4n9squared/HEtest
|
hetest/python/circuit_generation/ibm/ibm_wire.py
|
Python
|
bsd-2-clause
| 1,345
| 0.003717
|
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2014, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import hashlib
import itertools
import numpy
from nupic.bindings.math import Random
from nupic.encoders.base import Encoder
class CoordinateEncoder(Encoder):
  """
  Given a coordinate in an N-dimensional space, and a radius around
  that coordinate, the Coordinate Encoder returns an SDR representation
  of that position.

  The Coordinate Encoder uses an N-dimensional integer coordinate space.
  For example, a valid coordinate in this space is (150, -49, 58), whereas
  an invalid coordinate would be (55.4, -5, 85.8475).

  It uses the following algorithm:

  1. Find all the coordinates around the input coordinate, within the
  specified radius.
  2. For each coordinate, use a uniform hash function to
  deterministically map it to a real number between 0 and 1. This is the
  "order" of the coordinate.
  3. Of these coordinates, pick the top W by order, where W is the
  number of active bits desired in the SDR.
  4. For each of these W coordinates, use a uniform hash function to
  deterministically map it to one of the bits in the SDR. Make this bit active.
  5. This results in a final SDR with exactly W bits active
  (barring chance hash collisions).
  """

  def __init__(self,
               w=21,
               n=1000,
               name=None,
               verbosity=0):
    """
    See `nupic.encoders.base.Encoder` for more information.

    @param w         Number of active bits in the output SDR (must be odd)
    @param n         Total number of bits in the output SDR (must be > 6*w)
    @param name      An optional string which will become part of the description
    @param verbosity Debug-output verbosity level
    """
    # Validate inputs
    if (w <= 0) or (w % 2 == 0):
      raise ValueError("w must be an odd positive integer")

    if (n <= 6 * w) or (not isinstance(n, int)):
      raise ValueError("n must be an int strictly greater than 6*w. For "
                       "good results we recommend n be strictly greater "
                       "than 11*w")

    self.w = w
    self.n = n
    self.verbosity = verbosity
    # Coordinate encoding is not composed of sub-encoders.
    self.encoders = None

    if name is None:
      name = "[%s:%s]" % (self.n, self.w)
    self.name = name

  def getWidth(self):
    """See `nupic.encoders.base.Encoder` for more information."""
    return self.n

  def getDescription(self):
    """See `nupic.encoders.base.Encoder` for more information."""
    return [('coordinate', 0), ('radius', 1)]

  def getScalars(self, inputData):
    """See `nupic.encoders.base.Encoder` for more information."""
    # Coordinates have no meaningful scalar representation.
    return numpy.array([0]*len(inputData))

  def encodeIntoArray(self, inputData, output):
    """
    See `nupic.encoders.base.Encoder` for more information.

    @param inputData (tuple) Contains coordinate (numpy.array)
                             and radius (float)
    @param output (numpy.array) Stores encoded SDR in this numpy array
    """
    (coordinate, radius) = inputData
    # Steps 1-4 of the class-level algorithm.
    neighbors = self._neighbors(coordinate, radius)
    winners = self._topWCoordinates(neighbors, self.w)
    bitFn = lambda coordinate: self._bitForCoordinate(coordinate, self.n)
    indices = numpy.array([bitFn(w) for w in winners])
    output[:] = 0
    output[indices] = 1

  @staticmethod
  def _neighbors(coordinate, radius):
    """
    Returns coordinates around given coordinate, within given radius.
    Includes given coordinate.

    @param coordinate (numpy.array) Coordinate whose neighbors to find
    @param radius (float) Radius around `coordinate`

    @return (numpy.array) List of coordinates
    """
    # NOTE(review): range() requires integer bounds, so radius is
    # effectively assumed to be an int here -- confirm callers.
    ranges = [range(n-radius, n+radius+1) for n in coordinate.tolist()]
    return numpy.array(list(itertools.product(*ranges)))

  @classmethod
  def _topWCoordinates(cls, coordinates, w):
    """
    Returns the top W coordinates by order.

    @param coordinates (numpy.array) A 2D numpy array, where each element
                                     is a coordinate
    @param w (int) Number of top coordinates to return
    @return (numpy.array) A subset of `coordinates`, containing only the
                          top ones by order
    """
    orders = numpy.array([cls._orderForCoordinate(c)
                          for c in coordinates.tolist()])
    # argsort is ascending; the last w indices are the highest orders.
    indices = numpy.argsort(orders)[-w:]
    return coordinates[indices]

  @staticmethod
  def _hashCoordinate(coordinate):
    """Hash a coordinate to a 64 bit integer."""
    coordinateStr = ",".join(str(v) for v in coordinate)
    # Compute the hash and convert to 64 bit int.
    hash = int(int(hashlib.md5(coordinateStr).hexdigest(), 16) % (2 ** 64))
    return hash

  @classmethod
  def _orderForCoordinate(cls, coordinate):
    """
    Returns the order for a coordinate.

    @param coordinate (numpy.array) Coordinate
    @return (float) A value in the interval [0, 1), representing the
                    order of the coordinate
    """
    # Seed a deterministic RNG with the coordinate's hash so the order is
    # a pure function of the coordinate.
    seed = cls._hashCoordinate(coordinate)
    rng = Random(seed)
    return rng.getReal64()

  @classmethod
  def _bitForCoordinate(cls, coordinate, n):
    """
    Maps the coordinate to a bit in the SDR.

    @param coordinate (numpy.array) Coordinate
    @param n (int) The number of available bits in the SDR
    @return (int) The index to a bit in the SDR
    """
    seed = cls._hashCoordinate(coordinate)
    rng = Random(seed)
    return rng.getUInt32(n)

  def dump(self):
    # Debug helper: print the encoder's parameters (Python 2 syntax).
    print "CoordinateEncoder:"
    print "  w:   %d" % self.w
    print "  n:   %d" % self.n

  @classmethod
  def read(cls, proto):
    # Deserialize from a capnp-style proto object; __init__ is bypassed,
    # so no validation is re-run here.
    encoder = object.__new__(cls)
    encoder.w = proto.w
    encoder.n = proto.n
    encoder.verbosity = proto.verbosity
    encoder.name = proto.name
    return encoder

  def write(self, proto):
    # Serialize this encoder's parameters onto a proto object.
    proto.w = self.w
    proto.n = self.n
    proto.verbosity = self.verbosity
    proto.name = self.name
|
david-ragazzi/nupic
|
nupic/encoders/coordinate.py
|
Python
|
gpl-3.0
| 6,560
| 0.00564
|
# coding=utf-8
# Copyright 2020 The TF-Agents Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Loss utility code."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from typing import Optional, Text
import tensorflow as tf # pylint: disable=g-explicit-tensorflow-version-import
from tf_agents.typing import types
def pinball_loss(
    y_true: types.Tensor,
    y_pred: types.Tensor,
    weights: types.Float = 1.0,
    scope: Optional[Text] = None,
    loss_collection: tf.compat.v1.GraphKeys = tf.compat.v1.GraphKeys.LOSSES,
    reduction: tf.compat.v1.losses.Reduction = tf.compat.v1.losses.Reduction
    .SUM_BY_NONZERO_WEIGHTS,
    quantile: float = 0.5) -> types.Float:
  """Adds a Pinball loss for quantile regression.

  ```
  loss = quantile * (y_true - y_pred)        if y_true > y_pred
  loss = (quantile - 1) * (y_true - y_pred)  otherwise
  ```

  See: https://en.wikipedia.org/wiki/Quantile_regression#Quantiles

  `weights` acts as a coefficient for the loss: a scalar scales the whole
  loss, a `[batch_size]` tensor rescales each sample, and a tensor shaped
  like `predictions` rescales each measurable element.

  Args:
    y_true: tensor of true targets.
    y_pred: tensor of predicted targets.
    weights: Optional `Tensor` of rank 0, or of the same rank as `labels`
      and broadcastable to `labels`.
    scope: The scope for the operations performed in computing the loss.
    loss_collection: collection to which the loss will be added.
    reduction: Type of reduction to apply to loss.
    quantile: A float between 0. and 1., the quantile we want to regress.

  Returns:
    Weighted Pinball loss float `Tensor`. If `reduction` is `NONE`, this
    has the same shape as `labels`; otherwise, it is scalar.

  Raises:
    ValueError: If `y_true` or `y_pred` is None, or if the shape of
      `predictions` doesn't match that of `labels`, or if the shape of
      `weights` is invalid.

  @compatibility(eager)
  The `loss_collection` argument is ignored when executing eagerly. Consider
  holding on to the return value or collecting losses via a `tf.keras.Model`.
  @end_compatibility
  """
  if y_true is None:
    raise ValueError('y_true must not be None.')
  if y_pred is None:
    raise ValueError('y_pred must not be None.')
  with tf.compat.v1.name_scope(scope, 'pinball_loss',
                               (y_pred, y_true, weights)) as scope:
    y_true = tf.cast(y_true, dtype=tf.float32)
    y_pred = tf.cast(y_pred, dtype=tf.float32)
    y_pred.get_shape().assert_is_compatible_with(y_true.get_shape())
    delta = y_true - y_pred
    # Positive residuals are weighted by `quantile`, negative ones by
    # `quantile - 1`; tf.maximum picks the applicable branch elementwise.
    pinball = tf.maximum(quantile * delta, (quantile - 1) * delta)
    return tf.compat.v1.losses.compute_weighted_loss(
        pinball, weights, scope, loss_collection, reduction=reduction)
|
tensorflow/agents
|
tf_agents/bandits/policies/loss_utils.py
|
Python
|
apache-2.0
| 3,764
| 0.002125
|
'''
Notice:
1. Functions intended for jit compilation should be located in MFS.
2. For the usage of jit types and signatures, please refer to the Numba documentation <http://numba.github.com/numba-doc/0.10/index.html>
'''
from dpark import _ctx as dpark, jit, autojit
import numpy
@jit('f8(f8[:])')
def add1(x):
    """Return sum_i(i * x[i]) over a 1-D float64 array.

    Eagerly compiled by numba with an explicit f8(f8[:]) signature.
    """
    sum = 0.0
    for i in xrange(x.shape[0]):
        sum += i*x[i]
    return sum
@autojit
def add2(x):
    """Return sum_i(i * x[i]); numba infers the signature lazily at call time."""
    sum = 0.0
    for i in xrange(x.shape[0]):
        sum += i*x[i]
    return sum
def add3(x):
    """Pure-Python baseline for add1/add2: returns sum_i(i * x[i])."""
    total = 0.0
    for idx in xrange(x.shape[0]):
        total += idx * x[idx]
    return total
# Build an RDD of ten 1e7-element float64 chunks and run the three
# implementations (eager jit, autojit, plain Python) across the cluster.
rdd = dpark.makeRDD(range(0, 10)).map(lambda x: numpy.arange(x*1e7, (x+1)*1e7))
print rdd.map(add1).collect()
print rdd.map(add2).collect()
print rdd.map(add3).collect()
|
ee08b397/dpark
|
examples/jit.py
|
Python
|
bsd-3-clause
| 757
| 0.007926
|
"""An NNTP client class based on:
- RFC 977: Network News Transfer Protocol
- RFC 2980: Common NNTP Extensions
- RFC 3977: Network News Transfer Protocol (version 2)
Example:
>>> from nntplib import NNTP
>>> s = NNTP('news')
>>> resp, count, first, last, name = s.group('comp.lang.python')
>>> print('Group', name, 'has', count, 'articles, range', first, 'to', last)
Group comp.lang.python has 51 articles, range 5770 to 5821
>>> resp, subs = s.xhdr('subject', '{0}-{1}'.format(first, last))
>>> resp = s.quit()
>>>
Here 'resp' is the server response line.
Error responses are turned into exceptions.
To post an article from a file:
>>> f = open(filename, 'rb') # file containing article, including header
>>> resp = s.post(f)
>>>
For descriptions of all methods, read the comments in the code below.
Note that all arguments and return values representing article numbers
are strings, not numbers, since they are rarely used for calculations.
"""
# RFC 977 by Brian Kantor and Phil Lapsley.
# xover, xgtitle, xpath, date methods by Kevan Heydon
# Incompatible changes from the 2.x nntplib:
# - all commands are encoded as UTF-8 data (using the "surrogateescape"
# error handler), except for raw message data (POST, IHAVE)
# - all responses are decoded as UTF-8 data (using the "surrogateescape"
# error handler), except for raw message data (ARTICLE, HEAD, BODY)
# - the `file` argument to various methods is keyword-only
#
# - NNTP.date() returns a datetime object
# - NNTP.newgroups() and NNTP.newnews() take a datetime (or date) object,
# rather than a pair of (date, time) strings.
# - NNTP.newgroups() and NNTP.list() return a list of GroupInfo named tuples
# - NNTP.descriptions() returns a dict mapping group names to descriptions
# - NNTP.xover() returns a list of dicts mapping field names (header or metadata)
# to field values; each dict representing a message overview.
# - NNTP.article(), NNTP.head() and NNTP.body() return a (response, ArticleInfo)
# tuple.
# - the "internal" methods have been marked private (they now start with
# an underscore)
# Other changes from the 2.x/3.1 nntplib:
# - automatic querying of capabilities at connect
# - New method NNTP.getcapabilities()
# - New method NNTP.over()
# - New helper function decode_header()
# - NNTP.post() and NNTP.ihave() accept file objects, bytes-like objects and
# arbitrary iterables yielding lines.
# - An extensive test suite :-)
# TODO:
# - return structured data (GroupInfo etc.) everywhere
# - support HDR
# Imports
import re
import socket
import collections
import datetime
import warnings
try:
import ssl
except ImportError:
_have_ssl = False
else:
_have_ssl = True
from email.header import decode_header as _email_decode_header
from socket import _GLOBAL_DEFAULT_TIMEOUT
# Public API of the module (names exported by 'from nntplib import *').
__all__ = ["NNTP",
           "NNTPError", "NNTPReplyError", "NNTPTemporaryError",
           "NNTPPermanentError", "NNTPProtocolError", "NNTPDataError",
           "decode_header",
           ]
# maximal line length when calling readline(). This is to prevent
# reading arbitrary length lines. RFC 3977 limits NNTP line length to
# 512 characters, including CRLF. We have selected 2048 just to be on
# the safe side.  (_getline() raises NNTPDataError when exceeded.)
_MAXLINE = 2048
# Exceptions raised when an error or invalid response is received
class NNTPError(Exception):
    """Base class for all nntplib exceptions."""

    def __init__(self, *args):
        Exception.__init__(self, *args)
        # Keep the first positional argument around as the server response
        # line, falling back to a placeholder when none was supplied.
        if args:
            self.response = args[0]
        else:
            self.response = 'No response given'
class NNTPReplyError(NNTPError):
    """Unexpected [123]xx reply received from the server."""
    pass

class NNTPTemporaryError(NNTPError):
    """4xx errors (temporary failure; the request may be retried)."""
    pass

class NNTPPermanentError(NNTPError):
    """5xx errors (permanent failure)."""
    pass

class NNTPProtocolError(NNTPError):
    """Response does not begin with a digit in [1-5]."""
    pass

class NNTPDataError(NNTPError):
    """Error in response data (malformed or over-long lines)."""
    pass
# Standard port used by NNTP servers
NNTP_PORT = 119
# Standard port used by NNTP-over-SSL (NNTPS) servers
NNTP_SSL_PORT = 563

# Response numbers that are followed by additional text (e.g. article)
_LONGRESP = {
    '100',   # HELP
    '101',   # CAPABILITIES
    '211',   # LISTGROUP   (also not multi-line with GROUP)
    '215',   # LIST
    '220',   # ARTICLE
    '221',   # HEAD, XHDR
    '222',   # BODY
    '224',   # OVER, XOVER
    '225',   # HDR
    '230',   # NEWNEWS
    '231',   # NEWGROUPS
    '282',   # XGTITLE
}

# Default decoded value for LIST OVERVIEW.FMT if not supported
_DEFAULT_OVERVIEW_FMT = [
    "subject", "from", "date", "message-id", "references", ":bytes", ":lines"]

# Alternative names allowed in LIST OVERVIEW.FMT response
_OVERVIEW_FMT_ALTERNATIVES = {
    'bytes': ':bytes',
    'lines': ':lines',
}

# Line terminators (we always output CRLF, but accept any of CRLF, CR, LF)
_CRLF = b'\r\n'

# Structured results returned by the group/article querying methods.
GroupInfo = collections.namedtuple('GroupInfo',
                                   ['group', 'last', 'first', 'flag'])

ArticleInfo = collections.namedtuple('ArticleInfo',
                                     ['number', 'message_id', 'lines'])
# Helper function(s)
def decode_header(header_str):
    """Decode a (possibly RFC 2047 encoded) header value into a readable
    unicode string."""
    decoded = []
    for fragment, charset in _email_decode_header(header_str):
        if isinstance(fragment, bytes):
            # Encoded word: decode with its declared charset (ASCII default).
            decoded.append(fragment.decode(charset or 'ascii'))
        else:
            decoded.append(fragment)
    return ''.join(decoded)
def _parse_overview_fmt(lines):
    """Parse the lines of a LIST OVERVIEW.FMT response and return the list
    of header/metadata names.

    Raise NNTPDataError if the response is not compliant
    (cf. RFC 3977, section 8.4)."""
    names = []
    for line in lines:
        if line[0] == ':':
            # Metadata name (e.g. ":bytes") -- keep the leading colon.
            field, _, suffix = line[1:].partition(':')
            field = ':' + field
        else:
            # Header name (e.g. "Subject:" or "Xref:full")
            field, _, suffix = line.partition(':')
            field = field.lower()
        field = _OVERVIEW_FMT_ALTERNATIVES.get(field, field)
        # Should we do something with the suffix?
        names.append(field)
    defaults = _DEFAULT_OVERVIEW_FMT
    if len(names) < len(defaults):
        raise NNTPDataError("LIST OVERVIEW.FMT response too short")
    if names[:len(defaults)] != defaults:
        raise NNTPDataError("LIST OVERVIEW.FMT redefines default fields")
    return names
def _parse_overview(lines, fmt, data_process_func=None):
    """Parse the response to an OVER or XOVER command according to the
    overview format `fmt`.

    Returns a list of (article_number, fields_dict) pairs."""
    n_defaults = len(_DEFAULT_OVERVIEW_FMT)
    overview = []
    for line in lines:
        fields = {}
        article_number, *tokens = line.split('\t')
        article_number = int(article_number)
        for i, token in enumerate(tokens):
            if i >= len(fmt):
                # XXX should we raise an error?  Some servers might not
                # support LIST OVERVIEW.FMT and still return additional
                # headers.
                continue
            field_name = fmt[i]
            is_metadata = field_name.startswith(':')
            if i >= n_defaults and not is_metadata:
                # Non-default header names are included in full in the response
                # (unless the field is totally empty)
                header_prefix = field_name + ": "
                if token and token[:len(header_prefix)].lower() != header_prefix:
                    raise NNTPDataError("OVER/XOVER response doesn't include "
                                        "names of additional headers")
                token = token[len(header_prefix):] if token else None
            fields[field_name] = token
        overview.append((article_number, fields))
    return overview
def _parse_datetime(date_str, time_str=None):
"""Parse a pair of (date, time) strings, and return a datetime object.
If only the date is given, it is assumed to be date and time
concatenated together (e.g. response to the DATE command).
"""
if time_str is None:
time_str = date_str[-6:]
date_str = date_str[:-6]
hours = int(time_str[:2])
minutes = int(time_str[2:4])
seconds = int(time_str[4:])
year = int(date_str[:-4])
month = int(date_str[-4:-2])
day = int(date_str[-2:])
# RFC 3977 doesn't say how to interpret 2-char years. Assume that
# there are no dates before 1970 on Usenet.
if year < 70:
year += 2000
elif year < 100:
year += 1900
return datetime.datetime(year, month, day, hours, minutes, seconds)
def _unparse_datetime(dt, legacy=False):
"""Format a date or datetime object as a pair of (date, time) strings
in the format required by the NEWNEWS and NEWGROUPS commands. If a
date object is passed, the time is assumed to be midnight (00h00).
The returned representation depends on the legacy flag:
* if legacy is False (the default):
date has the YYYYMMDD format and time the HHMMSS format
* if legacy is True:
date has the YYMMDD format and time the HHMMSS format.
RFC 3977 compliant servers should understand both formats; therefore,
legacy is only needed when talking to old servers.
"""
if not isinstance(dt, datetime.datetime):
time_str = "000000"
else:
time_str = "{0.hour:02d}{0.minute:02d}{0.second:02d}".format(dt)
y = dt.year
if legacy:
y = y % 100
date_str = "{0:02d}{1.month:02d}{1.day:02d}".format(y, dt)
else:
date_str = "{0:04d}{1.month:02d}{1.day:02d}".format(y, dt)
return date_str, time_str
# TLS helper; only defined when the ssl module imported successfully.
if _have_ssl:

    def _encrypt_on(sock, context, hostname):
        """Wrap a socket in SSL/TLS. Arguments:
        - sock: Socket to wrap
        - context: SSL context to use for the encrypted connection;
          when None, a default stdlib context is created
        - hostname: server hostname passed through for SNI/verification
        Returns:
        - sock: New, encrypted socket.
        """
        # Generate a default SSL context if none was passed.
        if context is None:
            context = ssl._create_stdlib_context()
        return context.wrap_socket(sock, server_hostname=hostname)
# The classes themselves
class _NNTPBase:
    """Transport-agnostic NNTP protocol implementation; the connected
    file-like object is supplied by the caller (see __init__)."""

    # UTF-8 is the character set for all NNTP commands and responses: they
    # are automatically encoded (when sending) and decoded (and receiving)
    # by this class.
    # However, some multi-line data blocks can contain arbitrary bytes (for
    # example, latin-1 or utf-16 data in the body of a message). Commands
    # taking (POST, IHAVE) or returning (HEAD, BODY, ARTICLE) raw message
    # data will therefore only accept and produce bytes objects.
    # Furthermore, since there could be non-compliant servers out there,
    # we use 'surrogateescape' as the error handler for fault tolerance
    # and easy round-tripping. This could be useful for some applications
    # (e.g. NNTP gateways).

    # Encoding / error handler used for command and response lines.
    encoding = 'utf-8'
    errors = 'surrogateescape'
    def __init__(self, file, host,
                 readermode=None, timeout=_GLOBAL_DEFAULT_TIMEOUT):
        """Initialize an instance.  Arguments:
        - file: file-like object (open for read/write in binary mode)
        - host: hostname of the server
        - readermode: if true, send 'mode reader' command after
          connecting.
        - timeout: timeout (in seconds) used for socket connections

        readermode is sometimes necessary if you are connecting to an
        NNTP server on the local machine and intend to call
        reader-specific commands, such as `group'.  If you get
        unexpected NNTPPermanentErrors, you might need to set
        readermode.
        """
        self.host = host
        self.file = file
        self.debugging = 0
        # The server greets us immediately; read and keep the welcome line.
        self.welcome = self._getresp()

        # Inquire about capabilities (RFC 3977).
        self._caps = None
        self.getcapabilities()

        # 'MODE READER' is sometimes necessary to enable 'reader' mode.
        # However, the order in which 'MODE READER' and 'AUTHINFO' need to
        # arrive differs between some NNTP servers. If _setreadermode() fails
        # with an authorization failed error, it will set this to True;
        # the login() routine will interpret that as a request to try again
        # after performing its normal function.
        # Enable only if we're not already in READER mode anyway.
        self.readermode_afterauth = False
        if readermode and 'READER' not in self._caps:
            self._setreadermode()
            if not self.readermode_afterauth:
                # Capabilities might have changed after MODE READER
                self._caps = None
                self.getcapabilities()

        # RFC 4642 2.2.2: Both the client and the server MUST know if there is
        # a TLS session active.  A client MUST NOT attempt to start a TLS
        # session if a TLS session is already active.
        self.tls_on = False

        # Log in and encryption setup order is left to subclasses.
        self.authenticated = False
    def __enter__(self):
        # Context-manager support: 'with NNTP(...) as s:' binds the
        # connection object itself.
        return self
def __exit__(self, *args):
is_connected = lambda: hasattr(self, "file")
if is_connected():
try:
self.quit()
except (OSError, EOFError):
pass
finally:
if is_connected():
self._close()
def getwelcome(self):
"""Get the welcome message from the server
(this is read and squirreled away by __init__()).
If the response code is 200, posting is allowed;
if it 201, posting is not allowed."""
if self.debugging: print('*welcome*', repr(self.welcome))
return self.welcome
def getcapabilities(self):
"""Get the server capabilities, as read by __init__().
If the CAPABILITIES command is not supported, an empty dict is
returned."""
if self._caps is None:
self.nntp_version = 1
self.nntp_implementation = None
try:
resp, caps = self.capabilities()
except (NNTPPermanentError, NNTPTemporaryError):
# Server doesn't support capabilities
self._caps = {}
else:
self._caps = caps
if 'VERSION' in caps:
# The server can advertise several supported versions,
# choose the highest.
self.nntp_version = max(map(int, caps['VERSION']))
if 'IMPLEMENTATION' in caps:
self.nntp_implementation = ' '.join(caps['IMPLEMENTATION'])
return self._caps
def set_debuglevel(self, level):
"""Set the debugging level. Argument 'level' means:
0: no debugging output (default)
1: print commands and responses but not body text etc.
2: also print raw lines read and sent before stripping CR/LF"""
self.debugging = level
debug = set_debuglevel
    def _putline(self, line):
        """Internal: send one line to the server, appending CRLF.
        The `line` must be a bytes-like object."""
        line = line + _CRLF
        if self.debugging > 1: print('*put*', repr(line))
        # Flush immediately: each command must reach the server before we
        # block waiting for its response.
        self.file.write(line)
        self.file.flush()
    def _putcmd(self, line):
        """Internal: send one command to the server (through _putline()).
        The `line` must be an unicode string."""
        if self.debugging: print('*cmd*', repr(line))
        # Commands are str at the API level; the wire protocol is bytes.
        line = line.encode(self.encoding, self.errors)
        self._putline(line)
    def _getline(self, strip_crlf=True):
        """Internal: return one line from the server, stripping _CRLF.
        Raise EOFError if the connection is closed.
        Returns a bytes object."""
        # Read one extra byte so that a line of exactly _MAXLINE bytes can
        # be distinguished from an overlong (truncated) one.
        line = self.file.readline(_MAXLINE +1)
        if len(line) > _MAXLINE:
            raise NNTPDataError('line too long')
        if self.debugging > 1:
            print('*get*', repr(line))
        # An empty read means the peer closed the connection.
        if not line: raise EOFError
        if strip_crlf:
            # Tolerate both CRLF and bare CR or LF terminators.
            if line[-2:] == _CRLF:
                line = line[:-2]
            elif line[-1:] in _CRLF:
                line = line[:-1]
        return line
def _getresp(self):
"""Internal: get a response from the server.
Raise various errors if the response indicates an error.
Returns an unicode string."""
resp = self._getline()
if self.debugging: print('*resp*', repr(resp))
resp = resp.decode(self.encoding, self.errors)
c = resp[:1]
if c == '4':
raise NNTPTemporaryError(resp)
if c == '5':
raise NNTPPermanentError(resp)
if c not in '123':
raise NNTPProtocolError(resp)
return resp
    def _getlongresp(self, file=None):
        """Internal: get a response plus following text from the server.
        Raise various errors if the response indicates an error.
        Returns a (response, lines) tuple where `response` is an unicode
        string and `lines` is a list of bytes objects.
        If `file` is a file-like object, it must be open in binary mode.
        """
        openedFile = None
        try:
            # If a string was passed then open a file with that name
            if isinstance(file, (str, bytes)):
                openedFile = file = open(file, "wb")
            resp = self._getresp()
            if resp[:3] not in _LONGRESP:
                raise NNTPReplyError(resp)
            lines = []
            if file is not None:
                # XXX lines = None instead?
                # File mode: keep the CRLF terminators, so look for the
                # terminating '.' with them still attached.
                terminators = (b'.' + _CRLF, b'.\n')
                while 1:
                    line = self._getline(False)
                    if line in terminators:
                        break
                    # Undo RFC 3977 dot-stuffing: '..' at line start -> '.'
                    if line.startswith(b'..'):
                        line = line[1:]
                    file.write(line)
            else:
                # List mode: lines are CRLF-stripped, so the terminator is
                # a bare '.'.
                terminator = b'.'
                while 1:
                    line = self._getline()
                    if line == terminator:
                        break
                    if line.startswith(b'..'):
                        line = line[1:]
                    lines.append(line)
        finally:
            # If this method created the file, then it must close it
            if openedFile:
                openedFile.close()
        return resp, lines
def _shortcmd(self, line):
"""Internal: send a command and get the response.
Same return value as _getresp()."""
self._putcmd(line)
return self._getresp()
def _longcmd(self, line, file=None):
"""Internal: send a command and get the response plus following text.
Same return value as _getlongresp()."""
self._putcmd(line)
return self._getlongresp(file)
def _longcmdstring(self, line, file=None):
"""Internal: send a command and get the response plus following text.
Same as _longcmd() and _getlongresp(), except that the returned `lines`
are unicode strings rather than bytes objects.
"""
self._putcmd(line)
resp, list = self._getlongresp(file)
return resp, [line.decode(self.encoding, self.errors)
for line in list]
    def _getoverviewfmt(self):
        """Internal: get the overview format. Queries the server if not
        already done, else returns the cached value."""
        try:
            return self._cachedoverviewfmt
        except AttributeError:
            # First call on this instance: fall through and fetch it.
            pass
        try:
            resp, lines = self._longcmdstring("LIST OVERVIEW.FMT")
        except NNTPPermanentError:
            # Not supported by server?
            fmt = _DEFAULT_OVERVIEW_FMT[:]
        else:
            fmt = _parse_overview_fmt(lines)
        # Cache the result so later xover()/over() calls skip the query.
        self._cachedoverviewfmt = fmt
        return fmt
    def _grouplist(self, lines):
        """Internal: parse 'group last first flag' lines into a list of
        GroupInfo named tuples."""
        # Parse lines into "group last first flag"
        return [GroupInfo(*line.split()) for line in lines]
def capabilities(self):
"""Process a CAPABILITIES command. Not supported by all servers.
Return:
- resp: server response if successful
- caps: a dictionary mapping capability names to lists of tokens
(for example {'VERSION': ['2'], 'OVER': [], LIST: ['ACTIVE', 'HEADERS'] })
"""
caps = {}
resp, lines = self._longcmdstring("CAPABILITIES")
for line in lines:
name, *tokens = line.split()
caps[name] = tokens
return resp, caps
def newgroups(self, date, *, file=None):
"""Process a NEWGROUPS command. Arguments:
- date: a date or datetime object
Return:
- resp: server response if successful
- list: list of newsgroup names
"""
if not isinstance(date, (datetime.date, datetime.date)):
raise TypeError(
"the date parameter must be a date or datetime object, "
"not '{:40}'".format(date.__class__.__name__))
date_str, time_str = _unparse_datetime(date, self.nntp_version < 2)
cmd = 'NEWGROUPS {0} {1}'.format(date_str, time_str)
resp, lines = self._longcmdstring(cmd, file)
return resp, self._grouplist(lines)
def newnews(self, group, date, *, file=None):
"""Process a NEWNEWS command. Arguments:
- group: group name or '*'
- date: a date or datetime object
Return:
- resp: server response if successful
- list: list of message ids
"""
if not isinstance(date, (datetime.date, datetime.date)):
raise TypeError(
"the date parameter must be a date or datetime object, "
"not '{:40}'".format(date.__class__.__name__))
date_str, time_str = _unparse_datetime(date, self.nntp_version < 2)
cmd = 'NEWNEWS {0} {1} {2}'.format(group, date_str, time_str)
return self._longcmdstring(cmd, file)
def list(self, group_pattern=None, *, file=None):
"""Process a LIST or LIST ACTIVE command. Arguments:
- group_pattern: a pattern indicating which groups to query
- file: Filename string or file object to store the result in
Returns:
- resp: server response if successful
- list: list of (group, last, first, flag) (strings)
"""
if group_pattern is not None:
command = 'LIST ACTIVE ' + group_pattern
else:
command = 'LIST'
resp, lines = self._longcmdstring(command, file)
return resp, self._grouplist(lines)
    def _getdescriptions(self, group_pattern, return_all):
        """Internal: fetch group descriptions via LIST NEWSGROUPS, falling
        back to the deprecated XGTITLE command.
        NOTE: the return type depends on `return_all`:
        - return_all false: the first matching description as a str
          ('' when nothing matches) -- used by description().
        - return_all true: a (resp, {name: description}) tuple -- used by
          descriptions()."""
        line_pat = re.compile('^(?P<group>[^ \t]+)[ \t]+(.*)$')
        # Try the more std (acc. to RFC2980) LIST NEWSGROUPS first
        resp, lines = self._longcmdstring('LIST NEWSGROUPS ' + group_pattern)
        if not resp.startswith('215'):
            # Now the deprecated XGTITLE. This either raises an error
            # or succeeds with the same output structure as LIST
            # NEWSGROUPS.
            resp, lines = self._longcmdstring('XGTITLE ' + group_pattern)
        groups = {}
        for raw_line in lines:
            match = line_pat.search(raw_line.strip())
            if match:
                name, desc = match.group(1, 2)
                if not return_all:
                    # Single-description mode: first match wins.
                    return desc
                groups[name] = desc
        if return_all:
            return resp, groups
        else:
            # Nothing found
            return ''
def description(self, group):
"""Get a description for a single group. If more than one
group matches ('group' is a pattern), return the first. If no
group matches, return an empty string.
This elides the response code from the server, since it can
only be '215' or '285' (for xgtitle) anyway. If the response
code is needed, use the 'descriptions' method.
NOTE: This neither checks for a wildcard in 'group' nor does
it check whether the group actually exists."""
return self._getdescriptions(group, False)
def descriptions(self, group_pattern):
"""Get descriptions for a range of groups."""
return self._getdescriptions(group_pattern, True)
def group(self, name):
"""Process a GROUP command. Argument:
- group: the group name
Returns:
- resp: server response if successful
- count: number of articles
- first: first article number
- last: last article number
- name: the group name
"""
resp = self._shortcmd('GROUP ' + name)
if not resp.startswith('211'):
raise NNTPReplyError(resp)
words = resp.split()
count = first = last = 0
n = len(words)
if n > 1:
count = words[1]
if n > 2:
first = words[2]
if n > 3:
last = words[3]
if n > 4:
name = words[4].lower()
return resp, int(count), int(first), int(last), name
def help(self, *, file=None):
"""Process a HELP command. Argument:
- file: Filename string or file object to store the result in
Returns:
- resp: server response if successful
- list: list of strings returned by the server in response to the
HELP command
"""
return self._longcmdstring('HELP', file)
def _statparse(self, resp):
"""Internal: parse the response line of a STAT, NEXT, LAST,
ARTICLE, HEAD or BODY command."""
if not resp.startswith('22'):
raise NNTPReplyError(resp)
words = resp.split()
art_num = int(words[1])
message_id = words[2]
return resp, art_num, message_id
def _statcmd(self, line):
"""Internal: process a STAT, NEXT or LAST command."""
resp = self._shortcmd(line)
return self._statparse(resp)
def stat(self, message_spec=None):
"""Process a STAT command. Argument:
- message_spec: article number or message id (if not specified,
the current article is selected)
Returns:
- resp: server response if successful
- art_num: the article number
- message_id: the message id
"""
if message_spec:
return self._statcmd('STAT {0}'.format(message_spec))
else:
return self._statcmd('STAT')
def next(self):
"""Process a NEXT command. No arguments. Return as for STAT."""
return self._statcmd('NEXT')
def last(self):
"""Process a LAST command. No arguments. Return as for STAT."""
return self._statcmd('LAST')
    def _artcmd(self, line, file=None):
        """Internal: process a HEAD, BODY or ARTICLE command.
        Returns (resp, ArticleInfo(art_num, message_id, lines))."""
        resp, lines = self._longcmd(line, file)
        # The status line carries the article number and message id.
        resp, art_num, message_id = self._statparse(resp)
        return resp, ArticleInfo(art_num, message_id, lines)
def head(self, message_spec=None, *, file=None):
"""Process a HEAD command. Argument:
- message_spec: article number or message id
- file: filename string or file object to store the headers in
Returns:
- resp: server response if successful
- ArticleInfo: (article number, message id, list of header lines)
"""
if message_spec is not None:
cmd = 'HEAD {0}'.format(message_spec)
else:
cmd = 'HEAD'
return self._artcmd(cmd, file)
def body(self, message_spec=None, *, file=None):
"""Process a BODY command. Argument:
- message_spec: article number or message id
- file: filename string or file object to store the body in
Returns:
- resp: server response if successful
- ArticleInfo: (article number, message id, list of body lines)
"""
if message_spec is not None:
cmd = 'BODY {0}'.format(message_spec)
else:
cmd = 'BODY'
return self._artcmd(cmd, file)
def article(self, message_spec=None, *, file=None):
"""Process an ARTICLE command. Argument:
- message_spec: article number or message id
- file: filename string or file object to store the article in
Returns:
- resp: server response if successful
- ArticleInfo: (article number, message id, list of article lines)
"""
if message_spec is not None:
cmd = 'ARTICLE {0}'.format(message_spec)
else:
cmd = 'ARTICLE'
return self._artcmd(cmd, file)
def slave(self):
"""Process a SLAVE command. Returns:
- resp: server response if successful
"""
return self._shortcmd('SLAVE')
def xhdr(self, hdr, str, *, file=None):
"""Process an XHDR command (optional server extension). Arguments:
- hdr: the header type (e.g. 'subject')
- str: an article nr, a message id, or a range nr1-nr2
- file: Filename string or file object to store the result in
Returns:
- resp: server response if successful
- list: list of (nr, value) strings
"""
pat = re.compile('^([0-9]+) ?(.*)\n?')
resp, lines = self._longcmdstring('XHDR {0} {1}'.format(hdr, str), file)
def remove_number(line):
m = pat.match(line)
return m.group(1, 2) if m else line
return resp, [remove_number(line) for line in lines]
def xover(self, start, end, *, file=None):
"""Process an XOVER command (optional server extension) Arguments:
- start: start of range
- end: end of range
- file: Filename string or file object to store the result in
Returns:
- resp: server response if successful
- list: list of dicts containing the response fields
"""
resp, lines = self._longcmdstring('XOVER {0}-{1}'.format(start, end),
file)
fmt = self._getoverviewfmt()
return resp, _parse_overview(lines, fmt)
    def over(self, message_spec, *, file=None):
        """Process an OVER command. If the command isn't supported, fall
        back to XOVER. Arguments:
        - message_spec:
        - either a message id, indicating the article to fetch
        information about
        - or a (start, end) tuple, indicating a range of article numbers;
        if end is None, information up to the newest message will be
        retrieved
        - or None, indicating the current article number must be used
        - file: Filename string or file object to store the result in
        Returns:
        - resp: server response if successful
        - list: list of dicts containing the response fields
        NOTE: the "message id" form isn't supported by XOVER
        """
        # Prefer the RFC 3977 OVER command when the server advertises it.
        cmd = 'OVER' if 'OVER' in self._caps else 'XOVER'
        if isinstance(message_spec, (tuple, list)):
            start, end = message_spec
            # An open-ended range is spelled 'start-' on the wire.
            cmd += ' {0}-{1}'.format(start, end or '')
        elif message_spec is not None:
            cmd = cmd + ' ' + message_spec
        resp, lines = self._longcmdstring(cmd, file)
        fmt = self._getoverviewfmt()
        return resp, _parse_overview(lines, fmt)
def xgtitle(self, group, *, file=None):
"""Process an XGTITLE command (optional server extension) Arguments:
- group: group name wildcard (i.e. news.*)
Returns:
- resp: server response if successful
- list: list of (name,title) strings"""
warnings.warn("The XGTITLE extension is not actively used, "
"use descriptions() instead",
DeprecationWarning, 2)
line_pat = re.compile('^([^ \t]+)[ \t]+(.*)$')
resp, raw_lines = self._longcmdstring('XGTITLE ' + group, file)
lines = []
for raw_line in raw_lines:
match = line_pat.search(raw_line.strip())
if match:
lines.append(match.group(1, 2))
return resp, lines
def xpath(self, id):
"""Process an XPATH command (optional server extension) Arguments:
- id: Message id of article
Returns:
resp: server response if successful
path: directory path to article
"""
warnings.warn("The XPATH extension is not actively used",
DeprecationWarning, 2)
resp = self._shortcmd('XPATH {0}'.format(id))
if not resp.startswith('223'):
raise NNTPReplyError(resp)
try:
[resp_num, path] = resp.split()
except ValueError:
raise NNTPReplyError(resp)
else:
return resp, path
def date(self):
"""Process the DATE command.
Returns:
- resp: server response if successful
- date: datetime object
"""
resp = self._shortcmd("DATE")
if not resp.startswith('111'):
raise NNTPReplyError(resp)
elem = resp.split()
if len(elem) != 2:
raise NNTPDataError(resp)
date = elem[1]
if len(date) != 14:
raise NNTPDataError(resp)
return resp, _parse_datetime(date, None)
    def _post(self, command, f):
        """Internal: send POST/IHAVE `command` followed by the article in
        `f` (bytes, iterable of byte lines, or binary file), applying
        RFC 3977 dot-stuffing and CRLF line endings, then return the
        server's final response."""
        resp = self._shortcmd(command)
        # Raises a specific exception if posting is not allowed
        if not resp.startswith('3'):
            raise NNTPReplyError(resp)
        if isinstance(f, (bytes, bytearray)):
            f = f.splitlines()
        # We don't use _putline() because:
        # - we don't want additional CRLF if the file or iterable is already
        #   in the right format
        # - we don't want a spurious flush() after each line is written
        for line in f:
            if not line.endswith(_CRLF):
                line = line.rstrip(b"\r\n") + _CRLF
            # Dot-stuff: a leading '.' must be doubled on the wire.
            if line.startswith(b'.'):
                line = b'.' + line
            self.file.write(line)
        # Lone '.' terminates the article.
        self.file.write(b".\r\n")
        self.file.flush()
        return self._getresp()
def post(self, data):
"""Process a POST command. Arguments:
- data: bytes object, iterable or file containing the article
Returns:
- resp: server response if successful"""
return self._post('POST', data)
def ihave(self, message_id, data):
"""Process an IHAVE command. Arguments:
- message_id: message-id of the article
- data: file containing the article
Returns:
- resp: server response if successful
Note that if the server refuses the article an exception is raised."""
return self._post('IHAVE {0}'.format(message_id), data)
def _close(self):
self.file.close()
del self.file
def quit(self):
"""Process a QUIT command and close the socket. Returns:
- resp: server response if successful"""
try:
resp = self._shortcmd('QUIT')
finally:
self._close()
return resp
    def login(self, user=None, password=None, usenetrc=True):
        """Authenticate with AUTHINFO USER/PASS. Arguments:
        - user: username (looked up in ~/.netrc when omitted and
          `usenetrc` is true)
        - password: password for `user`
        - usenetrc: whether ~/.netrc may supply the credentials
        Raises ValueError when already authenticated or when neither
        `user` nor `usenetrc` is given; NNTPPermanentError on a failed
        login."""
        if self.authenticated:
            raise ValueError("Already logged in.")
        if not user and not usenetrc:
            raise ValueError(
                "At least one of `user` and `usenetrc` must be specified")
        # If no login/password was specified but netrc was requested,
        # try to get them from ~/.netrc
        # Presume that if .netrc has an entry, NNRP authentication is required.
        try:
            if usenetrc and not user:
                import netrc
                credentials = netrc.netrc()
                auth = credentials.authenticators(self.host)
                if auth:
                    user = auth[0]
                    password = auth[2]
        except OSError:
            # Missing/unreadable ~/.netrc is not an error here.
            pass
        # Perform NNTP authentication if needed.
        if not user:
            return
        resp = self._shortcmd('authinfo user ' + user)
        if resp.startswith('381'):
            # 381: server wants a password to complete authentication.
            if not password:
                raise NNTPReplyError(resp)
            else:
                resp = self._shortcmd('authinfo pass ' + password)
                if not resp.startswith('281'):
                    raise NNTPPermanentError(resp)
        # Capabilities might have changed after login
        self._caps = None
        self.getcapabilities()
        # Attempt to send mode reader if it was requested after login.
        # Only do so if we're not in reader mode already.
        if self.readermode_afterauth and 'READER' not in self._caps:
            self._setreadermode()
            # Capabilities might have changed after MODE READER
            self._caps = None
            self.getcapabilities()
    def _setreadermode(self):
        """Internal: send 'mode reader', updating the stored welcome
        banner on success. A 480 response defers the attempt until after
        login() (see readermode_afterauth); other 5xx responses are
        ignored as 'not implemented'."""
        try:
            self.welcome = self._shortcmd('mode reader')
        except NNTPPermanentError:
            # Error 5xx, probably 'not implemented'
            pass
        except NNTPTemporaryError as e:
            if e.response.startswith('480'):
                # Need authorization before 'mode reader'
                self.readermode_afterauth = True
            else:
                raise
    if _have_ssl:
        def starttls(self, context=None):
            """Process a STARTTLS command. Arguments:
            - context: SSL context to use for the encrypted connection
            Raises ValueError if TLS is already active or authentication
            has already happened; NNTPError if the server refuses."""
            # Per RFC 4642, STARTTLS MUST NOT be sent after authentication or if
            # a TLS session already exists.
            if self.tls_on:
                raise ValueError("TLS is already enabled.")
            if self.authenticated:
                raise ValueError("TLS cannot be started after authentication.")
            resp = self._shortcmd('STARTTLS')
            if resp.startswith('382'):
                # Replace the plaintext file wrapper with one over the
                # now-encrypted socket.
                self.file.close()
                self.sock = _encrypt_on(self.sock, context, self.host)
                self.file = self.sock.makefile("rwb")
                self.tls_on = True
                # Capabilities may change after TLS starts up, so ask for them
                # again.
                self._caps = None
                self.getcapabilities()
            else:
                raise NNTPError("TLS failed to start.")
class NNTP(_NNTPBase):
    """NNTP client over a plain TCP connection."""
    def __init__(self, host, port=NNTP_PORT, user=None, password=None,
                 readermode=None, usenetrc=False,
                 timeout=_GLOBAL_DEFAULT_TIMEOUT):
        """Initialize an instance. Arguments:
        - host: hostname to connect to
        - port: port to connect to (default the standard NNTP port)
        - user: username to authenticate with
        - password: password to use with username
        - readermode: if true, send 'mode reader' command after
        connecting.
        - usenetrc: allow loading username and password from ~/.netrc file
        if not specified explicitly
        - timeout: timeout (in seconds) used for socket connections
        readermode is sometimes necessary if you are connecting to an
        NNTP server on the local machine and intend to call
        reader-specific commands, such as `group'. If you get
        unexpected NNTPPermanentErrors, you might need to set
        readermode.
        """
        self.host = host
        self.port = port
        self.sock = socket.create_connection((host, port), timeout)
        file = None
        try:
            file = self.sock.makefile("rwb")
            _NNTPBase.__init__(self, file, host,
                               readermode, timeout)
            if user or usenetrc:
                self.login(user, password, usenetrc)
        except:
            # Don't leak the socket/file if the handshake or login fails.
            if file:
                file.close()
            self.sock.close()
            raise

    def _close(self):
        # Close the file wrapper first, then the underlying socket --
        # always, even if closing the file raised.
        try:
            _NNTPBase._close(self)
        finally:
            self.sock.close()
if _have_ssl:
    class NNTP_SSL(_NNTPBase):
        """NNTP client over an implicit-TLS connection (NNTPS)."""
        def __init__(self, host, port=NNTP_SSL_PORT,
                     user=None, password=None, ssl_context=None,
                     readermode=None, usenetrc=False,
                     timeout=_GLOBAL_DEFAULT_TIMEOUT):
            """This works identically to NNTP.__init__, except for the change
            in default port and the `ssl_context` argument for SSL connections.
            """
            self.sock = socket.create_connection((host, port), timeout)
            file = None
            try:
                # Wrap the socket in TLS before any protocol traffic.
                self.sock = _encrypt_on(self.sock, ssl_context, host)
                file = self.sock.makefile("rwb")
                _NNTPBase.__init__(self, file, host,
                                   readermode=readermode, timeout=timeout)
                if user or usenetrc:
                    self.login(user, password, usenetrc)
            except:
                # Don't leak the socket/file if setup fails.
                if file:
                    file.close()
                self.sock.close()
                raise

        def _close(self):
            # Close the file wrapper first, then the socket -- always.
            try:
                _NNTPBase._close(self)
            finally:
                self.sock.close()

    __all__.append("NNTP_SSL")
# Test retrieval when run as a script.
if __name__ == '__main__':
    import argparse
    parser = argparse.ArgumentParser(description="""\
    nntplib built-in demo - display the latest articles in a newsgroup""")
    parser.add_argument('-g', '--group', default='gmane.comp.python.general',
                        help='group to fetch messages from (default: %(default)s)')
    parser.add_argument('-s', '--server', default='news.gmane.org',
                        help='NNTP server hostname (default: %(default)s)')
    parser.add_argument('-p', '--port', default=-1, type=int,
                        help='NNTP port number (default: %s / %s)' % (NNTP_PORT, NNTP_SSL_PORT))
    parser.add_argument('-n', '--nb-articles', default=10, type=int,
                        help='number of articles to fetch (default: %(default)s)')
    parser.add_argument('-S', '--ssl', action='store_true', default=False,
                        help='use NNTP over SSL')
    args = parser.parse_args()
    port = args.port
    # -1 means "no port given": pick the protocol's default.
    if not args.ssl:
        if port == -1:
            port = NNTP_PORT
        s = NNTP(host=args.server, port=port)
    else:
        if port == -1:
            port = NNTP_SSL_PORT
        s = NNTP_SSL(host=args.server, port=port)
    caps = s.getcapabilities()
    # Opportunistically upgrade plaintext connections to TLS.
    if 'STARTTLS' in caps:
        s.starttls()
    resp, count, first, last, name = s.group(args.group)
    print('Group', name, 'has', count, 'articles, range', first, 'to', last)
    def cut(s, lim):
        # Truncate s to lim characters, marking the cut with an ellipsis.
        if len(s) > lim:
            s = s[:lim - 4] + "..."
        return s
    first = str(int(last) - args.nb_articles + 1)
    resp, overviews = s.xover(first, last)
    for artnum, over in overviews:
        author = decode_header(over['from']).split('<', 1)[0]
        subject = decode_header(over['subject'])
        lines = int(over[':lines'])
        print("{:7} {:20} {:42} ({})".format(
            artnum, cut(author, 20), cut(subject, 42), lines)
        )
    s.quit()
|
MalloyPower/parsing-python
|
front-end/testsuite-python-lib/Python-3.5.0/Lib/nntplib.py
|
Python
|
mit
| 43,081
| 0.000696
|
# QUANTCONNECT.COM - Democratizing Finance, Empowering Individuals.
# Lean Algorithmic Trading Engine v2.0. Copyright 2014 QuantConnect Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from clr import AddReference
AddReference("System")
AddReference("QuantConnect.Algorithm")
AddReference("QuantConnect.Common")
AddReference("QuantConnect.Indicators")
from System import *
from QuantConnect import *
from QuantConnect.Indicators import *
from QuantConnect.Data import *
from QuantConnect.Data.Market import *
from QuantConnect.Data.Custom import *
from QuantConnect.Algorithm import *
from QuantConnect.Python import PythonQuandl
### <summary>
### The algorithm creates new indicator value with the existing indicator method by Indicator Extensions
### Demonstration of using the external custom datasource Quandl to request the VIX and VXV daily data
### </summary>
### <meta name="tag" content="using data" />
### <meta name="tag" content="using quantconnect" />
### <meta name="tag" content="custom data" />
### <meta name="tag" content="indicators" />
### <meta name="tag" content="indicator classes" />
### <meta name="tag" content="plotting indicators" />
### <meta name="tag" content="charting" />
class CustomDataIndicatorExtensionsAlgorithm(QCAlgorithm):
    """Trades VIX based on the VXV/VIX ratio, built from Quandl custom
    data via IndicatorExtensions."""
    # Initialize the data and resolution you require for your strategy
    def Initialize(self):
        self.SetStartDate(2014,1,1)
        self.SetEndDate(2018,1,1)
        self.SetCash(25000)
        # Quandl dataset codes used as symbols below.
        self.vix = 'CBOE/VIX'
        self.vxv = 'CBOE/VXV'
        # Define the symbol and "type" of our generic data
        # QuandlVix (defined below) remaps the value column to "VIX Close".
        self.AddData(QuandlVix, self.vix, Resolution.Daily)
        self.AddData(Quandl, self.vxv, Resolution.Daily)
        # Set up default Indicators, these are just 'identities' of the closing price
        # (SMA with period 1 passes the latest value through unchanged).
        self.vix_sma = self.SMA(self.vix, 1, Resolution.Daily)
        self.vxv_sma = self.SMA(self.vxv, 1, Resolution.Daily)
        # This will create a new indicator whose value is smaVXV / smaVIX
        self.ratio = IndicatorExtensions.Over(self.vxv_sma, self.vix_sma)
        # Plot indicators each time they update using the PlotIndicator function
        self.PlotIndicator("Ratio", self.ratio)
        self.PlotIndicator("Data", self.vix_sma, self.vxv_sma)
    # OnData event is the primary entry point for your algorithm. Each new data point will be pumped in here.
    def OnData(self, data):
        # Wait for all indicators to fully initialize
        if not (self.vix_sma.IsReady and self.vxv_sma.IsReady and self.ratio.IsReady): return
        # Long VIX while VXV/VIX > 1; flat otherwise.
        if not self.Portfolio.Invested and self.ratio.Current.Value > 1:
            self.MarketOrder(self.vix, 100)
        elif self.ratio.Current.Value < 1:
            self.Liquidate()
# In CBOE/VIX data, there is a "vix close" column instead of "close", which is the
# default column name in the LEAN Quandl custom data implementation.
# This class assigns a new column name to match the external datasource setting.
class QuandlVix(PythonQuandl):
    """Quandl reader whose value column is "VIX Close" instead of the
    default "close"."""
    def __init__(self):
        # Tell LEAN which CSV column holds the value for CBOE/VIX rows.
        self.ValueColumnName = "VIX Close"
|
AnshulYADAV007/Lean
|
Algorithm.Python/CustomDataIndicatorExtensionsAlgorithm.py
|
Python
|
apache-2.0
| 3,632
| 0.013499
|
from distutils.core import setup, Extension, Command
from distutils.command.build import build
from distutils.command.build_ext import build_ext
from distutils.command.config import config
from distutils.msvccompiler import MSVCCompiler
from distutils import sysconfig
import string
import sys
# Base names of the pre-built Metakit object files that get linked into
# the Mk4py extension (prefix/suffix are added in build_mkext).
mkobjs = ['column', 'custom', 'derived', 'fileio', 'field',
          'format', 'handler', 'persist', 'remap', 'std',
          'store', 'string', 'table', 'univ', 'view', 'viewx']
class config_mk(config):
    """Config step: probe for unicodeobject.h and, when found, define
    HAVE_UNICODEOBJECT_H for the extension build."""
    def run(self):
        # work around bug in Python 2.2-supplied check_header, fixed
        # in Python 2.3; body needs to be a valid, non-zero-length string
        if self.try_cpp(body="/* body */", headers=['unicodeobject.h'],
                        include_dirs=[sysconfig.get_python_inc()]):
            build = self.distribution.reinitialize_command('build_ext')
            build.define = 'HAVE_UNICODEOBJECT_H'
        # trust that mk4.h provides the correct HAVE_LONG_LONG value,
        # since Mk4py doesn't #include "config.h"
class build_mk(build):
    """Build step that defaults the build tree to ../builds."""
    def initialize_options(self):
        # build in builds directory by default, unless specified otherwise
        build.initialize_options(self)
        self.build_base = '../builds'
class build_mkext(build_ext):
    """build_ext step that forces a C++ compiler, applies platform
    workarounds and resolves the mkobjs names to real object files."""
    def finalize_options(self):
        self.run_command('config')
        # force use of C++ compiler (helps on some platforms)
        import os
        cc = os.environ.get('CXX', sysconfig.get_config_var('CXX'))
        if not cc:
            cc = sysconfig.get_config_var('CCC')  # Python 1.5.2
        if cc:
            os.environ['CC'] = cc
        build_ext.finalize_options(self)

    def build_extension(self, ext):
        # work around linker problem with MacPython 2.3
        if sys.platform == 'darwin':
            try:
                self.compiler.linker_so.remove("-Wl,-x")
            except: pass
        # work around linker problem with Linux, Python 2.2 and earlier:
        # despite setting $CC above, still uses Python compiler
        if sys.platform == 'linux2':
            try:
                ext.libraries.append("stdc++")
            except: pass
        if ext.name == "Mk4py":
            # Pick the object-file location/extension for this compiler.
            if isinstance(self.compiler, MSVCCompiler):
                suffix = '.obj'
                if self.debug:
                    prefix = '../builds/msvc60/mklib/Debug/'
                else:
                    prefix = '../builds/msvc60/mklib/Release/'
            else:
                suffix = '.o'
                prefix = '../builds/'
            # Rewrite bare mkobjs names into full object-file paths in place.
            for i in range(len(ext.extra_objects)):
                nm = ext.extra_objects[i]
                if nm in mkobjs:
                    if string.find(nm, '.') == -1:
                        nm = nm + suffix
                    nm = prefix + nm
                    ext.extra_objects[i] = nm
        build_ext.build_extension(self, ext)
class test_regrtest(Command):
    """Distutils command that builds the package and runs its tests with
    test.regrtest against the build tree (before installation)."""
    # Original version of this class posted
    # by Berthold Hoellmann to distutils-sig@python.org
    description = "test the distribution prior to install"
    user_options = [
        ('build-base=', 'b',
         "base build directory (default: 'build.build-base')"),
        ('build-purelib=', None,
         "build directory for platform-neutral distributions"),
        ('build-platlib=', None,
         "build directory for platform-specific distributions"),
        ('build-lib=', None,
         "build directory for all distribution (defaults to either " +
         "build-purelib or build-platlib"),
        ('test-dir=', None,
         "directory that contains the test definitions"),
        ('test-options=', None,
         "command-line options to pass to test.regrtest")
        ]

    def initialize_options(self):
        self.build_base = None
        # these are decided only after 'build_base' has its final value
        # (unless overridden by the user or client)
        self.build_purelib = None
        self.build_platlib = None
        self.test_dir = 'test'
        self.test_options = None

    def finalize_options(self):
        # Mirror the build command's directory options so we test exactly
        # what 'build' produced.
        build = self.distribution.get_command_obj('build')
        build_options = ('build_base', 'build_purelib', 'build_platlib')
        for option in build_options:
            val = getattr(self, option)
            if val:
                setattr(build, option, getattr(self, option))
        build.ensure_finalized()
        for option in build_options:
            setattr(self, option, getattr(build, option))

    def run(self):
        # Invoke the 'build' command to "build" pure Python modules
        # (ie. copy 'em into the build tree)
        self.run_command('build')
        # remember old sys.path to restore it afterwards
        old_path = sys.path[:]
        # extend sys.path
        sys.path.insert(0, self.build_purelib)
        sys.path.insert(0, self.build_platlib)
        sys.path.insert(0, self.test_dir)
        # Use test.regrtest, unlike the original version of this class
        import test.regrtest
        # jcw 2004-04-26 - why do I need to add these here to find the tests?
        #import leaktest - not very portable
        import test_inttypes
        import test_stringtype
        #import test_hash - doesn't work
        # jcw end
        test.regrtest.STDTESTS = []
        test.regrtest.NOTTESTS = []
        if self.test_options:
            sys.argv[1:] = string.split(self.test_options, ' ')
        else:
            del sys.argv[1:]
        # remove stale modules
        del sys.modules['metakit']
        try:
            del sys.modules['Mk4py']
        except:
            pass
        self.announce("running tests")
        test.regrtest.main(testdir=self.test_dir)
        # restore sys.path
        sys.path = old_path[:]
#try:
# import metakit
#except:
# metakit = sys.modules['metakit']
# Package definition: the pure-Python 'metakit' wrapper plus the Mk4py
# C++ extension, built through the customized commands above.
setup(name = "metakit",
      version = "2.4.9.7",
      description = "Python bindings to the Metakit database library",
      #long_description = metakit.__doc__,
      author = "Gordon McMillan / Jean-Claude Wippler",
      author_email = "jcw@equi4.com",
      url = "http://www.equi4.com/metakit/python.html",
      maintainer = "Jean-Claude Wippler",
      maintainer_email = "jcw@equi4.com",
      license = "X/MIT style, see: http://www.equi4.com/mklicense.html",
      keywords = ['database'],
      py_modules = ['metakit'],
      # Wire the customized build/config/test commands in.
      cmdclass = {'build': build_mk, 'build_ext': build_mkext,
                  'test': test_regrtest, 'config': config_mk},
      ext_modules = [Extension("Mk4py",
                               sources=["PyProperty.cpp",
                                        "PyRowRef.cpp",
                                        "PyStorage.cpp",
                                        "PyView.cpp",
                                        "scxx/PWOImp.cpp",
                                        ],
                               include_dirs=["scxx",
                                             "../include"],
                               # resolved to object paths by build_mkext
                               extra_objects=mkobjs,
                               )]
      )
## Local Variables:
## compile-command: "python setup.py build -b ../builds"
## End:
|
electric-cloud/metakit
|
python/setup.py
|
Python
|
mit
| 7,317
| 0.009703
|
#!/usr/bin/python
# Load required modules
import sys, os, json, re, time, comet as C, multiprocessing as mp, random
from math import exp
import run_comet_simple as RC
def get_parser():
    """Build the command-line parser for the CoMEt permutation driver.

    Returns an ``argparse.ArgumentParser`` whose options mirror the plain
    CoMEt runner plus the permutation-specific flags (``-np``,
    ``--parallel``, ``--keep_temp_files``).  Several internal tuning
    options are accepted but hidden from ``--help`` via argparse.SUPPRESS.
    """
    import argparse
    p = argparse.ArgumentParser(description='Runs CoMEt on permuted matrices.')
    add = p.add_argument

    # --- General parameters ---
    add('-o', '--output_directory', required=True, help='Output directory.')
    add('--parallel', action='store_true', default=False,
        help='Use multiprocessing to run a job on each core.')
    add('-np', '--num_permutations', type=int, required=True,
        help='Number of permuted matrices to use.')

    # --- Mutation data ---
    add('-m', '--mutation_matrix', required=True,
        help='File name for mutation data.')
    add('-mf', '--min_freq', type=int, default=0,
        help='Minimum gene mutation frequency.')
    add('-pf', '--patient_file', default=None,
        help='File of patients to be included (optional).')
    add('-gf', '--gene_file', default=None,
        help='File of genes to be included (optional).')

    # --- CoMEt / MCMC parameters ---
    add('-ks', '--gene_set_sizes', nargs="*", type=int, required=True,
        help='Gene set sizes (length must be t). This or -k must be set. ')
    add('-N', '--num_iterations', type=int, default=10 ** 3,
        help='Number of iterations of MCMC.')
    add('-NStop', '--n_stop', type=int, default=10 ** 8,
        help='Number of iterations of MCMC to stop the pipeline.')
    add('-s', '--step_length', type=int, default=100,
        help='Number of iterations between samples.')
    add('-init', '--initial_soln', nargs="*",
        help='Initial solution to use.')
    add('-r', '--num_initial', type=int, default=1,
        help='Number of different initial starts to use with MCMC.')
    add('-tv', '--total_distance_cutoff', type=float, default=0.005,
        help='stop condition of convergence (total distance).')

    # --- Thresholds that pick which statistical test CoMEt applies ---
    add('--exact_cut', type=float, default=0.001,
        help='Maximum accumulated table prob. to stop exact test.')
    add('--binom_cut', type=float, default=0.005,
        help='Minumum pval cutoff for CoMEt to perform binom test.')
    add('-nt', '--nt', type=int, default=10,
        help='Maximum co-occurrence cufoff to perform exact test.')

    # --- Subtype / core-event runs ---
    add('-sub', '--subtype', default=None,
        help='File with a list of subtype for performing subtype-comet.')
    add('-ce', '--core_events', default=None,
        help='File with a list of core events for performing subtype-comet.')

    # --- Hidden options: accepted, but omitted from --help output ---
    # Event-name remapping and per-sample cancer types.
    add('-e', '--event_names', default=None, help=argparse.SUPPRESS)
    add('-st', '--sample_types_file', default=None, help=argparse.SUPPRESS)
    # Marginal-probability-graph rendering thresholds.
    add('-mew', '--minimum_edge_weight', type=float, default=0.001,
        help=argparse.SUPPRESS)
    add('-msf', '--minimum_sampling_frequency', type=float, default=50,
        help=argparse.SUPPRESS)
    add('-tf', '--template_file', type=str,
        default="comet/src/html/template.html", help=argparse.SUPPRESS)
    add('-rmse', '--standard_error_cutoff', type=float, default=0.01,
        help=argparse.SUPPRESS)
    add('--precomputed_scores', default=None, help=argparse.SUPPRESS)
    add('-acc', '--accelerator', type=int, default=1, help=argparse.SUPPRESS)
    add('-v', '--verbose', action="store_true", default=True,
        help=argparse.SUPPRESS)
    # PRNG seed defaults to the current wall-clock time.
    add('--seed', type=int, default=int(time.time()), help=argparse.SUPPRESS)
    # Edge-swapping parameter for matrix permutation.
    add('-q', '--Q', type=int, default=100, help=argparse.SUPPRESS)
    add('--keep_temp_files', action='store_true', required=False,
        default=False, help=argparse.SUPPRESS)
    return p
def runComet(cometArgs):
    """Parse *cometArgs* with the simple-run parser and execute one CoMEt run."""
    parsed = RC.get_parser().parse_args(cometArgs)
    return RC.run(parsed)
def run( args ):
    """Run CoMEt on the real matrix, then on args.num_permutations permuted
    matrices, take the maximum permuted test statistic, and render the
    visualization of the real results against that null statistic."""
    # Set up the arguments for a general CoMEt run on real data.
    # Driver-only flags (and the token following them) are stripped from
    # sys.argv so the remainder can be forwarded to the plain CoMEt runner.
    realOutputDir = "{}/comet-results".format(args.output_directory)
    realCometArgs = []
    permuteFlags = ["-np", "--parallel", "--keep_temp_files", "-o"]
    for i, arg in enumerate(sys.argv[1:]):
        # sys.argv[i] is the element *preceding* arg (enumerate starts at
        # sys.argv[1]), so a flag's value is dropped along with the flag.
        # NOTE(review): --parallel and --keep_temp_files take no value, so
        # whatever argument follows them is dropped too -- confirm intended.
        if arg not in permuteFlags and sys.argv[i] not in permuteFlags:
            realCometArgs.append( arg )
    realCometArgs += [ "-o", realOutputDir, "--noviz"]
    # Perform a simple run on the real data, without visualization.
    results = runComet(realCometArgs)
    # Load mutation data using Multi-Dendrix and output as a temporary file.
    realMutations = C.load_mutation_data(args.mutation_matrix, args.patient_file,
        args.gene_file, args.min_freq, args.subtype)
    m, n, genes, patients, geneToCases, patientToGenes, subtypes = realMutations
    if args.verbose:
        print '* Mutation data: %s genes x %s patients' % (m, n)
    # Construct the gene/patient bipartite graph; permutations are generated
    # by edge-swapping this graph.
    if args.verbose: print "* Creating bipartite graph..."
    G = C.construct_mutation_graph(geneToCases, patientToGenes)
    if args.verbose:
        print '\t- Graph has', len( G.edges() ), 'edges among', len( G.nodes() ), 'nodes.'
    # Reset the arguments for CoMEt runs on permuted matrices: same stripping
    # as above, but also drop -m since each run gets its own permuted matrix.
    cometArgs = []
    permuteFlags = ["-np", "--parallel", "--keep_temp_files", "-m", "-o"]
    for i, arg in enumerate(sys.argv[1:]):
        if arg not in permuteFlags and sys.argv[i] not in permuteFlags:
            cometArgs.append( arg )
    cometArgs.append('--noviz')
    # Choose where permuted matrices and their results live: the real output
    # directory if we keep them, otherwise a throwaway temp directory.
    import tempfile
    arguments = []
    if args.keep_temp_files:
        directory = args.output_directory
    else:
        directory = tempfile.mkdtemp(dir=".", prefix=".tmp")
    # Generate one reproducible seed per permutation from the master seed.
    random.seed(args.seed)
    seeds = [ random.randint(0, 2**31-1) for _ in range(args.num_permutations) ]
    for i, seed in enumerate(seeds):
        # Print a simple one-line progress bar (carriage return, no newline).
        sys.stdout.write("* Running CoMEt on permuted matrices... {}/{}\r".format(i+1, args.num_permutations))
        sys.stdout.flush()
        # Create a permuted dataset and save it as a .m2 adjacency-list file.
        mutations = C.permute_mutation_data(G, genes, patients, seed, args.Q)
        _, _, _, _, geneToCases, patientToGenes = mutations
        adj_list = [ p + "\t" + "\t".join( sorted(patientToGenes[p]) ) for p in patients ]
        permutation_file = "{}/permuted-matrix-{}.m2".format(directory, i+1)
        with open(permutation_file, 'w') as outfile: outfile.write('\n'.join(adj_list))
        # Build the argument list for this permutation's CoMEt run.
        permuteArgs = map(str, cometArgs)
        permuteArgs += [ "-m", permutation_file ]
        permuteArgs += [ "-o", "{}/comet-results-on-permutation-{}".format(directory, i+1)]
        arguments.append( permuteArgs )
    # Run all permutation jobs, optionally fanned out over a process pool.
    # NOTE(review): pool size is hard-coded to 25 rather than cpu_count().
    if args.parallel:
        pool = mp.Pool(25)
        results = pool.map(runComet, arguments)
        pool.close()
        pool.join()
    else:
        results = [ runComet(permuteArgs) for permuteArgs in arguments ]
    # Find the maximum test statistic over all permuted datasets: read the
    # second line (first data row) of every results .tsv, column 2.
    from itertools import islice
    maxStat = 0
    for rf in [ rf for rf in os.listdir(directory) if rf.startswith("comet-results-on-permutation") ]:
        for df in [df for df in os.listdir("{}/{}/results".format(directory, rf) ) if df.endswith(".tsv")]:
            with open("{}/{}/results/{}".format(directory, rf, df)) as infile:
                for line in islice(infile, 1, 2):
                    score = float(line.split("\t")[1])
                    if score > maxStat:
                        maxStat = score
    print "*" * 80
    print "Number of permutations:", args.num_permutations
    print "Max statistic:", maxStat
    # Load the real-data results table for visualization.
    # NOTE(review): if several .tsv files exist, only the last one iterated
    # is kept in resultsTable -- confirm a single results file is expected.
    for rf in [rf for rf in os.listdir( "{}/results/".format(realOutputDir) ) if rf.endswith(".tsv")]:
        resultsTable = [l.rstrip() for l in open( "{}/results/{}".format(realOutputDir, rf))]
    realMutations = (m, n, genes, patients, geneToCases, patientToGenes )
    outputDirViz = realOutputDir + "/viz/"
    C.ensure_dir(outputDirViz)
    # Perform visualization of real results against the permutation null.
    C.output_comet_viz(RC.get_parser().parse_args(realCometArgs), realMutations, \
        resultsTable, maxStat, args.num_permutations)
    # Destroy the temporary directory unless the user asked to keep it.
    if not args.keep_temp_files:
        import shutil
        shutil.rmtree(directory)
if __name__ == "__main__": run( get_parser().parse_args(sys.argv[1:]) )
|
raphael-group/comet
|
run_comet_full.py
|
Python
|
mit
| 10,224
| 0.010172
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.