code stringlengths 3 1.05M | repo_name stringlengths 5 104 | path stringlengths 4 251 | language stringclasses 1 value | license stringclasses 15 values | size int64 3 1.05M |
|---|---|---|---|---|---|
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Chromium presubmit script for src/net.
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
for more details on the presubmit API built into gcl.
"""
def GetPreferredTryMasters(project, change):
    """Return the default try masters/builders for a change under src/net.

    Standard release try bots are requested on every platform; when the
    change touches any NSS-related file, the linux_redux builder is added
    as well.
    """
    trybots = {
        'tryserver.chromium.linux': {
            'linux_chromium_rel_swarming': set(['defaulttests']),
        },
        'tryserver.chromium.mac': {
            'mac_chromium_rel_swarming': set(['defaulttests']),
        },
        'tryserver.chromium.win': {
            'win_chromium_rel_swarming': set(['defaulttests']),
        }
    }
    # Changes that touch NSS files will likely need a corresponding OpenSSL
    # edit. Conveniently, this one substring also matches _openssl.* changes.
    touches_nss = any('nss' in f.LocalPath() for f in change.AffectedFiles())
    if touches_nss:
        linux_bots = trybots['tryserver.chromium.linux']
        linux_bots.setdefault('linux_redux', set()).add('defaulttests')
    return trybots
| 7kbird/chrome | net/PRESUBMIT.py | Python | bsd-3-clause | 1,034 |
"""
same as viewer_thumbs, but uses the grid geometry manager to try to achieve
a more uniform layout; can generally achieve the same with frames and pack
if buttons are all fixed and uniform in size;
"""
import sys, math
from tkinter import *
from PIL.ImageTk import PhotoImage
from viewer_thumbs import makeThumbs, ViewOne
def viewer(imgdir, kind=Toplevel, cols=None):
    """
    Build a window of thumbnail buttons for the images in imgdir, using
    the grid geometry manager for a uniform layout.

    Parameters:
        imgdir: directory containing the source images
        kind:   window class to instantiate (Toplevel by default; pass Tk
                to make this the application's root window)
        cols:   fixed number of columns; defaults to an N x N layout

    Returns (window, savephotos). The caller must keep a reference to
    savephotos: if the PhotoImage objects are garbage-collected, the
    button images go blank.
    """
    win = kind()
    win.title('Viewer: ' + imgdir)
    thumbs = makeThumbs(imgdir)
    if not cols:
        cols = int(math.ceil(math.sqrt(len(thumbs))))    # fixed or N x N
    rownum = 0
    savephotos = []
    while thumbs:
        thumbsrow, thumbs = thumbs[:cols], thumbs[cols:]
        for colnum, (imgfile, imgobj) in enumerate(thumbsrow):
            photo = PhotoImage(imgobj)
            link = Button(win, image=photo)
            # Bind imgfile as a default argument so each button opens its
            # own image (avoids the late-binding closure pitfall).
            handler = lambda savefile=imgfile: ViewOne(imgdir, savefile)
            link.config(command=handler)
            link.grid(row=rownum, column=colnum)
            savephotos.append(photo)
        rownum += 1
    # Bug fix: the grid option is 'sticky', not 'stick' — the misspelled
    # keyword raised a TclError; EW stretches the button across the row.
    Button(win, text='Quit', command=win.quit).grid(columnspan=cols, sticky=EW)
    return win, savephotos
if __name__ == '__main__':
    # Image directory may be given on the command line; default to ./images.
    imgdir = (len(sys.argv) > 1 and sys.argv[1]) or 'images'
    main, save = viewer(imgdir, kind=Tk)   # Tk: make this the root window
    main.mainloop()
| simontakite/sysadmin | pythonscripts/programmingpython/Gui/PIL/viewer-thumbs-grid.py | Python | gpl-2.0 | 1,411 |
# coding: utf-8
from django.core.urlresolvers import reverse
from modoboa.lib.tests import ModoTestCase
from .. import factories
from ..models import Domain, DomainAlias, Alias
class DomainAliasTestCase(ModoTestCase):
    """Tests covering creation, renaming and deletion of domain aliases."""

    @classmethod
    def setUpTestData(cls):
        """Create test data."""
        super(DomainAliasTestCase, cls).setUpTestData()
        factories.populate_database()
        cls.dom = Domain.objects.get(name='test.com')

    def test_model(self):
        """Saving a DomainAlias creates a catch-all alias; deleting removes it."""
        dom = Domain.objects.get(name="test.com")
        domal = DomainAlias()
        domal.name = "domalias.net"
        domal.target = dom
        domal.save()
        # Creating the alias bumps the domain's alias counter...
        self.assertEqual(dom.domainalias_count, 1)
        # ...and creates the matching catch-all address ("@domalias.net").
        self.assertTrue(
            Alias.objects.filter(
                address="@{}".format(domal.name)).exists())
        # Renaming then deleting must leave no catch-all alias behind.
        domal.name = "domalias.org"
        domal.save()
        domal.delete()
        self.assertFalse(
            Alias.objects.filter(
                address="@{}".format(domal.name)).exists())

    def test_form(self):
        """Domain edit form: aliases can be added and removed via POST."""
        dom = Domain.objects.get(name="test.com")
        values = dict(
            name=dom.name, quota=dom.quota, enabled=dom.enabled,
            aliases="domalias.net", aliases_1="domaliasé.com",
            type="domain"
        )
        # Two alias fields (one with a non-ASCII name) create two aliases.
        self.ajax_post(
            reverse("admin:domain_change",
                    args=[dom.id]),
            values
        )
        self.assertEqual(dom.domainalias_set.count(), 2)
        # Resubmitting without the second field removes that alias.
        del values["aliases_1"]
        self.ajax_post(
            reverse("admin:domain_change",
                    args=[dom.id]),
            values
        )
        self.assertEqual(dom.domainalias_set.count(), 1)
| carragom/modoboa | modoboa/admin/tests/test_domain_alias.py | Python | isc | 1,687 |
"""Functions for Plotting the dF states.
To assess the quality of the free energy estimation, the dF between adjacent
lambda states can be plotted.
The code for producing the dF states plot is modified based on
`Alchemical Analysis <https://github.com/MobleyLab/alchemical-analysis>`_.
"""
import matplotlib.pyplot as plt
from matplotlib.font_manager import FontProperties as FP
import numpy as np
from ..estimators import TI, BAR, MBAR
from ..postprocessors.units import get_unit_converter
def plot_dF_state(estimators, labels=None, colors=None, units='kT',
                  orientation='portrait', nb=10):
    '''Plot the dhdl of TI.

    Parameters
    ----------
    estimators : :class:`~alchemlyb.estimators` or list
        One or more :class:`~alchemlyb.estimators`, where the
        dhdl value will be taken from. For more than one estimators
        with more than one alchemical transformation, a list of list format
        is used.
    labels : List
        list of labels for labelling different estimators.
    colors : List
        list of colors for plotting different estimators.
    units : str
        The unit of the estimate. Default: "kT"
    orientation : string
        The orientation of the figure. Can be `portrait` or `landscape`
    nb : int
        Maximum number of dF states in one row in the `portrait` mode

    Returns
    -------
    matplotlib.figure.Figure
        A Figure with the dF states drawn.

    Note
    ----
    The code is taken and modified from
    `Alchemical Analysis <https://github.com/MobleyLab/alchemical-analysis>`_.

    .. versionchanged:: 0.5.0
        The `units` will be used to change the underlying data instead of only
        changing the figure legend.

    .. versionadded:: 0.4.0
    '''
    # Normalise the input to a list of lists of estimators: a bare estimator
    # becomes [[estimator]], a flat list becomes a list of one-element lists.
    try:
        len(estimators)
    except TypeError:
        estimators = [estimators, ]
    formatted_data = []
    for dhdl in estimators:
        try:
            len(dhdl)
            formatted_data.append(dhdl)
        except TypeError:
            formatted_data.append([dhdl, ])
    estimators = formatted_data
    # Collect the dF (and its error) between each pair of adjacent lambda
    # states, converted to the requested units.
    dF_list = []
    error_list = []
    max_length = 0
    convert = get_unit_converter(units)
    for dhdl_list in estimators:
        len_dF = sum([len(dhdl.delta_f_) - 1 for dhdl in dhdl_list])
        if len_dF > max_length:
            max_length = len_dF
        dF = []
        error = []
        for dhdl in dhdl_list:
            for i in range(len(dhdl.delta_f_) - 1):
                dF.append(convert(dhdl.delta_f_).iloc[i, i + 1])
                error.append(convert(dhdl.d_delta_f_).iloc[i, i + 1])
        dF_list.append(dF)
        error_list.append(error)
    # Determine the figure layout from the requested orientation.
    if orientation == 'landscape':
        if max_length < 8:
            fig, ax = plt.subplots(figsize=(8, 6))
        else:
            fig, ax = plt.subplots(figsize=(max_length, 6))
        axs = [ax, ]
        xs = [np.arange(max_length), ]
    elif orientation == 'portrait':
        if max_length < nb:
            xs = [np.arange(max_length), ]
            fig, ax = plt.subplots(figsize=(8, 6))
            axs = [ax, ]
        else:
            # Split the states over several rows of at most nb bars each.
            xs = np.array_split(np.arange(max_length), max_length / nb + 1)
            fig, axs = plt.subplots(nrows=len(xs), figsize=(8, 6))
        mnb = max([len(i) for i in xs])
    else:
        raise ValueError("Not recognising {}, only supports 'landscape' or 'portrait'.".format(orientation))
    # Sort out the colors
    if colors is None:
        colors_dict = {'TI': '#C45AEC', 'TI-CUBIC': '#33CC33',
                       'DEXP': '#F87431', 'IEXP': '#FF3030', 'GINS': '#EAC117',
                       'GDEL': '#347235', 'BAR': '#6698FF', 'UBAR': '#817339',
                       'RBAR': '#C11B17', 'MBAR': '#F9B7FF'}
        colors = []
        for dhdl in estimators:
            dhdl = dhdl[0]
            if isinstance(dhdl, TI):
                colors.append(colors_dict['TI'])
            elif isinstance(dhdl, BAR):
                colors.append(colors_dict['BAR'])
            elif isinstance(dhdl, MBAR):
                colors.append(colors_dict['MBAR'])
    else:
        if len(colors) >= len(estimators):
            pass
        else:
            raise ValueError(
                'Number of colors ({}) should be larger than the number of data ({})'.format(
                    len(colors), len(estimators)))
    # Sort out the labels
    if labels is None:
        labels = []
        for dhdl in estimators:
            dhdl = dhdl[0]
            if isinstance(dhdl, TI):
                labels.append('TI')
            elif isinstance(dhdl, BAR):
                labels.append('BAR')
            elif isinstance(dhdl, MBAR):
                labels.append('MBAR')
    else:
        if len(labels) == len(estimators):
            pass
        else:
            raise ValueError(
                'Length of labels ({}) should be the same as the number of data ({})'.format(
                    len(labels), len(estimators)))
    # Plot the figure: one group of bars per estimator, per row of states.
    width = 1. / (len(estimators) + 1)
    elw = 30 * width
    ndx = 1
    for x, ax in zip(xs, axs):
        lines = []
        for i, (dF, error) in enumerate(zip(dF_list, error_list)):
            y = [dF[j] for j in x]
            ye = [error[j] for j in x]
            if orientation == 'landscape':
                lw = 0.1 * elw
            elif orientation == 'portrait':
                lw = 0.05 * elw
            line = ax.bar(x + len(lines) * width, y, width,
                          color=colors[i], yerr=ye, lw=lw,
                          error_kw=dict(elinewidth=elw, ecolor='black',
                                        capsize=0.5 * elw))
            lines += (line[0],)
        # Hide all spines except the left one (renamed from `dir`, which
        # shadowed the builtin dir()).
        for spine in ['left', 'right', 'top', 'bottom']:
            if spine == 'left':
                ax.yaxis.set_ticks_position(spine)
            else:
                ax.spines[spine].set_color('none')
        if orientation == 'landscape':
            plt.yticks(fontsize=8)
            ax.set_xlim(x[0]-width, x[-1] + len(lines) * width)
            plt.xticks(x + 0.5 * width * len(estimators),
                       tuple(['%d--%d' % (i, i + 1) for i in x]), fontsize=8)
        elif orientation == 'portrait':
            plt.yticks(fontsize=10)
            ax.xaxis.set_ticks([])
            for i in x + 0.5 * width * len(estimators):
                ax.annotate(r'$\mathrm{%d-%d}$' % (i, i + 1), xy=(i, 0),
                            xycoords=('data', 'axes fraction'), xytext=(0, -2),
                            size=10, textcoords='offset points', va='top',
                            ha='center')
            ax.set_xlim(x[0]-width, x[-1]+len(lines)*width + (mnb - len(x)))
        ndx += 1
    x = np.arange(max_length)
    ax = plt.gca()
    for tick in ax.get_xticklines():
        tick.set_visible(False)
    # Legend and axis labels depend on the orientation.
    if orientation == 'landscape':
        leg = plt.legend(lines, labels, loc=3, ncol=2, prop=FP(size=10),
                         fancybox=True)
        plt.title('The free energy change breakdown', fontsize=12)
        plt.xlabel('States', fontsize=12, color='#151B54')
        plt.ylabel(r'$\Delta G$ ({})'.format(units), fontsize=12, color='#151B54')
    elif orientation == 'portrait':
        leg = ax.legend(lines, labels, loc=0, ncol=2,
                        prop=FP(size=8),
                        title=r'$\Delta G$ ({})'.format(units) +
                              r'$\mathit{vs.}$ lambda pair',
                        fancybox=True)
    leg.get_frame().set_alpha(0.5)
    return fig
| alchemistry/alchemlyb | src/alchemlyb/visualisation/dF_state.py | Python | bsd-3-clause | 7,613 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
from msrest.exceptions import HttpOperationError
class Error(Model):
    """Cognitive Services error object.

    :param error: The error body.
    :type error: :class:`ErrorBody
     <azure.mgmt.cognitiveservices.models.ErrorBody>`
    """

    # msrest (de)serialization map: JSON key 'error' <-> ErrorBody model.
    _attribute_map = {
        'error': {'key': 'error', 'type': 'ErrorBody'},
    }

    def __init__(self, error=None):
        self.error = error
class ErrorException(HttpOperationError):
    """Server responded with exception of type: 'Error'.

    :param deserialize: A deserializer
    :param response: Server response to be deserialized.
    """

    def __init__(self, deserialize, response, *args):
        # Tell the base class to deserialize the body as an 'Error' model.
        super(ErrorException, self).__init__(deserialize, response, 'Error', *args)
| v-iam/azure-sdk-for-python | azure-mgmt-cognitiveservices/azure/mgmt/cognitiveservices/models/error.py | Python | mit | 1,252 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Apr 30 01:14:12 2017
@author: colm
"""
from alchemy_server import app
if __name__ == "__main__":
    # Run Flask's built-in development server when executed directly;
    # under a WSGI container only the imported `app` object is used.
    app.run()
##########################################################################
#
# Copyright (c) 2011, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of Image Engine Design nor the names of any
# other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import os
import unittest
import IECore
class ConfigLoaderTest( unittest.TestCase ) :
	"""Tests for IECore.loadConfig(): search path ordering, error handling,
	file filtering and the execution scope of config scripts.

	Note: the deprecated unittest aliases failUnless/failIf have been
	replaced with assertTrue/assertFalse, consistent with the assertions
	already used in testSubdirectory below.
	"""

	def testLoadConfig( self ) :
		# A config script can write into the supplied context dictionary.
		config = {}
		IECore.loadConfig(
			IECore.SearchPath( os.path.dirname( __file__ ) + "/config/orderOne" ),
			contextDict = { "config" : config },
		)
		self.assertEqual( config["a"], 1 )

	def testOrder( self ) :
		# Earlier search path entries take precedence over later ones.
		config = {}
		IECore.loadConfig(
			IECore.SearchPath( [
				os.path.dirname( __file__ ) + "/config/orderTwo",
				os.path.dirname( __file__ ) + "/config/orderOne",
			] ),
			contextDict = { "config" : config },
		)
		self.assertEqual( config["a"], 2 )

	def testIgnoreExceptions( self ) :
		# With raiseExceptions=False a failing config is reported through the
		# message handler and loading continues with the remaining configs.
		config = {}
		m = IECore.CapturingMessageHandler()
		with m :
			IECore.loadConfig(
				IECore.SearchPath( [
					os.path.dirname( __file__ ) + "/config/orderOne",
					os.path.dirname( __file__ ) + "/config/exceptions",
				] ),
				contextDict = { "config" : config },
				raiseExceptions = False
			)
		errors = [ msg for msg in m.messages if msg.level == IECore.Msg.Level.Error ]
		self.assertEqual( len( errors ), 1 )
		self.assertEqual( errors[0].level, IECore.Msg.Level.Error )
		self.assertTrue( "I am a very naughty boy" in errors[0].message )
		self.assertEqual( config["a"], 1 )

	def testThrowExceptions( self ) :
		# With raiseExceptions=True the error propagates and nothing is kept.
		config = {}
		self.assertRaises(
			RuntimeError,
			IECore.loadConfig,
			IECore.SearchPath( [
				os.path.dirname( __file__ ) + "/config/orderOne",
				os.path.dirname( __file__ ) + "/config/exceptions",
			] ),
			contextDict = { "config" : config },
			raiseExceptions = True
		)
		self.assertFalse( "a" in config )

	def testScope( self ) :
		# A function defined by a config script must remain callable after
		# loading has finished (its globals stay alive).
		config = {}
		IECore.loadConfig(
			IECore.SearchPath( os.path.dirname( __file__ ) + "/config/scope" ),
			contextDict = { "config" : config },
			raiseExceptions = True
		)
		config["functionToCallLater"]()

	def testIgnoreFiles( self ) :
		# Files that should not be executed (judging by the keys the fixture
		# scripts would set: tilde backups and non-.py files) are skipped.
		config = {}
		IECore.loadConfig(
			IECore.SearchPath( os.path.dirname( __file__ ) + "/config/ignoreFiles" ),
			contextDict = { "config" : config },
		)
		self.assertFalse( "tildeConfigRan" in config )
		self.assertFalse( "notDotPyRan" in config )
		self.assertEqual( config["a"], 1000 )

	def testOrderWithinDirectory( self ) :
		# Touching a.py first shows that execution order inside one directory
		# is not based on modification time.
		os.utime( os.path.dirname( __file__ ) + "/config/orderDir/a.py", None )
		config = {}
		IECore.loadConfig(
			IECore.SearchPath( os.path.dirname( __file__ ) + "/config/orderDir" ),
			contextDict = { "config" : config },
		)
		self.assertEqual( config["lastRun"], "b" )

	def testSubdirectory( self ) :
		# Only configs in the named subdirectory are loaded.
		config = {}
		IECore.loadConfig(
			IECore.SearchPath( os.path.dirname( __file__ ) + "/config" ),
			contextDict = { "config" : config },
			subdirectory = "orderDir",
		)
		self.assertTrue( "lastRun" in config )
		self.assertFalse( "a" in config )

	def testSearchPathAsEnvVar( self ) :
		# The search path may be given as the name of an environment variable.
		os.environ["IECORE_CONFIGLOADERTEST_PATHS"] = "%s:%s" % (
			os.path.dirname( __file__ ) + "/config/orderOne",
			os.path.dirname( __file__ ) + "/config/orderTwo"
		)
		config = {}
		IECore.loadConfig(
			"IECORE_CONFIGLOADERTEST_PATHS",
			contextDict = { "config" : config },
		)
		self.assertEqual( config["a"], 1 )
		os.environ["IECORE_CONFIGLOADERTEST_PATHS"] = "%s:%s" % (
			os.path.dirname( __file__ ) + "/config/orderTwo",
			os.path.dirname( __file__ ) + "/config/orderOne"
		)
		config = {}
		IECore.loadConfig(
			"IECORE_CONFIGLOADERTEST_PATHS",
			contextDict = { "config" : config },
		)
		self.assertEqual( config["a"], 2 )

	def testFile( self ) :
		# A config script can discover the absolute path of its own file.
		config = {}
		path = os.path.dirname( __file__ ) + "/config/getFile"
		IECore.loadConfig(
			IECore.SearchPath( path ),
			contextDict = { "config" : config },
		)
		expectedFile = os.path.abspath( os.path.join( path, "config.py" ) )
		self.assertEqual( config["myFile"], expectedFile )

	def testDuplicatePathsIgnored( self ) :
		# Repeated search path entries are visited only once.
		config = {}
		IECore.loadConfig(
			IECore.SearchPath( [
				os.path.dirname( __file__ ) + "/config/orderOne",
				os.path.dirname( __file__ ) + "/config/orderTwo",
				os.path.dirname( __file__ ) + "/config/orderOne",
			] ),
			contextDict = { "config" : config },
		)
		self.assertEqual( config["a"], 2 )

	def testConfigIsolation( self ) :
		# Loading must succeed both without and with a context dictionary.
		IECore.loadConfig(
			IECore.SearchPath( [
				os.path.dirname( __file__ ) + "/config/isolation",
			] ),
			raiseExceptions = True
		)
		IECore.loadConfig(
			IECore.SearchPath( [
				os.path.dirname( __file__ ) + "/config/isolation",
			] ),
			{},
			raiseExceptions = True
		)
if __name__ == "__main__":
    # Allow running this test module directly as a script.
    unittest.main()
| appleseedhq/cortex | test/IECore/ConfigLoaderTest.py | Python | bsd-3-clause | 6,281 |
# -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- Project information -----------------------------------------------------
project = 'python-socketio'
copyright = '2018, Miguel Grinberg'
author = 'Miguel Grinberg'
# The short X.Y version
version = ''
# The full version, including alpha/beta/rc tags
# (both left empty, so the rendered docs show no version number)
release = ''
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    'sphinx.ext.autodoc',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = None
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# alabaster options: show the GitHub banner and star button in the sidebar.
html_theme_options = {
    'github_user': 'miguelgrinberg',
    'github_repo': 'python-socketio',
    'github_banner': True,
    'github_button': True,
    'github_type': 'star',
    'fixed_sidebar': True,
}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'python-socketiodoc'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #
    # 'papersize': 'letterpaper',
    # The font size ('10pt', '11pt' or '12pt').
    #
    # 'pointsize': '10pt',
    # Additional stuff for the LaTeX preamble.
    #
    # 'preamble': '',
    # Latex figure (float) alignment
    #
    # 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    (master_doc, 'python-socketio.tex', 'python-socketio Documentation',
     'Miguel Grinberg', 'manual'),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    (master_doc, 'python-socketio', 'python-socketio Documentation',
     [author], 1)
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    (master_doc, 'python-socketio', 'python-socketio Documentation',
     author, 'python-socketio', 'One line description of project.',
     'Miscellaneous'),
]
# -- Options for Epub output -------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#
# epub_identifier = ''
# A unique identification for the text.
#
# epub_uid = ''
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# -- Extension configuration -------------------------------------------------
| miguelgrinberg/python-socketio | docs/conf.py | Python | mit | 5,500 |
from .vminute import main
import sys
def main_main():
    # Console-script entry point: forward the command-line arguments
    # (minus the program name) to the package's main().
    main(sys.argv[1:])
| SatelliteQE/5minute | vminute/__init__.py | Python | gpl-2.0 | 79 |
from django import forms
from django.core import validators
def must_be_empty(value):
    """Validator for honeypot fields: reject any non-empty value."""
    if not value:
        return
    raise forms.ValidationError('is not empty')
class SuggestionForm(forms.Form):
    """Site suggestion form with a hidden honeypot field to trap spam bots."""
    name = forms.CharField()
    email = forms.EmailField()
    verify_email = forms.EmailField(label="Please verify your email address")
    suggestions = forms.CharField(widget=forms.Textarea)
    # Hidden field that real users leave empty; bots that fill every field
    # fail the must_be_empty validator.
    honeypot = forms.CharField(required=False,
                               widget=forms.HiddenInput,
                               label="Leave empty",
                               validators=[must_be_empty]
                               )

    def clean(self):
        """Cross-field validation: the two email fields must match."""
        cleaned_data = super().clean()
        # Bug fix: use .get() instead of indexing. If a field failed its own
        # validation, its key is absent from cleaned_data and indexing would
        # raise KeyError (a 500 error) instead of reporting form errors.
        email = cleaned_data.get('email')
        verify = cleaned_data.get('verify_email')
        if email != verify:
            raise forms.ValidationError(
                "You need to enter the same email in both fields"
            )
        return cleaned_data
"""
Cron tasks
==========
This module provides tools to manage periodic tasks using cron.
"""
from future.utils import iteritems
def add_task(name, timespec, user, command, environment=None):
    """
    Add a cron task.

    The *command* will be run as *user* periodically.

    Any valid `crontab(5)`_ *timespec* may be used, including the
    ``@hourly``, ``@daily``, ``@weekly``, ``@monthly`` and ``@yearly``
    shortcuts. An optional dictionary of environment variables to set
    for the periodic command may also be supplied.

    Examples::

        from fabtools.cron import add_task

        # Run every month
        add_task('cleanup', '@monthly', 'alice', '/home/alice/bin/cleanup.sh')

        # Run every tuesday and friday at 5:30am
        add_task('reindex', '30 5 * * 2,4', 'bob', '/home/bob/bin/reindex.sh')

    .. _crontab(5): http://manpages.debian.net/cgi-bin/man.cgi?query=crontab&sektion=5
    """
    env_vars = environment if environment is not None else {}
    # Environment variable assignments come first, then the crontab entry.
    entry_lines = ['%s=%s\n' % (key, value) for (key, value) in iteritems(env_vars)]
    entry_lines.append('%s %s %s\n' % (timespec, user, command))
    from fabtools.require.files import file as require_file
    require_file(
        path='/etc/cron.d/%s' % name,
        contents=''.join(entry_lines),
        owner='root',
        mode='0644',
        use_sudo=True,
    )
def add_daily(name, user, command, environment=None):
    """
    Shortcut to add a daily cron task.

    Example::

        import fabtools

        # Run every day
        fabtools.cron.add_daily('backup', 'root', '/usr/local/bin/backup.sh')
    """
    # Delegate to add_task with the @daily crontab shortcut.
    add_task(name, '@daily', user, command, environment=environment)
| wagigi/fabtools-python | fabtools/cron.py | Python | bsd-2-clause | 1,825 |
# Copyright (c) 2001, Stanford University
# All rights reserved.
#
# See the file LICENSE.txt for information on redistributing this software.
"""Chromium mothership functions
These functions are used in configuration files to direct the mothership.
Use these functions to describe your SPU network. The SPU network is a DAG
of nodes. Each node contains a SPU chain. At the top of the graph is an
CRApplicationNode. It connects to one or more CRNetworkNodes.
Public functions and classes:
CR: Main class that controls the mothership
SPU: Class that defines a Stream Processing Unit.
CRNetworkNode: Sub class of CRNode that defines a node in the SPU
graph that handles incoming and outgoing network
traffic.
CRApplicationNode:
Sub class of CRNode that defines the start of the the
SPU graph.
CRAddStartupCallback:
Add a callback to be called on cr.Go()
Other internal functions/classes:
CRNode: Base class that defines a node in the SPU graph
CRDebug: Used to print out debugging messages.
CROutput: Used to print messages to a logfile.
Fatal: Used to print out a debugging messages and exit.
MakeString: Converts a Python object to a string.
SockWrapper: Internal convenience class for handling sockets
"""
import sys, string, types, traceback, re, threading, os, socket, select, signal, pickle, copy, time
from crconfig import arch, crdir, crbindir, crlibdir
# Version string
Version = "1.9"
# Default port we'll listen on (also set in cr_net.h)
DefaultMothershipPort = 10000
# This controls whether info/debug messages are printed
# (0=none, 1=info, 2=info+debug); adjust via CRSetDebugLevel().
DebugLevel = 1
# It seems these aren't defined in all versions of Python
# (pre-2.3 compatibility shim; this file is Python 2 code).
True = 1
False = 0
# Some help in figuring out the domains of some non-qualified hostnames.
# Each (prefix, domain) pair appends `domain` to hostnames starting with
# `prefix`. See QualifyHostname() below.
HostPrefixPairs = [
    ('iam','psc.edu'),
    ('tg-v','uc.teragrid.org')
]
def CRSetDebugLevel(level):
    """CRSetDebugLevel(level)
    Sets the global message verbosity: 0=none, 1=info, 2=info+debug."""
    global DebugLevel
    DebugLevel = level
def CRInfo( str ):
    """CRInfo(str)
    Prints informational messages to stderr.
    Suppressed when the global DebugLevel is 0."""
    global DebugLevel
    if DebugLevel >= 1:
        print >> sys.stderr, str
def CRDebug( str ):
    """CRDebug(str)
    Prints debugging message to stderr.
    Only emitted when the global DebugLevel is 2 or higher."""
    global DebugLevel
    if DebugLevel >= 2:
        print >> sys.stderr, str
def CROutput( str ):
    """CROutput(str)
    Appends the message to the performance log file named by the
    CR_PERF_MOTHERSHIP_LOGFILE environment variable; reports via
    CRDebug() when the variable is unset."""
    filename = os.environ.get("CR_PERF_MOTHERSHIP_LOGFILE")
    if filename:
        f = open(filename, "a")
        if f:
            f.write("%s\n" % str )
            f.close()
        else:
            # Bug fix: this branch referenced the undefined/builtin name
            # 'file'; report the log file name we actually tried to open.
            CRDebug("Unable to open performance monitoring log file %s\n" % filename)
    else:
        CRDebug("NO Performance Logfile set, check CR_PERF_MOTHERSHIP_LOGFILE")
def CRAddStartupCallback( cb ):
    """CRAddStartupCallback( cb )
    Causes cb(thisCR) to be called from thisCR.Go().
    Callbacks are stored on the CR class, so they apply to all CR instances."""
    CR.startupCallbacks.append(cb)
allSPUs = {}
def Fatal( str ):
    """Fatal(str)
    Prints debugging message to stderr and skeddadles (exits with status -1)."""
    print >> sys.stderr, str
    sys.exit(-1)
def MakeString( x ):
    """MakeString(x)
    Converts an object to a string: plain strings pass through unchanged,
    everything else is converted with repr()."""
    if type(x) == types.StringType:
        return x
    else:
        return repr(x)
def SameHost( host1, host2 ):
    """Return 1 if host1 and host2 name the same host. Return 0 otherwise.
    For example, if host1='foo' and host2='foo.bar.com' we'll return 1.
    """
    try:
        # Equal strings, or names resolving to the same IP address, match.
        if host1 == host2 or socket.gethostbyname(host1) == socket.gethostbyname(host2):
            return 1
        else:
            return 0
    except socket.gaierror:
        # DNS resolution failed: fall back to comparing the first
        # dot-separated component of each name.
        if string.split(host1,".")[0] == string.split(host2,".")[0]:
            return 1
        else:
            return 0
# Constraint tests. These are used to match hosts, either statically
# or dynamically. Each test must define whether it is appropriate
# for use with static host matching or dynamic host matching.
STATIC_CONSTRAINT = True
DYNAMIC_CONSTRAINT = False
# Maps constraint name -> (test function, STATIC_/DYNAMIC_CONSTRAINT flag).
ConstraintTests = { }
def NameConstraint(testName, matchName):
    # Case-insensitive host name comparison (resolves via SameHost).
    return SameHost(string.lower(testName), string.lower(matchName))
ConstraintTests["name"] = (NameConstraint, STATIC_CONSTRAINT)
def DynamicConstraint(testName, dummy):
    # A 'dynamic' constraint matches any host.
    return 1
ConstraintTests["dynamic"] = (DynamicConstraint, DYNAMIC_CONSTRAINT)
def RegexConstraint(testName, pattern):
    # Match the (possibly unqualified) host name against a regex string.
    return re.search(pattern, testName)
ConstraintTests["regex"] = (RegexConstraint, DYNAMIC_CONSTRAINT)
def RegexFullConstraint(testName, pattern):
    # Like "regex", but the host name is fully qualified first.
    fullName = QualifyHostname(testName)
    return re.search(pattern, fullName)
ConstraintTests["regex_full"] = (RegexFullConstraint, DYNAMIC_CONSTRAINT)
def PatternConstraint(testName, compiledPattern):
    # Like "regex", but takes a pre-compiled pattern object.
    return compiledPattern.search(testName)
ConstraintTests["pattern"] = (PatternConstraint, DYNAMIC_CONSTRAINT)
def PatternFullConstraint(testName, compiledPattern):
    # Like "regex_full", but takes a pre-compiled pattern object.
    fullName = QualifyHostname(testName)
    return compiledPattern.search(fullName)
ConstraintTests["pattern_full"] = (PatternFullConstraint, DYNAMIC_CONSTRAINT)
def MatchDynamicConstraints(node, hostToMatch):
    """Return 1 iff hostToMatch satisfies ALL of node's constraints
    (static and dynamic alike); 0 otherwise."""
    for (constraintName, constraintArg) in node.constraints:
        (testFunction, constraintType) = ConstraintTests[constraintName]
        if not testFunction(hostToMatch, constraintArg):
            return 0
    return 1
def MatchStaticConstraints(node, hostToMatch):
    """Return 1 iff every one of node's constraints is STATIC and is
    satisfied by hostToMatch; any dynamic constraint makes this fail."""
    for (constraintName, constraintArg) in node.constraints:
        (testFunction, constraintType) = ConstraintTests[constraintName]
        if constraintType != STATIC_CONSTRAINT or not testFunction(hostToMatch, constraintArg):
            return 0
    return 1
# This structure will contain a list of all dynamic host indicators
# found during definition; they will be assigned as servers come in
# through the MatchNode() routine (following).
# Maps dynamic host placeholder name -> resolved real host name.
dynamicHosts = { }
# This structure will contain an entry for every dynamic host
# indicator that has not yet been resolved. It is used to
# know when the main application (which needs a list of all
# servers) may continue.
dynamicHostsNeeded = { }
def MatchStaticNode(node, hostToMatch):
    # A node matches statically iff all of its constraints are static
    # and hostToMatch satisfies them.
    return MatchStaticConstraints(node, hostToMatch)
def MatchResolvedNode(node, hostToMatch):
    """Return 1 iff node's dynamic host has already been resolved and
    it resolved to hostToMatch (case-insensitive)."""
    if dynamicHosts.has_key(node.host):
        return SameHost(string.lower(dynamicHosts[node.host]), string.lower(hostToMatch))
    else:
        return 0
# Only the "grandmothership" may resolve nodes.
def ResolveNode(node, hostToMatch):
    """Bind the dynamic host placeholder node.host to the real host name
    hostToMatch, and mark it as no longer outstanding."""
    dynamicHosts[node.host] = hostToMatch
    try:
        del dynamicHostsNeeded[node.host]
    except KeyError:
        # Narrowed from a bare except: only "already removed" is expected
        # here; anything else should not be silently swallowed.
        pass
def MatchUnresolvedNode(node, hostToMatch):
    """If hostToMatch satisfies node's constraints, bind the node's dynamic
    host to it (via ResolveNode) and return 1; otherwise return 0."""
    if MatchDynamicConstraints(node, hostToMatch):
        ResolveNode(node, hostToMatch)
        return 1
    else:
        return 0
def QualifyHostname( host ):
    """Converts host to a fully qualified domain name.
    Basicially, look if 'host' contains a dot. If not, search the
    HostPrefixPairs list to find a suitable domain to append onto
    the hostname, then the CR_DEFAULT_DOMAIN environment variable,
    and finally fall back to socket.getfqdn()."""
    if string.find(host, '.') >= 0:
        # OK as-is
        return host
    else:
        # try to find a matching prefix in HostPrefixPairs list
        for (prefix, domain) in HostPrefixPairs:
            if string.find(host, prefix) == 0:
                return host + '.' + domain
        # Look if the CR_DEFAULT_DOMAIN env var is set
        domain = os.environ.get("CR_DEFAULT_DOMAIN")
        if domain:
            return host + '.' + domain
        # finally, try using the socket.getfqdn() function
        return socket.getfqdn(host)
class SPU:
    """Main class that defines a Stream Processing Unit.
    public functions:
    Conf: Sets a key/value list in this SPU's configuration
    AddServer: Tells a client node where to find its server.
    AddDisplay: Adds a 'display' to the list of displays (for tilesort)
    TileLayoutFunction: Registers a function to call when this SPU is
    asked for a new tile layout.
    """
    def __init__( self, name ):
        """Creates a SPU with the given name."""
        self.name = name
        self.config = {}  # key -> value (or list of values); see Conf()
        self.clientargs = []
        self.servers = []  # (node, URL-format-string) pairs; see AddServer()
        self.layoutFunction = None  # optional tilesort layout callback
        self.displays = []  # (display_id, w, h, align, align_inv) tuples
    def Conf( self, key, *values ):
        """Set a SPU configuration option.  A single value is stored as-is;
        the obsolete N-argument form is stored as a list (with a warning)."""
        # XXX we'll eventually force values to be a single value or a list!
        if type(values) == types.TupleType and len(values) > 1:
            print "***WARNING: Obsolete syntax detected in Conf('%s', ...)!" % key
            print "***WARNING: Put brackets around N-element values (i.e. Python list syntax)."
        if len(values) > 1:
            self.config[key] = list(values)
        else:
            self.config[key] = values[0]
    def __add_server( self, node, formatURL ):
        # Internal: record the server node together with its URL template.
        self.servers.append( (node, formatURL) )
    def AddServer( self, node, protocol='tcpip', port=7000 ):
        """AddServer(node, protocol='tcpip', port=7000)
        Associates a server with an SPU and tells it how to connect to it.
        The SPU will typically be a pack SPU or tilesort SPU.
        """
        if (protocol.startswith('file') or protocol.startswith('swapfile')):
            self.__add_server( node, "%s" % protocol )
            # Don't tell the server "node" about this.
        else:
            # XXX use node.host or node.ipaddr here??? (BP)
            # Note that this is a format that will be later converted;
            # if there's a dynamic host reference, we cannot convert it now.
            self.__add_server( node, "%s://%%(host)s:%d" % (protocol, port) )
            # use this for tcp/ip : send hostname rather than ip
            # (waiting for getaddrinfo, for probing which one is
            # available)
            if node != None:
                node.Conf( 'port', port )
                node.AddClient( self, protocol )
    def AddDisplay(self, display_id, w, h, align_matrix, align_matrix_inv):
        """AddDisplay(display_id, w, h, align_matrix, align_matrix_inv)
        Adds a display with a given id and size to spu, for the
        tilesort SPU"""
        self.displays.append( (display_id, w, h, align_matrix, align_matrix_inv) )
    def TileLayoutFunction( self, layoutFunc ):
        """Set the tile layout callback function for a tilesort SPU."""
        # Set the tile layout function for a tilesort SPU
        assert self.name == "tilesort"
        self.layoutFunction = layoutFunc
class CRNode:
    """Base class that defines a node in the SPU graph
    public functions:
    Rank: Sets the node's rank.
    AddSPU: Adds a SPU to the front of the SPU chain.
    SPUDir: Sets the directory SPUs start in.
    AutoStart: Pass this method a string to start the process
    associated with this CRNode from the mothership.
    You can pass a list of strings as the argument
    for use in os.spawnv() or a single string which
    will be split into a list. Make sure the first
    thing you pass is the full path to the executable.
    Examples:
    CRNode dummy( 'jimbobsbox' )
    dummy.AutoStart( "/usr/bin/ssh jimbobsbox crserver" )
    CRNode dummy( 'matilda' )
    dummy.AutoStart( ["/usr/bin/ssh", "matilda", "setenv FILE /Poorly Named/Data.1 ; crserver "] )
    """
    # Class-wide counter used to hand out unique SPU IDs in AddSPU().
    SPUIndex = 0
    def __init__( self, host, constraint = "name", constraintArg = None ):
        """CRNode(host)
        Creates a node on the given "host"."""
        if (host == 'localhost'):
            host = socket.getfqdn()
        self.host = host
        self.SPUs = []           # ordered SPU chain for this node
        self.spokenfor = 0       # set once a connecting process claims this node
        self.spusloaded = 0
        self.config = {}         # key -> value; see Conf()
        self.alias = host
        self.autostart = ""      # executable path for os.spawnv(); see AutoStart()
        self.autostart_argv = []
        self.dynamic_host = False  # True when a non-static constraint is added
        self.nodeIndex = -1 # set when added to a CR
        self.crut_spokenfor = 0
        # Add the default constraint to the node.
        self.constraints = []
        if constraintArg == None:
            constraintArg = self.host
        self.AddConstraint(constraint, constraintArg)
    def Alias( self, name ):
        # Alternate display name for the node; defaults to the host name.
        self.alias = name
    def Rank( self, rank ):
        """Rank(rank)
        Sets the node's rank."""
        self.config['rank'] = str( rank )
    def AddSPU( self, spu ):
        """AddSPU(spu)
        Adds the given SPU to the tail of the SPU chain."""
        self.SPUs.append( spu )
        # Hand out a globally unique SPU ID and register it.
        spu.ID = CRNode.SPUIndex
        spu.node = self
        CRNode.SPUIndex += 1
        allSPUs[spu.ID] = spu
    def Conf( self, key, value ):
        """Sets a key/value list in this node's configuration"""
        self.config[key] = value
    def SPUDir( self, dir ):
        """SPUDir(dir)
        Sets the directory that SPUs start in."""
        self.Conf('spu_dir', dir)
    def AutoStart( self, program ):
        # Accept either a command string (split on whitespace) or an argv list;
        # element 0 must be the full path to the executable (see class docs).
        if type( program ) == types.StringType:
            self.autostart_argv = string.split( program )
            self.autostart = self.autostart_argv[0]
        else:
            self.autostart_argv = program
            self.autostart = program[0]
    def SetPosition(self, x, y):
        # not used by mothership, set by graphical config tool
        pass
    def AddConstraint(self, constraint, arg = None):
        # Make sure it's a valid constraint
        try:
            (testFunction, constraintType) = ConstraintTests[constraint]
        except:
            print "***WARNING: unknown constraint '%s' on host '%s' ignored" % (constraint, self.host)
            return
        # A non-static constraint makes this a dynamic host: it must be
        # resolved (see dynamicHostsNeeded) before the app may continue.
        if constraintType != STATIC_CONSTRAINT:
            self.dynamic_host = True
            dynamicHostsNeeded[self.host] = 1
        self.constraints.append( (constraint, arg) )
    def GetClients( self ):
        """Return list of (spu, protocol) tuples who are clients of
        this node. The CRNetworkNode class will override this."""
        return None
class CRNetworkNode(CRNode):
    """A node in the SPU graph that handles incoming and outgoing
    network traffic (i.e. runs a crserver).

    public functions:
        Conf: Sets a key/value list in this node's configuration
        AddClient: Adds a SPU to the list of clients.
        FileClient: Add a file-readback client
        AddTile: Adds a tile to the list of tiles
        AddTileToDisplay: Adds a tile to a specified collection of tiles (a display)
    """
    def __init__( self, host='localhost', constraint = "name", constraintArg = None ):
        """Create a network (server) node for the given host."""
        CRNode.__init__(self, host, constraint, constraintArg)
        self.clients = []            # (spu, protocol) pairs
        self.file_clients = []       # "file://name" URLs
        self.tiles = []              # (x, y, w, h) tuples
        self.tiles_on_displays = []  # (display_id, x, y, w, h) tuples
    def AddClient( self, spu, protocol ):
        """Record that 'spu' talks to this server via 'protocol'."""
        self.clients.append( (spu, protocol) )
    def GetClients( self ):
        """Return the list of (spu, protocol) client tuples."""
        return self.clients
    def FileClient( self, fname ):
        """Add a file-readback client link reading from the named file."""
        self.file_clients.append( "file://%s" % fname )
    def AddTile( self, x, y, w, h ):
        """Define a tile with the given geometry to be used by a
        tilesort SPU."""
        self.tiles.append( (x, y, w, h) )
    def AddTileToDisplay( self, display_id, x, y, w, h ):
        """Like AddTile, but for use with specified displays: (x, y) are
        relative to the origin of the display, not the mural."""
        self.tiles_on_displays.append( (display_id, x, y, w, h) )
class CRVNCServerNode(CRNode):
    """Placeholder node for VNC/Replicate SPU configurations.

    The config file should create exactly one of these; it is shared by
    every vncviewer that might be run."""
    def __init__(self):
        """Create the shared CR VNC server node (host is 'anyhost')."""
        CRNode.__init__(self, host="anyhost")
class CRUTServerNode(CRNode):
    """Node in the SPU graph that generates CRUT events and sends them
    out over the network.

    public functions:
        Conf: Sets a key/value list in this node's configuration
        AddCRUTClient: Adds a client to the list of crutclients.
    """
    def __init__( self, host='localhost', constraint = "name", constraintArg = None ):
        """Create a CRUT event-server node for the given host."""
        CRNode.__init__(self, host, constraint, constraintArg)
        # A crutserver creates events; it should be the only server.
        self.crutclients = []
    def __add_crut_client( self, node, url ):
        # Internal: remember the (node, url) pair.
        self.crutclients.append( (node, url) )
    def AddCRUTClient( self, node, protocol='tcpip', port=9000 ):
        """AddCRUTClient(node, protocol='tcpip', port=9000)
        Tells a crutserver node where to find a client."""
        url = "%s://%s:%d" % (protocol, node.host, port)
        self.__add_crut_client( node, url )
class CRUTProxyNode(CRNode):
    """Node in the SPU graph that relays CRUT events: it receives them
    from crutservers and forwards them to crutclients.

    public functions:
        Conf: Sets a key/value list in this node's configuration
        AddCRUTClient: Adds a client to the list of clients.
    """
    def __init__( self, host='localhost', constraint = "name", constraintArg = None ):
        """Create a CRUT proxy node for the given host."""
        CRNode.__init__(self, host, constraint, constraintArg)
        self.crutclients = []  # downstream (node, url) pairs
        self.crutservers = []  # upstream (node, url) pairs
    def __add_crut_client( self, node, url ):
        self.crutclients.append( (node, url) )
    def AddCRUTClient( self, node, protocol='tcpip', port=9000 ):
        """AddCRUTClient(node, protocol='tcpip', port=9000)
        Tells a crutproxy node where to find a client."""
        url = "%s://%s:%d" % (protocol, node.host, port)
        self.__add_crut_client( node, url )
    def __add_crut_server( self, node, url ):
        self.crutservers.append( (node, url) )
    def AddCRUTServer( self, node, protocol='tcpip', port=9000 ):
        """Register an upstream crutserver and tell it about this proxy."""
        url = "%s://%s:%d" % (protocol, node.host, port)
        self.__add_crut_server( node, url )
        if node != None:
            node.AddCRUTClient( self, protocol, port)
class CRApplicationNode(CRNode):
    """Sub class of CRNode that defines the start of the SPU graph.
    public functions:
    SetApplication: Sets the application that generates the OpenGL.
    StartDir: Sets the starting directory of the app.
    ClientDLL: Sets the DLL of the client.
    """
    # Class-wide counter used to hand out unique application-node IDs.
    AppID = 0
    def __init__( self, host='localhost', constraint = "name", constraintArg = None ):
        """CRApplicationNode(host='localhost')
        Creates an application node for the given "host"."""
        CRNode.__init__(self, host,constraint,constraintArg)
        self.crutservers = []
        self.crutclients = []
        # Each app node gets a unique, monotonically increasing id.
        self.id = CRApplicationNode.AppID
        CRApplicationNode.AppID += 1
        self.Conf('start_dir', '.')
    def SetApplication( self, app ):
        """SetApplication(name)
        Sets the name of the application that's run."""
        self.Conf('application', app)
    def StartDir( self, dir ):
        """StartDir(dir)
        Sets the directory the application starts in."""
        self.Conf('start_dir', dir)
    def ClientDLL( self, dir ):
        """Set the directory to search for the crfaker library."""
        self.Conf('client_dll', dir)
    def __add_crut_client( self, node, url ):
        # Internal: remember the (node, url) pair.
        self.crutclients.append( (node, url) )
    def AddCRUTClient( self, node, protocol='tcpip', port=9000 ):
        """AddCRUTClient(node, protocol='tcpip', port=9000)
        Tells a crutserver node where to find a client."""
        self.__add_crut_client( node, "%s://%s:%d" % (protocol,node.host,port) )
    def __add_crut_server( self, node, url ):
        self.crutservers.append( (node, url) )
    def AddCRUTServer( self, node, protocol='tcpip', port=9000 ):
        # Register an upstream crutserver and tell it about this node.
        self.__add_crut_server( node, "%s://%s:%d" % (protocol,node.host,port) )
        if node != None:
            node.AddCRUTClient( self, protocol, port)
class SockWrapper:
    """Internal convenience class for handling sockets.

    Provides line-oriented reads, reply helpers that prefix a numeric
    status code, and per-connection state used to broker network
    connections between Chromium components."""
    # Reply status codes (first token of every reply line).
    NOERROR_MORE = 100
    NOERROR = 200
    UNKNOWNHOST = 400
    NOTHINGTOSAY = 401
    UNKNOWNCOMMAND = 402
    UNKNOWNSPU = 403
    UNKNOWNPARAM = 404
    UNKNOWNSERVER = 405
    UNKNOWNPROTOCOL = 406
    NOAPPLICATION = 407
    INVALIDPARAM = 408
    def __init__(self, sock):
        self.sock = sock # A low-level socket object
        self.file = sock.makefile( "r" )
        self.SPUid = -1
        self.node = None
        # Info for brokered network connections. These are used to
        # implement the 'acceptrequest' and 'connectrequest' routines.
        # (hostname, port, endianness) of an outstanding accept request:
        self.tcpip_accept_wait = None
        # (hostname, port, endianness) of an outstanding connect request:
        self.tcpip_connect_wait = None
        # similar for other protocols:
        self.sdp_accept_wait = None
        self.sdp_connect_wait = None
        self.ib_accept_wait = None
        self.ib_connect_wait = None
        self.gm_accept_wait = None
        self.gm_connect_wait = None
        self.teac_accept_wait = []
        self.teac_connect_wait = []
        self.tcscomm_accept_wait = []
        self.tcscomm_connect_wait = []
    def readline( self ):
        """Read one line from the socket, stripped of surrounding whitespace."""
        # str.strip() method replaces the legacy string.strip() function.
        return self.file.readline().strip()
    def Send(self, str):
        """Append a newline to str and send it over the socket"""
        # NOTE: the parameter name shadows the builtin 'str'; kept for
        # backward compatibility with any keyword callers.
        self.sock.send( str + "\n" )
    def Reply(self, code, s=None):
        """Send 'code' (and the optional payload 's') as one reply line."""
        # str() replaces the old backquote-repr; codes are plain ints,
        # for which repr() and str() are identical.
        tosend = str(code)
        if s != None:
            tosend += " " + str(s)
        self.Send( tosend )
        CRDebug( 'Replying (%d): "%s"' % ( code, s ) )
    def Success( self, msg ):
        """Send a success message over the socket"""
        self.Reply( SockWrapper.NOERROR, msg )
    def MoreComing( self, msg ):
        # Success reply that tells the peer more reply lines will follow.
        self.Reply( SockWrapper.NOERROR_MORE, msg )
    def Failure( self, code, msg ):
        """Send a failure message over the socket"""
        self.Reply( code, msg )
# Generic ways to map all known node capability types
NodeTypes = { } # key is a node type, like "faker" or "crutserver"
#
# Now, for each node type, insert a (validate, claim) tuple into the
# NodeTypes dictionary.  The validate function answers "may this kind of
# process claim this node?"; the claim function marks the node as taken
# and replies to the connecting process.
#
def FakerValidNode(node):
    """An app faker may claim 'node' iff it is an unclaimed application node."""
    if node.spokenfor:
        return False
    return isinstance(node, CRApplicationNode)
def FakerClaim(node, sock):
    """Claim 'node' for an app faker and, when 'sock' is given, reply with
    the node id and application command string.  Fails the request when
    the node has no application configured."""
    try:
        application = node.config['application']
    except KeyError:
        # Narrowed from a bare 'except:', which could hide unrelated errors.
        if sock != None:
            sock.Failure( SockWrapper.NOAPPLICATION, "Client node has no application!" )
        return
    node.spokenfor = 1
    if sock != None:
        sock.node = node
        sock.Success( "%d %s" % (node.id, application) )
NodeTypes["faker"] = (FakerValidNode, FakerClaim)
def CrutProxyValidNode(node):
    # A crutproxy may claim any unclaimed CRUTProxyNode.
    return (not node.spokenfor and isinstance(node, CRUTProxyNode))
def CrutProxyClaim(node, sock):
    # Mark the node as taken and acknowledge (the reply payload is unused).
    node.spokenfor = 1
    if sock != None:
        sock.node = node
        sock.Success( " " )
NodeTypes["crutproxy"] = (CrutProxyValidNode, CrutProxyClaim)
def CrutServerValidNode(node):
    # A crutserver may claim any unclaimed CRUTServerNode.
    return (not node.spokenfor and isinstance(node, CRUTServerNode))
def CrutServerClaim(node, sock):
    # Mark the node as taken and acknowledge (the reply payload is unused).
    node.spokenfor = 1
    if sock != None:
        sock.node = node
        sock.Success( " " )
NodeTypes["crutserver"] = (CrutServerValidNode, CrutServerClaim)
# CRUTClients are different, in that they aren't unique nodes; they're
# a subset of application nodes (that identify themselves with the "crutclient" command)
def CrutClientValidNode(node):
    # An app node qualifies only if it has at least one crutserver and its
    # crut identity has not been claimed yet (crut_spokenfor is separate
    # from the faker claim flag).
    return (not node.crut_spokenfor and isinstance(node, CRApplicationNode) and len(node.crutservers) > 0)
def CrutClientClaim(node, sock):
    node.crut_spokenfor = 1
    if sock != None:
        sock.node = node
        sock.Success( " " )
NodeTypes["crutclient"] = (CrutClientValidNode, CrutClientClaim)
def ServerValidNode(node):
    # A crserver may claim any unclaimed CRNetworkNode.
    return (not node.spokenfor and isinstance(node, CRNetworkNode))
def ServerClaim(node, sock):
    node.spokenfor = 1
    node.spusloaded = 1
    if sock != None:
        sock.node = node
        # Reply with the SPU chain: "<count> <id> <name> <id> <name> ...".
        spuchain = "%d" % len(node.SPUs)
        for spu in node.SPUs:
            spuchain += " %d %s" % (spu.ID, spu.name)
        sock.Success( spuchain )
NodeTypes["server"] = (ServerValidNode, ServerClaim)
def VNCServerValidNode(node):
    # Note: no spokenfor test - every server may match the single shared
    # VNC server node (see VNCServerClaim below).
    return isinstance(node, CRVNCServerNode)
def VNCServerClaim(node, sock):
    # all servers can match one VNC server node
    node.spusloaded = 1
    if sock != None:
        sock.node = node
        # Reply with the SPU chain: "<count> <id> <name> <id> <name> ...".
        spuchain = "%d" % len(node.SPUs)
        for spu in node.SPUs:
            spuchain += " %d %s" % (spu.ID, spu.name)
        sock.Success( spuchain )
        # NOTE(review): message says "ServerClaim" but this is
        # VNCServerClaim - misleading, though only a debug string.
        CRDebug("ServerClaim returning %s" % spuchain)
NodeTypes["vncserver"] = (VNCServerValidNode, VNCServerClaim)
class CRSpawner(threading.Thread):
    """A class used to start processes on nodes.
    Since the mothership knows what should be running on each node, it
    can start these processes if you tell it how to start a job remotely.
    Each CRNode now has members named autostart and autostart_argv. If
    you set these to non-null strings, the spawner will call os.spawnv()
    with these values when the spawner's run() member is called.
    The autostart member should be a string containing the full path
    to an executable program to be run, i.e. "/usr/bin/ssh". The
    autostart_argv member should be a vector containing the argument
    list for the program, i.e.,
    ( "ssh", "mynode.mydotcom.com", "/usr/local/bin/crserver" )
    NOTE: Yes, the program name should be the zeroth item in the list
    of arguments, which means it is repeated.
    To use this class, instantiate a CRSpawner object and call its
    start() method. Call the waitForFinish() method if you want to
    wait for the CRSpawner thread to exit/finish.
    """
    def __init__( self, nodes, branches=0, maxnodes=1):
        # branches: fan-out factor for the thread tree; maxnodes: largest
        # node count this thread will handle itself before splitting.
        self.maxnodes = maxnodes
        self.branches = branches
        self.nodes = []
        self.count = 0
        for node in nodes:
            self.nodes.append( node )
            self.count = self.count + 1
        threading.Thread.__init__(self)
    def run( self ):
        if self.branches < 2 or self.count <= self.maxnodes:
            # This thread will sequentially spawn all listed nodes.
            for node in self.nodes:
                if node.autostart != "":
                    # Fire and forget; the spawned PID is not tracked.
                    p = os.spawnv( os.P_NOWAIT, node.autostart, node.autostart_argv )
                    CRInfo("Autostart for node %s: %s" % (node.host, str(node.autostart_argv)))
                else:
                    # No autostart configured: tell the user what to start
                    # by hand, based on the node's type.
                    if isinstance(node, CRNetworkNode):
                        CRInfo("Start a crserver on %s" % node.host)
                    elif isinstance(node, CRUTServerNode):
                        CRInfo("Start a crutserver on %s" % node.host)
                    elif isinstance(node, CRUTProxyNode):
                        CRInfo("Start a crutproxy on %s" % node.host)
                    else:
                        CRInfo("Start a crappfaker on %s" % node.host)
        else:
            # We have more nodes than we want to handle in this
            # thread. Instead of spawning processes, create new
            # threads, and have those threads handle pieces of the
            # nodes.
            childsize = int((self.count + self.branches - 1)/self.branches)
            for i in range(0, self.count, childsize):
                child = CRSpawner(self.nodes[i:i+childsize], self.branches, self.maxnodes)
                child.start()
    #enddef
    def waitForFinish(self):
        """This method won't return until this thread has completed."""
        # NOTE: the join() method doesn't seem to do what we want.
        while 1:
            if self.isAlive():
                time.sleep(1) # Wait a second, then try again
            else:
                return
    #enddef
class CR:
    """Main class that controls the mothership
    Most of the mothership network communication takes the form of
    incoming strings that the mothership responds to with answer
    strings. The do_* functions handle this communication language.
    public functions:
    AddNode: Adds a node to the SPU graph.
    MTU: Sets the maximum communication buffer size.
    Go: Starts the ball rolling.
    AllSPUConf: Adds the key/values list to all SPUs' configuration.
    Conf: Set a mothership parameter
    GetConf: Return value of a mothership parameter
    ContextRange: Sets the Quadrics context range.
    NodeRange: Sets the Quadrics node range.
    CommKey: Sets the Quadrics communication key
    internal functions:
    ProcessRequest: Handles an incoming request, mapping it to
    an appropriate do_* function.
    do_acceptrequest: Accepts the given socket.
    do_clients: Sends the list of clients to a server.
    do_connectrequest: Connects the given socket.
    do_faker: Maps a faker app to an ApplicationNode.
    do_opengldll: Identifies the application node in the graph.
    do_rank: Sends the node's rank down.
    do_disconnect: Disconnects from clients.
    do_reset: Resets the mothership to its initial state.
    do_server: Identifies the server in the graph.
    do_vncserver: Identifies a new server for VNC replication.
    do_serverids: Sends the list of server IDs.
    do_serverparam: Sends the given server parameter.
    do_fakerparam: Sends the given app faker parameter.
    do_servers: Sends the list of servers.
    do_servertiles: Sends the defined tiles for a server.
    do_spu: Identifies a SPU.
    do_spuparam: Sends the given SPU (or global) parameter.
    do_tiles: Sends the defined tiles for a SPU.
    do_setparam: Sets a mothership parameter value
    do_getparam: Returns a mothership parameter value
    do_logperf: Logs Performance Data to a logfile.
    do_gettilelayout: Calls the user's LayoutTiles() function and returns
    the list of new tiles.
    do_getstatus: Returns information about the state of the nodes.
    tileReply: Packages up a tile message for socket communication.
    ClientDisconnect: Disconnects from a client
    """
    # Functions registered via the startup-callback mechanism; each is
    # invoked with the CR instance once the mothership socket is listening
    # (see Go()).
    startupCallbacks = []
def __init__( self ):
self.nodes = [] # list of all nodes
self.all_sockets = []
self.wrappers = {} # list of SockWrapper objects, indexed by socket no.
self.allSPUConf = []
self.daughters = []
self.conn_id = 1 # Next free connection ID number
self.enable_autostart = 1
self.config = {"MTU" : 1024 * 1024,
"low_context" : 32,
"high_context" : 35,
"low_node" : "iam0",
"high_node" : "iamvis20",
"comm_key": [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
"autostart_branches": 0,
"autostart_max_nodes_per_thread": 1}
# This is set only on daughterships; "grandmotherships" don't
# have mothers, and never have pending resolutions.
self.mother = None
self.pendingResolution = [ ]
def AddNode( self, node ):
"""Adds a node to the Mothership."""
node.nodeIndex = len(self.nodes) # assign the node's ID now
self.nodes.append( node )
    def Conf( self, key, value ):
        """Set a global mothership configuration value (via Python).
        do_setparam is the equivalent entry point for C clients."""
        self.config[key] = value
def ContextRange( self, low_context, high_context ):
"""ContextRange( low_context, high_context )
Sets the context range to use with Elan."""
self.config["low_context"] = low_context
self.config["high_context"] = high_context
def NodeRange( self, low_node, high_node ):
"""NodeRange( low_node, high_node )
Sets the node range to use with Elan."""
period = low_node.find( "." )
if period != -1:
low_node = low_node[:period]
self.config["low_node"] = low_node
period = high_node.find( "." )
if period != -1:
high_node = high_node[:period]
self.config["high_node"] = high_node
def CommKey( self, byteList ):
"""CommKey( [byte0, byte1, ..., byte15] )
Sets the user key to use with Elan."""
self.config["comm_key"]= byteList
CRDebug("Setting comm key to %s"%str(byteList))
    def AllSPUConf( self, regex, key, *values ):
        """AllSPUConf(regex, key, *values)
        Adds the key/values list to the global SPU configuration.
        NOTE(review): 'regex' is presumably matched against SPU names
        when the config is served - confirm in the do_spuparam handler."""
        self.allSPUConf.append( (regex, key, map( MakeString, values) ) )
    # XXX obsolete; use Conf('MTU', value) instead
    def MTU( self, mtu ):
        """MTU(size)  (deprecated - use Conf('MTU', value))
        Sets the maximum buffer size allowed in communication
        between SPUs."""
        self.Conf("MTU", mtu)
def FindSPUHost( self, spu ):
"""Seach all nodes to find the one that hosts the given SPU."""
for node in self.nodes:
for s in node.SPUs:
if s == spu:
return node
return None
def do_setparam( self, sock, args ):
"""Set a global mothership parameter value (via C)"""
params = args.split( " ", 1 )
key = params[0]
value = params[1]
self.Conf(key, value)
sock.Success( "OK" )
return
def do_getparam( self, sock, args ):
"""Get a global mothership parameter value (via C)"""
key = args
if not self.config.has_key(key):
response = ""
else:
response = str(self.config[key])
sock.Success( response )
return
    def do_exit( self, sock, args ):
        """This is called in response to an 'exit' message from a client.
        Raising KeyboardInterrupt unwinds Go()'s main loop, which closes
        all sockets and shuts the mothership down."""
        raise KeyboardInterrupt
    def get_mothership_port(self):
        """Get the mothership port. Use CRMOTHERSHIP env var if it's set,
        otherwise return default value."""
        # Port was not specified. Get it from
        # CRMOTHERSHIP environment variable if possible..
        PORT = DefaultMothershipPort # default value
        if os.environ.has_key('CRMOTHERSHIP'):
            motherString = os.environ['CRMOTHERSHIP']
            # CRMOTHERSHIP has the form "host[:port]"; use the port part
            # when present, falling back to the default on parse errors.
            loc = string.find(motherString,':')
            if loc >= 0:
                try:
                    PORT = int(motherString[loc+1:])
                    CRDebug("Using PORT %d"%PORT)
                except Exception, val:
                    CRInfo("Could not parse port number from <%s>: %s"%(motherString,val))
                    CRInfo("Using default PORT!")
        return PORT
    def Go( self, PORT = -1 ):
        """Go(portNumber)
        Starts the ball rolling.
        This starts the mothership's event loop.
        The optional parameter is the mothership port we'll listen on."""
        if self.mother:
            CRInfo("This is Chromium Daughtership, Version " + Version)
            # You must always listen to your mother.
            self.all_sockets.append(self.mother.sock)
            self.wrappers[self.mother.sock] = self.mother
        else:
            CRInfo("This is Chromium, Version " + Version)
        try:
            if PORT == -1:
                PORT = self.get_mothership_port()
            # Try each address family returned for a passive socket until
            # one can be created, bound and listened on.
            for res in socket.getaddrinfo(None, PORT, socket.AF_UNSPEC, socket.SOCK_STREAM, 0, socket.AI_PASSIVE):
                (af, socktype, proto, canonname, sa) = res
                try:
                    s = socket.socket( af, socktype )
                except:
                    CRDebug( "Couldn't create socket of family %u, trying another one" % af )
                    continue
                try:
                    s.setsockopt( socket.SOL_SOCKET, socket.SO_REUSEADDR, 1 )
                except:
                    CRDebug( "Couldn't set the SO_REUSEADDR option on the socket!" )
                    continue
                try:
                    s.bind( sa )
                except:
                    CRDebug( "Couldn't bind to port %d" % PORT )
                    continue
                try:
                    s.listen(100)
                except:
                    CRDebug( "Couldn't listen!" )
                    continue
                #CRDebug( "Mothership ready" )
                self.all_sockets.append(s)
                # Call any callbacks which may have been
                # set via CRAddStartupCallback()
                for cb in CR.startupCallbacks:
                    cb(self)
                # Create a single thread that will then go
                # spawn nodes (for autostart nodes, this will
                # actually start the servers or applications
                # itself; for manual start nodes, a message
                # will be printed directing the user to start
                # the appropriate executable).
                #
                # This thread will either sequentially handle
                # all nodes (by default, if autostart_branches=None)
                # or will create a number of new threads (quite
                # possibly recursively) to handle subsets of the
                # nodes (if autostart_branches is greater than 1,
                # this will be a tree of threads).
                if self.enable_autostart:
                    spawner = CRSpawner(self.nodes, self.config['autostart_branches'], self.config['autostart_max_nodes_per_thread'])
                    spawner.start()
                    spawner.waitForFinish()
                # If we're supposed to "phone home" with a signal, do so
                # with the USR1 signal.  This will happen when we're
                # auto-starting on Linux - the OpenGL stub will wait
                # until the mothership is going before attempting to
                # make contact.  (The CRSIGNAL envariable should never
                # be set on Windows, since Windows Python doesn't
                # seem to support os.kill().)
                needToSignal = 0
                if os.environ.has_key('CRSIGNAL'):
                    needToSignal = 1
                # Begin main mothership loop here.  Accept new connections
                # from Chromium components and process configuration/etc
                # requests.
                while 1:
                    # We can only safely signal the mothership when all the
                    # dynamic nodes have been resolved; this is because the
                    # main application will ask about what servers are available,
                    # and we don't know the answer until all dynamic nodes are
                    # resolved.  Note that this essentially prevents Windows
                    # users from using dynamic hosts, because they cannot signal.
                    if needToSignal and len(dynamicHostsNeeded) == 0:
                        process = int(os.environ['CRSIGNAL'])
                        CRInfo("Mothership signalling spawning process %d" % process)
                        os.kill(process,signal.SIGUSR1)
                        needToSignal = 0
                    # Poll (0.1s timeout) so the signalling check above runs
                    # even when no socket is ready.
                    ready = select.select( self.all_sockets, [], [], 0.1 )[0]
                    for sock in ready:
                        if sock == s:
                            # accept a new connection
                            (conn, addr) = s.accept()
                            self.wrappers[conn] = SockWrapper(conn)
                            self.all_sockets.append( conn )
                        else:
                            # process request from established connection
                            self.ProcessRequest( self.wrappers[sock] )
                    #end for
                # end while
            # endfor
            # if we get here we weren't able to create the mothership's port
            Fatal( "Couldn't find/create local TCP port (make sure that another mothership isn't already running)")
        except KeyboardInterrupt:
            # Normal shutdown path (also triggered by do_exit()).
            try:
                for sock in self.all_sockets:
                    sock.shutdown(2)
                    sock.close( )
            except:
                pass
            CRInfo("\n\nThank you for using Chromium!")
        except:
            CRInfo("\n\nMOTHERSHIP EXCEPTION! TERRIBLE!")
            traceback.print_exc(None, sys.stderr)
            try:
                for sock in self.all_sockets:
                    sock.shutdown(2)
                    sock.close( )
            except:
                pass
    def ClientError( self, sock_wrapper, code, msg ):
        """ClientError(sock_wrapper, code, msg)
        Sends an error message on the given socket, then disconnects
        the client."""
        sock_wrapper.Reply( code, msg )
        self.ClientDisconnect( sock_wrapper )
def ClientDisconnect( self, sock_wrapper ):
"""ClientDisconnect(sock_wrapper)
Disconnects from the client on the given socket."""
self.all_sockets.remove( sock_wrapper.sock )
del self.wrappers[sock_wrapper.sock]
try:
sock_wrapper.sock.close( )
except:
pass
    def ConnectTCPIP( self, sock, connect_info ):
        """Connect routine for TCP/IP (see do_connectrequest()).
        connect_info is [protocol, hostname, port, endianness]."""
        (p, hostname, port_str, endianness_str) = connect_info
        assert p == "tcpip"
        hostname = socket.gethostbyname(QualifyHostname(hostname))
        port = int(port_str)
        endianness = int(endianness_str)
        # Loop over all of the mothership's socket wrappers, looking for
        # a socket which has an Accept pending on the same host and port.
        # When found, return a new connection ID.
        for server_sock in self.wrappers.values():
            if server_sock.tcpip_accept_wait != None:
                (server_hostname, server_port, server_endianness) = server_sock.tcpip_accept_wait
                if SameHost(server_hostname, hostname) and server_port == port:
                    sock.Success("%d %d" % (self.conn_id, server_endianness))
                    # reply to the waiting server
                    server_sock.Success( "%d" % self.conn_id )
                    # we don't want to re-use this info!!
                    server_sock.tcpip_accept_wait = None
                    self.conn_id += 1
                    return
                else:
                    CRDebug( "not connecting to \"%s:%d\" (!= \"%s:%d\")"
                             % (server_hostname, server_port, hostname, port) )
            else:
                CRDebug("tcpip_accept_wait")
        # If we get here, the other end of the connection hasn't contacted
        # the mothership yet. So, save this request's hostname, port and
        # endianness for when the matching "acceptrequest" message comes in.
        # When we get it, we'll finally reply on the saved client socket.
        sock.tcpip_connect_wait = (hostname, port, endianness)
        return
    def ConnectSDP( self, sock, connect_info ):
        """Connect routine for SDP (see do_connectrequest()).
        Same brokering scheme as ConnectTCPIP, using sdp_*_wait state."""
        (p, hostname, port_str, endianness_str) = connect_info
        hostname = socket.gethostbyname(QualifyHostname(hostname))
        port = int(port_str)
        endianness = int(endianness_str)
        # Look for a server with a matching pending accept.
        for server_sock in self.wrappers.values():
            if server_sock.sdp_accept_wait != None:
                (server_hostname, server_port, server_endianness) = server_sock.sdp_accept_wait
                if SameHost(server_hostname, hostname) and server_port == port:
                    sock.Success("%d %d" % (self.conn_id, server_endianness))
                    server_sock.Success( "%d" % self.conn_id )
                    # we don't want to re-use this info!!
                    server_sock.sdp_accept_wait = None
                    self.conn_id += 1
                    return
                else:
                    CRDebug( "not connecting to \"%s:%d\" (!= \"%s:%d\")"
                             % (server_hostname, server_port, hostname, port) )
        # No match yet; park this request until the accept arrives.
        sock.sdp_connect_wait = (hostname, port, endianness)
        return
    def ConnectIB( self, sock, connect_info ):
        """Connect routine for InfiniBand (see do_connectrequest()).
        Carries extra IB parameters (node id, lid, queue-pair info) that
        are exchanged between the two endpoints on success."""
        (p, hostname, port_str, node_id_str, endianness_str, lid1, qp_ous, qp) = connect_info
        CRInfo("do_connectrequest processing ib protocol")
        hostname = socket.gethostbyname(QualifyHostname(hostname))
        port = int(port_str)
        node_id = int(node_id_str)
        endianness = int(endianness_str)
        # Look for a server with a matching pending accept.
        for server_sock in self.wrappers.values():
            if server_sock.ib_accept_wait != None:
                (server_hostname, server_port, server_node_id, server_endianness, server_lid1, server_qp_ous, server_qp) = server_sock.ib_accept_wait
                if SameHost(server_hostname, hostname) and server_port == port:
                    sock.Success( "%d %d %d %s %s %s" % (self.conn_id, server_node_id, server_endianness, server_lid1, server_qp_ous, server_qp ) )
                    server_sock.Success( "%d %d %s %s %s" % (self.conn_id, node_id, lid1, qp_ous, qp ) )
                    # we don't want to re-use this info!!
                    server_sock.ib_accept_wait = None
                    self.conn_id += 1
                    return
                else:
                    CRDebug( "not connecting to \"%s:%d\" (!= \"%s:%d\")"
                             % (server_hostname, server_port, hostname, port) )
        # No match yet; park this request until the accept arrives.
        sock.ib_connect_wait = (hostname, port, node_id, endianness, lid1, qp_ous, qp)
        return
    def ConnectGM( self, sock, connect_info ):
        """Connect routine for GM/Myrinet (see do_connectrequest()).
        NOTE(review): unlike TCPIP/SDP/IB, the hostname is not passed
        through QualifyHostname()/gethostbyname() here - confirm whether
        GM callers always send a resolvable name."""
        (p, hostname, port_str, node_id_str, port_num_str, endianness_str) = connect_info
        port = int(port_str)
        node_id = int(node_id_str)
        port_num = int(port_num_str)
        endianness = int(endianness_str)
        # Look for a server with a matching pending accept.
        for server_sock in self.wrappers.values():
            if server_sock.gm_accept_wait != None:
                (server_hostname, server_port, server_node_id, server_port_num, server_endianness) = server_sock.gm_accept_wait
                if SameHost(server_hostname, hostname) and server_port == port:
                    sock.Success( "%d %d %d %d" % (self.conn_id, server_node_id, server_port_num, server_endianness) )
                    server_sock.Success( "%d %d %d" % (self.conn_id, node_id, port_num) )
                    server_sock.gm_accept_wait = None
                    self.conn_id += 1
                    return
        # No match yet; park this request until the accept arrives.
        sock.gm_connect_wait = (hostname, port, node_id, port_num, endianness)
        return
    def ConnectQuadrics( self, sock, connect_info ):
        """Connect routine for Quadrics/teac (see do_connectrequest()).
        Uses rank-based addressing; pending accepts are held in a FIFO
        list rather than a single slot."""
        (p, remote_hostname, remote_rank_str, my_hostname, my_rank_str, my_endianness_str) = connect_info
        remote_rank = int(remote_rank_str)
        my_rank = int(my_rank_str)
        my_endianness = int(my_endianness_str)
        for server_sock in self.wrappers.values():
            if server_sock.teac_accept_wait != []:
                # Only the head of the FIFO is a candidate.
                (server_hostname, server_rank, server_endianness) = server_sock.teac_accept_wait[0]
                if SameHost(server_hostname, remote_hostname) and server_rank == remote_rank:
                    server_sock.teac_accept_wait.pop(0)
                    sock.Success( "%d %d" % (self.conn_id, server_endianness) )
                    server_sock.Success( "%d %s %d %d" % (self.conn_id, my_hostname, my_rank, my_endianness) )
                    self.conn_id += 1
                    return
        # No match yet; queue this request until the accept arrives.
        sock.teac_connect_wait.append( (my_hostname, my_rank, my_endianness, remote_hostname, remote_rank) )
        return
    def ConnectTcscomm( self, sock, connect_info ):
        """Connect routine for Quadrics-Tcscomm (see do_connectrequest()).

        Mirrors ConnectQuadrics() but uses the tcscomm_* wait queues."""
        (p, remote_hostname, remote_rank_str, my_hostname, my_rank_str, my_endianness_str) = connect_info
        remote_rank = int(remote_rank_str)
        my_rank = int(my_rank_str)
        my_endianness = int(my_endianness_str)
        for server_sock in self.wrappers.values():
            if server_sock.tcscomm_accept_wait != []:
                # Only the head of the FIFO queue is considered for a match.
                (server_hostname, server_rank, server_endianness) = server_sock.tcscomm_accept_wait[0]
                if SameHost(server_hostname, remote_hostname) and server_rank == remote_rank:
                    server_sock.tcscomm_accept_wait.pop(0)
                    sock.Success( "%d %d" % (self.conn_id, server_endianness) )
                    server_sock.Success( "%d %s %d %d" % (self.conn_id, my_hostname, my_rank, my_endianness) )
                    self.conn_id += 1
                    return
        # No accept pending; queue this request for a later AcceptTcscomm().
        sock.tcscomm_connect_wait.append( (my_hostname, my_rank, my_endianness, remote_hostname, remote_rank) )
        return
def do_connectrequest( self, sock, args ):
"""
This function is called when the mothership receives a "connectrequest"
message from a network-specific Connect() function (in "util/") if the
connection is brokered.
We call a network-specific connect routine above which returns its
response on the given socket.
"""
connect_info = args.split(" ")
protocol = connect_info[0]
if protocol == 'tcpip' or protocol == 'udptcpip':
self.ConnectTCPIP(sock, connect_info)
elif protocol == 'sdp':
self.ConnectSDP(sock, connect_info)
elif protocol == 'ib':
self.ConnectIB(sock, connect_info)
elif protocol == 'gm':
self.ConnectGM(sock, connect_info)
elif protocol == 'quadrics':
self.ConnectQuadrics(sock, connect_info)
elif protocol == 'quadrics-tcscomm':
self.ConnectTcscomm(sock, connect_info)
else:
sock.Failure(SockWrapper.UNKNOWNPROTOCOL,
"Never heard of protocol %s" % protocol)
return
def AcceptTCPIP( self, sock, accept_info ):
"""Accept routine for TCP/IP (see do_acceptrequest())"""
(p, hostname, port_str, endianness_str) = accept_info
assert p == "tcpip"
# If the mothership doesn't recognize the remote host,
# the QualifyHostname() call can fail with a cryptic exception.
# This is fatal; but we can still give a better error
# than the cryptic message.
try:
hostname = socket.gethostbyname(QualifyHostname(hostname))
except:
Fatal( "Mothership error: could not qualify hostname '%s' - check /etc/hosts" % hostname)
port = int(port_str)
endianness = int(endianness_str)
# Loop over all of the mothership's socket wrappers, looking for
# a socket which has a Connect pending on the same host and port.
# When found, return a new connection ID and the server's endianness.
for client_sock in self.wrappers.values():
if client_sock.tcpip_connect_wait != None:
(client_hostname, client_port, client_endianness) = client_sock.tcpip_connect_wait
if SameHost(client_hostname, hostname) and client_port == port:
sock.Success( "%d" % self.conn_id )
# reply to the waiting client
client_sock.Success("%d %d" % (self.conn_id, endianness))
# we don't want to re-use this info!!
client_sock.tcpip_connect_wait = None
self.conn_id += 1
return
else:
CRDebug( "not accepting from \"%s:%d\" (!= \"%s:%d\")" % (client_hostname, client_port, hostname, port ) )
else:
CRDebug( "tcpip_connect_wait" )
# If we get here, the other end of the connection hasn't contacted
# the mothership yet. So, save this request's hostname, port and
# endianness for when the matching "connectrequest" message comes in.
# When we get it, we'll finally reply on the saved server socket.
sock.tcpip_accept_wait = (hostname, port, endianness)
return
    def AcceptSDP( self, sock, accept_info ):
        """Accept routine for SDP (see do_acceptrequest()).

        Same brokering scheme as AcceptTCPIP(), using the sdp_* wait
        slots: match a pending ConnectSDP() on host:port, or park this
        accept in sdp_accept_wait until one arrives."""
        (p, hostname, port_str, endianness_str) = accept_info
        hostname = socket.gethostbyname(QualifyHostname(hostname))
        port = int(port_str)
        endianness = int(endianness_str)
        for client_sock in self.wrappers.values():
            if client_sock.sdp_connect_wait != None:
                (client_hostname, client_port, client_endianness) = client_sock.sdp_connect_wait
                if SameHost(client_hostname, hostname) and client_port == port:
                    sock.Success( "%d" % self.conn_id )
                    # reply to the client that was waiting for this accept
                    client_sock.Success("%d %d" % (self.conn_id, endianness))
                    # we don't want to re-use this info!!
                    client_sock.sdp_connect_wait = None
                    self.conn_id += 1
                    return
                else:
                    CRDebug( "not accepting from \"%s:%d\" (!= \"%s:%d\")" % (client_hostname, client_port, hostname, port ) )
            else:
                CRDebug( "sdp_connect_wait" )
        # No matching connect yet; save for the later "connectrequest".
        sock.sdp_accept_wait = (hostname, port, endianness)
        return
    def AcceptIB( self, sock, accept_info ):
        """Accept routine for InfiniBand (see do_acceptrequest()).

        Besides host/port/endianness, IB brokering also exchanges the
        node id, LID and queue-pair parameters (lid1, qp_ous, qp) between
        the two ends."""
        (p, hostname, port_str, node_id_str, endianness_str, lid1, qp_ous, qp) = accept_info
        CRInfo("do_acceptrequest processing ib protocol")
        hostname = socket.gethostbyname(hostname)
        port = int(port_str)
        node_id = int(node_id_str)
        endianness = int(endianness_str)
        for client_sock in self.wrappers.values():
            if client_sock.ib_connect_wait != None:
                (client_hostname, client_port, client_node_id,
                 client_endianness, client_lid1, client_qp_ous, client_qp) = client_sock.ib_connect_wait
                if SameHost(client_hostname, hostname) and client_port == port:
                    # Hand each side the other side's IB parameters.
                    sock.Success( "%d %d %s %s %s" % (self.conn_id, client_node_id, client_lid1, client_qp_ous, client_qp ) )
                    client_sock.Success( "%d %d %d %s %s %s" % (self.conn_id, node_id, endianness, lid1, qp_ous, qp ) )
                    # we don't want to re-use this info!!
                    client_sock.ib_connect_wait = None
                    self.conn_id += 1
                    return
                else:
                    CRDebug( "not accepting from \"%s:%d\" (!= \"%s:%d\")" % (client_hostname, client_port, hostname, port ) )
            else:
                CRDebug( "ib_connect_wait" )
        # No matching connect yet; save for the later "connectrequest".
        sock.ib_accept_wait = (hostname, port, node_id, endianness, lid1, qp_ous, qp)
        return
    def AcceptGM( self, sock, accept_info ):
        """Accept routine for GM/Myrinet (see do_acceptrequest()).

        Mirror image of ConnectGM(): match a pending gm_connect_wait on
        host:port, or park this accept in gm_accept_wait."""
        (p, hostname, port_str, node_id_str, port_num_str, endianness_str) = accept_info
        port = int(port_str)
        node_id = int(node_id_str)
        port_num = int(port_num_str)
        endianness = int(endianness_str)
        for client_sock in self.wrappers.values():
            if client_sock.gm_connect_wait != None:
                (client_hostname, client_port, client_node_id, client_port_num, client_endianness) = client_sock.gm_connect_wait
                if SameHost(client_hostname, hostname) and client_port == port:
                    sock.Success( "%d %d %d" % (self.conn_id, client_node_id, client_port_num) )
                    client_sock.Success( "%d %d %d %d" % (self.conn_id, node_id, port_num, endianness) )
                    self.conn_id += 1
                    # consume the connect info so it can't be matched twice
                    client_sock.gm_connect_wait = None
                    return
        sock.gm_accept_wait = (hostname, port, node_id, port_num, endianness)
        return
    def AcceptQuadrics( self, sock, accept_info ):
        """Accept routine for Quadrics (see do_acceptrequest()).

        Mirror image of ConnectQuadrics(); the wait slots are FIFO
        queues and only the head entry is examined for a match."""
        (p, hostname, rank_str, endianness_str) = accept_info
        rank = int(rank_str)
        endianness = int(endianness_str)
        for client_sock in self.wrappers.values():
            if client_sock.teac_connect_wait != []:
                (client_hostname, client_rank, client_endianness, server_hostname, server_rank) = client_sock.teac_connect_wait[0]
                if SameHost(server_hostname, hostname) and server_rank == rank:
                    client_sock.teac_connect_wait.pop(0)
                    sock.Success( "%d %s %d %d" % (self.conn_id, client_hostname, client_rank, client_endianness) )
                    client_sock.Success( "%d %d" % (self.conn_id, endianness) )
                    self.conn_id += 1
                    return
        # No matching connect queued yet; remember this accept.
        sock.teac_accept_wait.append( (hostname, rank, endianness) )
        return
    def AcceptTcscomm( self, sock, accept_info ):
        """Accept routine for Quadrics-Tcscomm (see do_acceptrequest()).

        Same scheme as AcceptQuadrics() but on the tcscomm_* queues."""
        (p, hostname, rank_str, endianness_str) = accept_info
        rank = int(rank_str)
        endianness = int(endianness_str)
        for client_sock in self.wrappers.values():
            if client_sock.tcscomm_connect_wait != []:
                (client_hostname, client_rank, client_endianness, server_hostname, server_rank) = client_sock.tcscomm_connect_wait[0]
                if SameHost(server_hostname, hostname) and server_rank == rank:
                    client_sock.tcscomm_connect_wait.pop(0)
                    sock.Success( "%d %s %d %d" % (self.conn_id, client_hostname, client_rank, client_endianness) )
                    client_sock.Success("%d %d" % (self.conn_id, endianness))
                    self.conn_id += 1
                    return
        # No matching connect queued yet; remember this accept.
        sock.tcscomm_accept_wait.append( (hostname, rank, endianness) )
        return
def do_acceptrequest( self, sock, args ):
"""
This function is called when the mothership receives a "acceptrequest"
message from a network-specific Accept() function (in "util/") if the
connection is brokered.
We call a network-specific accept routine above which returns its
response on the given socket.
"""
accept_info = args.split(" ")
protocol = accept_info[0]
if protocol == 'tcpip' or protocol == 'udptcpip':
self.AcceptTCPIP(sock, accept_info)
elif protocol == 'sdp':
self.AcceptSDP(sock, accept_info)
elif protocol == 'ib':
self.AcceptIB(sock, accept_info)
elif protocol == 'gm':
self.AcceptGM(sock, accept_info)
elif protocol == 'quadrics':
self.AcceptQuadrics(sock, accept_info)
elif protocol == 'quadrics-tcscomm':
self.AcceptTcscomm(sock, accept_info)
else:
sock.Failure(SockWrapper.UNKNOWNPROTOCOL,
"Never heard of protocol %s" % protocol)
    def MatchNode(self, nodeTypeName, sock, args):
        """ A (too?) clever routine. This handles all the work of matching
        various types of nodes, with static matches or with dynamic matches.
        It even handles dynamic resolution and errors.
        Input: nodeTypeName: key into the NodeTypes table, which supplies
               a (validFunc, claimFunc) pair for this node type
               sock: the requesting client's socket wrapper (may be None
               when called internally, e.g. from do_requestmatch)
               args: the hostname of the caller
        Return: a node reference, or None if nothing matched
        """
        try:
            (validFunc, claimFunc) = NodeTypes[nodeTypeName]
        except:
            print "*** ERROR: trying to match unknown node type '%s'" % nodeTypeName
            return None
        # Try first to resolve the host with a static match
        nodenames = ""
        listedNodenames = { }
        for node in self.nodes:
            if validFunc(node):
                # Record all the static node names for a message later
                if not node.dynamic_host and not listedNodenames.has_key(node.host):
                    listedNodenames[node.host] = 1
                    nodenames += node.host+" "
                if MatchStaticNode(node,args):
                    claimFunc(node, sock)
                    return node
        # No static node matches. Try dynamic nodes that are already resolved.
        for node in self.nodes:
            if validFunc(node):
                if node.dynamic_host and not listedNodenames.has_key("[dynamic]"):
                    listedNodenames["[dynamic]"] = 1
                    nodenames += "[dynamic] "
                if MatchResolvedNode(node,args):
                    claimFunc(node, sock)
                    return node
        # If unresolved nodes are present, we can try to resolve them.
        if len(dynamicHostsNeeded) > 0:
            # Only the "grandmothership" (i.e., a mothership with no mother)
            # may resolve nodes.
            if not self.mother: # i.e. I'm the grandmother
                index = 0
                for node in self.nodes:
                    if validFunc(node) and MatchUnresolvedNode(node,args):
                        # We matched the server with an appropriate node.
                        # Tell the daughters.
                        self.Broadcast(self.daughters, "match %d %s" % (index, args))
                        claimFunc(node, sock)
                        return node
                    index += 1
            else:
                # A daughtership must ask its mother to resolve nodes; the
                # answer will come back asynchronously, so we'll have to
                # save our request and deal with it later.
                # When we get the match back, we'll pull all matching pending
                # resolutions from here and restart their processing.
                # The exception raised prevents the main routine (which
                # called us) from continuing with normal processing.
                self.mother.Send("requestmatch %s %s" % (nodeTypeName, args))
                self.pendingResolution.append( ("do_%s" % nodeTypeName, sock, args) )
                # NOTE(review): despite the comment above, no exception is
                # raised here; "node" is whatever the last loop left it
                # bound to (or unbound if self.nodes is empty) - verify
                # that callers treat this return value as "pending".
                return node
        # Nothing matches, and we've tried most everything.
        if sock != None:
            sock.Failure( SockWrapper.UNKNOWNHOST, "Never heard of %s host %s. Expected one of: %s" % (nodeTypeName, args, nodenames))
        return None
def Broadcast(self, sockets, message):
for s in sockets:
s.Send(message)
    def do_faker( self, sock, args ):
        """do_faker(sock, args)
        Maps the incoming "faker" app to a previously-defined node. I.e.
        crappfakers identify themselves to the mothership with this message.
        args is the caller's hostname.
        Will return to the crappfaker the command line arguments for starting
        the OpenGL application."""
        self.MatchNode( "faker", sock, args)
    def do_vncserver( self, sock, args ):
        """do_vncserver(sock, args)
        Called by a crserver to identify itself as a VNC/replication server."""
        # One CRVNCServerNode instance will match any number of crservers.
        # NOTE: we ignore args (the hostname)
        self.MatchNode("vncserver", sock, "anyhost")
def do_crutproxy( self, sock, args ):
CRDebug ( " Seeing if we have a crutproxy." )
"""do_crutserver(sock, args)
Hopefully tells us that we have a crutserver running somewhere."""
self.MatchNode("crutproxy", sock, args)
    def do_crutserver( self, sock, args ):
        """do_crutserver(sock, args)
        Called by a crutserver to identify itself; matches it against a
        CRUTServerNode.  args is the caller's hostname."""
        self.MatchNode("crutserver", sock, args)
    def do_crutclient( self, sock, args ):
        """do_crutclient(sock, args)
        Called by a crutclient to identify itself; matches it against a
        CRUTClientNode.  args is the caller's hostname."""
        self.MatchNode("crutclient", sock, args)
    def do_server( self, sock, args ):
        """do_server(sock, args)
        Servers send this message to identify themselves to the mothership.
        args is the caller's hostname."""
        self.MatchNode("server", sock, args)
    def do_match(self, sock, args):
        """
        Handle a "match" message: args is "<node_index> <hostname>".
        This can either come in as a result of a request we made for a match,
        or spontaneously (to notify us of a match the mothership has made).
        We are to notify our daughters, log the node ourselves, and to
        release and activate any resolutions that were waiting on this node.
        """
        # Propagate the match down the mothership tree first.
        self.Broadcast(self.daughters, "match %s" % args)
        words = string.split(args)
        node = self.nodes[int(words[0])]
        hostname = words[1]
        ResolveNode(node, hostname)
        # Re-run any do_* handlers that were parked waiting for this host;
        # keep the rest pending.
        stillUnresolved = []
        for (pendingCommand, pendingSock, pendingHost) in self.pendingResolution:
            if MatchResolvedNode(node, pendingHost):
                fn = getattr(self, pendingCommand)
                fn(pendingSock, pendingHost)
            else:
                stillUnresolved.append((pendingCommand, pendingSock, pendingHost))
        self.pendingResolution = stillUnresolved
    def do_requestmatch(self, sock, args):
        """
        Handle a "requestmatch" message: args is "<nodeTypeName> <hostname>".
        This can only come from a daughter to a mother. If we're the
        grandmother, we process it. Otherwise, we pass it up. We'll
        eventually get a "match" command back, with information we need.
        """
        if self.mother:
            # Not the grandmother: forward the request up the tree.
            self.mother.Send("requestmatch %s" % args)
            return
        # Here, we're the grandmother. We can resolve this by doing a normal
        # match at our level. Note that we don't really have a socket - the
        # socket we are passed is a daughter, not the real client.
        words = string.split(args)
        nodeTypeName = words[0]
        hostName = words[1]
        node = self.MatchNode(nodeTypeName, None, hostName)
        if node == None:
            # This is bad. Daughters will likely hang, failing to respond
            # to connections, because they're waiting for a match.
            print "*** ERROR: requestmatch couldn't match a node!"
            return
        # The MatchNode method will already have passed the necessary information
        # on to the daughterships, so we don't have to do it again.
        return
    def do_opengldll( self, sock, args ):
        """do_opengldll(sock, args)
        The OpenGL faker library (libcrfaker.so) identifies itself to the
        mothership with this message. Returns the client's SPU chain.
        args is "<app_id> <hostname>"; an app_id of -1 means "match by
        hostname instead"."""
        (id_string, hostname) = args.split( " " )
        app_id = int(id_string)
        for node in self.nodes:
            if isinstance(node,CRApplicationNode):
                # Match by explicit id, or (id == -1) by host; skip nodes
                # whose SPU chain has already been handed out.
                if ((app_id == -1 and SameHost(hostname, node.host)) or node.id == app_id) and not node.spusloaded:
                    node.spusloaded = 1
                    # Reply format: "<count> <id> <name> <id> <name> ..."
                    spuchain = "%d" % len(node.SPUs)
                    for spu in node.SPUs:
                        spuchain += " %d %s" % (spu.ID, spu.name)
                    sock.Success( spuchain )
                    sock.node = node
                    return
        # If you get this error message and don't know why, check if there's
        # a stale mothership process still running.
        sock.Failure(SockWrapper.UNKNOWNHOST, "Unexpected identification message from crfaker (app %d)" % app_id)
    def do_spu( self, sock, args ):
        """do_spu(sock, args)
        SPUs send this message to the mothership to identify themselves.
        args is the SPU's numeric id; on success the id is remembered on
        the socket wrapper for later parameter queries."""
        try:
            spuid = int(args)
        except:
            sock.Failure( SockWrapper.UNKNOWNSPU, "Bogus SPU name: %s" % args )
            return
        if not allSPUs.has_key( spuid ):
            sock.Failure( SockWrapper.UNKNOWNSPU, "Never heard of SPU %d" % spuid )
            return
        sock.SPUid = spuid
        sock.Success( "Hello, %s SPU!" % allSPUs[spuid].name )
def do_spuparam( self, sock, args ):
"""do_spuparam(sock, args)
Sends the given SPU (or global) parameter."""
if sock.SPUid == -1:
sock.Failure( SockWrapper.UNKNOWNSPU, "You can't ask for SPU parameters without telling me what SPU id you are!" )
return
spu = allSPUs[sock.SPUid]
if not spu.config.has_key( args ):
# Okay, there's no specific parameter for the SPU.
# Try the global SPU configurations.
for (regex, key, values) in self.allSPUConf:
if args == key and re.search( regex, spu.name ) != -1:
response = values
break
else:
sock.Failure( SockWrapper.UNKNOWNPARAM,
"SPU %d (%s) doesn't have param %s"
% (sock.SPUid, allSPUs[sock.SPUid].name, args) )
return
else:
response = spu.config[args]
CRDebug("responding with args = " + `response`)
# sock.Success( string.join( response, " " ) )
sock.Success( response )
    def do_get_spu_rank( self, sock, args ):
        """When a number of SPUs are connected to a server, this function
        will return the rank/index of this SPU with respect to the server.
        For example, if there are three pack SPUs connected to a server and
        each pack SPU calls this function, we'll uniquely return "0", "1"
        and "2" to the those SPUs.  Returns "-1" if the SPU can't be found
        among the server's clients (e.g. the SPU is on a crserver)."""
        if sock.SPUid == -1:
            sock.Failure( SockWrapper.UNKNOWNSPU,
                          "You can't ask for SPU peers without telling me" +
                          "what SPU id you are!" )
            return
        ### This is a bit tricky.  Some searching is involved.
        # Find the last SPU in SPU chain that I belong to.
        spu = allSPUs[sock.SPUid]
        spuHost = self.FindSPUHost(spu)
        lastSPU = spuHost.SPUs[-1]
        # Check if there's no upstream server node.
        if len(lastSPU.servers) == 0:
            sock.Success( "0" )
            return
        # Get the last SPU's server node.
        # NOTE(review): only the FIRST server of the last SPU is examined -
        # verify that multi-server chains don't need ranks per server.
        (serverNode, url) = lastSPU.servers[0]
        rank = 0
        # Loop over client SPUs of the server node.
        for (clientSpu, protocol) in serverNode.GetClients():
            # Find the node that hosts this SPU.
            clientNode = self.FindSPUHost(clientSpu)
            # Try to find target SPU in this node's SPU chain.
            for s in clientNode.SPUs:
                if s == spu:
                    # Found it!
                    sock.Success( str(rank) )
                    return
            rank += 1
        # Strange, maybe this SPU is on a crserver.
        sock.Success( "-1" )
    def do_crutserverparam( self, sock, args ):
        """do_crutserverparam(sock, args)
        Sends the given crutserver parameter.  args is the parameter name;
        the caller must have identified itself as a CRUTServerNode first."""
        if sock.node == None or not isinstance(sock.node,CRUTServerNode):
            sock.Failure( SockWrapper.UNKNOWNSERVER, "You can't ask for server parameters without telling me what crutserver you are!" )
            return
        if not sock.node.config.has_key( args ):
            sock.Failure( SockWrapper.UNKNOWNPARAM, "Server doesn't have param %s" % (args) )
            return
        #sock.Success( string.join( sock.node.config[args], " " ) )
        sock.Success( sock.node.config[args] )
    def do_serverparam( self, sock, args ):
        """do_serverparam(sock, args)
        Sends the given server parameter.  args is the parameter name; the
        caller must have identified itself as a network or VNC server."""
        if sock.node == None or not (isinstance(sock.node,CRNetworkNode) or isinstance(sock.node, CRVNCServerNode)):
            sock.Failure( SockWrapper.UNKNOWNSERVER, "You can't ask for server parameters without telling me what server you are!" )
            return
        if not sock.node.config.has_key( args ):
            sock.Failure( SockWrapper.UNKNOWNPARAM, "Server doesn't have param %s" % (args) )
            return
        #sock.Success( string.join( sock.node.config[args], " " ) )
        sock.Success( sock.node.config[args] )
    def do_fakerparam( self, sock, args ):
        """do_fakerparam(sock, args)
        Sends the given app faker parameter.  args is the parameter name;
        the caller must have identified itself as an application node."""
        if sock.node == None or not isinstance(sock.node,CRApplicationNode):
            sock.Failure( SockWrapper.UNKNOWNSERVER, "You can't ask for faker parameters without telling me what app faker you are!" )
            return
        if not sock.node.config.has_key( args ):
            sock.Failure( SockWrapper.UNKNOWNPARAM, "Faker doesn't have param %s" % (args) )
            return
        sock.Success( sock.node.config[args] )
    def do_servers( self, sock, args ):
        """do_servers(sock, args)
        Returns list of servers attached to a (tilesort/pack) SPU.
        Reply format: "<count> url1,url2,...".  Dynamic hostnames in a
        server URL are substituted with their resolved value."""
        if sock.SPUid == -1:
            sock.Failure( SockWrapper.UNKNOWNSPU, "You can't ask for servers without telling me what SPU id you are!" )
            return
        spu = allSPUs[sock.SPUid]
        if len(spu.servers) == 0:
            sock.Failure( SockWrapper.UNKNOWNPARAM, "SPU %d doesn't have servers!" % (sock.SPUid) )
            return
        servers = "%d " % len(spu.servers)
        for i in range(len(spu.servers)):
            (node, formatURL) = spu.servers[i]
            # The formatURL string may include a reference to the
            # resolved hostname.  Replace it if it does.
            if node:
                host = node.host
                if node.dynamic_host:
                    if dynamicHosts.has_key(host):
                        host = dynamicHosts[host]
                    else:
                        # The dynamic host hasn't checked in yet, so we
                        # can't produce a usable URL for it.
                        sock.Failure( SockWrapper.UNKNOWNSERVER, "Server for dynamic host '%s' must be started before the appfaker" % (host) )
                        return
                url = formatURL % {'host': host}
            else:
                # probably a file: URL
                url = formatURL
            servers += "%s" % (url)
            if i != len(spu.servers) -1:
                servers += ','
        sock.Success( servers )
    def do_crutservers( self, sock, args ):
        """do_crutservers(sock, args)
        Sends this node's CRUT server list: "<count> url1 url2 ..."
        (space-separated, unlike do_servers' comma-separated reply)."""
        if len(sock.node.crutservers) == 0:
            sock.Failure( SockWrapper.UNKNOWNPARAM, "CRUTClient %d doesn't have servers" % (sock.SPUid) )
            return
        crutservers = "%d " % len(sock.node.crutservers)
        for i in range(len(sock.node.crutservers)):
            (node,url) = sock.node.crutservers[i]
            crutservers+= "%s" % (url)
            if i != len(sock.node.crutservers) -1:
                crutservers += " "
        sock.Success( crutservers )
def do_crutclients(self, sock, args ):
#don't error here, you may not have any clients (e.g. last node in fan configuration)
if len(sock.node.crutclients) == 0:
sock.Success("0 CRUTserver doesn't have clients.")
return
crutclients = "%d " % len(sock.node.crutclients)
for i in range(len(sock.node.crutclients)):
(nocde,url) = sock.node.crutclients[i]
crutclients += "%s" % (url)
if i != len(sock.node.crutclients) -1:
crutclients += " "
sock.Success( crutclients )
    def do_serverids( self, sock, args ):
        """do_serverids(sock, args)
        Sends the list of server IDs: "<count> id1 id2 ..." where each id
        is the ID of the first SPU on that server node.
        XXX How is this different from do_servers? (ahern)
        """
        # XXX this might only be temporary (BrianP)
        if sock.SPUid == -1:
            sock.Failure( SockWrapper.UNKNOWNSPU, "You can't ask for server ids without telling me what SPU id you are!" )
            return
        spu = allSPUs[sock.SPUid]
        if len(spu.servers) == 0:
            sock.Failure( SockWrapper.UNKNOWNPARAM, "SPU %d doesn't have servers!" % (sock.SPUid) )
            return
        servers = "%d " % len(spu.servers)
        for i in range(len(spu.servers)):
            (node, url) = spu.servers[i]
            if node == None:
                # e.g. a file: URL entry has no node to take an SPU id from
                sock.Failure( SockWrapper.UNKNOWNSERVER, "Sorry, I don't know what SPU the server is running, you didn't tell me." )
                return
            servers += "%d" % (node.SPUs[0].ID)
            if i != len(spu.servers) - 1:
                servers += ' '
        sock.Success( servers )
def do_tiles( self, sock, args ):
"""do_tiles(sock, args)
Returns the list of tiles associated with a SPU's Nth server."""
# Note, an SPU asks for the tiles, but the tiles are really associated
# with the servers that the (tilesort) SPU will talk to. The arg to
# this query indicates which server to return the tiles for.
if sock.SPUid == -1:
sock.Failure( SockWrapper.UNKNOWNSPU, "You can't ask for tiles without telling me what SPU id you are!" )
return
spu = allSPUs[sock.SPUid]
if len(spu.servers) == 0:
sock.Failure( SockWrapper.UNKNOWNPARAM, "SPU %d doesn't have servers!" % (sock.SPUid) )
return
server_num = int(args)
if server_num < 0 or server_num >= len(spu.servers):
sock.Failure( SockWrapper.UNKNOWNSERVER, "SPU %d doesn't have a server numbered %d" % (sock.SPUid, server_num) )
(node, url) = spu.servers[server_num]
if node == None:
sock.Failure( SockWrapper.UNKNOWNSERVER, "No tiles for Null node")
return
self.tileReply( sock, node )
    def do_servertiles( self, sock, args ):
        """do_servertiles(sock, args)
        Sends the defined tiles for a server (the server asks for its own
        tiles, so it must have identified itself as a network node)."""
        if sock.node == None or not isinstance(sock.node,CRNetworkNode):
            sock.Failure( SockWrapper.UNKNOWNSERVER, "You can't ask for tiles without telling me what server you are!" )
            return
        self.tileReply( sock, sock.node )
def do_server_param( self, sock, args ):
"""Return a server parameter to the calling SPU."""
if sock.SPUid == -1:
sock.Failure( SockWrapper.UNKNOWNSPU, "You can't ask for SPU parameters without telling me what SPU id you are!" )
return
spu = allSPUs[sock.SPUid]
args = string.split(args)
server_num = int(args[0])
param = args[1]
if server_num < 0 or server_num >= len(spu.servers):
sock.Failure( SockWrapper.UNKNOWNSERVER, "SPU %d doesn't have a server numbered %d" % (sock.SPUid, server_num) )
(node, url) = spu.servers[server_num]
if node.config.has_key(param):
sock.Success( node.config[param] )
else:
sock.Success( "" )
def tileReply( self, sock, node ):
"""tileReply(sock, node)
Packages up a tile message for socket communication.
"""
if len(node.tiles) == 0:
sock.Failure( SockWrapper.UNKNOWNPARAM, "server doesn't have tiles!" )
return
tiles = "%d " % len(node.tiles)
for i in range(len(node.tiles)):
tile = node.tiles[i] # tile is (x, y, w, h)
tiles += "%d %d %d %d" % tile
if i != len(node.tiles) - 1:
tiles += ","
sock.Success( tiles )
    def do_serverdisplaytiles( self, sock, args ):
        """do_serverdisplaytiles(sock, args)
        Sends the defined display tiles for a server (the caller must have
        identified itself as a network node)."""
        if sock.node == None or not isinstance(sock.node,CRNetworkNode):
            sock.Failure( SockWrapper.UNKNOWNSERVER, "You can't ask for tiles without telling me what server you are!" )
            return
        self.displaytileReply( sock, sock.node )
def displaytileReply( self, sock, node ):
"""tileReply(sock, node)
Packages up a tile message for socket communication.
"""
if len(node.tiles_on_displays) == 0:
sock.Failure( SockWrapper.UNKNOWNPARAM, "server doesn't have tiles!" )
return
tiles = "%d " % len(node.tiles_on_displays)
for i in range(len(node.tiles_on_displays)):
tile = node.tiles_on_displays[i]
tiles += "%d %d %d %d %d" % tile
if i != len(node.tiles) - 1:
tiles += ","
sock.Success( tiles )
    def do_displays( self, sock, args ):
        """do_displays(sock, args)
        Send the displays associated with a SPU.
        Reply: "<total> <display> <display>,..." where each display is the
        5 fields of the display tuple with brackets/commas stripped."""
        n_displays = 0
        # NOTE(review): allSPUs is indexed here with range(len(allSPUs)),
        # which assumes SPU ids are dense 0..n-1 keys - verify against the
        # code that populates allSPUs.
        for spu in range(len(allSPUs)):
            n_displays += len(allSPUs[spu].displays)
        displays = "%d " % n_displays
        for spu in range(len(allSPUs)):
            for i in range(len(allSPUs[spu].displays)):
                display = allSPUs[spu].displays[i]
                tmp_display = "%d %d %d %s %s" % display
                # Strip list syntax ('[', ']', ',') out of the formatted tuple.
                reggie = re.compile('\]|\[|,')
                displays += "%s" % reggie.sub(' ', tmp_display)
                if i != len(allSPUs[spu].displays) - 1:
                    displays += ","
        sock.Success( displays )
def do_display_tiles( self, sock, args ):
"""do_tiles(sock, args)
Sends the defined tiles for a SPU."""
if sock.SPUid == -1:
sock.Failure( SockWrapper.UNKNOWNSPU, "You can't ask for tiles without telling me what SPU id you are!" )
return
spu = allSPUs[sock.SPUid]
if len(spu.servers) == 0:
sock.Failure( SockWrapper.UNKNOWNPARAM, "SPU %d doesn't have servers!" % (sock.SPUid) )
return
server_num = int(args)
if server_num < 0 or server_num >= len(spu.servers):
sock.Failure( SockWrapper.UNKNOWNSERVER, "SPU %d doesn't have a server numbered %d" % (sock.SPUid, server_num) )
(node, url) = spu.servers[server_num]
self.displayTileReply( sock, node )
def displayTileReply( self, sock, node ):
"""displayTileReply(sock, node)
Packages up a tile message for socket communication.
"""
if len(node.tiles_on_displays) == 0:
sock.Failure( SockWrapper.UNKNOWNPARAM, "server doesn't have display tiles!" )
return
tiles = "%d " % len(node.tiles_on_displays)
for i in range(len(node.tiles_on_displays)):
tile = node.tiles_on_displays[i]
tiles += "%d %d %d %d %d" % tile
if i != len(node.tiles_on_displays) - 1:
tiles += ","
sock.Success( tiles )
    def do_getvncclient( self, sock, args ):
        """do_getvncclient(sock, args)
        Like do_clients, return list of clients of this server, but this
        function is for vnc only.
        Note that the client/server terminology of Chromium (in this
        configuration anyway) is just the opposite of VNC's terminology."""
        # NOTE: we ignore args (the hostname)
        if sock.node == None or not isinstance(sock.node, CRVNCServerNode):
            sock.Failure( SockWrapper.UNKNOWNSERVER,
                          "You can't ask for vnc clients without telling " +
                          "me which VNC server node you are!" )
            return
        # Just find the replicate SPU
        # (reply format matches do_clients: "<count> <protocol> <spu id>")
        for i in allSPUs.keys():
            spu = allSPUs[i]
            if spu.name == "replicate":
                sock.Success("1 tcpip %d" % spu.ID);
                return
        sock.Failure(SockWrapper.NOTHINGTOSAY,
                     "getvncclient: Didn't find VNC ApplicationNode and SPU")
def do_clients( self, sock, args ):
"""Returns a list of the clients who talk to this server.
Example: '2 tcpip 4, ib 5' means there are two clients. The first
is SPU #4 using TCP/IP, the second is SPU #5 using Infiniband."""
if sock.node == None or not isinstance(sock.node,CRNetworkNode):
sock.Failure( SockWrapper.UNKNOWNSERVER, "You can't ask for clients without telling me what server you are!" )
return
total_clients = len(sock.node.clients) + len(sock.node.file_clients)
clients = "%d " % total_clients
for i in range(len(sock.node.clients)):
(spu, protocol) = sock.node.clients[i]
clients += "%s %d" % (protocol, spu.ID)
if i != total_clients-1:
clients += ','
for i in range(len(sock.node.file_clients)):
fname = sock.node.file_clients[i]
clients += "%s %d" % (fname, -1)
if i-len(sock.node.clients) != total_clients-1:
clients += ','
sock.Success( clients )
    def do_reset( self, sock, args ):
        """do_reset(sock, args)
        Resets the mothership to its initial state: clears the per-node
        claim flags, re-runs the registered startup callbacks, and
        respawns the auto-start nodes in a background spawner thread."""
        for node in self.nodes:
            node.spokenfor = 0
            node.spusloaded = 0
            node.crut_spokenfor = 0
        # respawn auto-start nodes
        for cb in CR.startupCallbacks:
            cb(self)
        spawner = CRSpawner( self.nodes )
        spawner.start()
        sock.Success( "Server Reset" )
    def do_rank( self, sock, args ):
        """do_rank( sock, args )
        Retrieves the node's rank and sends it on the socket (for Quadrics).
        The caller must have identified itself (sock.node set) and its node
        must carry a 'rank' config entry."""
        if sock.node == None:
            sock.Failure( SockWrapper.UNKNOWNSERVER, "Identify yourself!" )
            return
        if not sock.node.config.has_key( 'rank' ):
            sock.Failure( SockWrapper.UNKNOWNPARAM, "Node didn't say what it's rank is." )
            return
        sock.Success( sock.node.config['rank'] )
    def do_disconnect( self, sock, args ):
        """do_disconnect(sock, args)
        Acknowledges the request, then drops this client's connection."""
        sock.Success( "Bye" )
        self.ClientDisconnect( sock )
    def do_logperf( self, sock, args ):
        """do_logperf(sock, args)
        Logs the raw args string to the mothership's output log."""
        CROutput("%s" % args)
        sock.Success( "Dumped" )
    def do_gettilelayout( self, sock, args ):
        """Call the user's tile layout function and return the resulting
        list of tiles.
        args is "<muralWidth> <muralHeight>"; only a tilesort SPU with a
        layoutFunction attribute gets a non-empty reply.  Reply format:
        "<count> server x y w h, server x y w h, ..."."""
        if sock.SPUid == -1:
            sock.Failure( SockWrapper.UNKNOWNSPU,
                          "You can't ask for a new tile layout without "
                          "telling me what (tilesort) SPU id you are!" )
            return
        spu = allSPUs[sock.SPUid]
        if spu.name != "tilesort":
            # this is bad
            sock.Success("0")
            return
        argv = string.split(args)
        assert len(argv) == 2
        muralWidth = int(argv[0])
        muralHeight = int(argv[1])
        fn = getattr(spu, "layoutFunction" )
        if fn == None:
            # XXX return failure?
            sock.Success("0")
            return
        tiles = fn(muralWidth, muralHeight)
        # reformat the tiles list into a string
        # each tile is (server, x, y, width, height)
        result = str(len(tiles)) + " "
        for t in tiles:
            result += "%d %d %d %d %d, " % (t[0], t[1], t[2], t[3], t[4])
        if result[-2:] == ", ":
            result = result[:-2]  # remove trailing ", "
        assert len(result) < 8000 # see limit in getNewTiling in tilesort SPU
        sock.Success( result )
        return
def do_getstatus( self, sock, args ):
"""Returns status information for the mothership.
The first argument determines what information is sent:
0 [or nonexistent] - Send simple summary info back.
1 - Send detailed summary info back.
2 - Send node count.
3 n attr - Send attr value for node n.
# Not yet implemented, intended for GUI use
4 [n] - Send node setup information for node n [if n not given, is sent for all nodes].
5 [n] - Send node status information for node n [if n not given, is sent for all nodes].
"""
args = string.split(args)
node_types = [ [CRNetworkNode, "network node"],
[CRUTServerNode, "CRUT server node"],
[CRUTProxyNode, "CRUT proxy node"],
[CRApplicationNode, "application node"] ]
TYPE, NAME, COUNT, CONNECTED = 0, 1, 2, 3
result = ""
if len(args) == 0 or (args[0] == "0" or args[0] == "1"):
total_connected = 0
# Set the node type count and node type connected counts to 0
for node_type in node_types:
node_type.append(0)
node_type.append(0)
for node in self.nodes:
for node_type in node_types:
if isinstance(node, node_type[TYPE]):
node_type[COUNT] = node_type[COUNT] + 1
if node.spokenfor:
node_type[CONNECTED] = node_type[CONNECTED] + 1
total_connected = total_connected + 1
result = "%d nodes, %d connected" % (len(self.nodes), total_connected)
is_detailed = (len(args) > 0 and args[0] == "1")
for node_type in node_types:
if node_type[COUNT]:
if is_detailed:
result = result + ("<br> %sS:" % string.upper(node_type[NAME])) + self.__create_detailed_summary(node_type[TYPE])
else:
result = result + "<br> %d %ss, %d connected" % (node_type[COUNT], node_type[NAME], node_type[CONNECTED])
elif args[0] == "2":
result = "%d" % len(self.nodes)
elif args[0] == "3":
if len(args) < 2:
sock.Failure(SockWrapper.INVALIDPARAM, "getstatus usage: 3 n attr - Get attr value for node n.")
return
try:
attr = getattr(self.nodes[int(args[1])], args[2])
except AttributeError:
sock.Failure(SockWrapper.INVALIDPARAM, "Invalid node attribute: %s" % args[2])
return
except IndexError:
sock.Failure(SockWrapper.INVALIDPARAM, "Node index out of range: %s" % args[1])
return
except ValueError:
sock.Failure(SockWrapper.INVALIDPARAM, "Invalid node index: %s" % args[1])
return
result = MakeString(attr)
sock.Success( result )
def __create_detailed_summary ( self, node_type ):
"""Creates a detailed summary string."""
result = ""
for node_num in range(len(self.nodes)):
node = self.nodes[node_num]
if isinstance(node, node_type):
if node.spokenfor:
result = result + "<br> %s[%d] has connected" % (node.host, node_num)
else:
result = result + "<br> %s[%d] has NOT connected" % (node.host, node_num)
return result
    def do_daughter( self, sock, args ):
        """Handle the "daughter" protocol command.

        Registers *sock* as a daughter-mothership connection, then sends it
        a pickled copy of the configuration state (line by line, via
        MoreComing) so the daughter can rebuild the node graph locally.
        """
        # This socket has identified itself as a daughter socket. She
        # wants the node graph in reply; and in the future, she'll receive
        # propagated commands.
        self.daughters.append(sock)
        # Make a copy of the node graph; we'll munge the copy up
        # before sending it along.
        copyCR = copy.copy(self)
        # The daughter has no interest in any of our connections;
        # and the mothership has already autostarted everything
        copyCR.all_sockets = []
        copyCR.wrappers = {}
        copyCR.daughters = []
        copyCR.mother = None
        copyCR.enable_autostart = None
        # Package the copy of CR up with the other necessary globals
        # NOTE(review): the local name 'globals' shadows the builtin globals().
        globals = { }
        globals['cr'] = copyCR
        globals['allSPUs'] = allSPUs
        globals['dynamicHosts'] = dynamicHosts
        globals['dynamicHostsNeeded'] = dynamicHostsNeeded
        # Send them to the daughtership
        pickledGlobals = pickle.dumps(globals)
        # The current interface only sends one line at a time
        lines = pickledGlobals.splitlines()
        for line in lines:
            sock.MoreComing(line)
        sock.Success("hi sweetheart")
    def ProcessRequest( self, sock_wrapper ):
        """ProcessRequest(sock_wrapper)
        Handles an incoming request, mapping it to an appropriate
        do_* function.

        The first word of the request line selects the handler method
        (do_<word>, lowercased); the remainder of the line is passed to
        that handler as a single argument string."""
        try:
            line = sock_wrapper.readline()
            CRDebug("Processing mothership request: \"%s\"" % line)
        except:
            # Client is gone. Make sure it isn't a special client
            # NOTE(review): the bare except treats *any* read failure as a
            # disconnect (it also swallows KeyboardInterrupt and friends).
            if sock_wrapper in self.daughters:
                CRDebug("Daughter quit without saying goodbye? How rude!")
                self.daughters.remove(sock_wrapper)
                self.ClientDisconnect( sock_wrapper )
                return
            elif sock_wrapper == self.mother:
                # Losing the mothership is fatal for a daughtership.
                Fatal("Mother is gone; so am I.")
            else:
                CRDebug( "Client quit without saying goodbye? How rude!" )
                self.ClientDisconnect( sock_wrapper )
                return
        words = string.split( line )
        if len(words) == 0:
            self.ClientError( sock_wrapper,
                SockWrapper.NOTHINGTOSAY, "Request was empty?" )
            #sock_wrapper.Failure( SockWrapper.NOTHINGTOSAY, "Request was empty?" )
            return
        # Dispatch: command word -> do_<command> method; unknown commands
        # get an error reply rather than an exception.
        command = string.lower( words[0] )
        arguments = string.join( words[1:] )
        try:
            fn = getattr(self, 'do_%s' % command )
        except AttributeError:
            sock_wrapper.Failure( SockWrapper.UNKNOWNCOMMAND, "Unknown command: %s" % command )
            return
        fn( sock_wrapper, arguments)
class CRDaughtership:
def __init__( self, mother = None ):
self.mother = None
self.cr = None
# Poor little lost daughtership, looking for her mother
if mother == None:
if os.environ.has_key('CRMOTHERSHIP'):
mother = os.environ['CRMOTHERSHIP']
if mother == None:
CRInfo("I lost my mother - using localhost on default port")
motherHost = 'localhost'
motherPort = DefaultMothershipPort
else:
colon = string.find(mother, ':')
if colon >= 0:
motherHost = mother[0:colon-1]
try:
motherPort = int(mother[colon+1:])
except:
CRInfo("Illegal port number %s, using default" % mother[colon+1:])
motherPort = DefaultMothershipPort
else:
motherHost = mother
motherPort = DefaultMothershipPort
# When we start the daughtership as a surrogate mothership, it will
# read the CRMOTHERSHIP variable to configure itself. Make sure it
# reads the CRDAUGHTERSHIP variable instead, by munging the environment.
if os.environ.has_key('CRDAUGHTERSHIP'):
os.environ['CRMOTHERSHIP'] = os.environ['CRDAUGHTERSHIP']
else:
os.environ['CRMOTHERSHIP'] = ':10001'
# Try all available socket types to reach our mothership
motherSocket = None
for res in socket.getaddrinfo(motherHost, motherPort, socket.AF_UNSPEC, socket.SOCK_STREAM, 0, socket.AI_PASSIVE):
(af, socktype, proto, canonname, sa) = res
try:
motherSocket = socket.socket( af, socktype, proto )
except:
CRDebug( "Couldn't create socket of family %u, trying another one" % af )
motherSocket = None
continue
try:
motherSocket.connect( sa )
except:
sa.close()
CRDebug( "Couldn't connect to mothership at %s:%d" % (motherHost, motherPort))
motherSocket = None
continue
if motherSocket == None:
Fatal("Could not open connection to mothership at %s:%d" % (motherHost, motherPort))
self.mother = SockWrapper(motherSocket)
# Tell the mothership that we are a daughtership, so that we'll
# receive propagated commands.
self.mother.Send("daughter")
# The response will come in multiple lines
done = False
pickledGlobals = ""
while not done:
reply = self.mother.readline()
words = string.split(reply, None, 1)
if len(words) == 0:
Fatal("Mothership returned empty reply?")
if words[0] == "200":
# Done
done = 1
elif words[0] == "100":
# More coming
pickledGlobals = pickledGlobals + words[1] + "\n"
else:
Fatal("Mothership doesn't recognize its daughter [%s]" % words[0])
# By now we've got the whole pickle. See if we can unpickle it.
try:
globals = pickle.loads(pickledGlobals)
except:
Fatal("Could not unpickle Cr globals")
# Unpack all the globals that we were given
try:
global allSPUs, dynamicHosts, dynamicHostsNeeded
self.cr = globals['cr']
allSPUs = globals['allSPUs']
dynamicHosts = globals['dynamicHosts']
dynamicHostsNeeded = globals['dynamicHostsNeeded']
except KeyError, badKey:
Fatal("Globals were missing the key '%s'" % badKey)
# Modify the CR configuration so it knows it has a mother.
# Some commands will then automatically propagate to the
# mothership from us.
self.cr.mother = self.mother
# The mothership should already have taken care of eliminating
# other things we don't want to see (like the mothership's own
# sockets, etc.), so we should be ready to go.
def Go(self):
# Just tell the Chromium configuration to go. It should be
# all set up and ready.
self.cr.Go()
def CreateDaemon(Logfile=None):
    """Detach this process from the controlling terminal and run it in the
    background as a daemon.

    Logfile: optional path; when given, stdout (fd 1) and stderr (fd 2)
    are appended to that file, otherwise they are discarded to /dev/null.
    Returns 0 in the surviving (second-child) daemon process; the two
    intermediate parent processes never return -- they call os._exit().
    ----------------------------------------------------------------------
    This code found at:
    http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/278731
    Author: Chad J. Schroeder
    Copyright (C) 2005 Chad J. Schroeder
    """
    # Default daemon parameters.
    # File mode creation mask of the daemon.
    UMASK = 0
    # Default working directory for the daemon.
    WORKDIR = "/"
    # Default maximum for the number of available file descriptors.
    MAXFD = 1024
    # The standard I/O file descriptors are redirected to /dev/null by default,
    # or to the specified file if a logfile was specified.
    if (hasattr(os, "devnull")):
        DEVNULL = os.devnull
    else:
        DEVNULL = "/dev/null"
    INPUT_FROM = DEVNULL
    if Logfile == None:
        OUTPUT_TO = DEVNULL
    else:
        OUTPUT_TO = Logfile
    try:
        # Fork a child process so the parent can exit. This returns control to
        # the command-line or shell. It also guarantees that the child will not
        # be a process group leader, since the child receives a new process ID
        # and inherits the parent's process group ID. This step is required
        # to insure that the next call to os.setsid is successful.
        pid = os.fork()
    except OSError, e:
        raise Exception, "%s [%d]" % (e.strerror, e.errno)
    if (pid == 0):  # The first child.
        # To become the session leader of this new session and the process group
        # leader of the new process group, we call os.setsid(). The process is
        # also guaranteed not to have a controlling terminal.
        os.setsid()
        # Is ignoring SIGHUP necessary?
        #
        # It's often suggested that the SIGHUP signal should be ignored before
        # the second fork to avoid premature termination of the process. The
        # reason is that when the first child terminates, all processes, e.g.
        # the second child, in the orphaned group will be sent a SIGHUP.
        #
        # "However, as part of the session management system, there are exactly
        # two cases where SIGHUP is sent on the death of a process:
        #
        # 1) When the process that dies is the session leader of a session that
        # is attached to a terminal device, SIGHUP is sent to all processes
        # in the foreground process group of that terminal device.
        # 2) When the death of a process causes a process group to become
        # orphaned, and one or more processes in the orphaned group are
        # stopped, then SIGHUP and SIGCONT are sent to all members of the
        # orphaned group." [2]
        #
        # The first case can be ignored since the child is guaranteed not to have
        # a controlling terminal. The second case isn't so easy to dismiss.
        # The process group is orphaned when the first child terminates and
        # POSIX.1 requires that every STOPPED process in an orphaned process
        # group be sent a SIGHUP signal followed by a SIGCONT signal. Since the
        # second child is not STOPPED though, we can safely forego ignoring the
        # SIGHUP signal. In any case, there are no ill-effects if it is ignored.
        #
        # import signal # Set handlers for asynchronous events.
        # signal.signal(signal.SIGHUP, signal.SIG_IGN)
        try:
            # Fork a second child and exit immediately to prevent zombies. This
            # causes the second child process to be orphaned, making the init
            # process responsible for its cleanup. And, since the first child is
            # a session leader without a controlling terminal, it's possible for
            # it to acquire one by opening a terminal in the future (System V-
            # based systems). This second fork guarantees that the child is no
            # longer a session leader, preventing the daemon from ever acquiring
            # a controlling terminal.
            pid = os.fork()  # Fork a second child.
        except OSError, e:
            raise Exception, "%s [%d]" % (e.strerror, e.errno)
        if (pid == 0):  # The second child.
            # Since the current working directory may be a mounted filesystem, we
            # avoid the issue of not being able to unmount the filesystem at
            # shutdown time by changing it to the root directory.
            os.chdir(WORKDIR)
            # We probably don't want the file mode creation mask inherited from
            # the parent, so we give the child complete control over permissions.
            # NOTE(review): umask 0 means files are created with exactly the
            # mode passed to open(), unmodified.
            os.umask(UMASK)
        else:
            # exit() or _exit()? See below.
            os._exit(0)  # Exit parent (the first child) of the second child.
    else:
        # exit() or _exit()?
        # _exit is like exit(), but it doesn't call any functions registered
        # with atexit (and on_exit) or any registered signal handlers. It also
        # closes any open file descriptors. Using exit() may cause all stdio
        # streams to be flushed twice and any temporary files may be unexpectedly
        # removed. It's therefore recommended that child branches of a fork()
        # and the parent branch(es) of a daemon use _exit().
        os._exit(0)  # Exit parent of the first child.
    # Close all open file descriptors. This prevents the child from keeping
    # open any file descriptors inherited from the parent. There is a variety
    # of methods to accomplish this task. Three are listed below.
    #
    # Try the system configuration variable, SC_OPEN_MAX, to obtain the maximum
    # number of open file descriptors to close. If it doesn't exists, use
    # the default value (configurable).
    #
    # try:
    # maxfd = os.sysconf("SC_OPEN_MAX")
    # except (AttributeError, ValueError):
    # maxfd = MAXFD
    #
    # OR
    #
    # if (os.sysconf_names.has_key("SC_OPEN_MAX")):
    # maxfd = os.sysconf("SC_OPEN_MAX")
    # else:
    # maxfd = MAXFD
    #
    # OR
    #
    # Use the getrlimit method to retrieve the maximum file descriptor number
    # that can be opened by this process. If there is not limit on the
    # resource, use the default value.
    #
    import resource  # Resource usage information.
    maxfd = resource.getrlimit(resource.RLIMIT_NOFILE)[1]
    # On old versions of Python, which don't have resource.RLIM_INFINITY,
    # default to MAXFD.
    try:
        if (maxfd == resource.RLIM_INFINITY):
            maxfd = MAXFD
    except:
        maxfd = MAXFD
    # Iterate through and close all file descriptors.
    for fd in range(0, maxfd):
        try:
            os.close(fd)
        except OSError:  # ERROR, fd wasn't open to begin with (ignored)
            pass
    # Redirect the standard I/O file descriptors to the specified file. Since
    # the daemon has no controlling terminal, most daemons redirect stdin,
    # stdout, and stderr to /dev/null. This is done to prevent side-effects
    # from reads and writes to the standard I/O file descriptors.
    # This call to open is guaranteed to return the lowest file descriptor,
    # which will be 0 (stdin), since it was closed above.
    os.open(INPUT_FROM, os.O_RDONLY)  # standard input (0)
    os.open(OUTPUT_TO, os.O_WRONLY | os.O_CREAT | os.O_APPEND, 0600)  # standard output (1)
    os.dup2(1, 2)  # standard error (2)
    return(0)
| alown/chromium | mothership/server/mothership.py | Python | bsd-3-clause | 89,999 |
# Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import time
from fuel_agent_ci.objects import Object
LOG = logging.getLogger(__name__)
class Ssh(Object):
    """SSH endpoint of a test environment.

    Thin wrapper around the environment driver's ssh_* primitives; every
    operation first verifies that an SSH connection can be established and
    raises Exception otherwise.
    """
    __typename__ = 'ssh'

    def __init__(self, env, name, host, key_filename, user='root', timeout=5):
        self.env = env
        self.name = name
        self.host = host
        self.user = user
        self.key_filename = key_filename
        # Connect timeout in seconds; also paces the wait() polling loop,
        # since each failed status() attempt blocks for up to this long.
        self.timeout = timeout

    def status(self):
        """Return True when the driver can establish an SSH connection."""
        status = self.env.driver.ssh_status(self)
        LOG.debug('SSH %s status %s' % (self.name, status))
        return status

    def put_content(self, content, remote_filename):
        """Write *content* into *remote_filename* on the remote host.

        Raises Exception when no SSH connection is available.
        """
        if self.status():
            LOG.debug('Putting content %s' % self.name)
            self.env.driver.ssh_put_content(self, content, remote_filename)
            # Fixed: without this return the success path fell through
            # into the raise below, so every call failed.
            return
        raise Exception('Wrong ssh status: %s' % self.name)

    def put_file(self, filename, remote_filename):
        """Copy local *filename* to *remote_filename* on the remote host.

        Raises Exception when no SSH connection is available.
        """
        if self.status():
            LOG.debug('Putting file %s' % self.name)
            self.env.driver.ssh_put_file(self, filename, remote_filename)
            # Fixed: see put_content -- the raise must only run on failure.
            return
        raise Exception('Wrong ssh status: %s' % self.name)

    def run(self, command, command_timeout=10):
        """Run *command* remotely and return the driver's result.

        Raises Exception when no SSH connection is available.
        """
        if self.status():
            LOG.debug('Running command %s' % self.name)
            return self.env.driver.ssh_run(self, command, command_timeout)
        raise Exception('Wrong ssh status: %s' % self.name)

    def wait(self, timeout=200):
        """Poll until SSH is reachable; True on success, False on timeout."""
        begin_time = time.time()
        # this loop does not have sleep statement
        # because it relies on self.timeout which is by default 5 seconds
        while time.time() - begin_time < timeout:
            # Fixed: was self.status(self), which passed a spurious extra
            # positional argument and raised TypeError on every iteration.
            if self.status():
                return True
            LOG.debug('Waiting for ssh connection to be '
                      'available: %s' % self.name)
        return False
| koder-ua/nailgun-fcert | fuel_agent_ci/fuel_agent_ci/objects/ssh.py | Python | apache-2.0 | 2,369 |
'''Moa
=======
Adapter for moa classes.
'''
from cplcom import config_name
| matham/cplcom | cplcom/moa/__init__.py | Python | mit | 84 |
#!/usr/bin/env python
#-*- coding: utf-8 -*-
from session import Session
from cookie import Cookie
from general import General
from dbconnection import dbConnection
from loghelper import Logging
from json import JsonBuilder
class SciNameBuilder(object):
g = General()
def __init__(self, cookie_value, dbconnection = None):
#Cookies
self.cookie_value = cookie_value
#Logging
self.logger = Logging.getLogger("sciname")
#Define Cookie
self.cookie = Cookie()
self.cookie_value = self.cookie.read('Sicol_Session')
#Load Session
self.session = Session()
self.session.load(cookie_value)
#Define Sqlite Connection
if dbconnection:
self.dbconnection = dbconnection
else:
self.dbconnection = dbConnection(cookie_value)
self.execute = self.dbconnection.execute
self.cursor = self.dbconnection.cursor
self.fetch = self.dbconnection.fetch
def check_existing(self, hi_tax, sciname, id_sciname=None):
if not hi_tax:
hi_tax = ''
id_sciname = str(id_sciname)
sciname_data = { 'compound_hitax_sciname': '%s|%s' % (hi_tax, sciname)}
self.execute('check_sciname', sciname_data)
current_id_sciname = str(self.fetch('one'))
self.logger.debug("current id: %s" % (current_id_sciname))
# if nothing was returned from the database, there isn't such sciname
if not current_id_sciname:
self.logger.debug("not found - False")
return False
# if a specific id was passed, it only "exists" if it's another sciname
# or in other words, if the id of the existing item is different from the
# one we are probably updating
if id_sciname:
self.logger.debug("found: %s <> %s = %s" % (id_sciname, current_id_sciname, id_sciname <> current_id_sciname))
return id_sciname <> current_id_sciname
# if id_sciname was not passed and there is such a sciname on the datbase,
# we consider it exists
self.logger.debug("found: True")
return True
def update(self, id_subcoll, id_lang, id_sciname, form):
hi_tax = form.getvalue('higher_taxa_html')
sciname = form.getvalue('sciname_html')
if self.check_existing(hi_tax, sciname, id_sciname):
import exception
raise exception.SicolException (_("Another taxa with that Higher Taxa and Scientific Name combination already exists."))
sciname_data = { 'id_sciname': id_sciname, 'hi_tax': hi_tax, 'sciname': sciname, 'sciname_no_auth': form.getvalue('sciname_no_auth') }
self.execute('update_sciname', sciname_data)
self.execute('delete_sciname_hierarchy', {'id_sciname': id_sciname})
self.insert_hierarchy(id_subcoll, id_lang, id_sciname, form)
def insert(self, id_subcoll, id_lang, form):
hi_tax = form.getvalue('higher_taxa_html')
sciname = form.getvalue('sciname_html')
if self.check_existing(hi_tax, sciname):
import exception
raise exception.SicolException (_("Another taxa with that Higher Taxa and Scientific Name combination already exists."))
self.logger.debug('form: %s' % (str(form)))
#isolates id_taxon_group
id_taxon_group = form.getvalue('taxon_group')
self.logger.debug('id_taxon_group: %s' % (id_taxon_group))
#creates the sciname record
sciname_data = { 'hi_tax': form.getvalue('higher_taxa_html'), 'sciname': form.getvalue('sciname_html'), 'sciname_no_auth': form.getvalue('sciname_no_auth') }
self.logger.debug('sciname_data: %s' % (str(sciname_data)))
self.execute('insert_sciname', sciname_data)
self.execute('last_insert_id')
id_sciname = self.fetch('one')
self.logger.debug('id_sciname: %s' % (str(id_sciname)))
self.insert_hierarchy(id_subcoll, id_lang, id_sciname, form)
return id_sciname
def insert_hierarchy(self, id_subcoll, id_lang, id_sciname, form):
#retrieves all scientific name fields for this subcoll and language
id_taxon_group = form.getvalue('taxon_group')
rows = self.getfields(id_subcoll, id_lang, id_taxon_group)
for row in rows:
row['type_prefix'] = ('sci', 'hi')[row['hi_tax']]
key_value = '%(type_prefix)s_value_%(id_taxon_group)s_%(seq)s' % row
key_author = '%(type_prefix)s_author_%(id_taxon_group)s_%(seq)s' % row
key_hierarchy = '%(type_prefix)s_hierarchy_%(id_taxon_group)s_%(seq)s' % row
self.logger.debug('keys: v:[%s] a:[%s] h:[%s]' % (key_value, key_author, key_hierarchy))
value = form.getvalue(key_value)
author = form.getvalue(key_author)
id_hierarchy = form.getvalue(key_hierarchy)
self.logger.debug('values: v:[%s] a:[%s] h:[%s]' % (value, author, id_hierarchy))
if value:
sciname_detail_data = {
'id_sciname': id_sciname,
'id_hierarchy': id_hierarchy,
'value' : value,
'author' : author
}
self.execute('insert_sciname_hierarchy', sciname_detail_data)
def html(self, id_subcoll, id_lang, id_sciname, id_taxon_group = None, sciname_hierarchy = None):
from labels import label_dict
dict = {}
#retrieves all scientific name fields for this subcoll and language
rows = self.getfields(id_subcoll, id_lang)
#this is the template of items for one taxon group
html_template = '''
<div id='sciname_builder_%(id_taxon_group)s' class='%(css_class)s'>
<p>
<table>
<tbody>
<tr>
<td>
<fieldset>
<legend>%%(label_Species_General_Higher_Taxa)s</legend>
<p><table>%(higher_taxa)s</table></p>
</fieldset>
</td>
<td>
<fieldset>
<legend>%%(label_Species_General_Scientific_Name)s</legend>
<p><table>%(sciname)s</table></p>
</fieldset>
</td>
</tr>
</tbody>
</table>
</p>
</div>'''
#this is the template for each field, the author is optional
html_detail = '''
<tr>
<td><p><label id="label_%(type_prefix)s_value_%(id_taxon_group)s_%(seq)s">%(rank)s</label></p></td>
<td>
<input name='%(type_prefix)s_value_%(id_taxon_group)s_%(seq)s' id='%(type_prefix)s_value_%(id_taxon_group)s_%(seq)s' class='sci_name' value='%(value)s' %(onblur)s onchange='applySciName();'>
<input type='hidden' name='%(type_prefix)s_hierarchy_%(id_taxon_group)s_%(seq)s' value='%(id_hierarchy)s'>
</td>
%(author_html)s
</tr>'''
#when field has author, this template is added to the field template
html_author_detail = '''
<td><p><label if="label_%(type_prefix)s_author_%(id_taxon_group)s_%(seq)s">%(label_Species_General_Author)s</label></p></td>
<td><input name='%(type_prefix)s_author_%(id_taxon_group)s_%(seq)s' id='%(type_prefix)s_author_%(id_taxon_group)s_%(seq)s' class='sci_author' value='%(author)s' onchange='applySciName();'></td>'''
#complete html holder
html_body = []
#holds the json variable that will represent all fields
js_data = {}
#helper html chunks
hitax_html = []
sciname_html = []
#holds the current taxon group for level breaking (below)
current_taxon_group = None
#previous row
prev_row = {}
#for each field found on the database
for row in rows:
# check if new taxon_group
if not current_taxon_group or current_taxon_group <> row['id_taxon_group']:
self.add_sciname_block(dict, html_template, html_body, hitax_html, sciname_html, current_taxon_group, prev_row, id_taxon_group)
# reset variables
hitax_html = []
sciname_html = []
current_taxon_group = row['id_taxon_group']
#prefixes differ for high_taxa and sciname for handling correctly on client-side
row['type_prefix'] = ['sci', 'hi'][row['hi_tax']]
#if sciname_hierarchy was passed, we fill the fields with saved values
id_hierarchy = row['id_hierarchy']
if sciname_hierarchy and id_taxon_group and sciname_hierarchy.has_key(id_hierarchy) and current_taxon_group == id_taxon_group:
this_dict = sciname_hierarchy[id_hierarchy]
row['value'] = this_dict['value']
row['author'] = this_dict['author']
else:
row['value'] = ''
row['author'] = ''
#helper dict of attributes to be added to the field
helper_dict = row.copy()
helper_dict['author_html'] = ''
if (row['required'] == 1):
helper_dict['onblur'] = "onblur='isEmpty(this, null);'"
helper_dict['rank'] += ' *'
else:
helper_dict['onblur'] = ""
#adds author_html to the field html
if (row['has_author'] == 1):
complete_dict = helper_dict.copy()
complete_dict.update(label_dict)
helper_dict['author_html'] = html_author_detail % complete_dict
#appends on the correct html chunk
if row['hi_tax'] == 1:
#higher taxa fieldset group
hitax_html.append(html_detail % helper_dict)
else:
#scientific name fieldset group
sciname_html.append(html_detail % helper_dict)
#variables for being used as key on js_data dict
taxon_group = row['id_taxon_group']
seq = row['seq']
#if the dict doesn't have this taxon_group yet, add it
if not js_data.has_key(taxon_group):
js_data[taxon_group] = {}
#if the dict doesn't have this seq on this taxon_group yet, add it
if not js_data[taxon_group].has_key(seq):
js_data[taxon_group][seq] = {}
#add the field to the correct taxon_group and seq
js_data[taxon_group][seq] = row
#saves previous row for filling the template correctly
prev_row = row.copy()
#last iteration of the loop
self.add_sciname_block(dict, html_template, html_body, hitax_html, sciname_html, current_taxon_group, row, id_taxon_group)
#creates a json variable from the dictionary
js = JsonBuilder.createJson(js_data)
#assembles final HTML block to be rendered
html = "\n".join(html_body) % label_dict
#transfer the javascript and the html to the html template file
template = self.g.read_html('sciname.form') % { 'sciname_builder_js': js, 'sciname_builder_html' : html }
#and returns it
return template
def getfields(self, id_subcoll, id_lang, id_taxon_group = None):
#sciname data
data = { 'id_subcoll': id_subcoll, 'id_lang': id_lang, 'taxon_group': ' ' }
if id_taxon_group:
data['taxon_group'] = 'AND id_taxon_group=%s' % (id_taxon_group)
#retrieves all scientific name fields for this subcoll and language
self.execute('get_sciname_builder_fields', data, raw_mode=True)
return self.fetch('all')
def add_sciname_block(self, dict, html_template, html_body, hitax_html, sciname_html, current_taxon_group, field, id_taxon_group = None):
if current_taxon_group:
#when loading the page, if has loaded data, we'll display the sciname builder
#for that taxon group by default
dict['css_class'] = 'sciname_builder'
if id_taxon_group and id_taxon_group == current_taxon_group:
dict['css_class'] = 'sciname_builder_current'
dict['id_taxon_group'] = field['id_taxon_group']
dict['higher_taxa'] = "\n".join(hitax_html)
dict['sciname'] = "\n".join(sciname_html)
self.logger.debug("Append [%s]\n<= [%s]" % (html_template, dict))
html_body.append(html_template % dict)
| cria/microSICol | py/modules/sciname.py | Python | gpl-2.0 | 13,168 |
"""
Contains a class to evaluate python code and return True or False
"""
__author__ = "Stephen Henrie"
__date__ = "04/13/2013"
from ndg.xacml.core.context.exceptions import XacmlContextTypeError
from ndg.xacml.core.functions import (AbstractFunction, FunctionClassFactoryInterface)
from ndg.xacml.core.attributevalue import (AttributeValue,
AttributeValueClassFactory)
from ndg.xacml.utils import TypedList as Bag
from pyon.core.governance.governance_dispatcher import GovernanceDispatcher
from pyon.util.execute import execute_method
from pyon.util.log import log
class EvaluateCode(AbstractFunction):
"""Generic equal function for all types
@cvar TYPE: attribute type for the given implementation. Derived classes
should set appropriately
@type TYPE: NoneType
"""
FUNCTION_NS = 'urn:oasis:names:tc:xacml:ooi:function:evaluate-code'
ATTRIB1_TYPE = basestring
ATTRIB2_TYPE = dict
def evaluate(self, *inputs):
"""Match input attribute values
@param attribute1: a segment of code to evaluate
@type attribute1: ndg.xacml.core.attributevalue.AttributeValue derived
@param attribute2: a dict with the message parameters
@type attribute2: ndg.xacml.core.attributevalue.AttributeValue derived
@return: True if code evaluates to True, False otherwise
@rtype: bool
"""
error_msg = ''
eval_code = inputs[0]
if not isinstance(eval_code, AttributeValue) and not isinstance(eval_code.elementType, self.__class__.ATTRIB1_TYPE):
raise XacmlContextTypeError('Expecting %r derived type for '
'"attribute1"; got %r' %
(self.__class__.ATTRIB1_TYPE,
type(eval_code)))
if isinstance(inputs[1], Bag):
parameter_dict = inputs[1][0]
else:
parameter_dict = inputs[1]
if not isinstance(parameter_dict, AttributeValue) and not isinstance(parameter_dict.elementType, self.__class__.ATTRIB2_TYPE):
raise XacmlContextTypeError('Expecting %r derived type for '
'"attribute2"; got %r' %
(self.__class__.ATTRIB2_TYPE,
type(parameter_dict)))
try:
exec eval_code.value
pref = locals()["policy_func"]
ret_val, error_msg = pref(process=parameter_dict.value['process'], message=parameter_dict.value['message'], headers=parameter_dict.value['headers'])
if not ret_val:
parameter_dict.value['annotations'][GovernanceDispatcher.POLICY__STATUS_REASON_ANNOTATION] = error_msg
except Exception, e:
log.exception(e)
ret_val = False
parameter_dict.value['annotations'][GovernanceDispatcher.POLICY__STATUS_REASON_ANNOTATION] = e.message
return ret_val
class EvaluateFunction(AbstractFunction):
"""Generic equal function for all types
@cvar TYPE: attribute type for the given implementation. Derived classes
should set appropriately
@type TYPE: NoneType
"""
FUNCTION_NS = 'urn:oasis:names:tc:xacml:ooi:function:evaluate-function'
ATTRIB1_TYPE = basestring
ATTRIB2_TYPE = dict
def evaluate(self, *inputs):
"""Match input attribute values
@param attribute1: the name of a function to execute
@type attribute1: ndg.xacml.core.attributevalue.AttributeValue derived
@param attribute2: an object where the function is located
@type attribute2: ndg.xacml.core.attributevalue.AttributeValue derived
@param attribute3: an optional dict with the message parameters
@type attribute3: ndg.xacml.core.attributevalue.AttributeValue derived
@return: True if code evaluates to True, False otherwise
@rtype: bool
"""
error_msg = ''
function_name = inputs[0]
if not isinstance(function_name, AttributeValue) and not isinstance(function_name.elementType, self.__class__.ATTRIB1_TYPE):
raise XacmlContextTypeError('Expecting %r derived type for '
'"attribute1"; got %r' %
(self.__class__.ATTRIB1_TYPE,
type(function_name)))
if isinstance(inputs[1], Bag):
parameter_dict = inputs[1][0]
else:
parameter_dict = inputs[1]
if not isinstance(parameter_dict, AttributeValue) and not isinstance(parameter_dict.elementType, self.__class__.ATTRIB2_TYPE):
raise XacmlContextTypeError('Expecting %r derived type for '
'"attribute2"; got %r' %
(self.__class__.ATTRIB2_TYPE,
type(parameter_dict)))
try:
ret_val, error_msg = execute_method(execution_object=parameter_dict.value['process'], method_name=function_name.value, **parameter_dict.value)
if not ret_val:
parameter_dict.value['annotations'][GovernanceDispatcher.POLICY__STATUS_REASON_ANNOTATION] = error_msg
except Exception, e:
log.exception(e)
ret_val = False
parameter_dict.value['annotations'][GovernanceDispatcher.POLICY__STATUS_REASON_ANNOTATION] = e.message
return ret_val
class FunctionClassFactory(FunctionClassFactoryInterface):
"""Class Factory for and XACML function class
@cvar FUNCTION_NS: URN for and function
@type FUNCTION_NS: string
"""
def __call__(self, identifier):
'''Create class for the And XACML function identifier
@param identifier: XACML and function identifier
@type identifier: basestring
@return: and function class or None if identifier doesn't match
@rtype: ndg.xacml.core.functions.v1.and.And / NoneType
'''
if identifier == EvaluateCode.FUNCTION_NS:
return EvaluateCode
elif identifier == EvaluateFunction.FUNCTION_NS:
return EvaluateCode
else:
return None
| ooici/pyon | pyon/core/governance/policy/evaluate.py | Python | bsd-2-clause | 6,287 |
import subprocess
def exists(env):
return True
def generate(env):
env.Tool('python')
env['NUMPYINCLUDES'] = env.Dir(
subprocess.check_output([
env.subst('$PYTHON'),
'-c',
"import numpy; print(numpy.get_include())"
])
.decode()
.strip()
)
| ricotabor/opendrop | site_scons/site_tools/numpy.py | Python | gpl-2.0 | 327 |
#!/usr/bin/env python
import os
import sys
if __name__ == '__main__':
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'config.settings.local')
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django # noqa
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
# This allows easy placement of apps within the interior
# netmon directory.
current_path = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.join(current_path, 'netmon'))
execute_from_command_line(sys.argv)
| Landver/netmon | manage.py | Python | mit | 1,026 |
#!/usr/bin/env python
# Copyright (c) 2012 Cloudera, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import getpass
import grp
import os
import pwd
import pytest
import time
from tests.common.impala_test_suite import ImpalaTestSuite
from tests.common.skip import SkipIfS3, SkipIfIsilon
from tests.util.filesystem_utils import WAREHOUSE, IS_DEFAULT_FS, get_fs_path
@SkipIfS3.insert
class TestInsertBehaviour(ImpalaTestSuite):
  """Tests for INSERT behaviour that isn't covered by checking query results"""

  # Database created/destroyed only around test_insert_select_with_empty_resultset.
  TEST_DB_NAME = "insert_empty_result_db"

  def setup_method(self, method):
    """Create a fresh test database, but only for the one test that uses it."""
    # cleanup and create a fresh test database
    if method.__name__ == "test_insert_select_with_empty_resultset":
      self.cleanup_db(self.TEST_DB_NAME)
      self.execute_query("create database if not exists {0} location '{1}/{0}.db'"
          .format(self.TEST_DB_NAME, WAREHOUSE))

  def teardown_method(self, method):
    """Drop the per-test database created in setup_method."""
    if method.__name__ == "test_insert_select_with_empty_resultset":
      self.cleanup_db(self.TEST_DB_NAME)

  @pytest.mark.execute_serially
  def test_insert_removes_staging_files(self):
    """Verify an INSERT OVERWRITE leaves no files behind in the staging dir."""
    TBL_NAME = "insert_overwrite_nopart"
    insert_staging_dir = ("test-warehouse/functional.db/%s/"
        "_impala_insert_staging" % TBL_NAME)
    self.hdfs_client.delete_file_dir(insert_staging_dir, recursive=True)
    self.client.execute(("INSERT OVERWRITE functional.%s"
        " SELECT int_col FROM functional.tinyinttable" % TBL_NAME))
    # The INSERT recreates the staging dir; it must be empty once the
    # statement completes.
    ls = self.hdfs_client.list_dir(insert_staging_dir)
    assert len(ls['FileStatuses']['FileStatus']) == 0
@pytest.mark.execute_serially
def test_insert_preserves_hidden_files(self):
"""Test that INSERT OVERWRITE preserves hidden files in the root table directory"""
TBL_NAME = "insert_overwrite_nopart"
table_dir = "test-warehouse/functional.db/%s/" % TBL_NAME
hidden_file_locations = [".hidden", "_hidden"]
dir_locations = ["dir", ".hidden_dir"]
for dir_ in dir_locations:
self.hdfs_client.make_dir(table_dir + dir_)
for file_ in hidden_file_locations:
self.hdfs_client.create_file(table_dir + file_, '', overwrite=True)
self.client.execute(("INSERT OVERWRITE functional.%s"
" SELECT int_col FROM functional.tinyinttable" % TBL_NAME))
for file_ in hidden_file_locations:
try:
self.hdfs_client.get_file_dir_status(table_dir + file_)
except:
err_msg = "Hidden file '%s' was unexpectedly deleted by INSERT OVERWRITE"
pytest.fail(err_msg % (table_dir + file_))
for dir_ in dir_locations:
try:
self.hdfs_client.get_file_dir_status(table_dir + file_)
except:
err_msg = "Directory '%s' was unexpectedly deleted by INSERT OVERWRITE"
pytest.fail(err_msg % (table_dir + dir_))
  @pytest.mark.execute_serially
  def test_insert_alter_partition_location(self):
    """Test that inserts after changing the location of a partition work correctly,
    including the creation of a non-existant partition dir"""
    PART_DIR = "tmp/test_insert_alter_partition_location"
    QUALIFIED_PART_DIR = get_fs_path('/' + PART_DIR)
    TBL_NAME = "functional.insert_alter_partition_location"

    self.execute_query_expect_success(self.client, "DROP TABLE IF EXISTS %s" % TBL_NAME)
    self.hdfs_client.delete_file_dir(PART_DIR, recursive=True)

    self.execute_query_expect_success(self.client,
        "CREATE TABLE %s (c int) PARTITIONED BY (p int)" % TBL_NAME)
    self.execute_query_expect_success(self.client,
        "ALTER TABLE %s ADD PARTITION(p=1)" % TBL_NAME)
    # Point the partition at a directory that does not exist yet; the INSERT
    # below must create it.
    self.execute_query_expect_success(self.client,
        "ALTER TABLE %s PARTITION(p=1) SET LOCATION '%s'" %
        (TBL_NAME, QUALIFIED_PART_DIR))
    self.execute_query_expect_success(self.client,
        "INSERT OVERWRITE %s PARTITION(p=1) VALUES(1)" % TBL_NAME)

    result = self.execute_query_expect_success(self.client,
        "SELECT COUNT(*) FROM %s" % TBL_NAME)
    assert int(result.get_data()) == 1

    # Should have created the partition dir, which should contain exactly one file (not in
    # a subdirectory)
    ls = self.hdfs_client.list_dir(PART_DIR)
    assert len(ls['FileStatuses']['FileStatus']) == 1

  @SkipIfIsilon.hdfs_acls
  @pytest.mark.xfail(run=False, reason="Fails intermittently on test clusters")
  @pytest.mark.execute_serially
  def test_insert_inherit_acls(self):
    """Check that ACLs are inherited when we create new partitions"""

    # Both are 'default:' (inheritable) ACL entries, so children created under
    # a directory carrying them should pick them up.
    ROOT_ACL = "default:group:dummy_group:rwx"
    TEST_ACL = "default:group:impala_test_users:r-x"
    def check_has_acls(part, acl=TEST_ACL):
      # Assert the given partition path (relative to the table root) carries
      # the expected ACL entry.
      path = "test-warehouse/functional.db/insert_inherit_acls/" + part
      result = self.hdfs_client.getacl(path)
      assert acl in result['AclStatus']['entries']

    # Add a spurious ACL to functional.db directory
    self.hdfs_client.setacl("test-warehouse/functional.db", ROOT_ACL)

    self.execute_query_expect_success(self.client, "DROP TABLE IF EXISTS"
                                      " functional.insert_inherit_acls")
    self.execute_query_expect_success(self.client, "CREATE TABLE "
                                      "functional.insert_inherit_acls (col int)"
                                      " PARTITIONED BY (p1 int, p2 int, p3 int)")

    # Check that table creation inherited the ACL
    check_has_acls("", ROOT_ACL)

    self.execute_query_expect_success(self.client, "ALTER TABLE "
                                      "functional.insert_inherit_acls ADD PARTITION"
                                      "(p1=1, p2=1, p3=1)")

    check_has_acls("p1=1", ROOT_ACL)
    check_has_acls("p1=1/p2=1", ROOT_ACL)
    check_has_acls("p1=1/p2=1/p3=1", ROOT_ACL)

    self.hdfs_client.setacl(
      "test-warehouse/functional.db/insert_inherit_acls/p1=1/", TEST_ACL)

    self.execute_query_expect_success(self.client, "INSERT INTO "
                                      "functional.insert_inherit_acls "
                                      "PARTITION(p1=1, p2=2, p3=2) VALUES(1)")

    # New partitions under p1=1 inherit TEST_ACL set just above.
    check_has_acls("p1=1/p2=2/")
    check_has_acls("p1=1/p2=2/p3=2")

    # Check that SETACL didn't cascade down to children (which is more to do with HDFS
    # than Impala, but worth asserting here)
    check_has_acls("p1=1/p2=1", ROOT_ACL)
    check_has_acls("p1=1/p2=1/p3=1", ROOT_ACL)

    # Change ACLs on p1=1,p2=2 and create a new leaf at p3=30
    self.hdfs_client.setacl(
      "test-warehouse/functional.db/insert_inherit_acls/p1=1/p2=2/",
      "default:group:new_leaf_group:-w-")

    self.execute_query_expect_success(self.client, "INSERT INTO "
                                      "functional.insert_inherit_acls "
                                      "PARTITION(p1=1, p2=2, p3=30) VALUES(1)")

    check_has_acls("p1=1/p2=2/p3=30", "default:group:new_leaf_group:-w-")
  @SkipIfIsilon.hdfs_acls
  def test_insert_file_permissions(self):
    """Test that INSERT correctly respects file permission (minimum ACLs)"""
    TBL = "functional.insert_acl_permissions"
    TBL_PATH = "test-warehouse/functional.db/insert_acl_permissions"
    INSERT_QUERY = "INSERT INTO %s VALUES(1)" % TBL

    self.execute_query_expect_success(self.client, "DROP TABLE IF EXISTS"
                                      " functional.insert_acl_permissions")
    self.execute_query_expect_success(self.client, "CREATE TABLE "
                                      "functional.insert_acl_permissions (col int) ")

    # Check that a simple insert works
    self.execute_query_expect_success(self.client, INSERT_QUERY)

    # Remove the permission to write and confirm that INSERTs won't work
    self.hdfs_client.setacl(TBL_PATH, "user::r-x,group::r-x,other::r-x")
    # A REFRESH is required after each permission change so Impala reloads
    # the table's file metadata before the next INSERT attempt.
    self.execute_query_expect_success(self.client,
                                      "REFRESH functional.insert_acl_permissions")
    self.execute_query_expect_failure(self.client, INSERT_QUERY)

    # Now add group access, still should fail (because the user will match and take
    # priority)
    self.hdfs_client.setacl(TBL_PATH, "user::r-x,group::rwx,other::r-x")
    self.execute_query_expect_success(self.client,
                                      "REFRESH functional.insert_acl_permissions")
    self.execute_query_expect_failure(self.client, INSERT_QUERY)

    # Now make the target directory non-writable with posix permissions, but writable with
    # ACLs (ACLs should take priority). Note: chmod affects ACLs (!) so it has to be done
    # first.
    self.hdfs_client.chmod(TBL_PATH, "000")
    self.hdfs_client.setacl(TBL_PATH, "user::rwx,group::r-x,other::r-x")
    self.execute_query_expect_success(self.client,
                                      "REFRESH functional.insert_acl_permissions")
    self.execute_query_expect_success(self.client, INSERT_QUERY)

    # Finally, change the owner
    self.hdfs_client.chown(TBL_PATH, "another_user", "another_group")
    self.execute_query_expect_success(self.client,
                                      "REFRESH functional.insert_acl_permissions")
    # Should be unwritable because 'other' ACLs don't allow writes
    self.execute_query_expect_failure(self.client, INSERT_QUERY)

    # Give write perms back to 'other'
    self.hdfs_client.setacl(TBL_PATH, "user::rwx,group::r-x,other::rwx")
    self.execute_query_expect_success(self.client,
                                      "REFRESH functional.insert_acl_permissions")
    # Should be writable because 'other' ACLs allow writes
    self.execute_query_expect_success(self.client, INSERT_QUERY)

  @SkipIfIsilon.hdfs_acls
  def test_insert_acl_permissions(self):
    """Test that INSERT correctly respects ACLs"""
    TBL = "functional.insert_acl_permissions"
    TBL_PATH = "test-warehouse/functional.db/insert_acl_permissions"
    INSERT_QUERY = "INSERT INTO %s VALUES(1)" % TBL

    self.execute_query_expect_success(self.client, "DROP TABLE IF EXISTS"
                                      " functional.insert_acl_permissions")
    self.execute_query_expect_success(self.client, "CREATE TABLE "
                                      "functional.insert_acl_permissions (col int) ")

    # Check that a simple insert works
    self.execute_query_expect_success(self.client, INSERT_QUERY)

    USER = getpass.getuser()
    GROUP = grp.getgrgid(pwd.getpwnam(USER).pw_gid).gr_name

    # First, change the owner to someone other than user who runs impala service
    self.hdfs_client.chown(TBL_PATH, "another_user", GROUP)

    # Remove the permission to write and confirm that INSERTs won't work
    self.hdfs_client.setacl(TBL_PATH,
                            "user::r-x,user:" + USER + ":r-x,group::r-x,other::r-x")
    self.execute_query_expect_success(self.client,
                                      "REFRESH functional.insert_acl_permissions")
    self.execute_query_expect_failure(self.client, INSERT_QUERY)

    # Add the permission to write. if we're not the owner of the file, INSERTs should work
    self.hdfs_client.setacl(TBL_PATH,
                            "user::r-x,user:" + USER + ":rwx,group::r-x,other::r-x")
    self.execute_query_expect_success(self.client,
                                      "REFRESH functional.insert_acl_permissions")
    self.execute_query_expect_success(self.client, INSERT_QUERY)

    # Now add group access, still should fail (because the user will match and take
    # priority)
    self.hdfs_client.setacl(TBL_PATH,
                            "user::r-x,user:" + USER + ":r-x,group::rwx,other::r-x")
    self.execute_query_expect_success(self.client,
                                      "REFRESH functional.insert_acl_permissions")
    self.execute_query_expect_failure(self.client, INSERT_QUERY)

    # Check that the mask correctly applies to the anonymous group ACL
    self.hdfs_client.setacl(TBL_PATH, "user::r-x,group::rwx,other::rwx,mask::r--")
    self.execute_query_expect_success(self.client,
                                      "REFRESH functional.insert_acl_permissions")
    # Should be unwritable because mask applies to unnamed group and disables writing
    self.execute_query_expect_failure(self.client, INSERT_QUERY)

    # Check that the mask correctly applies to the named user ACL
    self.hdfs_client.setacl(TBL_PATH, "user::r-x,user:" + USER +
                            ":rwx,group::r-x,other::rwx,mask::r--")
    self.execute_query_expect_success(self.client,
                                      "REFRESH functional.insert_acl_permissions")
    # Should be unwritable because mask applies to named user and disables writing
    self.execute_query_expect_failure(self.client, INSERT_QUERY)

    # Now make the target directory non-writable with posix permissions, but writable with
    # ACLs (ACLs should take priority). Note: chmod affects ACLs (!) so it has to be done
    # first.
    self.hdfs_client.chmod(TBL_PATH, "000")
    self.hdfs_client.setacl(TBL_PATH, "user::rwx,user:foo:rwx,group::rwx,other::r-x")
    self.execute_query_expect_success(self.client,
                                      "REFRESH functional.insert_acl_permissions")
    self.execute_query_expect_success(self.client, INSERT_QUERY)

    # Finally, change the owner/group
    self.hdfs_client.chown(TBL_PATH, "test_user", "invalid")
    self.execute_query_expect_success(self.client,
                                      "REFRESH functional.insert_acl_permissions")
    # Should be unwritable because 'other' ACLs don't allow writes
    self.execute_query_expect_failure(self.client, INSERT_QUERY)

    # Give write perms back to 'other'
    self.hdfs_client.setacl(TBL_PATH, "user::r-x,user:foo:rwx,group::r-x,other::rwx")
    self.execute_query_expect_success(self.client,
                                      "REFRESH functional.insert_acl_permissions")
    # Should be writable because 'other' ACLs allow writes
    self.execute_query_expect_success(self.client, INSERT_QUERY)
  @SkipIfIsilon.hdfs_acls
  def test_load_permissions(self):
    """Validate LOAD DATA failure modes against src/dest directory permissions."""
    # We rely on test_insert_acl_permissions() to exhaustively check that ACL semantics
    # are correct. Here we just validate that LOADs can't be done when we cannot read from
    # or write to the src directory, or write to the dest directory.
    TBL = "functional.load_acl_permissions"
    TBL_PATH = "test-warehouse/functional.db/load_acl_permissions"
    FILE_PATH = "tmp/impala_load_test"
    FILE_NAME = "%s/impala_data_file" % FILE_PATH

    LOAD_FILE_QUERY = "LOAD DATA INPATH '/%s' INTO TABLE %s" % (FILE_NAME, TBL)
    LOAD_DIR_QUERY = "LOAD DATA INPATH '/%s' INTO TABLE %s" % (FILE_PATH, TBL)

    self.hdfs_client.make_dir(FILE_PATH)
    self.hdfs_client.setacl(FILE_PATH, "user::rwx,group::rwx,other::---")

    self.execute_query_expect_success(self.client, "DROP TABLE IF EXISTS"
                                      " functional.load_acl_permissions")
    self.execute_query_expect_success(self.client, "CREATE TABLE "
                                      "functional.load_acl_permissions (col int)")
    # Baseline: loading a readable file into a writable table succeeds.
    # (LOAD DATA moves the file, so it is recreated before each attempt.)
    self.hdfs_client.delete_file_dir(FILE_NAME)
    self.hdfs_client.create_file(FILE_NAME, "1")
    self.execute_query_expect_success(self.client, LOAD_FILE_QUERY)

    # Now remove write perms from the source directory
    self.hdfs_client.create_file(FILE_NAME, "1")
    self.hdfs_client.setacl(FILE_PATH, "user::---,group::---,other::---")
    self.hdfs_client.setacl(TBL_PATH, "user::rwx,group::r-x,other::r-x")
    self.execute_query_expect_success(self.client,
                                      "REFRESH functional.load_acl_permissions")
    self.execute_query_expect_failure(self.client, LOAD_FILE_QUERY)
    self.execute_query_expect_failure(self.client, LOAD_DIR_QUERY)

    # Remove write perms from target
    self.hdfs_client.setacl(FILE_PATH, "user::rwx,group::rwx,other::rwx")
    self.hdfs_client.setacl(TBL_PATH, "user::r-x,group::r-x,other::r-x")
    self.execute_query_expect_success(self.client,
                                      "REFRESH functional.load_acl_permissions")
    self.execute_query_expect_failure(self.client, LOAD_FILE_QUERY)
    self.execute_query_expect_failure(self.client, LOAD_DIR_QUERY)

    # Finally remove read perms from file itself
    self.hdfs_client.setacl(FILE_NAME, "user::-wx,group::rwx,other::rwx")
    self.hdfs_client.setacl(TBL_PATH, "user::rwx,group::rwx,other::rwx")
    self.execute_query_expect_success(self.client,
                                      "REFRESH functional.load_acl_permissions")
    self.execute_query_expect_failure(self.client, LOAD_FILE_QUERY)

    # We expect this to succeed, it's not an error if all files in the dir cannot be read
    self.execute_query_expect_success(self.client, LOAD_DIR_QUERY)
@pytest.mark.execute_serially
def test_insert_select_with_empty_resultset(self):
"""Test insert/select query won't trigger partition directory or zero size data file
creation if the resultset of select is empty."""
def check_path_exists(path, should_exist):
fail = None
try:
self.hdfs_client.get_file_dir_status(path)
if not should_exist:
pytest.fail("file/dir '%s' unexpectedly exists" % path)
except Exception, e:
if should_exist:
pytest.fail("file/dir '%s' does not exist" % path)
db_path = "test-warehouse/%s.db/" % self.TEST_DB_NAME
table_path = db_path + "test_insert_empty_result"
partition_path = "{0}/year=2009/month=1".format(table_path)
check_path_exists(table_path, False)
table_name = self.TEST_DB_NAME + ".test_insert_empty_result"
self.execute_query_expect_success(self.client, ("CREATE TABLE %s (id INT, col INT)"
" PARTITIONED BY (year INT, month INT)" % table_name))
check_path_exists(table_path, True)
check_path_exists(partition_path, False)
# Run an insert/select stmt that returns an empty resultset.
insert_query = ("INSERT INTO TABLE {0} PARTITION(year=2009, month=1)"
"select 1, 1 from {0} LIMIT 0".format(table_name))
self.execute_query_expect_success(self.client, insert_query)
# Partition directory should not be created
check_path_exists(partition_path, False)
# Insert one record
insert_query_one_row = ("INSERT INTO TABLE %s PARTITION(year=2009, month=1) "
"values(2, 2)" % table_name)
self.execute_query_expect_success(self.client, insert_query_one_row)
# Partition directory should be created with one data file
check_path_exists(partition_path, True)
ls = self.hdfs_client.list_dir(partition_path)
assert len(ls['FileStatuses']['FileStatus']) == 1
# Run an insert/select statement that returns an empty resultset again
self.execute_query_expect_success(self.client, insert_query)
# No new data file should be created
new_ls = self.hdfs_client.list_dir(partition_path)
assert len(new_ls['FileStatuses']['FileStatus']) == 1
assert new_ls['FileStatuses'] == ls['FileStatuses']
# Run an insert overwrite/select that returns an empty resultset
insert_query = ("INSERT OVERWRITE {0} PARTITION(year=2009, month=1)"
" select 1, 1 from {0} LIMIT 0".format(table_name))
self.execute_query_expect_success(self.client, insert_query)
# Data file should be deleted
new_ls2 = self.hdfs_client.list_dir(partition_path)
assert len(new_ls2['FileStatuses']['FileStatus']) == 0
assert new_ls['FileStatuses'] != new_ls2['FileStatuses']
# Test for IMPALA-2008 insert overwrite to an empty table with empty dataset
empty_target_tbl = "test_overwrite_with_empty_target"
create_table = "create table %s.%s (id INT, col INT)" % (self.TEST_DB_NAME,
empty_target_tbl)
self.execute_query_expect_success(self.client, create_table)
insert_query = ("INSERT OVERWRITE {0}.{1} select 1, 1 from {0}.{1} LIMIT 0"
.format(self.TEST_DB_NAME, empty_target_tbl))
self.execute_query_expect_success(self.client, insert_query)
# Delete target table directory, query should fail with
# "No such file or directory" error
target_table_path = "%s%s" % (db_path, empty_target_tbl)
self.hdfs_client.delete_file_dir(target_table_path, recursive=True)
self.execute_query_expect_failure(self.client, insert_query)
  @SkipIfIsilon.hdfs_acls
  def test_multiple_group_acls(self):
    """Test that INSERT correctly respects multiple group ACLs"""
    TBL = "functional.insert_group_acl_permissions"
    TBL_PATH = "test-warehouse/functional.db/insert_group_acl_permissions"
    INSERT_QUERY = "INSERT INTO %s VALUES(1)" % TBL

    self.execute_query_expect_success(self.client, "DROP TABLE IF EXISTS " + TBL)
    self.execute_query_expect_success(self.client, "CREATE TABLE %s (col int)" % TBL)

    USER = getpass.getuser()
    TEST_USER = "test_user"
    # Get the list of all groups of USER except the user's owning group.
    # (grp's gr_mem lists supplementary members only, so the primary group is
    # naturally excluded from GROUPS.)
    OWNINGROUP = grp.getgrgid(pwd.getpwnam(USER).pw_gid).gr_name
    GROUPS = [g.gr_name for g in grp.getgrall() if USER in g.gr_mem]
    if (len(GROUPS) < 1):
      pytest.xfail(reason="Cannot run test, user belongs to only one group.")

    # First, change the owner to someone other than user who runs impala service
    self.hdfs_client.chown(TBL_PATH, "another_user", OWNINGROUP)

    # Set two group ACLs, one contains requested permission, the other doesn't.
    self.hdfs_client.setacl(TBL_PATH,
        "user::r-x,user:{0}:r-x,group::---,group:{1}:rwx,other::r-x"
        .format(TEST_USER, GROUPS[0]))
    self.execute_query_expect_success(self.client, "REFRESH " + TBL)
    self.execute_query_expect_success(self.client, INSERT_QUERY)

    # Two group ACLs but with mask to deny the permission.
    self.hdfs_client.setacl(TBL_PATH,
        "user::r-x,group::r--,group:{0}:rwx,mask::r-x,other::---".format(GROUPS[0]))
    self.execute_query_expect_success(self.client, "REFRESH " + TBL)
    self.execute_query_expect_failure(self.client, INSERT_QUERY)
| scalingdata/Impala | tests/query_test/test_insert_behaviour.py | Python | apache-2.0 | 22,376 |
"""
created 09/05/17
For executation of the kallisto quantification step
To be run with three arguements
* basedir - top level output directory
* input directory - contains folders with .fastq.gz files
* max_threads - how many threads to allocate to kallisto
Returns kallisto quantifications and associated log files to a directory
within the top level output dir.
An example pair of files is:
25uM_1_R1_trimmed_1P.fastq.gz
25uM_1_R1_trimmed_2P.fastq.gz
Outputs kallisto files for each read pair and
associated log files in a nested directory
"""
# --- packages
import os
import sys
from subprocess import call
# --- variables using sys.argv
basedir = sys.argv[1]         # top-level output dir; assumed to end with '/' — TODO confirm
inputdirectory = sys.argv[2]  # dir containing the trimmed .fastq.gz read pairs
max_threads = sys.argv[3]     # passed straight to 'kallisto quant -t' (kept as a string)
processed = basedir + "kallisto/"  # destination dir for the kallisto output
# --- functions
def kallisto_call(read1):
    """
    Run 'kallisto quant' for the read pair whose forward read is `read1`.

    read1 is the forward-read filename (containing '_1P'); the reverse read
    filename is derived by substituting '2P'. Output goes to a subdirectory
    of `processed` named after the sample (basename with '_1P' stripped).

    Uses the module-level basedir/inputdirectory/max_threads/processed
    configuration. (The old docstring mentioned a lock argument 'l' that was
    never part of the signature.)
    """
    dividing = read1.split(".")
    basename = dividing[0].replace("_1P", "")
    read2 = read1.replace("1P", "2P")
    # Pass the command as an argument list with shell=False: no shell quoting
    # surprises if filenames ever contain spaces or metacharacters.
    call([
        "kallisto", "quant",
        "-i", basedir + "transcriptome_kallisto.idx",
        "-t", max_threads,
        "-o", processed + basename,
        "-b", "100",
        inputdirectory + read1,
        inputdirectory + read2,
    ])
# --- __main__ call
if __name__ == "__main__":
    # --- check dirs and create if neccessary
    if not os.path.exists(processed):
        os.makedirs(processed)
    # --- create list of read1 pair file names
    # Only forward reads are collected; kallisto_call derives the matching
    # reverse-read filename itself.
    read_list = []
    for fname in os.listdir(inputdirectory):
        if "1P" in fname:
            read_list.append(fname)
    # --- call kallisto_call on each read pair in parallel
    # NOTE(review): despite the comment, this loop is sequential — each
    # kallisto invocation runs to completion before the next starts.
    for read in read_list:
        kallisto_call(read)
| samleenz/rnaseq_pipe | kallisto_quant.py | Python | mit | 1,936 |
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
    # Standard Django entry point: select the settings module, then hand the
    # command line over to Django's management framework.
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "lunchy.settings")
    from django.core.management import execute_from_command_line
    execute_from_command_line(sys.argv)
| chlab/lunchy | manage.py | Python | mit | 249 |
#!/usr/bin/env python2.7
import os
import pickle
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.cm import get_cmap
from matplotlib.colors import SymLogNorm #PowerNorm
from shapely.geometry import Polygon, MultiPolygon, Point
#import seaborn as sns
# --------------------------------------------------------------------------------
def make_map(ax_0, cru_data, clipping, cticks, title, **kargs):
    """Draw a filled-contour map of a CRU climate field on the given axes.

    ax_0: matplotlib axes to draw into.
    cru_data: dict with 'lat', 'lon', 'value' arrays and meta['units'].
    clipping: a matplotlib Path used to clip the contours (savanna bioregion).
    cticks: tick positions for the colorbar.
    title: axes title.
    **kargs: forwarded to Basemap.contourf (levels, cmap, norm, ...).

    Returns the Basemap instance.

    NOTE(review): 'Basemap' (mpl_toolkits.basemap) and 'PathPatch'
    (matplotlib.patches) are not among the imports visible at the top of this
    file — confirm they are imported elsewhere or this function will raise
    NameError when called.
    """
    # extract coordinate information
    lat = cru_data["lat"]
    lon = cru_data["lon"]
    data_var = cru_data['value']
    data_units = cru_data['meta']['units']
    # now create a global map canvas to plot on
    globe_map = Basemap(llcrnrlon=-120, llcrnrlat=-40, \
                        urcrnrlon=max(lon), urcrnrlat=40, \
                        resolution='l', projection='cyl', \
                        lat_0=0, lon_0=0, ax=ax_0)
    # draw spatial extras to denote land and sea
    sea_color = 'dimgray'
    globe_map.drawmapboundary(fill_color=sea_color)
    globe_map.drawcoastlines(color='black', linewidth=0.5)
    globe_map.fillcontinents(color='lightgray', lake_color=sea_color, zorder=0)
    globe_map.drawparallels(np.arange(-90, 90, 20), color='grey', labels=[1, 0, 0, 0])
    globe_map.drawmeridians(np.arange(0, 360, 30), color='grey', labels=[0, 0, 0, 1])
    # compute map proj coordinates
    lons, lats = np.meshgrid(lon, lat)
    x, y = globe_map(lons, lats)
    # plot data on the map
    cs = globe_map.contourf(x, y, data_var, **kargs)
    # add a colorbar
    cbar = globe_map.colorbar(cs, location='right', pad="2%", size="2%")
    cbar.set_label(data_units)
    cbar.set_ticks(cticks)
    # Title
    ax_0.set_title(title, fontsize=12)
    sav_geom = PathPatch(clipping, transform=ax_0.transData)
    # Clip and Rasterize the contour collections
    for contour in cs.collections:
        contour.set_clip_path(sav_geom)
        contour.set_rasterized(True)
    return globe_map
# --------------------------------------------------------------------------------
def pickle3_load(bin_file):
    """
    Load a pickle written under Python 2 from *bin_file*.

    Works around a Python 3 unpickling issue with binary (8-bit string)
    payloads by forcing latin1 decoding of legacy str objects.
    """
    with open(bin_file, 'rb') as handle:
        unpickler = pickle._Unpickler(handle)
        unpickler.encoding = 'latin1'
        return unpickler.load()
# --------------------------------------------------------------------------------
def main():
    """Build a temperature-vs-rainfall climate-space plot from CRU pickles.

    NOTE(review): this function appears to be work-in-progress — it returns
    early (see below) before the plotting code runs.
    """
    get_file = lambda x: "cru_ts3.23.1901.2014.{0}.100mean.pkl".format(x)

    # import data
    tair_data = pickle.load(open(FILEPATH + get_file('tmp'), 'rb'))
    rain_data = pickle.load(open(FILEPATH + get_file('pre'), 'rb'))

    # import savanna bioregion geometry
    sav_geom = pickle.load(open(PATCHPATH, 'rb'))
    # convert vertices to multipolygon object
    #sav_poly = MultiPolygon([Polygon(pl) for pl in sav_geom['poly']])
    sav_poly = [Polygon(pl) for pl in sav_geom['poly']]

    # transform geo-coords values to grids (same dim as the climate data)
    lat_grid, lon_grid = np.meshgrid(tair_data['lon'], tair_data['lat'])

    # flatten all the data to store as a pandas dataframe so we can use with
    # seaborn graphics
    tair = tair_data['value'].flatten()
    # ** scale rainfall up to an annual value (mm/month -> mm/year)
    rain = rain_data['value'].flatten() * 12
    lons = lon_grid.flatten()
    lats = lat_grid.flatten()

    # create labels to differentiate savanna from global points
    output = []
    # >> slow as hell
    # for i, (gx, gy) in enumerate(zip(lons, lats)):
    #     print "percent: {0:.6f}".format(float(i)/float(len(lons)))
    #     for j, sp in enumerate(sav_poly):
    #         if Point(gx, gy).contains(sp):
    #             output.append("savanna")
    # >> faster but still way too slow
    # for i, (gx, gy) in enumerate(zip(lons, lats)):
    #     print "percent: {0:.6f}".format(float(i)/float(len(lons)))
    #     if sav_geom['clip'].contains_point((gx, gy)):
    #         output.append("savanna")
    # Vectorized point-in-path test over all grid points at once.
    moo = sav_geom['clip'].contains_points(zip(lons, lats))
    print(len(output))
    # NOTE(review): this early return makes everything below unreachable;
    # the 'moo' result above is also never used.
    return 1
    print(len(tair))
    # NOTE(review): len() is missing its argument here — would raise
    # TypeError if this line were ever reached.
    print(len())

    fig = plt.figure(figsize=(10, 6), frameon=False)
    plt.plot(rain*12, tair, 'o', color='gray', markeredgecolor='none', alpha=0.2)
    plt.xlim([1e-1, 2e4])
    plt.ylim([-35, 35])
    plt.xscale('log')
    #plt.show()
if __name__ == "__main__":
    # Input/output locations (user-specific paths).
    FILEPATH = os.path.expanduser("~/Work/Research_Work/Climatologies/CRU/CRU_TS3/")
    IMAGEPATH = os.path.expanduser("~/Work/Research_Work/GiS_Data/Images/blue_marble/noaa_world_topo_bathymetric_lg.jpg")
    PATCHPATH = os.path.expanduser("~/Savanna/Data/GiS/Savanna_Bioregion_Path.pkl")
    SAVEPATH = os.path.expanduser("~/Work/Research_Work/Working_Publications/Savannas/SavReview/figures/Fig1_globalsav.pdf")
    # PFTS that <broadly> define/encompass global savannas
    PFTS = ["Tropical moist deciduous forest", \
            "Tropical dry forest", \
            "Subtropical dry forest", \
            "Tropical shrubland"]
    MAPCOLOR = 'viridis'
    main()
| rhyswhitley/spatial_plots | src/climate_space.py | Python | cc0-1.0 | 5,036 |
import json
from os.path import join, dirname, exists, relpath
import os
import traceback
import logging
from werkzeug.utils import secure_filename
from flask import request, abort, jsonify
from datashape import Mono, discover
from datashape.predicates import iscollection
from blaze.utils import json_dumps
from blaze.server.server import to_tree, from_tree
from blaze import into, compute
from blaze.expr import Expr, Symbol, Selection, Broadcast, symbol
from blaze.expr.parser import exprify
from .app import mbsbp
from .settings import settings
from .errors import ServerException
logger = logging.getLogger(__name__)
@mbsbp.route('/datashape')
def dataset():
    """Return the datashape of every configured dataset as a plain string."""
    all_data = settings.datamanager.all_datasets()
    return str(discover(all_data))
@mbsbp.app_errorhandler(ServerException)
def error(e):
    """Serialize a ServerException into a JSON response with its status code."""
    resp = jsonify(e.to_dict())
    resp.status_code = e.status_code
    return resp
def _compserver(payload):
    """Evaluate a serialized blaze expression against all configured datasets.

    payload: decoded JSON body; 'expr' is a blaze expression tree (see
    blaze.server to_tree/from_tree) and the optional 'namespace' supplies
    extra symbols for deserialization.

    Returns (expr, result). Raises ServerException with status 500 (carrying
    the traceback text) if computation fails.
    """
    dataset = settings.datamanager.all_datasets()

    ns = payload.get('namespace', dict())
    # ':leaf' is the conventional name the client uses for the dataset root.
    ns[':leaf'] = symbol('leaf', discover(dataset))

    expr = from_tree(payload['expr'], namespace=ns)
    # The expression must reference exactly one leaf — the dataset root.
    assert len(expr._leaves()) == 1
    leaf = expr._leaves()[0]

    try:
        result = compute(expr, {leaf: dataset})
    except Exception as e:
        logger.exception(e)
        msg = traceback.format_exc()
        raise ServerException(msg, status_code=500)
    return expr, result
@mbsbp.route('/compute.json', methods=['POST', 'PUT', 'GET'])
#TODO add read-only authentication checks by parsing the expr graph
def compserver():
    """HTTP endpoint around _compserver.

    Returns a JSON document with 'datashape', 'names' and 'data'. Responds
    404 if the request carries no JSON body.
    """
    if not request.json:
        raise ServerException('Expected JSON data', status_code=404)
    payload = request.json

    expr, result = _compserver(payload)

    # Materialize collection results to lists so they are JSON-serializable.
    if iscollection(expr.dshape):
        result = into(list, result)
    return json.dumps({'datashape': str(expr.dshape),
                       'names' : expr.fields,
                       'data': result}, default=json_dumps)
@mbsbp.route("/upload", methods=['POST'])
def upload():
    """Save an uploaded file into the current user's data directory.

    Security fix: the client-supplied filename is now sanitized with
    werkzeug's secure_filename() before being joined into a storage path —
    the helper was already imported at the top of this file but never used,
    leaving the endpoint open to path traversal via names like '../../x'.

    Returns JSON {"path": <relative path>} on success, 403 if the user may
    not write to the target location.
    """
    username = settings.auth_backend.current_username()
    f = request.files['file']
    filename = secure_filename(f.filename)
    path = settings.datamanager.data_path(username, filename, absolute=True)
    if not settings.auth_backend.can_write(path, username):
        return abort(403)
    if not exists(dirname(path)):
        os.makedirs(dirname(path))
    f.save(path)
    path = settings.datamanager.data_path(username, filename, absolute=False)
    return jsonify(path=path)
@mbsbp.route("/ls/<username>", methods=['GET'])
@mbsbp.route("/ls", methods=['GET'])
def ls(username=None):
    """List data files visible to *username* (or all users when omitted)."""
    listing = settings.datamanager.ls(username=username)
    return jsonify(files=listing)
@mbsbp.route("/configure", methods=['POST'])
def configure():
    """Configure (or delete) a dataset identified by URI.

    Expects JSON with 'uri', 'kwargs' and optional '_delete'. Returns 403 if
    the current user may not write to the path the URI resolves to, otherwise
    {"status": "success"}.
    """
    kwargs = request.json['kwargs']
    uri = request.json['uri']
    delete = request.json.get('_delete', False)
    username = settings.auth_backend.current_username()
    # Resolve the URI to a concrete storage path for the permission check.
    protocol, fusername, fpath, datapath = settings.datamanager.parse(uri)
    complete_path = settings.datamanager.data_path(fusername, fpath)
    if not settings.auth_backend.can_write(complete_path, username):
        return abort(403)
    if delete:
        settings.datamanager.delete(uri.encode('utf-8'))
    else:
        settings.datamanager.configure(uri.encode('utf-8'), **kwargs)
    return jsonify(status='success')
| ContinuumIO/multiuserblazeserver | mbs/views.py | Python | bsd-3-clause | 3,284 |
#!/usr/bin/python
# coding: utf-8
class Solution(object):
    def networkDelayTime(self, times, N, K):
        """Dijkstra without a heap: O(N^2 + E).

        :type times: List[List[int]]  directed edges (u, v, w)
        :type N: int   number of nodes, labelled 1..N
        :type K: int   source node
        :rtype: int    time for the signal to reach all nodes, or -1

        Bug fix: 'collections' was used without being imported anywhere in
        this file, and 'xrange' breaks on Python 3; imports are now explicit
        and 'range' (valid on both 2 and 3) is used.
        """
        import collections

        graph = collections.defaultdict(list)
        for u, v, w in times:
            graph[u].append((v, w))

        dist = {node: float('inf') for node in range(1, N + 1)}
        seen = [False] * (N + 1)
        dist[K] = 0
        while True:
            # Select the unvisited node with the smallest tentative distance.
            cand_node = -1
            cand_dist = float('inf')
            for i in range(1, N + 1):
                if not seen[i] and dist[i] < cand_dist:
                    cand_dist = dist[i]
                    cand_node = i
            if cand_node < 0:
                break
            seen[cand_node] = True
            # Relax all outgoing edges of the selected node.
            for nei, d in graph[cand_node]:
                dist[nei] = min(dist[nei], dist[cand_node] + d)

        ans = max(dist.values())
        return ans if ans < float('inf') else -1

    def networkDelayTimeFast(self, times, N, K):
        """Heap-based Dijkstra: O(E log E). Same contract as networkDelayTime."""
        import collections
        import heapq

        graph = collections.defaultdict(list)
        for u, v, w in times:
            graph[u].append((v, w))

        pq = [(0, K)]
        dist = {}
        while pq:
            d, node = heapq.heappop(pq)
            if node in dist:
                continue  # already finalized with a shorter distance
            dist[node] = d
            for nei, d2 in graph[node]:
                if nei not in dist:
                    heapq.heappush(pq, (d + d2, nei))

        # Every node reached iff dist has N entries.
        return max(dist.values()) if len(dist) == N else -1
# Copyright 2009-2012, Peter A. Bigot
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain a
# copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Functions that support activities related to the Document Object Model."""
import pyxb
import pyxb.namespace
import pyxb.utils.saxutils
import pyxb.utils.saxdom
import xml.dom
import logging
_log = logging.getLogger(__name__)
# The DOM implementation to be used for all processing. Default is whatever
# your Python install uses. If it's minidom, it should work.
__DOMImplementation = xml.dom.getDOMImplementation()
def GetDOMImplementation ():
    """Return the DOMImplementation object used for pyxb operations.

    This is primarily used as the default implementation when generating DOM
    trees from a binding instance. It defaults to whatever
    xml.dom.getDOMImplementation() returns in your installation (often
    xml.dom.minidom). It can be overridden with SetDOMImplementation()."""
    # Reading a module-level name needs no 'global' declaration.
    return __DOMImplementation
def SetDOMImplementation (dom_implementation):
    """Override the default DOMImplementation object.

    @param dom_implementation: the xml.dom.DOMImplementation instance to be
    returned by subsequent GetDOMImplementation() calls.
    @return: the newly installed implementation (for chaining)."""
    global __DOMImplementation
    __DOMImplementation = dom_implementation
    return __DOMImplementation
# Unfortunately, the DOMImplementation interface doesn't provide a parser. So
# abstract this in case somebody wants to substitute a different one. Haven't
# decided how to express that yet.
def StringToDOM (text, **kw):
    """Convert string to a DOM instance.

    When the active XML style is minidom, parse with a SAX parser into
    xml.dom.minidom; otherwise use pyxb's SAX-based DOM implementation.

    @see: L{pyxb._SetXMLStyle}."""
    if pyxb.XMLStyle_minidom == pyxb._XMLStyle:
        parser = pyxb.utils.saxutils.make_parser()
        return xml.dom.minidom.parseString(text, parser)
    # Consistency fix: use the module already imported at file scope rather
    # than the bare 'import saxdom', which relies on Python 2 implicit
    # relative imports and fails under absolute-import semantics.
    return pyxb.utils.saxdom.parseString(text, **kw)
def NodeAttribute (node, attribute_ncname, attribute_ns=None):
    """Namespace-aware search for an optional attribute in a node.

    @param attribute_ncname: The local name of the attribute.
    @type attribute_ncname: C{str} or C{unicode}

    @keyword attribute_ns: The namespace of the attribute. Defaults to None
    since most attributes are not in a namespace. Can be provided as either a
    L{pyxb.namespace.Namespace} instance, or a string URI.
    @type attribute_ns: C{None} or C{str} or C{unicode} or L{pyxb.namespace.Namespace}

    @return: The value of the attribute, or C{None} if the attribute is not
    present. (Unless C{None}, the value will always be a (unicode) string.)
    """
    if isinstance(attribute_ns, pyxb.namespace.Namespace):
        ns_uri = attribute_ns.uri()
    else:
        ns_uri = attribute_ns
    attr = node.getAttributeNodeNS(ns_uri, attribute_ncname)
    return attr.value if attr is not None else None
def LocateUniqueChild (node, tag, absent_ok=True, namespace=pyxb.namespace.XMLSchema):
    """Locate a unique child of the DOM node.

    This function returns the sole child of node which is an ELEMENT_NODE
    instance and has a tag consistent with the given tag.  If multiple nodes
    with a matching C{tag} are found, or C{absent_ok} is C{False} and no
    matching tag is found, an exception is raised.

    @param node: An a xml.dom.Node ELEMENT_NODE instance
    @param tag: the NCName of an element in the namespace
    @keyword absent_ok: If C{True} (default), C{None} is returned if no match
    can be found.  If C{False}, an exception is raised if no match can be
    found.
    @keyword namespace: The namespace to which the child element belongs.
    Default is the XMLSchema namespace.
    @rtype: C{xml.dom.Node}

    @raise pyxb.SchemaValidationError: multiple elements are identified
    @raise pyxb.SchemaValidationError: C{absent_ok} is C{False} and no element is identified.
    """
    candidate = None
    for cn in node.childNodes:
        if (xml.dom.Node.ELEMENT_NODE == cn.nodeType) and namespace.nodeIsNamed(cn, tag):
            if candidate:
                # Fixed: the raise previously referenced the undefined name
                # "name", so this path raised NameError instead of the
                # intended SchemaValidationError.
                raise pyxb.SchemaValidationError('Multiple %s elements nested in %s' % (tag, node.nodeName))
            candidate = cn
    if (candidate is None) and not absent_ok:
        # Fixed: same undefined-name defect as above.
        raise pyxb.SchemaValidationError('Expected %s elements nested in %s' % (tag, node.nodeName))
    return candidate
def LocateMatchingChildren (node, tag, namespace=pyxb.namespace.XMLSchema):
    """Locate all children of the DOM node that have a particular tag.

    This function returns a list of children of node which are ELEMENT_NODE
    instances and have a tag consistent with the given tag.

    @param node: An a xml.dom.Node ELEMENT_NODE instance.
    @param tag: the NCName of an element in the namespace, which defaults to the
    XMLSchema namespace.
    @keyword namespace: The namespace to which the child element belongs.
    Default is the XMLSchema namespace.

    @rtype: C{list(xml.dom.Node)}
    """
    return [cn for cn in node.childNodes
            if (xml.dom.Node.ELEMENT_NODE == cn.nodeType) and namespace.nodeIsNamed(cn, tag)]
def LocateFirstChildElement (node, absent_ok=True, require_unique=False, ignore_annotations=True):
    """Locate the first element child of the node.

    @param node: An a xml.dom.Node ELEMENT_NODE instance.
    @keyword absent_ok: If C{True} (default), C{None} is returned if no match
    can be found.  If C{False}, an exception is raised if no match can be
    found.
    @keyword require_unique: If C{False} (default), it is acceptable for there
    to be multiple child elements.  If C{True}, presence of multiple child
    elements raises an exception.
    @keyword ignore_annotations: If C{True} (default), annotations are skipped
    when looking for the first child element.  If C{False}, an annotation
    counts as an element.
    @rtype: C{xml.dom.Node}

    @raise SchemaValidationError: C{absent_ok} is C{False} and no child
    element was identified.
    @raise SchemaValidationError: C{require_unique} is C{True} and multiple
    child elements were identified
    """
    candidate = None
    for cn in node.childNodes:
        # Guard clauses: skip non-elements, and (optionally) annotations.
        if xml.dom.Node.ELEMENT_NODE != cn.nodeType:
            continue
        if ignore_annotations and pyxb.namespace.XMLSchema.nodeIsNamed(cn, 'annotation'):
            continue
        if not require_unique:
            return cn
        if candidate:
            raise pyxb.SchemaValidationError('Multiple elements nested in %s' % (node.nodeName,))
        candidate = cn
    if (candidate is None) and not absent_ok:
        raise pyxb.SchemaValidationError('No elements nested in %s' % (node.nodeName,))
    return candidate
def HasNonAnnotationChild (node):
    """Return True iff C{node} has an ELEMENT_NODE child that is not an
    XMLSchema annotation node.

    @rtype: C{bool}
    """
    return any((xml.dom.Node.ELEMENT_NODE == cn.nodeType)
               and (not pyxb.namespace.XMLSchema.nodeIsNamed(cn, 'annotation'))
               for cn in node.childNodes)
def ExtractTextContent (node):
    """Walk all the children, extracting all text content and
    catenating it into the return value.

    Returns C{None} if no text content (including whitespace) is found.

    This is mainly used to strip comments out of the content of complex
    elements with simple types.

    @rtype: C{unicode} or C{str}
    """
    fragments = []
    for cn in node.childNodes:
        node_type = cn.nodeType
        if node_type in (xml.dom.Node.TEXT_NODE, xml.dom.Node.CDATA_SECTION_NODE):
            fragments.append(cn.data)
        elif xml.dom.Node.COMMENT_NODE == node_type:
            # Comments are silently dropped.
            continue
        else:
            raise pyxb.BadDocumentError('Non-text node %s found in content' % (cn,))
    if not fragments:
        return None
    return ''.join(fragments)
class _BDSNamespaceSupport (object):
    """Class holding information relevant to generating the namespace aspects
    of a DOM instance."""
    # Map from Namespace instance to the prefix used for it in this document
    # (None for the default namespace).
    __namespaces = None
    # Integer counter to help generate unique namespace prefixes
    __namespacePrefixCounter = None
    def defaultNamespace (self):
        """The registered default namespace.

        @rtype: L{pyxb.namespace.Namespace}
        """
        return self.__defaultNamespace
    __defaultNamespace = None
    def setDefaultNamespace (self, default_namespace):
        """Set the default namespace for the generated document.

        Even if invoked post construction, the default namespace will affect
        the entire document, as all namespace declarations are placed in the
        document root.

        @param default_namespace: The namespace to be defined as the default
        namespace in the top-level element of the document.  May be provided
        as a real namespace, or just its URI.
        @type default_namespace: L{pyxb.namespace.Namespace} or C{str} or
        C{unicode}.
        """
        if self.__defaultNamespace is not None:
            del self.__namespaces[self.__defaultNamespace]
        if isinstance(default_namespace, basestring):
            default_namespace = pyxb.namespace.NamespaceForURI(default_namespace, create_if_missing=True)
        if (default_namespace is not None) and default_namespace.isAbsentNamespace():
            raise pyxb.UsageError('Default namespace must not be an absent namespace')
        self.__defaultNamespace = default_namespace
        if self.__defaultNamespace is not None:
            # The default namespace has no prefix.
            self.__namespaces[self.__defaultNamespace] = None
    def namespacePrefixMap (self):
        """Return a map from Namespace instances to the prefix by which they
        are represented in the DOM document."""
        return self.__namespacePrefixMap.copy()
    __namespacePrefixMap = None
    def declareNamespace (self, namespace, prefix=None, add_to_map=False):
        """Add the given namespace as one to be used in this document.

        @param namespace: The namespace to be associated with the document.
        @type namespace: L{pyxb.namespace.Namespace}

        @keyword prefix: Optional prefix to be used with this namespace.  If
        not provided, a unique prefix is generated or a standard prefix is
        used, depending on the namespace.

        @keyword add_to_map: If C{False} (default), the prefix is not added to
        the namespace prefix map.  If C{True} it is added.  (Often, things
        added to the prefix map are preserved across resets, which is often
        not desired for specific prefix/namespace pairs).

        @todo: ensure multiple namespaces do not share the same prefix
        @todo: provide default prefix in L{pyxb.namespace.Namespace}
        @todo: support multiple prefixes for each namespace
        """
        if not isinstance(namespace, pyxb.namespace.Namespace):
            raise pyxb.UsageError('declareNamespace: must be given a namespace instance')
        if namespace.isAbsentNamespace():
            raise pyxb.UsageError('declareNamespace: namespace must not be an absent namespace')
        # Prefix resolution order: explicit argument, already-declared
        # prefix, the persistent prefix map, the namespace's own preferred
        # prefix, then a generated unique 'nsN' prefix.
        if prefix is None:
            prefix = self.__namespaces.get(namespace)
        if prefix is None:
            prefix = self.__namespacePrefixMap.get(namespace)
        if prefix is None:
            prefix = namespace.prefix()
        if prefix is None:
            self.__namespacePrefixCounter += 1
            prefix = 'ns%d' % (self.__namespacePrefixCounter,)
        if prefix == self.__namespaces.get(namespace):
            # Already declared with this prefix; nothing to do.
            return prefix
        if prefix in self.__prefixes:
            raise pyxb.LogicError('Prefix %s is already in use' % (prefix,))
        self.__namespaces[namespace] = prefix
        self.__prefixes.add(prefix)
        #_log.debug('%x declared namespace %s as %s', id(self), namespace, prefix)
        if add_to_map:
            self.__namespacePrefixMap[namespace] = prefix
        return prefix
    def namespacePrefix (self, namespace):
        """Return the prefix to be used for the given namespace.

        This will L{declare <declareNamespace>} the namespace if it has not
        yet been observed.

        @param namespace: The namespace for which a prefix is needed.  If the
        provided namespace is C{None} or an absent namespace, the C{None}
        value will be returned as the corresponding prefix.  A string URI is
        converted to the corresponding namespace instance.
        """
        if namespace is None:
            return None
        # Fixed: the string-to-Namespace conversion must happen before the
        # isAbsentNamespace() call; previously a string argument raised
        # AttributeError before it could be converted.
        if isinstance(namespace, basestring):
            namespace = pyxb.namespace.NamespaceForURI(namespace, create_if_missing=True)
        if namespace.isAbsentNamespace():
            return None
        if not (namespace in self.__namespaces):
            return self.declareNamespace(namespace)
        return self.__namespaces[namespace]
    def namespaces (self):
        """Return the set of Namespace instances known to this instance."""
        return self.__namespaces
    # Restore the namespace map to its default, which is the undeclared
    # namespace for XML schema instances (C{xsi})
    def __resetNamespacePrefixMap (self):
        self.__namespacePrefixMap = { pyxb.namespace.XMLSchema_instance : 'xsi' }
    def reset (self, prefix_map=False):
        """Reset this instance to the state it was when created.

        This flushes the list of namespaces for the document.  The
        defaultNamespace is not modified.

        @keyword prefix_map: If C{True}, the persistent namespace-to-prefix
        map is also restored to its default value."""
        self.__namespaces = { }
        if self.__defaultNamespace is not None:
            self.__namespaces[self.__defaultNamespace] = None
        self.__prefixes = set()
        self.__namespacePrefixCounter = 0
        if prefix_map:
            self.__resetNamespacePrefixMap()
    def __init__ (self, default_namespace=None, namespace_prefix_map=None, inherit_from=None):
        """Create a new namespace declaration configuration.

        @keyword default_namespace: Optional L{pyxb.namespace.Namespace}
        instance that serves as the default namespace (applies to unqualified
        names).

        @keyword namespace_prefix_map: Optional map from
        L{pyxb.namespace.Namespace} instances to C{str} values that are to be
        used as the corresponding namespace prefix when constructing
        U{qualified names<http://www.w3.org/TR/1999/REC-xml-names-19990114/#dt-qname>}.

        @keyword inherit_from: Optional instance of this class from which
        defaults are inherited.  Inheritance is overridden by values of other
        keywords in the initializer.
        """
        self.__prefixes = set()
        self.__namespacePrefixCounter = 0
        self.__namespaces = { }
        self.__defaultNamespace = None
        self.__resetNamespacePrefixMap()
        if inherit_from is not None:
            if default_namespace is None:
                default_namespace = inherit_from.defaultNamespace()
            self.__namespacePrefixMap.update(inherit_from.__namespacePrefixMap)
            # Fixed: this previously assigned to the misspelled attribute
            # __namespacePrefixCount, so the generated-prefix counter was
            # never actually inherited.
            self.__namespacePrefixCounter = inherit_from.__namespacePrefixCounter
            self.__namespaces.update(inherit_from.__namespaces)
            self.__prefixes.update(inherit_from.__prefixes)
        if default_namespace is not None:
            self.setDefaultNamespace(default_namespace)
        prefixes = set(self.__namespacePrefixMap.values())
        prefixes.update(self.__prefixes)
        if namespace_prefix_map is not None:
            # NOTE(review): duplicate prefixes are checked only within the
            # supplied map, not against inherited prefixes; confirm this is
            # the intended behavior.
            prefixes = set()
            for (ns, pfx) in namespace_prefix_map.items():
                ns = pyxb.namespace.NamespaceInstance(ns)
                if pfx in prefixes:
                    raise pyxb.LogicError('Cannot assign same prefix to multiple namespacess: %s' % (pfx,))
                prefixes.add(pfx)
                self.__namespacePrefixMap[ns] = pfx
class BindingDOMSupport (object):
    """This holds DOM-related information used when generating a DOM tree from
    a binding instance."""
    def implementation (self):
        """The DOMImplementation object to be used.

        Defaults to L{pyxb.utils.domutils.GetDOMImplementation()}, but can be
        overridden in the constructor call using the C{implementation}
        keyword."""
        return self.__implementation
    __implementation = None
    def document (self):
        """Return the document generated using this instance."""
        return self.__document
    __document = None
    def requireXSIType (self):
        """Indicates whether U{xsi:type<http://www.w3.org/TR/xmlschema-1/#xsi_type>} should be added to all elements.

        Certain WSDL styles and encodings seem to require explicit notation of
        the type of each element, even if it was specified in the schema.

        This value can only be set in the constructor."""
        return self.__requireXSIType
    __requireXSIType = None
    def reset (self, **kw):
        """Reset this instance to the state it was when created.

        This creates a new root document with no content, and flushes the list
        of namespaces for the document.  The defaultNamespace and
        requireXSIType are not modified."""
        self.__document = self.implementation().createDocument(None, None, None)
        self.__namespaceSupport.reset(**kw)
    @classmethod
    def Reset (self, **kw):
        """Reset the global defaults for default/prefix/namespace information."""
        self.__NamespaceSupport.reset(**kw)
    def __init__ (self, implementation=None, default_namespace=None, require_xsi_type=False, namespace_prefix_map=None):
        """Create a new instance used for building a single document.

        @keyword implementation: The C{xml.dom} implementation to use.
        Defaults to the one selected by L{GetDOMImplementation}.

        @keyword default_namespace: The namespace to configure as the default
        for the document.  If not provided, there is no default namespace.
        @type default_namespace: L{pyxb.namespace.Namespace}

        @keyword require_xsi_type: If C{True}, an U{xsi:type
        <http://www.w3.org/TR/xmlschema-1/#xsi_type>} attribute should be
        placed in every element.
        @type require_xsi_type: C{bool}

        @keyword namespace_prefix_map: A map from pyxb.namespace.Namespace
        instances to the preferred prefix to use for the namespace in xmlns
        declarations.  The default one assigns 'xsi' for the XMLSchema
        instance namespace.
        @type namespace_prefix_map: C{map} from L{pyxb.namespace.Namespace} to C{str}

        @raise pyxb.LogicError: the same prefix is associated with multiple
        namespaces in the C{namespace_prefix_map}.
        """
        if implementation is None:
            implementation = GetDOMImplementation()
        self.__implementation = implementation
        self.__requireXSIType = require_xsi_type
        self.__namespaceSupport = _BDSNamespaceSupport(default_namespace, namespace_prefix_map, inherit_from=self.__NamespaceSupport)
        self.reset()
    __namespaceSupport = None
    # Class-wide namespace configuration, inherited by each new instance.
    __NamespaceSupport = _BDSNamespaceSupport()
    # Namespace declarations required on the top element
    def defaultNamespace (self):
        """The default namespace for this instance"""
        return self.__namespaceSupport.defaultNamespace()
    @classmethod
    def DefaultNamespace (cls):
        """The global default namespace (used on instance creation if not overridden)"""
        return cls.__NamespaceSupport.defaultNamespace()
    def setDefaultNamespace (self, default_namespace):
        """Set the default namespace for this instance only."""
        return self.__namespaceSupport.setDefaultNamespace(default_namespace)
    @classmethod
    def SetDefaultNamespace (cls, default_namespace):
        """Set the global default namespace used for new instances."""
        return cls.__NamespaceSupport.setDefaultNamespace(default_namespace)
    def declareNamespace (self, namespace, prefix=None):
        """Declare a namespace within this instance only."""
        return self.__namespaceSupport.declareNamespace(namespace, prefix, add_to_map=True)
    @classmethod
    def DeclareNamespace (cls, namespace, prefix=None):
        """Declare a namespace that will be added to each created instance."""
        return cls.__NamespaceSupport.declareNamespace(namespace, prefix, add_to_map=True)
    def namespacePrefix (self, namespace):
        """Obtain the prefix for the given namespace using this instance's configuration."""
        return self.__namespaceSupport.namespacePrefix(namespace)
    def namespacePrefixMap (self):
        """Get the map from namespaces to prefixes for this instance"""
        return self.__namespaceSupport.namespacePrefixMap().copy()
    @classmethod
    def NamespacePrefixMap (cls):
        """Get the map of default namespace-to-prefix mappings"""
        return cls.__NamespaceSupport.namespacePrefixMap().copy()
    def addAttribute (self, element, expanded_name, value):
        """Add an attribute to the given element.

        @param element: The element to which the attribute should be added
        @type element: C{xml.dom.Element}
        @param expanded_name: The name of the attribute.  This may be a local
        name if the attribute is not in a namespace.
        @type expanded_name: L{pyxb.namespace.Namespace} or C{str} or C{unicode}
        @param value: The value of the attribute
        @type value: C{str} or C{unicode}
        """
        name = expanded_name
        namespace = None
        if isinstance(name, pyxb.namespace.ExpandedName):
            name = expanded_name.localName()
            namespace = expanded_name.namespace()
            prefix = self.namespacePrefix(namespace)
            if prefix is not None:
                name = '%s:%s' % (prefix, name)
        # NOTE(review): "namespace" here is a Namespace instance rather than
        # a URI string, while DOM setAttributeNS nominally takes a URI;
        # confirm downstream consumers depend on the current behavior before
        # changing it.
        element.setAttributeNS(namespace, name, value)
    def finalize (self):
        """Do the final cleanup after generating the tree.  This makes sure
        that the document element includes XML Namespace declarations for all
        namespaces referenced in the tree.

        @return: The document that has been created.
        @rtype: C{xml.dom.Document}"""
        for ( ns, pfx ) in self.__namespaceSupport.namespaces().items():
            if pfx is None:
                self.document().documentElement.setAttributeNS(pyxb.namespace.XMLNamespaces.uri(), 'xmlns', ns.uri())
            else:
                self.document().documentElement.setAttributeNS(pyxb.namespace.XMLNamespaces.uri(), 'xmlns:%s' % (pfx,), ns.uri())
        return self.document()
    def createChildElement (self, expanded_name, parent=None):
        """Create a new element node in the tree.

        @param expanded_name: The name of the element.  A plain string
        indicates a name in no namespace.
        @type expanded_name: L{pyxb.namespace.ExpandedName} or C{str} or C{unicode}

        @keyword parent: The node in the tree that will serve as the child's
        parent.  If C{None}, the document element is used.  (If there is no
        document element, then this call creates it as a side-effect.)

        @return: A newly created DOM element
        @rtype: C{xml.dom.Element}
        """
        if parent is None:
            parent = self.document().documentElement
        if parent is None:
            # No document element yet: the new element becomes it.
            parent = self.__document
        if isinstance(expanded_name, (str, unicode)):
            expanded_name = pyxb.namespace.ExpandedName(None, expanded_name)
        if not isinstance(expanded_name, pyxb.namespace.ExpandedName):
            raise pyxb.LogicError('Invalid type %s for expanded name' % (type(expanded_name),))
        ns = expanded_name.namespace()
        name = expanded_name.localName()
        ns_uri = xml.dom.EMPTY_NAMESPACE
        pfx = self.namespacePrefix(ns)
        if pfx is not None:
            ns_uri = ns.uri()
            name = '%s:%s' % (pfx, name)
        element = self.__document.createElementNS(ns_uri, name)
        return parent.appendChild(element)
    def _makeURINodeNamePair (self, node):
        """Convert namespace information from a DOM node to text for new DOM node.

        The namespaceURI and nodeName are extracted and parsed.  The namespace
        (if any) is registered within the document, along with any prefix from
        the node name.  A pair is returned where the first element is the
        namespace URI or C{None}, and the second is a QName to be used for the
        expanded name within this document.

        @param node: An xml.dom.Node instance, presumably from a wildcard match.
        @rtype: C{( str, str )}"""
        ns = None
        if node.namespaceURI is not None:
            ns = pyxb.namespace.NamespaceForURI(node.namespaceURI, create_if_missing=True)
        if node.ELEMENT_NODE == node.nodeType:
            name = node.tagName
        elif node.ATTRIBUTE_NODE == node.nodeType:
            name = node.name
            # saxdom uses the uriTuple as the name field while minidom uses
            # the QName.  @todo saxdom should be fixed.
            if isinstance(name, tuple):
                name = name[1]
        else:
            raise pyxb.UsageError('Unable to determine name from DOM node %s' % (node,))
        pfx = None
        local_name = name
        if 0 < name.find(':'):
            # Fixed: this previously split the not-yet-assigned node_name
            # variable, raising NameError for every prefixed QName.
            (pfx, local_name) = name.split(':', 1)
            if ns is None:
                raise pyxb.LogicError('QName with prefix but no available namespace')
        ns_uri = None
        node_name = local_name
        if ns is not None:
            ns_uri = ns.uri()
            self.declareNamespace(ns, pfx)
            if pfx is None:
                pfx = self.namespacePrefix(ns)
            if pfx is not None:
                node_name = '%s:%s' % (pfx, local_name)
        return (ns_uri, node_name)
    def _deepClone (self, node, docnode):
        """Recursively clone C{node} (element, text, attribute, or comment)
        into the document owned by C{docnode}."""
        if node.ELEMENT_NODE == node.nodeType:
            (ns_uri, node_name) = self._makeURINodeNamePair(node)
            clone_node = docnode.createElementNS(ns_uri, node_name)
            attrs = node.attributes
            for ai in xrange(attrs.length):
                clone_node.setAttributeNodeNS(self._deepClone(attrs.item(ai), docnode))
            for child in node.childNodes:
                clone_node.appendChild(self._deepClone(child, docnode))
            return clone_node
        if node.TEXT_NODE == node.nodeType:
            return docnode.createTextNode(node.data)
        if node.ATTRIBUTE_NODE == node.nodeType:
            (ns_uri, node_name) = self._makeURINodeNamePair(node)
            clone_node = docnode.createAttributeNS(ns_uri, node_name)
            clone_node.value = node.value
            return clone_node
        if node.COMMENT_NODE == node.nodeType:
            return docnode.createComment(node.data)
        raise pyxb.IncompleteImplementationError('Unable to clone type %s DOM node %s' % (node.nodeType, node))
    def cloneIntoImplementation (self, node):
        """Create a deep copy of the node in the target implementation.

        Used when converting a DOM instance from one implementation (e.g.,
        L{pyxb.utils.saxdom}) into another (e.g., L{xml.dom.minidom})."""
        new_doc = self.implementation().createDocument(None, None, None)
        return self._deepClone(node, new_doc)
    def appendChild (self, child, parent):
        """Add the child to the parent.

        @note: If the child and the parent use different DOM implementations,
        this operation will clone the child into a new instance, and give that
        to the parent.

        @param child: The value to be appended
        @type child: C{xml.dom.Node}
        @param parent: The new parent of the child
        @type parent: C{xml.dom.Node}
        @rtype: C{xml.dom.Node}"""
        # @todo This check is incomplete; is there a standard way to find the
        # implementation of an xml.dom.Node instance?
        if isinstance(child, pyxb.utils.saxdom.Node):
            child = self.cloneIntoImplementation(child)
        return parent.appendChild(child)
## Local Variables:
## fill-column:78
## End:
| jonfoster/pyxb1 | pyxb/utils/domutils.py | Python | apache-2.0 | 28,251 |
# test_utils.py -- Tests for git compatibility utilities
# Copyright (C) 2010 Google, Inc.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor,
# Boston, MA 02110-1301, USA.
"""Tests for git compatibility utilities."""
from dulwich.tests import (
SkipTest,
TestCase,
)
from dulwich.tests.compat import utils
class GitVersionTests(TestCase):
    """Tests for utils.git_version() and utils.require_git_version().

    The real ``utils.run_git`` is replaced with a stub that reports
    ``self._version_str``, so no git binary is required.
    """
    def setUp(self):
        super(GitVersionTests, self).setUp()
        # Remember the real implementation so tearDown can restore it.
        self._orig_run_git = utils.run_git
        self._version_str = None  # tests can override to set stub version
        def run_git(args, **unused_kwargs):
            # Stub: the only invocation expected is ``git --version``.
            self.assertEqual(['--version'], args)
            return 0, self._version_str
        utils.run_git = run_git
    def tearDown(self):
        super(GitVersionTests, self).tearDown()
        # Undo the monkeypatch applied in setUp.
        utils.run_git = self._orig_run_git
    def test_git_version_none(self):
        # Unparseable output yields None rather than an exception.
        self._version_str = b'not a git version'
        self.assertEqual(None, utils.git_version())
    def test_git_version_3(self):
        # Three-component versions are padded with a trailing zero.
        self._version_str = b'git version 1.6.6'
        self.assertEqual((1, 6, 6, 0), utils.git_version())
    def test_git_version_4(self):
        self._version_str = b'git version 1.7.0.2'
        self.assertEqual((1, 7, 0, 2), utils.git_version())
    def test_git_version_extra(self):
        # Components beyond the fourth (e.g. commit suffix) are ignored.
        self._version_str = b'git version 1.7.0.3.295.gd8fa2'
        self.assertEqual((1, 7, 0, 3), utils.git_version())
    def assertRequireSucceeds(self, required_version):
        # require_git_version signals an unmet requirement via SkipTest.
        try:
            utils.require_git_version(required_version)
        except SkipTest:
            self.fail()
    def assertRequireFails(self, required_version):
        self.assertRaises(SkipTest, utils.require_git_version,
                          required_version)
    def test_require_git_version(self):
        try:
            self._version_str = b'git version 1.6.6'
            self.assertRequireSucceeds((1, 6, 6))
            self.assertRequireSucceeds((1, 6, 6, 0))
            self.assertRequireSucceeds((1, 6, 5))
            self.assertRequireSucceeds((1, 6, 5, 99))
            self.assertRequireFails((1, 7, 0))
            self.assertRequireFails((1, 7, 0, 2))
            # Five-component requirements are rejected outright.
            self.assertRaises(ValueError, utils.require_git_version,
                              (1, 6, 6, 0, 0))
            self._version_str = b'git version 1.7.0.2'
            self.assertRequireSucceeds((1, 6, 6))
            self.assertRequireSucceeds((1, 6, 6, 0))
            self.assertRequireSucceeds((1, 7, 0))
            self.assertRequireSucceeds((1, 7, 0, 2))
            self.assertRequireFails((1, 7, 0, 3))
            self.assertRequireFails((1, 7, 1))
        except SkipTest as e:
            # This test is designed to catch all SkipTest exceptions.
            self.fail('Test unexpectedly skipped: %s' % e)
| seewindcn/tortoisehg | src/ext/dulwich/tests/compat/test_utils.py | Python | gpl-2.0 | 3,422 |
"""
Prepends a list of files with text.
"""
import os
import sys
def prepend(prefix, ext):
    """Rename every file in the current directory whose extension matches
    *ext* so that its name begins with *prefix*.

    Files that fail to rename are reported and skipped.  (This module uses
    Python 2 print statements.)
    """
    cwd = os.getcwd()
    for filename in os.listdir(cwd):
        # Skip entries whose extension does not match (ext includes the dot).
        if os.path.splitext(filename)[1] != ext:
            continue
        newname = prefix + filename
        try:
            os.rename(filename, newname)
        except Exception as ex:
            # Report the failure and continue with the remaining files.
            print ' *** ', ex
            continue
        print filename, '->', newname
def main():
    """Entry point: validate command-line arguments and run prepend().

    Returns a process exit status: 1 on usage error, 0 on success.
    """
    if len(sys.argv) <= 2:
        print 'usage: pre <filename-prefix> <extension>'
        return 1
    prepend(sys.argv[1], sys.argv[2])
    return 0
if __name__ == '__main__':
    # Propagate main()'s status to the shell; previously the return value
    # was discarded, so usage errors still exited with status 0.
    sys.exit(main())
| joeyespo/prepend | prepend.py | Python | mit | 641 |
"""
These settings are used by the ``manage.py`` command.
With normal tests we want to use the fastest possible way which is an
in-memory sqlite database but if you want to create South south_migrations you
need a persistant database.
Unfortunately there seems to be an issue with either South or syncdb so that
defining two routers ("default" and "south") does not work.
"""
from distutils.version import StrictVersion
import django
from .test_settings import * # NOQA
# Persistent on-disk sqlite database (unlike the in-memory database used by
# the normal test settings) so South can create south_migrations.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': 'db.sqlite',
    }
}
django_version = django.get_version()
# South is only needed (and only works) on Django versions before 1.7,
# which introduced built-in schema migrations.
if StrictVersion(django_version) < StrictVersion('1.7'):
    INSTALLED_APPS.append('south', )
| bitmazk/django-booking | booking/tests/settings.py | Python | mit | 725 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import awx.main.fields
from awx.main.migrations import _migration_utils as migration_utils
from awx.main.migrations._multi_cred import migrate_workflow_cred, migrate_workflow_cred_reverse
from awx.main.migrations._scan_jobs import remove_scan_type_nodes
class Migration(migrations.Migration):
    """AWX 3.3.0 schema migration: saved launch-time configurations.

    Adds prompted-field storage and multi-credential M2M relations to
    Schedule and workflow nodes, migrates data off the legacy single
    ``credential`` FK, introduces the ``JobLaunchConfig`` model, and
    converts the ``ask_*_on_launch`` flags to ``AskForField``.
    """

    dependencies = [
        ('main', '0013_v330_multi_credential'),
    ]

    operations = [
        # --- Prompted launch-time fields on Schedule ---
        migrations.AddField(
            model_name='schedule',
            name='char_prompts',
            field=awx.main.fields.JSONField(default=dict, blank=True),
        ),
        migrations.AddField(
            model_name='schedule',
            name='credentials',
            field=models.ManyToManyField(related_name='schedules', to='main.Credential'),
        ),
        migrations.AddField(
            model_name='schedule',
            name='inventory',
            field=models.ForeignKey(related_name='schedules', on_delete=django.db.models.deletion.SET_NULL, default=None, blank=True, to='main.Inventory', null=True),
        ),
        migrations.AddField(
            model_name='schedule',
            name='survey_passwords',
            field=awx.main.fields.JSONField(default=dict, editable=False, blank=True),
        ),
        # --- Multi-credential and prompt storage on workflow nodes ---
        migrations.AddField(
            model_name='workflowjobnode',
            name='credentials',
            field=models.ManyToManyField(related_name='workflowjobnodes', to='main.Credential'),
        ),
        migrations.AddField(
            model_name='workflowjobnode',
            name='extra_data',
            field=awx.main.fields.JSONField(default=dict, blank=True),
        ),
        migrations.AddField(
            model_name='workflowjobnode',
            name='survey_passwords',
            field=awx.main.fields.JSONField(default=dict, editable=False, blank=True),
        ),
        migrations.AddField(
            model_name='workflowjobtemplatenode',
            name='credentials',
            field=models.ManyToManyField(related_name='workflowjobtemplatenodes', to='main.Credential'),
        ),
        migrations.AddField(
            model_name='workflowjobtemplatenode',
            name='extra_data',
            field=awx.main.fields.JSONField(default=dict, blank=True),
        ),
        migrations.AddField(
            model_name='workflowjobtemplatenode',
            name='survey_passwords',
            field=awx.main.fields.JSONField(default=dict, editable=False, blank=True),
        ),
        # Run data migration before removing the old credential field
        migrations.RunPython(migration_utils.set_current_apps_for_migrations, migrations.RunPython.noop),
        migrations.RunPython(migrate_workflow_cred, migrate_workflow_cred_reverse),
        migrations.RunPython(remove_scan_type_nodes, migrations.RunPython.noop),
        migrations.RemoveField(
            model_name='workflowjobnode',
            name='credential',
        ),
        migrations.RemoveField(
            model_name='workflowjobtemplatenode',
            name='credential',
        ),
        # --- New model capturing the configuration a job was launched with ---
        migrations.CreateModel(
            name='JobLaunchConfig',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('extra_data', awx.main.fields.JSONField(blank=True, default=dict)),
                ('survey_passwords', awx.main.fields.JSONField(blank=True, default=dict, editable=False)),
                ('char_prompts', awx.main.fields.JSONField(blank=True, default=dict)),
                ('credentials', models.ManyToManyField(related_name='joblaunchconfigs', to='main.Credential')),
                ('inventory', models.ForeignKey(blank=True, default=None, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='joblaunchconfigs', to='main.Inventory')),
                ('job', models.OneToOneField(editable=False, on_delete=django.db.models.deletion.CASCADE, related_name='launch_config', to='main.UnifiedJob')),
            ],
        ),
        # --- Convert ask_* flags to AskForField ---
        migrations.AddField(
            model_name='workflowjobtemplate',
            name='ask_variables_on_launch',
            field=awx.main.fields.AskForField(blank=True, default=False),
        ),
        migrations.AlterField(
            model_name='jobtemplate',
            name='ask_credential_on_launch',
            field=awx.main.fields.AskForField(blank=True, default=False),
        ),
        migrations.AlterField(
            model_name='jobtemplate',
            name='ask_diff_mode_on_launch',
            field=awx.main.fields.AskForField(blank=True, default=False),
        ),
        migrations.AlterField(
            model_name='jobtemplate',
            name='ask_inventory_on_launch',
            field=awx.main.fields.AskForField(blank=True, default=False),
        ),
        migrations.AlterField(
            model_name='jobtemplate',
            name='ask_job_type_on_launch',
            field=awx.main.fields.AskForField(blank=True, default=False),
        ),
        migrations.AlterField(
            model_name='jobtemplate',
            name='ask_limit_on_launch',
            field=awx.main.fields.AskForField(blank=True, default=False),
        ),
        migrations.AlterField(
            model_name='jobtemplate',
            name='ask_skip_tags_on_launch',
            field=awx.main.fields.AskForField(blank=True, default=False),
        ),
        migrations.AlterField(
            model_name='jobtemplate',
            name='ask_tags_on_launch',
            field=awx.main.fields.AskForField(blank=True, default=False),
        ),
        migrations.AlterField(
            model_name='jobtemplate',
            name='ask_variables_on_launch',
            field=awx.main.fields.AskForField(blank=True, default=False),
        ),
        migrations.AlterField(
            model_name='jobtemplate',
            name='ask_verbosity_on_launch',
            field=awx.main.fields.AskForField(blank=True, default=False),
        ),
    ]
| GoogleCloudPlatform/sap-deployment-automation | third_party/github.com/ansible/awx/awx/main/migrations/0014_v330_saved_launchtime_configs.py | Python | apache-2.0 | 6,144 |
from django.core.exceptions import ImproperlyConfigured
from django.utils.importlib import import_module
from .settings import URL_ALIAS_MODULES
def get_url_alias_modules(collect=None):
    """Import and instantiate the url alias classes named by dotted path.

    :param collect: iterable of dotted paths (``package.module.ClassName``);
        defaults to the ``URL_ALIAS_MODULES`` setting.
    :returns: list of alias class *instances*.
    :raises ImproperlyConfigured: when a module cannot be imported or the
        named attribute does not exist.
    """
    if collect is None:
        collect = URL_ALIAS_MODULES
    alias_modules = []
    for path in collect:
        # Split "package.module.ClassName" into module path and class name.
        i = path.rfind('.')
        module, attr = path[:i], path[i+1:]
        try:
            mod = import_module(module)
        except ImportError, e:
            raise ImproperlyConfigured('Error importing url alias module %s: "%s"' % (module, e))
        try:
            klass = getattr(mod, attr)
        except AttributeError:
            raise ImproperlyConfigured('Module "%s" does not define a "%s" url alias class' % (module, attr))
        # Instances, not classes, are collected.
        alias_modules.append(klass())
    return alias_modules
# The modules listed in settings, resolved once at import time.
default_url_alias_modules = get_url_alias_modules()
| ddanier/django_url_alias | django_url_alias/aliases.py | Python | bsd-3-clause | 861 |
"""Integration project URL Configuration"""
from django.contrib import admin
from django.urls import re_path
from django.views.generic import TemplateView
# Route table: the Django admin plus a static landing page rendered
# straight from the ``home.html`` template.
home = TemplateView.as_view(template_name="home.html")

urlpatterns = [
    re_path(r"^admin/", admin.site.urls),
    re_path(r"^$", home, name="home"),
]
| jambonsw/django-improved-user | example_integration_project/config/urls.py | Python | bsd-2-clause | 312 |
#!/usr/bin/env python
'''
Validate conversion between quaternions and euler angles.
Usage: testEuler.py rxyz 10
See: http://matthew-brett.github.io/transforms3d/reference/transforms3d.euler.html
'''
import sys
import numpy as np
from euclid import Quaternion, Vector3
from transforms3d.euler import quat2euler, euler2quat
from transforms3d.taitbryan import quat2euler as tb_quat2euler
from transforms3d.taitbryan import euler2quat as tb_euler2quat
# Axes convention (transforms3d notation, e.g. 'szxy'); defaults to 'szxy'.
try:
    axes_order = sys.argv[1] or 'szxy'
except IndexError:
    axes_order = 'szxy'
# Number of sample angles. argv entries are strings, so convert: the old
# code passed the raw string straight into np.linspace (TypeError).
try:
    n_angles = int(sys.argv[2])
except (IndexError, ValueError):
    n_angles = 5

# Define quaternion rotations about X, Y, and Z axes
qxs = [Quaternion.new_rotate_axis(a, Vector3(1, 0, 0)) for a in np.linspace(0, np.pi, n_angles)]
qys = [Quaternion.new_rotate_axis(a, Vector3(0, 1, 0)) for a in np.linspace(0, np.pi, n_angles)]
qzs = [Quaternion.new_rotate_axis(a, Vector3(0, 0, 1)) for a in np.linspace(0, np.pi, n_angles)]

# NOTE: the original wrote print('X: {}').format(...), which calls .format()
# on print()'s return value (None) and raises AttributeError on Python 3.
# The format call now happens inside print().
print("Successive rotations about X, Y, and Z axes as converted to Euler angles using Quaternion.get_euler() [same as axes='szxy']")
print('X: {}'.format([q.get_euler() for q in qxs]))
print('Y: {}'.format([q.get_euler() for q in qys]))
print('Z: {}'.format([q.get_euler() for q in qzs]))

print("Successive rotations about X, Y, and Z axes as converted to Euler angles using quat2euler(), axes='{}'".format(axes_order))
print('X: {}'.format([quat2euler((q.w, q.x, q.y, q.z), axes=axes_order) for q in qxs]))
print('Y: {}'.format([quat2euler((q.w, q.x, q.y, q.z), axes=axes_order) for q in qys]))
print('Z: {}'.format([quat2euler((q.w, q.x, q.y, q.z), axes=axes_order) for q in qzs]))

print("Successive rotations about X, Y, and Z axes as converted to Euler angles using tb_quat2euler(), [same as axes='szyx']")
print('X: {}'.format([tb_quat2euler((q.w, q.x, q.y, q.z)) for q in qxs]))
print('Y: {}'.format([tb_quat2euler((q.w, q.x, q.y, q.z)) for q in qys]))
print('Z: {}'.format([tb_quat2euler((q.w, q.x, q.y, q.z)) for q in qzs]))

print('\nGenerating quaterion rotations using euler2quat()...')
qtxs = [euler2quat(a, 0, 0, axes=axes_order) for a in np.linspace(0, np.pi, n_angles)]
qtys = [euler2quat(0, a, 0, axes=axes_order) for a in np.linspace(0, np.pi, n_angles)]
qtzs = [euler2quat(0, 0, a, axes=axes_order) for a in np.linspace(0, np.pi, n_angles)]

print("Successive rotations about X, Y, and Z axes as converted to Euler angles using quat2euler(), [same as axes='sxyz']")
print('X: {}'.format([quat2euler(q, axes=axes_order) for q in qtxs]))
print('Y: {}'.format([quat2euler(q, axes=axes_order) for q in qtys]))
print('Z: {}'.format([quat2euler(q, axes=axes_order) for q in qtzs]))

print("\nIt seems that there is a 'gimbol lock' problem where instead of '0.0's we are getting +/- 3.141592 values.")
# -*- coding: utf-8 -*-
"""
In the 20×20 grid below, four numbers along a diagonal line have been marked in red.
08 02 22 97 38 15 00 40 00 75 04 05 07 78 52 12 50 77 91 08
49 49 99 40 17 81 18 57 60 87 17 40 98 43 69 48 04 56 62 00
81 49 31 73 55 79 14 29 93 71 40 67 53 88 30 03 49 13 36 65
52 70 95 23 04 60 11 42 69 24 68 56 01 32 56 71 37 02 36 91
22 31 16 71 51 67 63 89 41 92 36 54 22 40 40 28 66 33 13 80
24 47 32 60 99 03 45 02 44 75 33 53 78 36 84 20 35 17 12 50
32 98 81 28 64 23 67 10 26 38 40 67 59 54 70 66 18 38 64 70
67 26 20 68 02 62 12 20 95 63 94 39 63 08 40 91 66 49 94 21
24 55 58 05 66 73 99 26 97 17 78 78 96 83 14 88 34 89 63 72
21 36 23 09 75 00 76 44 20 45 35 14 00 61 33 97 34 31 33 95
78 17 53 28 22 75 31 67 15 94 03 80 04 62 16 14 09 53 56 92
16 39 05 42 96 35 31 47 55 58 88 24 00 17 54 24 36 29 85 57
86 56 00 48 35 71 89 07 05 44 44 37 44 60 21 58 51 54 17 58
19 80 81 68 05 94 47 69 28 73 92 13 86 52 17 77 04 89 55 40
04 52 08 83 97 35 99 16 07 97 57 32 16 26 26 79 33 27 98 66
88 36 68 87 57 62 20 72 03 46 33 67 46 55 12 32 63 93 53 69
04 42 16 73 38 25 39 11 24 94 72 18 08 46 29 32 40 62 76 36
20 69 36 41 72 30 23 88 34 62 99 69 82 67 59 85 74 04 36 16
20 73 35 29 78 31 90 01 74 31 49 71 48 86 81 16 23 57 05 54
01 70 54 71 83 51 54 69 16 92 33 48 61 43 52 01 89 19 67 48
The product of these numbers is 26 × 63 × 78 × 14 = 1788696.
What is the greatest product of four adjacent numbers in the same direction (up, down, left, right, or diagonally) in the 20×20 grid?
"""
def multDelta(i, j, dx, dy, g=None):
    """Return the product of four grid cells starting at (i, j) and
    stepping by (dx, dy), or 0 when a full run of four does not fit.

    ``g`` defaults to the module-level ``grid`` so existing callers are
    unchanged; passing an explicit grid makes the helper reusable/testable.

    Fixes two defects in the original:
    - the anti-diagonal direction (dy=-1) never checked ``j >= 0``, so
      Python's negative indexing silently wrapped runs around to the far
      column, producing bogus (possibly larger) products;
    - runs truncated by the grid edge returned partial products of fewer
      than four cells, which the problem statement does not allow.
    """
    if g is None:
        g = grid
    size = len(g)
    res = 1
    for _ in range(4):
        # Guard both ends of each axis to prevent negative-index wraparound.
        if not (0 <= i < size and 0 <= j < size):
            return 0
        res *= g[i][j]
        i += dx
        j += dy
    return res
grid = [ [8, 2, 22, 97, 38, 15, 0, 40, 0, 75, 4, 5, 7, 78, 52, 12, 50, 77, 91, 8],
[49, 49, 99, 40, 17, 81, 18, 57, 60, 87, 17, 40, 98, 43, 69, 48, 4, 56, 62, 0],
[81, 49, 31, 73, 55, 79, 14, 29, 93, 71, 40, 67, 53, 88, 30, 3, 49, 13, 36, 65],
[52, 70, 95, 23, 4, 60, 11, 42, 69, 24, 68, 56, 1, 32, 56, 71, 37, 2, 36, 91],
[22, 31, 16, 71, 51, 67, 63, 89, 41, 92, 36, 54, 22, 40, 40, 28, 66, 33, 13, 80],
[24, 47, 32, 60, 99, 3, 45, 2, 44, 75, 33, 53, 78, 36, 84, 20, 35, 17, 12, 50],
[32, 98, 81, 28, 64, 23, 67, 10, 26, 38, 40, 67, 59, 54, 70, 66, 18, 38, 64, 70],
[67, 26, 20, 68, 2, 62, 12, 20, 95, 63, 94, 39, 63, 8, 40, 91, 66, 49, 94, 21],
[24, 55, 58, 5, 66, 73, 99, 26, 97, 17, 78, 78, 96, 83, 14, 88, 34, 89, 63, 72],
[21, 36, 23, 9, 75, 0, 76, 44, 20, 45, 35, 14, 0, 61, 33, 97, 34, 31, 33, 95],
[78, 17, 53, 28, 22, 75, 31, 67, 15, 94, 3, 80, 4, 62, 16, 14, 9, 53, 56, 92],
[16, 39, 5, 42, 96, 35, 31, 47, 55, 58, 88, 24, 0, 17, 54, 24, 36, 29, 85, 57],
[86, 56, 0, 48, 35, 71, 89, 7, 5, 44, 44, 37, 44, 60, 21, 58, 51, 54, 17, 58],
[19, 80, 81, 68, 5, 94, 47, 69, 28, 73, 92, 13, 86, 52, 17, 77, 4, 89, 55, 40],
[4, 52, 8, 83, 97, 35, 99, 16, 7, 97, 57, 32, 16, 26, 26, 79, 33, 27, 98, 66],
[88, 36, 68, 87, 57, 62, 20, 72, 3, 46, 33, 67, 46, 55, 12, 32, 63, 93, 53, 69],
[4, 42, 16, 73, 38, 25, 39, 11, 24, 94, 72, 18, 8, 46, 29, 32, 40, 62, 76, 36],
[20, 69, 36, 41, 72, 30, 23, 88, 34, 62, 99, 69, 82, 67, 59, 85, 74, 4, 36, 16],
[20, 73, 35, 29, 78, 31, 90, 1, 74, 31, 49, 71, 48, 86, 81, 16, 23, 57, 5, 54],
[1, 70, 54, 71, 83, 51, 54, 69, 16, 92, 33, 48, 61, 43, 52, 1, 89, 19, 67, 48] ]
# Scan every starting cell in all four directions (the reverse directions
# are redundant: a run read backwards has the same product) and report the
# largest product found.
best_products = []
for row in range(20):
    for col in range(20):
        candidates = (
            multDelta(row, col, 0, 1),   # horizontal
            multDelta(row, col, 1, 0),   # vertical
            multDelta(row, col, 1, 1),   # diagonal
            multDelta(row, col, 1, -1),  # anti-diagonal
        )
        best_products.append(max(candidates))
print(max(best_products))
| sunwukong123/projectE | 11 - Largest product in a grid.py | Python | gpl-3.0 | 3,921 |
# -*- coding: utf-8 -*-
#
# Eta documentation build configuration file, created by
# sphinx-quickstart on Thu Dec 8 12:36:06 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
import sphinx_rtd_theme
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Eta'
copyright = u'2017, TypeLead, Inc'
author = u'Rahul Muttineni'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
# @VERSION_CHANGE@
version = u'0.0.9'
# The full version, including alpha/beta/rc tags.
# NOTE(review): keep in sync with `version` above; the @VERSION_CHANGE@
# markers presumably let a release script locate these lines -- confirm.
release = u'0.0.9b1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#
# today = ''
#
# Else, today_fmt is used as the format for a strftime call.
#
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
highlight_language = 'eta'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "eta_lang_theme"  # custom theme, resolved via html_theme_path below
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
html_theme_options = {'logo_only': True}
# Add any paths that contain custom themes here, relative to this directory.
# sphinx_rtd_theme's install path is searched first, then the local "themes"
# directory (which is where "eta_lang_theme" is expected to live).
html_theme_path = [sphinx_rtd_theme.get_html_theme_path(), "themes"]
# The name for this set of Sphinx documents.
# "<project> v<release> documentation" by default.
# @VERSION_CHANGE@
# html_title = u'Eta v0.0.9'
# A shorter title for the navigation bar. Default is the same as html_title.
#
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#
# html_logo = None
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#
html_favicon = 'favicon.ico'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#
# html_extra_path = []
# If not None, a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
# The empty string is equivalent to '%b %d, %Y'.
#
# html_last_updated_fmt = None
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#
# html_additional_pages = {}
# If false, no module index is generated.
#
# html_domain_indices = True
# If false, no index is generated.
#
# html_use_index = True
# If true, the index is split into individual pages for each letter.
#
# html_split_index = False
# If true, links to the reST sources are added to the pages.
#
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr', 'zh'
#
# html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# 'ja' uses this config value.
# 'zh' user can custom change `jieba` dictionary path.
#
# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#
# html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'Etadoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'Eta.tex', u'Eta Documentation',
u'Rahul Muttineni', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#
# latex_use_parts = False
# If true, show page references after internal links.
#
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
#
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
#
# latex_appendices = []
# It false, will not define \strong, \code, itleref, \crossref ... but only
# \sphinxstrong, ..., \sphinxtitleref, ... To help avoid clash with user added
# packages.
#
# latex_keep_old_macro_names = True
# If false, no module index is generated.
#
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'eta', u'Eta Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'Eta', u'Eta Documentation',
author, 'Eta', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#
# texinfo_appendices = []
# If false, no module index is generated.
#
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#
# texinfo_no_detailmenu = False
# Activate the theme.
| pparkkin/eta | docs/source/conf.py | Python | bsd-3-clause | 9,885 |
import random
import json
import pyimgur
import traceback
import datetime
import re
import math
from typing import Callable
from typing import List
from PIL import Image, ImageFont, ImageDraw, ImageSequence
from os import path
from os import listdir
from os import makedirs
from io import BytesIO
from textwrap import shorten
import discord
from discord.ext import commands
import utils
import checks
from cogs.tagsystem import TagItem
from cogs.requestsystem import request
class Images(utils.SessionCog):
"""Image related commands."""
    def __init__(self, bot):
        # Keep a direct reference to the meme-template mapping stored in the
        # bot's content store; the `meme` commands read and mutate it.
        super(Images, self).__init__(bot)
        self.memes = bot.content.memes
async def get_random_image(self, album_id):
"""Get a random image from an imgur album."""
image_list = (await self.bot.loop.run_in_executor(self.bot.imgur.get_album(album_id))).images
return random.choice(image_list).link
    async def baka_image(self, ctx, t: str):
        """Render the "baka" template with *t* as the addressed name and upload it.

        Names longer than 10 characters are truncated to 7 plus '...' so
        they fit the speech bubble in the template image.
        """
        t = t[:7] + '...' if len(t) > 10 else t
        i = 'you idiot...'
        f = ImageFont.truetype(path.join("config", "ZinPenKeba-R.otf"), 12)
        im = Image.open(path.join('images', 'collections', 'pout', 'baka.png'))
        d = ImageDraw.Draw(im)
        # Measure both lines so each can be centred on a fixed anchor point;
        # the (250,125)/(255,150) anchors are specific to this template.
        tw, th = d.textsize(t, f)
        iw, ih = d.textsize(i, f)
        d.text((250 - (tw // 2), 125 - (th // 2)), t, (0, 0, 0), font=f)
        d.text((255 - (iw // 2), 150 - (ih // 2)), i, (0, 0, 0), font=f)
        # Serialize to an in-memory PNG and upload without touching disk.
        with BytesIO() as fp:
            im.save(fp, 'PNG')
            fp.seek(0)
            await self.bot.send_file(ctx.message.channel, fp, filename='baka.png')
@commands.command(pass_context=True, aliases=('b',))
async def baka(self, ctx, user: str=None):
"""...Baka"""
if user is None:
t = ctx.message.author.display_name
else:
try:
t = commands.MemberConverter(ctx, user).convert().display_name
except commands.BadArgument:
t = user
await self.baka_image(ctx, t)
    @commands.command()
    @checks.is_owner()
    async def convert_images_to_tag(self):
        """Use to convert preexisting image libraries to the tag system.

        One-shot, owner-only migration: walks every folder under
        images/collections and registers each file as a TagItem whose tag
        is the folder name.  The item timestamp is taken from the file's
        modification time.
        """
        col_count = 0
        tag_count = 0
        for col in listdir(path.join('images', 'collections')):
            coldir = path.join('images', 'collections', col)
            for im in listdir(coldir):
                # noinspection PyBroadException
                try:
                    t = TagItem(
                        self.bot.user.id,
                        str(datetime.datetime.fromtimestamp(int(path.getmtime(path.join(coldir, im))))),
                        [col],
                        text=None,
                        image=path.join(coldir, im)
                    )
                    self.bot.tag_map[col] = t
                    tag_count += 1
                except:
                    # Deliberately broad: report the traceback in chat and
                    # keep importing the remaining files.
                    await self.bot.say(traceback.format_exc())
            col_count += 1
        await self.bot.say("Imported {} items into {} tags. :ok_hand:".format(tag_count, col_count))
    @commands.group(pass_context=True, invoke_without_command=True, aliases=('i',))
    async def image(self, ctx, category: str, filetype: str=None):
        """Image commands.

        Get an image from a category or search through several online services with subcommands."""
        def predicate(tag: TagItem):
            # Only tags that actually carry an image qualify; optionally
            # restrict by file extension suffix (e.g. "gif", "png").
            if tag.image is None:
                return False
            if filetype is None:
                return True
            if tag.image.endswith(filetype):
                return True
            return False
        # tag_map.get raises KeyError for unknown categories; treat that
        # the same as "category exists but has no matching image".
        try:
            t = self.bot.tag_map.get(ctx.message, category, predicate=predicate)
        except KeyError:
            t = None
        if t is None:
            await self.bot.say("None found.")
            return
        await t.run(ctx)
    @image.command(pass_context=True, name='add')
    @request()
    @checks.is_owner()
    @checks.is_moderator()
    async def _image_add(self, ctx, collection: str, *link: str):
        """Request to add an image to a category.

        If you are not the owner, sends a request to them.
        The owner can add images and also new categories using this command."""
        # Fall back to message attachments when no links were given.
        links = link or [x['url'] for x in ctx.message.attachments]
        if not links:
            raise commands.BadArgument('Invalid Usage: No images.')
        coldir = path.join('images', 'collections')
        # Unknown collection: ask the owner before creating it.
        if collection not in listdir(coldir):
            if await self.bot.confirm("That collection doesn't exist, add it?", self.bot.owner):
                makedirs(path.join(coldir, collection))
                await self.bot.notify("Added collection: {}".format(collection))
            else:
                return
        for link in links:
            # Page links like //imgur.com/<id> are resolved to the direct
            # image link via the imgur API.
            if '//imgur.com/' in link:
                link = self.bot.imgur.get_image(link.split('/')[-1]).link
            # NOTE(review): hash() is salted per interpreter run, so the same
            # link can yield different filenames across restarts; the
            # duplicate check below only holds within one run -- confirm.
            name = "{}.{}".format(str(hash(link[-10:])), link.split('.')[-1])
            if name in listdir(path.join(coldir, collection)):
                await self.bot.notify("{} already existed, adding as temp. Correct soon so it isn't lost".format(name))
                name = 'temp.png'
            try:
                await utils.download(self.session, link, path.join(coldir, collection, name))
                t = TagItem(ctx.message.author.id, str(ctx.message.timestamp), [collection],
                            image=path.join(coldir, collection, name))
                self.bot.tag_map[collection] = t
                await t.run(ctx)
            except OSError:
                await self.bot.notify(
                    "Invalid link. Make sure it points directly to the file and ends with a valid file extension.")
    @image.command(pass_context=True, name='add_album')
    @request()
    async def _add_album(self, ctx, link: str, *collections):
        """Add all the images in an imgur album.

        The link should be to a valid imgur album or gallery album.
        After that you may list multiple collections to put each image in."""
        # input checking
        if len(collections) == 0:
            await self.bot.say("No tags given.")
            return
        a = self.bot.imgur.get_at_url(link)
        if not isinstance(a, pyimgur.Album):
            await self.bot.say('Not a valid imgur album.')
            return
        # initial response
        m = await self.bot.say(f'Getting {len(a.images)} images...')
        # add all images
        for im in a.images:
            link = im.link
            n = "{}.{}".format(str(hash(link[-10:])), link.split('.')[-1])
            # NOTE(review): these files land directly under images/, not under
            # images/collections/<tag> like _image_add does -- confirm intended.
            await utils.download(self.session, link, path.join('images', n))
            i_path = path.join('images', n)
            t = TagItem(ctx.message.author.id, str(ctx.message.timestamp), collections, image=i_path)
            # The first tag stores the item; the rest alias the same id.
            self.bot.tag_map[collections[0]] = t
            for name in collections[1:]:
                self.bot.tag_map.add_tag(t.id, name)
        await self.bot.edit_message(m, f'Added {len(a.images)} images to {",".join(collections)}')
@image.command(name='list')
async def _image_list(self):
"""Get a list of all the categories and reactions."""
r_list = [x[0] for x in self.bot.content.reactions.values() if x[0] is not None]
c_list = [x for x in listdir(path.join('images', 'collections')) if x not in r_list]
await self.bot.say("List of categories: {}\nList of reactions: {}".format(", ".join(c_list), ", ".join(r_list)))
async def count_booru(self, url, tags):
params = {'page': 'dapi', 's': 'post', 'q': 'index', 'limit': 0, 'tags': tags}
async with self.session.get(url + '/index.php', params=params) as r:
try:
return int(re.search(r'count="(\d+)"', await r.text()).group(1))
except AttributeError:
raise commands.BadArgument("API ERROR")
async def fetch_booru_image(self, url: str, tags: str, *filters: List[Callable[[dict], bool]], count=None):
count = count or await self.count_booru(url, tags)
params = {'page': 'dapi', 's': 'post', 'q': 'index', 'json': 1, 'pid': 0 if count <= 100 else random.randint(0, count // 100 - 1), 'tags': tags}
async with self.session.get(url + '/index.php', params=params) as r:
if r.status != 200:
return f'Something went wrong. Error {r.status}'
t = await r.text()
try:
ims = json.loads(t)
except json.JSONDecodeError:
return "API error"
filtered = [i for i in ims if not any(f(i) for f in filters)]
if len(filtered):
return random.choice(filtered)
else:
return "No results"
    async def post_booru_image(self, url: str, tags: str, *filters: List[Callable[[dict], bool]]):
        """Post a fetched booru image as an embed, or its error message."""
        tmp = await self.bot.say("getting image from booru")
        im = await self.fetch_booru_image(url, tags, *filters)
        # fetch_booru_image returns a post dict on success, an error string
        # otherwise.
        if isinstance(im, dict):
            count = await self.count_booru(url, tags)
            if 'file_url' in im:
                if im['file_url'].startswith('http'):
                    img_url = im['file_url']
                else:
                    # Protocol-relative URL: prefix with the board's scheme.
                    img_url = f'{url.split(":")[0]}:{im["file_url"]}'
            else:
                # Older boards expose only directory + filename.
                img_url = f'{url}/images/{im["directory"]}/{im["image"]}'
            e = discord.Embed(
                title='This Image',
                # The tag list doubles as the description; underscores are
                # escaped so discord does not italicize, and the text is
                # capped at the 2048-character embed limit.
                description=shorten(im['tags'].replace('_', r'\_').replace(' ', ', '), 2048, placeholder='...'),
                url=f'{url}/index.php?page=post&s=view&id={im["id"]}'
            ).set_author(
                name=f"{count} Images with these tags",
                url=f"{url}/index.php?page=post&s=list&tags={'+'.join(tags.split())}"
            ).set_image(
                url=img_url
            )
            try:
                # Replace the placeholder text with an invisible character so
                # only the embed shows.
                await self.bot.edit_message(tmp, '\N{ZERO WIDTH SPACE}', embed=e)
            except discord.HTTPException as e:
                await self.bot.edit_message(tmp, f'HTTP Error: {e.code}')
        else:
            await self.bot.edit_message(tmp, im)
    @image.command(name='booru')
    async def _booru(self, *, tags: str):
        """Get an image from safebooru based on tags."""
        # Belt-and-braces: reject anything rated explicit even on safebooru.
        await self.post_booru_image("http://safebooru.org", tags, lambda im: im['rating'] == 'e')
    @image.command(name='gelbooru')
    @checks.has_tag("lewd")
    async def _gelbooru(self, *, tags: str):
        """Get an image from gelbooru based on tags."""
        # Channel must carry the "lewd" tag; no rating filter is applied.
        await self.post_booru_image("http://gelbooru.com", tags)
    @image.command(name='rule34xxx')
    @checks.has_tag('lewd')
    async def _rule34xxx(self, *, tags: str):
        """Get an image from rule34.xxx based on tags."""
        # Channel must carry the "lewd" tag; no rating filter is applied.
        await self.post_booru_image("http://rule34.xxx", tags)
    async def post_booru_collage(self, url: str, tags: str, *filters: List[Callable[[dict], bool]]):
        """Build and upload a collage of random booru posts matching *tags*."""
        if await self.count_booru(url, tags) < 5:
            raise commands.BadArgument("Not enough images with those tags. Need at least 5 static images.")
        tmp = await self.bot.say(f"Collecting images")
        async def gen():
            # Async generator feeding make_collage: keeps fetching random
            # posts until make_collage stops iterating (collage full) or too
            # many downloads fail to open as images.
            total_images = 0
            errors = 0
            while True:
                im = await self.fetch_booru_image(url, tags, *filters)
                if isinstance(im, str):
                    # API error / no results: stop producing images.
                    return
                if 'file_url' in im:
                    img_url = f'{url.split(":")[0]}:{im["file_url"]}'
                else:
                    img_url = f'{url}/images/{im["directory"]}/{im["image"]}'
                with await utils.download_fp(self.session, img_url) as fp:
                    try:
                        v = fp
                        iv = Image.open(v)
                        yield iv
                    except (OSError, ValueError) as e:
                        # Report the first few bad downloads in chat, then
                        # abort the whole command.
                        if errors < 5:
                            await self.bot.say("```py\n{}\n{}\n{}```".format(e, img_url, f'{url}/index.php?page=post&s=view&id={im["id"]}'))
                            errors += 1
                        else:
                            raise utils.CheckMsg("Too many erros. aborting.")
                total_images += 1
                await self.bot.edit_message(tmp, f"Collecting images {total_images}")
        with await self.make_collage(gen) as f:
            await self.bot.delete_message(tmp)
            await self.bot.upload(f, filename=f'{tags.replace(" ", "_")}_collage.png')
    @image.command()
    async def booru_collage(self, *, tags: str):
        """Make a collage of safebooru images matching the tags."""
        await self.post_booru_collage("http://safebooru.org", tags, lambda im: im['rating'] == 'e')
    @image.command()
    @checks.has_tag("lewd")
    async def gelbooru_collage(self, *, tags: str):
        """Make a collage of gelbooru images matching the tags."""
        await self.post_booru_collage("http://gelbooru.com", tags)
    @image.command()
    @checks.has_tag('lewd')
    async def rule34xxx_collage(self, *, tags: str):
        """Make a collage of rule34.xxx images matching the tags."""
        await self.post_booru_collage("http://rule34.xxx", tags)
@image.command(pass_context=True, name='reddit', aliases=('r',))
async def _r(self, ctx, sub: str, window: str='month'):
"""Get an image from a subreddit.
Optional argument is a time window following reddit's time windows."""
tmp = await self.bot.say("getting image from r/%s" % sub)
gal = self.bot.imgur.get_subreddit_gallery(sub, sort='top', window=window, limit=50)
if len(gal) <= 1:
await self.bot.edit_message(tmp, 'no images found at r/%s. did you spell it right?' % sub)
return
im = random.choice(gal)
if im is pyimgur.Album:
im = await self.bot.loop.run_in_executor(self.bot.imgur.get_image(self.get_random_image(im.id)))
if im.is_nsfw and not checks.tagged(ctx, 'lewd'):
await self.bot.edit_message(tmp, "no ecchi.")
return
await self.bot.edit_message(tmp, "{0.title}\n{0.link}".format(im))
    async def make_collage(self, gen) -> BytesIO:
        """Make a collage out of the images returned by the async generator.

        Lays images out in up to `rows` left-packed rows of fixed height,
        then rescales each row to exactly `width` pixels.  Returns an
        in-memory PNG; the caller is responsible for closing it.
        """
        # approximate desired values
        width = 1000
        height = 1000
        rows = 3
        line_width = 2
        # calculate other values
        row_height = height / rows
        min_row_width = width * 2 / 3
        # create image jagged array. (row width, images)
        image_array = [[0, []]]
        async for im in gen():
            def process(pi):
                # make new row if current is too large and we can make more rows
                if image_array[-1][0] >= width:
                    if len(image_array) == rows:
                        # Collage is full: tell the loop below to stop pulling
                        # images from the generator.
                        return True
                    image_array.append([0, []])
                # load and perform initial resize on image.
                pscale = row_height / pi.size[1]
                pi = pi.resize([int(d * pscale) for d in pi.size], Image.ANTIALIAS)
                pi.thumbnail((width, row_height))
                # add to image array
                image_array[-1][0] += pi.size[0] + (line_width if len(image_array[-1][1]) else 0)
                image_array[-1][1].append(pi)
            # PIL resizing is CPU-bound; run it off the event loop.
            if await self.bot.loop.run_in_executor(None, process, im):
                break
        def fit_to_image():
            # remove last row if below minimum.
            if image_array[-1][0] < min_row_width:
                del image_array[-1]
            # fit each row to width
            for i, (row_width, ims) in enumerate(image_array):
                lines_amount = line_width * (len(ims) - 1)
                if row_width != width:
                    scale = width / (row_width - lines_amount)
                    image_array[i][1] = [fi.resize([int(d * scale) for d in fi.size], Image.ANTIALIAS) for fi in ims]
            # get the actual output height
            out_height = sum(ims[1][0].size[1] for ims in image_array) + ((len(image_array) - 1) * line_width)
            # create new Image object
            image = Image.new('RGB', (width, int(out_height)))
            # draw images on output image
            y = 0
            for _, ims in image_array:
                x = 0
                for i in ims:
                    image.paste(i, (x, y))
                    x += i.size[0] + line_width
                y += ims[0].size[1] + line_width
            fp = BytesIO()
            image.save(fp, 'PNG')
            fp.seek(0)
            return fp
        return await self.bot.loop.run_in_executor(None, fit_to_image)
    @image.command(pass_context=True, name='collage', aliases=('c',))
    async def _image_collage(self, ctx, *names):
        """Generate a collage from images in a tag."""
        # Only static formats: the collage output is a single PNG.
        types = ('.png', '.jpg', '.jpeg')
        async def gen():
            # get list of image
            try:
                tags = self.bot.tag_map.get_items(*names, pred=lambda i: i.image and i.image.endswith(types))[:]
                random.shuffle(tags)
                images = [i.image for i in tags]
            except KeyError:
                raise commands.BadArgument("Key not found.")
            if len(images) < 5:
                raise commands.BadArgument("Not enough images in that tag. Need at least 5 static images.")
            for i in images:
                yield Image.open(i)
        # processing can take a while, so we type to acknowledge the command and run it in an executor.
        await self.bot.type()
        with await self.make_collage(gen) as f:
            await self.bot.upload(f, filename=f'{"_".join(names)}_collage.png')
@commands.group(pass_context=True, invoke_without_command=True)
async def meme(self, ctx, name: str, *, c: str=""):
"""Choose an image and text to add to the image."""
if ctx.invoked_subcommand is None:
if name not in self.memes.keys():
await self.bot.say(name + " not found.")
return
if len(c) <= 0:
await self.bot.say(self.memes[name])
return
cs = c.split()
tt = ' '.join(cs[:len(cs) // 2])
tb = ' '.join(cs[len(cs) // 2:])
replacements = [["-", "--"], ["_", "__"], ["?", "~q"], ["%", "~p"], [" ", "%20"], ["''", "\""]]
for r in replacements:
tt = tt.replace(r[0], r[1])
tb = tb.replace(r[0], r[1])
with await utils.download_fp(self.session, "http://memegen.link/custom/{0}/{1}.jpg?alt={2}".format(
tt, tb, self.memes[name])) as fp:
await self.bot.upload(fp, filename='meme.jpg')
@meme.command(name='list')
async def _meme_list(self):
"""Lists all available templates."""
await self.bot.say('Available Templates: ' + ', '.join(self.memes.keys()))
    @meme.command(name='add')
    @request()
    async def _meme_add(self, name: str, link: str):
        """Add a template to the collection. Can't be GIF"""
        # Reject animated templates (matches .gif and .gifv suffixes) and
        # duplicate names up front.
        if '.gif' in link[-5:]:
            await self.bot.say("The image can not be a gif.")
            return
        if name in self.memes.keys():
            await self.bot.say("{} already taken.".format(name))
            return
        # Persist the new template through the bot's content store.
        self.memes[name] = link
        self.bot.content.memes = self.memes
        await self.bot.content.save()
        await self.bot.say('Added {}'.format(name))
    @image.group(pass_context=True, invoke_without_command=True)
    async def overlay(self, ctx, template: str, *users: discord.Member):
        """
        Overlay faces onto templates.
        The template is the name of the template to use.
        After that you can list one or more users whose avatars will be used. Your's is used if you don't add any.
        """
        # default to author
        users = list(users) or [ctx.message.author]
        # get template or notify user that it was not found
        try:
            ov = self.bot.content.overlays[template]
        except KeyError:
            raise commands.BadArgument(f"{template} not found.")
        link = ov['link']
        coords = ov['coords']
        # Each coordinate appears to be (center_x, center_y, size): c[0]/c[1]
        # are used as a paste center and c[2] as the face diameter below.
        # separate extra users into new images.
        # If more users than slots are given, chunk them into several output
        # images, one chunk of len(coords) users per image.
        images = [
            users[i*len(coords):(i+1)*len(coords)]
            for i in range(0, math.ceil(len(users)/len(coords)))
        ]
        # fill remaining spots by duplicating the last user.
        images[-1] += ([images[-1][-1]] * (len(coords)-len(images[-1])))
        # read template image
        with await utils.download_fp(self.session, link) as fp:
            original = Image.open(fp)
            # repeat once for each image to be generated
            for i in images:
                # copy image if there are more than one. If not use the original
                im = original.copy() if len(images) > 1 else original
                # A single animated avatar forces the whole output to be a GIF.
                make_gif = any('.gif' in u.avatar_url for u in i)
                frames = [im]
                # paste each face
                for index, c in enumerate(coords):
                    # get user face for this coordinate
                    u = i[index]
                    with await utils.download_fp(self.session, u.avatar_url or u.default_avatar_url) as face_fp:
                        face = Image.open(face_fp)
                        # generate mask
                        # Oversized (4x) circular mask, later shrunk with the
                        # face so the circle edge stays anti-aliased.
                        mask = Image.new("L", [c[2] * 4, c[2] * 4], color=0)
                        ImageDraw.Draw(mask).ellipse((0, 0) + mask.size, fill=255)
                        # paste the face
                        if not make_gif:
                            # resize face to requested size
                            face.thumbnail((c[2], c[2]), Image.ANTIALIAS)
                            mask = mask.resize(face.size, Image.ANTIALIAS)
                            im.paste(
                                face,
                                (
                                    c[0] - face.size[0]//2,
                                    c[1] - face.size[1]//2
                                ),
                                mask
                            )
                        else:
                            if face.format != 'GIF':
                                # Static face inside an animated output: paste
                                # the same face onto every frame gathered so far.
                                face.thumbnail((c[2], c[2]), Image.ANTIALIAS)
                                mask = mask.resize(face.size, Image.ANTIALIAS)
                                for frame in frames:
                                    frame.paste(
                                        face,
                                        (
                                            c[0] - face.size[0] // 2,
                                            c[1] - face.size[1] // 2
                                        ),
                                        mask
                                    )
                            else:
                                # Animated face: grow the frame list on demand by
                                # copying the last frame, then paste frame-by-frame.
                                for f_n, frame in enumerate(ImageSequence.Iterator(face)):
                                    try:
                                        f = frames[f_n]
                                    except IndexError:
                                        f = frames[-1].copy()
                                        frames.append(f)
                                    p = frame.copy()
                                    p.thumbnail((c[2], c[2]), Image.ANTIALIAS)
                                    m = mask.copy().resize(p.size, Image.ANTIALIAS)
                                    f.paste(p, (c[0] - p.size[0] // 2, c[1] - p.size[1] // 2), m)
                # send the image
                out = BytesIO()
                if make_gif:
                    frames[0].save(out, 'GIF', save_all=True, append_images=frames[1:], loop=0)
                    out.seek(0)
                    await self.bot.upload(out, filename=f"{template}.gif")
                else:
                    im.save(out, 'PNG')
                    out.seek(0)
                    await self.bot.upload(out, filename=f"{template}.png")
@overlay.command(name='list')
async def __overlay__list(self):
"""
available templates.
"""
await self.bot.say(', '.join(self.bot.content.overlays.keys()))
def setup(bot):
    """discord.py extension entry point: attach the Images cog to *bot*."""
    bot.add_cog(Images(bot))
| Drowrin/Weeabot | cogs/images.py | Python | mit | 24,498 |
class config_eval_range(config_base):
    mutable = 3

    def eval_range(param):
        """Build a ';'-joined arithmetic sequence from start/stop[/step] strings.

        Returns '' on missing arguments, unparsable numbers, or a zero step.
        """
        if len(param) < 2:
            return ''
        # Parse all numeric arguments; any failure yields the empty result.
        try:
            start = float(param[0])
            stop = float(param[1])
            step = float(param[2]) if len(param) > 2 else 1.0
        except ValueError:
            return ''
        if step == 0:
            return ''
        values = []
        current = start
        # Walk from start towards stop, inclusive, in the step's direction.
        if step > 0:
            while current <= stop:
                values.append(float_to_str(current))
                current += step
        else:
            while current >= stop:
                values.append(float_to_str(current))
                current += step
        return ';'.join(values)
# TESTS
# IN ['1', '3']
# OUT '1;2;3'
# IN ['1', '5', '2']
# OUT '1;3;5'
# IN ['1', '4', '2']
# OUT '1;3'
# IN ['5', '-5', '-2.5']
# OUT '5;2.5;0;-2.5;-5'
# IN ['-5', '5', '-2.5']
# OUT ''
| plepe/pgmapcss | pgmapcss/eval/eval_range.py | Python | agpl-3.0 | 907 |
from conans import python_requires
import os
common = python_requires('llvm-common/0.0.0@orbitdeps/stable')
class LLVMDebugInfoCodeView(common.LLVMModulePackage):
    """Conan recipe for the LLVM DebugInfoCodeView module.

    Purely declarative: all build logic lives in the shared
    ``llvm-common`` python_requires base class.
    """
    version = common.LLVMModulePackage.version
    name = 'llvm_debuginfo_codeview'
    llvm_component = 'llvm'
    llvm_module = 'DebugInfoCodeView'
    # Link-time dependencies on sibling LLVM module packages.
    llvm_requires = ['llvm_headers', 'llvm_support', 'llvm_debuginfo_msf']
| pierricgimmig/orbitprofiler | contrib/conan/recipes/llvm_debuginfo_codeview/conanfile.py | Python | bsd-2-clause | 390 |
"""A module exposing the search backends registry."""
from __future__ import unicode_literals
from djblets.registries.importer import lazy_import_registry
#: The search backend registry.
#: The search backend registry.
#:
#: NOTE(review): ``lazy_import_registry`` suggests the registry module is
#: only imported on first access — confirm against djblets' importer docs.
search_backend_registry = \
    lazy_import_registry('reviewboard.search.search_backends.registry',
                         'SearchBackendRegistry')
| chipx86/reviewboard | reviewboard/search/__init__.py | Python | mit | 341 |
from os.path import join
import bzt
from bzt.modules.aggregator import DataPoint, KPISet
from bzt.modules.k6 import K6Executor, K6LogReader
from bzt.utils import EXE_SUFFIX
from tests.unit import BZTestCase, ExecutorTestCase, RESOURCES_DIR, ROOT_LOGGER
TOOL_NAME = join(RESOURCES_DIR, "k6", "k6_mock" + EXE_SUFFIX)
K6_SCRIPT = join(RESOURCES_DIR, "k6", "k6_script.js")
class TestK6Executor(ExecutorTestCase):
    """Tests that K6Executor builds the expected k6 command line."""

    EXECUTOR = K6Executor
    # Captured command line of the last (mocked) subprocess launch.
    CMD_LINE = None

    def start_subprocess(self, args, **kwargs):
        # Test double for engine.start_subprocess: record instead of running.
        self.CMD_LINE = " ".join(args)

    def test_full(self):
        """End-to-end lifecycle run against the mock k6 binary."""
        self.configure({"execution": {
            "concurrency": 5,
            "hold-for": "30",
            "iterations": 50,
            "scenario": {"script": K6_SCRIPT}}})
        # Temporarily stub out tool detection so prepare() needs no real k6.
        tmp_eac = bzt.utils.exec_and_communicate
        try:
            bzt.utils.exec_and_communicate = lambda *args, **kwargs: ("", "")
            self.obj.prepare()
        finally:
            bzt.utils.exec_and_communicate = tmp_eac
        self.obj.get_widget()
        self.obj.k6.tool_name = TOOL_NAME
        self.obj.startup()
        self.obj.check()
        self.obj.shutdown()
        self.obj.post_process()

    def simple_run(self, config):
        # Prepare and start the executor with subprocess launching mocked,
        # leaving the generated command line in self.CMD_LINE.
        self.configure(config)
        tmp_eac = bzt.utils.exec_and_communicate
        try:
            bzt.utils.exec_and_communicate = lambda *args, **kwargs: ("", "")
            self.obj.prepare()
        finally:
            bzt.utils.exec_and_communicate = tmp_eac
        self.obj.engine.start_subprocess = self.start_subprocess
        self.obj.startup()
        self.obj.post_process()

    def test_kpi_file(self):
        """The CSV results output must be wired to the executor's kpi_file."""
        self.simple_run({
            "execution": {
                "scenario": {"script": K6_SCRIPT},
                "executor": "k6"
            },
        })
        self.assertIn(f"--out csv={self.obj.kpi_file}", self.CMD_LINE)

    def test_concurrency(self):
        self.simple_run({
            "execution": {
                "concurrency": "5",
                "scenario": {"script": K6_SCRIPT},
                "executor": "k6"
            },
        })
        self.assertIn("--vus 5", self.CMD_LINE)

    def test_hold_for(self):
        self.simple_run({
            "execution": {
                "hold-for": "30",
                "scenario": {"script": K6_SCRIPT},
                "executor": "k6"
            },
        })
        self.assertIn("--duration 30s", self.CMD_LINE)

    def test_iterations(self):
        self.simple_run({
            "execution": {
                "iterations": "100",
                "scenario": {"script": K6_SCRIPT},
                "executor": "k6"
            },
        })
        self.assertIn("--iterations 100", self.CMD_LINE)

    def test_iterations_multiplied(self):
        # k6's --iterations is the grand total: per-VU iterations x VU count.
        self.simple_run({
            "execution": {
                "iterations": "10",
                "concurrency": "10",
                "scenario": {"script": K6_SCRIPT},
                "executor": "k6"
            },
        })
        self.assertIn("--iterations 100", self.CMD_LINE)
class TestK6Reader(BZTestCase):
    """Checks that K6LogReader parses the sample KPI CSV into datapoints."""

    def test_read(self):
        reader = K6LogReader(join(RESOURCES_DIR, "k6", "k6_kpi.csv"), ROOT_LOGGER)
        points = list(reader.datapoints(True))
        self.assertEqual(4, len(points))
        # Every datapoint must carry a plausible (post-2017) unix timestamp.
        for point in points:
            self.assertTrue(point['ts'] > 1500000000)
        cumulative = points[-1][DataPoint.CUMULATIVE]['']
        self.assertEqual(2, cumulative[KPISet.SUCCESSES])
        self.assertEqual(2, cumulative[KPISet.FAILURES])
| Blazemeter/taurus | tests/unit/modules/test_k6.py | Python | apache-2.0 | 3,575 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from msrest import Serializer
from .. import models as _models
from .._vendor import _convert_request, _format_url_section
# Type aliases shared by every operation in this generated module.
T = TypeVar('T')
JSONType = Any
# Optional per-call response hook: receives the raw pipeline response, the
# deserialized model and the response headers.
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
# Module-level request serializer; client-side validation is disabled for
# generated clients.
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def build_create_or_update_request_initial(
    subscription_id: str,
    resource_group_name: str,
    disk_encryption_set_name: str,
    *,
    json: JSONType = None,
    content: Any = None,
    **kwargs: Any
) -> HttpRequest:
    """Build the initial PUT request for creating/updating a disk encryption set."""
    content_type = kwargs.pop('content_type', None) # type: Optional[str]
    api_version = "2019-11-01"
    accept = "application/json"
    # Construct URL
    url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/diskEncryptionSets/{diskEncryptionSetName}')
    path_format_arguments = {
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
        "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
        "diskEncryptionSetName": _SERIALIZER.url("disk_encryption_set_name", disk_encryption_set_name, 'str'),
    }
    url = _format_url_section(url, **path_format_arguments)
    # Construct parameters
    query_parameters = kwargs.pop("params", {})  # type: Dict[str, Any]
    query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
    # Construct headers
    header_parameters = kwargs.pop("headers", {})  # type: Dict[str, Any]
    if content_type is not None:
        header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
    header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
    return HttpRequest(
        method="PUT",
        url=url,
        params=query_parameters,
        headers=header_parameters,
        json=json,
        content=content,
        **kwargs
    )
def build_update_request_initial(
    subscription_id: str,
    resource_group_name: str,
    disk_encryption_set_name: str,
    *,
    json: JSONType = None,
    content: Any = None,
    **kwargs: Any
) -> HttpRequest:
    """Build the initial PATCH request for updating a disk encryption set."""
    content_type = kwargs.pop('content_type', None) # type: Optional[str]
    api_version = "2019-11-01"
    accept = "application/json"
    # Construct URL
    url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/diskEncryptionSets/{diskEncryptionSetName}')
    path_format_arguments = {
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
        "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
        "diskEncryptionSetName": _SERIALIZER.url("disk_encryption_set_name", disk_encryption_set_name, 'str'),
    }
    url = _format_url_section(url, **path_format_arguments)
    # Construct parameters
    query_parameters = kwargs.pop("params", {})  # type: Dict[str, Any]
    query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
    # Construct headers
    header_parameters = kwargs.pop("headers", {})  # type: Dict[str, Any]
    if content_type is not None:
        header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
    header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
    return HttpRequest(
        method="PATCH",
        url=url,
        params=query_parameters,
        headers=header_parameters,
        json=json,
        content=content,
        **kwargs
    )
def build_get_request(
    subscription_id: str,
    resource_group_name: str,
    disk_encryption_set_name: str,
    **kwargs: Any
) -> HttpRequest:
    """Build the GET request for reading a single disk encryption set."""
    api_version = "2019-11-01"
    accept = "application/json"
    # Construct URL
    url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/diskEncryptionSets/{diskEncryptionSetName}')
    path_format_arguments = {
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
        "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
        "diskEncryptionSetName": _SERIALIZER.url("disk_encryption_set_name", disk_encryption_set_name, 'str'),
    }
    url = _format_url_section(url, **path_format_arguments)
    # Construct parameters
    query_parameters = kwargs.pop("params", {})  # type: Dict[str, Any]
    query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
    # Construct headers
    header_parameters = kwargs.pop("headers", {})  # type: Dict[str, Any]
    header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
    return HttpRequest(
        method="GET",
        url=url,
        params=query_parameters,
        headers=header_parameters,
        **kwargs
    )
def build_delete_request_initial(
    subscription_id: str,
    resource_group_name: str,
    disk_encryption_set_name: str,
    **kwargs: Any
) -> HttpRequest:
    """Build the initial DELETE request for removing a disk encryption set."""
    api_version = "2019-11-01"
    accept = "application/json"
    # Construct URL
    url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/diskEncryptionSets/{diskEncryptionSetName}')
    path_format_arguments = {
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
        "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
        "diskEncryptionSetName": _SERIALIZER.url("disk_encryption_set_name", disk_encryption_set_name, 'str'),
    }
    url = _format_url_section(url, **path_format_arguments)
    # Construct parameters
    query_parameters = kwargs.pop("params", {})  # type: Dict[str, Any]
    query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
    # Construct headers
    header_parameters = kwargs.pop("headers", {})  # type: Dict[str, Any]
    header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
    return HttpRequest(
        method="DELETE",
        url=url,
        params=query_parameters,
        headers=header_parameters,
        **kwargs
    )
def build_list_by_resource_group_request(
    subscription_id: str,
    resource_group_name: str,
    **kwargs: Any
) -> HttpRequest:
    """Build the GET request listing disk encryption sets in a resource group."""
    api_version = "2019-11-01"
    accept = "application/json"
    # Construct URL
    url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/diskEncryptionSets')
    path_format_arguments = {
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
        "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
    }
    url = _format_url_section(url, **path_format_arguments)
    # Construct parameters
    query_parameters = kwargs.pop("params", {})  # type: Dict[str, Any]
    query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
    # Construct headers
    header_parameters = kwargs.pop("headers", {})  # type: Dict[str, Any]
    header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
    return HttpRequest(
        method="GET",
        url=url,
        params=query_parameters,
        headers=header_parameters,
        **kwargs
    )
def build_list_request(
    subscription_id: str,
    **kwargs: Any
) -> HttpRequest:
    """Build the GET request listing all disk encryption sets in a subscription."""
    api_version = "2019-11-01"
    accept = "application/json"
    # Construct URL
    url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/providers/Microsoft.Compute/diskEncryptionSets')
    path_format_arguments = {
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
    }
    url = _format_url_section(url, **path_format_arguments)
    # Construct parameters
    query_parameters = kwargs.pop("params", {})  # type: Dict[str, Any]
    query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
    # Construct headers
    header_parameters = kwargs.pop("headers", {})  # type: Dict[str, Any]
    header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
    return HttpRequest(
        method="GET",
        url=url,
        params=query_parameters,
        headers=header_parameters,
        **kwargs
    )
class DiskEncryptionSetsOperations(object):
"""DiskEncryptionSetsOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.compute.v2019_11_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
    def __init__(self, client, config, serializer, deserializer):
        # Stash the shared pipeline client, configuration and (de)serializers
        # for use by every operation in this group.
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config
    def _create_or_update_initial(
        self,
        resource_group_name: str,
        disk_encryption_set_name: str,
        disk_encryption_set: "_models.DiskEncryptionSet",
        **kwargs: Any
    ) -> "_models.DiskEncryptionSet":
        """Send the initial PUT of the create-or-update LRO and return the
        first response payload (HTTP 200 or 201)."""
        cls = kwargs.pop('cls', None) # type: ClsType["_models.DiskEncryptionSet"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
        _json = self._serialize.body(disk_encryption_set, 'DiskEncryptionSet')
        request = build_create_or_update_request_initial(
            subscription_id=self._config.subscription_id,
            resource_group_name=resource_group_name,
            disk_encryption_set_name=disk_encryption_set_name,
            content_type=content_type,
            json=_json,
            template_url=self._create_or_update_initial.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)
        # Run the request synchronously through the ARM pipeline.
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200, 201]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        if response.status_code == 200:
            deserialized = self._deserialize('DiskEncryptionSet', pipeline_response)
        if response.status_code == 201:
            deserialized = self._deserialize('DiskEncryptionSet', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    _create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/diskEncryptionSets/{diskEncryptionSetName}'}  # type: ignore
    @distributed_trace
    def begin_create_or_update(
        self,
        resource_group_name: str,
        disk_encryption_set_name: str,
        disk_encryption_set: "_models.DiskEncryptionSet",
        **kwargs: Any
    ) -> LROPoller["_models.DiskEncryptionSet"]:
        """Creates or updates a disk encryption set.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param disk_encryption_set_name: The name of the disk encryption set that is being created. The
         name can't be changed after the disk encryption set is created. Supported characters for the
         name are a-z, A-Z, 0-9 and _. The maximum name length is 80 characters.
        :type disk_encryption_set_name: str
        :param disk_encryption_set: disk encryption set object supplied in the body of the Put disk
         encryption set operation.
        :type disk_encryption_set: ~azure.mgmt.compute.v2019_11_01.models.DiskEncryptionSet
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
         operation to not poll, or pass in your own initialized polling object for a personal polling
         strategy.
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
         Retry-After header is present.
        :return: An instance of LROPoller that returns either DiskEncryptionSet or the result of
         cls(response)
        :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.compute.v2019_11_01.models.DiskEncryptionSet]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
        polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.PollingMethod]
        cls = kwargs.pop('cls', None) # type: ClsType["_models.DiskEncryptionSet"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
        # Only issue the initial PUT when not resuming from a saved poller state.
        if cont_token is None:
            raw_result = self._create_or_update_initial(
                resource_group_name=resource_group_name,
                disk_encryption_set_name=disk_encryption_set_name,
                disk_encryption_set=disk_encryption_set,
                content_type=content_type,
                cls=lambda x,y,z: x,
                **kwargs
            )
        kwargs.pop('error_map', None)
        def get_long_running_output(pipeline_response):
            # Deserialize the terminal response of the LRO into the model.
            response = pipeline_response.http_response
            deserialized = self._deserialize('DiskEncryptionSet', pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized
        # Select the polling strategy: default ARM polling, no-op, or custom.
        if polling is True: polling_method = ARMPolling(lro_delay, **kwargs)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        if cont_token:
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/diskEncryptionSets/{diskEncryptionSetName}'}  # type: ignore
    def _update_initial(
        self,
        resource_group_name: str,
        disk_encryption_set_name: str,
        disk_encryption_set: "_models.DiskEncryptionSetUpdate",
        **kwargs: Any
    ) -> "_models.DiskEncryptionSet":
        """Send the initial PATCH of the update LRO and return the first
        response payload (HTTP 200 or 202)."""
        cls = kwargs.pop('cls', None) # type: ClsType["_models.DiskEncryptionSet"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
        _json = self._serialize.body(disk_encryption_set, 'DiskEncryptionSetUpdate')
        request = build_update_request_initial(
            subscription_id=self._config.subscription_id,
            resource_group_name=resource_group_name,
            disk_encryption_set_name=disk_encryption_set_name,
            content_type=content_type,
            json=_json,
            template_url=self._update_initial.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)
        # Run the request synchronously through the ARM pipeline.
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200, 202]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        if response.status_code == 200:
            deserialized = self._deserialize('DiskEncryptionSet', pipeline_response)
        if response.status_code == 202:
            deserialized = self._deserialize('DiskEncryptionSet', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    _update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/diskEncryptionSets/{diskEncryptionSetName}'}  # type: ignore
    @distributed_trace
    def begin_update(
        self,
        resource_group_name: str,
        disk_encryption_set_name: str,
        disk_encryption_set: "_models.DiskEncryptionSetUpdate",
        **kwargs: Any
    ) -> LROPoller["_models.DiskEncryptionSet"]:
        """Updates (patches) a disk encryption set.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param disk_encryption_set_name: The name of the disk encryption set that is being created. The
         name can't be changed after the disk encryption set is created. Supported characters for the
         name are a-z, A-Z, 0-9 and _. The maximum name length is 80 characters.
        :type disk_encryption_set_name: str
        :param disk_encryption_set: disk encryption set object supplied in the body of the Patch disk
         encryption set operation.
        :type disk_encryption_set: ~azure.mgmt.compute.v2019_11_01.models.DiskEncryptionSetUpdate
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
         operation to not poll, or pass in your own initialized polling object for a personal polling
         strategy.
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
         Retry-After header is present.
        :return: An instance of LROPoller that returns either DiskEncryptionSet or the result of
         cls(response)
        :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.compute.v2019_11_01.models.DiskEncryptionSet]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
        polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.PollingMethod]
        cls = kwargs.pop('cls', None) # type: ClsType["_models.DiskEncryptionSet"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
        # Only issue the initial PATCH when not resuming from a saved poller state.
        if cont_token is None:
            raw_result = self._update_initial(
                resource_group_name=resource_group_name,
                disk_encryption_set_name=disk_encryption_set_name,
                disk_encryption_set=disk_encryption_set,
                content_type=content_type,
                cls=lambda x,y,z: x,
                **kwargs
            )
        kwargs.pop('error_map', None)
        def get_long_running_output(pipeline_response):
            # Deserialize the terminal response of the LRO into the model.
            response = pipeline_response.http_response
            deserialized = self._deserialize('DiskEncryptionSet', pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized
        # Select the polling strategy: default ARM polling, no-op, or custom.
        if polling is True: polling_method = ARMPolling(lro_delay, **kwargs)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        if cont_token:
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/diskEncryptionSets/{diskEncryptionSetName}'}  # type: ignore
    @distributed_trace
    def get(
        self,
        resource_group_name: str,
        disk_encryption_set_name: str,
        **kwargs: Any
    ) -> "_models.DiskEncryptionSet":
        """Gets information about a disk encryption set.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param disk_encryption_set_name: The name of the disk encryption set that is being created. The
         name can't be changed after the disk encryption set is created. Supported characters for the
         name are a-z, A-Z, 0-9 and _. The maximum name length is 80 characters.
        :type disk_encryption_set_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: DiskEncryptionSet, or the result of cls(response)
        :rtype: ~azure.mgmt.compute.v2019_11_01.models.DiskEncryptionSet
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None) # type: ClsType["_models.DiskEncryptionSet"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        request = build_get_request(
            subscription_id=self._config.subscription_id,
            resource_group_name=resource_group_name,
            disk_encryption_set_name=disk_encryption_set_name,
            template_url=self.get.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)
        # Run the request synchronously through the ARM pipeline.
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        deserialized = self._deserialize('DiskEncryptionSet', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/diskEncryptionSets/{diskEncryptionSetName}'}  # type: ignore
    def _delete_initial(
        self,
        resource_group_name: str,
        disk_encryption_set_name: str,
        **kwargs: Any
    ) -> None:
        """Send the initial DELETE of the delete LRO; accepts HTTP 200/202/204."""
        cls = kwargs.pop('cls', None) # type: ClsType[None]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        request = build_delete_request_initial(
            subscription_id=self._config.subscription_id,
            resource_group_name=resource_group_name,
            disk_encryption_set_name=disk_encryption_set_name,
            template_url=self._delete_initial.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)
        # Run the request synchronously through the ARM pipeline.
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200, 202, 204]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        if cls:
            return cls(pipeline_response, None, {})
    _delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/diskEncryptionSets/{diskEncryptionSetName}'}  # type: ignore
    @distributed_trace
    def begin_delete(
        self,
        resource_group_name: str,
        disk_encryption_set_name: str,
        **kwargs: Any
    ) -> LROPoller[None]:
        """Deletes a disk encryption set.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param disk_encryption_set_name: The name of the disk encryption set that is being created. The
         name can't be changed after the disk encryption set is created. Supported characters for the
         name are a-z, A-Z, 0-9 and _. The maximum name length is 80 characters.
        :type disk_encryption_set_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
         operation to not poll, or pass in your own initialized polling object for a personal polling
         strategy.
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
         Retry-After header is present.
        :return: An instance of LROPoller that returns either None or the result of cls(response)
        :rtype: ~azure.core.polling.LROPoller[None]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.PollingMethod]
        cls = kwargs.pop('cls', None) # type: ClsType[None]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
        # Only issue the initial DELETE when not resuming from a saved poller state.
        if cont_token is None:
            raw_result = self._delete_initial(
                resource_group_name=resource_group_name,
                disk_encryption_set_name=disk_encryption_set_name,
                cls=lambda x,y,z: x,
                **kwargs
            )
        kwargs.pop('error_map', None)
        def get_long_running_output(pipeline_response):
            # Delete has no payload; only invoke the optional response hook.
            if cls:
                return cls(pipeline_response, None, {})
        # Select the polling strategy: default ARM polling, no-op, or custom.
        if polling is True: polling_method = ARMPolling(lro_delay, **kwargs)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        if cont_token:
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/diskEncryptionSets/{diskEncryptionSetName}'}  # type: ignore
    @distributed_trace
    def list_by_resource_group(
        self,
        resource_group_name: str,
        **kwargs: Any
    ) -> Iterable["_models.DiskEncryptionSetList"]:
        """Lists all the disk encryption sets under a resource group.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either DiskEncryptionSetList or the result of
         cls(response)
        :rtype:
         ~azure.core.paging.ItemPaged[~azure.mgmt.compute.v2019_11_01.models.DiskEncryptionSetList]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.DiskEncryptionSetList"]
        # Translate well-known HTTP failures into typed azure-core exceptions;
        # callers may extend or override the mapping via the 'error_map' keyword.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        def prepare_request(next_link=None):
            # First page: build the request from the operation's URL template.
            # Subsequent pages: reuse the service-supplied next_link verbatim.
            if not next_link:
                request = build_list_by_resource_group_request(
                    subscription_id=self._config.subscription_id,
                    resource_group_name=resource_group_name,
                    template_url=self.list_by_resource_group.metadata['url'],
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
            else:
                request = build_list_by_resource_group_request(
                    subscription_id=self._config.subscription_id,
                    resource_group_name=resource_group_name,
                    template_url=next_link,
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
                request.method = "GET"
            return request
        def extract_data(pipeline_response):
            # Deserialize one page and return (link-to-next-page, page items).
            deserialized = self._deserialize("DiskEncryptionSetList", pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, iter(list_of_elem)
        def get_next(next_link=None):
            # Fetch a single page; anything other than HTTP 200 is raised
            # through error_map / HttpResponseError.
            request = prepare_request(next_link)
            pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)
            return pipeline_response
        # ItemPaged drives get_next/extract_data lazily as the caller iterates.
        return ItemPaged(
            get_next, extract_data
        )
    list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/diskEncryptionSets'}  # type: ignore
    @distributed_trace
    def list(
        self,
        **kwargs: Any
    ) -> Iterable["_models.DiskEncryptionSetList"]:
        """Lists all the disk encryption sets under a subscription.

        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either DiskEncryptionSetList or the result of
         cls(response)
        :rtype:
         ~azure.core.paging.ItemPaged[~azure.mgmt.compute.v2019_11_01.models.DiskEncryptionSetList]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.DiskEncryptionSetList"]
        # Translate well-known HTTP failures into typed azure-core exceptions;
        # callers may extend or override the mapping via the 'error_map' keyword.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        def prepare_request(next_link=None):
            # First page: build the request from the operation's URL template.
            # Subsequent pages: reuse the service-supplied next_link verbatim.
            if not next_link:
                request = build_list_request(
                    subscription_id=self._config.subscription_id,
                    template_url=self.list.metadata['url'],
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
            else:
                request = build_list_request(
                    subscription_id=self._config.subscription_id,
                    template_url=next_link,
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
                request.method = "GET"
            return request
        def extract_data(pipeline_response):
            # Deserialize one page and return (link-to-next-page, page items).
            deserialized = self._deserialize("DiskEncryptionSetList", pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, iter(list_of_elem)
        def get_next(next_link=None):
            # Fetch a single page; anything other than HTTP 200 is raised
            # through error_map / HttpResponseError.
            request = prepare_request(next_link)
            pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)
            return pipeline_response
        # ItemPaged drives get_next/extract_data lazily as the caller iterates.
        return ItemPaged(
            get_next, extract_data
        )
    list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Compute/diskEncryptionSets'}  # type: ignore
| Azure/azure-sdk-for-python | sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2019_11_01/operations/_disk_encryption_sets_operations.py | Python | mit | 35,129 |
# Copyright (C) 2010-2011 Richard Lincoln
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from CIM15.CDPSM.Balanced.IEC61970.Core.IdentifiedObject import IdentifiedObject
class GeneratingUnit(IdentifiedObject):
    """A single or set of synchronous machines for converting mechanical power into alternating-current power. For example, individual machines within a set may be defined for scheduling purposes while a single control signal is derived for the set. In this case there would be a GeneratingUnit for each member of the set and an additional GeneratingUnit corresponding to the set.
    """
    def __init__(self, baseP=0.0, genControlSource="plantControl", ratedNetMaxP=0.0, initialP=0.0, SynchronousMachines=None, *args, **kw_args):
        """Initialises a new 'GeneratingUnit' instance.
        @param baseP: For dispatchable units, this value represents the economic active power basepoint, for units that are not dispatchable, this value represents the fixed generation value. The value must be between the operating low and high limits.
        @param genControlSource: The source of controls for a generating unit. Values are: "plantControl", "offAGC", "unavailable", "onAGC"
        @param ratedNetMaxP: The net rated maximum capacity determined by subtracting the auxiliary power used to operate the internal plant machinery from the rated gross maximum capacity
        @param initialP: Default Initial active power which is used to store a powerflow result for the initial active power for this unit in this network configuration
        @param SynchronousMachines: A synchronous machine may operate as a generator and as such becomes a member of a generating unit
        """
        #: For dispatchable units, this value represents the economic active power basepoint, for units that are not dispatchable, this value represents the fixed generation value. The value must be between the operating low and high limits.
        self.baseP = baseP
        #: The source of controls for a generating unit. Values are: "plantControl", "offAGC", "unavailable", "onAGC"
        self.genControlSource = genControlSource
        #: The net rated maximum capacity determined by subtracting the auxiliary power used to operate the internal plant machinery from the rated gross maximum capacity
        self.ratedNetMaxP = ratedNetMaxP
        #: Default Initial active power which is used to store a powerflow result for the initial active power for this unit in this network configuration
        self.initialP = initialP
        # Backing list for the SynchronousMachines property; assignment goes
        # through the property setter so reverse references stay consistent.
        self._SynchronousMachines = []
        self.SynchronousMachines = [] if SynchronousMachines is None else SynchronousMachines
        super(GeneratingUnit, self).__init__(*args, **kw_args)
    # Introspection metadata used by the CIM serialization machinery.
    _attrs = ["baseP", "genControlSource", "ratedNetMaxP", "initialP"]
    _attr_types = {"baseP": float, "genControlSource": str, "ratedNetMaxP": float, "initialP": float}
    _defaults = {"baseP": 0.0, "genControlSource": "plantControl", "ratedNetMaxP": 0.0, "initialP": 0.0}
    _enums = {"genControlSource": "GeneratorControlSource"}
    _refs = ["SynchronousMachines"]
    _many_refs = ["SynchronousMachines"]
    def getSynchronousMachines(self):
        """A synchronous machine may operate as a generator and as such becomes a member of a generating unit
        """
        return self._SynchronousMachines
    def setSynchronousMachines(self, value):
        # Detach all currently referenced machines, then attach the new set,
        # keeping the machine-side GeneratingUnit back-reference in sync.
        for x in self._SynchronousMachines:
            x.GeneratingUnit = None
        for y in value:
            y._GeneratingUnit = self
        self._SynchronousMachines = value
    SynchronousMachines = property(getSynchronousMachines, setSynchronousMachines)
    def addSynchronousMachines(self, *SynchronousMachines):
        # Attaching via the machine's property also appends to our list.
        for obj in SynchronousMachines:
            obj.GeneratingUnit = self
    def removeSynchronousMachines(self, *SynchronousMachines):
        # Detaching via the machine's property also removes from our list.
        for obj in SynchronousMachines:
            obj.GeneratingUnit = None
| rwl/PyCIM | CIM15/CDPSM/Balanced/IEC61970/Generation/Production/GeneratingUnit.py | Python | mit | 4,966 |
"""
Django settings for allauthdemo project.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.6/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '!h8#n5wopc#7zq!_)i=l#t=q)7g0g-+&0!=kxv+*&2b7*xb8bm'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'bootstrap3', # optional module for making bootstrap forms easier
'allauth',
'allauth.account',
'allauth.socialaccount',
'allauthdemo.auth',
'allauthdemo.demo',
'fileupload',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'allauthdemo.urls'
WSGI_APPLICATION = 'allauthdemo.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.6/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.6/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = os.path.abspath(os.path.dirname(__file__)) + '/media/'
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.6/howto/static-files/
STATIC_URL = '/static/'
# Authentication
AUTHENTICATION_BACKENDS = (
"allauth.account.auth_backends.AuthenticationBackend",
)
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [
# allauth templates: you could copy this directory into your
# project and tweak it according to your needs
# os.path.join(PROJECT_ROOT, 'templates', 'uniform', 'allauth'),
# example project specific templates
os.path.join(BASE_DIR, 'allauthdemo', 'templates', 'plain', 'example'),
#os.path.join(BASE_DIR, 'allauthdemo', 'templates', 'bootstrap', 'allauth'),
os.path.join(BASE_DIR, 'allauthdemo', 'templates', 'allauth'),
os.path.join(BASE_DIR, 'allauthdemo', 'templates'),
],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
# needed for admin templates
'django.contrib.auth.context_processors.auth',
# these *may* not be needed
'django.template.context_processors.debug',
'django.template.context_processors.i18n',
'django.template.context_processors.media',
'django.template.context_processors.static',
'django.contrib.messages.context_processors.messages',
# allauth needs this from django
'django.template.context_processors.request',
# allauth specific context processors
#'allauth.account.context_processors.account',
#'allauth.socialaccount.context_processors.socialaccount',
],
},
}
]
MESSAGE_STORAGE = 'django.contrib.messages.storage.session.SessionStorage'
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
STATICFILES_DIRS = (
os.path.join(BASE_DIR, "static"),
)
SITE_ID = 1
AUTH_USER_MODEL = 'allauthdemo_auth.DemoUser'
LOGIN_REDIRECT_URL = '/member/'
ACCOUNT_AUTHENTICATION_METHOD = 'email'
ACCOUNT_USERNAME_REQUIRED = False
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_USERNAME_MIN_LENGTH = 3
# ACCOUNT_EMAIL_VERIFICATION = 'none' # testing...
ACCOUNT_USER_MODEL_USERNAME_FIELD = None
SOCIALACCOUNT_AUTO_SIGNUP = False # require social accounts to use the signup form ... I think
# For custom sign-up form:
# http://stackoverflow.com/questions/12303478/how-to-customize-user-profile-when-using-django-allauth
| infoclock/OlympicTracker | allauthdemo/settings_generated.py | Python | mit | 4,742 |
from geosquizzy.fsm.fsm import GeojsonFiniteStateMachine
from geosquizzy.structure.outcome import GeoSquizzyResults
from geosquizzy.optimum.network import Optimum
from geosquizzy.gs_socket.gs_client import GsSocketClient
from socket import AF_INET, SOCK_STREAM
import threading
import queue
class Tree:
    """Flat node store: maps a node id to its node dict."""

    def __init__(self, *args, **kwargs):
        # Positional/keyword arguments are accepted but intentionally unused,
        # matching the loose constructor convention of this module.
        self.nodes = {}
class FeaturesTree:
    """Builds a tree of GeoJSON property paths and delegates result
    extraction to GeoSquizzyResults and sequence optimisation to Optimum."""
    def __init__(self, *args, **kwargs):
        self.Tree = Tree(*args, **kwargs)
        self.Res = GeoSquizzyResults(*args, **kwargs)
        self.Optimum = Optimum(*args, **kwargs)
        # self.socket = kwargs.get('socket', None)
    @staticmethod
    def __new__leaf__():
        # Template for a fresh leaf; None marks fields that callers must fill.
        leaf = dict({'id': None, 'name': None, 'children': [], 'level': None,
                     'parent': None, 'completed': False, 'values': []})
        return leaf
    def prepare_new_leaf(self, **kwargs):
        # Fill the None-valued template slots ('id', 'name', 'level',
        # 'parent') from kwargs; non-None defaults are kept as-is.
        # NOTE(review): raises KeyError if any None-defaulted key is missing
        # from kwargs — presumably callers always pass all four; verify.
        new_leaf = self.__new__leaf__()
        return {x: kwargs[x] if y is None else y for x, y in new_leaf.items()}
    def new_obj(self, omitted):
        # Forward a finished object's omitted-key info to the optimiser.
        self.Optimum.update_data(omitted)
    def add_leaf(self, leaf=None):
        """
        :param leaf new node/leaf dict():
        :return:boolean(which mean if node already exist)
        """
        self.Optimum.update_seq(leaf=leaf)
        # self.socket.write(self.get_all_leafs_paths(progress=True))
        # self.socket.run(leaf)
        # Roots are always (re)stored; children are only inserted once and
        # registered in their parent's children list.
        if leaf['parent'] is None:
            self.Tree.nodes[leaf['id']] = leaf
        elif self.Tree.nodes.get(leaf['id'], None) is None:
            self.Tree.nodes[leaf['id']] = leaf
            if leaf['id'] not in self.Tree.nodes[leaf['parent']]['children']:
                self.Tree.nodes[leaf['parent']]['children'].append(leaf['id'])
        # When the optimiser signals a fit, consume the flag and hand the
        # prediction back to the caller (otherwise returns None implicitly).
        if self.Optimum.fit_optimum:
            self.Optimum.fit_optimum = False
            return self.Optimum.prediction
    def add_leaf_values(self, leaf_id=None, leaf_values=None):
        # Overwrites (does not extend) the stored values of an existing leaf.
        self.Tree.nodes[leaf_id]['values'] = leaf_values
    def get_all_leafs_paths(self, progress=None):
        # Delegate path/result extraction over the whole node store.
        return self.Res.get_results(nodes=self.Tree.nodes, progress=progress)
class GeoJSON:
    """Facade tying together the parser FSM, the feature tree and the
    optional progress socket client (started on a background thread)."""
    def __init__(self, **kwargs):
        # A socket client and its progress queue exist only when
        # 'socket_options' was supplied; otherwise both stay None.
        if not (kwargs.get('socket_options', None) is None):
            self.Socket = GsSocketClient(**kwargs.get('socket_options'))
            self.ProgressQueue = queue.Queue()
        else:
            self.Socket = None
            self.ProgressQueue = None
        self.FeTree = FeaturesTree(**kwargs)
        self.Fsm = GeojsonFiniteStateMachine(progress_queue=self.ProgressQueue, structure=self.FeTree)
        self.geojson = None
        self.options = kwargs.get('geojson_options', {})
        self.__processes__()
    def __processes__(self):
        # Launch the socket client on its own thread; it consumes the
        # progress queue and can pull results via __get_results__.
        if self.Socket:
            self.SocketThread = threading.Thread(target=self.Socket.run, args=(self.ProgressQueue, self.__get_results__))
            self.SocketThread.start()
    def __start__(self, **kwargs):
        # Entry point: stash the document and kick off parsing.
        self.geojson = kwargs.get('geojson', None)
        self.__read_geojson__(**kwargs)
    def __get_results__(self, progress=None):
        # [print(x.keys, x.count) for x in self.FeTree.Optimum.RawData.models]
        # [print(x) for x in self.FeTree.Optimum.history]
        return self.FeTree.get_all_leafs_paths(progress=progress)
    def __read_geojson__(self, **kwargs):
        # 'static' feeds the whole document to the FSM;
        # 'dynamic' mode is not implemented yet.
        if self.options['mode'] == 'static':
            self.Fsm.run(data=self.geojson, **kwargs)
        elif self.options['mode'] == 'dynamic':
            pass
| LowerSilesians/geo-squizzy | geosquizzy/structure/structure.py | Python | mit | 3,495 |
from __future__ import with_statement

from decimal import Decimal, InvalidOperation
from functools import wraps
import time

from django.core import serializers
from django.db import models
from django.db.models import Q
from django.db.models.signals import post_save
from django.db.utils import DatabaseError
from django.dispatch.dispatcher import receiver
from django.test import TestCase
from django.utils.unittest import expectedFailure, skip

from .fields import ListField, SetField, DictField, EmbeddedModelField
def count_calls(func):
    """Decorate *func* so every invocation is tallied on ``wrapper.calls``.

    The counter is a plain attribute on the returned wrapper, so callers may
    both read and reset it (see ``IterableFieldsTest.test_ordering``).
    ``functools.wraps`` preserves the wrapped function's metadata, which the
    bare original decorator lost.
    """
    @wraps(func)
    def wrapper(*args, **kwargs):
        wrapper.calls += 1
        return func(*args, **kwargs)
    # Set after wraps() runs so a pre-existing func.calls can't leak through.
    wrapper.calls = 0
    return wrapper
class Target(models.Model):
    # ForeignKey target for Source; `index` is its only payload and is used
    # by the ordering / select_related assertions below.
    index = models.IntegerField()
class Source(models.Model):
    # References a Target row; exercised by SelectRelatedTest and the
    # foreign-key ordering tests.
    target = models.ForeignKey(Target)
    index = models.IntegerField()
class DecimalModel(models.Model):
    # Plain DecimalField fixture.
    decimal = models.DecimalField(max_digits=9, decimal_places=2)
class DecimalKey(models.Model):
    # DecimalField used as the primary key to exercise key value conversion.
    decimal = models.DecimalField(max_digits=9, decimal_places=2, primary_key=True)
class DecimalParent(models.Model):
    # ForeignKey whose target's primary key is a Decimal.
    child = models.ForeignKey(DecimalKey)
class DecimalsList(models.Model):
    # ListField of foreign keys pointing at a Decimal-keyed model.
    decimals = ListField(models.ForeignKey(DecimalKey))
class ListModel(models.Model):
    # `integer` doubles as the primary key so tests can address rows 1..4.
    integer = models.IntegerField(primary_key=True)
    floating_point = models.FloatField()
    # String-list field plus variants exercising `default` and `null`.
    names = ListField(models.CharField)
    names_with_default = ListField(models.CharField(max_length=500),
                                   default=[])
    names_nullable = ListField(models.CharField(max_length=500), null=True)
class OrderedListModel(models.Model):
    # `ordering` is wrapped with count_calls so the tests can assert how many
    # times the sort-key function runs on save (see test_ordering).
    ordered_ints = ListField(models.IntegerField(max_length=500), default=[],
                             ordering=count_calls(lambda x: x), null=True)
    ordered_nullable = ListField(ordering=lambda x: x, null=True)
class SetModel(models.Model):
    # SetField of integers.
    setfield = SetField(models.IntegerField())
class DictModel(models.Model):
    # Typed, untyped-nullable, and auto_now-datetime dict fields.
    dictfield = DictField(models.IntegerField)
    dictfield_nullable = DictField(null=True)
    auto_now = DictField(models.DateTimeField(auto_now=True))
class EmbeddedModelFieldModel(models.Model):
    # Covers every EmbeddedModelField flavour: typed/untyped single values,
    # typed/untyped lists and dicts, and an ordered embedded list.
    simple = EmbeddedModelField('EmbeddedModel', null=True)
    simple_untyped = EmbeddedModelField(null=True)
    decimal_parent = EmbeddedModelField(DecimalParent, null=True)
    typed_list = ListField(EmbeddedModelField('SetModel'))
    typed_list2 = ListField(EmbeddedModelField('EmbeddedModel'))
    untyped_list = ListField(EmbeddedModelField())
    untyped_dict = DictField(EmbeddedModelField())
    ordered_list = ListField(EmbeddedModelField(),
                             ordering=lambda obj: obj.index)
class EmbeddedModel(models.Model):
    # Embedded payload: relation, a renamed column, and auto timestamps used
    # by the pre_save tests.
    some_relation = models.ForeignKey(DictModel, null=True)
    someint = models.IntegerField(db_column='custom')
    auto_now = models.DateTimeField(auto_now=True)
    auto_now_add = models.DateTimeField(auto_now_add=True)
class IterableFieldsTest(TestCase):
    """Filtering and round-trip behaviour of ListField, SetField, DictField.

    setUp stores four ListModel rows whose `names` lists are growing
    prefixes of `names`, so e.g. names__startswith='Sa' matches rows 3 and 4.
    """
    floats = [5.3, 2.6, 9.1, 1.58]
    names = [u'Kakashi', u'Naruto', u'Sasuke', u'Sakura']
    unordered_ints = [4, 2, 6, 1]
    def setUp(self):
        for i, float in zip(range(1, 5), IterableFieldsTest.floats):
            ListModel(integer=i, floating_point=float,
                      names=IterableFieldsTest.names[:i]).save()
    def test_startswith(self):
        self.assertEquals(
            dict([(entity.pk, entity.names) for entity in
                  ListModel.objects.filter(names__startswith='Sa')]),
            dict([(3, ['Kakashi', 'Naruto', 'Sasuke']),
                  (4, ['Kakashi', 'Naruto', 'Sasuke', 'Sakura']), ]))
    def test_options(self):
        self.assertEqual([entity.names_with_default for entity in
                          ListModel.objects.filter(names__startswith='Sa')],
                         [[], []])
        self.assertEqual([entity.names_nullable for entity in
                          ListModel.objects.filter(names__startswith='Sa')],
                         [None, None])
    def test_default_value(self):
        # Make sure default value is copied.
        ListModel().names_with_default.append(2)
        self.assertEqual(ListModel().names_with_default, [])
    def test_ordering(self):
        f = OrderedListModel._meta.fields[1]
        f.ordering.calls = 0
        # Ensure no ordering happens on assignment.
        obj = OrderedListModel()
        obj.ordered_ints = self.unordered_ints
        self.assertEqual(f.ordering.calls, 0)
        obj.save()
        self.assertEqual(OrderedListModel.objects.get().ordered_ints,
                         sorted(self.unordered_ints))
        # Ordering should happen only once, i.e. the order function may
        # be called N times at most (N being the number of items in the
        # list).
        self.assertLessEqual(f.ordering.calls, len(self.unordered_ints))
    def test_gt(self):
        self.assertEquals(
            dict([(entity.pk, entity.names) for entity in
                  ListModel.objects.filter(names__gt='Kakashi')]),
            dict([(2, [u'Kakashi', u'Naruto']),
                  (3, [u'Kakashi', u'Naruto', u'Sasuke']),
                  (4, [u'Kakashi', u'Naruto', u'Sasuke', u'Sakura']), ]))
    def test_lt(self):
        self.assertEquals(
            dict([(entity.pk, entity.names) for entity in
                  ListModel.objects.filter(names__lt='Naruto')]),
            dict([(1, [u'Kakashi']),
                  (2, [u'Kakashi', u'Naruto']),
                  (3, [u'Kakashi', u'Naruto', u'Sasuke']),
                  (4, [u'Kakashi', u'Naruto', u'Sasuke', u'Sakura']), ]))
    def test_gte(self):
        self.assertEquals(
            dict([(entity.pk, entity.names) for entity in
                  ListModel.objects.filter(names__gte='Sakura')]),
            dict([(3, [u'Kakashi', u'Naruto', u'Sasuke']),
                  (4, [u'Kakashi', u'Naruto', u'Sasuke', u'Sakura']), ]))
    def test_lte(self):
        self.assertEquals(
            dict([(entity.pk, entity.names) for entity in
                  ListModel.objects.filter(names__lte='Kakashi')]),
            dict([(1, [u'Kakashi']),
                  (2, [u'Kakashi', u'Naruto']),
                  (3, [u'Kakashi', u'Naruto', u'Sasuke']),
                  (4, [u'Kakashi', u'Naruto', u'Sasuke', u'Sakura']), ]))
    def test_equals(self):
        self.assertEquals([entity.names for entity in
                           ListModel.objects.filter(names='Sakura')],
                          [[u'Kakashi', u'Naruto', u'Sasuke', u'Sakura']])
        # Test with additonal pk filter (for DBs that have special pk
        # queries).
        query = ListModel.objects.filter(names='Sakura')
        self.assertEquals(query.get(pk=query[0].pk).names,
                          [u'Kakashi', u'Naruto', u'Sasuke', u'Sakura'])
    def test_is_null(self):
        self.assertEquals(ListModel.objects.filter(
            names__isnull=True).count(), 0)
    def test_exclude(self):
        self.assertEquals(
            dict([(entity.pk, entity.names) for entity in
                  ListModel.objects.all().exclude(names__lt='Sakura')]),
            dict([(3, [u'Kakashi', u'Naruto', u'Sasuke']),
                  (4, [u'Kakashi', u'Naruto', u'Sasuke', u'Sakura']), ]))
    def test_chained_filter(self):
        self.assertEquals(
            [entity.names for entity in ListModel.objects
                .filter(names='Sasuke').filter(names='Sakura')],
            [['Kakashi', 'Naruto', 'Sasuke', 'Sakura'], ])
        self.assertEquals(
            [entity.names for entity in ListModel.objects
                .filter(names__startswith='Sa').filter(names='Sakura')],
            [['Kakashi', 'Naruto', 'Sasuke', 'Sakura']])
        # Test across multiple columns. On app engine only one filter
        # is allowed to be an inequality filter.
        self.assertEquals(
            [entity.names for entity in ListModel.objects
                .filter(floating_point=9.1).filter(names__startswith='Sa')],
            [['Kakashi', 'Naruto', 'Sasuke'], ])
    def test_setfield(self):
        setdata = [1, 2, 3, 2, 1]
        # At the same time test value conversion.
        SetModel(setfield=map(str, setdata)).save()
        item = SetModel.objects.filter(setfield=3)[0]
        self.assertEqual(item.setfield, set(setdata))
        # This shouldn't raise an error because the default value is
        # an empty list.
        SetModel().save()
    def test_dictfield(self):
        DictModel(dictfield=dict(a=1, b='55', foo=3.14),
                  auto_now={'a': None}).save()
        item = DictModel.objects.get()
        self.assertEqual(item.dictfield, {u'a': 1, u'b': 55, u'foo': 3})
        dt = item.auto_now['a']
        self.assertNotEqual(dt, None)
        item.save()
        time.sleep(0.5) # Sleep to avoid false positive failure on the assertion below
        self.assertGreater(DictModel.objects.get().auto_now['a'], dt)
        item.delete()
        # Saving empty dicts shouldn't throw errors.
        DictModel().save()
        # Regression tests for djangoappengine issue #39.
        DictModel.add_to_class('new_dict_field', DictField())
        DictModel.objects.get()
    @skip("GAE specific?")
    def test_Q_objects(self):
        self.assertEquals(
            [entity.names for entity in ListModel.objects
                .exclude(Q(names__lt='Sakura') | Q(names__gte='Sasuke'))],
            [['Kakashi', 'Naruto', 'Sasuke', 'Sakura']])
    def test_list_with_foreignkeys(self):
        # Models are declared locally: the list stores raw primary keys.
        class ReferenceList(models.Model):
            keys = ListField(models.ForeignKey('Model'))
        class Model(models.Model):
            pass
        model1 = Model.objects.create()
        model2 = Model.objects.create()
        ReferenceList.objects.create(keys=[model1.pk, model2.pk])
        self.assertEqual(ReferenceList.objects.get().keys[0], model1.pk)
        self.assertEqual(ReferenceList.objects.filter(keys=model1.pk).count(), 1)
    def test_list_with_foreign_conversion(self):
        decimal = DecimalKey.objects.create(decimal=Decimal('1.5'))
        DecimalsList.objects.create(decimals=[decimal.pk])
    @expectedFailure
    def test_nested_list(self):
        """
        Some back-ends expect lists to be strongly typed or not contain
        other lists (e.g. GAE), this limits how the ListField can be
        used (unless the back-end were to serialize all lists).
        """
        class UntypedListModel(models.Model):
            untyped_list = ListField()
        UntypedListModel.objects.create(untyped_list=[1, [2, 3]])
class Child(models.Model):
    # Empty model embedded and referenced by Parent in the update() tests.
    pass
class Parent(models.Model):
    # Fixed integer pk plus one list/dict of each flavour, used by
    # EmbeddedModelFieldTest.test_update.
    id = models.IntegerField(primary_key=True)
    integer_list = ListField(models.IntegerField)
    integer_dict = DictField(models.IntegerField)
    embedded_list = ListField(EmbeddedModelField(Child))
    embedded_dict = DictField(EmbeddedModelField(Child))
class EmbeddedModelFieldTest(TestCase):
    """Round-trip, pre_save, typing and update behaviour of
    EmbeddedModelField in single, list and dict positions."""
    def assertEqualDatetime(self, d1, d2):
        """Compares d1 and d2, ignoring microseconds."""
        self.assertEqual(d1.replace(microsecond=0),
                         d2.replace(microsecond=0))
    def assertNotEqualDatetime(self, d1, d2):
        # Inverse of assertEqualDatetime, likewise ignoring microseconds.
        self.assertNotEqual(d1.replace(microsecond=0),
                            d2.replace(microsecond=0))
    def _simple_instance(self):
        # Helper: persist one model with an embedded value, reload it.
        EmbeddedModelFieldModel.objects.create(
            simple=EmbeddedModel(someint='5'))
        return EmbeddedModelFieldModel.objects.get()
    def test_simple(self):
        instance = self._simple_instance()
        self.assertIsInstance(instance.simple, EmbeddedModel)
        # Make sure get_prep_value is called.
        self.assertEqual(instance.simple.someint, 5)
        # Primary keys should not be populated...
        self.assertEqual(instance.simple.id, None)
        # ... unless set explicitly.
        instance.simple.id = instance.id
        instance.save()
        instance = EmbeddedModelFieldModel.objects.get()
        self.assertEqual(instance.simple.id, instance.id)
    def _test_pre_save(self, instance, get_field):
        # Make sure field.pre_save is called for embedded objects.
        from time import sleep
        instance.save()
        auto_now = get_field(instance).auto_now
        auto_now_add = get_field(instance).auto_now_add
        self.assertNotEqual(auto_now, None)
        self.assertNotEqual(auto_now_add, None)
        sleep(1) # FIXME
        instance.save()
        self.assertNotEqualDatetime(get_field(instance).auto_now,
                                    get_field(instance).auto_now_add)
        instance = EmbeddedModelFieldModel.objects.get()
        instance.save()
        # auto_now_add shouldn't have changed now, but auto_now should.
        self.assertEqualDatetime(get_field(instance).auto_now_add,
                                 auto_now_add)
        self.assertGreater(get_field(instance).auto_now, auto_now)
    def test_pre_save(self):
        obj = EmbeddedModelFieldModel(simple=EmbeddedModel())
        self._test_pre_save(obj, lambda instance: instance.simple)
    def test_pre_save_untyped(self):
        obj = EmbeddedModelFieldModel(simple_untyped=EmbeddedModel())
        self._test_pre_save(obj, lambda instance: instance.simple_untyped)
    def test_pre_save_in_list(self):
        obj = EmbeddedModelFieldModel(untyped_list=[EmbeddedModel()])
        self._test_pre_save(obj, lambda instance: instance.untyped_list[0])
    def test_pre_save_in_dict(self):
        obj = EmbeddedModelFieldModel(untyped_dict={'a': EmbeddedModel()})
        self._test_pre_save(obj, lambda instance: instance.untyped_dict['a'])
    def test_pre_save_list(self):
        # Also make sure auto_now{,add} works for embedded object *lists*.
        EmbeddedModelFieldModel.objects.create(typed_list2=[EmbeddedModel()])
        instance = EmbeddedModelFieldModel.objects.get()
        auto_now = instance.typed_list2[0].auto_now
        auto_now_add = instance.typed_list2[0].auto_now_add
        self.assertNotEqual(auto_now, None)
        self.assertNotEqual(auto_now_add, None)
        instance.typed_list2.append(EmbeddedModel())
        instance.save()
        instance = EmbeddedModelFieldModel.objects.get()
        self.assertEqualDatetime(instance.typed_list2[0].auto_now_add,
                                 auto_now_add)
        self.assertGreater(instance.typed_list2[0].auto_now, auto_now)
        self.assertNotEqual(instance.typed_list2[1].auto_now, None)
        self.assertNotEqual(instance.typed_list2[1].auto_now_add, None)
    def test_error_messages(self):
        # Wrong types must raise a TypeError naming the expected model.
        for kwargs, expected in (
                ({'simple': 42}, EmbeddedModel),
                ({'simple_untyped': 42}, models.Model),
                ({'typed_list': [EmbeddedModel()]}, SetModel)):
            self.assertRaisesRegexp(
                TypeError, "Expected instance of type %r." % expected,
                EmbeddedModelFieldModel(**kwargs).save)
    def test_typed_listfield(self):
        EmbeddedModelFieldModel.objects.create(
            typed_list=[SetModel(setfield=range(3)),
                        SetModel(setfield=range(9))],
            ordered_list=[Target(index=i) for i in xrange(5, 0, -1)])
        obj = EmbeddedModelFieldModel.objects.get()
        self.assertIn(5, obj.typed_list[1].setfield)
        self.assertEqual([target.index for target in obj.ordered_list],
                         range(1, 6))
    def test_untyped_listfield(self):
        EmbeddedModelFieldModel.objects.create(untyped_list=[
            EmbeddedModel(someint=7),
            OrderedListModel(ordered_ints=range(5, 0, -1)),
            SetModel(setfield=[1, 2, 2, 3])])
        instances = EmbeddedModelFieldModel.objects.get().untyped_list
        for instance, cls in zip(instances,
                                 [EmbeddedModel, OrderedListModel, SetModel]):
            self.assertIsInstance(instance, cls)
        self.assertNotEqual(instances[0].auto_now, None)
        self.assertEqual(instances[1].ordered_ints, range(1, 6))
    def test_untyped_dict(self):
        EmbeddedModelFieldModel.objects.create(untyped_dict={
            'a': SetModel(setfield=range(3)),
            'b': DictModel(dictfield={'a': 1, 'b': 2}),
            'c': DictModel(dictfield={}, auto_now={'y': 1})})
        data = EmbeddedModelFieldModel.objects.get().untyped_dict
        self.assertIsInstance(data['a'], SetModel)
        self.assertNotEqual(data['c'].auto_now['y'], None)
    def test_foreignkey_in_embedded_object(self):
        # The relation is stored as '<name>_id' and lazily resolved on access.
        simple = EmbeddedModel(some_relation=DictModel.objects.create())
        obj = EmbeddedModelFieldModel.objects.create(simple=simple)
        simple = EmbeddedModelFieldModel.objects.get().simple
        self.assertNotIn('some_relation', simple.__dict__)
        self.assertIsInstance(simple.__dict__['some_relation_id'],
                              type(obj.id))
        self.assertIsInstance(simple.some_relation, DictModel)
    def test_embedded_field_with_foreign_conversion(self):
        decimal = DecimalKey.objects.create(decimal=Decimal('1.5'))
        decimal_parent = DecimalParent.objects.create(child=decimal)
        EmbeddedModelFieldModel.objects.create(decimal_parent=decimal_parent)
    def test_update(self):
        """
        Test that update can be used on an a subset of objects
        containing collections of embedded instances; see issue #13.
        Also ensure that updated values are coerced according to
        collection field.
        """
        child1 = Child.objects.create()
        child2 = Child.objects.create()
        parent = Parent.objects.create(pk=1,
            integer_list=[1], integer_dict={'a': 2},
            embedded_list=[child1], embedded_dict={'a': child2})
        Parent.objects.filter(pk=1).update(
            integer_list=['3'], integer_dict={'b': '3'},
            embedded_list=[child2], embedded_dict={'b': child1})
        parent = Parent.objects.get()
        self.assertEqual(parent.integer_list, [3])
        self.assertEqual(parent.integer_dict, {'b': 3})
        self.assertEqual(parent.embedded_list, [child2])
        self.assertEqual(parent.embedded_dict, {'b': child1})
class BaseModel(models.Model):
    # Concrete base for the proxy-model tests below.
    pass
class ExtendedModel(BaseModel):
    # Multi-table child of BaseModel (adds a field of its own).
    name = models.CharField(max_length=20)
class BaseModelProxy(BaseModel):
    # Proxy over the base model — expected to work on non-relational backends.
    class Meta:
        proxy = True
class ExtendedModelProxy(ExtendedModel):
    # Proxy over an inherited (multi-table) model — expected to fail.
    class Meta:
        proxy = True
class ProxyTest(TestCase):
    """Proxy models work over a plain base but not over MTI inheritance."""
    def test_proxy(self):
        list(BaseModelProxy.objects.all())
    def test_proxy_with_inheritance(self):
        self.assertRaises(DatabaseError,
                          lambda: list(ExtendedModelProxy.objects.all()))
class SignalTest(TestCase):
    def test_post_save(self):
        """post_save must report created=True only for the very first save;
        re-saves of fetched instances (including from cached querysets and
        select_related) must report created=False."""
        created = []
        @receiver(post_save, sender=SetModel)
        def handle(**kwargs):
            created.append(kwargs['created'])
        SetModel().save()
        self.assertEqual(created, [True])
        SetModel.objects.get().save()
        self.assertEqual(created, [True, False])
        qs = SetModel.objects.all()
        list(qs)[0].save()
        self.assertEqual(created, [True, False, False])
        # Second evaluation uses the queryset cache -- still an update.
        list(qs)[0].save()
        self.assertEqual(created, [True, False, False, False])
        list(qs.select_related())[0].save()
        self.assertEqual(created, [True, False, False, False, False])
class SelectRelatedTest(TestCase):
    def test_select_related(self):
        # select_related() -- with and without an explicit field name --
        # must populate the related object's fields correctly.
        target = Target(index=5)
        target.save()
        Source(target=target, index=8).save()
        source = Source.objects.all().select_related()[0]
        self.assertEqual(source.target.pk, target.pk)
        self.assertEqual(source.target.index, target.index)
        source = Source.objects.all().select_related('target')[0]
        self.assertEqual(source.target.pk, target.pk)
        self.assertEqual(source.target.index, target.index)
class DBColumn(models.Model):
    # Field name ('a') deliberately differs from the backend column ('b') to
    # test ordering by a renamed column.
    a = models.IntegerField(db_column='b')
class OrderByTest(TestCase):
    """Ordering by FK value, by db_column-renamed fields, reverse() and
    order_by() chaining."""
    def test_foreign_keys(self):
        target1 = Target.objects.create(index=1)
        target2 = Target.objects.create(index=2)
        source1 = Source.objects.create(target=target1, index=3)
        source2 = Source.objects.create(target=target2, index=4)
        self.assertEqual(list(Source.objects.all().order_by('target')),
                         [source1, source2])
        self.assertEqual(list(Source.objects.all().order_by('-target')),
                         [source2, source1])
    def test_db_column(self):
        model1 = DBColumn.objects.create(a=1)
        model2 = DBColumn.objects.create(a=2)
        self.assertEqual(list(DBColumn.objects.all().order_by('a')),
                         [model1, model2])
        self.assertEqual(list(DBColumn.objects.all().order_by('-a')),
                         [model2, model1])
    def test_reverse(self):
        model1 = DBColumn.objects.create(a=1)
        model2 = DBColumn.objects.create(a=2)
        self.assertEqual(list(DBColumn.objects.all().order_by('a').reverse()),
                         [model2, model1])
        self.assertEqual(list(DBColumn.objects.all().order_by('-a').reverse()),
                         [model1, model2])
    def test_chain(self):
        # The last order_by() in a chain wins.
        model1 = Target.objects.create(index=1)
        model2 = Target.objects.create(index=2)
        self.assertEqual(
            list(Target.objects.all().order_by('index').order_by('-index')),
            [model2, model1])
class SerializableSetModel(models.Model):
    # Set fields used by SerializationTest (JSON has no set type).
    setfield = SetField(models.IntegerField())
    setcharfield = SetField(models.CharField(), null=True)
class SerializationTest(TestCase):
    """
    JSON doesn't support sets, so they need to be converted to lists
    for serialization; see issue #12.
    TODO: Check if the fix works with embedded models / nested sets.
    """
    names = ['foo', 'bar', 'baz', 'monkey']
    def test_json_listfield(self):
        # Round-trip list fields through the JSON serializer.
        for i in range(1, 5):
            ListModel(integer=i, floating_point=0,
                      names=SerializationTest.names[:i]).save()
        objects = ListModel.objects.all()
        serialized = serializers.serialize('json', objects)
        deserialized = serializers.deserialize('json', serialized)
        for m in deserialized:
            integer = m.object.integer
            names = m.object.names
            self.assertEqual(names, SerializationTest.names[:integer])
    def test_json_setfield(self):
        # Sets are serialized as lists; they must come back as sets.
        for i in range(1, 5):
            SerializableSetModel(
                setfield=set([i - 1]),
                setcharfield=set(SerializationTest.names[:i])).save()
        objects = SerializableSetModel.objects.all()
        serialized = serializers.serialize('json', objects)
        deserialized = serializers.deserialize('json', serialized)
        for m in deserialized:
            integer = m.object.setfield.pop()
            names = m.object.setcharfield
            self.assertEqual(names, set(SerializationTest.names[:integer + 1]))
class String(models.Model):
    # Simple char field used by the lazy/marked string tests below.
    s = models.CharField(max_length=20)
class LazyObjectsTest(TestCase):
    """Lazy translations and safe/escaped strings must behave exactly like
    plain strings in lookups on nonrel back-ends."""
    def test_translation(self):
        """
        Using a lazy translation call should work just the same as
        a non-lazy one (or a plain string).
        """
        from django.utils.translation import ugettext_lazy
        a = String.objects.create(s='a')
        b = String.objects.create(s=ugettext_lazy('b'))
        self.assertEqual(String.objects.get(s='a'), a)
        self.assertEqual(list(String.objects.filter(s='a')), [a])
        self.assertEqual(list(String.objects.filter(s__lte='a')), [a])
        self.assertEqual(String.objects.get(s=ugettext_lazy('a')), a)
        self.assertEqual(
            list(String.objects.filter(s__lte=ugettext_lazy('a'))), [a])
        self.assertEqual(String.objects.get(s='b'), b)
        self.assertEqual(list(String.objects.filter(s='b')), [b])
        self.assertEqual(list(String.objects.filter(s__gte='b')), [b])
        self.assertEqual(String.objects.get(s=ugettext_lazy('b')), b)
        self.assertEqual(
            list(String.objects.filter(s__gte=ugettext_lazy('b'))), [b])
    def test_marked_strings(self):
        """
        Check that strings marked as safe or needing escaping do not
        confuse the back-end.
        """
        from django.utils.safestring import mark_safe, mark_for_escaping
        a = String.objects.create(s='a')
        b = String.objects.create(s=mark_safe('b'))
        c = String.objects.create(s=mark_for_escaping('c'))
        self.assertEqual(String.objects.get(s='a'), a)
        self.assertEqual(list(String.objects.filter(s__startswith='a')), [a])
        self.assertEqual(String.objects.get(s=mark_safe('a')), a)
        self.assertEqual(
            list(String.objects.filter(s__startswith=mark_safe('a'))), [a])
        self.assertEqual(String.objects.get(s=mark_for_escaping('a')), a)
        self.assertEqual(
            list(String.objects.filter(s__startswith=mark_for_escaping('a'))),
            [a])
        self.assertEqual(String.objects.get(s='b'), b)
        self.assertEqual(list(String.objects.filter(s__startswith='b')), [b])
        self.assertEqual(String.objects.get(s=mark_safe('b')), b)
        self.assertEqual(
            list(String.objects.filter(s__startswith=mark_safe('b'))), [b])
        self.assertEqual(String.objects.get(s=mark_for_escaping('b')), b)
        self.assertEqual(
            list(String.objects.filter(s__startswith=mark_for_escaping('b'))),
            [b])
        self.assertEqual(String.objects.get(s='c'), c)
        self.assertEqual(list(String.objects.filter(s__startswith='c')), [c])
        self.assertEqual(String.objects.get(s=mark_safe('c')), c)
        self.assertEqual(
            list(String.objects.filter(s__startswith=mark_safe('c'))), [c])
        self.assertEqual(String.objects.get(s=mark_for_escaping('c')), c)
        self.assertEqual(
            list(String.objects.filter(s__startswith=mark_for_escaping('c'))),
            [c])
class FeaturesTest(TestCase):
    """
    Some things are unlikely to cause problems for SQL back-ends, but
    require special handling in nonrel.
    """
    def test_subqueries(self):
        """
        Django includes SQL statements as WHERE tree values when
        filtering using a QuerySet -- this won't "just work" with
        nonrel back-ends.
        TODO: Subqueries handling may require a bit of Django
        changing, but should be easy to support.
        """
        target = Target.objects.create(index=1)
        source = Source.objects.create(index=2, target=target)
        targets = Target.objects.all()
        # Passing an unevaluated queryset is a (SQL) subquery -> must raise.
        with self.assertRaises(DatabaseError):
            Source.objects.get(target__in=targets)
        # Evaluating it first yields a plain list of values -> supported.
        self.assertEqual(
            Source.objects.get(target__in=list(targets)),
            source)
class DecimalFieldTest(TestCase):
    """
    Some NoSQL databases can't handle Decimals, so respective back-ends
    convert them to strings or floats. This can cause some precision
    and sorting problems.
    """
    def setUp(self):
        # Mixed magnitudes on purpose: naive string ordering of these values
        # differs from numeric ordering, which test_order relies on.
        for d in (Decimal('12345.6789'), Decimal('5'), Decimal('345.67'),
                  Decimal('45.6'), Decimal('2345.678'),):
            DecimalModel(decimal=d).save()
    def test_filter(self):
        # Stored values must come back as Decimal with the field's precision.
        d = DecimalModel.objects.get(decimal=Decimal('5.0'))
        self.assertTrue(isinstance(d.decimal, Decimal))
        # assertEquals is a deprecated alias of assertEqual.
        self.assertEqual(str(d.decimal), '5.00')
        d = DecimalModel.objects.get(decimal=Decimal('45.60'))
        self.assertEqual(str(d.decimal), '45.60')
        # Filter argument should be converted to Decimal with 2 decimal
        # places.
        d = DecimalModel.objects.get(decimal='0000345.67333333333333333')
        self.assertEqual(str(d.decimal), '345.67')
    def test_order(self):
        """
        Standard Django decimal-to-string conversion isn't monotonic
        (see `django.db.backends.util.format_number`).
        """
        rows = DecimalModel.objects.all().order_by('decimal')
        values = [d.decimal for d in rows]
        self.assertEqual(values, sorted(values))
    def test_sign_extend(self):
        # Negative zero must round-trip without corrupting storage.
        DecimalModel(decimal=Decimal('-0.0')).save()
        try:
            # If we've written a valid string we should be able to
            # retrieve the DecimalModel object without error.
            DecimalModel.objects.filter(decimal__lt=1)[0]
        except InvalidOperation:
            self.fail("stored decimal could not be read back")
class DeleteModel(models.Model):
    # Explicit integer pk plus a flag used to test filtered deletes.
    key = models.IntegerField(primary_key=True)
    deletable = models.BooleanField()
class BasicDeleteTest(TestCase):
    """Deletion of single objects, whole tables, and filtered subsets."""
    def setUp(self):
        # Keys 1..9; the 4 even keys are flagged deletable, 5 odd ones not.
        for i in range(1, 10):
            DeleteModel(key=i, deletable=i % 2 == 0).save()
    def test_model_delete(self):
        d = DeleteModel.objects.get(pk=1)
        d.delete()
        with self.assertRaises(DeleteModel.DoesNotExist):
            DeleteModel.objects.get(pk=1)
    def test_delete_all(self):
        DeleteModel.objects.all().delete()
        # assertEquals is a deprecated alias of assertEqual.
        self.assertEqual(0, DeleteModel.objects.all().count())
    def test_delete_filtered(self):
        DeleteModel.objects.filter(deletable=True).delete()
        self.assertEqual(5, DeleteModel.objects.all().count())
class M2MDeleteChildModel(models.Model):
    # Target of the (unsupported) many-to-many relation below.
    key = models.IntegerField(primary_key=True)
class M2MDeleteModel(models.Model):
    key = models.IntegerField(primary_key=True)
    deletable = models.BooleanField()
    # M2M fields aren't supported by django-nonrel but may exist unused.
    children = models.ManyToManyField(M2MDeleteChildModel, blank=True)
class ManyToManyDeleteTest(TestCase):
    """
    Django-nonrel doesn't support many-to-many, but there may be
    models that are used which contain them, even if they're not
    accessed. This test ensures they can be deleted.
    """
    def setUp(self):
        for i in range(1, 10):
            M2MDeleteModel(key=i, deletable=i % 2 == 0).save()
    def test_model_delete(self):
        d = M2MDeleteModel.objects.get(pk=1)
        d.delete()
        with self.assertRaises(M2MDeleteModel.DoesNotExist):
            M2MDeleteModel.objects.get(pk=1)
    # Bulk deletes still fail in the presence of an M2M field; kept as
    # known failures until supported.
    @expectedFailure
    def test_delete_all(self):
        M2MDeleteModel.objects.all().delete()
        # assertEquals is a deprecated alias of assertEqual.
        self.assertEqual(0, M2MDeleteModel.objects.all().count())
    @expectedFailure
    def test_delete_filtered(self):
        M2MDeleteModel.objects.filter(deletable=True).delete()
        self.assertEqual(5, M2MDeleteModel.objects.all().count())
class QuerysetModel(models.Model):
    # Minimal model for queryset evaluation checks.
    key = models.IntegerField(primary_key=True)
class QuerysetTest(TestCase):
    """Sanity checks for `.all()` and `.none()` result counts.

    NOTE(review): the original docstring was truncated ("Django 1.6
    changes how"); it presumably referred to Django 1.6's queryset
    behaviour changes -- confirm against project history.
    """
    def setUp(self):
        for i in range(10):
            QuerysetModel.objects.create(key=i + 1)
    def test_all(self):
        self.assertEqual(10, len(QuerysetModel.objects.all()))
    def test_none(self):
        self.assertEqual(0, len(QuerysetModel.objects.none()))
| luxnovalabs/enjigo_door | web_interface/djangotoolbox/tests.py | Python | unlicense | 30,967 |
import Sofa
import numpy
import numpy.linalg
from SofaPython import Quaternion
import warnings
def decomposeInertia(inertia):
    """ Decompose an inertia matrix into
    - a diagonal inertia
    - the rotation (quaternion) to get to the frame in which the inertia is diagonal

    NOTE(review): assumes `inertia` is a symmetric 3x3 matrix (a physical
    inertia tensor), so the SVD basis is an orthonormal eigenbasis -- confirm
    callers never pass a non-symmetric matrix.
    """
    assert not numpy.isnan(inertia).any(), "input inertia matrix contains NaNs"
    U, diagonal_inertia, V = numpy.linalg.svd(inertia)
    # det should be 1->rotation or -1->reflexion
    if numpy.linalg.det(U) < 0 : # reflexion
        # made it a rotation by negating a column
        U[:,0] = -U[:,0]
    inertia_rotation = Quaternion.from_matrix( U )
    return diagonal_inertia, inertia_rotation
class RigidMassInfo(object):
    """A structure to set and store a RigidMass as used by sofa: mass, com,
    diagonal_inertia and inertia_rotation.
    """
    # NOTE: the docstring above was originally placed *after* __slots__,
    # where it was an inert expression statement instead of the class
    # __doc__; moved to the canonical first position.
    __slots__ = 'mass', 'com', 'diagonal_inertia', 'inertia_rotation', 'density'

    def __init__(self):
        self.mass = 0.                            # total mass
        self.com = [0., 0., 0.]                   # center of mass
        self.diagonal_inertia = [0., 0., 0.]      # principal moments of inertia
        self.inertia_rotation = Quaternion.id()   # rotation to the principal frame
        self.density = 0.

    def setFromMesh(self, filepath, density=1000, scale3d=(1, 1, 1), rotation=(0, 0, 0)):
        """Compute the rigid mass properties from a mesh file.

        TODO: a single scalar for scale could be enough.

        :param filepath: path of the mesh file passed to Sofa.generateRigid
        :param density: material density used to derive the mass
        :param scale3d: per-axis scaling applied to the mesh (default tuples
                        replace the original mutable list defaults)
        :param rotation: per-axis rotation applied to the mesh
        """
        self.density = density
        rigidInfo = Sofa.generateRigid( filepath, density, scale3d[0], scale3d[1], scale3d[2],
                                        rotation[0], rotation[1], rotation[2] )
        # generateRigid packs [mass, com(3), diagonal_inertia(3), inertia_rotation(4)].
        self.mass = rigidInfo[0]
        self.com = rigidInfo[1:4]
        self.diagonal_inertia = rigidInfo[4:7]
        self.inertia_rotation = rigidInfo[7:11]
        if not self.mass and density:
            # A non-zero density should never yield zero mass for a valid mesh.
            warnings.warn("zero mass when processing {0}".format(filepath))

    def setFromInertia(self, Ixx, Ixy, Ixz, Iyy, Iyz, Izz):
        """Set diagonal_inertia and inertia_rotation from the six
        independent coefficients of the full (symmetric) inertia matrix.
        """
        I = numpy.array([ [Ixx, Ixy, Ixz],
                          [Ixy, Iyy, Iyz],
                          [Ixz, Iyz, Izz] ])
        self.diagonal_inertia, self.inertia_rotation = decomposeInertia(I)

    def getWorldInertia(self):
        """ @return inertia with respect to world reference frame
        """
        R = Quaternion.to_matrix(self.inertia_rotation)
        # I in world axis
        I = numpy.dot(R.transpose(), numpy.dot(numpy.diag(self.diagonal_inertia), R))
        # I at world origin, using // axis theorem
        # see http://www.colorado.edu/physics/phys3210/phys3210_sp14/lecnotes.2014-03-07.More_on_Inertia_Tensors.html
        # or https://en.wikipedia.org/wiki/Moment_of_inertia
        a = numpy.array(self.com).reshape(3, 1)
        return I + self.mass*(pow(numpy.linalg.norm(self.com), 2)*numpy.eye(3) - a*a.transpose())

    def __add__(self, other):
        """Combine two rigid bodies into one equivalent rigid mass."""
        res = RigidMassInfo()
        # sum mass
        res.mass = self.mass + other.mass
        assert res.mass, "zero total mass"
        # barycentric center of mass
        res.com = (self.mass * numpy.array(self.com)
                   + other.mass*numpy.array(other.com)) / res.mass
        # inertia tensors
        # resultant inertia in world frame
        res_I_w = self.getWorldInertia() + other.getWorldInertia()
        # resultant inertia at com, world axis, using // axis theorem
        a = numpy.array(res.com).reshape(3, 1)
        res_I_com = res_I_w - res.mass*(pow(numpy.linalg.norm(res.com), 2)*numpy.eye(3) - a*a.transpose())
        res.diagonal_inertia, res.inertia_rotation = decomposeInertia(res_I_com)
        # Combined density: when one side is massless/density-less, keep the
        # other; otherwise use the mass-weighted harmonic combination (i.e.
        # total mass divided by total volume).
        if 0. == self.density:
            res.density = other.density
        elif 0. == other.density:
            res.density = self.density
        else :
            res.density = self.density*other.density*(self.mass+other.mass) / ( other.density*self.mass + self.density*other.mass )
        return res
| Anatoscope/sofa | applications/plugins/SofaPython/python/SofaPython/mass.py | Python | lgpl-2.1 | 3,920 |
#!/usr/bin/env python3
class UniqueIndexViolationCheck:
    # Lists every unique index on a pg_catalog table together with the
    # names of the columns it covers.
    unique_indexes_query = """
        select table_oid, index_name, table_name, array_agg(attname) as column_names
        from pg_attribute, (
            select pg_index.indrelid as table_oid, index_class.relname as index_name, table_class.relname as table_name, unnest(pg_index.indkey) as column_index
            from pg_index, pg_class index_class, pg_class table_class
            where pg_index.indisunique='t'
            and index_class.relnamespace = (select oid from pg_namespace where nspname = 'pg_catalog')
            and index_class.relkind = 'i'
            and index_class.oid = pg_index.indexrelid
            and table_class.oid = pg_index.indrelid
        ) as unique_catalog_index_columns
        where attnum = column_index
        and attrelid = table_oid
        group by table_oid, index_name, table_name;
    """

    def __init__(self):
        # Template detecting keys that appear more than once on a single
        # segment, probed both through gp_dist_random (segment-local scan)
        # and through the plain table.
        self.violated_segments_query = """
            select distinct(gp_segment_id) from (
                (select gp_segment_id, %s
                from gp_dist_random('%s')
                where (%s) is not null
                group by gp_segment_id, %s
                having count(*) > 1)
                union
                (select gp_segment_id, %s
                from %s
                where (%s) is not null
                group by gp_segment_id, %s
                having count(*) > 1)
            ) as violations
        """

    def runCheck(self, db_connection):
        """Return a list of dicts describing every unique-index violation
        found through `db_connection`."""
        violations = []
        index_rows = db_connection.query(self.unique_indexes_query).getresult()
        for table_oid, index_name, table_name, column_names in index_rows:
            columns = ",".join(column_names)
            sql = self.get_violated_segments_query(table_name, columns)
            rows = db_connection.query(sql).getresult()
            if not rows:
                continue
            violations.append(dict(table_oid=table_oid,
                                   table_name=table_name,
                                   index_name=index_name,
                                   column_names=columns,
                                   violated_segments=[row[0] for row in rows]))
        return violations

    def get_violated_segments_query(self, table_name, column_names):
        """Fill the violation template for one table / column list."""
        return self.violated_segments_query % (
            column_names, table_name, column_names, column_names,
            column_names, table_name, column_names, column_names)
| 50wu/gpdb | gpMgmt/bin/gpcheckcat_modules/unique_index_violation_check.py | Python | apache-2.0 | 2,547 |
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2014, 2015, 2016 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Group Forms."""
from __future__ import absolute_import, print_function
from flask_babelex import gettext as _
from flask_wtf import FlaskForm
from sqlalchemy_utils.types.choice import ChoiceType
from wtforms import RadioField, TextAreaField
from wtforms.validators import DataRequired, Email, StopValidation, \
ValidationError
from wtforms_alchemy import ClassMap, model_form_factory
from .models import Group
ModelForm = model_form_factory(FlaskForm)
class EmailsValidator(object):
    """Validate a TextAreaField holding one email address per line.

    The whole field is first checked with DataRequired; every non-empty
    line is then validated individually with the Email validator.
    """
    def __init__(self):
        """Create the underlying wtforms validators once."""
        self.validate_data = DataRequired()
        self.validate_email = Email()

    def __call__(self, form, field):
        """Run DataRequired on the field, then Email on each address."""
        self.validate_data(form, field)
        original_data = field.data
        for line in original_data.splitlines():
            if not line:
                # Skip blank lines, as filter(None, ...) did originally.
                continue
            try:
                # The Email validator reads field.data, so temporarily swap
                # in the single address being checked.
                field.data = line
                self.validate_email(form, field)
            except (ValidationError, StopValidation):
                raise ValidationError('Invalid email: ' + line)
            finally:
                # Always restore the full field contents.
                field.data = original_data
class GroupForm(ModelForm):
    """Form for creating and updating a group."""
    class Meta:
        """Metadata class."""
        model = Group
        # Render ChoiceType columns as radio buttons instead of selects.
        type_map = ClassMap({ChoiceType: RadioField})
        # `is_managed` is controlled programmatically, never via the form.
        exclude = [
            'is_managed',
        ]
class NewMemberForm(FlaskForm):
    """Form for adding new members to a group."""
    # Free-text field: one email address per line, each validated separately.
    emails = TextAreaField(
        description=_(
            'Required. Provide list of the emails of the users'
            ' you wish to be added. Put each email in new line.'),
        validators=[EmailsValidator()]
    )
| inveniosoftware/invenio-groups | invenio_groups/forms.py | Python | gpl-2.0 | 2,700 |
# -*- coding: utf-8 -*-
#
# Copyright (c) 2019 SUNET
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or
# without modification, are permitted provided that the following
# conditions are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# 3. Neither the name of the SUNET nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
from typing import Any, Mapping, Optional, cast
from flask import current_app
from eduid_common.api import am, mail_relay, msg, translation
from eduid_common.api.am import AmRelay
from eduid_common.api.app import EduIDBaseApp
from eduid_common.api.mail_relay import MailRelay
from eduid_common.api.msg import MsgRelay
from eduid_common.config.base import FlaskConfig
from eduid_common.config.parsers import load_config
from eduid_userdb.authninfo import AuthnInfoDB
from eduid_userdb.logs import ProofingLog
from eduid_userdb.reset_password import ResetPasswordStateDB, ResetPasswordUserDB
from eduid_webapp.reset_password.settings.common import ResetPasswordConfig
__author__ = 'eperez'
class ResetPasswordApp(EduIDBaseApp):
    """Flask application holding the celery relays and databases used by
    the reset-password views."""
    def __init__(self, config: ResetPasswordConfig, **kwargs):
        super().__init__(config, **kwargs)
        self.conf = config
        # Init celery
        self.msg_relay = MsgRelay(config)
        self.am_relay = AmRelay(config)
        self.mail_relay = MailRelay(config)
        # Init dbs
        self.private_userdb = ResetPasswordUserDB(self.conf.mongo_uri)
        self.password_reset_state_db = ResetPasswordStateDB(self.conf.mongo_uri)
        self.proofing_log = ProofingLog(self.conf.mongo_uri)
        self.authninfo_db = AuthnInfoDB(self.conf.mongo_uri)
# Typed alias of Flask's current_app so views get attribute completion and
# type checking without repeating the cast.
current_reset_password_app: ResetPasswordApp = cast(ResetPasswordApp, current_app)
def init_reset_password_app(
    name: str = 'reset_password', test_config: Optional[Mapping[str, Any]] = None
) -> ResetPasswordApp:
    """
    :param name: The name of the instance, it will affect the configuration loaded.
    :param test_config: Override config. Used in tests.
    :return: the fully initialised ResetPasswordApp
    """
    config = load_config(typ=ResetPasswordConfig, app_name=name, ns='webapp', test_config=test_config)
    app = ResetPasswordApp(config)
    app.logger.info(f'Init {app}...')
    # Register views
    from eduid_webapp.reset_password.views.reset_password import reset_password_views
    app.register_blueprint(reset_password_views)
    # i18n support for user-facing messages.
    translation.init_babel(app)
    return app
| SUNET/eduid-webapp | src/eduid_webapp/reset_password/app.py | Python | bsd-3-clause | 3,677 |
import pytest
import doctest
from insights.core import ContentException
from insights.parsers import postconf, SkipException
from insights.parsers.postconf import PostconfBuiltin, Postconf, _Postconf
from insights.tests import context_wrap
V_OUT1 = """
""".strip()
V_OUT2 = """
smtpd_tls_loglevel = 0
smtpd_tls_mandatory_ciphers = medium
smtpd_tls_mandatory_exclude_ciphers =
smtpd_tls_mandatory_protocols = !SSLv2, !SSLv3, !TLSv1
""".strip()
V_OUT3 = """
command not found
""".strip()
def test_PostconfBuiltin():
    # Empty output -> parser raises SkipException; "command not found"
    # output -> ContentException from the shared parsing engine.
    with pytest.raises(SkipException):
        PostconfBuiltin(context_wrap(V_OUT1))
    with pytest.raises(ContentException):
        PostconfBuiltin(context_wrap(V_OUT3))
    p = PostconfBuiltin(context_wrap(V_OUT2))
    assert p['smtpd_tls_loglevel'] == '0'
    assert p['smtpd_tls_mandatory_ciphers'] == 'medium'
    assert p['smtpd_tls_mandatory_exclude_ciphers'] == ''
    assert p['smtpd_tls_mandatory_protocols'] == '!SSLv2, !SSLv3, !TLSv1'
def test_Postconf():
    # Same expectations as test_PostconfBuiltin, exercised against the
    # Postconf parser variant.
    with pytest.raises(SkipException):
        Postconf(context_wrap(V_OUT1))
    with pytest.raises(ContentException):
        Postconf(context_wrap(V_OUT3))
    p = Postconf(context_wrap(V_OUT2))
    assert p['smtpd_tls_loglevel'] == '0'
    assert p['smtpd_tls_mandatory_ciphers'] == 'medium'
    assert p['smtpd_tls_mandatory_exclude_ciphers'] == ''
    assert p['smtpd_tls_mandatory_protocols'] == '!SSLv2, !SSLv3, !TLSv1'
def test_empty():
    # Completely empty input is rejected by both parsers.
    with pytest.raises(SkipException):
        PostconfBuiltin(context_wrap(""))
    with pytest.raises(SkipException):
        Postconf(context_wrap(""))
def test_invalid():
    # Content without any 'key = value' lines is also rejected.
    with pytest.raises(SkipException):
        PostconfBuiltin(context_wrap("asdf"))
    with pytest.raises(SkipException):
        Postconf(context_wrap("asdf"))
def test_doc_examples():
    # Run the module's doctests with pre-built parser objects in scope.
    env = {
        'postconfb': PostconfBuiltin(context_wrap(V_OUT2)),
        'postconf': Postconf(context_wrap(V_OUT2)),
        '_postconf': _Postconf(context_wrap(V_OUT2)),
    }
    failed, total = doctest.testmod(postconf, globs=env)
    assert failed == 0
    # TODO
    # env = {
    #     'postconf': Postconf(context_wrap(V_OUT2)),
    # }
    # failed, total = doctest.testmod(postconf, globs=env)
    # assert failed == 0
| RedHatInsights/insights-core | insights/parsers/tests/test_postconf.py | Python | apache-2.0 | 2,247 |
# Codeforces 148A "Insomnia cure" (Python 2 script; raw_input/print).
# Reads the four step sizes k, l, m, n and the dragon count d, then counts
# how many dragons 1..d are hit, i.e. divisible by at least one step size.
x = []
d = 0
for _ in range(4):
    x.append(int(raw_input()))
# input() here reads the dragon count; `d` is reused as the hit counter.
for i in range(input()):
    if any(((i + 1) % j == 0) for j in x):
        d += 1
print d
| yamstudio/Codeforces | 100/148A - Insomnia cure.py | Python | gpl-3.0 | 146 |
import functools
import os
import random
import shutil
import subprocess
import tempfile
from datetime import datetime
class OpenSSL(object):
    def __init__(self, logger, binary, base_path, conf_path, hosts, duration,
                 base_conf_path=None):
        """Context manager for interacting with OpenSSL.
        Creates a config file for the duration of the context.
        :param logger: stdlib logger or python structured logger
        :param binary: path to openssl binary
        :param base_path: path to directory for storing certificates
        :param conf_path: path for configuration file storing configuration data
        :param hosts: list of hosts to include in configuration (or None if not
                      generating host certificates)
        :param duration: Certificate duration in days
        :param base_conf_path: optional config exported via OPENSSL_CONF to
                               subprocess invocations"""
        self.base_path = base_path
        self.binary = binary
        self.conf_path = conf_path
        self.base_conf_path = base_conf_path
        self.logger = logger
        self.proc = None
        self.cmd = []
        self.hosts = hosts
        self.duration = duration
    def __enter__(self):
        # Write the generated config; it only exists for the context lifetime.
        with open(self.conf_path, "w") as f:
            f.write(get_config(self.base_path, self.hosts, self.duration))
        return self
    def __exit__(self, *args, **kwargs):
        os.unlink(self.conf_path)
    def log(self, line):
        # Structured (mozlog) loggers expose process_output; fall back to
        # plain debug logging otherwise.
        if hasattr(self.logger, "process_output"):
            self.logger.process_output(self.proc.pid if self.proc is not None else None,
                                       line.decode("utf8", "replace"),
                                       command=" ".join(self.cmd))
        else:
            self.logger.debug(line)
    def __call__(self, cmd, *args, **kwargs):
        """Run a command using OpenSSL in the current context.
        :param cmd: The openssl subcommand to run
        :param *args: Additional arguments to pass to the command
        :raises subprocess.CalledProcessError: on non-zero exit status
        """
        self.cmd = [self.binary, cmd]
        # The x509 subcommand does not take -config; all others get ours.
        if cmd != "x509":
            self.cmd += ["-config", self.conf_path]
        self.cmd += list(args)
        env = os.environ.copy()
        if self.base_conf_path is not None:
            env["OPENSSL_CONF"] = self.base_conf_path.encode("utf8")
        # stderr is merged into stdout so everything lands in one log stream.
        self.proc = subprocess.Popen(self.cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
                                     env=env)
        stdout, stderr = self.proc.communicate()
        self.log(stdout)
        if self.proc.returncode != 0:
            raise subprocess.CalledProcessError(self.proc.returncode, self.cmd,
                                                output=stdout)
        self.cmd = []
        self.proc = None
        return stdout
def make_subject(common_name,
                 country=None,
                 state=None,
                 locality=None,
                 organization=None,
                 organization_unit=None):
    """Build an OpenSSL subject string ("/C=../ST=../CN=..").

    Components that are None are omitted; literal slashes in values are
    escaped so they cannot terminate a component early.

    :param common_name: CN component (required)
    :param country: optional C component
    :param state: optional ST component
    :param locality: optional L component
    :param organization: optional O component
    :param organization_unit: optional OU component
    :return: subject string suitable for ``openssl req -subj``
    """
    # Explicit ordered pairs; the original looked values up via
    # locals()[name], which is fragile (implementation-defined snapshot
    # semantics) and invisible to static analysis. CN stays last.
    components = [("C", country),
                  ("ST", state),
                  ("L", locality),
                  ("O", organization),
                  ("OU", organization_unit),
                  ("CN", common_name)]
    return "".join("/%s=%s" % (key, value.replace("/", "\\/"))
                   for key, value in components
                   if value is not None)
def make_alt_names(hosts):
    """Return a subjectAltName value listing each host as a DNS entry."""
    entries = ["DNS:%s" % host for host in hosts]
    return ",".join(entries)
def get_config(root_dir, hosts, duration=30):
    """Build the contents of the OpenSSL configuration file.

    :param root_dir: directory holding the CA database and certificates
    :param hosts: hostnames for the subjectAltName extension, or None to
                  omit the extension entirely
    :param duration: certificate/CRL validity in days
    """
    san_line = "" if hosts is None else "subjectAltName = %s" % make_alt_names(hosts)
    if os.path.sep == "\\":
        # This seems to be needed for the Shining Light OpenSSL on
        # Windows, at least.
        root_dir = root_dir.replace("\\", "\\\\")
    template = """[ ca ]
default_ca = CA_default
[ CA_default ]
dir = %(root_dir)s
certs = $dir
new_certs_dir = $certs
crl_dir = $dir%(sep)scrl
database = $dir%(sep)sindex.txt
private_key = $dir%(sep)scakey.pem
certificate = $dir%(sep)scacert.pem
serial = $dir%(sep)sserial
crldir = $dir%(sep)scrl
crlnumber = $dir%(sep)scrlnumber
crl = $crldir%(sep)scrl.pem
RANDFILE = $dir%(sep)sprivate%(sep)s.rand
x509_extensions = usr_cert
name_opt = ca_default
cert_opt = ca_default
default_days = %(duration)d
default_crl_days = %(duration)d
default_md = sha256
preserve = no
policy = policy_anything
copy_extensions = copy
[ policy_anything ]
countryName = optional
stateOrProvinceName = optional
localityName = optional
organizationName = optional
organizationalUnitName = optional
commonName = supplied
emailAddress = optional
[ req ]
default_bits = 2048
default_keyfile = privkey.pem
distinguished_name = req_distinguished_name
attributes = req_attributes
x509_extensions = v3_ca
# Passwords for private keys if not present they will be prompted for
# input_password = secret
# output_password = secret
string_mask = utf8only
req_extensions = v3_req
[ req_distinguished_name ]
countryName = Country Name (2 letter code)
countryName_default = AU
countryName_min = 2
countryName_max = 2
stateOrProvinceName = State or Province Name (full name)
stateOrProvinceName_default =
localityName = Locality Name (eg, city)
0.organizationName = Organization Name
0.organizationName_default = Web Platform Tests
organizationalUnitName = Organizational Unit Name (eg, section)
#organizationalUnitName_default =
commonName = Common Name (e.g. server FQDN or YOUR name)
commonName_max = 64
emailAddress = Email Address
emailAddress_max = 64
[ req_attributes ]
[ usr_cert ]
basicConstraints=CA:false
subjectKeyIdentifier=hash
authorityKeyIdentifier=keyid,issuer
[ v3_req ]
basicConstraints = CA:FALSE
keyUsage = nonRepudiation, digitalSignature, keyEncipherment
extendedKeyUsage = serverAuth
%(san_line)s
[ v3_ca ]
basicConstraints = CA:true
subjectKeyIdentifier=hash
authorityKeyIdentifier=keyid:always,issuer:always
keyUsage = keyCertSign
"""
    return template % {"root_dir": root_dir,
                       "san_line": san_line,
                       "duration": duration,
                       "sep": os.path.sep.replace("\\", "\\\\")}
class OpenSSLEnvironment(object):
ssl_enabled = True
    def __init__(self, logger, openssl_binary="openssl", base_path=None,
                 password="web-platform-tests", force_regenerate=False,
                 duration=30, base_conf_path=None):
        """SSL environment that creates a local CA and host certificate using OpenSSL.
        By default this will look in base_path for existing certificates that are still
        valid and only create new certificates if there aren't any. This behaviour can
        be adjusted using the force_regenerate option.
        :param logger: a stdlib logging compatible logger or mozlog structured logger
        :param openssl_binary: Path to the OpenSSL binary
        :param base_path: Path in which certificates will be stored. If None, a temporary
                          directory will be used and removed when the server shuts down
        :param password: Password to use
        :param force_regenerate: Always create a new certificate even if one already exists.
        :param duration: Certificate validity in days
        :param base_conf_path: Optional base OpenSSL config passed through to
                               subprocess invocations
        """
        self.logger = logger
        self.temporary = False
        if base_path is None:
            # No caller-supplied path: use a throwaway dir removed in __exit__.
            base_path = tempfile.mkdtemp()
            self.temporary = True
        self.base_path = os.path.abspath(base_path)
        self.password = password
        self.force_regenerate = force_regenerate
        self.duration = duration
        self.base_conf_path = base_conf_path
        self.path = None
        self.binary = openssl_binary
        self.openssl = None
        # CA key/cert paths are resolved lazily (see ca_cert_path).
        self._ca_cert_path = None
        self._ca_key_path = None
        # Cache of host-tuple -> (key_path, cert_path).
        self.host_certificates = {}
    def __enter__(self):
        # Create the CA database layout expected by `openssl ca`: an empty
        # index.txt and a random serial (padded to an even digit count).
        if not os.path.exists(self.base_path):
            os.makedirs(self.base_path)
        path = functools.partial(os.path.join, self.base_path)
        with open(path("index.txt"), "w"):
            pass
        with open(path("serial"), "w") as f:
            serial = "%x" % random.randint(0, 1000000)
            if len(serial) % 2:
                serial = "0" + serial
            f.write(serial)
        self.path = path
        return self
    def __exit__(self, *args, **kwargs):
        # Only remove directories this instance created itself.
        if self.temporary:
            shutil.rmtree(self.base_path)
    def _config_openssl(self, hosts):
        """Return an OpenSSL context manager configured for `hosts`."""
        conf_path = self.path("openssl.cfg")
        return OpenSSL(self.logger, self.binary, self.base_path, conf_path, hosts,
                       self.duration, self.base_conf_path)
    def ca_cert_path(self):
        """Get the path to the CA certificate file, generating a
        new one if needed"""
        # Try to reuse an on-disk CA first unless regeneration is forced.
        if self._ca_cert_path is None and not self.force_regenerate:
            self._load_ca_cert()
        if self._ca_cert_path is None:
            self._generate_ca()
        return self._ca_cert_path
    def _load_ca_cert(self):
        """Adopt an existing, still-valid CA key/cert pair from base_path."""
        key_path = self.path("cakey.pem")
        cert_path = self.path("cacert.pem")
        if self.check_key_cert(key_path, cert_path, None):
            self.logger.info("Using existing CA cert")
            self._ca_key_path, self._ca_cert_path = key_path, cert_path
    def check_key_cert(self, key_path, cert_path, hosts):
        """Check that a key and cert file exist and are valid"""
        if not os.path.exists(key_path) or not os.path.exists(cert_path):
            return False
        with self._config_openssl(hosts) as openssl:
            # `openssl x509 -enddate` prints "notAfter=<date>".
            end_date_str = openssl("x509",
                                   "-noout",
                                   "-enddate",
                                   "-in", cert_path).split("=", 1)[1].strip()
            # Not sure if this works in other locales
            end_date = datetime.strptime(end_date_str, "%b %d %H:%M:%S %Y %Z")
            # Should have some buffer here e.g. 1 hr
            if end_date < datetime.now():
                return False
            #TODO: check the key actually signed the cert.
            return True
    def _generate_ca(self):
        """Create a self-signed CA key/certificate pair in the working
        directory and record their paths on self."""
        path = self.path
        self.logger.info("Generating new CA in %s" % self.base_path)
        key_path = path("cakey.pem")
        req_path = path("careq.pem")
        cert_path = path("cacert.pem")
        with self._config_openssl(None) as openssl:
            # First create the CA private key plus a certificate request.
            openssl("req",
                    "-batch",
                    "-new",
                    "-newkey", "rsa:2048",
                    "-keyout", key_path,
                    "-out", req_path,
                    "-subj", make_subject("web-platform-tests"),
                    "-passout", "pass:%s" % self.password)
            # Then self-sign the request to produce the CA certificate.
            openssl("ca",
                    "-batch",
                    "-create_serial",
                    "-keyfile", key_path,
                    "-passin", "pass:%s" % self.password,
                    "-selfsign",
                    "-extensions", "v3_ca",
                    "-in", req_path,
                    "-out", cert_path)
        # The request file is only an intermediate artifact.
        os.unlink(req_path)
        self._ca_key_path, self._ca_cert_path = key_path, cert_path
def host_cert_path(self, hosts):
"""Get a tuple of (private key path, certificate path) for a host,
generating new ones if necessary.
hosts must be a list of all hosts to appear on the certificate, with
the primary hostname first."""
hosts = tuple(hosts)
if hosts not in self.host_certificates:
if not self.force_regenerate:
key_cert = self._load_host_cert(hosts)
else:
key_cert = None
if key_cert is None:
key, cert = self._generate_host_cert(hosts)
else:
key, cert = key_cert
self.host_certificates[hosts] = key, cert
return self.host_certificates[hosts]
def _load_host_cert(self, hosts):
host = hosts[0]
key_path = self.path("%s.key" % host)
cert_path = self.path("%s.pem" % host)
# TODO: check that this cert was signed by the CA cert
if self.check_key_cert(key_path, cert_path, hosts):
self.logger.info("Using existing host cert")
return key_path, cert_path
    def _generate_host_cert(self, hosts):
        """Generate a private key and CA-signed certificate for *hosts*.

        The primary hostname (hosts[0]) names the output files.
        Returns a (key path, certificate path) tuple.
        """
        host = hosts[0]
        # Make sure a CA exists before trying to sign with it.
        if self._ca_key_path is None:
            self._generate_ca()
        ca_key_path = self._ca_key_path
        assert os.path.exists(ca_key_path)
        path = self.path
        req_path = path("wpt.req")
        cert_path = path("%s.pem" % host)
        key_path = path("%s.key" % host)
        self.logger.info("Generating new host cert")
        with self._config_openssl(hosts) as openssl:
            # NOTE(review): passing the CA key via "-in" to ``req`` looks
            # unusual ("-in" normally names an existing request) -- confirm
            # this is intentional before changing it.
            openssl("req",
                    "-batch",
                    "-newkey", "rsa:2048",
                    "-keyout", key_path,
                    "-in", ca_key_path,
                    "-nodes",
                    "-out", req_path)
            # Sign the request with the CA; the subject comes from the
            # command line rather than the request itself.
            openssl("ca",
                    "-batch",
                    "-in", req_path,
                    "-passin", "pass:%s" % self.password,
                    "-subj", make_subject(host),
                    "-out", cert_path)
        os.unlink(req_path)
        return key_path, cert_path
| wldcordeiro/servo | tests/wpt/web-platform-tests/tools/sslutils/openssl.py | Python | mpl-2.0 | 13,164 |
"""
This script investigates how calculating phasic currents from voltage clamp
recordings may benefit from subtracting-out the "noise" determined from a
subset of the quietest pieces of the recording, rather than using smoothing
or curve fitting to guess a Gaussian-like RMS noise function.
"""
import os
import sys
sys.path.append("../")
sys.path.append("../../")
sys.path.append("../../../")
sys.path.append("../../../../")
import swhlab
import matplotlib.pyplot as plt
import numpy as np
import matplotlib.mlab as mlab
POINTS_PER_SEC=20000
POINTS_PER_MS=int(POINTS_PER_SEC/1000)
CHUNK_POINTS=POINTS_PER_MS*10 # size of Y pieces to calculate variance from
PERCENT_STEP=10 # percentile steps to display
HIST_RESOLUTION=.1 # pA per bin
COLORMAP=plt.get_cmap('jet') # which color scheme do we want to use?
#COLORMAP=plt.get_cmap('winter') # which color scheme do we want to use?
def quietParts(data,percentile=10,chunkPoints=None):
    """
    Break the data into chunks and return just the quiet ones.

    Returns the concatenated chunks whose variance rank (0-100) is at or
    below the given percentile.

    Args:
        data: 1D numpy array of values to filter.
        percentile: keep chunks whose variance percentile is <= this value.
        chunkPoints: points per chunk; defaults to the module-level
            CHUNK_POINTS (which should be adjusted so it's about 10ms of
            data). Kept as a runtime default for backward compatibility.
    """
    if chunkPoints is None:
        chunkPoints = CHUNK_POINTS
    # BUG FIX: the original implementation ignored the ``data`` argument
    # and operated on the module-level global ``Y`` instead.
    nChunks=int(len(data)/chunkPoints)
    chunks=np.reshape(data[:nChunks*chunkPoints],(nChunks,chunkPoints))
    variances=np.var(chunks,axis=1)
    percentiles=np.empty(len(variances))
    # Sort once outside the loop (was re-sorted on every iteration).
    rankedVariances=sorted(variances)
    for i,variance in enumerate(variances):
        percentiles[i]=rankedVariances.index(variance)/len(variances)*100
    selected=chunks[np.where(percentiles<=percentile)[0]].flatten()
    return selected
def ndist(data,Xs,histResolution=None):
    """
    given some data and a list of X positions, return the normal
    distribution curve as a Y point at each of those Xs.

    The curve is scaled to match a histogram of ``data`` built with
    ``histResolution``-wide bins (defaults to HIST_RESOLUTION).
    """
    if histResolution is None:
        histResolution = HIST_RESOLUTION
    sigma=np.sqrt(np.var(data))
    center=np.average(data)
    # matplotlib.mlab.normpdf() was removed in matplotlib 3.1, so compute
    # the Gaussian PDF explicitly with numpy instead.
    Xs=np.asarray(Xs)
    curve=np.exp(-((Xs-center)**2)/(2*sigma**2))/(sigma*np.sqrt(2*np.pi))
    curve*=len(data)*histResolution
    return curve
if __name__=="__main__":
    # Load a previously saved voltage-clamp sweep (sampled at
    # POINTS_PER_SEC, i.e. 20 kHz).
    Y=np.load("sweepdata.npy")
    # Heavily low-pass filter the trace (250 ms kernel) to estimate the
    # slow drifting baseline underneath the phasic events.
    baseline=swhlab.common.lowpass(Y,POINTS_PER_MS*250)
    plt.figure(figsize=(15,5))
    plt.plot(Y)
    plt.plot(baseline,color='r',alpha=.5,lw=5)
    plt.savefig("baseline.png")
    # Re-plot the baseline-subtracted trace, centered on zero.
    plt.figure(figsize=(15,5))
    plt.plot(Y-baseline)
    plt.axhline(0,color='r',alpha=.5,lw=5)
    plt.savefig("baseline2.png")
    plt.show()
if __name__=="__main__" and False:
    # NOTE: this analysis branch is intentionally disabled ("and False");
    # it relies on Y and baseline computed in the block above.
    # apply baseline
    Y=Y-baseline
    # predict what our histogram will look like
    padding=50
    histCenter=int(np.average(Y))
    histRange=(histCenter-padding,histCenter+padding)
    histBins=int(abs(histRange[0]-histRange[1])/HIST_RESOLUTION)
    # FIRST CALCULATE THE 10-PERCENTILE CURVE
    data=quietParts(Y,10) # assume 10% is a good percentile to use
    hist,bins=np.histogram(data,bins=histBins,range=histRange,density=False)
    # NOTE(review): np.float is deprecated/removed in modern numpy; this
    # branch would need hist.astype(float) to run today.
    hist=hist.astype(np.float) # histogram of data values
    curve=ndist(data,bins[:-1]) # normal distribution curve
    hist[hist == 0] = np.nan
    histValidIs=np.where(~np.isnan(hist))
    histX,histY=bins[:-1][histValidIs],hist[histValidIs] # remove nans
    baselineCurve=curve/np.max(curve) # max is good for smooth curve
    # THEN CALCULATE THE WHOLE-SWEEP HISTOGRAM
    hist,bins=np.histogram(Y,bins=histBins,range=histRange,density=False)
    hist=hist.astype(np.float) # histogram of data values
    hist[hist == 0] = np.nan
    histValidIs=np.where(~np.isnan(hist))
    histX,histY=bins[:-1][histValidIs],hist[histValidIs] # remove nans
    histY/=np.percentile(histY,98) # percentile is needed for noisy data
    # DETERMINE THE DIFFERENCE
    diffX=bins[:-1][histValidIs]
    diffY=histY-baselineCurve[histValidIs]
    diffY[diffY<0]=np.nan
    # NOW PLOT THE DIFFERENCE
    plt.figure(figsize=(10,10))
    plt.subplot(211)
    plt.grid()
    plt.plot(histX,histY,'b.',ms=10,alpha=.5,label="data points")
    plt.plot(bins[:-1],baselineCurve,'r-',lw=3,alpha=.5,label="10% distribution")
    plt.legend(loc='upper left',shadow=True)
    plt.ylabel("normalized distribution")
    plt.axis([histCenter-20,histCenter+20,0,1.5])
    plt.subplot(212)
    plt.grid()
    plt.plot(diffX,diffY,'.',ms=10,alpha=.5,color='b')
    plt.axvline(histCenter,color='r',lw=3,alpha=.5,ls='--')
    plt.legend(loc='upper left',shadow=True)
    plt.ylabel("difference")
    plt.xlabel("histogram data points (pA)")
    plt.margins(0,.1)
    plt.axis([histCenter-20,histCenter+20,0,None])
    plt.tight_layout()
    plt.savefig("2016-12-16-tryout-yesSub.png")
    plt.show()
print("DONE")
"""
Utilities for getting WIP limit data about a team
"""
class WIPLimits(object):
    """
    Represents a team's WIP (work-in-progress) limit rules.

    Exposes per-column limits plus a 'conwip' (total WIP) entry, which is
    either the explicitly supplied value or, when absent, the sum of the
    column limits.
    """

    def __init__(self, columns=None, conwip=None, name=None):
        self.name = name
        self._conwip = conwip
        self._columns = {} if columns is None else columns
        self._limits = self._calculate_limits()

    def _calculate_limits(self):
        # Per-column limits are copied straight through.
        limits = dict(self._columns)
        # Derive conwip from the columns only when none was given
        # explicitly and there is at least one column limit.
        if self._columns.values() and self._conwip is None:
            limits['conwip'] = sum(self._columns.values())
        else:
            limits['conwip'] = self._conwip
        return limits

    def get(self, key, default=None):
        """Dict-style lookup with a default."""
        return self._limits.get(key, default)

    def __getitem__(self, key):
        return self._limits[key]
# Copyright 2010 OpenStack Foundation
# Copyright 2012 University Of Minho
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import __builtin__
import contextlib
import copy
import datetime
import errno
import glob
import os
import random
import re
import shutil
import threading
import time
import uuid
import eventlet
from eventlet import greenthread
import fixtures
from lxml import etree
import mock
from mox3 import mox
from oslo_concurrency import lockutils
from oslo_concurrency import processutils
from oslo_config import cfg
from oslo_serialization import jsonutils
from oslo_utils import encodeutils
from oslo_utils import importutils
from oslo_utils import timeutils
from oslo_utils import units
from oslo_utils import uuidutils
import six
from six.moves import range
from nova.api.metadata import base as instance_metadata
from nova.compute import arch
from nova.compute import cpumodel
from nova.compute import manager
from nova.compute import power_state
from nova.compute import task_states
from nova.compute import vm_mode
from nova.compute import vm_states
from nova import context
from nova import db
from nova import exception
from nova.network import model as network_model
from nova import objects
from nova.openstack.common import fileutils
from nova.openstack.common import loopingcall
from nova.pci import manager as pci_manager
from nova import test
from nova.tests.unit import fake_block_device
from nova.tests.unit import fake_instance
from nova.tests.unit import fake_network
import nova.tests.unit.image.fake
from nova.tests.unit import matchers
from nova.tests.unit.objects import test_pci_device
from nova.tests.unit.objects import test_vcpu_model
from nova.tests.unit.virt.libvirt import fake_imagebackend
from nova.tests.unit.virt.libvirt import fake_libvirt_utils
from nova.tests.unit.virt.libvirt import fakelibvirt
from nova import utils
from nova import version
from nova.virt import block_device as driver_block_device
from nova.virt import configdrive
from nova.virt.disk import api as disk
from nova.virt import driver
from nova.virt import fake
from nova.virt import firewall as base_firewall
from nova.virt import hardware
from nova.virt.image import model as imgmodel
from nova.virt.libvirt import blockinfo
from nova.virt.libvirt import config as vconfig
from nova.virt.libvirt import driver as libvirt_driver
from nova.virt.libvirt import firewall
from nova.virt.libvirt import guest as libvirt_guest
from nova.virt.libvirt import host
from nova.virt.libvirt import imagebackend
from nova.virt.libvirt import lvm
from nova.virt.libvirt import rbd_utils
from nova.virt.libvirt import utils as libvirt_utils
from nova.virt.libvirt import volume as volume_drivers
libvirt_driver.libvirt = fakelibvirt
host.libvirt = fakelibvirt
libvirt_guest.libvirt = fakelibvirt
CONF = cfg.CONF
CONF.import_opt('compute_manager', 'nova.service')
CONF.import_opt('host', 'nova.netconf')
CONF.import_opt('my_ip', 'nova.netconf')
CONF.import_opt('image_cache_subdirectory_name', 'nova.virt.imagecache')
CONF.import_opt('instances_path', 'nova.compute.manager')
_fake_network_info = fake_network.fake_get_instance_nw_info
_fake_NodeDevXml = \
{"pci_0000_04_00_3": """
<device>
<name>pci_0000_04_00_3</name>
<parent>pci_0000_00_01_1</parent>
<driver>
<name>igb</name>
</driver>
<capability type='pci'>
<domain>0</domain>
<bus>4</bus>
<slot>0</slot>
<function>3</function>
<product id='0x1521'>I350 Gigabit Network Connection</product>
<vendor id='0x8086'>Intel Corporation</vendor>
<capability type='virt_functions'>
<address domain='0x0000' bus='0x04' slot='0x10' function='0x3'/>
<address domain='0x0000' bus='0x04' slot='0x10' function='0x7'/>
<address domain='0x0000' bus='0x04' slot='0x11' function='0x3'/>
<address domain='0x0000' bus='0x04' slot='0x11' function='0x7'/>
</capability>
</capability>
</device>""",
"pci_0000_04_10_7": """
<device>
<name>pci_0000_04_10_7</name>
<parent>pci_0000_00_01_1</parent>
<driver>
<name>igbvf</name>
</driver>
<capability type='pci'>
<domain>0</domain>
<bus>4</bus>
<slot>16</slot>
<function>7</function>
<product id='0x1520'>I350 Ethernet Controller Virtual Function
</product>
<vendor id='0x8086'>Intel Corporation</vendor>
<capability type='phys_function'>
<address domain='0x0000' bus='0x04' slot='0x00' function='0x3'/>
</capability>
<capability type='virt_functions'>
</capability>
</capability>
</device>""",
"pci_0000_04_11_7": """
<device>
<name>pci_0000_04_11_7</name>
<parent>pci_0000_00_01_1</parent>
<driver>
<name>igbvf</name>
</driver>
<capability type='pci'>
<domain>0</domain>
<bus>4</bus>
<slot>17</slot>
<function>7</function>
<product id='0x1520'>I350 Ethernet Controller Virtual Function
</product>
<vendor id='0x8086'>Intel Corporation</vendor>
<numa node='0'/>
<capability type='phys_function'>
<address domain='0x0000' bus='0x04' slot='0x00' function='0x3'/>
</capability>
<capability type='virt_functions'>
</capability>
</capability>
</device>"""}
_fake_cpu_info = {
"arch": "test_arch",
"model": "test_model",
"vendor": "test_vendor",
"topology": {
"sockets": 1,
"cores": 8,
"threads": 16
},
"features": ["feature1", "feature2"]
}
def _concurrency(signal, wait, done, target, is_block_dev=False):
    """Test helper used as the image cache fetch function.

    Signals ``signal`` once it starts running, blocks on ``wait`` until
    the test releases it, then signals ``done`` so the test can observe
    completion ordering.  ``target`` and ``is_block_dev`` are accepted to
    match the expected fetch-function signature but are unused here.
    """
    signal.send()
    wait.wait()
    done.send()
class FakeVirDomainSnapshot(object):
    """Minimal stand-in for a libvirt virDomainSnapshot object."""

    def __init__(self, dom=None):
        # Domain (possibly itself a fake) this snapshot belongs to.
        self.dom = dom

    def delete(self, flags):
        # Deleting a fake snapshot is a no-op.
        return None
class FakeVirtDomain(object):
    """In-memory stand-in for a libvirt virDomain object.

    Only the attributes and calls exercised by the driver tests are
    implemented; most mutating methods are deliberate no-ops.
    """
    def __init__(self, fake_xml=None, uuidstr=None, id=None, name=None):
        if uuidstr is None:
            uuidstr = str(uuid.uuid4())
        self.uuidstr = uuidstr
        self.id = id
        self.domname = name
        # Presumably mirrors virDomain.info()'s
        # [state, maxMem, memory, nrVirtCpu, cpuTime] layout -- only the
        # first three slots are populated here. TODO confirm against use.
        self._info = [power_state.RUNNING, 2048 * units.Mi, 1234 * units.Mi,
                      None, None]
        if fake_xml:
            self._fake_dom_xml = fake_xml
        else:
            # Default minimal domain XML with a single file-backed disk.
            self._fake_dom_xml = """
                <domain type='kvm'>
                    <devices>
                        <disk type='file'>
                            <source file='filename'/>
                        </disk>
                    </devices>
                </domain>
            """
    def name(self):
        # Fall back to a repr-based name when none was supplied.
        if self.domname is None:
            return "fake-domain %s" % self
        else:
            return self.domname
    def ID(self):
        return self.id
    def info(self):
        return self._info
    def create(self):
        pass
    def managedSave(self, *args):
        pass
    def createWithFlags(self, launch_flags):
        pass
    def XMLDesc(self, flags):
        # Always returns the canned XML regardless of flags.
        return self._fake_dom_xml
    def UUIDString(self):
        return self.uuidstr
    def attachDeviceFlags(self, xml, flags):
        pass
    def attachDevice(self, xml):
        pass
    def detachDeviceFlags(self, xml, flags):
        pass
    def snapshotCreateXML(self, xml, flags):
        pass
    def blockCommit(self, disk, base, top, bandwidth=0, flags=0):
        pass
    def blockRebase(self, disk, base, bandwidth=0, flags=0):
        pass
    def blockJobInfo(self, path, flags):
        pass
    def resume(self):
        pass
    def destroy(self):
        pass
    def fsFreeze(self, disks=None, flags=0):
        pass
    def fsThaw(self, disks=None, flags=0):
        pass
class CacheConcurrencyTestCase(test.NoDBTestCase):
    """Tests image-cache serialisation: fetches for the same cache file
    must run sequentially, while different files may run concurrently."""
    def setUp(self):
        super(CacheConcurrencyTestCase, self).setUp()
        self.flags(instances_path=self.useFixture(fixtures.TempDir()).path)
        # utils.synchronized() will create the lock_path for us if it
        # doesn't already exist. It will also delete it when it's done,
        # which can cause race conditions with the multiple threads we
        # use for tests. So, create the path here so utils.synchronized()
        # won't delete it out from under one of the threads.
        self.lock_path = os.path.join(CONF.instances_path, 'locks')
        fileutils.ensure_tree(self.lock_path)
        def fake_exists(fname):
            # Pretend only the cache dir and the lock dir exist.
            basedir = os.path.join(CONF.instances_path,
                                   CONF.image_cache_subdirectory_name)
            if fname == basedir or fname == self.lock_path:
                return True
            return False
        def fake_execute(*args, **kwargs):
            pass
        def fake_extend(image, size, use_cow=False):
            pass
        self.stubs.Set(os.path, 'exists', fake_exists)
        self.stubs.Set(utils, 'execute', fake_execute)
        self.stubs.Set(imagebackend.disk, 'extend', fake_extend)
        self.useFixture(fixtures.MonkeyPatch(
            'nova.virt.libvirt.imagebackend.libvirt_utils',
            fake_libvirt_utils))
    def _fake_instance(self, uuid):
        # Minimal Instance; only id/uuid are needed by the image backend.
        return objects.Instance(id=1, uuid=uuid)
    def test_same_fname_concurrency(self):
        # Ensures that the same fname cache runs sequentially.
        # NOTE(review): the local ``uuid`` shadows the uuid module here.
        uuid = uuidutils.generate_uuid()
        backend = imagebackend.Backend(False)
        wait1 = eventlet.event.Event()
        done1 = eventlet.event.Event()
        sig1 = eventlet.event.Event()
        thr1 = eventlet.spawn(backend.image(self._fake_instance(uuid),
                                            'name').cache,
                              _concurrency, 'fname', None,
                              signal=sig1, wait=wait1, done=done1)
        eventlet.sleep(0)
        # Thread 1 should run before thread 2.
        sig1.wait()
        wait2 = eventlet.event.Event()
        done2 = eventlet.event.Event()
        sig2 = eventlet.event.Event()
        thr2 = eventlet.spawn(backend.image(self._fake_instance(uuid),
                                            'name').cache,
                              _concurrency, 'fname', None,
                              signal=sig2, wait=wait2, done=done2)
        wait2.send()
        eventlet.sleep(0)
        try:
            # Thread 2 must still be blocked on the per-fname lock even
            # though its wait event was already released.
            self.assertFalse(done2.ready())
        finally:
            wait1.send()
            done1.wait()
            eventlet.sleep(0)
        self.assertTrue(done2.ready())
        # Wait on greenthreads to assert they didn't raise exceptions
        # during execution
        thr1.wait()
        thr2.wait()
    def test_different_fname_concurrency(self):
        # Ensures that two different fname caches are concurrent.
        uuid = uuidutils.generate_uuid()
        backend = imagebackend.Backend(False)
        wait1 = eventlet.event.Event()
        done1 = eventlet.event.Event()
        sig1 = eventlet.event.Event()
        thr1 = eventlet.spawn(backend.image(self._fake_instance(uuid),
                                            'name').cache,
                              _concurrency, 'fname2', None,
                              signal=sig1, wait=wait1, done=done1)
        eventlet.sleep(0)
        # Thread 1 should run before thread 2.
        sig1.wait()
        wait2 = eventlet.event.Event()
        done2 = eventlet.event.Event()
        sig2 = eventlet.event.Event()
        thr2 = eventlet.spawn(backend.image(self._fake_instance(uuid),
                                            'name').cache,
                              _concurrency, 'fname1', None,
                              signal=sig2, wait=wait2, done=done2)
        eventlet.sleep(0)
        # Wait for thread 2 to start.
        sig2.wait()
        wait2.send()
        tries = 0
        while not done2.ready() and tries < 10:
            eventlet.sleep(0)
            tries += 1
        try:
            # Thread 2 finished despite thread 1 still holding its lock,
            # proving the two fnames do not serialise on each other.
            self.assertTrue(done2.ready())
        finally:
            wait1.send()
            eventlet.sleep(0)
        # Wait on greenthreads to assert they didn't raise exceptions
        # during execution
        thr1.wait()
        thr2.wait()
class FakeVolumeDriver(object):
    """No-op volume driver satisfying the libvirt driver's volume
    driver interface for tests."""
    def __init__(self, *args, **kwargs):
        pass
    def attach_volume(self, *args):
        pass
    def detach_volume(self, *args):
        pass
    def get_xml(self, *args):
        return ""
    def get_config(self, *args):
        """Connect the volume to a fake device."""
        # Return a guest disk config populated with placeholder values.
        conf = vconfig.LibvirtConfigGuestDisk()
        conf.source_type = "network"
        conf.source_protocol = "fake"
        conf.source_name = "fake"
        conf.target_dev = "fake"
        conf.target_bus = "fake"
        return conf
    def connect_volume(self, *args):
        """Connect the volume to a fake device."""
        return self.get_config()
class FakeConfigGuestDisk(object):
    """Bare-bones stand-in for a guest disk config object; only the
    attributes inspected by the tests are modelled."""

    def __init__(self, *args, **kwargs):
        self.source_type = None
        self.driver_cache = None
class FakeConfigGuest(object):
    """Bare-bones stand-in for a guest config object; only the
    ``driver_cache`` attribute is modelled."""

    def __init__(self, *args, **kwargs):
        self.driver_cache = None
class FakeNodeDevice(object):
    """Fake libvirt node device that echoes back canned XML."""

    def __init__(self, fakexml):
        self.xml = fakexml

    def XMLDesc(self, flags):
        # ``flags`` is accepted for API compatibility and ignored.
        return self.xml
def _create_test_instance():
    """Return a dict of instance attributes (with an m1.small-like
    flavor) used as the template test instance throughout this module."""
    flavor = objects.Flavor(memory_mb=2048,
                            swap=0,
                            vcpu_weight=None,
                            root_gb=1,
                            id=2,
                            name=u'm1.small',
                            ephemeral_gb=0,
                            rxtx_factor=1.0,
                            flavorid=u'1',
                            vcpus=1,
                            extra_specs={})
    return {
        'id': 1,
        'uuid': '32dfcb37-5af1-552b-357c-be8c3aa38310',
        'memory_kb': '1024000',
        'basepath': '/some/path',
        'bridge_name': 'br100',
        'display_name': "Acme webserver",
        'vcpus': 2,
        'project_id': 'fake',
        'bridge': 'br101',
        'image_ref': '155d900f-4e14-4e4c-a73d-069cbf4541e6',
        'root_gb': 10,
        'ephemeral_gb': 20,
        'instance_type_id': '5',  # m1.small
        'extra_specs': {},
        'system_metadata': {},
        'flavor': flavor,
        'new_flavor': None,
        'old_flavor': None,
        'pci_devices': objects.PciDeviceList(),
        'numa_topology': None,
        'config_drive': None,
        'vm_mode': None,
        'kernel_id': None,
        'ramdisk_id': None,
        'os_type': 'linux',
        'user_id': '838a72b0-0d54-4827-8fd6-fb1227633ceb',
        'ephemeral_key_uuid': None,
        'vcpu_model': None,
        'host': 'fake-host',
    }
class LibvirtConnTestCase(test.NoDBTestCase):
    """Tests for the libvirt compute driver."""
    REQUIRES_LOCKING = True
    # Default ephemeral disk cache-file name: suffixed with a short hash
    # of the default filesystem type.
    _EPHEMERAL_20_DEFAULT = ('ephemeral_20_%s' %
                             utils.get_hash_str(disk._DEFAULT_FILE_SYSTEM)[:7])
    def setUp(self):
        super(LibvirtConnTestCase, self).setUp()
        self.flags(fake_call=True)
        self.user_id = 'fake'
        self.project_id = 'fake'
        self.context = context.get_admin_context()
        # Redirect instance and snapshot storage into a per-test tempdir.
        temp_dir = self.useFixture(fixtures.TempDir()).path
        self.flags(instances_path=temp_dir)
        self.flags(snapshots_directory=temp_dir, group='libvirt')
        self.useFixture(fixtures.MonkeyPatch(
            'nova.virt.libvirt.driver.libvirt_utils',
            fake_libvirt_utils))
        self.flags(sysinfo_serial="hardware", group="libvirt")
        self.useFixture(fixtures.MonkeyPatch(
            'nova.virt.libvirt.imagebackend.libvirt_utils',
            fake_libvirt_utils))
        def fake_extend(image, size, use_cow=False):
            pass
        self.stubs.Set(libvirt_driver.disk, 'extend', fake_extend)
        self.stubs.Set(imagebackend.Image, 'resolve_driver_format',
                       imagebackend.Image._get_driver_format)
        # Replace the real libvirt bindings with the in-process fake.
        self.useFixture(fakelibvirt.FakeLibvirtFixture())
        self.test_instance = _create_test_instance()
        self.image_service = nova.tests.unit.image.fake.stub_out_image_service(
            self.stubs)
        # Template for block-device domain XML; {device_path} is filled
        # in by individual tests.
        self.device_xml_tmpl = """
        <domain type='kvm'>
          <devices>
            <disk type='block' device='disk'>
              <driver name='qemu' type='raw' cache='none'/>
              <source dev='{device_path}'/>
              <target bus='virtio' dev='vdb'/>
              <serial>58a84f6d-3f0c-4e19-a0af-eb657b790657</serial>
              <address type='pci' domain='0x0' bus='0x0' slot='0x04' \
function='0x0'/>
            </disk>
          </devices>
        </domain>
        """
def relpath(self, path):
return os.path.relpath(path, CONF.instances_path)
    def tearDown(self):
        # Reset the shared fake image service so state doesn't leak
        # between test cases.
        nova.tests.unit.image.fake.FakeImageService_reset()
        super(LibvirtConnTestCase, self).tearDown()
    def test_driver_capabilities(self):
        """The driver must advertise image caching and recreate support."""
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        # NOTE(review): the failure messages below are two adjacent string
        # literals with no separating space ("...'has_imagecache''is
        # invalid"); cosmetic only.
        self.assertTrue(drvr.capabilities['has_imagecache'],
                        'Driver capabilities for \'has_imagecache\''
                        'is invalid')
        self.assertTrue(drvr.capabilities['supports_recreate'],
                        'Driver capabilities for \'supports_recreate\''
                        'is invalid')
    def create_fake_libvirt_mock(self, **kwargs):
        """Defining mocks for LibvirtDriver(libvirt is not used)."""
        # A fake libvirt.virConnect
        class FakeLibvirtDriver(object):
            def defineXML(self, xml):
                return FakeVirtDomain()
        # Creating mocks
        volume_driver = ['iscsi=nova.tests.unit.virt.libvirt.test_driver'
                         '.FakeVolumeDriver']
        # NOTE(review): the local ``fake`` shadows the imported
        # nova.virt.fake module inside this method.
        fake = FakeLibvirtDriver()
        # Customizing above fake if necessary
        for key, val in kwargs.items():
            fake.__setattr__(key, val)
        # Install the fake connection and volume-driver list on the
        # driver class and the host connection helper.
        self.stubs.Set(libvirt_driver.LibvirtDriver, '_conn', fake)
        self.stubs.Set(libvirt_driver.LibvirtDriver, '_get_volume_drivers',
                       lambda x: volume_driver)
        self.stubs.Set(host.Host, 'get_connection', lambda x: fake)
    def fake_lookup(self, instance_name):
        """Stub for domain lookups: always return a fresh fake domain."""
        return FakeVirtDomain()
    def fake_execute(self, *args, **kwargs):
        """Stub for utils.execute that just touches the target file (the
        last positional argument) instead of running a real command."""
        open(args[-1], "a").close()
def _create_service(self, **kwargs):
service_ref = {'host': kwargs.get('host', 'dummy'),
'disabled': kwargs.get('disabled', False),
'binary': 'nova-compute',
'topic': 'compute',
'report_count': 0}
return objects.Service(**service_ref)
    def _get_pause_flag(self, drvr, network_info, power_on=True,
                        vifs_already_plugged=False):
        """Mirror the driver's decision on whether a domain should be
        started paused while waiting for VIF plug events."""
        timeout = CONF.vif_plugging_timeout
        events = []
        # Events are only expected when the connection supports paused
        # start, neutron is in use, the VIFs are not yet plugged, we are
        # powering on, and a plug timeout is configured.
        if (drvr._conn_supports_start_paused and
            utils.is_neutron() and
            not vifs_already_plugged and
            power_on and timeout):
            events = drvr._get_neutron_events(network_info)
        return bool(events)
    def test_public_api_signatures(self):
        # The libvirt driver must expose exactly the ComputeDriver API.
        baseinst = driver.ComputeDriver(None)
        inst = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        self.assertPublicAPISignatures(baseinst, inst)
    def test_legacy_block_device_info(self):
        # The driver uses the new-style block device info format.
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        self.assertFalse(drvr.need_legacy_block_device_info)
    @mock.patch.object(host.Host, "has_min_version")
    def test_min_version_start_ok(self, mock_version):
        # init_host() succeeds when the libvirt version check passes.
        mock_version.return_value = True
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        drvr.init_host("dummyhost")
    @mock.patch.object(host.Host, "has_min_version")
    def test_min_version_start_abort(self, mock_version):
        # init_host() must abort when libvirt is too old.
        mock_version.return_value = False
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        self.assertRaises(exception.NovaException,
                          drvr.init_host,
                          "dummyhost")
    @mock.patch.object(fakelibvirt.Connection, 'getLibVersion',
                       return_value=utils.convert_version_to_int(
                           libvirt_driver.NEXT_MIN_LIBVIRT_VERSION) - 1)
    @mock.patch.object(libvirt_driver.LOG, 'warning')
    def test_next_min_version_deprecation_warning(self, mock_warning,
                                                  mock_get_libversion):
        # Test that a warning is logged if the libvirt version is less than
        # the next required minimum version.
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        drvr.init_host("dummyhost")
        # assert that the next min version is in a warning message
        expected_arg = {'version': '0.10.2'}
        version_arg_found = False
        for call in mock_warning.call_args_list:
            if call[0][1] == expected_arg:
                version_arg_found = True
                break
        self.assertTrue(version_arg_found)
    @mock.patch.object(fakelibvirt.Connection, 'getLibVersion',
                       return_value=utils.convert_version_to_int(
                           libvirt_driver.NEXT_MIN_LIBVIRT_VERSION))
    @mock.patch.object(libvirt_driver.LOG, 'warning')
    def test_next_min_version_ok(self, mock_warning, mock_get_libversion):
        # Test that a warning is not logged if the libvirt version is greater
        # than or equal to NEXT_MIN_LIBVIRT_VERSION.
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        drvr.init_host("dummyhost")
        # assert that the next min version is in a warning message
        expected_arg = {'version': '0.10.2'}
        version_arg_found = False
        for call in mock_warning.call_args_list:
            if call[0][1] == expected_arg:
                version_arg_found = True
                break
        self.assertFalse(version_arg_found)
    @mock.patch.object(objects.Service, 'get_by_compute_host')
    def test_set_host_enabled_with_disable(self, mock_svc):
        # Tests disabling an enabled host.
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        svc = self._create_service(host='fake-mini')
        mock_svc.return_value = svc
        drvr._set_host_enabled(False)
        self.assertTrue(svc.disabled)
    @mock.patch.object(objects.Service, 'get_by_compute_host')
    def test_set_host_enabled_with_enable(self, mock_svc):
        # Tests enabling a disabled host.
        # NOTE(review): per the assertion below the service stays
        # disabled -- presumably because it was disabled for a
        # non-driver reason; confirm against _set_host_enabled().
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        svc = self._create_service(disabled=True, host='fake-mini')
        mock_svc.return_value = svc
        drvr._set_host_enabled(True)
        self.assertTrue(svc.disabled)
    @mock.patch.object(objects.Service, 'get_by_compute_host')
    def test_set_host_enabled_with_enable_state_enabled(self, mock_svc):
        # Tests enabling an enabled host.
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        svc = self._create_service(disabled=False, host='fake-mini')
        mock_svc.return_value = svc
        drvr._set_host_enabled(True)
        self.assertFalse(svc.disabled)
    @mock.patch.object(objects.Service, 'get_by_compute_host')
    def test_set_host_enabled_with_disable_state_disabled(self, mock_svc):
        # Tests disabling a disabled host.
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        svc = self._create_service(disabled=True, host='fake-mini')
        mock_svc.return_value = svc
        drvr._set_host_enabled(False)
        self.assertTrue(svc.disabled)
    def test_set_host_enabled_swallows_exceptions(self):
        # Tests that set_host_enabled will swallow exceptions coming from the
        # db_api code so they don't break anything calling it, e.g. the
        # _get_new_connection method.
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        with mock.patch.object(db, 'service_get_by_compute_host') as db_mock:
            # Make db.service_get_by_compute_host raise NovaException; this
            # is more robust than just raising ComputeHostNotFound.
            db_mock.side_effect = exception.NovaException
            drvr._set_host_enabled(False)
    @mock.patch.object(fakelibvirt.virConnect, "nodeDeviceLookupByName")
    def test_prepare_pci_device(self, mock_lookup):
        # Preparing PCI devices under Xen looks each device up on the
        # connection and should complete without raising.
        pci_devices = [dict(hypervisor_name='xxx')]
        self.flags(virt_type='xen', group='libvirt')
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        conn = drvr._host.get_connection()
        mock_lookup.side_effect = lambda x: fakelibvirt.NodeDevice(conn)
        drvr._prepare_pci_devices_for_use(pci_devices)
    @mock.patch.object(fakelibvirt.virConnect, "nodeDeviceLookupByName")
    @mock.patch.object(fakelibvirt.virNodeDevice, "dettach")
    def test_prepare_pci_device_exception(self, mock_detach, mock_lookup):
        # A libvirt error during dettach must surface as
        # PciDevicePrepareFailed.
        pci_devices = [dict(hypervisor_name='xxx',
                            id='id1',
                            instance_uuid='uuid')]
        self.flags(virt_type='xen', group='libvirt')
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        conn = drvr._host.get_connection()
        mock_lookup.side_effect = lambda x: fakelibvirt.NodeDevice(conn)
        mock_detach.side_effect = fakelibvirt.libvirtError("xxxx")
        self.assertRaises(exception.PciDevicePrepareFailed,
                          drvr._prepare_pci_devices_for_use, pci_devices)
    def test_detach_pci_devices_exception(self):
        # Detaching must fail with PciDeviceDetachFailed when libvirt is
        # too old (has_min_version -> False).
        pci_devices = [dict(hypervisor_name='xxx',
                            id='id1',
                            instance_uuid='uuid')]
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        self.mox.StubOutWithMock(host.Host,
                                 'has_min_version')
        host.Host.has_min_version = lambda x, y: False
        self.assertRaises(exception.PciDeviceDetachFailed,
                          drvr._detach_pci_devices, None, pci_devices)
    def test_detach_pci_devices(self):
        # Successful detach: the fake domain marks the device by
        # rewriting its hypervisor_name when detachDeviceFlags is called.
        fake_domXML1 =\
            """<domain> <devices>
            <disk type='file' device='disk'>
            <driver name='qemu' type='qcow2' cache='none'/>
            <source file='xxx'/>
            <target dev='vda' bus='virtio'/>
            <alias name='virtio-disk0'/>
            <address type='pci' domain='0x0000' bus='0x00'
            slot='0x04' function='0x0'/>
            </disk>
            <hostdev mode="subsystem" type="pci" managed="yes">
            <source>
            <address function="0x1" slot="0x10" domain="0x0000"
             bus="0x04"/>
            </source>
            </hostdev></devices></domain>"""
        pci_devices = [dict(hypervisor_name='xxx',
                            id='id1',
                            instance_uuid='uuid',
                            address="0001:04:10:1")]
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        self.mox.StubOutWithMock(host.Host,
                                 'has_min_version')
        host.Host.has_min_version = lambda x, y: True
        self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver,
                                 '_get_guest_pci_device')
        class FakeDev(object):
            def to_xml(self):
                pass
        libvirt_driver.LibvirtDriver._get_guest_pci_device =\
            lambda x, y: FakeDev()
        class FakeDomain(object):
            def detachDeviceFlags(self, xml, flags):
                pci_devices[0]['hypervisor_name'] = 'marked'
                pass
            def XMLDesc(self, flags):
                return fake_domXML1
        guest = libvirt_guest.Guest(FakeDomain())
        drvr._detach_pci_devices(guest, pci_devices)
        self.assertEqual(pci_devices[0]['hypervisor_name'], 'marked')
    def test_detach_pci_devices_timeout(self):
        # The domain XML keeps reporting the hostdev as present, so the
        # driver should eventually give up with PciDeviceDetachFailed.
        fake_domXML1 =\
            """<domain>
                <devices>
                <hostdev mode="subsystem" type="pci" managed="yes">
                    <source>
<address function="0x1" slot="0x10" domain="0x0000" bus="0x04"/>
                    </source>
                </hostdev>
                </devices>
            </domain>"""
        pci_devices = [dict(hypervisor_name='xxx',
                            id='id1',
                            instance_uuid='uuid',
                            address="0000:04:10:1")]
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        self.mox.StubOutWithMock(host.Host,
                                 'has_min_version')
        host.Host.has_min_version = lambda x, y: True
        self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver,
                                 '_get_guest_pci_device')
        class FakeDev(object):
            def to_xml(self):
                pass
        libvirt_driver.LibvirtDriver._get_guest_pci_device =\
            lambda x, y: FakeDev()
        class FakeDomain(object):
            def detachDeviceFlags(self, xml, flags):
                pass
            def XMLDesc(self, flags):
                return fake_domXML1
        guest = libvirt_guest.Guest(FakeDomain())
        self.assertRaises(exception.PciDeviceDetachFailed,
                          drvr._detach_pci_devices, guest, pci_devices)
    def test_get_connector(self):
        # The volume connector dict must carry the host's identity
        # (ip, initiator, hostname, wwpns/wwnns).
        # NOTE(review): the local ``host`` shadows the imported
        # nova.virt.libvirt.host module within this method.
        initiator = 'fake.initiator.iqn'
        ip = 'fakeip'
        host = 'fakehost'
        wwpns = ['100010604b019419']
        wwnns = ['200010604b019419']
        self.flags(my_ip=ip)
        self.flags(host=host)
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        expected = {
            'ip': ip,
            'initiator': initiator,
            'host': host,
            'wwpns': wwpns,
            'wwnns': wwnns
        }
        volume = {
            'id': 'fake'
        }
        result = drvr.get_volume_connector(volume)
        self.assertThat(expected, matchers.DictMatches(result))
    def test_get_connector_storage_ip(self):
        # my_block_storage_ip takes precedence over my_ip in the
        # connector's 'ip' field.
        ip = '100.100.100.100'
        storage_ip = '101.101.101.101'
        self.flags(my_block_storage_ip=storage_ip, my_ip=ip)
        volume = {
            'id': 'fake'
        }
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        result = drvr.get_volume_connector(volume)
        self.assertEqual(storage_ip, result['ip'])
    def test_lifecycle_event_registration(self):
        # init_host() must install the libvirt error handler before the
        # first call that touches host capabilities; ``calls`` records
        # the observed ordering.
        calls = []
        def fake_registerErrorHandler(*args, **kwargs):
            calls.append('fake_registerErrorHandler')
        def fake_get_host_capabilities(**args):
            cpu = vconfig.LibvirtConfigGuestCPU()
            cpu.arch = arch.ARMV7
            caps = vconfig.LibvirtConfigCaps()
            caps.host = vconfig.LibvirtConfigCapsHost()
            caps.host.cpu = cpu
            calls.append('fake_get_host_capabilities')
            return caps
        @mock.patch.object(fakelibvirt, 'registerErrorHandler',
                           side_effect=fake_registerErrorHandler)
        @mock.patch.object(host.Host, "get_capabilities",
                           side_effect=fake_get_host_capabilities)
        def test_init_host(get_host_capabilities, register_error_handler):
            drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
            drvr.init_host("test_host")
        test_init_host()
        # NOTE(dkliban): Will fail if get_host_capabilities is called before
        # registerErrorHandler
        self.assertEqual(['fake_registerErrorHandler',
                          'fake_get_host_capabilities'], calls)
    def test_sanitize_log_to_xml(self):
        """_get_guest_xml must scrub volume auth passwords from the
        debug log output.
        """
        # setup fake data
        data = {'auth_password': 'scrubme'}
        bdm = [{'connection_info': {'data': data}}]
        bdi = {'block_device_mapping': bdm}
        # Tests that the parameters to the _get_guest_xml method
        # are sanitized for passwords when logged.
        def fake_debug(*args, **kwargs):
            if 'auth_password' in args[0]:
                self.assertNotIn('scrubme', args[0])
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        conf = mock.Mock()
        with contextlib.nested(
            mock.patch.object(libvirt_driver.LOG, 'debug',
                              side_effect=fake_debug),
            mock.patch.object(drvr, '_get_guest_config', return_value=conf)
        ) as (
            debug_mock, conf_mock
        ):
            drvr._get_guest_xml(self.context, self.test_instance,
                                network_info={}, disk_info={},
                                image_meta={}, block_device_info=bdi)
            # we don't care what the log message is, we just want to make sure
            # our stub method is called which asserts the password is scrubbed
            self.assertTrue(debug_mock.called)
    @mock.patch.object(time, "time")
    def test_get_guest_config(self, time_mock):
        """End-to-end check of _get_guest_config for a plain guest.

        Verifies the config's identity, resources, boot settings, the
        full expected device list, and the Nova instance metadata
        (owner, flavor, root image, creation time) that gets embedded
        in the domain description.
        """
        # Pin time.time() so the metadata creationTime is deterministic.
        time_mock.return_value = 1234567.89
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        test_instance = copy.deepcopy(self.test_instance)
        test_instance["display_name"] = "purple tomatoes"
        # Context values feed the <nova:owner> metadata element.
        ctxt = context.RequestContext(project_id=123,
                                      project_name="aubergine",
                                      user_id=456,
                                      user_name="pie")
        flavor = objects.Flavor(name='m1.small',
                                memory_mb=6,
                                vcpus=28,
                                root_gb=496,
                                ephemeral_gb=8128,
                                swap=33550336,
                                extra_specs={})
        instance_ref = objects.Instance(**test_instance)
        instance_ref.flavor = flavor
        image_meta = {}
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)
        cfg = drvr._get_guest_config(instance_ref,
                                     _fake_network_info(self.stubs, 1),
                                     {}, disk_info,
                                     context=ctxt)
        self.assertEqual(cfg.uuid, instance_ref["uuid"])
        self.assertEqual(2, len(cfg.features))
        self.assertIsInstance(cfg.features[0],
                              vconfig.LibvirtConfigGuestFeatureACPI)
        self.assertIsInstance(cfg.features[1],
                              vconfig.LibvirtConfigGuestFeatureAPIC)
        # Guest memory is expressed in KiB (flavor memory_mb * 1024).
        self.assertEqual(cfg.memory, 6 * units.Ki)
        self.assertEqual(cfg.vcpus, 28)
        self.assertEqual(cfg.os_type, vm_mode.HVM)
        self.assertEqual(cfg.os_boot_dev, ["hd"])
        self.assertIsNone(cfg.os_root)
        # Expected devices: 3 disks, 1 interface, 2 serial ports,
        # input, graphics, video and the memory balloon.
        self.assertEqual(len(cfg.devices), 10)
        self.assertIsInstance(cfg.devices[0],
                              vconfig.LibvirtConfigGuestDisk)
        self.assertIsInstance(cfg.devices[1],
                              vconfig.LibvirtConfigGuestDisk)
        self.assertIsInstance(cfg.devices[2],
                              vconfig.LibvirtConfigGuestDisk)
        self.assertIsInstance(cfg.devices[3],
                              vconfig.LibvirtConfigGuestInterface)
        self.assertIsInstance(cfg.devices[4],
                              vconfig.LibvirtConfigGuestSerial)
        self.assertIsInstance(cfg.devices[5],
                              vconfig.LibvirtConfigGuestSerial)
        self.assertIsInstance(cfg.devices[6],
                              vconfig.LibvirtConfigGuestInput)
        self.assertIsInstance(cfg.devices[7],
                              vconfig.LibvirtConfigGuestGraphics)
        self.assertIsInstance(cfg.devices[8],
                              vconfig.LibvirtConfigGuestVideo)
        self.assertIsInstance(cfg.devices[9],
                              vconfig.LibvirtConfigMemoryBalloon)
        self.assertEqual(len(cfg.metadata), 1)
        self.assertIsInstance(cfg.metadata[0],
                              vconfig.LibvirtConfigGuestMetaNovaInstance)
        self.assertEqual(version.version_string_with_package(),
                         cfg.metadata[0].package)
        self.assertEqual("purple tomatoes",
                         cfg.metadata[0].name)
        self.assertEqual(1234567.89,
                         cfg.metadata[0].creationTime)
        self.assertEqual("image",
                         cfg.metadata[0].roottype)
        self.assertEqual(str(instance_ref["image_ref"]),
                         cfg.metadata[0].rootid)
        self.assertIsInstance(cfg.metadata[0].owner,
                              vconfig.LibvirtConfigGuestMetaNovaOwner)
        self.assertEqual(456,
                         cfg.metadata[0].owner.userid)
        self.assertEqual("pie",
                         cfg.metadata[0].owner.username)
        self.assertEqual(123,
                         cfg.metadata[0].owner.projectid)
        self.assertEqual("aubergine",
                         cfg.metadata[0].owner.projectname)
        self.assertIsInstance(cfg.metadata[0].flavor,
                              vconfig.LibvirtConfigGuestMetaNovaFlavor)
        self.assertEqual("m1.small",
                         cfg.metadata[0].flavor.name)
        self.assertEqual(6,
                         cfg.metadata[0].flavor.memory)
        self.assertEqual(28,
                         cfg.metadata[0].flavor.vcpus)
        self.assertEqual(496,
                         cfg.metadata[0].flavor.disk)
        self.assertEqual(8128,
                         cfg.metadata[0].flavor.ephemeral)
        self.assertEqual(33550336,
                         cfg.metadata[0].flavor.swap)
def test_get_guest_config_lxc(self):
self.flags(virt_type='lxc', group='libvirt')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = objects.Instance(**self.test_instance)
cfg = drvr._get_guest_config(instance_ref,
_fake_network_info(self.stubs, 1),
None, {'mapping': {}})
self.assertEqual(instance_ref["uuid"], cfg.uuid)
self.assertEqual(2 * units.Mi, cfg.memory)
self.assertEqual(1, cfg.vcpus)
self.assertEqual(vm_mode.EXE, cfg.os_type)
self.assertEqual("/sbin/init", cfg.os_init_path)
self.assertEqual("console=tty0 console=ttyS0", cfg.os_cmdline)
self.assertIsNone(cfg.os_root)
self.assertEqual(3, len(cfg.devices))
self.assertIsInstance(cfg.devices[0],
vconfig.LibvirtConfigGuestFilesys)
self.assertIsInstance(cfg.devices[1],
vconfig.LibvirtConfigGuestInterface)
self.assertIsInstance(cfg.devices[2],
vconfig.LibvirtConfigGuestConsole)
def test_get_guest_config_lxc_with_id_maps(self):
self.flags(virt_type='lxc', group='libvirt')
self.flags(uid_maps=['0:1000:100'], group='libvirt')
self.flags(gid_maps=['0:1000:100'], group='libvirt')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = objects.Instance(**self.test_instance)
cfg = drvr._get_guest_config(instance_ref,
_fake_network_info(self.stubs, 1),
None, {'mapping': {}})
self.assertEqual(instance_ref["uuid"], cfg.uuid)
self.assertEqual(2 * units.Mi, cfg.memory)
self.assertEqual(1, cfg.vcpus)
self.assertEqual(vm_mode.EXE, cfg.os_type)
self.assertEqual("/sbin/init", cfg.os_init_path)
self.assertEqual("console=tty0 console=ttyS0", cfg.os_cmdline)
self.assertIsNone(cfg.os_root)
self.assertEqual(3, len(cfg.devices))
self.assertIsInstance(cfg.devices[0],
vconfig.LibvirtConfigGuestFilesys)
self.assertIsInstance(cfg.devices[1],
vconfig.LibvirtConfigGuestInterface)
self.assertIsInstance(cfg.devices[2],
vconfig.LibvirtConfigGuestConsole)
self.assertEqual(len(cfg.idmaps), 2)
self.assertIsInstance(cfg.idmaps[0],
vconfig.LibvirtConfigGuestUIDMap)
self.assertIsInstance(cfg.idmaps[1],
vconfig.LibvirtConfigGuestGIDMap)
    def test_get_guest_config_numa_host_instance_fits(self):
        """If the instance fits in a host NUMA cell and requests no
        pinning, no cpuset, vcpupin or guest NUMA config is generated.
        """
        instance_ref = objects.Instance(**self.test_instance)
        image_meta = {}
        # Tiny flavor guarantees the instance fits in any host cell.
        flavor = objects.Flavor(memory_mb=1, vcpus=2, root_gb=496,
                                ephemeral_gb=8128, swap=33550336, name='fake',
                                extra_specs={})
        instance_ref.flavor = flavor
        caps = vconfig.LibvirtConfigCaps()
        caps.host = vconfig.LibvirtConfigCapsHost()
        caps.host.cpu = vconfig.LibvirtConfigCPU()
        caps.host.cpu.arch = "x86_64"
        caps.host.topology = self._fake_caps_numa_topology()
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)
        with contextlib.nested(
                mock.patch.object(host.Host, 'has_min_version',
                                  return_value=True),
                mock.patch.object(host.Host, "get_capabilities",
                                  return_value=caps)):
            cfg = drvr._get_guest_config(instance_ref, [], {}, disk_info)
            self.assertIsNone(cfg.cpuset)
            self.assertEqual(0, len(cfg.cputune.vcpupin))
            self.assertIsNone(cfg.cpu.numa)
    def test_get_guest_config_numa_host_instance_no_fit(self):
        """If the instance cannot fit any single host NUMA cell, the
        guest falls back to the vcpu pin set (no random cell choice,
        no guest NUMA topology).
        """
        instance_ref = objects.Instance(**self.test_instance)
        image_meta = {}
        # Flavor deliberately too big for one host cell.
        flavor = objects.Flavor(memory_mb=4096, vcpus=4, root_gb=496,
                                ephemeral_gb=8128, swap=33550336, name='fake',
                                extra_specs={})
        instance_ref.flavor = flavor
        caps = vconfig.LibvirtConfigCaps()
        caps.host = vconfig.LibvirtConfigCapsHost()
        caps.host.cpu = vconfig.LibvirtConfigCPU()
        caps.host.cpu.arch = "x86_64"
        caps.host.topology = self._fake_caps_numa_topology()
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)
        with contextlib.nested(
                mock.patch.object(host.Host, "get_capabilities",
                                  return_value=caps),
                mock.patch.object(
                    hardware, 'get_vcpu_pin_set', return_value=set([3])),
                mock.patch.object(random, 'choice')
        ) as (get_host_cap_mock,
              get_vcpu_pin_set_mock, choice_mock):
            cfg = drvr._get_guest_config(instance_ref, [], {}, disk_info)
            # random.choice picks a cell only when a fit exists.
            self.assertFalse(choice_mock.called)
            self.assertEqual(set([3]), cfg.cpuset)
            self.assertEqual(0, len(cfg.cputune.vcpupin))
            self.assertIsNone(cfg.cpu.numa)
def _test_get_guest_memory_backing_config(
self, host_topology, inst_topology, numatune):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
with mock.patch.object(
drvr, "_get_host_numa_topology",
return_value=host_topology):
return drvr._get_guest_memory_backing_config(
inst_topology, numatune)
    @mock.patch.object(host.Host,
                       'has_min_version', return_value=True)
    def test_get_guest_memory_backing_config_large_success(self, mock_version):
        """Requesting 2M pages on a host cell that provides them yields
        a hugepages memory backing entry for the matching guest cell.
        """
        host_topology = objects.NUMATopology(
            cells=[
                objects.NUMACell(
                    id=3, cpuset=set([1]), memory=1024, mempages=[
                        objects.NUMAPagesTopology(size_kb=4, total=2000,
                                                  used=0),
                        objects.NUMAPagesTopology(size_kb=2048, total=512,
                                                  used=0),
                        objects.NUMAPagesTopology(size_kb=1048576, total=0,
                                                  used=0),
                    ])])
        inst_topology = objects.InstanceNUMATopology(cells=[
            objects.InstanceNUMACell(
                id=3, cpuset=set([0, 1]), memory=1024, pagesize=2048)])
        numa_tune = vconfig.LibvirtConfigGuestNUMATune()
        numa_tune.memnodes = [vconfig.LibvirtConfigGuestNUMATuneMemNode()]
        numa_tune.memnodes[0].cellid = 0
        numa_tune.memnodes[0].nodeset = [3]
        result = self._test_get_guest_memory_backing_config(
            host_topology, inst_topology, numa_tune)
        self.assertEqual(1, len(result.hugepages))
        self.assertEqual(2048, result.hugepages[0].size_kb)
        # The nodeset names the guest cell id (0), not the host cell (3).
        self.assertEqual([0], result.hugepages[0].nodeset)
    @mock.patch.object(host.Host,
                       'has_min_version', return_value=True)
    def test_get_guest_memory_backing_config_smallest(self, mock_version):
        """Requesting the smallest (4K) page size produces no explicit
        memory backing config at all.
        """
        host_topology = objects.NUMATopology(
            cells=[
                objects.NUMACell(
                    id=3, cpuset=set([1]), memory=1024, mempages=[
                        objects.NUMAPagesTopology(size_kb=4, total=2000,
                                                  used=0),
                        objects.NUMAPagesTopology(size_kb=2048, total=512,
                                                  used=0),
                        objects.NUMAPagesTopology(size_kb=1048576, total=0,
                                                  used=0),
                    ])])
        inst_topology = objects.InstanceNUMATopology(cells=[
            objects.InstanceNUMACell(
                id=3, cpuset=set([0, 1]), memory=1024, pagesize=4)])
        numa_tune = vconfig.LibvirtConfigGuestNUMATune()
        numa_tune.memnodes = [vconfig.LibvirtConfigGuestNUMATuneMemNode()]
        numa_tune.memnodes[0].cellid = 0
        numa_tune.memnodes[0].nodeset = [3]
        result = self._test_get_guest_memory_backing_config(
            host_topology, inst_topology, numa_tune)
        self.assertIsNone(result)
def test_get_guest_config_numa_host_instance_pci_no_numa_info(self):
instance_ref = objects.Instance(**self.test_instance)
image_meta = {}
flavor = objects.Flavor(memory_mb=1, vcpus=2, root_gb=496,
ephemeral_gb=8128, swap=33550336, name='fake',
extra_specs={})
instance_ref.flavor = flavor
caps = vconfig.LibvirtConfigCaps()
caps.host = vconfig.LibvirtConfigCapsHost()
caps.host.cpu = vconfig.LibvirtConfigCPU()
caps.host.cpu.arch = "x86_64"
caps.host.topology = self._fake_caps_numa_topology()
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta)
pci_device_info = dict(test_pci_device.fake_db_dev)
pci_device_info.update(compute_node_id=1,
label='fake',
status='available',
address='0000:00:00.1',
instance_uuid=None,
request_id=None,
extra_info={},
numa_node=None)
pci_device = objects.PciDevice(**pci_device_info)
with contextlib.nested(
mock.patch.object(host.Host, 'has_min_version',
return_value=True),
mock.patch.object(
host.Host, "get_capabilities", return_value=caps),
mock.patch.object(
hardware, 'get_vcpu_pin_set', return_value=set([3])),
mock.patch.object(host.Host, 'get_online_cpus',
return_value=set(range(8))),
mock.patch.object(pci_manager, "get_instance_pci_devs",
return_value=[pci_device])):
cfg = conn._get_guest_config(instance_ref, [], {}, disk_info)
self.assertEqual(set([3]), cfg.cpuset)
self.assertEqual(0, len(cfg.cputune.vcpupin))
self.assertIsNone(cfg.cpu.numa)
def test_get_guest_config_numa_host_instance_2pci_no_fit(self):
instance_ref = objects.Instance(**self.test_instance)
image_meta = {}
flavor = objects.Flavor(memory_mb=4096, vcpus=4, root_gb=496,
ephemeral_gb=8128, swap=33550336, name='fake',
extra_specs={})
instance_ref.flavor = flavor
caps = vconfig.LibvirtConfigCaps()
caps.host = vconfig.LibvirtConfigCapsHost()
caps.host.cpu = vconfig.LibvirtConfigCPU()
caps.host.cpu.arch = "x86_64"
caps.host.topology = self._fake_caps_numa_topology()
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta)
pci_device_info = dict(test_pci_device.fake_db_dev)
pci_device_info.update(compute_node_id=1,
label='fake',
status='available',
address='0000:00:00.1',
instance_uuid=None,
request_id=None,
extra_info={},
numa_node=1)
pci_device = objects.PciDevice(**pci_device_info)
pci_device_info.update(numa_node=0, address='0000:00:00.2')
pci_device2 = objects.PciDevice(**pci_device_info)
with contextlib.nested(
mock.patch.object(
host.Host, "get_capabilities", return_value=caps),
mock.patch.object(
hardware, 'get_vcpu_pin_set', return_value=set([3])),
mock.patch.object(random, 'choice'),
mock.patch.object(pci_manager, "get_instance_pci_devs",
return_value=[pci_device, pci_device2])
) as (get_host_cap_mock,
get_vcpu_pin_set_mock, choice_mock, pci_mock):
cfg = conn._get_guest_config(instance_ref, [], {}, disk_info)
self.assertFalse(choice_mock.called)
self.assertEqual(set([3]), cfg.cpuset)
self.assertEqual(0, len(cfg.cputune.vcpupin))
self.assertIsNone(cfg.cpu.numa)
    @mock.patch.object(fakelibvirt.Connection, 'getType')
    @mock.patch.object(fakelibvirt.Connection, 'getVersion')
    @mock.patch.object(fakelibvirt.Connection, 'getLibVersion')
    @mock.patch.object(host.Host, 'get_capabilities')
    @mock.patch.object(libvirt_driver.LibvirtDriver, '_set_host_enabled')
    def _test_get_guest_config_numa_unsupported(self, fake_lib_version,
                                                fake_version, fake_type,
                                                fake_arch, exception_class,
                                                pagesize, mock_host,
                                                mock_caps, mock_lib_version,
                                                mock_version, mock_type):
        """Helper: assert _get_guest_config raises exception_class for a
        NUMA instance on the given (libvirt version, hypervisor version,
        hypervisor type, host arch, pagesize) combination.

        Callers supply the first six arguments; the trailing mock_*
        arguments are injected by the patch decorators, applied
        bottom-up (_set_host_enabled, get_capabilities, getLibVersion,
        getVersion, getType).
        """
        instance_topology = objects.InstanceNUMATopology(
                    cells=[objects.InstanceNUMACell(
                        id=0, cpuset=set([0]),
                        memory=1024, pagesize=pagesize)])
        instance_ref = objects.Instance(**self.test_instance)
        instance_ref.numa_topology = instance_topology
        image_meta = {}
        flavor = objects.Flavor(memory_mb=1, vcpus=2, root_gb=496,
                                ephemeral_gb=8128, swap=33550336, name='fake',
                                extra_specs={})
        instance_ref.flavor = flavor
        caps = vconfig.LibvirtConfigCaps()
        caps.host = vconfig.LibvirtConfigCapsHost()
        caps.host.cpu = vconfig.LibvirtConfigCPU()
        caps.host.cpu.arch = fake_arch
        caps.host.topology = self._fake_caps_numa_topology()
        mock_type.return_value = fake_type
        mock_version.return_value = fake_version
        mock_lib_version.return_value = fake_lib_version
        mock_caps.return_value = caps
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)
        self.assertRaises(exception_class,
                          drvr._get_guest_config,
                          instance_ref, [], {}, disk_info)
def test_get_guest_config_numa_old_version_libvirt(self):
self.flags(virt_type='kvm', group='libvirt')
self._test_get_guest_config_numa_unsupported(
utils.convert_version_to_int(
libvirt_driver.MIN_LIBVIRT_NUMA_VERSION) - 1,
utils.convert_version_to_int(
libvirt_driver.MIN_QEMU_NUMA_HUGEPAGE_VERSION),
host.HV_DRIVER_QEMU,
arch.X86_64,
exception.NUMATopologyUnsupported,
None)
def test_get_guest_config_numa_bad_version_libvirt(self):
self.flags(virt_type='kvm', group='libvirt')
self._test_get_guest_config_numa_unsupported(
utils.convert_version_to_int(
libvirt_driver.BAD_LIBVIRT_NUMA_VERSIONS[0]),
utils.convert_version_to_int(
libvirt_driver.MIN_QEMU_NUMA_HUGEPAGE_VERSION),
host.HV_DRIVER_QEMU,
arch.X86_64,
exception.NUMATopologyUnsupported,
None)
def test_get_guest_config_numa_old_version_qemu(self):
self.flags(virt_type='kvm', group='libvirt')
self._test_get_guest_config_numa_unsupported(
utils.convert_version_to_int(
libvirt_driver.MIN_LIBVIRT_NUMA_VERSION),
utils.convert_version_to_int(
libvirt_driver.MIN_QEMU_NUMA_HUGEPAGE_VERSION) - 1,
host.HV_DRIVER_QEMU,
arch.X86_64,
exception.NUMATopologyUnsupported,
None)
def test_get_guest_config_numa_other_arch_qemu(self):
self.flags(virt_type='kvm', group='libvirt')
self._test_get_guest_config_numa_unsupported(
utils.convert_version_to_int(
libvirt_driver.MIN_LIBVIRT_NUMA_VERSION),
utils.convert_version_to_int(
libvirt_driver.MIN_QEMU_NUMA_HUGEPAGE_VERSION),
host.HV_DRIVER_QEMU,
arch.PPC64,
exception.NUMATopologyUnsupported,
None)
def test_get_guest_config_numa_xen(self):
self.flags(virt_type='xen', group='libvirt')
self._test_get_guest_config_numa_unsupported(
utils.convert_version_to_int(
libvirt_driver.MIN_LIBVIRT_NUMA_VERSION),
utils.convert_version_to_int((4, 5, 0)),
'XEN',
arch.X86_64,
exception.NUMATopologyUnsupported,
None)
def test_get_guest_config_numa_old_pages_libvirt(self):
self.flags(virt_type='kvm', group='libvirt')
self._test_get_guest_config_numa_unsupported(
utils.convert_version_to_int(
libvirt_driver.MIN_LIBVIRT_HUGEPAGE_VERSION) - 1,
utils.convert_version_to_int(
libvirt_driver.MIN_QEMU_NUMA_HUGEPAGE_VERSION),
host.HV_DRIVER_QEMU,
arch.X86_64,
exception.MemoryPagesUnsupported,
2048)
def test_get_guest_config_numa_old_pages_qemu(self):
self.flags(virt_type='kvm', group='libvirt')
self._test_get_guest_config_numa_unsupported(
utils.convert_version_to_int(
libvirt_driver.MIN_LIBVIRT_HUGEPAGE_VERSION),
utils.convert_version_to_int(
libvirt_driver.MIN_QEMU_NUMA_HUGEPAGE_VERSION) - 1,
host.HV_DRIVER_QEMU,
arch.X86_64,
exception.NUMATopologyUnsupported,
2048)
    def test_get_guest_config_numa_host_instance_fit_w_cpu_pinset(self):
        """vcpu_pin_set must constrain which host cells are considered
        viable when fitting the instance.
        """
        instance_ref = objects.Instance(**self.test_instance)
        image_meta = {}
        flavor = objects.Flavor(memory_mb=1024, vcpus=2, root_gb=496,
                                ephemeral_gb=8128, swap=33550336, name='fake',
                                extra_specs={})
        instance_ref.flavor = flavor
        caps = vconfig.LibvirtConfigCaps()
        caps.host = vconfig.LibvirtConfigCapsHost()
        caps.host.cpu = vconfig.LibvirtConfigCPU()
        caps.host.cpu.arch = "x86_64"
        caps.host.topology = self._fake_caps_numa_topology(kb_mem=4194304)
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)
        with contextlib.nested(
                mock.patch.object(host.Host, 'has_min_version',
                                  return_value=True),
                mock.patch.object(host.Host, "get_capabilities",
                                  return_value=caps),
                mock.patch.object(
                    hardware, 'get_vcpu_pin_set', return_value=set([2, 3])),
                mock.patch.object(host.Host, 'get_online_cpus',
                                  return_value=set(range(8)))
        ) as (has_min_version_mock, get_host_cap_mock,
              get_vcpu_pin_set_mock, get_online_cpus_mock):
            cfg = drvr._get_guest_config(instance_ref, [], {}, disk_info)
            # NOTE(ndipanov): we make sure that pin_set was taken into account
            # when choosing viable cells
            self.assertEqual(set([2, 3]), cfg.cpuset)
            self.assertEqual(0, len(cfg.cputune.vcpupin))
            self.assertIsNone(cfg.cpu.numa)
    def test_get_guest_config_non_numa_host_instance_topo(self):
        """On a host without NUMA topology, an instance NUMA topology
        still yields guest cpu.numa cells but no numatune/pinning.
        """
        instance_topology = objects.InstanceNUMATopology(
            cells=[objects.InstanceNUMACell(
                id=0, cpuset=set([0]), memory=1024),
                objects.InstanceNUMACell(
                    id=1, cpuset=set([2]), memory=1024)])
        instance_ref = objects.Instance(**self.test_instance)
        instance_ref.numa_topology = instance_topology
        image_meta = {}
        flavor = objects.Flavor(memory_mb=2048, vcpus=2, root_gb=496,
                                ephemeral_gb=8128, swap=33550336, name='fake',
                                extra_specs={})
        instance_ref.flavor = flavor
        caps = vconfig.LibvirtConfigCaps()
        caps.host = vconfig.LibvirtConfigCapsHost()
        caps.host.cpu = vconfig.LibvirtConfigCPU()
        caps.host.cpu.arch = "x86_64"
        # Host advertises no NUMA topology at all.
        caps.host.topology = None
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)
        with contextlib.nested(
                mock.patch.object(
                    objects.InstanceNUMATopology, "get_by_instance_uuid",
                    return_value=instance_topology),
                mock.patch.object(host.Host, 'has_min_version',
                                  return_value=True),
                mock.patch.object(host.Host, "get_capabilities",
                                  return_value=caps)):
            cfg = drvr._get_guest_config(instance_ref, [], {}, disk_info)
            self.assertIsNone(cfg.cpuset)
            self.assertEqual(0, len(cfg.cputune.vcpupin))
            self.assertIsNone(cfg.numatune)
            self.assertIsNotNone(cfg.cpu.numa)
            # Guest cells mirror the instance cells; memory is converted
            # from MiB to KiB.
            for instance_cell, numa_cfg_cell in zip(
                    instance_topology.cells, cfg.cpu.numa.cells):
                self.assertEqual(instance_cell.id, numa_cfg_cell.id)
                self.assertEqual(instance_cell.cpuset, numa_cfg_cell.cpus)
                self.assertEqual(instance_cell.memory * units.Ki,
                                 numa_cfg_cell.memory)
    def test_get_guest_config_numa_host_instance_topo(self):
        """A two-cell instance on a NUMA host gets per-vcpu pinning,
        emulator pinning, guest NUMA cells and a strict numatune.
        """
        instance_topology = objects.InstanceNUMATopology(
            cells=[objects.InstanceNUMACell(
                id=1, cpuset=set([0, 1]), memory=1024, pagesize=None),
                objects.InstanceNUMACell(
                    id=2, cpuset=set([2, 3]), memory=1024,
                    pagesize=None)])
        instance_ref = objects.Instance(**self.test_instance)
        instance_ref.numa_topology = instance_topology
        image_meta = {}
        flavor = objects.Flavor(memory_mb=2048, vcpus=4, root_gb=496,
                                ephemeral_gb=8128, swap=33550336, name='fake',
                                extra_specs={})
        instance_ref.flavor = flavor
        caps = vconfig.LibvirtConfigCaps()
        caps.host = vconfig.LibvirtConfigCapsHost()
        caps.host.cpu = vconfig.LibvirtConfigCPU()
        caps.host.cpu.arch = "x86_64"
        caps.host.topology = self._fake_caps_numa_topology()
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)
        with contextlib.nested(
                mock.patch.object(
                    objects.InstanceNUMATopology, "get_by_instance_uuid",
                    return_value=instance_topology),
                mock.patch.object(host.Host, 'has_min_version',
                                  return_value=True),
                mock.patch.object(host.Host, "get_capabilities",
                                  return_value=caps),
                mock.patch.object(
                    hardware, 'get_vcpu_pin_set',
                    return_value=set([2, 3, 4, 5])),
                mock.patch.object(host.Host, 'get_online_cpus',
                                  return_value=set(range(8))),
                ):
            cfg = drvr._get_guest_config(instance_ref, [], {}, disk_info)
            self.assertIsNone(cfg.cpuset)
            # Test that the pinning is correct and limited to allowed only
            self.assertEqual(0, cfg.cputune.vcpupin[0].id)
            self.assertEqual(set([2, 3]), cfg.cputune.vcpupin[0].cpuset)
            self.assertEqual(1, cfg.cputune.vcpupin[1].id)
            self.assertEqual(set([2, 3]), cfg.cputune.vcpupin[1].cpuset)
            self.assertEqual(2, cfg.cputune.vcpupin[2].id)
            self.assertEqual(set([4, 5]), cfg.cputune.vcpupin[2].cpuset)
            self.assertEqual(3, cfg.cputune.vcpupin[3].id)
            self.assertEqual(set([4, 5]), cfg.cputune.vcpupin[3].cpuset)
            self.assertIsNotNone(cfg.cpu.numa)
            # Emulator pin covers the union of all vcpu pinning sets.
            self.assertIsInstance(cfg.cputune.emulatorpin,
                                  vconfig.LibvirtConfigGuestCPUTuneEmulatorPin)
            self.assertEqual(set([2, 3, 4, 5]), cfg.cputune.emulatorpin.cpuset)
            # Guest cell ids are re-numbered 0..N regardless of the
            # instance cell ids; memory is converted from MiB to KiB.
            for instance_cell, numa_cfg_cell, index in zip(
                    instance_topology.cells,
                    cfg.cpu.numa.cells,
                    range(len(instance_topology.cells))):
                self.assertEqual(index, numa_cfg_cell.id)
                self.assertEqual(instance_cell.cpuset, numa_cfg_cell.cpus)
                self.assertEqual(instance_cell.memory * units.Ki,
                                 numa_cfg_cell.memory)
            allnodes = [cell.id for cell in instance_topology.cells]
            self.assertEqual(allnodes, cfg.numatune.memory.nodeset)
            self.assertEqual("strict", cfg.numatune.memory.mode)
            for instance_cell, memnode, index in zip(
                    instance_topology.cells,
                    cfg.numatune.memnodes,
                    range(len(instance_topology.cells))):
                self.assertEqual(index, memnode.cellid)
                self.assertEqual([instance_cell.id], memnode.nodeset)
                self.assertEqual("strict", memnode.mode)
    def test_get_guest_config_numa_host_instance_topo_reordered(self):
        """Instance cells whose host-cell ids are out of order (3, 0)
        must still produce correctly ordered guest cells and pinning.
        """
        instance_topology = objects.InstanceNUMATopology(
            cells=[objects.InstanceNUMACell(
                id=3, cpuset=set([0, 1]), memory=1024),
                objects.InstanceNUMACell(
                    id=0, cpuset=set([2, 3]), memory=1024)])
        instance_ref = objects.Instance(**self.test_instance)
        instance_ref.numa_topology = instance_topology
        image_meta = {}
        flavor = objects.Flavor(memory_mb=2048, vcpus=4, root_gb=496,
                                ephemeral_gb=8128, swap=33550336, name='fake',
                                extra_specs={})
        instance_ref.flavor = flavor
        caps = vconfig.LibvirtConfigCaps()
        caps.host = vconfig.LibvirtConfigCapsHost()
        caps.host.cpu = vconfig.LibvirtConfigCPU()
        caps.host.cpu.arch = "x86_64"
        caps.host.topology = self._fake_caps_numa_topology()
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)
        with contextlib.nested(
                mock.patch.object(
                    objects.InstanceNUMATopology, "get_by_instance_uuid",
                    return_value=instance_topology),
                mock.patch.object(host.Host, 'has_min_version',
                                  return_value=True),
                mock.patch.object(host.Host, "get_capabilities",
                                  return_value=caps),
                mock.patch.object(host.Host, 'get_online_cpus',
                                  return_value=set(range(8))),
                ):
            cfg = drvr._get_guest_config(instance_ref, [], {}, disk_info)
            self.assertIsNone(cfg.cpuset)
            # Test that the pinning is correct and limited to allowed only
            self.assertEqual(0, cfg.cputune.vcpupin[0].id)
            self.assertEqual(set([6, 7]), cfg.cputune.vcpupin[0].cpuset)
            self.assertEqual(1, cfg.cputune.vcpupin[1].id)
            self.assertEqual(set([6, 7]), cfg.cputune.vcpupin[1].cpuset)
            self.assertEqual(2, cfg.cputune.vcpupin[2].id)
            self.assertEqual(set([0, 1]), cfg.cputune.vcpupin[2].cpuset)
            self.assertEqual(3, cfg.cputune.vcpupin[3].id)
            self.assertEqual(set([0, 1]), cfg.cputune.vcpupin[3].cpuset)
            self.assertIsNotNone(cfg.cpu.numa)
            # Emulator pin covers the union of all vcpu pinning sets.
            self.assertIsInstance(cfg.cputune.emulatorpin,
                                  vconfig.LibvirtConfigGuestCPUTuneEmulatorPin)
            self.assertEqual(set([0, 1, 6, 7]), cfg.cputune.emulatorpin.cpuset)
            # Guest cell ids are re-numbered 0..N in instance-cell order.
            for index, (instance_cell, numa_cfg_cell) in enumerate(zip(
                    instance_topology.cells,
                    cfg.cpu.numa.cells)):
                self.assertEqual(index, numa_cfg_cell.id)
                self.assertEqual(instance_cell.cpuset, numa_cfg_cell.cpus)
                self.assertEqual(instance_cell.memory * units.Ki,
                                 numa_cfg_cell.memory)
            allnodes = set([cell.id for cell in instance_topology.cells])
            self.assertEqual(allnodes, set(cfg.numatune.memory.nodeset))
            self.assertEqual("strict", cfg.numatune.memory.mode)
            for index, (instance_cell, memnode) in enumerate(zip(
                    instance_topology.cells,
                    cfg.numatune.memnodes)):
                self.assertEqual(index, memnode.cellid)
                self.assertEqual([instance_cell.id], memnode.nodeset)
                self.assertEqual("strict", memnode.mode)
def test_get_guest_config_numa_host_instance_topo_cpu_pinning(self):
instance_topology = objects.InstanceNUMATopology(
cells=[objects.InstanceNUMACell(
id=1, cpuset=set([0, 1]), memory=1024,
cpu_pinning={0: 24, 1: 25}),
objects.InstanceNUMACell(
id=0, cpuset=set([2, 3]), memory=1024,
cpu_pinning={2: 0, 3: 1})])
instance_ref = objects.Instance(**self.test_instance)
instance_ref.numa_topology = instance_topology
image_meta = {}
flavor = objects.Flavor(memory_mb=2048, vcpus=2, root_gb=496,
ephemeral_gb=8128, swap=33550336, name='fake',
extra_specs={})
instance_ref.flavor = flavor
caps = vconfig.LibvirtConfigCaps()
caps.host = vconfig.LibvirtConfigCapsHost()
caps.host.cpu = vconfig.LibvirtConfigCPU()
caps.host.cpu.arch = "x86_64"
caps.host.topology = self._fake_caps_numa_topology(
sockets_per_cell=4, cores_per_socket=3, threads_per_core=2)
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta)
with contextlib.nested(
mock.patch.object(
objects.InstanceNUMATopology, "get_by_instance_uuid",
return_value=instance_topology),
mock.patch.object(host.Host, 'has_min_version',
return_value=True),
mock.patch.object(host.Host, "get_capabilities",
return_value=caps),
mock.patch.object(host.Host, 'get_online_cpus',
return_value=set(range(8))),
):
cfg = conn._get_guest_config(instance_ref, [], {}, disk_info)
self.assertIsNone(cfg.cpuset)
# Test that the pinning is correct and limited to allowed only
self.assertEqual(0, cfg.cputune.vcpupin[0].id)
self.assertEqual(set([24]), cfg.cputune.vcpupin[0].cpuset)
self.assertEqual(1, cfg.cputune.vcpupin[1].id)
self.assertEqual(set([25]), cfg.cputune.vcpupin[1].cpuset)
self.assertEqual(2, cfg.cputune.vcpupin[2].id)
self.assertEqual(set([0]), cfg.cputune.vcpupin[2].cpuset)
self.assertEqual(3, cfg.cputune.vcpupin[3].id)
self.assertEqual(set([1]), cfg.cputune.vcpupin[3].cpuset)
self.assertIsNotNone(cfg.cpu.numa)
# Emulator must be pinned to union of cfg.cputune.vcpupin[*].cpuset
self.assertIsInstance(cfg.cputune.emulatorpin,
vconfig.LibvirtConfigGuestCPUTuneEmulatorPin)
self.assertEqual(set([0, 1, 24, 25]),
cfg.cputune.emulatorpin.cpuset)
for i, (instance_cell, numa_cfg_cell) in enumerate(zip(
instance_topology.cells, cfg.cpu.numa.cells)):
self.assertEqual(i, numa_cfg_cell.id)
self.assertEqual(instance_cell.cpuset, numa_cfg_cell.cpus)
self.assertEqual(instance_cell.memory * units.Ki,
numa_cfg_cell.memory)
allnodes = set([cell.id for cell in instance_topology.cells])
self.assertEqual(allnodes, set(cfg.numatune.memory.nodeset))
self.assertEqual("strict", cfg.numatune.memory.mode)
for i, (instance_cell, memnode) in enumerate(zip(
instance_topology.cells, cfg.numatune.memnodes)):
self.assertEqual(i, memnode.cellid)
self.assertEqual([instance_cell.id], memnode.nodeset)
self.assertEqual("strict", memnode.mode)
def test_get_cpu_numa_config_from_instance(self):
topology = objects.InstanceNUMATopology(cells=[
objects.InstanceNUMACell(id=0, cpuset=set([1, 2]), memory=128),
objects.InstanceNUMACell(id=1, cpuset=set([3, 4]), memory=128),
])
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
conf = drvr._get_cpu_numa_config_from_instance(topology)
self.assertIsInstance(conf, vconfig.LibvirtConfigGuestCPUNUMA)
self.assertEqual(0, conf.cells[0].id)
self.assertEqual(set([1, 2]), conf.cells[0].cpus)
self.assertEqual(131072, conf.cells[0].memory)
self.assertEqual(1, conf.cells[1].id)
self.assertEqual(set([3, 4]), conf.cells[1].cpus)
self.assertEqual(131072, conf.cells[1].memory)
def test_get_cpu_numa_config_from_instance_none(self):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
conf = drvr._get_cpu_numa_config_from_instance(None)
self.assertIsNone(conf)
def test_get_guest_config_clock(self):
self.flags(virt_type='kvm', group='libvirt')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = objects.Instance(**self.test_instance)
image_meta = {}
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta)
image_meta = {}
hpet_map = {
arch.X86_64: True,
arch.I686: True,
arch.PPC: False,
arch.PPC64: False,
arch.ARMV7: False,
arch.AARCH64: False,
}
for guestarch, expect_hpet in hpet_map.items():
with mock.patch.object(libvirt_driver.libvirt_utils,
'get_arch',
return_value=guestarch):
cfg = drvr._get_guest_config(instance_ref, [],
image_meta,
disk_info)
self.assertIsInstance(cfg.clock,
vconfig.LibvirtConfigGuestClock)
self.assertEqual(cfg.clock.offset, "utc")
self.assertIsInstance(cfg.clock.timers[0],
vconfig.LibvirtConfigGuestTimer)
self.assertIsInstance(cfg.clock.timers[1],
vconfig.LibvirtConfigGuestTimer)
self.assertEqual(cfg.clock.timers[0].name, "pit")
self.assertEqual(cfg.clock.timers[0].tickpolicy,
"delay")
self.assertEqual(cfg.clock.timers[1].name, "rtc")
self.assertEqual(cfg.clock.timers[1].tickpolicy,
"catchup")
if expect_hpet:
self.assertEqual(3, len(cfg.clock.timers))
self.assertIsInstance(cfg.clock.timers[2],
vconfig.LibvirtConfigGuestTimer)
self.assertEqual('hpet', cfg.clock.timers[2].name)
self.assertFalse(cfg.clock.timers[2].present)
else:
self.assertEqual(2, len(cfg.clock.timers))
@mock.patch.object(libvirt_utils, 'get_arch')
@mock.patch.object(host.Host, 'has_min_version')
def test_get_guest_config_windows(self, mock_version, mock_get_arch):
mock_version.return_value = False
mock_get_arch.return_value = arch.I686
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = objects.Instance(**self.test_instance)
instance_ref['os_type'] = 'windows'
image_meta = {}
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta)
cfg = drvr._get_guest_config(instance_ref,
_fake_network_info(self.stubs, 1),
{}, disk_info)
self.assertIsInstance(cfg.clock,
vconfig.LibvirtConfigGuestClock)
self.assertEqual(cfg.clock.offset, "localtime")
self.assertEqual(3, len(cfg.clock.timers), cfg.clock.timers)
self.assertEqual("pit", cfg.clock.timers[0].name)
self.assertEqual("rtc", cfg.clock.timers[1].name)
self.assertEqual("hpet", cfg.clock.timers[2].name)
self.assertFalse(cfg.clock.timers[2].present)
@mock.patch.object(libvirt_utils, 'get_arch')
@mock.patch.object(host.Host, 'has_min_version')
def test_get_guest_config_windows_timer(self, mock_version, mock_get_arch):
mock_version.return_value = True
mock_get_arch.return_value = arch.I686
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = objects.Instance(**self.test_instance)
instance_ref['os_type'] = 'windows'
image_meta = {}
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta)
cfg = drvr._get_guest_config(instance_ref,
_fake_network_info(self.stubs, 1),
{}, disk_info)
self.assertIsInstance(cfg.clock,
vconfig.LibvirtConfigGuestClock)
self.assertEqual(cfg.clock.offset, "localtime")
self.assertEqual(4, len(cfg.clock.timers), cfg.clock.timers)
self.assertEqual("pit", cfg.clock.timers[0].name)
self.assertEqual("rtc", cfg.clock.timers[1].name)
self.assertEqual("hpet", cfg.clock.timers[2].name)
self.assertFalse(cfg.clock.timers[2].present)
self.assertEqual("hypervclock", cfg.clock.timers[3].name)
self.assertTrue(cfg.clock.timers[3].present)
self.assertEqual(3, len(cfg.features))
self.assertIsInstance(cfg.features[0],
vconfig.LibvirtConfigGuestFeatureACPI)
self.assertIsInstance(cfg.features[1],
vconfig.LibvirtConfigGuestFeatureAPIC)
self.assertIsInstance(cfg.features[2],
vconfig.LibvirtConfigGuestFeatureHyperV)
@mock.patch.object(host.Host, 'has_min_version')
def test_get_guest_config_windows_hyperv_feature1(self, mock_version):
def fake_version(lv_ver=None, hv_ver=None, hv_type=None):
if lv_ver == (1, 0, 0) and hv_ver == (1, 1, 0):
return True
return False
mock_version.side_effect = fake_version
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = objects.Instance(**self.test_instance)
instance_ref['os_type'] = 'windows'
image_meta = {}
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta)
cfg = drvr._get_guest_config(instance_ref,
_fake_network_info(self.stubs, 1),
{}, disk_info)
self.assertIsInstance(cfg.clock,
vconfig.LibvirtConfigGuestClock)
self.assertEqual(cfg.clock.offset, "localtime")
self.assertEqual(3, len(cfg.features))
self.assertIsInstance(cfg.features[0],
vconfig.LibvirtConfigGuestFeatureACPI)
self.assertIsInstance(cfg.features[1],
vconfig.LibvirtConfigGuestFeatureAPIC)
self.assertIsInstance(cfg.features[2],
vconfig.LibvirtConfigGuestFeatureHyperV)
self.assertTrue(cfg.features[2].relaxed)
self.assertFalse(cfg.features[2].spinlocks)
self.assertFalse(cfg.features[2].vapic)
@mock.patch.object(host.Host, 'has_min_version')
def test_get_guest_config_windows_hyperv_feature2(self, mock_version):
mock_version.return_value = True
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = objects.Instance(**self.test_instance)
instance_ref['os_type'] = 'windows'
image_meta = {}
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta)
cfg = drvr._get_guest_config(instance_ref,
_fake_network_info(self.stubs, 1),
{}, disk_info)
self.assertIsInstance(cfg.clock,
vconfig.LibvirtConfigGuestClock)
self.assertEqual(cfg.clock.offset, "localtime")
self.assertEqual(3, len(cfg.features))
self.assertIsInstance(cfg.features[0],
vconfig.LibvirtConfigGuestFeatureACPI)
self.assertIsInstance(cfg.features[1],
vconfig.LibvirtConfigGuestFeatureAPIC)
self.assertIsInstance(cfg.features[2],
vconfig.LibvirtConfigGuestFeatureHyperV)
self.assertTrue(cfg.features[2].relaxed)
self.assertTrue(cfg.features[2].spinlocks)
self.assertEqual(8191, cfg.features[2].spinlock_retries)
self.assertTrue(cfg.features[2].vapic)
def test_get_guest_config_with_two_nics(self):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = objects.Instance(**self.test_instance)
image_meta = {}
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta)
cfg = drvr._get_guest_config(instance_ref,
_fake_network_info(self.stubs, 2),
{}, disk_info)
self.assertEqual(2, len(cfg.features))
self.assertIsInstance(cfg.features[0],
vconfig.LibvirtConfigGuestFeatureACPI)
self.assertIsInstance(cfg.features[1],
vconfig.LibvirtConfigGuestFeatureAPIC)
self.assertEqual(cfg.memory, 2 * units.Mi)
self.assertEqual(cfg.vcpus, 1)
self.assertEqual(cfg.os_type, vm_mode.HVM)
self.assertEqual(cfg.os_boot_dev, ["hd"])
self.assertIsNone(cfg.os_root)
self.assertEqual(len(cfg.devices), 10)
self.assertIsInstance(cfg.devices[0],
vconfig.LibvirtConfigGuestDisk)
self.assertIsInstance(cfg.devices[1],
vconfig.LibvirtConfigGuestDisk)
self.assertIsInstance(cfg.devices[2],
vconfig.LibvirtConfigGuestInterface)
self.assertIsInstance(cfg.devices[3],
vconfig.LibvirtConfigGuestInterface)
self.assertIsInstance(cfg.devices[4],
vconfig.LibvirtConfigGuestSerial)
self.assertIsInstance(cfg.devices[5],
vconfig.LibvirtConfigGuestSerial)
self.assertIsInstance(cfg.devices[6],
vconfig.LibvirtConfigGuestInput)
self.assertIsInstance(cfg.devices[7],
vconfig.LibvirtConfigGuestGraphics)
self.assertIsInstance(cfg.devices[8],
vconfig.LibvirtConfigGuestVideo)
self.assertIsInstance(cfg.devices[9],
vconfig.LibvirtConfigMemoryBalloon)
    def test_get_guest_config_bug_1118829(self):
        """Regression test for bug 1118829.

        Building a guest config with an empty block_device_info must not
        fail and must leave root_device_name set on the instance.
        """
        self.flags(virt_type='uml', group='libvirt')
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance_ref = objects.Instance(**self.test_instance)
        # Hand-built disk_info so no image/BDM plumbing is needed.
        disk_info = {'disk_bus': 'virtio',
                     'cdrom_bus': 'ide',
                     'mapping': {u'vda': {'bus': 'virtio',
                                          'type': 'disk',
                                          'dev': u'vda'},
                                 'root': {'bus': 'virtio',
                                          'type': 'disk',
                                          'dev': 'vda'}}}
        # NOTE(jdg): For this specific test leave this blank
        # This will exercise the failed code path still,
        # and won't require fakes and stubs of the iscsi discovery
        block_device_info = {}
        drvr._get_guest_config(instance_ref, [], {}, disk_info,
                               None, block_device_info)
        self.assertEqual(instance_ref['root_device_name'], '/dev/vda')
def test_get_guest_config_with_root_device_name(self):
self.flags(virt_type='uml', group='libvirt')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = objects.Instance(**self.test_instance)
image_meta = {}
block_device_info = {'root_device_name': '/dev/vdb'}
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta,
block_device_info)
cfg = drvr._get_guest_config(instance_ref, [], {}, disk_info,
None, block_device_info)
self.assertEqual(0, len(cfg.features))
self.assertEqual(cfg.memory, 2 * units.Mi)
self.assertEqual(cfg.vcpus, 1)
self.assertEqual(cfg.os_type, "uml")
self.assertEqual(cfg.os_boot_dev, [])
self.assertEqual(cfg.os_root, '/dev/vdb')
self.assertEqual(len(cfg.devices), 3)
self.assertIsInstance(cfg.devices[0],
vconfig.LibvirtConfigGuestDisk)
self.assertIsInstance(cfg.devices[1],
vconfig.LibvirtConfigGuestDisk)
self.assertIsInstance(cfg.devices[2],
vconfig.LibvirtConfigGuestConsole)
    def test_get_guest_config_with_block_device(self):
        """Volume-backed BDMs appear as guest disks using the requested
        device names (vdc, vdd).
        """
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance_ref = objects.Instance(**self.test_instance)
        image_meta = {}
        conn_info = {'driver_volume_type': 'fake'}
        info = {'block_device_mapping': driver_block_device.convert_volumes([
                    fake_block_device.FakeDbBlockDeviceDict(
                        {'id': 1,
                         'source_type': 'volume', 'destination_type': 'volume',
                         'device_name': '/dev/vdc'}),
                    fake_block_device.FakeDbBlockDeviceDict(
                        {'id': 2,
                         'source_type': 'volume', 'destination_type': 'volume',
                         'device_name': '/dev/vdd'}),
                ])}
        info['block_device_mapping'][0]['connection_info'] = conn_info
        info['block_device_mapping'][1]['connection_info'] = conn_info
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta,
                                            info)
        # Attaching volumes updates the BDMs; stub save() so no database
        # access is attempted.
        with mock.patch.object(
                driver_block_device.DriverVolumeBlockDevice, 'save'
        ) as mock_save:
            cfg = drvr._get_guest_config(instance_ref, [], {}, disk_info,
                                         None, info)
            self.assertIsInstance(cfg.devices[2],
                                  vconfig.LibvirtConfigGuestDisk)
            self.assertEqual(cfg.devices[2].target_dev, 'vdc')
            self.assertIsInstance(cfg.devices[3],
                                  vconfig.LibvirtConfigGuestDisk)
            self.assertEqual(cfg.devices[3].target_dev, 'vdd')
            mock_save.assert_called_with()
    def test_get_guest_config_lxc_with_attached_volume(self):
        """For LXC, non-boot volume BDMs become guest disks; the boot
        volume (boot_index 0) is consumed as the container rootfs.
        """
        self.flags(virt_type='lxc', group='libvirt')
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance_ref = objects.Instance(**self.test_instance)
        image_meta = {}
        conn_info = {'driver_volume_type': 'fake'}
        info = {'block_device_mapping': driver_block_device.convert_volumes([
                    fake_block_device.FakeDbBlockDeviceDict(
                        {'id': 1,
                         'source_type': 'volume', 'destination_type': 'volume',
                         'boot_index': 0}),
                    fake_block_device.FakeDbBlockDeviceDict(
                        {'id': 2,
                         'source_type': 'volume', 'destination_type': 'volume',
                        }),
                    fake_block_device.FakeDbBlockDeviceDict(
                        {'id': 3,
                         'source_type': 'volume', 'destination_type': 'volume',
                        }),
               ])}
        info['block_device_mapping'][0]['connection_info'] = conn_info
        info['block_device_mapping'][1]['connection_info'] = conn_info
        info['block_device_mapping'][2]['connection_info'] = conn_info
        info['block_device_mapping'][0]['mount_device'] = '/dev/vda'
        info['block_device_mapping'][1]['mount_device'] = '/dev/vdc'
        info['block_device_mapping'][2]['mount_device'] = '/dev/vdd'
        # Stub out BDM save() so that recording the attachment does not
        # touch the database.
        with mock.patch.object(
                driver_block_device.DriverVolumeBlockDevice, 'save'
        ) as mock_save:
            disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                                instance_ref,
                                                image_meta,
                                                info)
            cfg = drvr._get_guest_config(instance_ref, [], {}, disk_info,
                                         None, info)
            self.assertIsInstance(cfg.devices[1],
                                  vconfig.LibvirtConfigGuestDisk)
            self.assertEqual(cfg.devices[1].target_dev, 'vdc')
            self.assertIsInstance(cfg.devices[2],
                                  vconfig.LibvirtConfigGuestDisk)
            self.assertEqual(cfg.devices[2].target_dev, 'vdd')
            mock_save.assert_called_with()
    def test_get_guest_config_with_configdrive(self):
        """The config drive is attached as the last available device.

        It's necessary to check if the architecture is power, because
        power doesn't have support to ide, and so libvirt translates
        all ide calls to scsi.
        """
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance_ref = objects.Instance(**self.test_instance)
        image_meta = {}

        # make configdrive.required_by() return True
        instance_ref['config_drive'] = True

        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)
        cfg = drvr._get_guest_config(instance_ref, [], {}, disk_info)

        # The last device is selected for this. on x86 is the last ide
        # device (hdd). Since power only support scsi, the last device
        # is sdz
        expect = {"ppc": "sdz", "ppc64": "sdz"}
        disk = expect.get(blockinfo.libvirt_utils.get_arch({}), "hdd")
        self.assertIsInstance(cfg.devices[2],
                              vconfig.LibvirtConfigGuestDisk)
        self.assertEqual(cfg.devices[2].target_dev, disk)
def test_get_guest_config_with_virtio_scsi_bus(self):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
image_meta = {"properties": {"hw_scsi_model": "virtio-scsi"}}
instance_ref = objects.Instance(**self.test_instance)
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta,
[])
cfg = drvr._get_guest_config(instance_ref, [], image_meta, disk_info)
self.assertIsInstance(cfg.devices[0],
vconfig.LibvirtConfigGuestDisk)
self.assertIsInstance(cfg.devices[1],
vconfig.LibvirtConfigGuestDisk)
self.assertIsInstance(cfg.devices[2],
vconfig.LibvirtConfigGuestController)
self.assertEqual(cfg.devices[2].model, 'virtio-scsi')
    def test_get_guest_config_with_virtio_scsi_bus_bdm(self):
        """With hw_scsi_model=virtio-scsi, scsi-bus BDMs become scsi disks
        followed by a virtio-scsi controller.
        """
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        image_meta = {"properties": {"hw_scsi_model": "virtio-scsi"}}
        instance_ref = objects.Instance(**self.test_instance)
        conn_info = {'driver_volume_type': 'fake'}
        bd_info = {
            'block_device_mapping': driver_block_device.convert_volumes([
                    fake_block_device.FakeDbBlockDeviceDict(
                        {'id': 1,
                         'source_type': 'volume', 'destination_type': 'volume',
                         'device_name': '/dev/sdc', 'disk_bus': 'scsi'}),
                    fake_block_device.FakeDbBlockDeviceDict(
                        {'id': 2,
                         'source_type': 'volume', 'destination_type': 'volume',
                         'device_name': '/dev/sdd', 'disk_bus': 'scsi'}),
                ])}
        bd_info['block_device_mapping'][0]['connection_info'] = conn_info
        bd_info['block_device_mapping'][1]['connection_info'] = conn_info
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta,
                                            bd_info)
        # Stub out BDM save() so recording the attachment does not hit
        # the database.
        with mock.patch.object(
                driver_block_device.DriverVolumeBlockDevice, 'save'
        ) as mock_save:
            cfg = drvr._get_guest_config(instance_ref, [], image_meta,
                                         disk_info, [], bd_info)
            self.assertIsInstance(cfg.devices[2],
                                  vconfig.LibvirtConfigGuestDisk)
            self.assertEqual(cfg.devices[2].target_dev, 'sdc')
            self.assertEqual(cfg.devices[2].target_bus, 'scsi')
            self.assertIsInstance(cfg.devices[3],
                                  vconfig.LibvirtConfigGuestDisk)
            self.assertEqual(cfg.devices[3].target_dev, 'sdd')
            self.assertEqual(cfg.devices[3].target_bus, 'scsi')
            self.assertIsInstance(cfg.devices[4],
                                  vconfig.LibvirtConfigGuestController)
            self.assertEqual(cfg.devices[4].model, 'virtio-scsi')
            mock_save.assert_called_with()
def test_get_guest_config_with_vnc(self):
self.flags(vnc_enabled=True)
self.flags(virt_type='kvm',
use_usb_tablet=False,
group='libvirt')
self.flags(enabled=False, group='spice')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = objects.Instance(**self.test_instance)
image_meta = {}
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta)
cfg = drvr._get_guest_config(instance_ref, [], {}, disk_info)
self.assertEqual(len(cfg.devices), 7)
self.assertIsInstance(cfg.devices[0],
vconfig.LibvirtConfigGuestDisk)
self.assertIsInstance(cfg.devices[1],
vconfig.LibvirtConfigGuestDisk)
self.assertIsInstance(cfg.devices[2],
vconfig.LibvirtConfigGuestSerial)
self.assertIsInstance(cfg.devices[3],
vconfig.LibvirtConfigGuestSerial)
self.assertIsInstance(cfg.devices[4],
vconfig.LibvirtConfigGuestGraphics)
self.assertIsInstance(cfg.devices[5],
vconfig.LibvirtConfigGuestVideo)
self.assertIsInstance(cfg.devices[6],
vconfig.LibvirtConfigMemoryBalloon)
self.assertEqual(cfg.devices[4].type, "vnc")
def test_get_guest_config_with_vnc_and_tablet(self):
self.flags(vnc_enabled=True)
self.flags(virt_type='kvm',
use_usb_tablet=True,
group='libvirt')
self.flags(enabled=False, group='spice')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = objects.Instance(**self.test_instance)
image_meta = {}
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta)
cfg = drvr._get_guest_config(instance_ref, [], {}, disk_info)
self.assertEqual(len(cfg.devices), 8)
self.assertIsInstance(cfg.devices[0],
vconfig.LibvirtConfigGuestDisk)
self.assertIsInstance(cfg.devices[1],
vconfig.LibvirtConfigGuestDisk)
self.assertIsInstance(cfg.devices[2],
vconfig.LibvirtConfigGuestSerial)
self.assertIsInstance(cfg.devices[3],
vconfig.LibvirtConfigGuestSerial)
self.assertIsInstance(cfg.devices[4],
vconfig.LibvirtConfigGuestInput)
self.assertIsInstance(cfg.devices[5],
vconfig.LibvirtConfigGuestGraphics)
self.assertIsInstance(cfg.devices[6],
vconfig.LibvirtConfigGuestVideo)
self.assertIsInstance(cfg.devices[7],
vconfig.LibvirtConfigMemoryBalloon)
self.assertEqual(cfg.devices[4].type, "tablet")
self.assertEqual(cfg.devices[5].type, "vnc")
def test_get_guest_config_with_spice_and_tablet(self):
self.flags(vnc_enabled=False)
self.flags(virt_type='kvm',
use_usb_tablet=True,
group='libvirt')
self.flags(enabled=True,
agent_enabled=False,
group='spice')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = objects.Instance(**self.test_instance)
image_meta = {}
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta)
cfg = drvr._get_guest_config(instance_ref, [], {}, disk_info)
self.assertEqual(len(cfg.devices), 8)
self.assertIsInstance(cfg.devices[0],
vconfig.LibvirtConfigGuestDisk)
self.assertIsInstance(cfg.devices[1],
vconfig.LibvirtConfigGuestDisk)
self.assertIsInstance(cfg.devices[2],
vconfig.LibvirtConfigGuestSerial)
self.assertIsInstance(cfg.devices[3],
vconfig.LibvirtConfigGuestSerial)
self.assertIsInstance(cfg.devices[4],
vconfig.LibvirtConfigGuestInput)
self.assertIsInstance(cfg.devices[5],
vconfig.LibvirtConfigGuestGraphics)
self.assertIsInstance(cfg.devices[6],
vconfig.LibvirtConfigGuestVideo)
self.assertIsInstance(cfg.devices[7],
vconfig.LibvirtConfigMemoryBalloon)
self.assertEqual(cfg.devices[4].type, "tablet")
self.assertEqual(cfg.devices[5].type, "spice")
def test_get_guest_config_with_spice_and_agent(self):
self.flags(vnc_enabled=False)
self.flags(virt_type='kvm',
use_usb_tablet=True,
group='libvirt')
self.flags(enabled=True,
agent_enabled=True,
group='spice')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = objects.Instance(**self.test_instance)
image_meta = {}
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta)
cfg = drvr._get_guest_config(instance_ref, [], {}, disk_info)
self.assertEqual(len(cfg.devices), 8)
self.assertIsInstance(cfg.devices[0],
vconfig.LibvirtConfigGuestDisk)
self.assertIsInstance(cfg.devices[1],
vconfig.LibvirtConfigGuestDisk)
self.assertIsInstance(cfg.devices[2],
vconfig.LibvirtConfigGuestSerial)
self.assertIsInstance(cfg.devices[3],
vconfig.LibvirtConfigGuestSerial)
self.assertIsInstance(cfg.devices[4],
vconfig.LibvirtConfigGuestChannel)
self.assertIsInstance(cfg.devices[5],
vconfig.LibvirtConfigGuestGraphics)
self.assertIsInstance(cfg.devices[6],
vconfig.LibvirtConfigGuestVideo)
self.assertIsInstance(cfg.devices[7],
vconfig.LibvirtConfigMemoryBalloon)
self.assertEqual(cfg.devices[4].target_name, "com.redhat.spice.0")
self.assertEqual(cfg.devices[5].type, "spice")
self.assertEqual(cfg.devices[6].type, "qxl")
@mock.patch('nova.console.serial.acquire_port')
@mock.patch('nova.virt.hardware.get_number_of_serial_ports',
return_value=1)
@mock.patch.object(libvirt_driver.libvirt_utils, 'get_arch',)
def test_create_serial_console_devices_based_on_arch(self, mock_get_arch,
mock_get_port_number,
mock_acquire_port):
self.flags(enabled=True, group='serial_console')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
expected = {arch.X86_64: vconfig.LibvirtConfigGuestSerial,
arch.S390: vconfig.LibvirtConfigGuestConsole,
arch.S390X: vconfig.LibvirtConfigGuestConsole}
for guest_arch, device_type in expected.items():
mock_get_arch.return_value = guest_arch
guest = vconfig.LibvirtConfigGuest()
drvr._create_serial_console_devices(guest, instance=None,
flavor={}, image_meta={})
self.assertEqual(1, len(guest.devices))
console_device = guest.devices[0]
self.assertIsInstance(console_device, device_type)
self.assertEqual("tcp", console_device.type)
@mock.patch('nova.console.serial.acquire_port')
def test_get_guest_config_serial_console(self, acquire_port):
self.flags(enabled=True, group='serial_console')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = objects.Instance(**self.test_instance)
image_meta = {}
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta)
acquire_port.return_value = 11111
cfg = drvr._get_guest_config(instance_ref, [], {}, disk_info)
self.assertEqual(8, len(cfg.devices))
self.assertIsInstance(cfg.devices[0],
vconfig.LibvirtConfigGuestDisk)
self.assertIsInstance(cfg.devices[1],
vconfig.LibvirtConfigGuestDisk)
self.assertIsInstance(cfg.devices[2],
vconfig.LibvirtConfigGuestSerial)
self.assertIsInstance(cfg.devices[3],
vconfig.LibvirtConfigGuestSerial)
self.assertIsInstance(cfg.devices[4],
vconfig.LibvirtConfigGuestInput)
self.assertIsInstance(cfg.devices[5],
vconfig.LibvirtConfigGuestGraphics)
self.assertIsInstance(cfg.devices[6],
vconfig.LibvirtConfigGuestVideo)
self.assertIsInstance(cfg.devices[7],
vconfig.LibvirtConfigMemoryBalloon)
self.assertEqual("tcp", cfg.devices[2].type)
self.assertEqual(11111, cfg.devices[2].listen_port)
def test_get_guest_config_serial_console_through_flavor(self):
self.flags(enabled=True, group='serial_console')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = objects.Instance(**self.test_instance)
instance_ref.flavor.extra_specs = {'hw:serial_port_count': 3}
image_meta = {}
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta)
cfg = drvr._get_guest_config(instance_ref, [], {}, disk_info)
self.assertEqual(10, len(cfg.devices))
self.assertIsInstance(cfg.devices[0],
vconfig.LibvirtConfigGuestDisk)
self.assertIsInstance(cfg.devices[1],
vconfig.LibvirtConfigGuestDisk)
self.assertIsInstance(cfg.devices[2],
vconfig.LibvirtConfigGuestSerial)
self.assertIsInstance(cfg.devices[3],
vconfig.LibvirtConfigGuestSerial)
self.assertIsInstance(cfg.devices[4],
vconfig.LibvirtConfigGuestSerial)
self.assertIsInstance(cfg.devices[5],
vconfig.LibvirtConfigGuestSerial)
self.assertIsInstance(cfg.devices[6],
vconfig.LibvirtConfigGuestInput)
self.assertIsInstance(cfg.devices[7],
vconfig.LibvirtConfigGuestGraphics)
self.assertIsInstance(cfg.devices[8],
vconfig.LibvirtConfigGuestVideo)
self.assertIsInstance(cfg.devices[9],
vconfig.LibvirtConfigMemoryBalloon)
self.assertEqual("tcp", cfg.devices[2].type)
self.assertEqual("tcp", cfg.devices[3].type)
self.assertEqual("tcp", cfg.devices[4].type)
def test_get_guest_config_serial_console_invalid_flavor(self):
self.flags(enabled=True, group='serial_console')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = objects.Instance(**self.test_instance)
instance_ref.flavor.extra_specs = {'hw:serial_port_count': "a"}
image_meta = {}
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta)
self.assertRaises(
exception.ImageSerialPortNumberInvalid,
drvr._get_guest_config, instance_ref, [], {}, disk_info)
def test_get_guest_config_serial_console_image_and_flavor(self):
self.flags(enabled=True, group='serial_console')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
image_meta = {"properties": {"hw_serial_port_count": "3"}}
instance_ref = objects.Instance(**self.test_instance)
instance_ref.flavor.extra_specs = {'hw:serial_port_count': 4}
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta)
cfg = drvr._get_guest_config(instance_ref, [], image_meta,
disk_info)
self.assertEqual(10, len(cfg.devices), cfg.devices)
self.assertIsInstance(cfg.devices[0],
vconfig.LibvirtConfigGuestDisk)
self.assertIsInstance(cfg.devices[1],
vconfig.LibvirtConfigGuestDisk)
self.assertIsInstance(cfg.devices[2],
vconfig.LibvirtConfigGuestSerial)
self.assertIsInstance(cfg.devices[3],
vconfig.LibvirtConfigGuestSerial)
self.assertIsInstance(cfg.devices[4],
vconfig.LibvirtConfigGuestSerial)
self.assertIsInstance(cfg.devices[5],
vconfig.LibvirtConfigGuestSerial)
self.assertIsInstance(cfg.devices[6],
vconfig.LibvirtConfigGuestInput)
self.assertIsInstance(cfg.devices[7],
vconfig.LibvirtConfigGuestGraphics)
self.assertIsInstance(cfg.devices[8],
vconfig.LibvirtConfigGuestVideo)
self.assertIsInstance(cfg.devices[9],
vconfig.LibvirtConfigMemoryBalloon)
self.assertEqual("tcp", cfg.devices[2].type)
self.assertEqual("tcp", cfg.devices[3].type)
self.assertEqual("tcp", cfg.devices[4].type)
def test_get_guest_config_serial_console_invalid_img_meta(self):
self.flags(enabled=True, group='serial_console')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = objects.Instance(**self.test_instance)
image_meta = {}
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta)
image_meta = {"properties": {"hw_serial_port_count": "fail"}}
self.assertRaises(
exception.ImageSerialPortNumberInvalid,
drvr._get_guest_config, instance_ref, [], image_meta, disk_info)
@mock.patch('nova.console.serial.acquire_port')
def test_get_guest_config_serial_console_through_port_rng_exhausted(
self, acquire_port):
self.flags(enabled=True, group='serial_console')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = objects.Instance(**self.test_instance)
image_meta = {}
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta)
acquire_port.side_effect = exception.SocketPortRangeExhaustedException(
'127.0.0.1')
self.assertRaises(
exception.SocketPortRangeExhaustedException,
drvr._get_guest_config, instance_ref, [], {}, disk_info)
@mock.patch.object(host.Host, "get_domain")
def test_get_serial_ports_from_instance(self, mock_get_domain):
i = self._test_get_serial_ports_from_instance(None,
mock_get_domain)
self.assertEqual([
('127.0.0.1', 100),
('127.0.0.1', 101),
('127.0.0.2', 100),
('127.0.0.2', 101)], list(i))
@mock.patch.object(host.Host, "get_domain")
def test_get_serial_ports_from_instance_bind_only(self, mock_get_domain):
i = self._test_get_serial_ports_from_instance('bind',
mock_get_domain)
self.assertEqual([
('127.0.0.1', 101),
('127.0.0.2', 100)], list(i))
@mock.patch.object(host.Host, "get_domain")
def test_get_serial_ports_from_instance_connect_only(self,
mock_get_domain):
i = self._test_get_serial_ports_from_instance('connect',
mock_get_domain)
self.assertEqual([
('127.0.0.1', 100),
('127.0.0.2', 101)], list(i))
@mock.patch.object(host.Host, "get_domain")
def test_get_serial_ports_from_instance_on_s390(self, mock_get_domain):
i = self._test_get_serial_ports_from_instance(None,
mock_get_domain,
'console')
self.assertEqual([
('127.0.0.1', 100),
('127.0.0.1', 101),
('127.0.0.2', 100),
('127.0.0.2', 101)], list(i))
    def _test_get_serial_ports_from_instance(self, mode, mock_get_domain,
                                             dev_name='serial'):
        """Drive _get_serial_ports_from_instance against a canned domain.

        :param mode: source mode filter passed through to the driver
                     (None, 'bind' or 'connect')
        :param mock_get_domain: patched Host.get_domain mock to load with
                                the fake domain
        :param dev_name: XML element name for the console device
                         ('serial' on x86, 'console' on s390)
        :returns: the (host, port) iterator produced by the driver
        """
        xml = """
        <domain type='kvm'>
          <devices>
            <%(dev_name)s type="tcp">
              <source host="127.0.0.1" service="100" mode="connect"/>
            </%(dev_name)s>
            <%(dev_name)s type="tcp">
              <source host="127.0.0.1" service="101" mode="bind"/>
            </%(dev_name)s>
            <%(dev_name)s type="tcp">
              <source host="127.0.0.2" service="100" mode="bind"/>
            </%(dev_name)s>
            <%(dev_name)s type="tcp">
              <source host="127.0.0.2" service="101" mode="connect"/>
            </%(dev_name)s>
          </devices>
        </domain>""" % {'dev_name': dev_name}
        # Fake libvirt domain returning the canned XML description.
        dom = mock.MagicMock()
        dom.XMLDesc.return_value = xml
        mock_get_domain.return_value = dom
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance = objects.Instance(**self.test_instance)
        return drvr._get_serial_ports_from_instance(
            instance, mode=mode)
def test_get_guest_config_with_type_xen(self):
self.flags(vnc_enabled=True)
self.flags(virt_type='xen',
use_usb_tablet=False,
group='libvirt')
self.flags(enabled=False,
group='spice')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = objects.Instance(**self.test_instance)
image_meta = {}
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta)
cfg = drvr._get_guest_config(instance_ref, [], {}, disk_info)
self.assertEqual(len(cfg.devices), 6)
self.assertIsInstance(cfg.devices[0],
vconfig.LibvirtConfigGuestDisk)
self.assertIsInstance(cfg.devices[1],
vconfig.LibvirtConfigGuestDisk)
self.assertIsInstance(cfg.devices[2],
vconfig.LibvirtConfigGuestConsole)
self.assertIsInstance(cfg.devices[3],
vconfig.LibvirtConfigGuestGraphics)
self.assertIsInstance(cfg.devices[4],
vconfig.LibvirtConfigGuestVideo)
self.assertIsInstance(cfg.devices[5],
vconfig.LibvirtConfigMemoryBalloon)
self.assertEqual(cfg.devices[3].type, "vnc")
self.assertEqual(cfg.devices[4].type, "xen")
@mock.patch.object(libvirt_driver.libvirt_utils, 'get_arch',
return_value=arch.S390X)
def test_get_guest_config_with_type_kvm_on_s390(self, mock_get_arch):
self.flags(vnc_enabled=False)
self.flags(virt_type='kvm',
use_usb_tablet=False,
group='libvirt')
self._stub_host_capabilities_cpu_arch(arch.S390X)
instance_ref = objects.Instance(**self.test_instance)
cfg = self._get_guest_config_via_fake_api(instance_ref)
self.assertIsInstance(cfg.devices[0],
vconfig.LibvirtConfigGuestDisk)
self.assertIsInstance(cfg.devices[1],
vconfig.LibvirtConfigGuestDisk)
log_file_device = cfg.devices[2]
self.assertIsInstance(log_file_device,
vconfig.LibvirtConfigGuestConsole)
self.assertEqual("sclplm", log_file_device.target_type)
self.assertEqual("file", log_file_device.type)
terminal_device = cfg.devices[3]
self.assertIsInstance(terminal_device,
vconfig.LibvirtConfigGuestConsole)
self.assertEqual("sclp", terminal_device.target_type)
self.assertEqual("pty", terminal_device.type)
self.assertEqual("s390-ccw-virtio", cfg.os_mach_type)
def _stub_host_capabilities_cpu_arch(self, cpu_arch):
def get_host_capabilities_stub(self):
cpu = vconfig.LibvirtConfigGuestCPU()
cpu.arch = cpu_arch
caps = vconfig.LibvirtConfigCaps()
caps.host = vconfig.LibvirtConfigCapsHost()
caps.host.cpu = cpu
return caps
self.stubs.Set(host.Host, "get_capabilities",
get_host_capabilities_stub)
def _get_guest_config_via_fake_api(self, instance):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
image_meta = {}
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance,
image_meta)
return drvr._get_guest_config(instance, [], {}, disk_info)
def test_get_guest_config_with_type_xen_pae_hvm(self):
self.flags(vnc_enabled=True)
self.flags(virt_type='xen',
use_usb_tablet=False,
group='libvirt')
self.flags(enabled=False,
group='spice')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = objects.Instance(**self.test_instance)
instance_ref['vm_mode'] = vm_mode.HVM
image_meta = {}
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta)
cfg = drvr._get_guest_config(instance_ref, [], {}, disk_info)
self.assertEqual(cfg.os_type, vm_mode.HVM)
self.assertEqual(cfg.os_loader, CONF.libvirt.xen_hvmloader_path)
self.assertEqual(3, len(cfg.features))
self.assertIsInstance(cfg.features[0],
vconfig.LibvirtConfigGuestFeaturePAE)
self.assertIsInstance(cfg.features[1],
vconfig.LibvirtConfigGuestFeatureACPI)
self.assertIsInstance(cfg.features[2],
vconfig.LibvirtConfigGuestFeatureAPIC)
def test_get_guest_config_with_type_xen_pae_pvm(self):
self.flags(vnc_enabled=True)
self.flags(virt_type='xen',
use_usb_tablet=False,
group='libvirt')
self.flags(enabled=False,
group='spice')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = objects.Instance(**self.test_instance)
image_meta = {}
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta)
cfg = drvr._get_guest_config(instance_ref, [], {}, disk_info)
self.assertEqual(cfg.os_type, vm_mode.XEN)
self.assertEqual(1, len(cfg.features))
self.assertIsInstance(cfg.features[0],
vconfig.LibvirtConfigGuestFeaturePAE)
def test_get_guest_config_with_vnc_and_spice(self):
self.flags(vnc_enabled=True)
self.flags(virt_type='kvm',
use_usb_tablet=True,
group='libvirt')
self.flags(enabled=True,
agent_enabled=True,
group='spice')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = objects.Instance(**self.test_instance)
image_meta = {}
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta)
cfg = drvr._get_guest_config(instance_ref, [], {}, disk_info)
self.assertEqual(len(cfg.devices), 10)
self.assertIsInstance(cfg.devices[0],
vconfig.LibvirtConfigGuestDisk)
self.assertIsInstance(cfg.devices[1],
vconfig.LibvirtConfigGuestDisk)
self.assertIsInstance(cfg.devices[2],
vconfig.LibvirtConfigGuestSerial)
self.assertIsInstance(cfg.devices[3],
vconfig.LibvirtConfigGuestSerial)
self.assertIsInstance(cfg.devices[4],
vconfig.LibvirtConfigGuestInput)
self.assertIsInstance(cfg.devices[5],
vconfig.LibvirtConfigGuestChannel)
self.assertIsInstance(cfg.devices[6],
vconfig.LibvirtConfigGuestGraphics)
self.assertIsInstance(cfg.devices[7],
vconfig.LibvirtConfigGuestGraphics)
self.assertIsInstance(cfg.devices[8],
vconfig.LibvirtConfigGuestVideo)
self.assertIsInstance(cfg.devices[9],
vconfig.LibvirtConfigMemoryBalloon)
self.assertEqual(cfg.devices[4].type, "tablet")
self.assertEqual(cfg.devices[5].target_name, "com.redhat.spice.0")
self.assertEqual(cfg.devices[6].type, "vnc")
self.assertEqual(cfg.devices[7].type, "spice")
    def test_invalid_watchdog_action(self):
        """An unrecognised hw_watchdog_action image property must make
        _get_guest_config raise InvalidWatchdogAction.
        """
        self.flags(virt_type='kvm', group='libvirt')
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance_ref = objects.Instance(**self.test_instance)
        image_meta = {}
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)
        # rebind image_meta with the bogus watchdog action for the config call
        image_meta = {"properties": {"hw_watchdog_action": "something"}}
        self.assertRaises(exception.InvalidWatchdogAction,
                          drvr._get_guest_config,
                          instance_ref,
                          [],
                          image_meta,
                          disk_info)
    def test_get_guest_config_with_watchdog_action_image_meta(self):
        """A valid hw_watchdog_action image property ('none') adds a
        watchdog device with that action to the guest config.
        """
        self.flags(virt_type='kvm', group='libvirt')
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance_ref = objects.Instance(**self.test_instance)
        image_meta = {}
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)
        image_meta = {"properties": {"hw_watchdog_action": "none"}}
        cfg = drvr._get_guest_config(instance_ref, [], image_meta, disk_info)
        self.assertEqual(len(cfg.devices), 9)
        self.assertIsInstance(cfg.devices[0],
                              vconfig.LibvirtConfigGuestDisk)
        self.assertIsInstance(cfg.devices[1],
                              vconfig.LibvirtConfigGuestDisk)
        self.assertIsInstance(cfg.devices[2],
                              vconfig.LibvirtConfigGuestSerial)
        self.assertIsInstance(cfg.devices[3],
                              vconfig.LibvirtConfigGuestSerial)
        self.assertIsInstance(cfg.devices[4],
                              vconfig.LibvirtConfigGuestInput)
        self.assertIsInstance(cfg.devices[5],
                              vconfig.LibvirtConfigGuestGraphics)
        self.assertIsInstance(cfg.devices[6],
                              vconfig.LibvirtConfigGuestVideo)
        self.assertIsInstance(cfg.devices[7],
                              vconfig.LibvirtConfigGuestWatchdog)
        self.assertIsInstance(cfg.devices[8],
                              vconfig.LibvirtConfigMemoryBalloon)
        self.assertEqual("none", cfg.devices[7].action)
def _test_get_guest_usb_tablet(self, vnc_enabled, spice_enabled, os_type,
agent_enabled=False):
self.flags(vnc_enabled=vnc_enabled)
self.flags(enabled=spice_enabled,
agent_enabled=agent_enabled, group='spice')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
return drvr._get_guest_usb_tablet(os_type)
    def test_get_guest_usb_tablet_wipe(self):
        """Exercise the usb-tablet decision matrix: a tablet is added for
        HVM guests whenever VNC or SPICE is enabled, but not when both are
        off, not for a non-HVM os_type, and not when the SPICE agent is on.
        """
        self.flags(use_usb_tablet=True, group='libvirt')
        tablet = self._test_get_guest_usb_tablet(True, True, vm_mode.HVM)
        self.assertIsNotNone(tablet)
        tablet = self._test_get_guest_usb_tablet(True, False, vm_mode.HVM)
        self.assertIsNotNone(tablet)
        tablet = self._test_get_guest_usb_tablet(False, True, vm_mode.HVM)
        self.assertIsNotNone(tablet)
        tablet = self._test_get_guest_usb_tablet(False, False, vm_mode.HVM)
        self.assertIsNone(tablet)
        tablet = self._test_get_guest_usb_tablet(True, True, "foo")
        self.assertIsNone(tablet)
        # SPICE agent enabled supersedes the tablet even with SPICE on
        tablet = self._test_get_guest_usb_tablet(
            False, True, vm_mode.HVM, True)
        self.assertIsNone(tablet)
    def _test_get_guest_config_with_watchdog_action_flavor(self,
            hw_watchdog_action="hw:watchdog_action"):
        """Helper: a watchdog action set via a flavor extra spec (under
        the given key name) adds a watchdog device with action 'none'.
        """
        self.flags(virt_type='kvm', group='libvirt')
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance_ref = objects.Instance(**self.test_instance)
        instance_ref.flavor.extra_specs = {hw_watchdog_action: 'none'}
        image_meta = {}
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)
        cfg = drvr._get_guest_config(instance_ref, [], {}, disk_info)
        self.assertEqual(9, len(cfg.devices))
        self.assertIsInstance(cfg.devices[0],
                              vconfig.LibvirtConfigGuestDisk)
        self.assertIsInstance(cfg.devices[1],
                              vconfig.LibvirtConfigGuestDisk)
        self.assertIsInstance(cfg.devices[2],
                              vconfig.LibvirtConfigGuestSerial)
        self.assertIsInstance(cfg.devices[3],
                              vconfig.LibvirtConfigGuestSerial)
        self.assertIsInstance(cfg.devices[4],
                              vconfig.LibvirtConfigGuestInput)
        self.assertIsInstance(cfg.devices[5],
                              vconfig.LibvirtConfigGuestGraphics)
        self.assertIsInstance(cfg.devices[6],
                              vconfig.LibvirtConfigGuestVideo)
        self.assertIsInstance(cfg.devices[7],
                              vconfig.LibvirtConfigGuestWatchdog)
        self.assertIsInstance(cfg.devices[8],
                              vconfig.LibvirtConfigMemoryBalloon)
        self.assertEqual("none", cfg.devices[7].action)
    def test_get_guest_config_with_watchdog_action_through_flavor(self):
        """Watchdog action via the scoped 'hw:watchdog_action' spec."""
        self._test_get_guest_config_with_watchdog_action_flavor()
    # TODO(pkholkin): the test accepting old property name 'hw_watchdog_action'
    # should be removed in the next release
    def test_get_guest_config_with_watchdog_action_through_flavor_no_scope(
            self):
        """Watchdog action via the legacy unscoped 'hw_watchdog_action'
        extra spec key still works (deprecated -- see TODO above).
        """
        self._test_get_guest_config_with_watchdog_action_flavor(
            hw_watchdog_action="hw_watchdog_action")
    def test_get_guest_config_with_watchdog_overrides_flavor(self):
        """When the image property and flavor extra spec disagree, the
        image's hw_watchdog_action ('pause') wins over the flavor's.
        """
        self.flags(virt_type='kvm', group='libvirt')
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance_ref = objects.Instance(**self.test_instance)
        instance_ref.flavor.extra_specs = {'hw_watchdog_action': 'none'}
        image_meta = {}
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)
        image_meta = {"properties": {"hw_watchdog_action": "pause"}}
        cfg = drvr._get_guest_config(instance_ref, [],
                                     image_meta, disk_info)
        self.assertEqual(9, len(cfg.devices))
        self.assertIsInstance(cfg.devices[0],
                              vconfig.LibvirtConfigGuestDisk)
        self.assertIsInstance(cfg.devices[1],
                              vconfig.LibvirtConfigGuestDisk)
        self.assertIsInstance(cfg.devices[2],
                              vconfig.LibvirtConfigGuestSerial)
        self.assertIsInstance(cfg.devices[3],
                              vconfig.LibvirtConfigGuestSerial)
        self.assertIsInstance(cfg.devices[4],
                              vconfig.LibvirtConfigGuestInput)
        self.assertIsInstance(cfg.devices[5],
                              vconfig.LibvirtConfigGuestGraphics)
        self.assertIsInstance(cfg.devices[6],
                              vconfig.LibvirtConfigGuestVideo)
        self.assertIsInstance(cfg.devices[7],
                              vconfig.LibvirtConfigGuestWatchdog)
        self.assertIsInstance(cfg.devices[8],
                              vconfig.LibvirtConfigMemoryBalloon)
        # image property overrides the flavor's 'none'
        self.assertEqual("pause", cfg.devices[7].action)
    def test_unsupported_video_driver_through_image_meta(self):
        """An unsupported hw_video_model image property must make
        _get_guest_config raise InvalidVideoMode.
        """
        self.flags(virt_type='kvm', group='libvirt')
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance_ref = objects.Instance(**self.test_instance)
        image_meta = {}
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)
        image_meta = {"properties": {"hw_video_model": "something"}}
        self.assertRaises(exception.InvalidVideoMode,
                          drvr._get_guest_config,
                          instance_ref,
                          [],
                          image_meta,
                          disk_info)
    def test_get_guest_config_with_video_driver_image_meta(self):
        """A supported hw_video_model image property ('vmvga') sets the
        guest video device type accordingly while graphics stays vnc.
        """
        self.flags(virt_type='kvm', group='libvirt')
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance_ref = objects.Instance(**self.test_instance)
        image_meta = {}
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)
        image_meta = {"properties": {"hw_video_model": "vmvga"}}
        cfg = drvr._get_guest_config(instance_ref, [], image_meta, disk_info)
        self.assertEqual(len(cfg.devices), 8)
        self.assertIsInstance(cfg.devices[0],
                              vconfig.LibvirtConfigGuestDisk)
        self.assertIsInstance(cfg.devices[1],
                              vconfig.LibvirtConfigGuestDisk)
        self.assertIsInstance(cfg.devices[2],
                              vconfig.LibvirtConfigGuestSerial)
        self.assertIsInstance(cfg.devices[3],
                              vconfig.LibvirtConfigGuestSerial)
        self.assertIsInstance(cfg.devices[4],
                              vconfig.LibvirtConfigGuestInput)
        self.assertIsInstance(cfg.devices[5],
                              vconfig.LibvirtConfigGuestGraphics)
        self.assertIsInstance(cfg.devices[6],
                              vconfig.LibvirtConfigGuestVideo)
        self.assertIsInstance(cfg.devices[7],
                              vconfig.LibvirtConfigMemoryBalloon)
        self.assertEqual(cfg.devices[5].type, "vnc")
        self.assertEqual(cfg.devices[6].type, "vmvga")
    def test_get_guest_config_with_qga_through_image_meta(self):
        """hw_qemu_guest_agent=yes adds a unix channel targeting
        org.qemu.guest_agent.0 to the guest config.
        """
        self.flags(virt_type='kvm', group='libvirt')
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance_ref = objects.Instance(**self.test_instance)
        image_meta = {}
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)
        image_meta = {"properties": {"hw_qemu_guest_agent": "yes"}}
        cfg = drvr._get_guest_config(instance_ref, [], image_meta, disk_info)
        self.assertEqual(len(cfg.devices), 9)
        self.assertIsInstance(cfg.devices[0],
                              vconfig.LibvirtConfigGuestDisk)
        self.assertIsInstance(cfg.devices[1],
                              vconfig.LibvirtConfigGuestDisk)
        self.assertIsInstance(cfg.devices[2],
                              vconfig.LibvirtConfigGuestSerial)
        self.assertIsInstance(cfg.devices[3],
                              vconfig.LibvirtConfigGuestSerial)
        self.assertIsInstance(cfg.devices[4],
                              vconfig.LibvirtConfigGuestInput)
        self.assertIsInstance(cfg.devices[5],
                              vconfig.LibvirtConfigGuestGraphics)
        self.assertIsInstance(cfg.devices[6],
                              vconfig.LibvirtConfigGuestVideo)
        self.assertIsInstance(cfg.devices[7],
                              vconfig.LibvirtConfigGuestChannel)
        self.assertIsInstance(cfg.devices[8],
                              vconfig.LibvirtConfigMemoryBalloon)
        self.assertEqual(cfg.devices[4].type, "tablet")
        self.assertEqual(cfg.devices[5].type, "vnc")
        self.assertEqual(cfg.devices[7].type, "unix")
        self.assertEqual(cfg.devices[7].target_name, "org.qemu.guest_agent.0")
    def test_get_guest_config_with_video_driver_vram(self):
        """With SPICE-only graphics, hw_video_ram=64 (MB, within the
        flavor's hw_video:ram_max_mb=100 limit) is applied to the qxl
        video device as vram expressed in KiB.
        """
        self.flags(vnc_enabled=False)
        self.flags(virt_type='kvm', group='libvirt')
        self.flags(enabled=True,
                   agent_enabled=True,
                   group='spice')
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance_ref = objects.Instance(**self.test_instance)
        instance_ref.flavor.extra_specs = {'hw_video:ram_max_mb': "100"}
        image_meta = {}
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)
        image_meta = {"properties": {"hw_video_model": "qxl",
                                     "hw_video_ram": "64"}}
        cfg = drvr._get_guest_config(instance_ref, [],
                                     image_meta, disk_info)
        self.assertEqual(len(cfg.devices), 8)
        self.assertIsInstance(cfg.devices[0],
                              vconfig.LibvirtConfigGuestDisk)
        self.assertIsInstance(cfg.devices[1],
                              vconfig.LibvirtConfigGuestDisk)
        self.assertIsInstance(cfg.devices[2],
                              vconfig.LibvirtConfigGuestSerial)
        self.assertIsInstance(cfg.devices[3],
                              vconfig.LibvirtConfigGuestSerial)
        self.assertIsInstance(cfg.devices[4],
                              vconfig.LibvirtConfigGuestChannel)
        self.assertIsInstance(cfg.devices[5],
                              vconfig.LibvirtConfigGuestGraphics)
        self.assertIsInstance(cfg.devices[6],
                              vconfig.LibvirtConfigGuestVideo)
        self.assertIsInstance(cfg.devices[7],
                              vconfig.LibvirtConfigMemoryBalloon)
        self.assertEqual(cfg.devices[5].type, "spice")
        self.assertEqual(cfg.devices[6].type, "qxl")
        # 64 MB converted to KiB for the libvirt vram attribute
        self.assertEqual(cfg.devices[6].vram, 64 * units.Mi / units.Ki)
    @mock.patch('nova.virt.disk.api.teardown_container')
    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver.get_info')
    @mock.patch('nova.virt.disk.api.setup_container')
    @mock.patch('nova.openstack.common.fileutils.ensure_tree')
    @mock.patch.object(fake_libvirt_utils, 'get_instance_path')
    def test_unmount_fs_if_error_during_lxc_create_domain(self,
            mock_get_inst_path, mock_ensure_tree, mock_setup_container,
            mock_get_info, mock_teardown):
        """If we hit an error during a `_create_domain` call to `libvirt+lxc`
        we need to ensure the guest FS is unmounted from the host so that any
        future `lvremove` calls will work.
        """
        self.flags(virt_type='lxc', group='libvirt')
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        mock_instance = mock.MagicMock()
        mock_get_inst_path.return_value = '/tmp/'
        mock_image_backend = mock.MagicMock()
        drvr.image_backend = mock_image_backend
        mock_image = mock.MagicMock()
        mock_image.path = '/tmp/test.img'
        drvr.image_backend.image.return_value = mock_image
        mock_setup_container.return_value = '/dev/nbd0'
        mock_get_info.side_effect = exception.InstanceNotFound(
                                                        instance_id='foo')
        # make domain definition itself blow up mid-create
        drvr._conn.defineXML = mock.Mock()
        drvr._conn.defineXML.side_effect = ValueError('somethingbad')
        with contextlib.nested(
                mock.patch.object(drvr, '_is_booted_from_volume',
                                  return_value=False),
                mock.patch.object(drvr, 'plug_vifs'),
                mock.patch.object(drvr, 'firewall_driver'),
                mock.patch.object(drvr, 'cleanup')):
            self.assertRaises(ValueError,
                              drvr._create_domain_and_network,
                              self.context,
                              'xml',
                              mock_instance, None, None)
            # the container FS must still have been torn down
            mock_teardown.assert_called_with(container_dir='/tmp/rootfs')
    def test_video_driver_flavor_limit_not_set(self):
        """Requesting hw_video_ram via the image without the flavor
        declaring hw_video:ram_max_mb raises RequestedVRamTooHigh.
        """
        self.flags(virt_type='kvm', group='libvirt')
        self.flags(enabled=True,
                   agent_enabled=True,
                   group='spice')
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance_ref = objects.Instance(**self.test_instance)
        image_meta = {"properties": {"hw_video_model": "qxl",
                                     "hw_video_ram": "64"}}
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)
        with mock.patch.object(objects.Instance, 'save'):
            self.assertRaises(exception.RequestedVRamTooHigh,
                              drvr._get_guest_config,
                              instance_ref,
                              [],
                              image_meta,
                              disk_info)
    def test_video_driver_ram_above_flavor_limit(self):
        """hw_video_ram=64 above the flavor's hw_video:ram_max_mb=50
        limit raises RequestedVRamTooHigh.
        """
        self.flags(virt_type='kvm', group='libvirt')
        self.flags(enabled=True,
                   agent_enabled=True,
                   group='spice')
        instance_ref = objects.Instance(**self.test_instance)
        instance_type = instance_ref.get_flavor()
        instance_type.extra_specs = {'hw_video:ram_max_mb': "50"}
        image_meta = {"properties": {"hw_video_model": "qxl",
                                     "hw_video_ram": "64"}}
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)
        with mock.patch.object(objects.Instance, 'save'):
            self.assertRaises(exception.RequestedVRamTooHigh,
                              drvr._get_guest_config,
                              instance_ref,
                              [],
                              image_meta,
                              disk_info)
def test_get_guest_config_without_qga_through_image_meta(self):
self.flags(virt_type='kvm', group='libvirt')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = objects.Instance(**self.test_instance)
image_meta = {"properties": {"hw_qemu_guest_agent": "no"}}
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta)
image_meta = {"properties": {"hw_qemu_guest_agent": "no"}}
cfg = drvr._get_guest_config(instance_ref, [], image_meta, disk_info)
self.assertEqual(len(cfg.devices), 8)
self.assertIsInstance(cfg.devices[0],
vconfig.LibvirtConfigGuestDisk)
self.assertIsInstance(cfg.devices[1],
vconfig.LibvirtConfigGuestDisk)
self.assertIsInstance(cfg.devices[2],
vconfig.LibvirtConfigGuestSerial)
self.assertIsInstance(cfg.devices[3],
vconfig.LibvirtConfigGuestSerial)
self.assertIsInstance(cfg.devices[4],
vconfig.LibvirtConfigGuestInput)
self.assertIsInstance(cfg.devices[5],
vconfig.LibvirtConfigGuestGraphics)
self.assertIsInstance(cfg.devices[6],
vconfig.LibvirtConfigGuestVideo)
self.assertIsInstance(cfg.devices[7],
vconfig.LibvirtConfigMemoryBalloon)
self.assertEqual(cfg.devices[4].type, "tablet")
self.assertEqual(cfg.devices[5].type, "vnc")
    def test_get_guest_config_with_rng_device(self):
        """With the flavor allowing RNG (hw_rng:allowed) and the image
        requesting hw_rng_model=virtio, a 'random' RNG device is added
        with no backend path and no rate limiting.
        """
        self.flags(virt_type='kvm',
                   use_usb_tablet=False,
                   group='libvirt')
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance_ref = objects.Instance(**self.test_instance)
        instance_ref.flavor.extra_specs = {'hw_rng:allowed': 'True'}
        image_meta = {"properties": {"hw_rng_model": "virtio"}}
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)
        cfg = drvr._get_guest_config(instance_ref, [],
                                     image_meta, disk_info)
        self.assertEqual(len(cfg.devices), 8)
        self.assertIsInstance(cfg.devices[0],
                              vconfig.LibvirtConfigGuestDisk)
        self.assertIsInstance(cfg.devices[1],
                              vconfig.LibvirtConfigGuestDisk)
        self.assertIsInstance(cfg.devices[2],
                              vconfig.LibvirtConfigGuestSerial)
        self.assertIsInstance(cfg.devices[3],
                              vconfig.LibvirtConfigGuestSerial)
        self.assertIsInstance(cfg.devices[4],
                              vconfig.LibvirtConfigGuestGraphics)
        self.assertIsInstance(cfg.devices[5],
                              vconfig.LibvirtConfigGuestVideo)
        self.assertIsInstance(cfg.devices[6],
                              vconfig.LibvirtConfigGuestRng)
        self.assertIsInstance(cfg.devices[7],
                              vconfig.LibvirtConfigMemoryBalloon)
        self.assertEqual(cfg.devices[6].model, 'random')
        self.assertIsNone(cfg.devices[6].backend)
        self.assertIsNone(cfg.devices[6].rate_bytes)
        self.assertIsNone(cfg.devices[6].rate_period)
    def test_get_guest_config_with_rng_not_allowed(self):
        """Image requests hw_rng_model=virtio but the flavor does not set
        hw_rng:allowed: no RNG device appears (7 devices only).
        """
        self.flags(virt_type='kvm',
                   use_usb_tablet=False,
                   group='libvirt')
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance_ref = objects.Instance(**self.test_instance)
        image_meta = {"properties": {"hw_rng_model": "virtio"}}
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)
        cfg = drvr._get_guest_config(instance_ref, [],
                                     image_meta, disk_info)
        self.assertEqual(len(cfg.devices), 7)
        self.assertIsInstance(cfg.devices[0],
                              vconfig.LibvirtConfigGuestDisk)
        self.assertIsInstance(cfg.devices[1],
                              vconfig.LibvirtConfigGuestDisk)
        self.assertIsInstance(cfg.devices[2],
                              vconfig.LibvirtConfigGuestSerial)
        self.assertIsInstance(cfg.devices[3],
                              vconfig.LibvirtConfigGuestSerial)
        self.assertIsInstance(cfg.devices[4],
                              vconfig.LibvirtConfigGuestGraphics)
        self.assertIsInstance(cfg.devices[5],
                              vconfig.LibvirtConfigGuestVideo)
        self.assertIsInstance(cfg.devices[6],
                              vconfig.LibvirtConfigMemoryBalloon)
    def test_get_guest_config_with_rng_limits(self):
        """Flavor hw_rng:rate_bytes / hw_rng:rate_period extra specs are
        carried onto the RNG device as integer rate limits.
        """
        self.flags(virt_type='kvm',
                   use_usb_tablet=False,
                   group='libvirt')
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance_ref = objects.Instance(**self.test_instance)
        instance_ref.flavor.extra_specs = {'hw_rng:allowed': 'True',
                                           'hw_rng:rate_bytes': '1024',
                                           'hw_rng:rate_period': '2'}
        image_meta = {"properties": {"hw_rng_model": "virtio"}}
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)
        cfg = drvr._get_guest_config(instance_ref, [],
                                     image_meta, disk_info)
        self.assertEqual(len(cfg.devices), 8)
        self.assertIsInstance(cfg.devices[0],
                              vconfig.LibvirtConfigGuestDisk)
        self.assertIsInstance(cfg.devices[1],
                              vconfig.LibvirtConfigGuestDisk)
        self.assertIsInstance(cfg.devices[2],
                              vconfig.LibvirtConfigGuestSerial)
        self.assertIsInstance(cfg.devices[3],
                              vconfig.LibvirtConfigGuestSerial)
        self.assertIsInstance(cfg.devices[4],
                              vconfig.LibvirtConfigGuestGraphics)
        self.assertIsInstance(cfg.devices[5],
                              vconfig.LibvirtConfigGuestVideo)
        self.assertIsInstance(cfg.devices[6],
                              vconfig.LibvirtConfigGuestRng)
        self.assertIsInstance(cfg.devices[7],
                              vconfig.LibvirtConfigMemoryBalloon)
        self.assertEqual(cfg.devices[6].model, 'random')
        self.assertIsNone(cfg.devices[6].backend)
        # string extra specs become int rate limits on the device
        self.assertEqual(cfg.devices[6].rate_bytes, 1024)
        self.assertEqual(cfg.devices[6].rate_period, 2)
    @mock.patch('nova.virt.libvirt.driver.os.path.exists')
    def test_get_guest_config_with_rng_backend(self, mock_path):
        """When rng_dev_path is configured and the path exists, the RNG
        device uses it as its backend.
        """
        self.flags(virt_type='kvm',
                   use_usb_tablet=False,
                   rng_dev_path='/dev/hw_rng',
                   group='libvirt')
        # pretend the host device exists
        mock_path.return_value = True
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance_ref = objects.Instance(**self.test_instance)
        instance_ref.flavor.extra_specs = {'hw_rng:allowed': 'True'}
        image_meta = {"properties": {"hw_rng_model": "virtio"}}
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)
        cfg = drvr._get_guest_config(instance_ref, [],
                                     image_meta, disk_info)
        self.assertEqual(len(cfg.devices), 8)
        self.assertIsInstance(cfg.devices[0],
                              vconfig.LibvirtConfigGuestDisk)
        self.assertIsInstance(cfg.devices[1],
                              vconfig.LibvirtConfigGuestDisk)
        self.assertIsInstance(cfg.devices[2],
                              vconfig.LibvirtConfigGuestSerial)
        self.assertIsInstance(cfg.devices[3],
                              vconfig.LibvirtConfigGuestSerial)
        self.assertIsInstance(cfg.devices[4],
                              vconfig.LibvirtConfigGuestGraphics)
        self.assertIsInstance(cfg.devices[5],
                              vconfig.LibvirtConfigGuestVideo)
        self.assertIsInstance(cfg.devices[6],
                              vconfig.LibvirtConfigGuestRng)
        self.assertIsInstance(cfg.devices[7],
                              vconfig.LibvirtConfigMemoryBalloon)
        self.assertEqual(cfg.devices[6].model, 'random')
        self.assertEqual(cfg.devices[6].backend, '/dev/hw_rng')
        self.assertIsNone(cfg.devices[6].rate_bytes)
        self.assertIsNone(cfg.devices[6].rate_period)
    @mock.patch('nova.virt.libvirt.driver.os.path.exists')
    def test_get_guest_config_with_rng_dev_not_present(self, mock_path):
        """If the configured rng_dev_path does not exist on the host,
        _get_guest_config raises RngDeviceNotExist.
        """
        self.flags(virt_type='kvm',
                   use_usb_tablet=False,
                   rng_dev_path='/dev/hw_rng',
                   group='libvirt')
        # pretend the host device is missing
        mock_path.return_value = False
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance_ref = objects.Instance(**self.test_instance)
        instance_ref.flavor.extra_specs = {'hw_rng:allowed': 'True'}
        image_meta = {"properties": {"hw_rng_model": "virtio"}}
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)
        self.assertRaises(exception.RngDeviceNotExist,
                          drvr._get_guest_config,
                          instance_ref,
                          [],
                          image_meta, disk_info)
    def test_guest_cpu_shares_with_multi_vcpu(self):
        """Default cputune shares scale with vcpus: 4 vcpus -> 4096."""
        self.flags(virt_type='kvm', group='libvirt')
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance_ref = objects.Instance(**self.test_instance)
        instance_ref.flavor.vcpus = 4
        image_meta = {}
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)
        cfg = drvr._get_guest_config(instance_ref, [], {}, disk_info)
        self.assertEqual(4096, cfg.cputune.shares)
    def test_get_guest_config_with_cpu_quota(self):
        """quota:cpu_shares / quota:cpu_period flavor extra specs are
        applied to cputune as integers.
        """
        self.flags(virt_type='kvm', group='libvirt')
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance_ref = objects.Instance(**self.test_instance)
        instance_ref.flavor.extra_specs = {'quota:cpu_shares': '10000',
                                           'quota:cpu_period': '20000'}
        image_meta = {}
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)
        cfg = drvr._get_guest_config(instance_ref, [], {}, disk_info)
        self.assertEqual(10000, cfg.cputune.shares)
        self.assertEqual(20000, cfg.cputune.period)
    def test_get_guest_config_with_bogus_cpu_quota(self):
        """A non-numeric quota:cpu_shares extra spec makes
        _get_guest_config raise ValueError.
        """
        self.flags(virt_type='kvm', group='libvirt')
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance_ref = objects.Instance(**self.test_instance)
        instance_ref.flavor.extra_specs = {'quota:cpu_shares': 'fishfood',
                                           'quota:cpu_period': '20000'}
        image_meta = {}
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)
        self.assertRaises(ValueError,
                          drvr._get_guest_config,
                          instance_ref, [], {}, disk_info)
def _test_get_guest_config_sysinfo_serial(self, expected_serial):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = objects.Instance(**self.test_instance)
cfg = drvr._get_guest_config_sysinfo(instance_ref)
self.assertIsInstance(cfg, vconfig.LibvirtConfigGuestSysinfo)
self.assertEqual(version.vendor_string(),
cfg.system_manufacturer)
self.assertEqual(version.product_string(),
cfg.system_product)
self.assertEqual(version.version_string_with_package(),
cfg.system_version)
self.assertEqual(expected_serial,
cfg.system_serial)
self.assertEqual(instance_ref['uuid'],
cfg.system_uuid)
    def test_get_guest_config_sysinfo_serial_none(self):
        """sysinfo_serial='none' yields no serial in the sysinfo block."""
        self.flags(sysinfo_serial="none", group="libvirt")
        self._test_get_guest_config_sysinfo_serial(None)
    @mock.patch.object(libvirt_driver.LibvirtDriver,
                       "_get_host_sysinfo_serial_hardware")
    def test_get_guest_config_sysinfo_serial_hardware(self, mock_uuid):
        """sysinfo_serial='hardware' uses the host hardware UUID."""
        self.flags(sysinfo_serial="hardware", group="libvirt")
        theuuid = "56b40135-a973-4eb3-87bb-a2382a3e6dbc"
        mock_uuid.return_value = theuuid
        self._test_get_guest_config_sysinfo_serial(theuuid)
    def test_get_guest_config_sysinfo_serial_os(self):
        """sysinfo_serial='os' reads the serial from /etc/machine-id."""
        self.flags(sysinfo_serial="os", group="libvirt")
        real_open = __builtin__.open
        with contextlib.nested(
                mock.patch.object(__builtin__, "open"),
        ) as (mock_open, ):
            theuuid = "56b40135-a973-4eb3-87bb-a2382a3e6dbc"
            def fake_open(filename, *args, **kwargs):
                # intercept only /etc/machine-id; defer everything else
                # to the real open so unrelated reads still work
                if filename == "/etc/machine-id":
                    h = mock.MagicMock()
                    h.read.return_value = theuuid
                    h.__enter__.return_value = h
                    return h
                return real_open(filename, *args, **kwargs)
            mock_open.side_effect = fake_open
            self._test_get_guest_config_sysinfo_serial(theuuid)
    def test_get_guest_config_sysinfo_serial_auto_hardware(self):
        """sysinfo_serial='auto' falls back to the hardware UUID when
        /etc/machine-id does not exist.
        """
        self.flags(sysinfo_serial="auto", group="libvirt")
        real_exists = os.path.exists
        with contextlib.nested(
                mock.patch.object(os.path, "exists"),
                mock.patch.object(libvirt_driver.LibvirtDriver,
                                  "_get_host_sysinfo_serial_hardware")
        ) as (mock_exists, mock_uuid):
            def fake_exists(filename):
                # only /etc/machine-id is faked as missing
                if filename == "/etc/machine-id":
                    return False
                return real_exists(filename)
            mock_exists.side_effect = fake_exists
            theuuid = "56b40135-a973-4eb3-87bb-a2382a3e6dbc"
            mock_uuid.return_value = theuuid
            self._test_get_guest_config_sysinfo_serial(theuuid)
    def test_get_guest_config_sysinfo_serial_auto_os(self):
        """sysinfo_serial='auto' prefers /etc/machine-id when present."""
        self.flags(sysinfo_serial="auto", group="libvirt")
        real_exists = os.path.exists
        real_open = __builtin__.open
        with contextlib.nested(
                mock.patch.object(os.path, "exists"),
                mock.patch.object(__builtin__, "open"),
        ) as (mock_exists, mock_open):
            def fake_exists(filename):
                # only /etc/machine-id is faked as present
                if filename == "/etc/machine-id":
                    return True
                return real_exists(filename)
            mock_exists.side_effect = fake_exists
            theuuid = "56b40135-a973-4eb3-87bb-a2382a3e6dbc"
            def fake_open(filename, *args, **kwargs):
                # serve the fake machine-id; pass other files through
                if filename == "/etc/machine-id":
                    h = mock.MagicMock()
                    h.read.return_value = theuuid
                    h.__enter__.return_value = h
                    return h
                return real_open(filename, *args, **kwargs)
            mock_open.side_effect = fake_open
            self._test_get_guest_config_sysinfo_serial(theuuid)
    def test_get_guest_config_sysinfo_serial_invalid(self):
        """An unknown sysinfo_serial value is rejected at driver
        construction time with NovaException.
        """
        self.flags(sysinfo_serial="invalid", group="libvirt")
        self.assertRaises(exception.NovaException,
                          libvirt_driver.LibvirtDriver,
                          fake.FakeVirtAPI(),
                          True)
def _create_fake_service_compute(self):
service_info = {
'id': 1729,
'host': 'fake',
'report_count': 0
}
service_ref = objects.Service(**service_info)
compute_info = {
'id': 1729,
'vcpus': 2,
'memory_mb': 1024,
'local_gb': 2048,
'vcpus_used': 0,
'memory_mb_used': 0,
'local_gb_used': 0,
'free_ram_mb': 1024,
'free_disk_gb': 2048,
'hypervisor_type': 'xen',
'hypervisor_version': 1,
'running_vms': 0,
'cpu_info': '',
'current_workload': 0,
'service_id': service_ref['id'],
'host': service_ref['host']
}
compute_ref = objects.ComputeNode(**compute_info)
return (service_ref, compute_ref)
def test_get_guest_config_with_pci_passthrough_kvm(self):
self.flags(virt_type='kvm', group='libvirt')
service_ref, compute_ref = self._create_fake_service_compute()
instance = objects.Instance(**self.test_instance)
image_meta = {}
pci_device_info = dict(test_pci_device.fake_db_dev)
pci_device_info.update(compute_node_id=1,
label='fake',
status='allocated',
address='0000:00:00.1',
compute_id=compute_ref['id'],
instance_uuid=instance.uuid,
request_id=None,
extra_info={})
pci_device = objects.PciDevice(**pci_device_info)
pci_list = objects.PciDeviceList()
pci_list.objects.append(pci_device)
instance.pci_devices = pci_list
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance,
image_meta)
cfg = drvr._get_guest_config(instance, [], {}, disk_info)
had_pci = 0
# care only about the PCI devices
for dev in cfg.devices:
if type(dev) == vconfig.LibvirtConfigGuestHostdevPCI:
had_pci += 1
self.assertEqual(dev.type, 'pci')
self.assertEqual(dev.managed, 'yes')
self.assertEqual(dev.mode, 'subsystem')
self.assertEqual(dev.domain, "0000")
self.assertEqual(dev.bus, "00")
self.assertEqual(dev.slot, "00")
self.assertEqual(dev.function, "1")
self.assertEqual(had_pci, 1)
def test_get_guest_config_with_pci_passthrough_xen(self):
self.flags(virt_type='xen', group='libvirt')
service_ref, compute_ref = self._create_fake_service_compute()
instance = objects.Instance(**self.test_instance)
image_meta = {}
pci_device_info = dict(test_pci_device.fake_db_dev)
pci_device_info.update(compute_node_id=1,
label='fake',
status='allocated',
address='0000:00:00.2',
compute_id=compute_ref['id'],
instance_uuid=instance.uuid,
request_id=None,
extra_info={})
pci_device = objects.PciDevice(**pci_device_info)
pci_list = objects.PciDeviceList()
pci_list.objects.append(pci_device)
instance.pci_devices = pci_list
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance,
image_meta)
cfg = drvr._get_guest_config(instance, [], {}, disk_info)
had_pci = 0
# care only about the PCI devices
for dev in cfg.devices:
if type(dev) == vconfig.LibvirtConfigGuestHostdevPCI:
had_pci += 1
self.assertEqual(dev.type, 'pci')
self.assertEqual(dev.managed, 'no')
self.assertEqual(dev.mode, 'subsystem')
self.assertEqual(dev.domain, "0000")
self.assertEqual(dev.bus, "00")
self.assertEqual(dev.slot, "00")
self.assertEqual(dev.function, "2")
self.assertEqual(had_pci, 1)
    def test_get_guest_config_os_command_line_through_image_meta(self):
        """With a kernel_id set, the os_command_line image property is
        used verbatim as the guest kernel command line.
        """
        self.flags(virt_type="kvm",
                   cpu_mode=None,
                   group='libvirt')
        self.test_instance['kernel_id'] = "fake_kernel_id"
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance_ref = objects.Instance(**self.test_instance)
        image_meta = {"properties": {"os_command_line":
                      "fake_os_command_line"}}
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)
        cfg = drvr._get_guest_config(instance_ref,
                                     _fake_network_info(self.stubs, 1),
                                     image_meta, disk_info)
        self.assertEqual(cfg.os_cmdline, "fake_os_command_line")
    def test_get_guest_config_os_command_line_without_kernel_id(self):
        """Without a kernel_id the os_command_line image property is
        ignored and os_cmdline stays None.
        """
        self.flags(virt_type="kvm",
                   cpu_mode=None,
                   group='libvirt')
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance_ref = objects.Instance(**self.test_instance)
        image_meta = {"properties": {"os_command_line":
                      "fake_os_command_line"}}
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)
        cfg = drvr._get_guest_config(instance_ref,
                                     _fake_network_info(self.stubs, 1),
                                     image_meta, disk_info)
        self.assertIsNone(cfg.os_cmdline)
    def test_get_guest_config_os_command_empty(self):
        """Empty or None os_command_line image properties must not wipe
        the instance's default kernel command line.
        """
        self.flags(virt_type="kvm",
                   cpu_mode=None,
                   group='libvirt')
        self.test_instance['kernel_id'] = "fake_kernel_id"
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance_ref = objects.Instance(**self.test_instance)
        image_meta = {"properties": {"os_command_line": ""}}
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)
        # the instance has 'root=/dev/vda console=tty0 console=ttyS0' set by
        # default, so testing an empty string and None value in the
        # os_command_line image property must pass
        cfg = drvr._get_guest_config(instance_ref,
                                     _fake_network_info(self.stubs, 1),
                                     image_meta, disk_info)
        self.assertNotEqual(cfg.os_cmdline, "")
        image_meta = {"properties": {"os_command_line": None}}
        cfg = drvr._get_guest_config(instance_ref,
                                     _fake_network_info(self.stubs, 1),
                                     image_meta, disk_info)
        self.assertIsNotNone(cfg.os_cmdline)
    def test_get_guest_config_armv7(self):
        """On an ARMv7 host the default guest machine type is
        'vexpress-a15'.
        """
        def get_host_capabilities_stub(self):
            # fake host caps reporting an ARMv7 CPU
            cpu = vconfig.LibvirtConfigGuestCPU()
            cpu.arch = arch.ARMV7
            caps = vconfig.LibvirtConfigCaps()
            caps.host = vconfig.LibvirtConfigCapsHost()
            caps.host.cpu = cpu
            return caps
        self.flags(virt_type="kvm",
                   group="libvirt")
        instance_ref = objects.Instance(**self.test_instance)
        image_meta = {}
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)
        self.stubs.Set(host.Host, "get_capabilities",
                       get_host_capabilities_stub)
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        cfg = drvr._get_guest_config(instance_ref,
                                     _fake_network_info(self.stubs, 1),
                                     {}, disk_info)
        self.assertEqual(cfg.os_mach_type, "vexpress-a15")
    def test_get_guest_config_aarch64(self):
        """On an AArch64 host the default guest machine type is 'virt'."""
        def get_host_capabilities_stub(self):
            # fake host caps reporting an AArch64 CPU
            cpu = vconfig.LibvirtConfigGuestCPU()
            cpu.arch = arch.AARCH64
            caps = vconfig.LibvirtConfigCaps()
            caps.host = vconfig.LibvirtConfigCapsHost()
            caps.host.cpu = cpu
            return caps
        self.flags(virt_type="kvm",
                   group="libvirt")
        instance_ref = objects.Instance(**self.test_instance)
        image_meta = {}
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)
        self.stubs.Set(host.Host, "get_capabilities",
                       get_host_capabilities_stub)
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        cfg = drvr._get_guest_config(instance_ref,
                                     _fake_network_info(self.stubs, 1),
                                     {}, disk_info)
        self.assertEqual(cfg.os_mach_type, "virt")
def test_get_guest_config_machine_type_s390(self):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
caps = vconfig.LibvirtConfigCaps()
caps.host = vconfig.LibvirtConfigCapsHost()
caps.host.cpu = vconfig.LibvirtConfigGuestCPU()
host_cpu_archs = (arch.S390, arch.S390X)
for host_cpu_arch in host_cpu_archs:
caps.host.cpu.arch = host_cpu_arch
os_mach_type = drvr._get_machine_type(None, caps)
self.assertEqual('s390-ccw-virtio', os_mach_type)
    def test_get_guest_config_machine_type_through_image_meta(self):
        """The hw_machine_type image property is used verbatim as the
        guest machine type.
        """
        self.flags(virt_type="kvm",
                   group='libvirt')
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance_ref = objects.Instance(**self.test_instance)
        image_meta = {}
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)
        image_meta = {"properties": {"hw_machine_type":
                      "fake_machine_type"}}
        cfg = drvr._get_guest_config(instance_ref,
                                     _fake_network_info(self.stubs, 1),
                                     image_meta, disk_info)
        self.assertEqual(cfg.os_mach_type, "fake_machine_type")
def test_get_guest_config_machine_type_from_config(self):
self.flags(virt_type='kvm', group='libvirt')
self.flags(hw_machine_type=['x86_64=fake_machine_type'],
group='libvirt')
def fake_getCapabilities():
return """
<capabilities>
<host>
<uuid>cef19ce0-0ca2-11df-855d-b19fbce37686</uuid>
<cpu>
<arch>x86_64</arch>
<model>Penryn</model>
<vendor>Intel</vendor>
<topology sockets='1' cores='2' threads='1'/>
<feature name='xtpr'/>
</cpu>
</host>
</capabilities>
"""
def fake_baselineCPU(cpu, flag):
return """<cpu mode='custom' match='exact'>
<model fallback='allow'>Penryn</model>
<vendor>Intel</vendor>
<feature policy='require' name='xtpr'/>
</cpu>
"""
# Make sure the host arch is mocked as x86_64
self.create_fake_libvirt_mock(getCapabilities=fake_getCapabilities,
baselineCPU=fake_baselineCPU,
getVersion=lambda: 1005001)
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = objects.Instance(**self.test_instance)
image_meta = {}
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta)
cfg = drvr._get_guest_config(instance_ref,
_fake_network_info(self.stubs, 1),
{}, disk_info)
self.assertEqual(cfg.os_mach_type, "fake_machine_type")
    def _test_get_guest_config_ppc64(self, device_index):
        """Test for nova.virt.libvirt.driver.LibvirtDriver._get_guest_config.

        Checks that ppc64/ppc guests get a 'vga' video device; the caller
        supplies device_index, the expected position of the video device in
        the generated device list (it shifts depending on graphics config).
        """
        self.flags(virt_type='kvm', group='libvirt')
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance_ref = objects.Instance(**self.test_instance)
        image_meta = {}
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)
        image_meta = {}
        expected = (arch.PPC64, arch.PPC)
        for guestarch in expected:
            with mock.patch.object(libvirt_driver.libvirt_utils,
                                   'get_arch',
                                   return_value=guestarch):
                cfg = drvr._get_guest_config(instance_ref, [],
                                            image_meta,
                                            disk_info)
                self.assertIsInstance(cfg.devices[device_index],
                                      vconfig.LibvirtConfigGuestVideo)
                self.assertEqual(cfg.devices[device_index].type, 'vga')
    def test_get_guest_config_ppc64_through_image_meta_vnc_enabled(self):
        # With VNC enabled the video device is expected at index 6.
        self.flags(vnc_enabled=True)
        self._test_get_guest_config_ppc64(6)
    def test_get_guest_config_ppc64_through_image_meta_spice_enabled(self):
        # SPICE with the agent adds extra devices; video moves to index 8.
        self.flags(enabled=True,
                   agent_enabled=True,
                   group='spice')
        self._test_get_guest_config_ppc64(8)
    def _test_get_guest_config_bootmenu(self, image_meta, extra_specs):
        """Assert os_bootmenu is enabled for the given image metadata /
        flavor extra_specs combination.
        """
        self.flags(virt_type='kvm', group='libvirt')
        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance_ref = objects.Instance(**self.test_instance)
        instance_ref.flavor.extra_specs = extra_specs
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref, image_meta)
        conf = conn._get_guest_config(instance_ref, [], image_meta, disk_info)
        self.assertTrue(conf.os_bootmenu)
    def test_get_guest_config_bootmenu_via_image_meta(self):
        # Boot menu requested through the hw_boot_menu image property.
        self._test_get_guest_config_bootmenu(
            {"properties": {"hw_boot_menu": "True"}}, {})
    def test_get_guest_config_bootmenu_via_extra_specs(self):
        # Boot menu requested through the hw:boot_menu flavor extra spec.
        self._test_get_guest_config_bootmenu({}, {'hw:boot_menu': 'True'})
    def test_get_guest_cpu_config_none(self):
        """cpu_mode="none" still yields a CPU element (carrying topology)
        but with no mode or model set.
        """
        self.flags(cpu_mode="none", group='libvirt')
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance_ref = objects.Instance(**self.test_instance)
        image_meta = {}
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)
        conf = drvr._get_guest_config(instance_ref,
                                      _fake_network_info(self.stubs, 1),
                                      {}, disk_info)
        self.assertIsInstance(conf.cpu,
                              vconfig.LibvirtConfigGuestCPU)
        self.assertIsNone(conf.cpu.mode)
        self.assertIsNone(conf.cpu.model)
        self.assertEqual(conf.cpu.sockets, 1)
        self.assertEqual(conf.cpu.cores, 1)
        self.assertEqual(conf.cpu.threads, 1)
    def test_get_guest_cpu_config_default_kvm(self):
        """With cpu_mode unset, KVM guests default to host-model."""
        self.flags(virt_type="kvm",
                   cpu_mode=None,
                   group='libvirt')
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance_ref = objects.Instance(**self.test_instance)
        image_meta = {}
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)
        conf = drvr._get_guest_config(instance_ref,
                                      _fake_network_info(self.stubs, 1),
                                      {}, disk_info)
        self.assertIsInstance(conf.cpu,
                              vconfig.LibvirtConfigGuestCPU)
        self.assertEqual(conf.cpu.mode, "host-model")
        self.assertIsNone(conf.cpu.model)
        self.assertEqual(conf.cpu.sockets, 1)
        self.assertEqual(conf.cpu.cores, 1)
        self.assertEqual(conf.cpu.threads, 1)
    def test_get_guest_cpu_config_default_uml(self):
        """UML guests get no CPU config element at all by default."""
        self.flags(virt_type="uml",
                   cpu_mode=None,
                   group='libvirt')
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance_ref = objects.Instance(**self.test_instance)
        image_meta = {}
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)
        conf = drvr._get_guest_config(instance_ref,
                                      _fake_network_info(self.stubs, 1),
                                      {}, disk_info)
        self.assertIsNone(conf.cpu)
    def test_get_guest_cpu_config_default_lxc(self):
        """LXC guests get no CPU config element at all by default."""
        self.flags(virt_type="lxc",
                   cpu_mode=None,
                   group='libvirt')
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance_ref = objects.Instance(**self.test_instance)
        image_meta = {}
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)
        conf = drvr._get_guest_config(instance_ref,
                                      _fake_network_info(self.stubs, 1),
                                      {}, disk_info)
        self.assertIsNone(conf.cpu)
    def test_get_guest_cpu_config_host_passthrough(self):
        """cpu_mode=host-passthrough is propagated verbatim, with no model."""
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance_ref = objects.Instance(**self.test_instance)
        image_meta = {}
        self.flags(cpu_mode="host-passthrough", group='libvirt')
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)
        conf = drvr._get_guest_config(instance_ref,
                                      _fake_network_info(self.stubs, 1),
                                      {}, disk_info)
        self.assertIsInstance(conf.cpu,
                              vconfig.LibvirtConfigGuestCPU)
        self.assertEqual(conf.cpu.mode, "host-passthrough")
        self.assertIsNone(conf.cpu.model)
        self.assertEqual(conf.cpu.sockets, 1)
        self.assertEqual(conf.cpu.cores, 1)
        self.assertEqual(conf.cpu.threads, 1)
    def test_get_guest_cpu_config_host_model(self):
        """cpu_mode=host-model is propagated verbatim, with no model."""
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance_ref = objects.Instance(**self.test_instance)
        image_meta = {}
        self.flags(cpu_mode="host-model", group='libvirt')
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)
        conf = drvr._get_guest_config(instance_ref,
                                      _fake_network_info(self.stubs, 1),
                                      {}, disk_info)
        self.assertIsInstance(conf.cpu,
                              vconfig.LibvirtConfigGuestCPU)
        self.assertEqual(conf.cpu.mode, "host-model")
        self.assertIsNone(conf.cpu.model)
        self.assertEqual(conf.cpu.sockets, 1)
        self.assertEqual(conf.cpu.cores, 1)
        self.assertEqual(conf.cpu.threads, 1)
    def test_get_guest_cpu_config_custom(self):
        """cpu_mode=custom uses the configured cpu_model (here Penryn)."""
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance_ref = objects.Instance(**self.test_instance)
        image_meta = {}
        self.flags(cpu_mode="custom",
                   cpu_model="Penryn",
                   group='libvirt')
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)
        conf = drvr._get_guest_config(instance_ref,
                                      _fake_network_info(self.stubs, 1),
                                      {}, disk_info)
        self.assertIsInstance(conf.cpu,
                              vconfig.LibvirtConfigGuestCPU)
        self.assertEqual(conf.cpu.mode, "custom")
        self.assertEqual(conf.cpu.model, "Penryn")
        self.assertEqual(conf.cpu.sockets, 1)
        self.assertEqual(conf.cpu.cores, 1)
        self.assertEqual(conf.cpu.threads, 1)
    def test_get_guest_cpu_topology(self):
        """8 vcpus capped at hw:cpu_max_sockets=4 come out as a
        4-socket x 2-core x 1-thread topology.
        """
        instance_ref = objects.Instance(**self.test_instance)
        instance_ref.flavor.vcpus = 8
        instance_ref.flavor.extra_specs = {'hw:cpu_max_sockets': '4'}
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        image_meta = {}
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)
        conf = drvr._get_guest_config(instance_ref,
                                      _fake_network_info(self.stubs, 1),
                                      {}, disk_info)
        self.assertIsInstance(conf.cpu,
                              vconfig.LibvirtConfigGuestCPU)
        self.assertEqual(conf.cpu.mode, "host-model")
        self.assertEqual(conf.cpu.sockets, 4)
        self.assertEqual(conf.cpu.cores, 2)
        self.assertEqual(conf.cpu.threads, 1)
def test_get_guest_memory_balloon_config_by_default(self):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = objects.Instance(**self.test_instance)
image_meta = {}
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta)
cfg = drvr._get_guest_config(instance_ref, [], {}, disk_info)
for device in cfg.devices:
if device.root_name == 'memballoon':
self.assertIsInstance(device,
vconfig.LibvirtConfigMemoryBalloon)
self.assertEqual('virtio', device.model)
self.assertEqual(10, device.period)
def test_get_guest_memory_balloon_config_disable(self):
self.flags(mem_stats_period_seconds=0, group='libvirt')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = objects.Instance(**self.test_instance)
image_meta = {}
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta)
cfg = drvr._get_guest_config(instance_ref, [], {}, disk_info)
no_exist = True
for device in cfg.devices:
if device.root_name == 'memballoon':
no_exist = False
break
self.assertTrue(no_exist)
    def test_get_guest_memory_balloon_config_period_value(self):
        """A custom mem_stats_period_seconds value is used as the balloon
        stats period.
        """
        self.flags(mem_stats_period_seconds=21, group='libvirt')
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance_ref = objects.Instance(**self.test_instance)
        image_meta = {}
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)
        cfg = drvr._get_guest_config(instance_ref, [], {}, disk_info)
        # NOTE(review): if no memballoon device is generated this loop body
        # never runs and the test passes vacuously.
        for device in cfg.devices:
            if device.root_name == 'memballoon':
                self.assertIsInstance(device,
                                      vconfig.LibvirtConfigMemoryBalloon)
                self.assertEqual('virtio', device.model)
                self.assertEqual(21, device.period)
    def test_get_guest_memory_balloon_config_qemu(self):
        """QEMU guests get a virtio memballoon with the default 10s period."""
        self.flags(virt_type='qemu', group='libvirt')
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance_ref = objects.Instance(**self.test_instance)
        image_meta = {}
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)
        cfg = drvr._get_guest_config(instance_ref, [], {}, disk_info)
        # NOTE(review): passes vacuously if the device is absent (see above
        # style of check); kept as-is to avoid changing test semantics.
        for device in cfg.devices:
            if device.root_name == 'memballoon':
                self.assertIsInstance(device,
                                      vconfig.LibvirtConfigMemoryBalloon)
                self.assertEqual('virtio', device.model)
                self.assertEqual(10, device.period)
    def test_get_guest_memory_balloon_config_xen(self):
        """Xen guests get a 'xen' model memballoon."""
        self.flags(virt_type='xen', group='libvirt')
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance_ref = objects.Instance(**self.test_instance)
        image_meta = {}
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)
        cfg = drvr._get_guest_config(instance_ref, [], {}, disk_info)
        for device in cfg.devices:
            if device.root_name == 'memballoon':
                self.assertIsInstance(device,
                                      vconfig.LibvirtConfigMemoryBalloon)
                self.assertEqual('xen', device.model)
                self.assertEqual(10, device.period)
def test_get_guest_memory_balloon_config_lxc(self):
self.flags(virt_type='lxc', group='libvirt')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = objects.Instance(**self.test_instance)
image_meta = {}
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta)
cfg = drvr._get_guest_config(instance_ref, [], {}, disk_info)
no_exist = True
for device in cfg.devices:
if device.root_name == 'memballoon':
no_exist = False
break
self.assertTrue(no_exist)
    def test_xml_and_uri_no_ramdisk_no_kernel(self):
        # No kernel_id/ramdisk_id set: neither should appear in the XML.
        instance_data = dict(self.test_instance)
        self._check_xml_and_uri(instance_data,
                                expect_kernel=False, expect_ramdisk=False)
    def test_xml_and_uri_no_ramdisk_no_kernel_xen_hvm(self):
        # Xen HVM mode guest without kernel/ramdisk.
        instance_data = dict(self.test_instance)
        instance_data.update({'vm_mode': vm_mode.HVM})
        self._check_xml_and_uri(instance_data, expect_kernel=False,
                                expect_ramdisk=False, expect_xen_hvm=True)
    def test_xml_and_uri_no_ramdisk_no_kernel_xen_pv(self):
        # Xen paravirt mode guest without kernel/ramdisk.
        instance_data = dict(self.test_instance)
        instance_data.update({'vm_mode': vm_mode.XEN})
        self._check_xml_and_uri(instance_data, expect_kernel=False,
                                expect_ramdisk=False, expect_xen_hvm=False,
                                xen_only=True)
    def test_xml_and_uri_no_ramdisk(self):
        # Kernel only, no ramdisk.
        instance_data = dict(self.test_instance)
        instance_data['kernel_id'] = 'aki-deadbeef'
        self._check_xml_and_uri(instance_data,
                                expect_kernel=True, expect_ramdisk=False)
    def test_xml_and_uri_no_kernel(self):
        # NOTE(review): ramdisk_id is set yet expect_ramdisk=False —
        # presumably a ramdisk without a kernel is ignored; confirm in
        # _check_xml_and_uri.
        instance_data = dict(self.test_instance)
        instance_data['ramdisk_id'] = 'ari-deadbeef'
        self._check_xml_and_uri(instance_data,
                                expect_kernel=False, expect_ramdisk=False)
    def test_xml_and_uri(self):
        # Both kernel and ramdisk present.
        instance_data = dict(self.test_instance)
        instance_data['ramdisk_id'] = 'ari-deadbeef'
        instance_data['kernel_id'] = 'aki-deadbeef'
        self._check_xml_and_uri(instance_data,
                                expect_kernel=True, expect_ramdisk=True)
    def test_xml_and_uri_rescue(self):
        instance_data = dict(self.test_instance)
        instance_data['ramdisk_id'] = 'ari-deadbeef'
        instance_data['kernel_id'] = 'aki-deadbeef'
        self._check_xml_and_uri(instance_data, expect_kernel=True,
                                expect_ramdisk=True, rescue=instance_data)
    def test_xml_and_uri_rescue_no_kernel_no_ramdisk(self):
        instance_data = dict(self.test_instance)
        self._check_xml_and_uri(instance_data, expect_kernel=False,
                                expect_ramdisk=False, rescue=instance_data)
    def test_xml_and_uri_rescue_no_kernel(self):
        instance_data = dict(self.test_instance)
        # NOTE(review): 'aki-' prefix used for a ramdisk id; sibling tests
        # use 'ari-deadbeef'. The value looks opaque to the check, but
        # confirm before normalizing.
        instance_data['ramdisk_id'] = 'aki-deadbeef'
        self._check_xml_and_uri(instance_data, expect_kernel=False,
                                expect_ramdisk=True, rescue=instance_data)
    def test_xml_and_uri_rescue_no_ramdisk(self):
        instance_data = dict(self.test_instance)
        instance_data['kernel_id'] = 'aki-deadbeef'
        self._check_xml_and_uri(instance_data, expect_kernel=True,
                                expect_ramdisk=False, rescue=instance_data)
    def test_xml_uuid(self):
        self._check_xml_and_uuid({"disk_format": "raw"})
    def test_lxc_container_and_uri(self):
        instance_data = dict(self.test_instance)
        self._check_xml_and_container(instance_data)
    def test_xml_disk_prefix(self):
        # No explicit prefix: per-virt-type default device names apply.
        instance_data = dict(self.test_instance)
        self._check_xml_and_disk_prefix(instance_data, None)
    def test_xml_user_specified_disk_prefix(self):
        # Explicit disk_prefix flag overrides the per-virt-type default.
        instance_data = dict(self.test_instance)
        self._check_xml_and_disk_prefix(instance_data, 'sd')
    def test_xml_disk_driver(self):
        instance_data = dict(self.test_instance)
        self._check_xml_and_disk_driver(instance_data)
    def test_xml_disk_bus_virtio(self):
        # Raw images attach as a virtio disk named vda.
        self._check_xml_and_disk_bus({"disk_format": "raw"},
                                     None,
                                     (("disk", "virtio", "vda"),))
    def test_xml_disk_bus_ide(self):
        # It's necessary to check if the architecture is power, because
        # power doesn't have support to ide, and so libvirt translate
        # all ide calls to scsi
        expected = {arch.PPC: ("cdrom", "scsi", "sda"),
                    arch.PPC64: ("cdrom", "scsi", "sda")}
        expec_val = expected.get(blockinfo.libvirt_utils.get_arch({}),
                                 ("cdrom", "ide", "hda"))
        self._check_xml_and_disk_bus({"disk_format": "iso"},
                                     None,
                                     (expec_val,))
    def test_xml_disk_bus_ide_and_virtio(self):
        # It's necessary to check if the architecture is power, because
        # power doesn't have support to ide, and so libvirt translate
        # all ide calls to scsi
        expected = {arch.PPC: ("cdrom", "scsi", "sda"),
                    arch.PPC64: ("cdrom", "scsi", "sda")}
        # An ISO root plus a virtio ephemeral disk and a virtio swap disk.
        swap = {'device_name': '/dev/vdc',
                'swap_size': 1}
        ephemerals = [{'device_type': 'disk',
                       'disk_bus': 'virtio',
                       'device_name': '/dev/vdb',
                       'size': 1}]
        block_device_info = {
                'swap': swap,
                'ephemerals': ephemerals}
        expec_val = expected.get(blockinfo.libvirt_utils.get_arch({}),
                                 ("cdrom", "ide", "hda"))
        self._check_xml_and_disk_bus({"disk_format": "iso"},
                                     block_device_info,
                                     (expec_val,
                                      ("disk", "virtio", "vdb"),
                                      ("disk", "virtio", "vdc")))
@mock.patch.object(host.Host, "list_instance_domains")
def test_list_instances(self, mock_list):
vm1 = FakeVirtDomain(id=3, name="instance00000001")
vm2 = FakeVirtDomain(id=17, name="instance00000002")
vm3 = FakeVirtDomain(name="instance00000003")
vm4 = FakeVirtDomain(name="instance00000004")
mock_list.return_value = [vm1, vm2, vm3, vm4]
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
names = drvr.list_instances()
self.assertEqual(names[0], vm1.name())
self.assertEqual(names[1], vm2.name())
self.assertEqual(names[2], vm3.name())
self.assertEqual(names[3], vm4.name())
mock_list.assert_called_with(only_running=False)
    @mock.patch.object(host.Host, "list_instance_domains")
    def test_list_instance_uuids(self, mock_list):
        """list_instance_uuids() reports the UUIDs of all domains, running
        or not.
        """
        vm1 = FakeVirtDomain(id=3, name="instance00000001")
        vm2 = FakeVirtDomain(id=17, name="instance00000002")
        vm3 = FakeVirtDomain(name="instance00000003")
        vm4 = FakeVirtDomain(name="instance00000004")
        mock_list.return_value = [vm1, vm2, vm3, vm4]
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        uuids = drvr.list_instance_uuids()
        self.assertEqual(len(uuids), 4)
        self.assertEqual(uuids[0], vm1.UUIDString())
        self.assertEqual(uuids[1], vm2.UUIDString())
        self.assertEqual(uuids[2], vm3.UUIDString())
        self.assertEqual(uuids[3], vm4.UUIDString())
        mock_list.assert_called_with(only_running=False)
    @mock.patch.object(host.Host, "list_instance_domains")
    def test_get_all_block_devices(self, mock_list):
        """_get_all_block_devices() collects only block-type disk sources
        across all domains, skipping file-backed disks.
        """
        xml = [
            """
                <domain type='kvm'>
                    <devices>
                        <disk type='file'>
                            <source file='filename'/>
                        </disk>
                        <disk type='block'>
                            <source dev='/path/to/dev/1'/>
                        </disk>
                    </devices>
                </domain>
            """,
            """
                <domain type='kvm'>
                    <devices>
                        <disk type='file'>
                            <source file='filename'/>
                        </disk>
                    </devices>
                </domain>
            """,
            """
                <domain type='kvm'>
                    <devices>
                        <disk type='file'>
                            <source file='filename'/>
                        </disk>
                        <disk type='block'>
                            <source dev='/path/to/dev/3'/>
                        </disk>
                    </devices>
                </domain>
            """,
        ]
        mock_list.return_value = [
            FakeVirtDomain(xml[0], id=3, name="instance00000001"),
            FakeVirtDomain(xml[1], id=1, name="instance00000002"),
            FakeVirtDomain(xml[2], id=5, name="instance00000003")]
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        devices = drvr._get_all_block_devices()
        self.assertEqual(devices, ['/path/to/dev/1', '/path/to/dev/3'])
        mock_list.assert_called_with()
    @mock.patch('nova.virt.libvirt.host.Host.get_online_cpus')
    def test_get_host_vcpus(self, get_online_cpus):
        """A vcpu_pin_set of 4-5 within online CPUs {4, 5, 6} counts 2."""
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        self.flags(vcpu_pin_set="4-5")
        get_online_cpus.return_value = set([4, 5, 6])
        expected_vcpus = 2
        vcpus = drvr._get_vcpu_total()
        self.assertEqual(expected_vcpus, vcpus)
    @mock.patch('nova.virt.libvirt.host.Host.get_online_cpus')
    def test_get_host_vcpus_out_of_range(self, get_online_cpus):
        """Pinning CPU 6 which is not online raises Invalid."""
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        self.flags(vcpu_pin_set="4-6")
        get_online_cpus.return_value = set([4, 5])
        self.assertRaises(exception.Invalid, drvr._get_vcpu_total)
    @mock.patch('nova.virt.libvirt.host.Host.get_online_cpus')
    def test_get_host_vcpus_libvirt_error(self, get_online_cpus):
        """Invalid is raised when a CPU range is pinned but libvirt cannot
        report the online CPU set.
        """
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        not_supported_exc = fakelibvirt.make_libvirtError(
            fakelibvirt.libvirtError,
            'this function is not supported by the connection driver:'
            ' virNodeNumOfDevices',
            error_code=fakelibvirt.VIR_ERR_NO_SUPPORT)
        self.flags(vcpu_pin_set="4-6")
        get_online_cpus.side_effect = not_supported_exc
        self.assertRaises(exception.Invalid, drvr._get_vcpu_total)
    @mock.patch('nova.virt.libvirt.host.Host.get_online_cpus')
    def test_get_host_vcpus_libvirt_error_success(self, get_online_cpus):
        """A single-CPU pin set still works even when the online-CPU query
        is unsupported by libvirt.
        """
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        not_supported_exc = fakelibvirt.make_libvirtError(
            fakelibvirt.libvirtError,
            'this function is not supported by the connection driver:'
            ' virNodeNumOfDevices',
            error_code=fakelibvirt.VIR_ERR_NO_SUPPORT)
        self.flags(vcpu_pin_set="1")
        get_online_cpus.side_effect = not_supported_exc
        expected_vcpus = 1
        vcpus = drvr._get_vcpu_total()
        self.assertEqual(expected_vcpus, vcpus)
    @mock.patch('nova.virt.libvirt.host.Host.get_cpu_count')
    def test_get_host_vcpus_after_hotplug(self, get_cpu_count):
        """Without a pin set, the vcpu total tracks the live host CPU count
        (i.e. it changes after CPU hotplug).
        """
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        get_cpu_count.return_value = 2
        expected_vcpus = 2
        vcpus = drvr._get_vcpu_total()
        self.assertEqual(expected_vcpus, vcpus)
        get_cpu_count.return_value = 3
        expected_vcpus = 3
        vcpus = drvr._get_vcpu_total()
        self.assertEqual(expected_vcpus, vcpus)
    @mock.patch.object(host.Host, "has_min_version", return_value=True)
    def test_quiesce(self, mock_has_min_version):
        """quiesce() freezes the guest filesystems (fsFreeze) when the image
        declares the qemu guest agent and os_require_quiesce.
        """
        self.create_fake_libvirt_mock(lookupByName=self.fake_lookup)
        with mock.patch.object(FakeVirtDomain, "fsFreeze") as mock_fsfreeze:
            drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI())
            instance = objects.Instance(**self.test_instance)
            img_meta = {"properties": {"hw_qemu_guest_agent": "yes",
                                       "os_require_quiesce": "yes"}}
            self.assertIsNone(drvr.quiesce(self.context, instance, img_meta))
            mock_fsfreeze.assert_called_once_with()
    def test_quiesce_not_supported(self):
        """Without guest-agent support, quiesce() raises
        InstanceQuiesceNotSupported.
        """
        self.create_fake_libvirt_mock()
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI())
        instance = objects.Instance(**self.test_instance)
        self.assertRaises(exception.InstanceQuiesceNotSupported,
                      drvr.quiesce, self.context, instance, None)
    @mock.patch.object(host.Host, "has_min_version", return_value=True)
    def test_unquiesce(self, mock_has_min_version):
        """unquiesce() thaws the guest filesystems (fsThaw)."""
        self.create_fake_libvirt_mock(getLibVersion=lambda: 1002005,
                                      lookupByName=self.fake_lookup)
        with mock.patch.object(FakeVirtDomain, "fsThaw") as mock_fsthaw:
            drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI())
            instance = objects.Instance(**self.test_instance)
            img_meta = {"properties": {"hw_qemu_guest_agent": "yes",
                                       "os_require_quiesce": "yes"}}
            self.assertIsNone(drvr.unquiesce(self.context, instance, img_meta))
            mock_fsthaw.assert_called_once_with()
    def test__create_snapshot_metadata(self):
        """_create_snapshot_metadata builds the glance image metadata for a
        snapshot: first with an empty base (defaults apply), then with
        os_type / disk_format / container_format supplied by the base image.
        """
        base = {}
        instance_data = {'kernel_id': 'kernel',
                    'project_id': 'prj_id',
                    'ramdisk_id': 'ram_id',
                    'os_type': None}
        instance = objects.Instance(**instance_data)
        img_fmt = 'raw'
        snp_name = 'snapshot_name'
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        ret = drvr._create_snapshot_metadata(base, instance, img_fmt, snp_name)
        expected = {'is_public': False,
                    'status': 'active',
                    'name': snp_name,
                    'properties': {
                        'kernel_id': instance['kernel_id'],
                        'image_location': 'snapshot',
                        'image_state': 'available',
                        'owner_id': instance['project_id'],
                        'ramdisk_id': instance['ramdisk_id'],
                    },
                    'disk_format': img_fmt,
                    'container_format': base.get('container_format', 'bare')
                    }
        self.assertEqual(ret, expected)
        # simulate an instance with os_type field defined
        # disk format equals to ami
        # container format not equals to bare
        instance['os_type'] = 'linux'
        base['disk_format'] = 'ami'
        base['container_format'] = 'test_container'
        expected['properties']['os_type'] = instance['os_type']
        expected['disk_format'] = base['disk_format']
        expected['container_format'] = base.get('container_format', 'bare')
        ret = drvr._create_snapshot_metadata(base, instance, img_fmt, snp_name)
        self.assertEqual(ret, expected)
def test_get_volume_driver(self):
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
connection_info = {'driver_volume_type': 'fake',
'data': {'device_path': '/fake',
'access_mode': 'rw'}}
driver = conn._get_volume_driver(connection_info)
result = isinstance(driver, volume_drivers.LibvirtFakeVolumeDriver)
self.assertTrue(result)
    def test_get_volume_driver_unknown(self):
        """An unrecognized driver_volume_type raises VolumeDriverNotFound."""
        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        connection_info = {'driver_volume_type': 'unknown',
                           'data': {'device_path': '/fake',
                                    'access_mode': 'rw'}}
        self.assertRaises(
            exception.VolumeDriverNotFound,
            conn._get_volume_driver,
            connection_info
        )
    @mock.patch('nova.virt.libvirt.volume.LibvirtFakeVolumeDriver.'
                'connect_volume')
    @mock.patch('nova.virt.libvirt.volume.LibvirtFakeVolumeDriver.get_config')
    def test_get_volume_config(self, get_config, connect_volume):
        """_get_volume_config delegates to the volume driver's get_config
        and returns its result unchanged.
        """
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        connection_info = {'driver_volume_type': 'fake',
                           'data': {'device_path': '/fake',
                                    'access_mode': 'rw'}}
        bdm = {'device_name': 'vdb',
               'disk_bus': 'fake-bus',
               'device_type': 'fake-type'}
        disk_info = {'bus': bdm['disk_bus'], 'type': bdm['device_type'],
                     'dev': 'vdb'}
        mock_config = mock.MagicMock()
        get_config.return_value = mock_config
        config = drvr._get_volume_config(connection_info, disk_info)
        get_config.assert_called_once_with(connection_info, disk_info)
        self.assertEqual(mock_config, config)
    def test_attach_invalid_volume_type(self):
        """attach_volume with an unknown driver_volume_type raises
        VolumeDriverNotFound.
        """
        self.create_fake_libvirt_mock()
        libvirt_driver.LibvirtDriver._conn.lookupByName = self.fake_lookup
        instance = objects.Instance(**self.test_instance)
        self.mox.ReplayAll()
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        self.assertRaises(exception.VolumeDriverNotFound,
                          drvr.attach_volume, None,
                          {"driver_volume_type": "badtype"},
                          instance,
                          "/dev/sda")
    def test_attach_blockio_invalid_hypervisor(self):
        """Block-size hints are rejected with InvalidHypervisorType for a
        virt_type that does not support them.
        """
        self.flags(virt_type='fake_type', group='libvirt')
        self.create_fake_libvirt_mock()
        libvirt_driver.LibvirtDriver._conn.lookupByName = self.fake_lookup
        instance = objects.Instance(**self.test_instance)
        self.mox.ReplayAll()
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        self.assertRaises(exception.InvalidHypervisorType,
                          drvr.attach_volume, None,
                          {"driver_volume_type": "fake",
                           "data": {"logical_block_size": "4096",
                                    "physical_block_size": "4096"}
                          },
                          instance,
                          "/dev/sda")
    @mock.patch.object(fakelibvirt.virConnect, "getLibVersion")
    def test_attach_blockio_invalid_version(self, mock_version):
        """Block-size hints are rejected with Invalid on a too-old libvirt
        (version mocked as 0.9.8).
        """
        mock_version.return_value = (0 * 1000 * 1000) + (9 * 1000) + 8
        self.flags(virt_type='qemu', group='libvirt')
        self.create_fake_libvirt_mock()
        libvirt_driver.LibvirtDriver._conn.lookupByName = self.fake_lookup
        instance = objects.Instance(**self.test_instance)
        self.mox.ReplayAll()
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        self.assertRaises(exception.Invalid,
                          drvr.attach_volume, None,
                          {"driver_volume_type": "fake",
                           "data": {"logical_block_size": "4096",
                                    "physical_block_size": "4096"}
                          },
                          instance,
                          "/dev/sda")
    @mock.patch('nova.utils.get_image_from_system_metadata')
    @mock.patch('nova.virt.libvirt.blockinfo.get_info_from_bdm')
    @mock.patch('nova.virt.libvirt.host.Host.get_domain')
    def test_attach_volume_with_vir_domain_affect_live_flag(self,
            mock_get_domain, mock_get_info, get_image):
        """Attaching to a running or paused domain passes both
        AFFECT_CONFIG and AFFECT_LIVE to attachDeviceFlags, and the
        connect/config/cache-mode helpers are invoked with the expected
        arguments.
        """
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        instance = objects.Instance(**self.test_instance)
        image_meta = {}
        get_image.return_value = image_meta
        mock_dom = mock.MagicMock()
        mock_get_domain.return_value = mock_dom
        connection_info = {"driver_volume_type": "fake",
                           "data": {"device_path": "/fake",
                                    "access_mode": "rw"}}
        bdm = {'device_name': 'vdb',
               'disk_bus': 'fake-bus',
               'device_type': 'fake-type'}
        disk_info = {'bus': bdm['disk_bus'], 'type': bdm['device_type'],
                     'dev': 'vdb'}
        mock_get_info.return_value = disk_info
        mock_conf = mock.MagicMock()
        flags = (fakelibvirt.VIR_DOMAIN_AFFECT_CONFIG |
                 fakelibvirt.VIR_DOMAIN_AFFECT_LIVE)
        # NOTE(review): contextlib.nested is Python-2-only; with-statement
        # tuples would be the py3 replacement.
        with contextlib.nested(
            mock.patch.object(drvr, '_connect_volume'),
            mock.patch.object(drvr, '_get_volume_config',
                              return_value=mock_conf),
            mock.patch.object(drvr, '_set_cache_mode')
        ) as (mock_connect_volume, mock_get_volume_config,
              mock_set_cache_mode):
            for state in (power_state.RUNNING, power_state.PAUSED):
                mock_dom.info.return_value = [state, 512, 512, 2, 1234, 5678]
                drvr.attach_volume(self.context, connection_info, instance,
                                   "/dev/vdb", disk_bus=bdm['disk_bus'],
                                   device_type=bdm['device_type'])
                mock_get_domain.assert_called_with(instance)
                mock_get_info.assert_called_with(CONF.libvirt.virt_type,
                                                 image_meta, bdm)
                mock_connect_volume.assert_called_with(
                    connection_info, disk_info)
                mock_get_volume_config.assert_called_with(
                    connection_info, disk_info)
                mock_set_cache_mode.assert_called_with(mock_conf)
                mock_dom.attachDeviceFlags.assert_called_with(
                    mock_conf.to_xml(), flags=flags)
    @mock.patch('nova.virt.libvirt.host.Host.get_domain')
    def test_detach_volume_with_vir_domain_affect_live_flag(self,
            mock_get_domain):
        """Detaching from a running or paused domain passes both
        AFFECT_CONFIG and AFFECT_LIVE, sends the exact disk XML found in
        the domain, and disconnects the volume afterwards.
        """
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        instance = objects.Instance(**self.test_instance)
        mock_xml = """<domain>
  <devices>
    <disk type='file'>
      <source file='/path/to/fake-volume'/>
      <target dev='vdc' bus='virtio'/>
    </disk>
  </devices>
</domain>"""
        mock_dom = mock.MagicMock()
        mock_dom.XMLDesc.return_value = mock_xml
        connection_info = {"driver_volume_type": "fake",
                           "data": {"device_path": "/fake",
                                    "access_mode": "rw"}}
        flags = (fakelibvirt.VIR_DOMAIN_AFFECT_CONFIG |
                 fakelibvirt.VIR_DOMAIN_AFFECT_LIVE)
        with mock.patch.object(drvr, '_disconnect_volume') as \
                mock_disconnect_volume:
            for state in (power_state.RUNNING, power_state.PAUSED):
                mock_dom.info.return_value = [state, 512, 512, 2, 1234, 5678]
                mock_get_domain.return_value = mock_dom
                drvr.detach_volume(connection_info, instance, '/dev/vdc')
                mock_get_domain.assert_called_with(instance)
                # The serialized disk element must match byte-for-byte.
                mock_dom.detachDeviceFlags.assert_called_with("""<disk type="file" device="disk">
  <source file="/path/to/fake-volume"/>
  <target bus="virtio" dev="vdc"/>
</disk>
""", flags=flags)
                mock_disconnect_volume.assert_called_with(
                    connection_info, 'vdc')
    def test_multi_nic(self):
        """Two fake NICs in network_info produce two bridge-type interface
        elements in the generated XML.
        """
        network_info = _fake_network_info(self.stubs, 2)
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance_ref = objects.Instance(**self.test_instance)
        image_meta = {}
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)
        xml = drvr._get_guest_xml(self.context, instance_ref,
                                  network_info, disk_info,
                                  image_meta)
        tree = etree.fromstring(xml)
        interfaces = tree.findall("./devices/interface")
        self.assertEqual(len(interfaces), 2)
        self.assertEqual(interfaces[0].get('type'), 'bridge')
def _behave_supports_direct_io(self, raise_open=False, raise_write=False,
exc=ValueError()):
open_behavior = os.open(os.path.join('.', '.directio.test'),
os.O_CREAT | os.O_WRONLY | os.O_DIRECT)
if raise_open:
open_behavior.AndRaise(exc)
else:
open_behavior.AndReturn(3)
write_bahavior = os.write(3, mox.IgnoreArg())
if raise_write:
write_bahavior.AndRaise(exc)
else:
os.close(3)
os.unlink(3)
    def test_supports_direct_io(self):
        """_supports_direct_io returns True on success, re-raises unexpected
        errors, and returns False for EINVAL from open or write.
        """
        # O_DIRECT is not supported on all Python runtimes, so on platforms
        # where it's not supported (e.g. Mac), we can still test the code-path
        # by stubbing out the value.
        if not hasattr(os, 'O_DIRECT'):
            # `mock` seems to have trouble stubbing an attr that doesn't
            # originally exist, so falling back to stubbing out the attribute
            # directly.
            os.O_DIRECT = 16384
            self.addCleanup(delattr, os, 'O_DIRECT')
        einval = OSError()
        einval.errno = errno.EINVAL
        self.mox.StubOutWithMock(os, 'open')
        self.mox.StubOutWithMock(os, 'write')
        self.mox.StubOutWithMock(os, 'close')
        self.mox.StubOutWithMock(os, 'unlink')
        _supports_direct_io = libvirt_driver.LibvirtDriver._supports_direct_io
        # Five recorded scenarios, consumed in order by the calls below:
        # success, ValueError on write, ValueError on open, EINVAL on
        # write, EINVAL on open.
        self._behave_supports_direct_io()
        self._behave_supports_direct_io(raise_write=True)
        self._behave_supports_direct_io(raise_open=True)
        self._behave_supports_direct_io(raise_write=True, exc=einval)
        self._behave_supports_direct_io(raise_open=True, exc=einval)
        self.mox.ReplayAll()
        self.assertTrue(_supports_direct_io('.'))
        self.assertRaises(ValueError, _supports_direct_io, '.')
        self.assertRaises(ValueError, _supports_direct_io, '.')
        self.assertFalse(_supports_direct_io('.'))
        self.assertFalse(_supports_direct_io('.'))
        self.mox.VerifyAll()
def _check_xml_and_container(self, instance):
instance_ref = objects.Instance(**instance)
image_meta = {}
self.flags(virt_type='lxc', group='libvirt')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
self.assertEqual(drvr._uri(), 'lxc:///')
network_info = _fake_network_info(self.stubs, 1)
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta)
xml = drvr._get_guest_xml(self.context, instance_ref,
network_info, disk_info,
image_meta)
tree = etree.fromstring(xml)
check = [
(lambda t: t.find('.').get('type'), 'lxc'),
(lambda t: t.find('./os/type').text, 'exe'),
(lambda t: t.find('./devices/filesystem/target').get('dir'), '/')]
for i, (check, expected_result) in enumerate(check):
self.assertEqual(check(tree),
expected_result,
'%s failed common check %d' % (xml, i))
target = tree.find('./devices/filesystem/source').get('dir')
self.assertTrue(len(target) > 0)
    def _check_xml_and_disk_prefix(self, instance, prefix):
        """For each virt type, verify the first disk's device name: the
        user-specified prefix plus 'a' when given, otherwise the per-type
        default (vda / xvda / ubda).
        """
        instance_ref = objects.Instance(**instance)
        image_meta = {}
        def _get_prefix(p, default):
            # User prefix wins; the first disk always gets suffix 'a'.
            if p:
                return p + 'a'
            return default
        type_disk_map = {
            'qemu': [
               (lambda t: t.find('.').get('type'), 'qemu'),
               (lambda t: t.find('./devices/disk/target').get('dev'),
                _get_prefix(prefix, 'vda'))],
            'xen': [
               (lambda t: t.find('.').get('type'), 'xen'),
               (lambda t: t.find('./devices/disk/target').get('dev'),
                _get_prefix(prefix, 'xvda'))],
            'kvm': [
               (lambda t: t.find('.').get('type'), 'kvm'),
               (lambda t: t.find('./devices/disk/target').get('dev'),
                _get_prefix(prefix, 'vda'))],
            'uml': [
               (lambda t: t.find('.').get('type'), 'uml'),
               (lambda t: t.find('./devices/disk/target').get('dev'),
                _get_prefix(prefix, 'ubda'))]
            }
        for (virt_type, checks) in six.iteritems(type_disk_map):
            self.flags(virt_type=virt_type, group='libvirt')
            if prefix:
                self.flags(disk_prefix=prefix, group='libvirt')
            drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
            network_info = _fake_network_info(self.stubs, 1)
            disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                                instance_ref,
                                                image_meta)
            xml = drvr._get_guest_xml(self.context, instance_ref,
                                      network_info, disk_info,
                                      image_meta)
            tree = etree.fromstring(xml)
            for i, (check, expected_result) in enumerate(checks):
                self.assertEqual(check(tree),
                                 expected_result,
                                 '%s != %s failed check %d' %
                                 (check(tree), expected_result, i))
    def _check_xml_and_disk_driver(self, image_meta):
        """Verify disk cache mode depends on O_DIRECT support.

        With direct I/O available every disk driver element must use
        cache="none"; without it, cache="writethrough".
        """
        os_open = os.open
        directio_supported = True

        def os_open_stub(path, flags, *args, **kwargs):
            # Simulate a filesystem with or without O_DIRECT support,
            # then strip the flag so the real open() always succeeds.
            if flags & os.O_DIRECT:
                if not directio_supported:
                    raise OSError(errno.EINVAL,
                                  '%s: %s' % (os.strerror(errno.EINVAL), path))
                flags &= ~os.O_DIRECT
            return os_open(path, flags, *args, **kwargs)
        self.stubs.Set(os, 'open', os_open_stub)

        # Installed on the *class*, so it must be a staticmethod to avoid
        # receiving the instance as the first argument.
        @staticmethod
        def connection_supports_direct_io_stub(dirpath):
            return directio_supported
        self.stubs.Set(libvirt_driver.LibvirtDriver,
            '_supports_direct_io', connection_supports_direct_io_stub)

        instance_ref = objects.Instance(**self.test_instance)
        # NOTE(review): the image_meta parameter is ignored and replaced
        # with an empty dict here — confirm whether callers rely on it.
        image_meta = {}
        network_info = _fake_network_info(self.stubs, 1)

        drv = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)
        xml = drv._get_guest_xml(self.context, instance_ref,
                                 network_info, disk_info, image_meta)
        tree = etree.fromstring(xml)
        disks = tree.findall('./devices/disk/driver')
        for guest_disk in disks:
            self.assertEqual(guest_disk.get("cache"), "none")

        directio_supported = False

        # The O_DIRECT availability is cached on first use in
        # LibvirtDriver, hence we re-create it here
        drv = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)
        xml = drv._get_guest_xml(self.context, instance_ref,
                                 network_info, disk_info, image_meta)
        tree = etree.fromstring(xml)
        disks = tree.findall('./devices/disk/driver')
        for guest_disk in disks:
            self.assertEqual(guest_disk.get("cache"), "writethrough")
    def _check_xml_and_disk_bus(self, image_meta,
                                block_device_info, wantConfig):
        """Verify device type, bus and dev name of each guest disk.

        :param wantConfig: list of (device_type, bus, dev) triples,
            compared positionally against the disks in the generated XML.
        """
        instance_ref = objects.Instance(**self.test_instance)
        network_info = _fake_network_info(self.stubs, 1)

        drv = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta,
                                            block_device_info)
        xml = drv._get_guest_xml(self.context, instance_ref,
                                 network_info, disk_info, image_meta,
                                 block_device_info=block_device_info)
        tree = etree.fromstring(xml)

        got_disks = tree.findall('./devices/disk')
        got_disk_targets = tree.findall('./devices/disk/target')
        # Compare positionally; an IndexError here means the XML has fewer
        # disks than expected.
        for i in range(len(wantConfig)):
            want_device_type = wantConfig[i][0]
            want_device_bus = wantConfig[i][1]
            want_device_dev = wantConfig[i][2]

            got_device_type = got_disks[i].get('device')
            got_device_bus = got_disk_targets[i].get('bus')
            got_device_dev = got_disk_targets[i].get('dev')

            self.assertEqual(got_device_type, want_device_type)
            self.assertEqual(got_device_bus, want_device_bus)
            self.assertEqual(got_device_dev, want_device_dev)
def _check_xml_and_uuid(self, image_meta):
instance_ref = objects.Instance(**self.test_instance)
image_meta = {}
network_info = _fake_network_info(self.stubs, 1)
drv = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta)
xml = drv._get_guest_xml(self.context, instance_ref,
network_info, disk_info, image_meta)
tree = etree.fromstring(xml)
self.assertEqual(tree.find('./uuid').text,
instance_ref['uuid'])
    @mock.patch.object(libvirt_driver.LibvirtDriver,
                       "_get_host_sysinfo_serial_hardware",)
    def _check_xml_and_uri(self, instance, mock_serial,
                           expect_ramdisk=False, expect_kernel=False,
                           rescue=None, expect_xen_hvm=False, xen_only=False):
        """Exhaustively verify generated guest XML and connection URI.

        Builds a per-hypervisor list of (extractor, expected) checks —
        kernel/ramdisk presence, sysinfo entries, serial/console devices,
        disk sources — runs them against the XML produced for each virt
        type, and finally verifies that an explicitly configured
        connection_uri is never overridden.
        """
        mock_serial.return_value = "cef19ce0-0ca2-11df-855d-b19fbce37686"
        instance_ref = objects.Instance(**instance)
        # NOTE(review): image metadata from the caller is not used; an
        # empty dict is forced for all hypervisors.
        image_meta = {}

        xen_vm_mode = vm_mode.XEN
        if expect_xen_hvm:
            xen_vm_mode = vm_mode.HVM

        # Maps virt type -> (expected connection URI, list of checks);
        # each check is (extractor(tree), expected value).
        type_uri_map = {'qemu': ('qemu:///system',
                             [(lambda t: t.find('.').get('type'), 'qemu'),
                              (lambda t: t.find('./os/type').text,
                               vm_mode.HVM),
                              (lambda t: t.find('./devices/emulator'), None)]),
                        'kvm': ('qemu:///system',
                             [(lambda t: t.find('.').get('type'), 'kvm'),
                              (lambda t: t.find('./os/type').text,
                               vm_mode.HVM),
                              (lambda t: t.find('./devices/emulator'), None)]),
                        'uml': ('uml:///system',
                              [(lambda t: t.find('.').get('type'), 'uml'),
                               (lambda t: t.find('./os/type').text,
                                vm_mode.UML)]),
                        'xen': ('xen:///',
                              [(lambda t: t.find('.').get('type'), 'xen'),
                               (lambda t: t.find('./os/type').text,
                                xen_vm_mode)])}

        if expect_xen_hvm or xen_only:
            hypervisors_to_check = ['xen']
        else:
            hypervisors_to_check = ['qemu', 'kvm', 'xen']

        for hypervisor_type in hypervisors_to_check:
            check_list = type_uri_map[hypervisor_type][1]

            if rescue:
                suffix = '.rescue'
            else:
                suffix = ''
            if expect_kernel:
                check = (lambda t: self.relpath(t.find('./os/kernel').text).
                         split('/')[1], 'kernel' + suffix)
            else:
                check = (lambda t: t.find('./os/kernel'), None)
            check_list.append(check)

            if expect_kernel:
                check = (lambda t: "no_timer_check" in t.find('./os/cmdline').
                         text, hypervisor_type == "qemu")
                check_list.append(check)
            # Hypervisors that only support vm_mode.HVM and Xen
            # should not produce configuration that results in kernel
            # arguments
            if not expect_kernel and (hypervisor_type in
                                      ['qemu', 'kvm', 'xen']):
                check = (lambda t: t.find('./os/root'), None)
                check_list.append(check)
                check = (lambda t: t.find('./os/cmdline'), None)
                check_list.append(check)

            if expect_ramdisk:
                check = (lambda t: self.relpath(t.find('./os/initrd').text).
                         split('/')[1], 'ramdisk' + suffix)
            else:
                check = (lambda t: t.find('./os/initrd'), None)
            check_list.append(check)

            if hypervisor_type in ['qemu', 'kvm']:
                # Verify the five sysinfo entries (name + text each):
                # manufacturer, product, version, serial, uuid.
                xpath = "./sysinfo/system/entry"
                check = (lambda t: t.findall(xpath)[0].get("name"),
                         "manufacturer")
                check_list.append(check)
                check = (lambda t: t.findall(xpath)[0].text,
                         version.vendor_string())
                check_list.append(check)

                check = (lambda t: t.findall(xpath)[1].get("name"),
                         "product")
                check_list.append(check)
                check = (lambda t: t.findall(xpath)[1].text,
                         version.product_string())
                check_list.append(check)

                check = (lambda t: t.findall(xpath)[2].get("name"),
                         "version")
                check_list.append(check)
                # NOTE(sirp): empty strings don't roundtrip in lxml (they are
                # converted to None), so we need an `or ''` to correct for that
                check = (lambda t: t.findall(xpath)[2].text or '',
                         version.version_string_with_package())
                check_list.append(check)

                check = (lambda t: t.findall(xpath)[3].get("name"),
                         "serial")
                check_list.append(check)
                check = (lambda t: t.findall(xpath)[3].text,
                         "cef19ce0-0ca2-11df-855d-b19fbce37686")
                check_list.append(check)

                check = (lambda t: t.findall(xpath)[4].get("name"),
                         "uuid")
                check_list.append(check)
                check = (lambda t: t.findall(xpath)[4].text,
                         instance['uuid'])
                check_list.append(check)

            if hypervisor_type in ['qemu', 'kvm']:
                check = (lambda t: t.findall('./devices/serial')[0].get(
                    'type'), 'file')
                check_list.append(check)
                check = (lambda t: t.findall('./devices/serial')[1].get(
                    'type'), 'pty')
                check_list.append(check)
                check = (lambda t: self.relpath(t.findall(
                    './devices/serial/source')[0].get('path')).
                    split('/')[1], 'console.log')
                check_list.append(check)
            else:
                check = (lambda t: t.find('./devices/console').get(
                    'type'), 'pty')
                check_list.append(check)

        # Checks that apply regardless of virt type.
        common_checks = [
            (lambda t: t.find('.').tag, 'domain'),
            (lambda t: t.find('./memory').text, '2097152')]
        if rescue:
            common_checks += [
                (lambda t: self.relpath(t.findall('./devices/disk/source')[0].
                    get('file')).split('/')[1], 'disk.rescue'),
                (lambda t: self.relpath(t.findall('./devices/disk/source')[1].
                    get('file')).split('/')[1], 'disk')]
        else:
            common_checks += [(lambda t: self.relpath(t.findall(
                './devices/disk/source')[0].get('file')).split('/')[1],
                               'disk')]
            common_checks += [(lambda t: self.relpath(t.findall(
                './devices/disk/source')[1].get('file')).split('/')[1],
                               'disk.local')]

        for virt_type in hypervisors_to_check:
            expected_uri = type_uri_map[virt_type][0]
            checks = type_uri_map[virt_type][1]
            self.flags(virt_type=virt_type, group='libvirt')

            with mock.patch('nova.virt.libvirt.driver.libvirt') as old_virt:
                # Simulate an older libvirt without this capability flag.
                del old_virt.VIR_CONNECT_BASELINE_CPU_EXPAND_FEATURES

                drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)

                self.assertEqual(drvr._uri(), expected_uri)

                network_info = _fake_network_info(self.stubs, 1)
                disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                                    instance_ref,
                                                    image_meta,
                                                    rescue=rescue)
                xml = drvr._get_guest_xml(self.context, instance_ref,
                                          network_info, disk_info,
                                          image_meta,
                                          rescue=rescue)
                tree = etree.fromstring(xml)
                for i, (check, expected_result) in enumerate(checks):
                    self.assertEqual(check(tree),
                                     expected_result,
                                     '%s != %s failed check %d' %
                                     (check(tree), expected_result, i))

                for i, (check, expected_result) in enumerate(common_checks):
                    self.assertEqual(check(tree),
                                     expected_result,
                                     '%s != %s failed common check %d' %
                                     (check(tree), expected_result, i))

                filterref = './devices/interface/filterref'
                vif = network_info[0]
                nic_id = vif['address'].replace(':', '')
                fw = firewall.NWFilterFirewall(fake.FakeVirtAPI(), drvr)
                instance_filter_name = fw._instance_filter_name(instance_ref,
                                                                nic_id)
                self.assertEqual(tree.find(filterref).get('filter'),
                                 instance_filter_name)

        # This test is supposed to make sure we don't
        # override a specifically set uri
        #
        # Deliberately not just assigning this string to CONF.connection_uri
        # and checking against that later on. This way we make sure the
        # implementation doesn't fiddle around with the CONF.
        testuri = 'something completely different'
        self.flags(connection_uri=testuri, group='libvirt')
        for (virt_type, (expected_uri, checks)) in six.iteritems(type_uri_map):
            self.flags(virt_type=virt_type, group='libvirt')
            drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
            self.assertEqual(drvr._uri(), testuri)
    def test_ensure_filtering_rules_for_instance_timeout(self):
        """ensure_filtering_rules_for_instance() finishes with timeout.

        With instance_filter_exists stubbed to always return a falsy
        value, the retry loop should sleep until it gives up (29 seconds
        of accumulated sleep) and raise NovaException.
        """
        # Preparing mocks
        def fake_none(self, *args):
            return

        class FakeTime(object):
            # Records total simulated sleep time instead of sleeping.
            def __init__(self):
                self.counter = 0

            def sleep(self, t):
                self.counter += t

        fake_timer = FakeTime()

        def fake_sleep(t):
            fake_timer.sleep(t)

        # _fake_network_info must be called before create_fake_libvirt_mock(),
        # as _fake_network_info calls importutils.import_class() and
        # create_fake_libvirt_mock() mocks importutils.import_class().
        network_info = _fake_network_info(self.stubs, 1)
        self.create_fake_libvirt_mock()
        instance_ref = objects.Instance(**self.test_instance)

        # Start test
        self.mox.ReplayAll()
        try:
            drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
            self.stubs.Set(drvr.firewall_driver,
                           'setup_basic_filtering',
                           fake_none)
            self.stubs.Set(drvr.firewall_driver,
                           'prepare_instance_filter',
                           fake_none)
            self.stubs.Set(drvr.firewall_driver,
                           'instance_filter_exists',
                           fake_none)
            self.stubs.Set(greenthread,
                           'sleep',
                           fake_sleep)
            drvr.ensure_filtering_rules_for_instance(instance_ref,
                                                     network_info)
        except exception.NovaException as e:
            msg = ('The firewall filter for %s does not exist' %
                   instance_ref['name'])
            c1 = (0 <= six.text_type(e).find(msg))
        self.assertTrue(c1)

        self.assertEqual(29, fake_timer.counter, "Didn't wait the expected "
                                                 "amount of time")
    @mock.patch.object(libvirt_driver.LibvirtDriver,
                       '_create_shared_storage_test_file')
    @mock.patch.object(fakelibvirt.Connection, 'compareCPU')
    def test_check_can_live_migrate_dest_all_pass_with_block_migration(
            self, mock_cpu, mock_test_file):
        """Destination pre-check succeeds for a block migration and the
        returned dict includes disk_available_mb converted to MB.
        """
        instance_ref = objects.Instance(**self.test_instance)
        instance_ref.vcpu_model = test_vcpu_model.fake_vcpumodel
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        compute_info = {'disk_available_least': 400,
                        'cpu_info': 'asdf',
                        }
        filename = "file"

        # _check_cpu_match
        mock_cpu.return_value = 1
        # mounted_on_same_shared_storage
        mock_test_file.return_value = filename
        # No need for the src_compute_info
        return_value = drvr.check_can_live_migrate_destination(self.context,
                              instance_ref, None, compute_info, True)
        # 400 GB advertised by the host -> 409600 MB available.
        self.assertThat({"filename": "file",
                         'image_type': 'default',
                         'disk_available_mb': 409600,
                         "disk_over_commit": False,
                         "block_migration": True},
                        matchers.DictMatches(return_value))
    @mock.patch.object(libvirt_driver.LibvirtDriver,
                       '_create_shared_storage_test_file')
    @mock.patch.object(fakelibvirt.Connection, 'compareCPU')
    def test_check_can_live_migrate_dest_all_pass_no_block_migration(
            self, mock_cpu, mock_test_file):
        """Destination pre-check succeeds without block migration; the
        disk_available_mb field stays None in that case.
        """
        instance_ref = objects.Instance(**self.test_instance)
        instance_ref.vcpu_model = test_vcpu_model.fake_vcpumodel
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        compute_info = {'disk_available_least': 400,
                        'cpu_info': 'asdf',
                        }
        filename = "file"

        # _check_cpu_match
        mock_cpu.return_value = 1
        # mounted_on_same_shared_storage
        mock_test_file.return_value = filename
        # No need for the src_compute_info
        return_value = drvr.check_can_live_migrate_destination(self.context,
                              instance_ref, None, compute_info, False)
        self.assertThat({"filename": "file",
                         "image_type": 'default',
                         "block_migration": False,
                         "disk_over_commit": False,
                         "disk_available_mb": None},
                        matchers.DictMatches(return_value))
    @mock.patch.object(libvirt_driver.LibvirtDriver,
                       '_create_shared_storage_test_file',
                       return_value='fake')
    @mock.patch.object(libvirt_driver.LibvirtDriver, '_compare_cpu')
    def test_check_can_live_migrate_guest_cpu_none_model(
            self, mock_cpu, mock_test_file):
        # Tests that when instance.vcpu_model.model is None, the host cpu
        # model is used for live migration.
        instance_ref = objects.Instance(**self.test_instance)
        instance_ref.vcpu_model = test_vcpu_model.fake_vcpumodel
        instance_ref.vcpu_model.model = None
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        compute_info = {'cpu_info': 'asdf'}
        result = drvr.check_can_live_migrate_destination(
            self.context, instance_ref, compute_info, compute_info)
        # With no guest model, _compare_cpu receives None and falls back
        # to the host cpu_info.
        mock_cpu.assert_called_once_with(None, 'asdf')
        expected_result = {"filename": 'fake',
                           "image_type": CONF.libvirt.images_type,
                           "block_migration": False,
                           "disk_over_commit": False,
                           "disk_available_mb": None}
        self.assertDictEqual(expected_result, result)
    @mock.patch.object(libvirt_driver.LibvirtDriver,
                       '_create_shared_storage_test_file')
    @mock.patch.object(fakelibvirt.Connection, 'compareCPU')
    def test_check_can_live_migrate_dest_no_instance_cpu_info(
            self, mock_cpu, mock_test_file):
        """When the instance has no vcpu_model, the source host's
        cpu_info (JSON) is used for the compatibility check.
        """
        instance_ref = objects.Instance(**self.test_instance)
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        compute_info = {'cpu_info': jsonutils.dumps({
            "vendor": "AMD",
            "arch": arch.I686,
            "features": ["sse3"],
            "model": "Opteron_G3",
            "topology": {"cores": 2, "threads": 1, "sockets": 4}
        })}
        filename = "file"

        # _check_cpu_match
        mock_cpu.return_value = 1
        # mounted_on_same_shared_storage
        mock_test_file.return_value = filename

        return_value = drvr.check_can_live_migrate_destination(self.context,
                              instance_ref, compute_info, compute_info, False)
        self.assertThat({"filename": "file",
                         "image_type": 'default',
                         "block_migration": False,
                         "disk_over_commit": False,
                         "disk_available_mb": None},
                        matchers.DictMatches(return_value))
@mock.patch.object(fakelibvirt.Connection, 'compareCPU')
def test_check_can_live_migrate_dest_incompatible_cpu_raises(
self, mock_cpu):
instance_ref = objects.Instance(**self.test_instance)
instance_ref.vcpu_model = test_vcpu_model.fake_vcpumodel
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
compute_info = {'cpu_info': 'asdf'}
mock_cpu.side_effect = exception.InvalidCPUInfo(reason='foo')
self.assertRaises(exception.InvalidCPUInfo,
drvr.check_can_live_migrate_destination,
self.context, instance_ref,
compute_info, compute_info, False)
@mock.patch.object(host.Host, 'compare_cpu')
@mock.patch.object(nova.virt.libvirt, 'config')
def test_compare_cpu_compatible_host_cpu(self, mock_vconfig, mock_compare):
mock_compare.return_value = 5
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
ret = conn._compare_cpu(None, jsonutils.dumps(_fake_cpu_info))
self.assertIsNone(ret)
    @mock.patch.object(host.Host, 'compare_cpu')
    @mock.patch.object(nova.virt.libvirt, 'config')
    def test_compare_cpu_handles_not_supported_error_gracefully(self,
                                                                mock_vconfig,
                                                                mock_compare):
        """A VIR_ERR_NO_SUPPORT error from libvirt's CPU comparison is
        treated as 'cannot check' rather than failure: _compare_cpu
        returns None.
        """
        not_supported_exc = fakelibvirt.make_libvirtError(
                fakelibvirt.libvirtError,
                'this function is not supported by the connection driver:'
                ' virCompareCPU',
                error_code=fakelibvirt.VIR_ERR_NO_SUPPORT)
        mock_compare.side_effect = not_supported_exc
        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        ret = conn._compare_cpu(None, jsonutils.dumps(_fake_cpu_info))
        self.assertIsNone(ret)
@mock.patch.object(host.Host, 'compare_cpu')
@mock.patch.object(nova.virt.libvirt.LibvirtDriver,
'_vcpu_model_to_cpu_config')
def test_compare_cpu_compatible_guest_cpu(self, mock_vcpu_to_cpu,
mock_compare):
mock_compare.return_value = 6
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
ret = conn._compare_cpu(jsonutils.dumps(_fake_cpu_info), None)
self.assertIsNone(ret)
def test_compare_cpu_virt_type_xen(self):
self.flags(virt_type='xen', group='libvirt')
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
ret = conn._compare_cpu(None, None)
self.assertIsNone(ret)
@mock.patch.object(host.Host, 'compare_cpu')
@mock.patch.object(nova.virt.libvirt, 'config')
def test_compare_cpu_invalid_cpuinfo_raises(self, mock_vconfig,
mock_compare):
mock_compare.return_value = 0
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.assertRaises(exception.InvalidCPUInfo,
conn._compare_cpu, None,
jsonutils.dumps(_fake_cpu_info))
@mock.patch.object(host.Host, 'compare_cpu')
@mock.patch.object(nova.virt.libvirt, 'config')
def test_compare_cpu_incompatible_cpu_raises(self, mock_vconfig,
mock_compare):
mock_compare.side_effect = fakelibvirt.libvirtError('cpu')
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.assertRaises(exception.MigrationPreCheckError,
conn._compare_cpu, None,
jsonutils.dumps(_fake_cpu_info))
    def test_check_can_live_migrate_dest_cleanup_works_correctly(self):
        """Destination cleanup removes the shared-storage test file named
        in dest_check_data.
        """
        # NOTE(review): the created Instance is discarded — presumably
        # only kept for its side effects, if any; confirm it is needed.
        objects.Instance(**self.test_instance)
        dest_check_data = {"filename": "file",
                           "block_migration": True,
                           "disk_over_commit": False,
                           "disk_available_mb": 1024}
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)

        self.mox.StubOutWithMock(drvr, '_cleanup_shared_storage_test_file')
        drvr._cleanup_shared_storage_test_file("file")

        self.mox.ReplayAll()
        drvr.check_can_live_migrate_destination_cleanup(self.context,
                                                        dest_check_data)
    def _mock_can_live_migrate_source(self, block_migration=False,
                                      is_shared_block_storage=False,
                                      is_shared_instance_path=False,
                                      disk_available_mb=1024,
                                      block_device_info=None):
        """Set up mox expectations for check_can_live_migrate_source.

        Stubs _is_shared_block_storage and _check_shared_storage_test_file
        with the given answers and returns (instance, dest_check_data,
        driver) for the caller to replay and assert on.
        """
        instance = objects.Instance(**self.test_instance)
        dest_check_data = {'filename': 'file',
                           'image_type': 'default',
                           'block_migration': block_migration,
                           'disk_over_commit': False,
                           'disk_available_mb': disk_available_mb}
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)

        self.mox.StubOutWithMock(drvr, '_is_shared_block_storage')
        drvr._is_shared_block_storage(instance, dest_check_data,
                block_device_info).AndReturn(is_shared_block_storage)
        self.mox.StubOutWithMock(drvr, '_check_shared_storage_test_file')
        drvr._check_shared_storage_test_file('file').AndReturn(
                is_shared_instance_path)

        return (instance, dest_check_data, drvr)
    def test_check_can_live_migrate_source_block_migration(self):
        """Block migration passes the source check and the result carries
        the shared-storage flags (including the legacy alias key).
        """
        instance, dest_check_data, drvr = self._mock_can_live_migrate_source(
                block_migration=True)

        self.mox.StubOutWithMock(drvr, "_assert_dest_node_has_enough_disk")
        drvr._assert_dest_node_has_enough_disk(
            self.context, instance, dest_check_data['disk_available_mb'],
            False, None)

        self.mox.ReplayAll()
        ret = drvr.check_can_live_migrate_source(self.context, instance,
                                                 dest_check_data)
        self.assertIsInstance(ret, dict)
        self.assertIn('is_shared_block_storage', ret)
        self.assertIn('is_shared_instance_path', ret)
        # 'is_shared_storage' is the legacy name and must mirror
        # 'is_shared_instance_path'.
        self.assertEqual(ret['is_shared_instance_path'],
                         ret['is_shared_storage'])
    def test_check_can_live_migrate_source_shared_block_storage(self):
        """Shared block storage alone satisfies the source check."""
        instance, dest_check_data, drvr = self._mock_can_live_migrate_source(
                is_shared_block_storage=True)
        self.mox.ReplayAll()
        drvr.check_can_live_migrate_source(self.context, instance,
                                           dest_check_data)
    def test_check_can_live_migrate_source_shared_instance_path(self):
        """A shared instance path alone satisfies the source check."""
        instance, dest_check_data, drvr = self._mock_can_live_migrate_source(
                is_shared_instance_path=True)
        self.mox.ReplayAll()
        drvr.check_can_live_migrate_source(self.context, instance,
                                           dest_check_data)
    def test_check_can_live_migrate_source_non_shared_fails(self):
        """Neither shared storage nor block migration -> InvalidSharedStorage."""
        instance, dest_check_data, drvr = self._mock_can_live_migrate_source()
        self.mox.ReplayAll()
        self.assertRaises(exception.InvalidSharedStorage,
                          drvr.check_can_live_migrate_source, self.context,
                          instance, dest_check_data)
    def test_check_can_live_migrate_source_shared_block_migration_fails(self):
        """Requesting block migration on shared block storage is invalid
        and raises InvalidLocalStorage.
        """
        instance, dest_check_data, drvr = self._mock_can_live_migrate_source(
                block_migration=True,
                is_shared_block_storage=True)
        self.mox.ReplayAll()
        self.assertRaises(exception.InvalidLocalStorage,
                          drvr.check_can_live_migrate_source,
                          self.context, instance, dest_check_data)
    def test_check_can_live_migrate_shared_path_block_migration_fails(self):
        """Requesting block migration on a shared instance path is invalid
        and raises InvalidLocalStorage.
        """
        instance, dest_check_data, drvr = self._mock_can_live_migrate_source(
                block_migration=True,
                is_shared_instance_path=True)
        self.mox.ReplayAll()
        # The trailing None is the (optional) block_device_info argument.
        self.assertRaises(exception.InvalidLocalStorage,
                          drvr.check_can_live_migrate_source,
                          self.context, instance, dest_check_data, None)
    def test_check_can_live_migrate_non_shared_non_block_migration_fails(self):
        """No shared storage and no block migration -> InvalidSharedStorage."""
        instance, dest_check_data, drvr = self._mock_can_live_migrate_source()
        self.mox.ReplayAll()
        self.assertRaises(exception.InvalidSharedStorage,
                          drvr.check_can_live_migrate_source,
                          self.context, instance, dest_check_data)
    def test_check_can_live_migrate_source_with_dest_not_enough_disk(self):
        """Block migration fails with MigrationError when the destination
        advertises no available disk (disk_available_mb=0).
        """
        instance, dest_check_data, drvr = self._mock_can_live_migrate_source(
                block_migration=True,
                disk_available_mb=0)

        self.mox.StubOutWithMock(drvr, "get_instance_disk_info")
        drvr.get_instance_disk_info(instance,
                                    block_device_info=None).AndReturn(
                                        '[{"virt_disk_size":2}]')

        self.mox.ReplayAll()
        self.assertRaises(exception.MigrationError,
                          drvr.check_can_live_migrate_source,
                          self.context, instance, dest_check_data)
def _is_shared_block_storage_test_create_mocks(self, disks):
# Test data
instance_xml = ("<domain type='kvm'><name>instance-0000000a</name>"
"<devices>{}</devices></domain>")
disks_xml = ''
for dsk in disks:
if dsk['type'] is not 'network':
disks_xml = ''.join([disks_xml,
"<disk type='{type}'>"
"<driver name='qemu' type='{driver}'/>"
"<source {source}='{source_path}'/>"
"<target dev='{target_dev}' bus='virtio'/>"
"</disk>".format(**dsk)])
else:
disks_xml = ''.join([disks_xml,
"<disk type='{type}'>"
"<driver name='qemu' type='{driver}'/>"
"<source protocol='{source_proto}'"
"name='{source_image}' >"
"<host name='hostname' port='7000'/>"
"<config file='/path/to/file'/>"
"</source>"
"<target dev='{target_dev}'"
"bus='ide'/>".format(**dsk)])
# Preparing mocks
mock_virDomain = mock.Mock(fakelibvirt.virDomain)
mock_virDomain.XMLDesc = mock.Mock()
mock_virDomain.XMLDesc.return_value = (instance_xml.format(disks_xml))
mock_lookup = mock.Mock()
def mock_lookup_side_effect(name):
return mock_virDomain
mock_lookup.side_effect = mock_lookup_side_effect
mock_getsize = mock.Mock()
mock_getsize.return_value = "10737418240"
return (mock_getsize, mock_lookup)
def test_is_shared_block_storage_rbd(self):
self.flags(images_type='rbd', group='libvirt')
bdi = {'block_device_mapping': []}
instance = objects.Instance(**self.test_instance)
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
mock_get_instance_disk_info = mock.Mock()
with mock.patch.object(drvr, 'get_instance_disk_info',
mock_get_instance_disk_info):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.assertTrue(drvr._is_shared_block_storage(instance,
{'image_type': 'rbd'},
block_device_info=bdi))
self.assertEqual(0, mock_get_instance_disk_info.call_count)
    def test_is_shared_block_storage_lvm(self):
        """lvm-backed images are local, so storage is not shared."""
        self.flags(images_type='lvm', group='libvirt')
        bdi = {'block_device_mapping': []}
        instance = objects.Instance(**self.test_instance)
        mock_get_instance_disk_info = mock.Mock()
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        # NOTE(review): the patch targets the first driver instance, but a
        # second instance is created and called inside the context — the
        # zero-call assertion is therefore trivially satisfied; confirm
        # this matches the intended coverage.
        with mock.patch.object(drvr, 'get_instance_disk_info',
                               mock_get_instance_disk_info):
            drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
            self.assertFalse(drvr._is_shared_block_storage(
                                    instance, {'image_type': 'lvm'},
                                    block_device_info=bdi))
            self.assertEqual(0, mock_get_instance_disk_info.call_count)
    def test_is_shared_block_storage_qcow2(self):
        """qcow2-backed images are local files, so storage is not shared."""
        self.flags(images_type='qcow2', group='libvirt')
        bdi = {'block_device_mapping': []}
        instance = objects.Instance(**self.test_instance)
        mock_get_instance_disk_info = mock.Mock()
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        with mock.patch.object(drvr, 'get_instance_disk_info',
                               mock_get_instance_disk_info):
            drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
            self.assertFalse(drvr._is_shared_block_storage(
                                    instance, {'image_type': 'qcow2'},
                                    block_device_info=bdi))
            self.assertEqual(0, mock_get_instance_disk_info.call_count)
    def test_is_shared_block_storage_rbd_only_source(self):
        """rbd on the source only (destination reports no rbd image_type
        and no shared path) is not shared block storage.
        """
        self.flags(images_type='rbd', group='libvirt')
        bdi = {'block_device_mapping': []}
        instance = objects.Instance(**self.test_instance)
        mock_get_instance_disk_info = mock.Mock()
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        with mock.patch.object(drvr, 'get_instance_disk_info',
                               mock_get_instance_disk_info):
            drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
            self.assertFalse(drvr._is_shared_block_storage(
                                    instance, {'is_shared_instance_path': False},
                                    block_device_info=bdi))
            self.assertEqual(0, mock_get_instance_disk_info.call_count)
    def test_is_shared_block_storage_rbd_only_dest(self):
        """rbd on the destination only (source not configured for rbd)
        is not shared block storage.
        """
        bdi = {'block_device_mapping': []}
        instance = objects.Instance(**self.test_instance)
        mock_get_instance_disk_info = mock.Mock()
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        with mock.patch.object(drvr, 'get_instance_disk_info',
                               mock_get_instance_disk_info):
            drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
            self.assertFalse(drvr._is_shared_block_storage(
                                    instance, {'image_type': 'rbd',
                                               'is_shared_instance_path': False},
                                    block_device_info=bdi))
            self.assertEqual(0, mock_get_instance_disk_info.call_count)
    def test_is_shared_block_storage_volume_backed(self):
        """A fully volume-backed instance (all disks are mapped volumes)
        counts as shared block storage.
        """
        disks = [{'type': 'block',
                 'driver': 'raw',
                 'source': 'dev',
                 'source_path': '/dev/disk',
                 'target_dev': 'vda'}]
        bdi = {'block_device_mapping': [
                  {'connection_info': 'info', 'mount_device': '/dev/vda'}]}
        instance = objects.Instance(**self.test_instance)
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        (mock_getsize, mock_lookup) =\
            self._is_shared_block_storage_test_create_mocks(disks)
        with mock.patch.object(host.Host, 'get_domain', mock_lookup):
            self.assertTrue(drvr._is_shared_block_storage(instance,
                                  {'is_volume_backed': True,
                                   'is_shared_instance_path': False},
                                  block_device_info = bdi))
        mock_lookup.assert_called_once_with(instance)
    def test_is_shared_block_storage_volume_backed_with_disk(self):
        """A volume-backed instance that also has a local file disk is
        NOT shared block storage; the local disk's size is inspected.
        """
        disks = [{'type': 'block',
                 'driver': 'raw',
                 'source': 'dev',
                 'source_path': '/dev/disk',
                 'target_dev': 'vda'},
                {'type': 'file',
                 'driver': 'raw',
                 'source': 'file',
                 'source_path': '/instance/disk.local',
                 'target_dev': 'vdb'}]
        bdi = {'block_device_mapping': [
                  {'connection_info': 'info', 'mount_device': '/dev/vda'}]}
        instance = objects.Instance(**self.test_instance)
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        (mock_getsize, mock_lookup) =\
            self._is_shared_block_storage_test_create_mocks(disks)
        with contextlib.nested(
                mock.patch.object(os.path, 'getsize', mock_getsize),
                mock.patch.object(host.Host, 'get_domain', mock_lookup)):
            self.assertFalse(drvr._is_shared_block_storage(
                                    instance,
                                    {'is_volume_backed': True,
                                     'is_shared_instance_path': False},
                                    block_device_info = bdi))
        # Only the local (non-volume) disk should have been sized.
        mock_getsize.assert_called_once_with('/instance/disk.local')
        mock_lookup.assert_called_once_with(instance)
    def test_is_shared_block_storage_nfs(self):
        """A shared instance path whose image backend stores files inside
        the instance path (e.g. NFS) counts as shared block storage.
        """
        bdi = {'block_device_mapping': []}
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        mock_image_backend = mock.MagicMock()
        drvr.image_backend = mock_image_backend
        mock_backend = mock.MagicMock()
        mock_image_backend.backend.return_value = mock_backend
        mock_backend.is_file_in_instance_path.return_value = True
        mock_get_instance_disk_info = mock.Mock()
        with mock.patch.object(drvr, 'get_instance_disk_info',
                               mock_get_instance_disk_info):
            self.assertTrue(drvr._is_shared_block_storage(
                                    'instance', {'is_shared_instance_path': True},
                                    block_device_info=bdi))
        self.assertEqual(0, mock_get_instance_disk_info.call_count)
    def test_live_migration_update_graphics_xml(self):
        """The migration XML passed to migrateToURI2 has its VNC/SPICE
        listen addresses rewritten to the destination's addresses; an
        error from libvirt propagates out of _live_migration_operation.
        """
        self.compute = importutils.import_object(CONF.compute_manager)
        instance_dict = dict(self.test_instance)
        instance_dict.update({'host': 'fake',
                              'power_state': power_state.RUNNING,
                              'vm_state': vm_states.ACTIVE})
        instance_ref = objects.Instance(**instance_dict)

        xml_tmpl = ("<domain type='kvm'>"
                    "<devices>"
                    "<graphics type='vnc' listen='{vnc}'>"
                    "<listen address='{vnc}'/>"
                    "</graphics>"
                    "<graphics type='spice' listen='{spice}'>"
                    "<listen address='{spice}'/>"
                    "</graphics>"
                    "</devices>"
                    "</domain>")

        initial_xml = xml_tmpl.format(vnc='1.2.3.4',
                                      spice='5.6.7.8')

        target_xml = xml_tmpl.format(vnc='10.0.0.1',
                                     spice='10.0.0.2')
        # Round-trip through lxml so formatting matches what the driver
        # will produce.
        target_xml = etree.tostring(etree.fromstring(target_xml))

        # Preparing mocks
        vdmock = self.mox.CreateMock(fakelibvirt.virDomain)
        self.mox.StubOutWithMock(vdmock, "migrateToURI2")
        _bandwidth = CONF.libvirt.live_migration_bandwidth
        vdmock.XMLDesc(flags=fakelibvirt.VIR_DOMAIN_XML_MIGRATABLE).AndReturn(
                initial_xml)
        vdmock.migrateToURI2(CONF.libvirt.live_migration_uri % 'dest',
                             None,
                             target_xml,
                             mox.IgnoreArg(),
                             None,
                             _bandwidth).AndRaise(
                fakelibvirt.libvirtError("ERR"))

        # start test
        migrate_data = {'pre_live_migration_result':
                {'graphics_listen_addrs':
                    {'vnc': '10.0.0.1', 'spice': '10.0.0.2'}}}
        self.mox.ReplayAll()
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        self.assertRaises(fakelibvirt.libvirtError,
                          drvr._live_migration_operation,
                          self.context, instance_ref, 'dest',
                          False, migrate_data, vdmock)
    def test_live_migration_update_volume_xml(self):
        """During live migration the domain XML is updated with the
        destination's volume connection info via _update_xml.
        """
        self.compute = importutils.import_object(CONF.compute_manager)
        instance_dict = dict(self.test_instance)
        instance_dict.update({'host': 'fake',
                              'power_state': power_state.RUNNING,
                              'vm_state': vm_states.ACTIVE})
        instance_ref = objects.Instance(**instance_dict)
        target_xml = self.device_xml_tmpl.format(
            device_path='/dev/disk/by-path/'
                        'ip-1.2.3.4:3260-iqn.'
                        'cde.67890.opst-lun-Z')

        # start test
        migrate_data = {'pre_live_migration_result':
        {'volume': {u'58a84f6d-3f0c-4e19-a0af-eb657b790657':
          {'connection_info': {u'driver_volume_type': u'iscsi',
            'serial': u'58a84f6d-3f0c-4e19-a0af-eb657b790657',
              u'data': {u'access_mode': u'rw', u'target_discovered': False,
              u'target_iqn': u'ip-1.2.3.4:3260-iqn.cde.67890.opst-lun-Z',
              u'volume_id': u'58a84f6d-3f0c-4e19-a0af-eb657b790657',
              'device_path':
              u'/dev/disk/by-path/ip-1.2.3.4:3260-iqn.cde.67890.opst-lun-Z'}},
           'disk_info': {'bus': u'virtio', 'type': u'disk', 'dev': u'vdb'}}}},
         'graphics_listen_addrs': {}}

        pre_live_migrate_data = ((migrate_data or {}).
                                  get('pre_live_migration_result', {}))
        volume = pre_live_migrate_data.get('volume')

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        test_mock = mock.MagicMock()

        with mock.patch.object(libvirt_driver.LibvirtDriver, 'get_info') as \
                mget_info,\
                mock.patch.object(drvr._host, 'get_domain') as mget_domain,\
                mock.patch.object(fakelibvirt.virDomain, 'migrateToURI2'),\
                mock.patch.object(drvr, '_update_xml') as mupdate:

            # get_info raising InstanceNotFound terminates the migration
            # monitoring loop early.
            mget_info.side_effect = exception.InstanceNotFound(
                                     instance_id='foo')
            mget_domain.return_value = test_mock
            test_mock.XMLDesc.return_value = target_xml
            self.assertFalse(drvr._live_migration_operation(
                             self.context, instance_ref, 'dest', False,
                             migrate_data, test_mock))
            mupdate.assert_called_once_with(target_xml, volume, None)
    def test_update_volume_xml(self):
        """_update_volume_xml rewrites the disk source path, matched by the
        disk <serial>, to the destination connection info.
        """
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        initial_xml = self.device_xml_tmpl.format(
            device_path='/dev/disk/by-path/'
                        'ip-1.2.3.4:3260-iqn.'
                        'abc.12345.opst-lun-X')
        target_xml = self.device_xml_tmpl.format(
            device_path='/dev/disk/by-path/'
                        'ip-1.2.3.4:3260-iqn.'
                        'cde.67890.opst-lun-Z')
        target_xml = etree.tostring(etree.fromstring(target_xml))
        serial = "58a84f6d-3f0c-4e19-a0af-eb657b790657"
        # Mapping keyed by volume serial, mirroring pre_live_migration output.
        volume_xml = {'volume': {}}
        volume_xml['volume'][serial] = {'connection_info': {}, 'disk_info': {}}
        volume_xml['volume'][serial]['connection_info'] = \
            {u'driver_volume_type': u'iscsi',
            'serial': u'58a84f6d-3f0c-4e19-a0af-eb657b790657',
             u'data': {u'access_mode': u'rw', u'target_discovered': False,
             u'target_iqn': u'ip-1.2.3.4:3260-iqn.cde.67890.opst-lun-Z',
             u'volume_id': u'58a84f6d-3f0c-4e19-a0af-eb657b790657',
             'device_path':
             u'/dev/disk/by-path/ip-1.2.3.4:3260-iqn.cde.67890.opst-lun-Z'}}
        volume_xml['volume'][serial]['disk_info'] = {'bus': u'virtio',
                                                     'type': u'disk',
                                                     'dev': u'vdb'}
        connection_info = volume_xml['volume'][serial]['connection_info']
        disk_info = volume_xml['volume'][serial]['disk_info']
        # Guest disk config _get_volume_config is stubbed to hand back.
        conf = vconfig.LibvirtConfigGuestDisk()
        conf.source_device = disk_info['type']
        conf.driver_name = "qemu"
        conf.driver_format = "raw"
        conf.driver_cache = "none"
        conf.target_dev = disk_info['dev']
        conf.target_bus = disk_info['bus']
        conf.serial = connection_info.get('serial')
        conf.source_type = "block"
        conf.source_path = connection_info['data'].get('device_path')
        with mock.patch.object(drvr, '_get_volume_config',
                               return_value=conf):
            # remove_blank_text makes the serialized comparison
            # whitespace-insensitive.
            parser = etree.XMLParser(remove_blank_text=True)
            xml_doc = etree.fromstring(initial_xml, parser)
            config = drvr._update_volume_xml(xml_doc,
                                             volume_xml['volume'])
            xml_doc = etree.fromstring(target_xml, parser)
            self.assertEqual(etree.tostring(xml_doc), etree.tostring(config))
    def test_update_volume_xml_no_serial(self):
        """A disk with an empty <serial> cannot be matched to a volume, so
        _update_volume_xml must leave the XML unchanged.
        """
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        xml_tmpl = """
        <domain type='kvm'>
          <devices>
            <disk type='block' device='disk'>
              <driver name='qemu' type='raw' cache='none'/>
              <source dev='{device_path}'/>
              <target bus='virtio' dev='vdb'/>
              <serial></serial>
              <address type='pci' domain='0x0' bus='0x0' slot='0x04' \
              function='0x0'/>
            </disk>
          </devices>
        </domain>
        """
        # Source and target are deliberately identical: no rewrite expected.
        initial_xml = xml_tmpl.format(device_path='/dev/disk/by-path/'
                                      'ip-1.2.3.4:3260-iqn.'
                                      'abc.12345.opst-lun-X')
        target_xml = xml_tmpl.format(device_path='/dev/disk/by-path/'
                                     'ip-1.2.3.4:3260-iqn.'
                                     'abc.12345.opst-lun-X')
        target_xml = etree.tostring(etree.fromstring(target_xml))
        serial = "58a84f6d-3f0c-4e19-a0af-eb657b790657"
        volume_xml = {'volume': {}}
        volume_xml['volume'][serial] = {'connection_info': {}, 'disk_info': {}}
        volume_xml['volume'][serial]['connection_info'] = \
            {u'driver_volume_type': u'iscsi',
            'serial': u'58a84f6d-3f0c-4e19-a0af-eb657b790657',
             u'data': {u'access_mode': u'rw', u'target_discovered': False,
             u'target_iqn': u'ip-1.2.3.4:3260-iqn.cde.67890.opst-lun-Z',
             u'volume_id': u'58a84f6d-3f0c-4e19-a0af-eb657b790657',
             'device_path':
             u'/dev/disk/by-path/ip-1.2.3.4:3260-iqn.cde.67890.opst-lun-Z'}}
        volume_xml['volume'][serial]['disk_info'] = {'bus': u'virtio',
                                                     'type': u'disk',
                                                     'dev': u'vdb'}
        connection_info = volume_xml['volume'][serial]['connection_info']
        disk_info = volume_xml['volume'][serial]['disk_info']
        conf = vconfig.LibvirtConfigGuestDisk()
        conf.source_device = disk_info['type']
        conf.driver_name = "qemu"
        conf.driver_format = "raw"
        conf.driver_cache = "none"
        conf.target_dev = disk_info['dev']
        conf.target_bus = disk_info['bus']
        conf.serial = connection_info.get('serial')
        conf.source_type = "block"
        conf.source_path = connection_info['data'].get('device_path')
        with mock.patch.object(drvr, '_get_volume_config',
                               return_value=conf):
            xml_doc = etree.fromstring(initial_xml)
            config = drvr._update_volume_xml(xml_doc,
                                             volume_xml['volume'])
            self.assertEqual(target_xml, etree.tostring(config))
    def test_update_volume_xml_no_connection_info(self):
        """A volume entry without a 'connection_info' key must not cause
        any rewrite: the XML comes back unchanged.
        """
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        initial_xml = self.device_xml_tmpl.format(
            device_path='/dev/disk/by-path/'
                        'ip-1.2.3.4:3260-iqn.'
                        'abc.12345.opst-lun-X')
        # Target equals the input: nothing should be modified.
        target_xml = self.device_xml_tmpl.format(
            device_path='/dev/disk/by-path/'
                        'ip-1.2.3.4:3260-iqn.'
                        'abc.12345.opst-lun-X')
        target_xml = etree.tostring(etree.fromstring(target_xml))
        serial = "58a84f6d-3f0c-4e19-a0af-eb657b790657"
        volume_xml = {'volume': {}}
        # Deliberately malformed entry: no connection_info/disk_info keys.
        volume_xml['volume'][serial] = {'info1': {}, 'info2': {}}
        conf = vconfig.LibvirtConfigGuestDisk()
        with mock.patch.object(drvr, '_get_volume_config',
                               return_value=conf):
            xml_doc = etree.fromstring(initial_xml)
            config = drvr._update_volume_xml(xml_doc,
                                             volume_xml['volume'])
            self.assertEqual(target_xml, etree.tostring(config))
    @mock.patch.object(fakelibvirt, 'VIR_DOMAIN_XML_MIGRATABLE', None,
                       create=True)
    def test_live_migration_uses_migrateToURI_without_migratable_flag(self):
        """Without VIR_DOMAIN_XML_MIGRATABLE (patched to None) the driver
        falls back to plain migrateToURI; its libvirtError propagates.
        The 0.0.0.0 listen addresses make the fallback acceptable.
        """
        self.compute = importutils.import_object(CONF.compute_manager)
        instance_dict = dict(self.test_instance)
        instance_dict.update({'host': 'fake',
                              'power_state': power_state.RUNNING,
                              'vm_state': vm_states.ACTIVE})
        instance_ref = objects.Instance(**instance_dict)
        # Preparing mocks: record the expected migrateToURI call.
        vdmock = self.mox.CreateMock(fakelibvirt.virDomain)
        self.mox.StubOutWithMock(vdmock, "migrateToURI")
        _bandwidth = CONF.libvirt.live_migration_bandwidth
        vdmock.migrateToURI(CONF.libvirt.live_migration_uri % 'dest',
                            mox.IgnoreArg(),
                            None,
                            _bandwidth).AndRaise(
                                fakelibvirt.libvirtError("ERR"))
        # start test
        migrate_data = {'pre_live_migration_result':
                        {'graphics_listen_addrs':
                         {'vnc': '0.0.0.0', 'spice': '0.0.0.0'}}}
        self.mox.ReplayAll()
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        self.assertRaises(fakelibvirt.libvirtError,
                          drvr._live_migration_operation,
                          self.context, instance_ref, 'dest',
                          False, migrate_data, vdmock)
    def test_live_migration_uses_migrateToURI_without_dest_listen_addrs(self):
        """With empty migrate_data (no graphics listen addresses from the
        destination) the driver uses migrateToURI; its error propagates.
        """
        self.compute = importutils.import_object(CONF.compute_manager)
        instance_dict = dict(self.test_instance)
        instance_dict.update({'host': 'fake',
                              'power_state': power_state.RUNNING,
                              'vm_state': vm_states.ACTIVE})
        instance_ref = objects.Instance(**instance_dict)
        # Preparing mocks: record the expected migrateToURI call.
        vdmock = self.mox.CreateMock(fakelibvirt.virDomain)
        self.mox.StubOutWithMock(vdmock, "migrateToURI")
        _bandwidth = CONF.libvirt.live_migration_bandwidth
        vdmock.migrateToURI(CONF.libvirt.live_migration_uri % 'dest',
                            mox.IgnoreArg(),
                            None,
                            _bandwidth).AndRaise(
                                fakelibvirt.libvirtError("ERR"))
        # start test
        migrate_data = {}
        self.mox.ReplayAll()
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        self.assertRaises(fakelibvirt.libvirtError,
                          drvr._live_migration_operation,
                          self.context, instance_ref, 'dest',
                          False, migrate_data, vdmock)
    @mock.patch.object(fakelibvirt, 'VIR_DOMAIN_XML_MIGRATABLE', None,
                       create=True)
    def test_live_migration_fails_without_migratable_flag_or_0_addr(self):
        """No migratable flag AND a specific (non-0.0.0.0) graphics listen
        address: migration must be refused with MigrationError before any
        libvirt call is made (no expectations are recorded on vdmock).
        """
        self.flags(vnc_enabled=True, vncserver_listen='1.2.3.4')
        self.compute = importutils.import_object(CONF.compute_manager)
        instance_dict = dict(self.test_instance)
        instance_dict.update({'host': 'fake',
                              'power_state': power_state.RUNNING,
                              'vm_state': vm_states.ACTIVE})
        instance_ref = objects.Instance(**instance_dict)
        # Preparing mocks: no calls are expected on the domain.
        vdmock = self.mox.CreateMock(fakelibvirt.virDomain)
        self.mox.StubOutWithMock(vdmock, "migrateToURI")
        # start test
        migrate_data = {'pre_live_migration_result':
                        {'graphics_listen_addrs':
                         {'vnc': '1.2.3.4', 'spice': '1.2.3.4'}}}
        self.mox.ReplayAll()
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        self.assertRaises(exception.MigrationError,
                          drvr._live_migration_operation,
                          self.context, instance_ref, 'dest',
                          False, migrate_data, vdmock)
    def test_live_migration_raises_exception(self):
        # Confirms recover method is called when exceptions are raised.
        # The instance must still be ACTIVE/RUNNING afterwards — the
        # failed migration may not change its state.
        # Preparing data
        self.compute = importutils.import_object(CONF.compute_manager)
        instance_dict = dict(self.test_instance)
        instance_dict.update({'host': 'fake',
                              'power_state': power_state.RUNNING,
                              'vm_state': vm_states.ACTIVE})
        instance_ref = objects.Instance(**instance_dict)
        # Preparing mocks
        vdmock = self.mox.CreateMock(fakelibvirt.virDomain)
        self.mox.StubOutWithMock(vdmock, "migrateToURI2")
        _bandwidth = CONF.libvirt.live_migration_bandwidth
        # Record the call matching whichever API the driver will pick,
        # depending on whether the migratable flag exists in fakelibvirt.
        if getattr(fakelibvirt, 'VIR_DOMAIN_XML_MIGRATABLE', None) is None:
            vdmock.migrateToURI(CONF.libvirt.live_migration_uri % 'dest',
                                mox.IgnoreArg(),
                                None,
                                _bandwidth).AndRaise(
                                        fakelibvirt.libvirtError('ERR'))
        else:
            vdmock.XMLDesc(flags=fakelibvirt.VIR_DOMAIN_XML_MIGRATABLE
                           ).AndReturn(FakeVirtDomain().XMLDesc(flags=0))
            vdmock.migrateToURI2(CONF.libvirt.live_migration_uri % 'dest',
                                 None,
                                 mox.IgnoreArg(),
                                 mox.IgnoreArg(),
                                 None,
                                 _bandwidth).AndRaise(
                                         fakelibvirt.libvirtError('ERR'))
        # start test
        migrate_data = {'pre_live_migration_result':
                        {'graphics_listen_addrs':
                         {'vnc': '127.0.0.1', 'spice': '127.0.0.1'}}}
        self.mox.ReplayAll()
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        self.assertRaises(fakelibvirt.libvirtError,
                          drvr._live_migration_operation,
                          self.context, instance_ref, 'dest',
                          False, migrate_data, vdmock)
        self.assertEqual(vm_states.ACTIVE, instance_ref.vm_state)
        self.assertEqual(power_state.RUNNING, instance_ref.power_state)
    def test_live_migration_raises_unsupported_config_exception(self):
        # Tests that when migrateToURI2 fails with VIR_ERR_CONFIG_UNSUPPORTED,
        # migrateToURI is used instead.
        # Preparing data
        instance_ref = objects.Instance(**self.test_instance)
        # Preparing mocks
        vdmock = self.mox.CreateMock(fakelibvirt.virDomain)
        self.mox.StubOutWithMock(vdmock, 'migrateToURI2')
        self.mox.StubOutWithMock(vdmock, 'migrateToURI')
        _bandwidth = CONF.libvirt.live_migration_bandwidth
        vdmock.XMLDesc(flags=fakelibvirt.VIR_DOMAIN_XML_MIGRATABLE).AndReturn(
            FakeVirtDomain().XMLDesc(flags=0))
        unsupported_config_error = fakelibvirt.libvirtError('ERR')
        unsupported_config_error.err = (
            fakelibvirt.VIR_ERR_CONFIG_UNSUPPORTED,)
        # This is the first error we hit but since the error code is
        # VIR_ERR_CONFIG_UNSUPPORTED we'll try migrateToURI.
        vdmock.migrateToURI2(CONF.libvirt.live_migration_uri % 'dest', None,
                             mox.IgnoreArg(), mox.IgnoreArg(), None,
                             _bandwidth).AndRaise(unsupported_config_error)
        # This is the second and final error that will actually kill the run,
        # we use TestingException to make sure it's not the same libvirtError
        # above.
        vdmock.migrateToURI(CONF.libvirt.live_migration_uri % 'dest',
                            mox.IgnoreArg(), None,
                            _bandwidth).AndRaise(test.TestingException('oops'))
        graphics_listen_addrs = {'vnc': '0.0.0.0', 'spice': '127.0.0.1'}
        migrate_data = {'pre_live_migration_result':
                {'graphics_listen_addrs': graphics_listen_addrs}}
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        # The graphics-address check itself is stubbed out of the way.
        self.mox.StubOutWithMock(
            drvr, '_check_graphics_addresses_can_live_migrate')
        drvr._check_graphics_addresses_can_live_migrate(graphics_listen_addrs)
        self.mox.ReplayAll()
        # start test
        self.assertRaises(test.TestingException,
                          drvr._live_migration_operation,
                          self.context, instance_ref, 'dest',
                          False, migrate_data, vdmock)
    @mock.patch('shutil.rmtree')
    @mock.patch('os.path.exists', return_value=True)
    @mock.patch('nova.virt.libvirt.utils.get_instance_path_at_destination')
    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver.destroy')
    def test_rollback_live_migration_at_dest_not_shared(self, mock_destroy,
                                                        mock_get_instance_path,
                                                        mock_exist,
                                                        mock_shutil
                                                        ):
        """Rollback on non-shared instance storage removes the instance
        directory even when destroy() fails; the destroy error propagates.
        """
        # destroy method may raise InstanceTerminationFailure or
        # InstancePowerOffFailure, here use their base class Invalid.
        mock_destroy.side_effect = exception.Invalid(reason='just test')
        fake_instance_path = os.path.join(cfg.CONF.instances_path,
                                          '/fake_instance_uuid')
        mock_get_instance_path.return_value = fake_instance_path
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        migrate_data = {'is_shared_instance_path': False}
        self.assertRaises(exception.Invalid,
                          drvr.rollback_live_migration_at_destination,
                          "context", "instance", [], None, True, migrate_data)
        # The directory cleanup must have happened despite the failure.
        mock_exist.assert_called_once_with(fake_instance_path)
        mock_shutil.assert_called_once_with(fake_instance_path)
@mock.patch('shutil.rmtree')
@mock.patch('os.path.exists')
@mock.patch('nova.virt.libvirt.utils.get_instance_path_at_destination')
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver.destroy')
def test_rollback_live_migration_at_dest_shared(self, mock_destroy,
mock_get_instance_path,
mock_exist,
mock_shutil
):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
migrate_data = {'is_shared_instance_path': True}
drvr.rollback_live_migration_at_destination("context", "instance", [],
None, True, migrate_data)
mock_destroy.assert_called_once_with("context", "instance", [],
None, True, migrate_data)
self.assertFalse(mock_get_instance_path.called)
self.assertFalse(mock_exist.called)
self.assertFalse(mock_shutil.called)
@mock.patch.object(time, "sleep",
side_effect=lambda x: eventlet.sleep(0))
@mock.patch.object(host.DomainJobInfo, "for_domain")
@mock.patch.object(objects.Instance, "save")
@mock.patch.object(fakelibvirt.Connection, "_mark_running")
def _test_live_migration_monitoring(self,
job_info_records,
expect_success,
mock_running,
mock_save,
mock_job_info,
mock_sleep):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
instance = objects.Instance(**self.test_instance)
dom = fakelibvirt.Domain(drvr._get_connection(), "<domain/>", True)
finish_event = eventlet.event.Event()
def fake_job_info(hostself):
while True:
self.assertTrue(len(job_info_records) > 0)
rec = job_info_records.pop()
if type(rec) == str:
if rec == "thread-finish":
finish_event.send()
elif rec == "domain-stop":
dom.destroy()
else:
return rec
return rec
mock_job_info.side_effect = fake_job_info
dest = mock.sentinel.migrate_dest
migrate_data = mock.sentinel.migrate_data
fake_post_method = mock.MagicMock()
fake_recover_method = mock.MagicMock()
drvr._live_migration_monitor(self.context, instance,
dest,
fake_post_method,
fake_recover_method,
False,
migrate_data,
dom,
finish_event)
if expect_success:
self.assertFalse(fake_recover_method.called,
'Recover method called when success expected')
fake_post_method.assert_called_once_with(
self.context, instance, dest, False, migrate_data)
else:
self.assertFalse(fake_post_method.called,
'Post method called when success not expected')
fake_recover_method.assert_called_once_with(
self.context, instance, dest, False, migrate_data)
def test_live_migration_monitor_success(self):
# A normal sequence where see all the normal job states
domain_info_records = [
host.DomainJobInfo(
type=fakelibvirt.VIR_DOMAIN_JOB_NONE),
host.DomainJobInfo(
type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED),
host.DomainJobInfo(
type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED),
host.DomainJobInfo(
type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED),
"thread-finish",
"domain-stop",
host.DomainJobInfo(
type=fakelibvirt.VIR_DOMAIN_JOB_COMPLETED),
]
self._test_live_migration_monitoring(domain_info_records, True)
def test_live_migration_monitor_success_race(self):
# A normalish sequence but we're too slow to see the
# completed job state
domain_info_records = [
host.DomainJobInfo(
type=fakelibvirt.VIR_DOMAIN_JOB_NONE),
host.DomainJobInfo(
type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED),
host.DomainJobInfo(
type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED),
host.DomainJobInfo(
type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED),
"thread-finish",
"domain-stop",
host.DomainJobInfo(
type=fakelibvirt.VIR_DOMAIN_JOB_NONE),
]
self._test_live_migration_monitoring(domain_info_records, True)
def test_live_migration_monitor_failed(self):
# A failed sequence where we see all the expected events
domain_info_records = [
host.DomainJobInfo(
type=fakelibvirt.VIR_DOMAIN_JOB_NONE),
host.DomainJobInfo(
type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED),
host.DomainJobInfo(
type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED),
host.DomainJobInfo(
type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED),
"thread-finish",
host.DomainJobInfo(
type=fakelibvirt.VIR_DOMAIN_JOB_FAILED),
]
self._test_live_migration_monitoring(domain_info_records, False)
def test_live_migration_monitor_failed_race(self):
# A failed sequence where we are too slow to see the
# failed event
domain_info_records = [
host.DomainJobInfo(
type=fakelibvirt.VIR_DOMAIN_JOB_NONE),
host.DomainJobInfo(
type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED),
host.DomainJobInfo(
type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED),
host.DomainJobInfo(
type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED),
"thread-finish",
host.DomainJobInfo(
type=fakelibvirt.VIR_DOMAIN_JOB_NONE),
]
self._test_live_migration_monitoring(domain_info_records, False)
def test_live_migration_monitor_cancelled(self):
# A cancelled sequence where we see all the events
domain_info_records = [
host.DomainJobInfo(
type=fakelibvirt.VIR_DOMAIN_JOB_NONE),
host.DomainJobInfo(
type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED),
host.DomainJobInfo(
type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED),
host.DomainJobInfo(
type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED),
"thread-finish",
"domain-stop",
host.DomainJobInfo(
type=fakelibvirt.VIR_DOMAIN_JOB_CANCELLED),
]
self._test_live_migration_monitoring(domain_info_records, False)
    @mock.patch.object(greenthread, "spawn")
    @mock.patch.object(libvirt_driver.LibvirtDriver, "_live_migration_monitor")
    @mock.patch.object(host.Host, "get_domain")
    @mock.patch.object(fakelibvirt.Connection, "_mark_running")
    def test_live_migration_main(self, mock_running, mock_dom,
                                 mock_monitor, mock_thread):
        """_live_migration spawns the operation in a green thread and runs
        the monitor with a freshly created finish event.
        """
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        instance = objects.Instance(**self.test_instance)
        dom = fakelibvirt.Domain(drvr._get_connection(), "<domain/>", True)
        migrate_data = {}
        mock_dom.return_value = dom

        def fake_post():
            pass

        def fake_recover():
            pass

        drvr._live_migration(self.context, instance, "fakehost",
                             fake_post, fake_recover, False,
                             migrate_data)

        class AnyEventletEvent(object):
            # Equality matcher: accepts any eventlet Event instance, since
            # the event object is created inside _live_migration.
            def __eq__(self, other):
                return type(other) == eventlet.event.Event

        mock_thread.assert_called_once_with(
            drvr._live_migration_operation,
            self.context, instance, "fakehost", False,
            migrate_data, dom)
        mock_monitor.assert_called_once_with(
            self.context, instance, "fakehost",
            fake_post, fake_recover, False,
            migrate_data, dom, AnyEventletEvent())
    def _do_test_create_images_and_backing(self, disk_type):
        """Common body for the qcow2/raw variants: a disk with no backing
        file is created via libvirt_utils.create_image and the kernel and
        ramdisk are fetched.
        """
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        self.mox.StubOutWithMock(drvr, '_fetch_instance_kernel_ramdisk')
        self.mox.StubOutWithMock(libvirt_driver.libvirt_utils, 'create_image')

        disk_info = {'path': 'foo', 'type': disk_type,
                     'disk_size': 1 * 1024 ** 3,
                     'virt_disk_size': 20 * 1024 ** 3,
                     'backing_file': None}

        libvirt_driver.libvirt_utils.create_image(
            disk_info['type'], mox.IgnoreArg(), disk_info['virt_disk_size'])
        drvr._fetch_instance_kernel_ramdisk(self.context, self.test_instance,
                                            fallback_from_host=None)
        self.mox.ReplayAll()

        # Pretend nothing exists on disk, forcing the create path.
        self.stubs.Set(os.path, 'exists', lambda *args: False)
        drvr._create_images_and_backing(self.context, self.test_instance,
                                        "/fake/instance/dir", [disk_info])
    def test_create_images_and_backing_qcow2(self):
        """qcow2 variant of _do_test_create_images_and_backing."""
        self._do_test_create_images_and_backing('qcow2')
    def test_create_images_and_backing_raw(self):
        """raw variant of _do_test_create_images_and_backing."""
        self._do_test_create_images_and_backing('raw')
    def test_create_images_and_backing_images_not_exist_no_fallback(self):
        """When the backing image cannot be fetched from glance and no
        fallback host is given, ImageNotFound propagates.
        """
        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        disk_info = [
            {u'backing_file': u'fake_image_backing_file',
             u'disk_size': 10747904,
             u'path': u'disk_path',
             u'type': u'qcow2',
             u'virt_disk_size': 25165824}]

        self.test_instance.update({'user_id': 'fake-user',
                                   'os_type': None,
                                   'project_id': 'fake-project'})
        instance = objects.Instance(**self.test_instance)
        with mock.patch.object(libvirt_driver.libvirt_utils, 'fetch_image',
                               side_effect=exception.ImageNotFound(
                                   image_id="fake_id")):
            self.assertRaises(exception.ImageNotFound,
                              conn._create_images_and_backing,
                              self.context, instance,
                              "/fake/instance/dir", disk_info)
    def test_create_images_and_backing_images_not_exist_fallback(self):
        """When glance no longer has the image but a fallback host is
        given, the backing file, kernel and ramdisk are copied over from
        that host instead.
        """
        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        disk_info = [
            {u'backing_file': u'fake_image_backing_file',
             u'disk_size': 10747904,
             u'path': u'disk_path',
             u'type': u'qcow2',
             u'virt_disk_size': 25165824}]

        base_dir = os.path.join(CONF.instances_path,
                                CONF.image_cache_subdirectory_name)
        self.test_instance.update({'user_id': 'fake-user',
                                   'os_type': None,
                                   'kernel_id': 'fake_kernel_id',
                                   'ramdisk_id': 'fake_ramdisk_id',
                                   'project_id': 'fake-project'})
        instance = objects.Instance(**self.test_instance)
        with contextlib.nested(
            mock.patch.object(libvirt_driver.libvirt_utils, 'copy_image'),
            mock.patch.object(libvirt_driver.libvirt_utils, 'fetch_image',
                              side_effect=exception.ImageNotFound(
                                  image_id="fake_id")),
        ) as (copy_image_mock, fetch_image_mock):
            conn._create_images_and_backing(self.context, instance,
                                            "/fake/instance/dir", disk_info,
                                            fallback_from_host="fake_host")
            backfile_path = os.path.join(base_dir, 'fake_image_backing_file')
            kernel_path = os.path.join(CONF.instances_path,
                                       self.test_instance['uuid'],
                                       'kernel')
            ramdisk_path = os.path.join(CONF.instances_path,
                                        self.test_instance['uuid'],
                                        'ramdisk')
            # All three files must be pulled from the fallback host.
            copy_image_mock.assert_has_calls([
                mock.call(dest=backfile_path, src=backfile_path,
                          host='fake_host', receive=True),
                mock.call(dest=kernel_path, src=kernel_path,
                          host='fake_host', receive=True),
                mock.call(dest=ramdisk_path, src=ramdisk_path,
                          host='fake_host', receive=True)
            ])
            # The glance fetches were attempted first (and failed).
            fetch_image_mock.assert_has_calls([
                mock.call(context=self.context,
                          target=backfile_path,
                          image_id=self.test_instance['image_ref'],
                          user_id=self.test_instance['user_id'],
                          project_id=self.test_instance['project_id'],
                          max_size=25165824),
                mock.call(self.context, kernel_path,
                          self.test_instance['kernel_id'],
                          self.test_instance['user_id'],
                          self.test_instance['project_id']),
                mock.call(self.context, ramdisk_path,
                          self.test_instance['ramdisk_id'],
                          self.test_instance['user_id'],
                          self.test_instance['project_id']),
            ])
    def test_create_images_and_backing_ephemeral_gets_created(self):
        """An ephemeral backing file in disk_info is created via
        _create_ephemeral (not fetched from glance), and base sizes of
        both backing files are verified.
        """
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        disk_info = [
            {u'backing_file': u'fake_image_backing_file',
             u'disk_size': 10747904,
             u'path': u'disk_path',
             u'type': u'qcow2',
             u'virt_disk_size': 25165824},
            {u'backing_file': u'ephemeral_1_default',
             u'disk_size': 393216,
             u'over_committed_disk_size': 1073348608,
             u'path': u'disk_eph_path',
             u'type': u'qcow2',
             u'virt_disk_size': 1073741824}]

        base_dir = os.path.join(CONF.instances_path,
                                CONF.image_cache_subdirectory_name)
        instance = objects.Instance(**self.test_instance)
        with contextlib.nested(
            mock.patch.object(drvr, '_fetch_instance_kernel_ramdisk'),
            mock.patch.object(libvirt_driver.libvirt_utils, 'fetch_image'),
            mock.patch.object(drvr, '_create_ephemeral'),
            mock.patch.object(imagebackend.Image, 'verify_base_size')
        ) as (fetch_kernel_ramdisk_mock, fetch_image_mock,
                create_ephemeral_mock, verify_base_size_mock):
            drvr._create_images_and_backing(self.context, instance,
                                            "/fake/instance/dir",
                                            disk_info)
            # Exactly one ephemeral created, targeted at the image cache.
            self.assertEqual(len(create_ephemeral_mock.call_args_list), 1)
            m_args, m_kwargs = create_ephemeral_mock.call_args_list[0]
            self.assertEqual(
                    os.path.join(base_dir, 'ephemeral_1_default'),
                    m_kwargs['target'])
            # Exactly one glance fetch, for the regular backing file.
            self.assertEqual(len(fetch_image_mock.call_args_list), 1)
            m_args, m_kwargs = fetch_image_mock.call_args_list[0]
            self.assertEqual(
                    os.path.join(base_dir, 'fake_image_backing_file'),
                    m_kwargs['target'])
            verify_base_size_mock.assert_has_calls([
                mock.call(os.path.join(base_dir, 'fake_image_backing_file'),
                          25165824),
                mock.call(os.path.join(base_dir, 'ephemeral_1_default'),
                          1073741824)
            ])
    def test_create_images_and_backing_disk_info_none(self):
        """With disk_info=None only the kernel/ramdisk fetch happens; no
        image creation is attempted.
        """
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        self.mox.StubOutWithMock(drvr, '_fetch_instance_kernel_ramdisk')

        drvr._fetch_instance_kernel_ramdisk(self.context, self.test_instance,
                                            fallback_from_host=None)
        self.mox.ReplayAll()

        drvr._create_images_and_backing(self.context, self.test_instance,
                                        "/fake/instance/dir", None)
    def test_pre_live_migration_works_correctly_mocked(self):
        """pre_live_migration connects every mapped volume, plugs VIFs and
        returns the per-volume connection/disk info plus the graphics
        listen addresses.
        """
        # Creating testdata
        vol = {'block_device_mapping': [
                  {'connection_info': {'serial': '12345', u'data':
                  {'device_path':
                  u'/dev/disk/by-path/ip-1.2.3.4:3260-iqn.abc.12345.opst-lun-X'}},
                   'mount_device': '/dev/sda'},
                  {'connection_info': {'serial': '67890', u'data':
                  {'device_path':
                  u'/dev/disk/by-path/ip-1.2.3.4:3260-iqn.cde.67890.opst-lun-Z'}},
                   'mount_device': '/dev/sdb'}]}
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)

        class FakeNetworkInfo(object):
            def fixed_ips(self):
                return ["test_ip_addr"]

        def fake_none(*args, **kwargs):
            return

        self.stubs.Set(drvr, '_create_images_and_backing', fake_none)

        instance = objects.Instance(**self.test_instance)
        c = context.get_admin_context()
        nw_info = FakeNetworkInfo()

        # Creating mocks
        self.mox.StubOutWithMock(driver, "block_device_info_get_mapping")
        driver.block_device_info_get_mapping(vol
            ).AndReturn(vol['block_device_mapping'])
        self.mox.StubOutWithMock(drvr, "_connect_volume")
        # One _connect_volume per mapping, with disk dev derived from the
        # mount device basename.
        for v in vol['block_device_mapping']:
            disk_info = {
                'bus': "scsi",
                'dev': v['mount_device'].rpartition("/")[2],
                'type': "disk"
                }
            drvr._connect_volume(v['connection_info'],
                                 disk_info)
        self.mox.StubOutWithMock(drvr, 'plug_vifs')
        drvr.plug_vifs(mox.IsA(instance), nw_info)
        self.mox.ReplayAll()
        result = drvr.pre_live_migration(
            c, instance, vol, nw_info, None,
            migrate_data={"block_migration": False})

        target_ret = {
        'graphics_listen_addrs': {'spice': '127.0.0.1', 'vnc': '127.0.0.1'},
        'volume': {
        '12345': {'connection_info': {u'data': {'device_path':
        u'/dev/disk/by-path/ip-1.2.3.4:3260-iqn.abc.12345.opst-lun-X'},
                  'serial': '12345'},
                  'disk_info': {'bus': 'scsi',
                                'dev': 'sda',
                                'type': 'disk'}},
        '67890': {'connection_info': {u'data': {'device_path':
        u'/dev/disk/by-path/ip-1.2.3.4:3260-iqn.cde.67890.opst-lun-Z'},
                  'serial': '67890'},
                  'disk_info': {'bus': 'scsi',
                                'dev': 'sdb',
                                'type': 'disk'}}}}
        self.assertEqual(result, target_ret)
    def test_pre_live_migration_block_with_config_drive_mocked(self):
        """Block migration with a (non-vfat) config drive is refused with
        NoLiveMigrationForConfigDriveInLibVirt.
        """
        # Creating testdata
        vol = {'block_device_mapping': [
                  {'connection_info': 'dummy', 'mount_device': '/dev/sda'},
                  {'connection_info': 'dummy', 'mount_device': '/dev/sdb'}]}
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)

        def fake_true(*args, **kwargs):
            return True

        self.stubs.Set(configdrive, 'required_by', fake_true)

        instance = objects.Instance(**self.test_instance)
        c = context.get_admin_context()

        self.assertRaises(exception.NoLiveMigrationForConfigDriveInLibVirt,
                          drvr.pre_live_migration, c, instance, vol, None,
                          None, {'is_shared_instance_path': False,
                                 'is_shared_block_storage': False})
    @mock.patch('nova.virt.driver.block_device_info_get_mapping',
                return_value=())
    @mock.patch('nova.virt.configdrive.required_by',
                return_value=True)
    def test_pre_live_migration_block_with_config_drive_mocked_with_vfat(
            self, mock_required_by, block_device_info_get_mapping):
        """A vfat config drive does not block live migration: the call
        succeeds and returns an empty volume mapping.
        """
        self.flags(config_drive_format='vfat')
        # Creating testdata
        vol = {'block_device_mapping': [
            {'connection_info': 'dummy', 'mount_device': '/dev/sda'},
            {'connection_info': 'dummy', 'mount_device': '/dev/sdb'}]}

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        instance = objects.Instance(**self.test_instance)

        res_data = drvr.pre_live_migration(
            self.context, instance, vol, [], None,
            {'is_shared_instance_path': False,
             'is_shared_block_storage': False})
        block_device_info_get_mapping.assert_called_once_with(
            {'block_device_mapping': [
                {'connection_info': 'dummy', 'mount_device': '/dev/sda'},
                {'connection_info': 'dummy', 'mount_device': '/dev/sdb'}
            ]}
        )
        # Mapping was stubbed to () so no volumes appear in the result.
        self.assertEqual({'graphics_listen_addrs': {'spice': '127.0.0.1',
                                                    'vnc': '127.0.0.1'},
                          'volume': {}}, res_data)
    def test_pre_live_migration_vol_backed_works_correctly_mocked(self):
        """Volume-backed pre_live_migration with a non-shared instance path
        creates the instance directory on the destination and returns the
        per-volume connection/disk info.
        """
        # Creating testdata, using temp dir.
        with utils.tempdir() as tmpdir:
            self.flags(instances_path=tmpdir)
            vol = {'block_device_mapping': [
                  {'connection_info': {'serial': '12345', u'data':
                  {'device_path':
                  u'/dev/disk/by-path/ip-1.2.3.4:3260-iqn.abc.12345.opst-lun-X'}},
                   'mount_device': '/dev/sda'},
                  {'connection_info': {'serial': '67890', u'data':
                  {'device_path':
                  u'/dev/disk/by-path/ip-1.2.3.4:3260-iqn.cde.67890.opst-lun-Z'}},
                   'mount_device': '/dev/sdb'}]}

            drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)

            def fake_none(*args, **kwargs):
                return

            self.stubs.Set(drvr, '_create_images_and_backing', fake_none)

            class FakeNetworkInfo(object):
                def fixed_ips(self):
                    return ["test_ip_addr"]
            inst_ref = objects.Instance(**self.test_instance)
            c = context.get_admin_context()
            nw_info = FakeNetworkInfo()
            # Creating mocks
            self.mox.StubOutWithMock(drvr, "_connect_volume")
            for v in vol['block_device_mapping']:
                disk_info = {
                    'bus': "scsi",
                    'dev': v['mount_device'].rpartition("/")[2],
                    'type': "disk"
                    }
                drvr._connect_volume(v['connection_info'],
                                     disk_info)
            self.mox.StubOutWithMock(drvr, 'plug_vifs')
            drvr.plug_vifs(mox.IsA(inst_ref), nw_info)
            self.mox.ReplayAll()
            migrate_data = {'is_shared_instance_path': False,
                            'is_volume_backed': True,
                            'block_migration': False,
                            'instance_relative_path': inst_ref['name']
                            }
            ret = drvr.pre_live_migration(c, inst_ref, vol, nw_info, None,
                                          migrate_data)
            target_ret = {
            'graphics_listen_addrs': {'spice': '127.0.0.1',
                                      'vnc': '127.0.0.1'},
            'volume': {
            '12345': {'connection_info': {u'data': {'device_path':
            u'/dev/disk/by-path/ip-1.2.3.4:3260-iqn.abc.12345.opst-lun-X'},
                      'serial': '12345'},
                      'disk_info': {'bus': 'scsi',
                                    'dev': 'sda',
                                    'type': 'disk'}},
            '67890': {'connection_info': {u'data': {'device_path':
            u'/dev/disk/by-path/ip-1.2.3.4:3260-iqn.cde.67890.opst-lun-Z'},
                      'serial': '67890'},
                      'disk_info': {'bus': 'scsi',
                                    'dev': 'sdb',
                                    'type': 'disk'}}}}
            self.assertEqual(ret, target_ret)
            # The instance directory must have been created under the
            # temporary instances_path.
            self.assertTrue(os.path.exists('%s/%s/' % (tmpdir,
                                                       inst_ref['name'])))
    def test_pre_live_migration_plug_vifs_retry_fails(self):
        """When plug_vifs keeps failing, the error propagates once the
        configured retry count is exhausted.
        """
        self.flags(live_migration_retry_count=3)
        instance = objects.Instance(**self.test_instance)

        def fake_plug_vifs(instance, network_info):
            # Always fails, so every retry is consumed.
            raise processutils.ProcessExecutionError()

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        self.stubs.Set(drvr, 'plug_vifs', fake_plug_vifs)
        # Avoid real sleeping between retries.
        self.stubs.Set(eventlet.greenthread, 'sleep',
                       lambda x: eventlet.sleep(0))
        disk_info_json = jsonutils.dumps({})
        self.assertRaises(processutils.ProcessExecutionError,
                          drvr.pre_live_migration,
                          self.context, instance, block_device_info=None,
                          network_info=[], disk_info=disk_info_json)
    def test_pre_live_migration_plug_vifs_retry_works(self):
        """plug_vifs failures within the retry budget are absorbed: the
        call succeeds once plug_vifs stops raising.
        """
        self.flags(live_migration_retry_count=3)
        called = {'count': 0}
        instance = objects.Instance(**self.test_instance)

        def fake_plug_vifs(instance, network_info):
            called['count'] += 1
            # Fail on every attempt but the last allowed one.
            if called['count'] < CONF.live_migration_retry_count:
                raise processutils.ProcessExecutionError()
            else:
                return

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        self.stubs.Set(drvr, 'plug_vifs', fake_plug_vifs)
        # Avoid real sleeping between retries.
        self.stubs.Set(eventlet.greenthread, 'sleep',
                       lambda x: eventlet.sleep(0))
        disk_info_json = jsonutils.dumps({})
        drvr.pre_live_migration(self.context, instance, block_device_info=None,
                                network_info=[], disk_info=disk_info_json)
    def test_pre_live_migration_image_not_created_with_shared_storage(self):
        """No disk images are created on the destination for any of the
        shared-storage / non-block-migration combinations.
        """
        migrate_data_set = [{'is_shared_block_storage': False,
                             'block_migration': False},
                            {'is_shared_block_storage': True,
                             'block_migration': False},
                            {'is_shared_block_storage': False,
                             'block_migration': True}]

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        instance = objects.Instance(**self.test_instance)
        # creating mocks
        with contextlib.nested(
            mock.patch.object(drvr,
                              '_create_images_and_backing'),
            mock.patch.object(drvr,
                              'ensure_filtering_rules_for_instance'),
            mock.patch.object(drvr, 'plug_vifs'),
        ) as (
            create_image_mock,
            rules_mock,
            plug_mock,
        ):
            disk_info_json = jsonutils.dumps({})
            for migrate_data in migrate_data_set:
                res = drvr.pre_live_migration(self.context, instance,
                                              block_device_info=None,
                                              network_info=[],
                                              disk_info=disk_info_json,
                                              migrate_data=migrate_data)
                self.assertFalse(create_image_mock.called)
                self.assertIsInstance(res, dict)
    def test_pre_live_migration_with_not_shared_instance_path(self):
        """With a non-shared instance path, images/backing are created on
        the destination, falling back to fetching from the source host.
        """
        migrate_data = {'is_shared_block_storage': False,
                        'is_shared_instance_path': False}

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        instance = objects.Instance(**self.test_instance)

        def check_instance_dir(context, instance,
                               instance_dir, disk_info,
                               fallback_from_host=False):
            # A concrete (truthy) instance dir must be handed down.
            self.assertTrue(instance_dir)
        # creating mocks
        with contextlib.nested(
            mock.patch.object(drvr,
                              '_create_images_and_backing',
                              side_effect=check_instance_dir),
            mock.patch.object(drvr,
                              'ensure_filtering_rules_for_instance'),
            mock.patch.object(drvr, 'plug_vifs'),
        ) as (
            create_image_mock,
            rules_mock,
            plug_mock,
        ):
            disk_info_json = jsonutils.dumps({})
            res = drvr.pre_live_migration(self.context, instance,
                                          block_device_info=None,
                                          network_info=[],
                                          disk_info=disk_info_json,
                                          migrate_data=migrate_data)
            create_image_mock.assert_has_calls(
                [mock.call(self.context, instance, mock.ANY, {},
                           fallback_from_host=instance.host)])
            self.assertIsInstance(res, dict)
    def test_pre_live_migration_block_migrate_fails(self):
        """pre_live_migration raises MigrationError when block migration is
        requested (empty migrate_data) while volumes are attached.
        """
        bdms = [{
            'connection_info': {
                'serial': '12345',
                u'data': {
                    'device_path':
                    u'/dev/disk/by-path/ip-1.2.3.4:3260-iqn.abc.12345.t-lun-X'
                }
            },
            'mount_device': '/dev/sda'}]
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        instance = objects.Instance(**self.test_instance)
        with contextlib.nested(
            mock.patch.object(drvr, '_create_images_and_backing'),
            mock.patch.object(drvr, 'ensure_filtering_rules_for_instance'),
            mock.patch.object(drvr, 'plug_vifs'),
            mock.patch.object(drvr, '_connect_volume'),
            mock.patch.object(driver, 'block_device_info_get_mapping',
                              return_value=bdms)):
            disk_info_json = jsonutils.dumps({})
            self.assertRaises(exception.MigrationError,
                              drvr.pre_live_migration,
                              self.context, instance, block_device_info=None,
                              network_info=[], disk_info=disk_info_json,
                              migrate_data={})
    def test_get_instance_disk_info_works_correctly(self):
        """get_instance_disk_info reports type/path/sizes/backing file for
        each disk in the domain XML (one raw, one qcow2 with backing file).
        """
        # Test data
        instance = objects.Instance(**self.test_instance)
        dummyxml = ("<domain type='kvm'><name>instance-0000000a</name>"
                    "<devices>"
                    "<disk type='file'><driver name='qemu' type='raw'/>"
                    "<source file='/test/disk'/>"
                    "<target dev='vda' bus='virtio'/></disk>"
                    "<disk type='file'><driver name='qemu' type='qcow2'/>"
                    "<source file='/test/disk.local'/>"
                    "<target dev='vdb' bus='virtio'/></disk>"
                    "</devices></domain>")
        # Preparing mocks (mox record phase: call order below is enforced
        # at replay time).
        vdmock = self.mox.CreateMock(fakelibvirt.virDomain)
        self.mox.StubOutWithMock(vdmock, "XMLDesc")
        vdmock.XMLDesc(flags=0).AndReturn(dummyxml)
        def fake_lookup(instance_name):
            if instance_name == instance.name:
                return vdmock
        self.create_fake_libvirt_mock(lookupByName=fake_lookup)
        fake_libvirt_utils.disk_sizes['/test/disk'] = 10 * units.Gi
        fake_libvirt_utils.disk_sizes['/test/disk.local'] = 20 * units.Gi
        fake_libvirt_utils.disk_backing_files['/test/disk.local'] = 'file'
        self.mox.StubOutWithMock(os.path, "getsize")
        os.path.getsize('/test/disk').AndReturn((10737418240))
        os.path.getsize('/test/disk.local').AndReturn((3328599655))
        # Canned 'qemu-img info' output for the qcow2 disk.
        ret = ("image: /test/disk\n"
               "file format: raw\n"
               "virtual size: 20G (21474836480 bytes)\n"
               "disk size: 3.1G\n"
               "cluster_size: 2097152\n"
               "backing file: /test/dummy (actual path: /backing/file)\n")
        self.mox.StubOutWithMock(os.path, "exists")
        os.path.exists('/test/disk.local').AndReturn(True)
        self.mox.StubOutWithMock(utils, "execute")
        utils.execute('env', 'LC_ALL=C', 'LANG=C', 'qemu-img', 'info',
                      '/test/disk.local').AndReturn((ret, ''))
        self.mox.ReplayAll()
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        info = drvr.get_instance_disk_info(instance)
        info = jsonutils.loads(info)
        self.assertEqual(info[0]['type'], 'raw')
        self.assertEqual(info[0]['path'], '/test/disk')
        self.assertEqual(info[0]['disk_size'], 10737418240)
        self.assertEqual(info[0]['backing_file'], "")
        self.assertEqual(info[0]['over_committed_disk_size'], 0)
        self.assertEqual(info[1]['type'], 'qcow2')
        self.assertEqual(info[1]['path'], '/test/disk.local')
        self.assertEqual(info[1]['virt_disk_size'], 21474836480)
        self.assertEqual(info[1]['backing_file'], "file")
        # over-committed = virtual size - actual on-disk size.
        self.assertEqual(info[1]['over_committed_disk_size'], 18146236825)
    def test_post_live_migration(self):
        """post_live_migration disconnects every attached volume, passing
        the connection info and the short device name (e.g. 'sda').
        """
        vol = {'block_device_mapping': [
                  {'connection_info': 'dummy1', 'mount_device': '/dev/sda'},
                  {'connection_info': 'dummy2', 'mount_device': '/dev/sdb'}]}
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        inst_ref = {'id': 'foo'}
        cntx = context.get_admin_context()
        # Set up the mock expectations
        with contextlib.nested(
                mock.patch.object(driver, 'block_device_info_get_mapping',
                                  return_value=vol['block_device_mapping']),
                mock.patch.object(drvr, '_disconnect_volume')
        ) as (block_device_info_get_mapping, _disconnect_volume):
            drvr.post_live_migration(cntx, inst_ref, vol)
            block_device_info_get_mapping.assert_has_calls([
                mock.call(vol)])
            # rpartition("/")[2] strips the '/dev/' prefix.
            _disconnect_volume.assert_has_calls([
                mock.call(v['connection_info'],
                          v['mount_device'].rpartition("/")[2])
                for v in vol['block_device_mapping']])
    def test_get_instance_disk_info_excludes_volumes(self):
        """Disks that correspond to attached Cinder volumes (per the block
        device mapping) are excluded from get_instance_disk_info output.
        """
        # Test data: vda/vdb are local disks, vdc/vdd are volumes.
        instance = objects.Instance(**self.test_instance)
        dummyxml = ("<domain type='kvm'><name>instance-0000000a</name>"
                    "<devices>"
                    "<disk type='file'><driver name='qemu' type='raw'/>"
                    "<source file='/test/disk'/>"
                    "<target dev='vda' bus='virtio'/></disk>"
                    "<disk type='file'><driver name='qemu' type='qcow2'/>"
                    "<source file='/test/disk.local'/>"
                    "<target dev='vdb' bus='virtio'/></disk>"
                    "<disk type='file'><driver name='qemu' type='qcow2'/>"
                    "<source file='/fake/path/to/volume1'/>"
                    "<target dev='vdc' bus='virtio'/></disk>"
                    "<disk type='file'><driver name='qemu' type='qcow2'/>"
                    "<source file='/fake/path/to/volume2'/>"
                    "<target dev='vdd' bus='virtio'/></disk>"
                    "</devices></domain>")
        # Preparing mocks
        vdmock = self.mox.CreateMock(fakelibvirt.virDomain)
        self.mox.StubOutWithMock(vdmock, "XMLDesc")
        vdmock.XMLDesc(flags=0).AndReturn(dummyxml)
        def fake_lookup(instance_name):
            if instance_name == instance.name:
                return vdmock
        self.create_fake_libvirt_mock(lookupByName=fake_lookup)
        fake_libvirt_utils.disk_sizes['/test/disk'] = 10 * units.Gi
        fake_libvirt_utils.disk_sizes['/test/disk.local'] = 20 * units.Gi
        fake_libvirt_utils.disk_backing_files['/test/disk.local'] = 'file'
        self.mox.StubOutWithMock(os.path, "getsize")
        os.path.getsize('/test/disk').AndReturn((10737418240))
        os.path.getsize('/test/disk.local').AndReturn((3328599655))
        # Canned 'qemu-img info' output for the qcow2 local disk.
        ret = ("image: /test/disk\n"
               "file format: raw\n"
               "virtual size: 20G (21474836480 bytes)\n"
               "disk size: 3.1G\n"
               "cluster_size: 2097152\n"
               "backing file: /test/dummy (actual path: /backing/file)\n")
        self.mox.StubOutWithMock(os.path, "exists")
        os.path.exists('/test/disk.local').AndReturn(True)
        self.mox.StubOutWithMock(utils, "execute")
        utils.execute('env', 'LC_ALL=C', 'LANG=C', 'qemu-img', 'info',
                      '/test/disk.local').AndReturn((ret, ''))
        self.mox.ReplayAll()
        # vdc/vdd are mapped volumes, so they must be filtered out.
        conn_info = {'driver_volume_type': 'fake'}
        info = {'block_device_mapping': [
                  {'connection_info': conn_info, 'mount_device': '/dev/vdc'},
                  {'connection_info': conn_info, 'mount_device': '/dev/vdd'}]}
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        info = drvr.get_instance_disk_info(instance,
                                           block_device_info=info)
        info = jsonutils.loads(info)
        self.assertEqual(info[0]['type'], 'raw')
        self.assertEqual(info[0]['path'], '/test/disk')
        self.assertEqual(info[0]['disk_size'], 10737418240)
        self.assertEqual(info[0]['backing_file'], "")
        self.assertEqual(info[0]['over_committed_disk_size'], 0)
        self.assertEqual(info[1]['type'], 'qcow2')
        self.assertEqual(info[1]['path'], '/test/disk.local')
        self.assertEqual(info[1]['virt_disk_size'], 21474836480)
        self.assertEqual(info[1]['backing_file'], "file")
        self.assertEqual(info[1]['over_committed_disk_size'], 18146236825)
    def test_get_instance_disk_info_no_bdinfo_passed(self):
        # NOTE(ndipanov): _get_disk_overcomitted_size_total calls this method
        # without access to Nova's block device information. We want to make
        # sure that we guess volumes mostly correctly in that case as well
        instance = objects.Instance(**self.test_instance)
        # vda is a file-backed disk; vdb is block-backed and should be
        # guessed to be a volume (and therefore excluded).
        dummyxml = ("<domain type='kvm'><name>instance-0000000a</name>"
                    "<devices>"
                    "<disk type='file'><driver name='qemu' type='raw'/>"
                    "<source file='/test/disk'/>"
                    "<target dev='vda' bus='virtio'/></disk>"
                    "<disk type='block'><driver name='qemu' type='raw'/>"
                    "<source file='/fake/path/to/volume1'/>"
                    "<target dev='vdb' bus='virtio'/></disk>"
                    "</devices></domain>")
        # Preparing mocks
        vdmock = self.mox.CreateMock(fakelibvirt.virDomain)
        self.mox.StubOutWithMock(vdmock, "XMLDesc")
        vdmock.XMLDesc(flags=0).AndReturn(dummyxml)
        def fake_lookup(instance_name):
            if instance_name == instance.name:
                return vdmock
        self.create_fake_libvirt_mock(lookupByName=fake_lookup)
        fake_libvirt_utils.disk_sizes['/test/disk'] = 10 * units.Gi
        self.mox.StubOutWithMock(os.path, "getsize")
        os.path.getsize('/test/disk').AndReturn((10737418240))
        self.mox.ReplayAll()
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        info = drvr.get_instance_disk_info(instance)
        info = jsonutils.loads(info)
        # Only the file-backed disk is reported.
        self.assertEqual(1, len(info))
        self.assertEqual(info[0]['type'], 'raw')
        self.assertEqual(info[0]['path'], '/test/disk')
        self.assertEqual(info[0]['disk_size'], 10737418240)
        self.assertEqual(info[0]['backing_file'], "")
        self.assertEqual(info[0]['over_committed_disk_size'], 0)
    def test_spawn_with_network_info(self):
        """spawn() succeeds with network info on a host whose libvirt lacks
        VIR_CONNECT_BASELINE_CPU_EXPAND_FEATURES (the attribute is deleted
        from the mocked libvirt module below).
        """
        # Preparing mocks
        def fake_none(*args, **kwargs):
            return
        def fake_getLibVersion():
            return 9011
        def fake_getCapabilities():
            return """
            <capabilities>
                <host>
                    <uuid>cef19ce0-0ca2-11df-855d-b19fbce37686</uuid>
                    <cpu>
                      <arch>x86_64</arch>
                      <model>Penryn</model>
                      <vendor>Intel</vendor>
                      <topology sockets='1' cores='2' threads='1'/>
                      <feature name='xtpr'/>
                    </cpu>
                </host>
            </capabilities>
            """
        def fake_baselineCPU(cpu, flag):
            return """<cpu mode='custom' match='exact'>
                        <model fallback='allow'>Penryn</model>
                        <vendor>Intel</vendor>
                        <feature policy='require' name='xtpr'/>
                      </cpu>
                   """
        # _fake_network_info must be called before create_fake_libvirt_mock(),
        # as _fake_network_info calls importutils.import_class() and
        # create_fake_libvirt_mock() mocks importutils.import_class().
        network_info = _fake_network_info(self.stubs, 1)
        self.create_fake_libvirt_mock(getLibVersion=fake_getLibVersion,
                                      getCapabilities=fake_getCapabilities,
                                      getVersion=lambda: 1005001,
                                      baselineCPU=fake_baselineCPU)
        instance_ref = self.test_instance
        instance_ref['image_ref'] = 123456  # we send an int to test sha1 call
        instance = objects.Instance(**instance_ref)
        image_meta = {}
        # Mock out the get_info method of the LibvirtDriver so that the polling
        # in the spawn method of the LibvirtDriver returns immediately
        self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, 'get_info')
        libvirt_driver.LibvirtDriver.get_info(instance
            ).AndReturn(hardware.InstanceInfo(state=power_state.RUNNING))
        # Start test
        self.mox.ReplayAll()
        with mock.patch('nova.virt.libvirt.driver.libvirt') as old_virt:
            # Simulate a libvirt without the baseline-CPU expand flag.
            del old_virt.VIR_CONNECT_BASELINE_CPU_EXPAND_FEATURES
            drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
            self.stubs.Set(drvr.firewall_driver,
                        'setup_basic_filtering',
                        fake_none)
            self.stubs.Set(drvr.firewall_driver,
                        'prepare_instance_filter',
                        fake_none)
            self.stubs.Set(imagebackend.Image,
                        'cache',
                        fake_none)
            drvr.spawn(self.context, instance, image_meta, [], 'herp',
                       network_info=network_info)
        # Clean up any instance/cache directories spawn created on disk.
        path = os.path.join(CONF.instances_path, instance['name'])
        if os.path.isdir(path):
            shutil.rmtree(path)
        path = os.path.join(CONF.instances_path,
                            CONF.image_cache_subdirectory_name)
        if os.path.isdir(path):
            shutil.rmtree(os.path.join(CONF.instances_path,
                                       CONF.image_cache_subdirectory_name))
    def test_spawn_without_image_meta(self):
        """spawn() calls _create_image both with empty image metadata and
        with minimal metadata containing only the image id.
        """
        self.create_image_called = False
        def fake_none(*args, **kwargs):
            return
        def fake_create_image(*args, **kwargs):
            self.create_image_called = True
        def fake_get_info(instance):
            return hardware.InstanceInfo(state=power_state.RUNNING)
        instance_ref = self.test_instance
        instance_ref['image_ref'] = 1
        instance = objects.Instance(**instance_ref)
        image_meta = {}
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        self.stubs.Set(drvr, '_get_guest_xml', fake_none)
        self.stubs.Set(drvr, '_create_image', fake_create_image)
        self.stubs.Set(drvr, '_create_domain_and_network', fake_none)
        self.stubs.Set(drvr, 'get_info', fake_get_info)
        # Empty image_meta.
        drvr.spawn(self.context, instance, image_meta, [], None)
        self.assertTrue(self.create_image_called)
        # image_meta with only the image id.
        drvr.spawn(self.context,
                   instance,
                   {'id': instance['image_ref']},
                   [],
                   None)
        self.assertTrue(self.create_image_called)
    def test_spawn_from_volume_calls_cache(self):
        """The image cache is populated only when booting from an image;
        volume-backed instances (with or without a placeholder image_ref)
        must not cache a root disk.
        """
        self.cache_called_for_disk = False
        def fake_none(*args, **kwargs):
            return
        def fake_cache(*args, **kwargs):
            # Record whether the root disk image was cached.
            if kwargs.get('image_id') == 'my_fake_image':
                self.cache_called_for_disk = True
        def fake_get_info(instance):
            return hardware.InstanceInfo(state=power_state.RUNNING)
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        self.stubs.Set(drvr, '_get_guest_xml', fake_none)
        self.stubs.Set(imagebackend.Image, 'cache', fake_cache)
        self.stubs.Set(drvr, '_create_domain_and_network', fake_none)
        self.stubs.Set(drvr, 'get_info', fake_get_info)
        block_device_info = {'root_device_name': '/dev/vda',
                             'block_device_mapping': [
                                {'mount_device': 'vda',
                                 'boot_index': 0}
                                ]
                            }
        # Volume-backed instance created without image
        instance_ref = self.test_instance
        instance_ref['image_ref'] = ''
        instance_ref['root_device_name'] = '/dev/vda'
        instance_ref['uuid'] = uuidutils.generate_uuid()
        instance = objects.Instance(**instance_ref)
        image_meta = {}
        drvr.spawn(self.context, instance, image_meta, [], None,
                   block_device_info=block_device_info)
        self.assertFalse(self.cache_called_for_disk)
        # Booted from volume but with placeholder image
        instance_ref = self.test_instance
        instance_ref['image_ref'] = 'my_fake_image'
        instance_ref['root_device_name'] = '/dev/vda'
        instance_ref['uuid'] = uuidutils.generate_uuid()
        instance = objects.Instance(**instance_ref)
        image_meta = {}
        drvr.spawn(self.context, instance, image_meta, [], None,
                   block_device_info=block_device_info)
        self.assertFalse(self.cache_called_for_disk)
        # Booted from an image
        instance_ref['image_ref'] = 'my_fake_image'
        instance_ref['uuid'] = uuidutils.generate_uuid()
        instance = objects.Instance(**instance_ref)
        drvr.spawn(self.context, instance, image_meta, [], None)
        self.assertTrue(self.cache_called_for_disk)
    def test_start_lxc_from_volume(self):
        """Booting an LXC instance from a qcow2 volume sets up the rootfs
        container from the volume device and records the resulting device
        name in the instance's system metadata.
        """
        self.flags(virt_type="lxc",
                   group='libvirt')
        def check_setup_container(image, container_dir=None):
            # The container must be built from the volume path with the
            # format taken from the image metadata (qcow2 below).
            self.assertEqual(image.path, '/dev/path/to/dev')
            self.assertEqual(image.format, imgmodel.FORMAT_QCOW2)
            return '/dev/nbd1'
        bdm = {
                  'guest_format': None,
                  'boot_index': 0,
                  'mount_device': '/dev/sda',
                  'connection_info': {
                      'driver_volume_type': 'iscsi',
                      'serial': 'afc1',
                      'data': {
                          'access_mode': 'rw',
                          'device_path': '/dev/path/to/dev',
                          'target_discovered': False,
                          'encrypted': False,
                          'qos_specs': None,
                          'target_iqn': 'iqn: volume-afc1',
                          'target_portal': 'ip: 3260',
                          'volume_id': 'afc1',
                          'target_lun': 1,
                          'auth_password': 'uj',
                          'auth_username': '47',
                          'auth_method': 'CHAP'
                      }
                  },
                  'disk_bus': 'scsi',
                  'device_type': 'disk',
                  'delete_on_termination': False
              }
        def _get(key, opt=None):
            return bdm.get(key, opt)
        def getitem(key):
            return bdm[key]
        def setitem(key, val):
            bdm[key] = val
        # MagicMock standing in for a BlockDeviceMapping object, backed by
        # the plain bdm dict above.
        bdm_mock = mock.MagicMock()
        bdm_mock.__getitem__.side_effect = getitem
        bdm_mock.__setitem__.side_effect = setitem
        bdm_mock.get = _get
        disk_mock = mock.MagicMock()
        disk_mock.source_path = '/dev/path/to/dev'
        block_device_info = {'block_device_mapping': [bdm_mock],
                             'root_device_name': '/dev/sda'}
        # Volume-backed instance created without image
        instance_ref = self.test_instance
        instance_ref['image_ref'] = ''
        instance_ref['root_device_name'] = '/dev/sda'
        instance_ref['ephemeral_gb'] = 0
        instance_ref['uuid'] = uuidutils.generate_uuid()
        instance_ref['system_metadata']['image_disk_format'] = 'qcow2'
        inst_obj = objects.Instance(**instance_ref)
        image_meta = {}
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        with contextlib.nested(
            mock.patch.object(drvr, '_create_images_and_backing'),
            mock.patch.object(drvr, 'plug_vifs'),
            mock.patch.object(drvr.firewall_driver, 'setup_basic_filtering'),
            mock.patch.object(drvr.firewall_driver, 'prepare_instance_filter'),
            mock.patch.object(drvr.firewall_driver, 'apply_instance_filter'),
            mock.patch.object(drvr, '_create_domain'),
            mock.patch.object(drvr, '_connect_volume'),
            mock.patch.object(drvr, '_get_volume_config',
                                     return_value=disk_mock),
            mock.patch.object(drvr, 'get_info',
                              return_value=hardware.InstanceInfo(
                              state=power_state.RUNNING)),
            mock.patch('nova.virt.disk.api.setup_container',
                       side_effect=check_setup_container),
            mock.patch('nova.virt.disk.api.teardown_container'),
            mock.patch.object(objects.Instance, 'save')):
            drvr.spawn(self.context, inst_obj, image_meta, [], None,
                       network_info=[],
                       block_device_info=block_device_info)
            # The device returned by setup_container is persisted so it
            # can be torn down later.
            self.assertEqual('/dev/nbd1',
                             inst_obj.system_metadata.get(
                                 'rootfs_device_name'))
def test_spawn_with_pci_devices(self):
def fake_none(*args, **kwargs):
return None
def fake_get_info(instance):
return hardware.InstanceInfo(state=power_state.RUNNING)
class FakeLibvirtPciDevice(object):
def dettach(self):
return None
def reset(self):
return None
def fake_node_device_lookup_by_name(address):
pattern = ("pci_%(hex)s{4}_%(hex)s{2}_%(hex)s{2}_%(oct)s{1}"
% dict(hex='[\da-f]', oct='[0-8]'))
pattern = re.compile(pattern)
if pattern.match(address) is None:
raise fakelibvirt.libvirtError()
return FakeLibvirtPciDevice()
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.stubs.Set(drvr, '_get_guest_xml', fake_none)
self.stubs.Set(drvr, '_create_image', fake_none)
self.stubs.Set(drvr, '_create_domain_and_network', fake_none)
self.stubs.Set(drvr, 'get_info', fake_get_info)
drvr._conn.nodeDeviceLookupByName = \
fake_node_device_lookup_by_name
instance_ref = self.test_instance
instance_ref['image_ref'] = 'my_fake_image'
instance = objects.Instance(**instance_ref)
instance['pci_devices'] = objects.PciDeviceList(
objects=[objects.PciDevice(address='0000:00:00.0')])
image_meta = {}
drvr.spawn(self.context, instance, image_meta, [], None)
    def test_chown_disk_config_for_instance(self):
        """_chown_disk_config_for_instance chowns the instance's
        disk.config to the current uid when the file exists.
        """
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        instance = objects.Instance(**self.test_instance)
        self.mox.StubOutWithMock(fake_libvirt_utils, 'get_instance_path')
        self.mox.StubOutWithMock(os.path, 'exists')
        self.mox.StubOutWithMock(fake_libvirt_utils, 'chown')
        fake_libvirt_utils.get_instance_path(instance).AndReturn('/tmp/uuid')
        os.path.exists('/tmp/uuid/disk.config').AndReturn(True)
        fake_libvirt_utils.chown('/tmp/uuid/disk.config', os.getuid())
        self.mox.ReplayAll()
        drvr._chown_disk_config_for_instance(instance)
    def _test_create_image_plain(self, os_type='', filename='', mkfs=False):
        """Helper: run _create_image and verify which cached image files
        (root disk + ephemeral) are requested, for a given guest os_type
        and expected ephemeral filename; mkfs toggles a per-os_type mkfs
        command, which changes the ephemeral cache filename.
        """
        gotFiles = []
        def fake_image(self, instance, name, image_type=''):
            # Minimal Image backend that records cache() requests instead
            # of touching the filesystem.
            class FakeImage(imagebackend.Image):
                def __init__(self, instance, name, is_block_dev=False):
                    self.path = os.path.join(instance['name'], name)
                    self.is_block_dev = is_block_dev
                def create_image(self, prepare_template, base,
                                 size, *args, **kwargs):
                    pass
                def cache(self, fetch_func, filename, size=None,
                          *args, **kwargs):
                    gotFiles.append({'filename': filename,
                                     'size': size})
                def snapshot(self, name):
                    pass
            return FakeImage(instance, name)
        def fake_none(*args, **kwargs):
            return
        def fake_get_info(instance):
            return hardware.InstanceInfo(state=power_state.RUNNING)
        # Stop 'libvirt_driver._create_image' touching filesystem
        self.stubs.Set(nova.virt.libvirt.imagebackend.Backend, "image",
                       fake_image)
        instance_ref = self.test_instance
        instance_ref['image_ref'] = 1
        instance = objects.Instance(**instance_ref)
        instance['os_type'] = os_type
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        self.stubs.Set(drvr, '_get_guest_xml', fake_none)
        self.stubs.Set(drvr, '_create_domain_and_network', fake_none)
        self.stubs.Set(drvr, 'get_info', fake_get_info)
        if mkfs:
            self.stubs.Set(nova.virt.disk.api, '_MKFS_COMMAND',
                       {os_type: 'mkfs.ext4 --label %(fs_label)s %(target)s'})
        image_meta = {'id': instance['image_ref']}
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance,
                                            image_meta)
        # NOTE(review): passes the context *module*, not self.context, to
        # _create_image — presumably the argument is unused on this path;
        # confirm before relying on it.
        drvr._create_image(context, instance, disk_info['mapping'])
        drvr._get_guest_xml(self.context, instance, None,
                            disk_info, image_meta)
        # Root disk cached by image-ref sha1; ephemeral by expected name.
        wantFiles = [
            {'filename': '356a192b7913b04c54574d18c28d46e6395428ab',
             'size': 10 * units.Gi},
            {'filename': filename,
             'size': 20 * units.Gi},
            ]
        self.assertEqual(gotFiles, wantFiles)
    def test_create_image_plain_os_type_blank(self):
        """Blank os_type uses the default ephemeral filename."""
        self._test_create_image_plain(os_type='',
                                      filename=self._EPHEMERAL_20_DEFAULT,
                                      mkfs=False)
    def test_create_image_plain_os_type_none(self):
        """None os_type uses the default ephemeral filename."""
        self._test_create_image_plain(os_type=None,
                                      filename=self._EPHEMERAL_20_DEFAULT,
                                      mkfs=False)
    def test_create_image_plain_os_type_set_no_fs(self):
        """os_type with no mkfs override uses the default filename."""
        self._test_create_image_plain(os_type='test',
                                      filename=self._EPHEMERAL_20_DEFAULT,
                                      mkfs=False)
    def test_create_image_plain_os_type_set_with_fs(self):
        """A per-os_type mkfs command makes the ephemeral cache filename
        include a hash of that command.
        """
        ephemeral_file_name = ('ephemeral_20_%s' % utils.get_hash_str(
            'mkfs.ext4 --label %(fs_label)s %(target)s')[:7])
        self._test_create_image_plain(os_type='test',
                                      filename=ephemeral_file_name,
                                      mkfs=True)
    def test_create_image_with_swap(self):
        """A flavor with swap causes _create_image to also cache a
        'swap_<MB>' file of the configured size.
        """
        gotFiles = []
        def fake_image(self, instance, name, image_type=''):
            # Minimal Image backend that records cache() requests instead
            # of touching the filesystem.
            class FakeImage(imagebackend.Image):
                def __init__(self, instance, name, is_block_dev=False):
                    self.path = os.path.join(instance['name'], name)
                    self.is_block_dev = is_block_dev
                def create_image(self, prepare_template, base,
                                 size, *args, **kwargs):
                    pass
                def cache(self, fetch_func, filename, size=None,
                          *args, **kwargs):
                    gotFiles.append({'filename': filename,
                                     'size': size})
                def snapshot(self, name):
                    pass
            return FakeImage(instance, name)
        def fake_none(*args, **kwargs):
            return
        def fake_get_info(instance):
            return hardware.InstanceInfo(state=power_state.RUNNING)
        # Stop 'libvirt_driver._create_image' touching filesystem
        self.stubs.Set(nova.virt.libvirt.imagebackend.Backend, "image",
                       fake_image)
        instance_ref = self.test_instance
        instance_ref['image_ref'] = 1
        instance = objects.Instance(**instance_ref)
        # Turn on some swap to exercise that codepath in _create_image
        instance.flavor.swap = 500
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        self.stubs.Set(drvr, '_get_guest_xml', fake_none)
        self.stubs.Set(drvr, '_create_domain_and_network', fake_none)
        self.stubs.Set(drvr, 'get_info', fake_get_info)
        image_meta = {'id': instance['image_ref']}
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance,
                                            image_meta)
        # NOTE(review): passes the context *module*, not self.context —
        # same quirk as _test_create_image_plain; confirm it is unused.
        drvr._create_image(context, instance, disk_info['mapping'])
        drvr._get_guest_xml(self.context, instance, None,
                            disk_info, image_meta)
        wantFiles = [
            {'filename': '356a192b7913b04c54574d18c28d46e6395428ab',
             'size': 10 * units.Gi},
            {'filename': self._EPHEMERAL_20_DEFAULT,
             'size': 20 * units.Gi},
            {'filename': 'swap_500',
             'size': 500 * units.Mi},
            ]
        self.assertEqual(gotFiles, wantFiles)
    @mock.patch.object(nova.virt.libvirt.imagebackend.Image, 'cache',
                       side_effect=exception.ImageNotFound(image_id='fake-id'))
    def test_create_image_not_exist_no_fallback(self, mock_cache):
        """ImageNotFound propagates from _create_image when no fallback
        host is given.
        """
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        instance = objects.Instance(**self.test_instance)
        image_meta = {'id': instance.image_ref}
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance,
                                            image_meta)
        self.assertRaises(exception.ImageNotFound,
                          drvr._create_image,
                          self.context, instance, disk_info['mapping'])
    @mock.patch.object(nova.virt.libvirt.imagebackend.Image, 'cache')
    def test_create_image_not_exist_fallback(self, mock_cache):
        """When the image is missing from glance and a fallback host is
        given, _create_image copies the image from that host instead.
        """
        def side_effect(fetch_func, filename, size=None, *args, **kwargs):
            def second_call(fetch_func, filename, size=None, *args, **kwargs):
                # call copy_from_host ourselves because we mocked image.cache()
                fetch_func('fake-target', 'fake-max-size')
                # further calls have no side effect
                mock_cache.side_effect = None
            mock_cache.side_effect = second_call
            # raise an error only the first call
            raise exception.ImageNotFound(image_id='fake-id')
        mock_cache.side_effect = side_effect
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        instance = objects.Instance(**self.test_instance)
        image_meta = {'id': instance.image_ref}
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance,
                                            image_meta)
        with mock.patch.object(libvirt_driver.libvirt_utils,
                               'copy_image') as mock_copy:
            drvr._create_image(self.context, instance, disk_info['mapping'],
                               fallback_from_host='fake-source-host')
            # The image is pulled (receive=True) from the fallback host.
            mock_copy.assert_called_once_with(src='fake-target',
                                              dest='fake-target',
                                              host='fake-source-host',
                                              receive=True)
    @mock.patch.object(utils, 'execute')
    def test_create_ephemeral_specified_fs(self, mock_exec):
        """An explicitly specified filesystem overrides the configured
        default ephemeral format when formatting the ephemeral device.
        """
        self.flags(default_ephemeral_format='ext3')
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        drvr._create_ephemeral('/dev/something', 20, 'myVol', 'linux',
                               is_block_dev=True, max_size=20,
                               specified_fs='ext4')
        # ext4 (specified) wins over ext3 (configured default).
        mock_exec.assert_called_once_with('mkfs', '-t', 'ext4', '-F', '-L',
                                          'myVol', '/dev/something',
                                          run_as_root=True)
    def test_create_ephemeral_specified_fs_not_valid(self):
        """An unsupported guest_format on an ephemeral BDM makes
        _create_image raise InvalidBDMFormat.
        """
        CONF.set_override('default_ephemeral_format', 'ext4')
        ephemerals = [{'device_type': 'disk',
                       'disk_bus': 'virtio',
                       'device_name': '/dev/vdb',
                       'guest_format': 'dummy',
                       'size': 1}]
        block_device_info = {
                'ephemerals': ephemerals}
        instance_ref = self.test_instance
        instance_ref['image_ref'] = 1
        instance = objects.Instance(**instance_ref)
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        image_meta = {'id': instance['image_ref']}
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance,
                                            image_meta)
        disk_info['mapping'].pop('disk.local')
        with contextlib.nested(
                mock.patch.object(utils, 'execute'),
                mock.patch.object(drvr, 'get_info'),
                mock.patch.object(drvr, '_create_domain_and_network'),
                mock.patch.object(imagebackend.Image, 'verify_base_size')):
            self.assertRaises(exception.InvalidBDMFormat, drvr._create_image,
                              context, instance, disk_info['mapping'],
                              block_device_info=block_device_info)
    def test_create_ephemeral_default(self):
        """With no configured format, ephemeral block devices are
        formatted ext4 via mkfs.
        """
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        self.mox.StubOutWithMock(utils, 'execute')
        utils.execute('mkfs', '-t', 'ext4', '-F', '-L', 'myVol',
                      '/dev/something', run_as_root=True)
        self.mox.ReplayAll()
        drvr._create_ephemeral('/dev/something', 20, 'myVol', 'linux',
                               is_block_dev=True, max_size=20)
    def test_create_ephemeral_with_conf(self):
        """The configured default_ephemeral_format is used when no
        filesystem is specified per-device.
        """
        CONF.set_override('default_ephemeral_format', 'ext4')
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        self.mox.StubOutWithMock(utils, 'execute')
        utils.execute('mkfs', '-t', 'ext4', '-F', '-L', 'myVol',
                      '/dev/something', run_as_root=True)
        self.mox.ReplayAll()
        drvr._create_ephemeral('/dev/something', 20, 'myVol', 'linux',
                               is_block_dev=True)
    def test_create_ephemeral_with_arbitrary(self):
        """A per-os_type _MKFS_COMMAND template is honoured (ext4)."""
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        self.stubs.Set(nova.virt.disk.api, '_MKFS_COMMAND',
                       {'linux': 'mkfs.ext4 --label %(fs_label)s %(target)s'})
        self.mox.StubOutWithMock(utils, 'execute')
        utils.execute('mkfs.ext4', '--label', 'myVol', '/dev/something',
                      run_as_root=True)
        self.mox.ReplayAll()
        drvr._create_ephemeral('/dev/something', 20, 'myVol', 'linux',
                               is_block_dev=True)
    def test_create_ephemeral_with_ext3(self):
        """A per-os_type _MKFS_COMMAND template is honoured (ext3)."""
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        self.stubs.Set(nova.virt.disk.api, '_MKFS_COMMAND',
                       {'linux': 'mkfs.ext3 --label %(fs_label)s %(target)s'})
        self.mox.StubOutWithMock(utils, 'execute')
        utils.execute('mkfs.ext3', '--label', 'myVol', '/dev/something',
                      run_as_root=True)
        self.mox.ReplayAll()
        drvr._create_ephemeral('/dev/something', 20, 'myVol', 'linux',
                               is_block_dev=True)
    def test_create_swap_default(self):
        """_create_swap formats the device with mkswap (not as root)."""
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        self.mox.StubOutWithMock(utils, 'execute')
        utils.execute('mkswap', '/dev/something', run_as_root=False)
        self.mox.ReplayAll()
        drvr._create_swap('/dev/something', 1, max_size=20)
    def test_get_console_output_file(self):
        """get_console_output on a file-backed console returns only the
        last MAX_CONSOLE_BYTES bytes of the log.
        """
        fake_libvirt_utils.files['console.log'] = '01234567890'
        with utils.tempdir() as tmpdir:
            self.flags(instances_path=tmpdir)
            instance_ref = self.test_instance
            instance_ref['image_ref'] = 123456
            instance = objects.Instance(**instance_ref)
            console_dir = (os.path.join(tmpdir, instance['name']))
            console_log = '%s/console.log' % (console_dir)
            fake_dom_xml = """
                <domain type='kvm'>
                    <devices>
                        <disk type='file'>
                            <source file='filename'/>
                        </disk>
                        <console type='file'>
                            <source path='%s'/>
                            <target port='0'/>
                        </console>
                    </devices>
                </domain>
            """ % console_log
            def fake_lookup(id):
                return FakeVirtDomain(fake_dom_xml)
            self.create_fake_libvirt_mock()
            libvirt_driver.LibvirtDriver._conn.lookupByName = fake_lookup
            drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
            try:
                # Shrink the tail limit so truncation is observable, and
                # restore it afterwards regardless of outcome.
                prev_max = libvirt_driver.MAX_CONSOLE_BYTES
                libvirt_driver.MAX_CONSOLE_BYTES = 5
                with mock.patch('os.path.exists', return_value=True):
                    output = drvr.get_console_output(self.context, instance)
            finally:
                libvirt_driver.MAX_CONSOLE_BYTES = prev_max
            # Only the last 5 bytes of '01234567890' survive.
            self.assertEqual('67890', output)
    def test_get_console_output_file_missing(self):
        """A missing console log file yields empty console output rather
        than an error.
        """
        with utils.tempdir() as tmpdir:
            self.flags(instances_path=tmpdir)
            instance_ref = self.test_instance
            instance_ref['image_ref'] = 123456
            instance = objects.Instance(**instance_ref)
            console_log = os.path.join(tmpdir, instance['name'],
                                       'non-existent.log')
            fake_dom_xml = """
                <domain type='kvm'>
                    <devices>
                        <disk type='file'>
                            <source file='filename'/>
                        </disk>
                        <console type='file'>
                            <source path='%s'/>
                            <target port='0'/>
                        </console>
                    </devices>
                </domain>
            """ % console_log
            def fake_lookup(id):
                return FakeVirtDomain(fake_dom_xml)
            self.create_fake_libvirt_mock()
            libvirt_driver.LibvirtDriver._conn.lookupByName = fake_lookup
            drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
            with mock.patch('os.path.exists', return_value=False):
                output = drvr.get_console_output(self.context, instance)
            self.assertEqual('', output)
    def test_get_console_output_pty(self):
        """get_console_output on a pty-backed console flushes the pty into
        a log file and returns the last MAX_CONSOLE_BYTES bytes.
        """
        fake_libvirt_utils.files['pty'] = '01234567890'
        with utils.tempdir() as tmpdir:
            self.flags(instances_path=tmpdir)
            instance_ref = self.test_instance
            instance_ref['image_ref'] = 123456
            instance = objects.Instance(**instance_ref)
            console_dir = (os.path.join(tmpdir, instance['name']))
            pty_file = '%s/fake_pty' % (console_dir)
            fake_dom_xml = """
                <domain type='kvm'>
                    <devices>
                        <disk type='file'>
                            <source file='filename'/>
                        </disk>
                        <console type='pty'>
                            <source path='%s'/>
                            <target port='0'/>
                        </console>
                    </devices>
                </domain>
            """ % pty_file
            def fake_lookup(id):
                return FakeVirtDomain(fake_dom_xml)
            def _fake_flush(self, fake_pty):
                return 'foo'
            def _fake_append_to_file(self, data, fpath):
                return 'pty'
            self.create_fake_libvirt_mock()
            libvirt_driver.LibvirtDriver._conn.lookupByName = fake_lookup
            libvirt_driver.LibvirtDriver._flush_libvirt_console = _fake_flush
            libvirt_driver.LibvirtDriver._append_to_file = _fake_append_to_file
            drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
            try:
                # Shrink the tail limit so truncation is observable, and
                # restore it afterwards regardless of outcome.
                prev_max = libvirt_driver.MAX_CONSOLE_BYTES
                libvirt_driver.MAX_CONSOLE_BYTES = 5
                output = drvr.get_console_output(self.context, instance)
            finally:
                libvirt_driver.MAX_CONSOLE_BYTES = prev_max
            self.assertEqual('67890', output)
    def test_get_host_ip_addr(self):
        """get_host_ip_addr returns the configured my_ip address."""
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        ip = drvr.get_host_ip_addr()
        self.assertEqual(ip, CONF.my_ip)
    @mock.patch.object(libvirt_driver.LOG, 'warn')
    @mock.patch('nova.compute.utils.get_machine_ips')
    def test_get_host_ip_addr_failure(self, mock_ips, mock_log):
        """A warning is logged when my_ip matches none of the host's
        interface addresses.
        """
        mock_ips.return_value = ['8.8.8.8', '75.75.75.75']
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        drvr.get_host_ip_addr()
        mock_log.assert_called_once_with(u'my_ip address (%(my_ip)s) was '
                                         u'not found on any of the '
                                         u'interfaces: %(ifaces)s',
                                         {'ifaces': '8.8.8.8, 75.75.75.75',
                                          'my_ip': mock.ANY})
    def test_conn_event_handler(self):
        """A connection failure during init_host raises
        HypervisorUnavailable and disables the compute service.
        """
        self.mox.UnsetStubs()
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        service_mock = mock.MagicMock()
        service_mock.disabled.return_value = False
        with contextlib.nested(
            mock.patch.object(drvr._host, "_connect",
                              side_effect=fakelibvirt.make_libvirtError(
                                  fakelibvirt.libvirtError,
                                  "Failed to connect to host",
                                  error_code=
                                  fakelibvirt.VIR_ERR_INTERNAL_ERROR)),
            mock.patch.object(drvr._host, "_init_events",
                              return_value=None),
            mock.patch.object(objects.Service, "get_by_compute_host",
                              return_value=service_mock)):
            # verify that the driver registers for the close callback
            # and re-connects after receiving the callback
            self.assertRaises(exception.HypervisorUnavailable,
                              drvr.init_host,
                              "wibble")
            self.assertTrue(service_mock.disabled)
    def test_command_with_broken_connection(self):
        """Driver calls raise HypervisorUnavailable once the connection
        to libvirt is broken, and the compute service gets disabled."""
        self.mox.UnsetStubs()
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        service_mock = mock.MagicMock()
        service_mock.disabled.return_value = False
        with contextlib.nested(
            mock.patch.object(drvr._host, "_connect",
                              side_effect=fakelibvirt.make_libvirtError(
                                  fakelibvirt.libvirtError,
                                  "Failed to connect to host",
                                  error_code=
                                  fakelibvirt.VIR_ERR_INTERNAL_ERROR)),
            mock.patch.object(drvr._host, "_init_events",
                              return_value=None),
            mock.patch.object(host.Host, "has_min_version",
                              return_value=True),
            mock.patch.object(drvr, "_do_quality_warnings",
                              return_value=None),
            mock.patch.object(objects.Service, "get_by_compute_host",
                              return_value=service_mock)):
            drvr.init_host("wibble")
            self.assertRaises(exception.HypervisorUnavailable,
                              drvr.get_num_instances)
            self.assertTrue(service_mock.disabled)
    def test_service_resume_after_broken_connection(self):
        """A previously disabled service is re-enabled once the libvirt
        connection works again."""
        self.mox.UnsetStubs()
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        service_mock = mock.MagicMock()
        service_mock.disabled.return_value = True
        with contextlib.nested(
            mock.patch.object(drvr._host, "_connect",
                              return_value=mock.MagicMock()),
            mock.patch.object(drvr._host, "_init_events",
                              return_value=None),
            mock.patch.object(host.Host, "has_min_version",
                              return_value=True),
            mock.patch.object(drvr, "_do_quality_warnings",
                              return_value=None),
            mock.patch.object(objects.Service, "get_by_compute_host",
                              return_value=service_mock)):
            drvr.init_host("wibble")
            drvr.get_num_instances()
            self.assertTrue(not service_mock.disabled and
                            service_mock.disabled_reason is None)
@mock.patch.object(objects.Instance, 'save')
def test_immediate_delete(self, mock_save):
def fake_get_domain(instance):
raise exception.InstanceNotFound(instance_id=instance.name)
def fake_delete_instance_files(instance):
pass
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.stubs.Set(drvr._host, 'get_domain', fake_get_domain)
self.stubs.Set(drvr, 'delete_instance_files',
fake_delete_instance_files)
instance = objects.Instance(self.context, **self.test_instance)
drvr.destroy(self.context, instance, {})
mock_save.assert_called_once_with()
@mock.patch.object(objects.Instance, 'get_by_uuid')
@mock.patch.object(objects.Instance, 'obj_load_attr', autospec=True)
@mock.patch.object(objects.Instance, 'save', autospec=True)
@mock.patch.object(libvirt_driver.LibvirtDriver, '_destroy')
@mock.patch.object(libvirt_driver.LibvirtDriver, 'delete_instance_files')
@mock.patch.object(libvirt_driver.LibvirtDriver, '_disconnect_volume')
@mock.patch.object(driver, 'block_device_info_get_mapping')
@mock.patch.object(libvirt_driver.LibvirtDriver, '_undefine_domain')
def _test_destroy_removes_disk(self, mock_undefine_domain, mock_mapping,
mock_disconnect_volume,
mock_delete_instance_files, mock_destroy,
mock_inst_save, mock_inst_obj_load_attr,
mock_get_by_uuid, volume_fail=False):
instance = objects.Instance(self.context, **self.test_instance)
vol = {'block_device_mapping': [
{'connection_info': 'dummy', 'mount_device': '/dev/sdb'}]}
mock_mapping.return_value = vol['block_device_mapping']
mock_delete_instance_files.return_value = True
mock_get_by_uuid.return_value = instance
if volume_fail:
mock_disconnect_volume.return_value = (
exception.VolumeNotFound('vol'))
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
drvr.destroy(self.context, instance, [], vol)
def test_destroy_removes_disk(self):
self._test_destroy_removes_disk(volume_fail=False)
def test_destroy_removes_disk_volume_fails(self):
self._test_destroy_removes_disk(volume_fail=True)
@mock.patch.object(libvirt_driver.LibvirtDriver, 'unplug_vifs')
@mock.patch.object(libvirt_driver.LibvirtDriver, '_destroy')
@mock.patch.object(libvirt_driver.LibvirtDriver, '_undefine_domain')
def test_destroy_not_removes_disk(self, mock_undefine_domain, mock_destroy,
mock_unplug_vifs):
instance = fake_instance.fake_instance_obj(
None, name='instancename', id=1,
uuid='875a8070-d0b9-4949-8b31-104d125c9a64')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
drvr.destroy(self.context, instance, [], None, False)
    @mock.patch.object(libvirt_driver.LibvirtDriver, 'cleanup')
    @mock.patch.object(libvirt_driver.LibvirtDriver, '_teardown_container')
    @mock.patch.object(host.Host, 'get_domain')
    def test_destroy_lxc_calls_teardown_container(self, mock_get_domain,
                                                  mock_teardown_container,
                                                  mock_cleanup):
        """destroy() of an LXC guest tears down the container after the
        domain has been destroyed."""
        self.flags(virt_type='lxc', group='libvirt')
        fake_domain = FakeVirtDomain()
        def destroy_side_effect(*args, **kwargs):
            # Simulate the guest transitioning to SHUTDOWN on destroy().
            fake_domain._info[0] = power_state.SHUTDOWN
        with mock.patch.object(fake_domain, 'destroy',
               side_effect=destroy_side_effect) as mock_domain_destroy:
            mock_get_domain.return_value = fake_domain
            instance = objects.Instance(**self.test_instance)
            drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
            network_info = []
            drvr.destroy(self.context, instance, network_info, None, False)
            mock_get_domain.assert_has_calls([mock.call(instance),
                                              mock.call(instance)])
            mock_domain_destroy.assert_called_once_with()
            mock_teardown_container.assert_called_once_with(instance)
            mock_cleanup.assert_called_once_with(self.context, instance,
                                                 network_info, None, False,
                                                 None)
    @mock.patch.object(libvirt_driver.LibvirtDriver, 'cleanup')
    @mock.patch.object(libvirt_driver.LibvirtDriver, '_teardown_container')
    @mock.patch.object(host.Host, 'get_domain')
    def test_destroy_lxc_calls_teardown_container_when_no_domain(self,
            mock_get_domain, mock_teardown_container, mock_cleanup):
        """The LXC container is torn down even when the libvirt domain
        no longer exists."""
        self.flags(virt_type='lxc', group='libvirt')
        instance = objects.Instance(**self.test_instance)
        inf_exception = exception.InstanceNotFound(instance_id=instance.name)
        mock_get_domain.side_effect = inf_exception
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        network_info = []
        drvr.destroy(self.context, instance, network_info, None, False)
        mock_get_domain.assert_has_calls([mock.call(instance),
                                          mock.call(instance)])
        mock_teardown_container.assert_called_once_with(instance)
        mock_cleanup.assert_called_once_with(self.context, instance,
                                             network_info, None, False,
                                             None)
    def test_reboot_different_ids(self):
        """Soft reboot succeeds when the domain ID changes after shutdown,
        i.e. the guest was actually recreated."""
        class FakeLoopingCall(object):
            def start(self, *a, **k):
                return self
            def wait(self):
                return None
        self.flags(wait_soft_reboot_seconds=1, group='libvirt')
        info_tuple = ('fake', 'fake', 'fake', 'also_fake')
        self.reboot_create_called = False
        # Mock domain: mox expects these exact calls in this exact order.
        mock_domain = self.mox.CreateMock(fakelibvirt.virDomain)
        mock_domain.info().AndReturn(
            (libvirt_driver.VIR_DOMAIN_RUNNING,) + info_tuple)
        mock_domain.ID().AndReturn('some_fake_id')
        mock_domain.shutdown()
        mock_domain.info().AndReturn(
            (libvirt_driver.VIR_DOMAIN_CRASHED,) + info_tuple)
        mock_domain.ID().AndReturn('some_other_fake_id')
        self.mox.ReplayAll()
        def fake_get_domain(instance):
            return mock_domain
        def fake_create_domain(**kwargs):
            self.reboot_create_called = True
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        instance = objects.Instance(**self.test_instance)
        self.stubs.Set(drvr._host, 'get_domain', fake_get_domain)
        self.stubs.Set(drvr, '_create_domain', fake_create_domain)
        self.stubs.Set(loopingcall, 'FixedIntervalLoopingCall',
                       lambda *a, **k: FakeLoopingCall())
        self.stubs.Set(pci_manager, 'get_instance_pci_devs', lambda *a: [])
        drvr.reboot(None, instance, [], 'SOFT')
        self.assertTrue(self.reboot_create_called)
    @mock.patch.object(pci_manager, 'get_instance_pci_devs')
    @mock.patch.object(loopingcall, 'FixedIntervalLoopingCall')
    @mock.patch.object(greenthread, 'sleep')
    @mock.patch.object(libvirt_driver.LibvirtDriver, '_hard_reboot')
    @mock.patch.object(host.Host, 'get_domain')
    def test_reboot_same_ids(self, mock_get_domain, mock_hard_reboot,
                             mock_sleep, mock_loopingcall,
                             mock_get_instance_pci_devs):
        """Soft reboot escalates to a hard reboot when the domain ID is
        unchanged after shutdown (the guest never actually restarted)."""
        class FakeLoopingCall(object):
            def start(self, *a, **k):
                return self
            def wait(self):
                return None
        self.flags(wait_soft_reboot_seconds=1, group='libvirt')
        info_tuple = ('fake', 'fake', 'fake', 'also_fake')
        self.reboot_hard_reboot_called = False
        # Mock domain: ID() stays constant across both info() calls.
        mock_domain = mock.Mock(fakelibvirt.virDomain)
        return_values = [(libvirt_driver.VIR_DOMAIN_RUNNING,) + info_tuple,
                         (libvirt_driver.VIR_DOMAIN_CRASHED,) + info_tuple]
        mock_domain.info.side_effect = return_values
        mock_domain.ID.return_value = 'some_fake_id'
        mock_domain.shutdown.side_effect = mock.Mock()
        def fake_hard_reboot(*args, **kwargs):
            self.reboot_hard_reboot_called = True
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        instance = objects.Instance(**self.test_instance)
        mock_get_domain.return_value = mock_domain
        mock_hard_reboot.side_effect = fake_hard_reboot
        mock_loopingcall.return_value = FakeLoopingCall()
        mock_get_instance_pci_devs.return_value = []
        drvr.reboot(None, instance, [], 'SOFT')
        self.assertTrue(self.reboot_hard_reboot_called)
@mock.patch.object(libvirt_driver.LibvirtDriver, '_hard_reboot')
@mock.patch.object(host.Host, 'get_domain')
def test_soft_reboot_libvirt_exception(self, mock_get_domain,
mock_hard_reboot):
# Tests that a hard reboot is performed when a soft reboot results
# in raising a libvirtError.
info_tuple = ('fake', 'fake', 'fake', 'also_fake')
# setup mocks
mock_virDomain = mock.Mock(fakelibvirt.virDomain)
mock_virDomain.info.return_value = (
(libvirt_driver.VIR_DOMAIN_RUNNING,) + info_tuple)
mock_virDomain.ID.return_value = 'some_fake_id'
mock_virDomain.shutdown.side_effect = fakelibvirt.libvirtError('Err')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
context = None
instance = objects.Instance(**self.test_instance)
network_info = []
mock_get_domain.return_value = mock_virDomain
drvr.reboot(context, instance, network_info, 'SOFT')
    @mock.patch.object(libvirt_driver.LibvirtDriver, '_hard_reboot')
    @mock.patch.object(host.Host, 'get_domain')
    def _test_resume_state_on_host_boot_with_state(self, state,
                                                   mock_get_domain,
                                                   mock_hard_reboot):
        """Check whether resume_state_on_host_boot hard-reboots for *state*.

        RUNNING, SUSPENDED, NOSTATE and PAUSED are left alone; any other
        power state must trigger a hard reboot.
        """
        mock_virDomain = mock.Mock(fakelibvirt.virDomain)
        mock_virDomain.info.return_value = ([state, None, None, None, None])
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        mock_get_domain.return_value = mock_virDomain
        instance = objects.Instance(**self.test_instance)
        network_info = _fake_network_info(self.stubs, 1)
        drvr.resume_state_on_host_boot(self.context, instance, network_info,
                                       block_device_info=None)
        ignored_states = (power_state.RUNNING,
                          power_state.SUSPENDED,
                          power_state.NOSTATE,
                          power_state.PAUSED)
        self.assertEqual(mock_hard_reboot.called, state not in ignored_states)
    def test_resume_state_on_host_boot_with_running_state(self):
        # RUNNING is in the ignored set: no hard reboot expected.
        self._test_resume_state_on_host_boot_with_state(power_state.RUNNING)
    def test_resume_state_on_host_boot_with_suspended_state(self):
        # SUSPENDED is in the ignored set: no hard reboot expected.
        self._test_resume_state_on_host_boot_with_state(power_state.SUSPENDED)
    def test_resume_state_on_host_boot_with_paused_state(self):
        # PAUSED is in the ignored set: no hard reboot expected.
        self._test_resume_state_on_host_boot_with_state(power_state.PAUSED)
    def test_resume_state_on_host_boot_with_nostate(self):
        # NOSTATE is in the ignored set: no hard reboot expected.
        self._test_resume_state_on_host_boot_with_state(power_state.NOSTATE)
def test_resume_state_on_host_boot_with_shutdown_state(self):
self._test_resume_state_on_host_boot_with_state(power_state.RUNNING)
    def test_resume_state_on_host_boot_with_crashed_state(self):
        # CRASHED is not in the ignored set: a hard reboot is expected.
        self._test_resume_state_on_host_boot_with_state(power_state.CRASHED)
@mock.patch.object(libvirt_driver.LibvirtDriver, '_hard_reboot')
@mock.patch.object(host.Host, 'get_domain')
def test_resume_state_on_host_boot_with_instance_not_found_on_driver(
self, mock_get_domain, mock_hard_reboot):
instance = objects.Instance(**self.test_instance)
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
mock_get_domain.side_effect = exception.InstanceNotFound(
instance_id='fake')
drvr.resume_state_on_host_boot(self.context, instance, network_info=[],
block_device_info=None)
mock_hard_reboot.assert_called_once_with(self.context,
instance, [], None)
    @mock.patch('nova.virt.libvirt.LibvirtDriver.get_info')
    @mock.patch('nova.virt.libvirt.LibvirtDriver._create_domain_and_network')
    @mock.patch('nova.virt.libvirt.LibvirtDriver._create_images_and_backing')
    @mock.patch('nova.virt.libvirt.LibvirtDriver._get_guest_xml')
    @mock.patch('nova.virt.libvirt.LibvirtDriver._get_instance_disk_info')
    @mock.patch('nova.virt.libvirt.LibvirtDriver._destroy')
    def test_hard_reboot(self, mock_destroy, mock_get_instance_disk_info,
                         mock_get_guest_xml, mock_create_images_and_backing,
                         mock_create_domain_and_network, mock_get_info):
        """Exercise _hard_reboot end-to-end with the destroy/create paths
        mocked out; get_info reports SHUTDOWN first, then RUNNING."""
        self.context.auth_token = True  # any non-None value will suffice
        instance = objects.Instance(**self.test_instance)
        network_info = _fake_network_info(self.stubs, 1)
        block_device_info = None
        dummyxml = ("<domain type='kvm'><name>instance-0000000a</name>"
                    "<devices>"
                    "<disk type='file'><driver name='qemu' type='raw'/>"
                    "<source file='/test/disk'/>"
                    "<target dev='vda' bus='virtio'/></disk>"
                    "<disk type='file'><driver name='qemu' type='qcow2'/>"
                    "<source file='/test/disk.local'/>"
                    "<target dev='vdb' bus='virtio'/></disk>"
                    "</devices></domain>")
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        return_values = [hardware.InstanceInfo(state=power_state.SHUTDOWN),
                         hardware.InstanceInfo(state=power_state.RUNNING)]
        mock_get_info.side_effect = return_values
        disk_info = [{"virt_disk_size": 2}]
        mock_get_guest_xml.return_value = dummyxml
        mock_get_instance_disk_info.return_value = disk_info
        drvr._hard_reboot(self.context, instance, network_info,
                          block_device_info)
    @mock.patch('nova.openstack.common.fileutils.ensure_tree')
    @mock.patch('nova.openstack.common.loopingcall.FixedIntervalLoopingCall')
    @mock.patch('nova.pci.manager.get_instance_pci_devs')
    @mock.patch('nova.virt.libvirt.LibvirtDriver._prepare_pci_devices_for_use')
    @mock.patch('nova.virt.libvirt.LibvirtDriver._create_domain_and_network')
    @mock.patch('nova.virt.libvirt.LibvirtDriver._create_images_and_backing')
    @mock.patch('nova.virt.libvirt.LibvirtDriver._get_instance_disk_info')
    @mock.patch('nova.virt.libvirt.utils.write_to_file')
    @mock.patch('nova.virt.libvirt.utils.get_instance_path')
    @mock.patch('nova.virt.libvirt.LibvirtDriver._get_guest_config')
    @mock.patch('nova.virt.libvirt.blockinfo.get_disk_info')
    @mock.patch('nova.virt.libvirt.LibvirtDriver._destroy')
    def test_hard_reboot_does_not_call_glance_show(self,
            mock_destroy, mock_get_disk_info, mock_get_guest_config,
            mock_get_instance_path, mock_write_to_file,
            mock_get_instance_disk_info, mock_create_images_and_backing,
            mock_create_domand_and_network, mock_prepare_pci_devices_for_use,
            mock_get_instance_pci_devs, mock_looping_call, mock_ensure_tree):
        """For a hard reboot, we shouldn't need an additional call to glance
        to get the image metadata.

        This is important for automatically spinning up instances on a
        host-reboot, since we won't have a user request context that'll allow
        the Glance request to go through. We have to rely on the cached image
        metadata, instead.

        https://bugs.launchpad.net/nova/+bug/1339386
        """
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        instance = objects.Instance(**self.test_instance)
        network_info = mock.MagicMock()
        block_device_info = mock.MagicMock()
        mock_get_disk_info.return_value = {}
        mock_get_guest_config.return_value = mock.MagicMock()
        mock_get_instance_path.return_value = '/foo'
        mock_looping_call.return_value = mock.MagicMock()
        drvr._image_api = mock.MagicMock()
        drvr._hard_reboot(self.context, instance, network_info,
                          block_device_info)
        # The whole point of the test: the image API must not be consulted.
        self.assertFalse(drvr._image_api.get.called)
        mock_ensure_tree.assert_called_once_with('/foo')
    @mock.patch.object(time, 'sleep')
    @mock.patch.object(libvirt_driver.LibvirtDriver, '_create_domain')
    @mock.patch.object(host.Host, 'get_domain')
    def _test_clean_shutdown(self, mock_get_domain, mock_create_domain,
                             mock_sleep, seconds_to_shutdown,
                             timeout, retry_interval,
                             shutdown_attempts, succeeds):
        """Drive _clean_shutdown against a simulated guest.

        :param seconds_to_shutdown: how long the fake guest takes to stop
        :param timeout: timeout argument passed to _clean_shutdown
        :param retry_interval: retry interval passed to _clean_shutdown
        :param shutdown_attempts: expected number of shutdown() calls
        :param succeeds: expected return value of _clean_shutdown
        """
        info_tuple = ('fake', 'fake', 'fake', 'also_fake')
        shutdown_count = []
        # Mock domain: build the per-poll info() results and the matching
        # shutdown() side effects (one extra shutdown per retry interval).
        mock_domain = mock.Mock(fakelibvirt.virDomain)
        return_infos = [(libvirt_driver.VIR_DOMAIN_RUNNING,) + info_tuple]
        return_shutdowns = [shutdown_count.append("shutdown")]
        retry_countdown = retry_interval
        for x in range(min(seconds_to_shutdown, timeout)):
            return_infos.append(
                (libvirt_driver.VIR_DOMAIN_RUNNING,) + info_tuple)
            if retry_countdown == 0:
                return_shutdowns.append(shutdown_count.append("shutdown"))
                retry_countdown = retry_interval
            else:
                retry_countdown -= 1
        if seconds_to_shutdown < timeout:
            return_infos.append(
                (libvirt_driver.VIR_DOMAIN_SHUTDOWN,) + info_tuple)
        mock_domain.info.side_effect = return_infos
        mock_domain.shutdown.side_effect = return_shutdowns
        def fake_create_domain(**kwargs):
            self.reboot_create_called = True
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        instance = objects.Instance(**self.test_instance)
        mock_get_domain.return_value = mock_domain
        mock_create_domain.side_effect = fake_create_domain
        result = drvr._clean_shutdown(instance, timeout, retry_interval)
        self.assertEqual(succeeds, result)
        self.assertEqual(shutdown_attempts, len(shutdown_count))
    def test_clean_shutdown_first_time(self):
        # Guest stops before the timeout: one shutdown call suffices.
        self._test_clean_shutdown(seconds_to_shutdown=2,
                                  timeout=5,
                                  retry_interval=3,
                                  shutdown_attempts=1,
                                  succeeds=True)
    def test_clean_shutdown_with_retry(self):
        # Guest needs longer than one retry interval: shutdown is retried.
        self._test_clean_shutdown(seconds_to_shutdown=4,
                                  timeout=5,
                                  retry_interval=3,
                                  shutdown_attempts=2,
                                  succeeds=True)
    def test_clean_shutdown_failure(self):
        # Guest never stops within the timeout: the clean shutdown fails.
        self._test_clean_shutdown(seconds_to_shutdown=6,
                                  timeout=5,
                                  retry_interval=3,
                                  shutdown_attempts=2,
                                  succeeds=False)
    def test_clean_shutdown_no_wait(self):
        # Zero timeout: a single shutdown attempt, no waiting, and failure.
        self._test_clean_shutdown(seconds_to_shutdown=6,
                                  timeout=0,
                                  retry_interval=3,
                                  shutdown_attempts=1,
                                  succeeds=False)
    @mock.patch.object(FakeVirtDomain, 'attachDeviceFlags')
    @mock.patch.object(FakeVirtDomain, 'ID', return_value=1)
    @mock.patch.object(utils, 'get_image_from_system_metadata',
                       return_value=None)
    def test_attach_sriov_ports(self,
                                mock_get_image_metadata,
                                mock_ID,
                                mock_attachDevice):
        """_attach_sriov_ports attaches a device for a direct-type vNIC."""
        instance = objects.Instance(**self.test_instance)
        network_info = _fake_network_info(self.stubs, 1)
        network_info[0]['vnic_type'] = network_model.VNIC_TYPE_DIRECT
        guest = libvirt_guest.Guest(FakeVirtDomain())
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        drvr._attach_sriov_ports(self.context, instance, guest, network_info)
        mock_get_image_metadata.assert_called_once_with(
            instance.system_metadata)
        self.assertTrue(mock_attachDevice.called)
    @mock.patch.object(FakeVirtDomain, 'attachDeviceFlags')
    @mock.patch.object(FakeVirtDomain, 'ID', return_value=1)
    @mock.patch.object(utils, 'get_image_from_system_metadata',
                       return_value=None)
    def test_attach_sriov_ports_with_info_cache(self,
                                                mock_get_image_metadata,
                                                mock_ID,
                                                mock_attachDevice):
        """When no network_info is passed, _attach_sriov_ports falls back
        to the instance's info cache."""
        instance = objects.Instance(**self.test_instance)
        network_info = _fake_network_info(self.stubs, 1)
        network_info[0]['vnic_type'] = network_model.VNIC_TYPE_DIRECT
        instance.info_cache = objects.InstanceInfoCache(
            network_info=network_info)
        guest = libvirt_guest.Guest(FakeVirtDomain())
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        drvr._attach_sriov_ports(self.context, instance, guest, None)
        mock_get_image_metadata.assert_called_once_with(
            instance.system_metadata)
        self.assertTrue(mock_attachDevice.called)
    @mock.patch.object(host.Host,
                       'has_min_version', return_value=True)
    @mock.patch.object(FakeVirtDomain, 'detachDeviceFlags')
    @mock.patch.object(utils, 'get_image_from_system_metadata',
                       return_value=None)
    def test_detach_sriov_ports(self,
                                mock_get_image_metadata,
                                mock_detachDeviceFlags,
                                mock_has_min_version):
        """_detach_sriov_ports detaches the device for a direct-type vNIC
        taken from the instance's info cache."""
        instance = objects.Instance(**self.test_instance)
        network_info = _fake_network_info(self.stubs, 1)
        network_info[0]['vnic_type'] = network_model.VNIC_TYPE_DIRECT
        instance.info_cache = objects.InstanceInfoCache(
            network_info=network_info)
        domain = FakeVirtDomain()
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        guest = libvirt_guest.Guest(domain)
        drvr._detach_sriov_ports(self.context, instance, guest)
        mock_get_image_metadata.assert_called_once_with(
            instance.system_metadata)
        self.assertTrue(mock_detachDeviceFlags.called)
    def test_resume(self):
        """resume() rebuilds the domain from the existing XML and
        re-attaches the instance's PCI devices."""
        dummyxml = ("<domain type='kvm'><name>instance-0000000a</name>"
                    "<devices>"
                    "<disk type='file'><driver name='qemu' type='raw'/>"
                    "<source file='/test/disk'/>"
                    "<target dev='vda' bus='virtio'/></disk>"
                    "<disk type='file'><driver name='qemu' type='qcow2'/>"
                    "<source file='/test/disk.local'/>"
                    "<target dev='vdb' bus='virtio'/></disk>"
                    "</devices></domain>")
        instance = objects.Instance(**self.test_instance)
        network_info = _fake_network_info(self.stubs, 1)
        block_device_info = None
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        guest = libvirt_guest.Guest('fake_dom')
        with contextlib.nested(
            mock.patch.object(drvr, '_get_existing_domain_xml',
                              return_value=dummyxml),
            mock.patch.object(drvr, '_create_domain_and_network',
                              return_value=guest),
            mock.patch.object(drvr, '_attach_pci_devices'),
            mock.patch.object(pci_manager, 'get_instance_pci_devs',
                              return_value='fake_pci_devs'),
            mock.patch.object(utils, 'get_image_from_system_metadata'),
            mock.patch.object(blockinfo, 'get_disk_info'),
        ) as (_get_existing_domain_xml, _create_domain_and_network,
              _attach_pci_devices, get_instance_pci_devs, get_image_metadata,
              get_disk_info):
            get_image_metadata.return_value = {'bar': 234}
            disk_info = {'foo': 123}
            get_disk_info.return_value = disk_info
            drvr.resume(self.context, instance, network_info,
                        block_device_info)
            _get_existing_domain_xml.assert_has_calls([mock.call(instance,
                                                       network_info,
                                                       block_device_info)])
            _create_domain_and_network.assert_has_calls([mock.call(
                                        self.context, dummyxml,
                                        instance, network_info, disk_info,
                                        block_device_info=block_device_info,
                                        vifs_already_plugged=True)])
            _attach_pci_devices.assert_has_calls([mock.call(guest,
                                                 'fake_pci_devs')])
@mock.patch.object(host.Host, 'get_domain')
@mock.patch.object(libvirt_driver.LibvirtDriver, 'get_info')
@mock.patch.object(libvirt_driver.LibvirtDriver, 'delete_instance_files')
@mock.patch.object(objects.Instance, 'save')
def test_destroy_undefines(self, mock_save, mock_delete_instance_files,
mock_get_info, mock_get_domain):
dom_mock = mock.MagicMock()
dom_mock.undefineFlags.return_value = 1
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
mock_get_domain.return_value = dom_mock
mock_get_info.return_value = hardware.InstanceInfo(
state=power_state.SHUTDOWN, id=-1)
mock_delete_instance_files.return_value = None
instance = objects.Instance(self.context, **self.test_instance)
drvr.destroy(self.context, instance, [])
mock_save.assert_called_once_with()
@mock.patch.object(rbd_utils, 'RBDDriver')
def test_cleanup_rbd(self, mock_driver):
driver = mock_driver.return_value
driver.cleanup_volumes = mock.Mock()
fake_instance = {'uuid': '875a8070-d0b9-4949-8b31-104d125c9a64'}
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
drvr._cleanup_rbd(fake_instance)
driver.cleanup_volumes.assert_called_once_with(fake_instance)
@mock.patch.object(objects.Instance, 'save')
def test_destroy_undefines_no_undefine_flags(self, mock_save):
mock = self.mox.CreateMock(fakelibvirt.virDomain)
mock.ID()
mock.destroy()
mock.undefineFlags(1).AndRaise(fakelibvirt.libvirtError('Err'))
mock.ID().AndReturn(123)
mock.undefine()
self.mox.ReplayAll()
def fake_get_domain(instance):
return mock
def fake_get_info(instance_name):
return hardware.InstanceInfo(state=power_state.SHUTDOWN, id=-1)
def fake_delete_instance_files(instance):
return None
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.stubs.Set(drvr._host, 'get_domain', fake_get_domain)
self.stubs.Set(drvr, 'get_info', fake_get_info)
self.stubs.Set(drvr, 'delete_instance_files',
fake_delete_instance_files)
instance = objects.Instance(self.context, **self.test_instance)
drvr.destroy(self.context, instance, [])
mock_save.assert_called_once_with()
    @mock.patch.object(objects.Instance, 'save')
    def test_destroy_undefines_no_attribute_with_managed_save(self, mock_save):
        """When undefineFlags is unavailable, a managed save image is
        removed before the plain undefine()."""
        mock = self.mox.CreateMock(fakelibvirt.virDomain)
        mock.ID()
        mock.destroy()
        mock.undefineFlags(1).AndRaise(AttributeError())
        mock.hasManagedSaveImage(0).AndReturn(True)
        mock.managedSaveRemove(0)
        mock.undefine()
        self.mox.ReplayAll()
        def fake_get_domain(instance):
            return mock
        def fake_get_info(instance_name):
            return hardware.InstanceInfo(state=power_state.SHUTDOWN, id=-1)
        def fake_delete_instance_files(instance):
            return None
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        self.stubs.Set(drvr._host, 'get_domain', fake_get_domain)
        self.stubs.Set(drvr, 'get_info', fake_get_info)
        self.stubs.Set(drvr, 'delete_instance_files',
                       fake_delete_instance_files)
        instance = objects.Instance(self.context, **self.test_instance)
        drvr.destroy(self.context, instance, [])
        mock_save.assert_called_once_with()
    @mock.patch.object(objects.Instance, 'save')
    def test_destroy_undefines_no_attribute_no_managed_save(self, mock_save):
        """undefine() still happens when neither undefineFlags nor
        managed-save support is available."""
        mock = self.mox.CreateMock(fakelibvirt.virDomain)
        mock.ID()
        mock.destroy()
        mock.undefineFlags(1).AndRaise(AttributeError())
        mock.hasManagedSaveImage(0).AndRaise(AttributeError())
        mock.undefine()
        self.mox.ReplayAll()
        def fake_get_domain(self, instance):
            return mock
        def fake_get_info(instance_name):
            return hardware.InstanceInfo(state=power_state.SHUTDOWN)
        def fake_delete_instance_files(instance):
            return None
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        self.stubs.Set(host.Host, 'get_domain', fake_get_domain)
        self.stubs.Set(drvr, 'get_info', fake_get_info)
        self.stubs.Set(drvr, 'delete_instance_files',
                       fake_delete_instance_files)
        instance = objects.Instance(self.context, **self.test_instance)
        drvr.destroy(self.context, instance, [])
        mock_save.assert_called_once_with()
    def test_destroy_timed_out(self):
        """A libvirt operation-timeout while destroying the guest surfaces
        as InstancePowerOffFailure."""
        mock = self.mox.CreateMock(fakelibvirt.virDomain)
        mock.ID()
        mock.destroy().AndRaise(fakelibvirt.libvirtError("timed out"))
        self.mox.ReplayAll()
        def fake_get_domain(self, instance):
            return mock
        def fake_get_error_code(self):
            return fakelibvirt.VIR_ERR_OPERATION_TIMEOUT
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        self.stubs.Set(host.Host, 'get_domain', fake_get_domain)
        self.stubs.Set(fakelibvirt.libvirtError, 'get_error_code',
                       fake_get_error_code)
        instance = objects.Instance(**self.test_instance)
        self.assertRaises(exception.InstancePowerOffFailure,
                          drvr.destroy, self.context, instance, [])
    def test_private_destroy_not_found(self):
        """_destroy tolerates the domain disappearing mid-operation."""
        ex = fakelibvirt.make_libvirtError(
                fakelibvirt.libvirtError,
                "No such domain",
                error_code=fakelibvirt.VIR_ERR_NO_DOMAIN)
        mock = self.mox.CreateMock(fakelibvirt.virDomain)
        mock.ID()
        mock.destroy().AndRaise(ex)
        mock.info().AndRaise(ex)
        self.mox.ReplayAll()
        def fake_get_domain(instance):
            return mock
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        self.stubs.Set(drvr._host, 'get_domain', fake_get_domain)
        instance = objects.Instance(**self.test_instance)
        # NOTE(vish): verifies destroy doesn't raise if the instance disappears
        drvr._destroy(instance)
    def test_private_destroy_lxc_processes_refused_to_die(self):
        """LXC: a 'processes refused to die' error is tolerated when the
        guest reports SHUTDOWN afterwards."""
        self.flags(virt_type='lxc', group='libvirt')
        ex = fakelibvirt.make_libvirtError(
                fakelibvirt.libvirtError, "",
                error_message="internal error: Some processes refused to die",
                error_code=fakelibvirt.VIR_ERR_INTERNAL_ERROR)
        conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        with mock.patch.object(conn._host, 'get_domain') as mock_get_domain, \
             mock.patch.object(conn, 'get_info') as mock_get_info:
            mock_domain = mock.MagicMock()
            mock_domain.ID.return_value = 1
            mock_get_domain.return_value = mock_domain
            mock_domain.destroy.side_effect = ex
            mock_info = mock.MagicMock()
            mock_info.id = 1
            mock_info.state = power_state.SHUTDOWN
            mock_get_info.return_value = mock_info
            instance = objects.Instance(**self.test_instance)
            conn._destroy(instance)
def test_private_destroy_processes_refused_to_die_still_raises(self):
ex = fakelibvirt.make_libvirtError(
fakelibvirt.libvirtError, "",
error_message="internal error: Some processes refused to die",
error_code=fakelibvirt.VIR_ERR_INTERNAL_ERROR)
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
with mock.patch.object(conn._host, 'get_domain') as mock_get_domain:
mock_domain = mock.MagicMock()
mock_domain.ID.return_value = 1
mock_get_domain.return_value = mock_domain
mock_domain.destroy.side_effect = ex
instance = objects.Instance(**self.test_instance)
self.assertRaises(fakelibvirt.libvirtError, conn._destroy,
instance)
    def test_private_destroy_ebusy_timeout(self):
        # Tests that _destroy will retry 3 times to destroy the guest when an
        # EBUSY is raised, but eventually times out and raises the libvirtError
        ex = fakelibvirt.make_libvirtError(
                fakelibvirt.libvirtError,
                ("Failed to terminate process 26425 with SIGKILL: "
                 "Device or resource busy"),
                error_code=fakelibvirt.VIR_ERR_SYSTEM_ERROR,
                int1=errno.EBUSY)
        # poweroff raises EBUSY on every attempt, so all retries fail.
        mock_guest = mock.Mock(libvirt_guest.Guest, id=1)
        mock_guest.poweroff = mock.Mock(side_effect=ex)
        instance = objects.Instance(**self.test_instance)
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        with mock.patch.object(drvr._host, 'get_guest',
                               return_value=mock_guest):
            self.assertRaises(fakelibvirt.libvirtError, drvr._destroy,
                              instance)
        self.assertEqual(3, mock_guest.poweroff.call_count)
    def test_private_destroy_ebusy_multiple_attempt_ok(self):
        # Tests that the _destroy attempt loop is broken when EBUSY is no
        # longer raised.
        ex = fakelibvirt.make_libvirtError(
                fakelibvirt.libvirtError,
                ("Failed to terminate process 26425 with SIGKILL: "
                 "Device or resource busy"),
                error_code=fakelibvirt.VIR_ERR_SYSTEM_ERROR,
                int1=errno.EBUSY)
        # First poweroff raises EBUSY, the second succeeds.
        mock_guest = mock.Mock(libvirt_guest.Guest, id=1)
        mock_guest.poweroff = mock.Mock(side_effect=[ex, None])
        inst_info = hardware.InstanceInfo(power_state.SHUTDOWN, id=1)
        instance = objects.Instance(**self.test_instance)
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        with mock.patch.object(drvr._host, 'get_guest',
                               return_value=mock_guest):
            with mock.patch.object(drvr, 'get_info', return_value=inst_info):
                drvr._destroy(instance)
        self.assertEqual(2, mock_guest.poweroff.call_count)
def test_undefine_domain_with_not_found_instance(self):
def fake_get_domain(self, instance):
raise exception.InstanceNotFound(instance_id=instance.name)
self.stubs.Set(host.Host, 'get_domain', fake_get_domain)
self.mox.StubOutWithMock(fakelibvirt.libvirtError, "get_error_code")
self.mox.ReplayAll()
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
instance = objects.Instance(**self.test_instance)
# NOTE(wenjianhn): verifies undefine doesn't raise if the
# instance disappears
drvr._undefine_domain(instance)
    @mock.patch.object(host.Host, "list_instance_domains")
    def test_disk_over_committed_size_total(self, mock_list):
        # Ensure the over-committed sizes of all domains are summed.
        class DiagFakeDomain(object):
            def __init__(self, name):
                self._name = name

            def ID(self):
                return 1

            def name(self):
                return self._name

            def UUIDString(self):
                return "19479fee-07a5-49bb-9138-d3738280d63c"

            def XMLDesc(self, flags):
                return "<domain/>"

        mock_list.return_value = [
            DiagFakeDomain("instance0000001"),
            DiagFakeDomain("instance0000002")]
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        fake_disks = {'instance0000001':
                      [{'type': 'qcow2', 'path': '/somepath/disk1',
                        'virt_disk_size': '10737418240',
                        'backing_file': '/somepath/disk1',
                        'disk_size': '83886080',
                        'over_committed_disk_size': '10653532160'}],
                      'instance0000002':
                      [{'type': 'raw', 'path': '/somepath/disk2',
                        'virt_disk_size': '0',
                        'backing_file': '/somepath/disk2',
                        'disk_size': '10737418240',
                        'over_committed_disk_size': '0'}]}
        def get_info(instance_name, xml, **kwargs):
            return fake_disks.get(instance_name)
        with mock.patch.object(drvr,
                               "_get_instance_disk_info") as mock_info:
            mock_info.side_effect = get_info
            result = drvr._get_disk_over_committed_size_total()
            # 10653532160 + 0: only the first instance over-commits.
            self.assertEqual(result, 10653532160)
            mock_list.assert_called_with()
            self.assertTrue(mock_info.called)
    @mock.patch.object(host.Host, "list_instance_domains")
    def test_disk_over_committed_size_total_eperm(self, mock_list):
        # An EACCES while reading one instance's disks is skipped rather
        # than failing the whole calculation.
        class DiagFakeDomain(object):
            def __init__(self, name):
                self._name = name

            def ID(self):
                return 1

            def name(self):
                return self._name

            def UUIDString(self):
                return "19479fee-07a5-49bb-9138-d3738280d63c"

            def XMLDesc(self, flags):
                return "<domain/>"

        mock_list.return_value = [
            DiagFakeDomain("instance0000001"),
            DiagFakeDomain("instance0000002")]
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        fake_disks = {'instance0000001':
                      [{'type': 'qcow2', 'path': '/somepath/disk1',
                        'virt_disk_size': '10737418240',
                        'backing_file': '/somepath/disk1',
                        'disk_size': '83886080',
                        'over_committed_disk_size': '10653532160'}],
                      'instance0000002':
                      [{'type': 'raw', 'path': '/somepath/disk2',
                        'virt_disk_size': '0',
                        'backing_file': '/somepath/disk2',
                        'disk_size': '10737418240',
                        'over_committed_disk_size': '21474836480'}]}
        def side_effect(name, dom):
            if name == 'instance0000001':
                raise OSError(errno.EACCES, 'Permission denied')
            if name == 'instance0000002':
                return fake_disks.get(name)
        get_disk_info = mock.Mock()
        get_disk_info.side_effect = side_effect
        drvr._get_instance_disk_info = get_disk_info
        result = drvr._get_disk_over_committed_size_total()
        # Only instance0000002's size is counted; instance0000001 errored.
        self.assertEqual(21474836480, result)
        mock_list.assert_called_with()
    @mock.patch.object(host.Host, "list_instance_domains",
                       return_value=[mock.MagicMock(name='foo')])
    @mock.patch.object(libvirt_driver.LibvirtDriver, "_get_instance_disk_info",
                       side_effect=exception.VolumeBDMPathNotFound(path='bar'))
    def test_disk_over_committed_size_total_bdm_not_found(self,
                                                          mock_get_disk_info,
                                                          mock_list_domains):
        # Tests that we handle VolumeBDMPathNotFound gracefully: the
        # affected domain is skipped and the total falls back to zero.
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        self.assertEqual(0, drvr._get_disk_over_committed_size_total())
    def test_cpu_info(self):
        """_get_cpu_info reflects the host CPU from libvirt capabilities."""
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        # Stub out host capabilities with a fixed AMD Opteron_G4 host CPU
        # and two guest archs; only the host CPU matters for this test.
        def get_host_capabilities_stub(self):
            cpu = vconfig.LibvirtConfigCPU()
            cpu.model = "Opteron_G4"
            cpu.vendor = "AMD"
            cpu.arch = arch.X86_64
            cpu.cores = 2
            cpu.threads = 1
            cpu.sockets = 4
            cpu.add_feature(vconfig.LibvirtConfigCPUFeature("extapic"))
            cpu.add_feature(vconfig.LibvirtConfigCPUFeature("3dnow"))
            caps = vconfig.LibvirtConfigCaps()
            caps.host = vconfig.LibvirtConfigCapsHost()
            caps.host.cpu = cpu
            guest = vconfig.LibvirtConfigGuest()
            guest.ostype = vm_mode.HVM
            guest.arch = arch.X86_64
            guest.domtype = ["kvm"]
            caps.guests.append(guest)
            guest = vconfig.LibvirtConfigGuest()
            guest.ostype = vm_mode.HVM
            guest.arch = arch.I686
            guest.domtype = ["kvm"]
            caps.guests.append(guest)
            return caps
        self.stubs.Set(host.Host, "get_capabilities",
                       get_host_capabilities_stub)
        want = {"vendor": "AMD",
                "features": set(["extapic", "3dnow"]),
                "model": "Opteron_G4",
                "arch": arch.X86_64,
                "topology": {"cores": 2, "threads": 1, "sockets": 4}}
        got = drvr._get_cpu_info()
        self.assertEqual(want, got)
    def test_get_pcidev_info(self):
        """_get_pcidev_info classifies PFs and VFs from node device XML."""
        # Serve canned node-device XML instead of querying libvirt.
        def fake_nodeDeviceLookupByName(self, name):
            return FakeNodeDevice(_fake_NodeDevXml[name])
        self.mox.StubOutWithMock(host.Host, 'device_lookup_by_name')
        host.Host.device_lookup_by_name = fake_nodeDeviceLookupByName
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        # A physical function: no phys_function link, dev_type type-PF.
        actualvf = drvr._get_pcidev_info("pci_0000_04_00_3")
        expect_vf = {
            "dev_id": "pci_0000_04_00_3",
            "address": "0000:04:00.3",
            "product_id": '1521',
            "numa_node": None,
            "vendor_id": '8086',
            "label": 'label_8086_1521',
            "dev_type": 'type-PF',
            }
        self.assertEqual(expect_vf, actualvf)
        # A virtual function without NUMA info.
        actualvf = drvr._get_pcidev_info("pci_0000_04_10_7")
        expect_vf = {
            "dev_id": "pci_0000_04_10_7",
            "address": "0000:04:10.7",
            "product_id": '1520',
            "numa_node": None,
            "vendor_id": '8086',
            "label": 'label_8086_1520',
            "dev_type": 'type-VF',
            "phys_function": '0000:04:00.3',
            }
        self.assertEqual(expect_vf, actualvf)
        # A virtual function that reports a NUMA node.
        actualvf = drvr._get_pcidev_info("pci_0000_04_11_7")
        expect_vf = {
            "dev_id": "pci_0000_04_11_7",
            "address": "0000:04:11.7",
            "product_id": '1520',
            "vendor_id": '8086',
            "numa_node": 0,
            "label": 'label_8086_1520',
            "dev_type": 'type-VF',
            "phys_function": '0000:04:00.3',
            }
        self.assertEqual(expect_vf, actualvf)
    def test_list_devices_not_supported(self):
        """VIR_ERR_NO_SUPPORT from listDevices yields '[]'; others raise."""
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        # Handle just the NO_SUPPORT error
        not_supported_exc = fakelibvirt.make_libvirtError(
                fakelibvirt.libvirtError,
                'this function is not supported by the connection driver:'
                ' virNodeNumOfDevices',
                error_code=fakelibvirt.VIR_ERR_NO_SUPPORT)
        with mock.patch.object(drvr._conn, 'listDevices',
                               side_effect=not_supported_exc):
            self.assertEqual('[]', drvr._get_pci_passthrough_devices())
        # We cache not supported status to avoid emitting too many logging
        # messages. Clear this value to test the other exception case.
        del drvr._list_devices_supported
        # Other errors should not be caught
        other_exc = fakelibvirt.make_libvirtError(
            fakelibvirt.libvirtError,
            'other exc',
            error_code=fakelibvirt.VIR_ERR_NO_DOMAIN)
        with mock.patch.object(drvr._conn, 'listDevices',
                               side_effect=other_exc):
            self.assertRaises(fakelibvirt.libvirtError,
                              drvr._get_pci_passthrough_devices)
    def test_get_pci_passthrough_devices(self):
        """The device list is returned as JSON with PF/VF details."""
        # Fake the libvirt device enumeration and per-device XML lookup.
        def fakelistDevices(caps, fakeargs=0):
            return ['pci_0000_04_00_3', 'pci_0000_04_10_7',
                    'pci_0000_04_11_7']
        self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, '_conn')
        libvirt_driver.LibvirtDriver._conn.listDevices = fakelistDevices
        def fake_nodeDeviceLookupByName(self, name):
            return FakeNodeDevice(_fake_NodeDevXml[name])
        self.mox.StubOutWithMock(host.Host, 'device_lookup_by_name')
        host.Host.device_lookup_by_name = fake_nodeDeviceLookupByName
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        actjson = drvr._get_pci_passthrough_devices()
        expectvfs = [
            {
                "dev_id": "pci_0000_04_00_3",
                "address": "0000:04:00.3",
                "product_id": '1521',
                "vendor_id": '8086',
                "dev_type": 'type-PF',
                "phys_function": None,
                "numa_node": None},
            {
                "dev_id": "pci_0000_04_10_7",
                "domain": 0,
                "address": "0000:04:10.7",
                "product_id": '1520',
                "vendor_id": '8086',
                "numa_node": None,
                "dev_type": 'type-VF',
                "phys_function": [('0x0000', '0x04', '0x00', '0x3')]},
            {
                "dev_id": "pci_0000_04_11_7",
                "domain": 0,
                "address": "0000:04:11.7",
                "product_id": '1520',
                "vendor_id": '8086',
                "numa_node": 0,
                "dev_type": 'type-VF',
                "phys_function": [('0x0000', '0x04', '0x00', '0x3')],
            }
        ]
        actualvfs = jsonutils.loads(actjson)
        # Compare field by field; phys_function/virt_functions/label have
        # representations that differ between the raw XML and JSON forms.
        for dev in range(len(actualvfs)):
            for key in actualvfs[dev].keys():
                if key not in ['phys_function', 'virt_functions', 'label']:
                    self.assertEqual(expectvfs[dev][key], actualvfs[dev][key])
def _fake_caps_numa_topology(self,
cells_per_host=4,
sockets_per_cell=1,
cores_per_socket=1,
threads_per_core=2,
kb_mem=1048576):
# Generate mempages list per cell
cell_mempages = list()
for cellid in range(cells_per_host):
mempages_0 = vconfig.LibvirtConfigCapsNUMAPages()
mempages_0.size = 4
mempages_0.total = 1024 * cellid
mempages_1 = vconfig.LibvirtConfigCapsNUMAPages()
mempages_1.size = 2048
mempages_1.total = 0 + cellid
cell_mempages.append([mempages_0, mempages_1])
topology = fakelibvirt.HostInfo._gen_numa_topology(cells_per_host,
sockets_per_cell,
cores_per_socket,
threads_per_core,
kb_mem=kb_mem,
numa_mempages_list=cell_mempages)
return topology
    def _test_get_host_numa_topology(self, mempages):
        """Check _get_host_numa_topology against the fake 4-cell caps.

        Each cell's cpuset is intersected with the configured
        vcpu_pin_set and the online cpus, leaving cpus 0,1 in cell 0,
        cpu 3 in cell 1 and nothing in cells 2 and 3.
        """
        caps = vconfig.LibvirtConfigCaps()
        caps.host = vconfig.LibvirtConfigCapsHost()
        caps.host.cpu = vconfig.LibvirtConfigCPU()
        caps.host.cpu.arch = arch.X86_64
        caps.host.topology = self._fake_caps_numa_topology()
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        expected_topo_dict = {'cells': [
                                {'cpus': '0,1', 'cpu_usage': 0,
                                  'mem': {'total': 256, 'used': 0},
                                  'id': 0},
                                {'cpus': '3', 'cpu_usage': 0,
                                  'mem': {'total': 256, 'used': 0},
                                  'id': 1},
                                {'cpus': '', 'cpu_usage': 0,
                                  'mem': {'total': 256, 'used': 0},
                                  'id': 2},
                                {'cpus': '', 'cpu_usage': 0,
                                  'mem': {'total': 256, 'used': 0},
                                  'id': 3}]}
        with contextlib.nested(
                mock.patch.object(host.Host, "get_capabilities",
                                  return_value=caps),
                mock.patch.object(
                    hardware, 'get_vcpu_pin_set',
                    return_value=set([0, 1, 3, 4, 5])),
                mock.patch.object(host.Host, 'get_online_cpus',
                                  return_value=set([0, 1, 2, 3, 6])),
                ):
            got_topo = drvr._get_host_numa_topology()
            got_topo_dict = got_topo._to_dict()
            self.assertThat(
                    expected_topo_dict, matchers.DictMatches(got_topo_dict))
            if mempages:
                # cells 0
                self.assertEqual(4, got_topo.cells[0].mempages[0].size_kb)
                self.assertEqual(0, got_topo.cells[0].mempages[0].total)
                self.assertEqual(2048, got_topo.cells[0].mempages[1].size_kb)
                self.assertEqual(0, got_topo.cells[0].mempages[1].total)
                # cells 1
                self.assertEqual(4, got_topo.cells[1].mempages[0].size_kb)
                self.assertEqual(1024, got_topo.cells[1].mempages[0].total)
                self.assertEqual(2048, got_topo.cells[1].mempages[1].size_kb)
                self.assertEqual(1, got_topo.cells[1].mempages[1].total)
            else:
                # Old libvirt: no mempages info is exposed at all.
                self.assertEqual([], got_topo.cells[0].mempages)
                self.assertEqual([], got_topo.cells[1].mempages)
            self.assertEqual(expected_topo_dict, got_topo_dict)
            # Nothing is pinned on a freshly built topology.
            self.assertEqual(set([]), got_topo.cells[0].pinned_cpus)
            self.assertEqual(set([]), got_topo.cells[1].pinned_cpus)
            self.assertEqual(set([]), got_topo.cells[2].pinned_cpus)
            self.assertEqual(set([]), got_topo.cells[3].pinned_cpus)
            # threads_per_core=2, so cpus 0 and 1 are siblings in cell 0.
            self.assertEqual([set([0, 1])], got_topo.cells[0].siblings)
            self.assertEqual([], got_topo.cells[1].siblings)
    @mock.patch.object(host.Host, 'has_min_version', return_value=True)
    def test_get_host_numa_topology(self, mock_version):
        """With a new enough libvirt, per-cell mempages are reported."""
        self._test_get_host_numa_topology(mempages=True)
    @mock.patch.object(fakelibvirt.Connection, 'getType')
    @mock.patch.object(fakelibvirt.Connection, 'getVersion')
    @mock.patch.object(fakelibvirt.Connection, 'getLibVersion')
    def test_get_host_numa_topology_no_mempages(self, mock_lib_version,
                                                mock_version, mock_type):
        """Libvirt below the hugepage minimum still yields a topology,
        just without mempages data."""
        self.flags(virt_type='kvm', group='libvirt')
        # One less than the minimum hugepage-capable libvirt version.
        mock_lib_version.return_value = utils.convert_version_to_int(
            libvirt_driver.MIN_LIBVIRT_HUGEPAGE_VERSION) - 1
        mock_version.return_value = utils.convert_version_to_int(
            libvirt_driver.MIN_QEMU_NUMA_HUGEPAGE_VERSION)
        mock_type.return_value = host.HV_DRIVER_QEMU
        self._test_get_host_numa_topology(mempages=False)
    def test_get_host_numa_topology_empty(self):
        """No topology in the capabilities means None is returned."""
        caps = vconfig.LibvirtConfigCaps()
        caps.host = vconfig.LibvirtConfigCapsHost()
        caps.host.cpu = vconfig.LibvirtConfigCPU()
        caps.host.cpu.arch = arch.X86_64
        caps.host.topology = None
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        with contextlib.nested(
            mock.patch.object(host.Host, 'has_min_version', return_value=True),
            mock.patch.object(host.Host, "get_capabilities",
                              return_value=caps)
        ) as (has_min_version, get_caps):
            self.assertIsNone(drvr._get_host_numa_topology())
        self.assertEqual(2, get_caps.call_count)
    @mock.patch.object(fakelibvirt.Connection, 'getType')
    @mock.patch.object(fakelibvirt.Connection, 'getVersion')
    @mock.patch.object(fakelibvirt.Connection, 'getLibVersion')
    def test_get_host_numa_topology_old_version(self, mock_lib_version,
                                                mock_version, mock_type):
        """Libvirt older than the NUMA minimum version reports None."""
        self.flags(virt_type='kvm', group='libvirt')
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        # One less than the minimum NUMA-capable libvirt version.
        mock_lib_version.return_value = utils.convert_version_to_int(
                libvirt_driver.MIN_LIBVIRT_NUMA_VERSION) - 1
        mock_version.return_value = utils.convert_version_to_int(
                libvirt_driver.MIN_QEMU_NUMA_HUGEPAGE_VERSION)
        mock_type.return_value = host.HV_DRIVER_QEMU
        self.assertIsNone(drvr._get_host_numa_topology())
    @mock.patch.object(fakelibvirt.Connection, 'getType')
    @mock.patch.object(fakelibvirt.Connection, 'getVersion')
    @mock.patch.object(fakelibvirt.Connection, 'getLibVersion')
    def test_get_host_numa_topology_xen(self, mock_lib_version,
                                        mock_version, mock_type):
        """The Xen hypervisor driver never reports a NUMA topology."""
        self.flags(virt_type='xen', group='libvirt')
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        mock_lib_version.return_value = utils.convert_version_to_int(
                libvirt_driver.MIN_LIBVIRT_NUMA_VERSION)
        mock_version.return_value = utils.convert_version_to_int(
                libvirt_driver.MIN_QEMU_NUMA_HUGEPAGE_VERSION)
        mock_type.return_value = host.HV_DRIVER_XEN
        self.assertIsNone(drvr._get_host_numa_topology())
    def test_diagnostic_vcpus_exception(self):
        """A vcpus() failure is swallowed: no cpu stats in diagnostics."""
        xml = """
                <domain type='kvm'>
                    <devices>
                        <disk type='file'>
                            <source file='filename'/>
                            <target dev='vda' bus='virtio'/>
                        </disk>
                        <disk type='block'>
                            <source dev='/path/to/dev/1'/>
                            <target dev='vdb' bus='virtio'/>
                        </disk>
                        <interface type='network'>
                            <mac address='52:54:00:a4:38:38'/>
                            <source network='default'/>
                            <target dev='vnet0'/>
                        </interface>
                    </devices>
                </domain>
            """
        class DiagFakeDomain(FakeVirtDomain):
            def __init__(self):
                super(DiagFakeDomain, self).__init__(fake_xml=xml)
            def vcpus(self):
                # Simulate libvirt failing mid-query (e.g. domain is
                # starting up or shutting down).
                raise fakelibvirt.libvirtError('vcpus missing')
            def blockStats(self, path):
                return (169, 688640, 0, 0, -1)
            def interfaceStats(self, path):
                return (4408, 82, 0, 0, 0, 0, 0, 0)
            def memoryStats(self):
                return {'actual': 220160, 'rss': 200164}
            def maxMemory(self):
                return 280160
        def fake_get_domain(self, instance):
            return DiagFakeDomain()
        self.stubs.Set(host.Host, "get_domain", fake_get_domain)
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        instance = objects.Instance(**self.test_instance)
        actual = drvr.get_diagnostics(instance)
        # Note: no cpuN_time keys because vcpus() raised.
        expect = {'vda_read': 688640,
                  'vda_read_req': 169,
                  'vda_write': 0,
                  'vda_write_req': 0,
                  'vda_errors': -1,
                  'vdb_read': 688640,
                  'vdb_read_req': 169,
                  'vdb_write': 0,
                  'vdb_write_req': 0,
                  'vdb_errors': -1,
                  'memory': 280160,
                  'memory-actual': 220160,
                  'memory-rss': 200164,
                  'vnet0_rx': 4408,
                  'vnet0_rx_drop': 0,
                  'vnet0_rx_errors': 0,
                  'vnet0_rx_packets': 82,
                  'vnet0_tx': 0,
                  'vnet0_tx_drop': 0,
                  'vnet0_tx_errors': 0,
                  'vnet0_tx_packets': 0,
                  }
        self.assertEqual(actual, expect)
        lt = datetime.datetime(2012, 11, 22, 12, 00, 00)
        diags_time = datetime.datetime(2012, 11, 22, 12, 00, 10)
        timeutils.set_time_override(diags_time)
        instance.launched_at = lt
        actual = drvr.get_instance_diagnostics(instance)
        # The versioned diagnostics likewise report empty cpu_details.
        expected = {'config_drive': False,
                    'cpu_details': [],
                    'disk_details': [{'errors_count': 0,
                                      'id': '',
                                      'read_bytes': 688640,
                                      'read_requests': 169,
                                      'write_bytes': 0,
                                      'write_requests': 0},
                                     {'errors_count': 0,
                                      'id': '',
                                      'read_bytes': 688640,
                                      'read_requests': 169,
                                      'write_bytes': 0,
                                      'write_requests': 0}],
                    'driver': 'libvirt',
                    'hypervisor_os': 'linux',
                    'memory_details': {'maximum': 2048, 'used': 1234},
                    'nic_details': [{'mac_address': '52:54:00:a4:38:38',
                                     'rx_drop': 0,
                                     'rx_errors': 0,
                                     'rx_octets': 4408,
                                     'rx_packets': 82,
                                     'tx_drop': 0,
                                     'tx_errors': 0,
                                     'tx_octets': 0,
                                     'tx_packets': 0}],
                    'state': 'running',
                    'uptime': 10,
                    'version': '1.0'}
        self.assertEqual(expected, actual.serialize())
    def test_diagnostic_blockstats_exception(self):
        """A blockStats() failure is swallowed: no disk stats appear."""
        xml = """
                <domain type='kvm'>
                    <devices>
                        <disk type='file'>
                            <source file='filename'/>
                            <target dev='vda' bus='virtio'/>
                        </disk>
                        <disk type='block'>
                            <source dev='/path/to/dev/1'/>
                            <target dev='vdb' bus='virtio'/>
                        </disk>
                        <interface type='network'>
                            <mac address='52:54:00:a4:38:38'/>
                            <source network='default'/>
                            <target dev='vnet0'/>
                        </interface>
                    </devices>
                </domain>
            """
        class DiagFakeDomain(FakeVirtDomain):
            def __init__(self):
                super(DiagFakeDomain, self).__init__(fake_xml=xml)
            def vcpus(self):
                return ([(0, 1, 15340000000, 0),
                         (1, 1, 1640000000, 0),
                         (2, 1, 3040000000, 0),
                         (3, 1, 1420000000, 0)],
                        [(True, False),
                         (True, False),
                         (True, False),
                         (True, False)])
            def blockStats(self, path):
                # Simulate libvirt failing the per-disk stats query.
                raise fakelibvirt.libvirtError('blockStats missing')
            def interfaceStats(self, path):
                return (4408, 82, 0, 0, 0, 0, 0, 0)
            def memoryStats(self):
                return {'actual': 220160, 'rss': 200164}
            def maxMemory(self):
                return 280160
        def fake_get_domain(self, instance):
            return DiagFakeDomain()
        self.stubs.Set(host.Host, "get_domain", fake_get_domain)
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        instance = objects.Instance(**self.test_instance)
        actual = drvr.get_diagnostics(instance)
        # Note: no vdX_* keys because blockStats() raised.
        expect = {'cpu0_time': 15340000000,
                  'cpu1_time': 1640000000,
                  'cpu2_time': 3040000000,
                  'cpu3_time': 1420000000,
                  'memory': 280160,
                  'memory-actual': 220160,
                  'memory-rss': 200164,
                  'vnet0_rx': 4408,
                  'vnet0_rx_drop': 0,
                  'vnet0_rx_errors': 0,
                  'vnet0_rx_packets': 82,
                  'vnet0_tx': 0,
                  'vnet0_tx_drop': 0,
                  'vnet0_tx_errors': 0,
                  'vnet0_tx_packets': 0,
                  }
        self.assertEqual(actual, expect)
        lt = datetime.datetime(2012, 11, 22, 12, 00, 00)
        diags_time = datetime.datetime(2012, 11, 22, 12, 00, 10)
        timeutils.set_time_override(diags_time)
        instance.launched_at = lt
        actual = drvr.get_instance_diagnostics(instance)
        # The versioned diagnostics likewise report empty disk_details.
        expected = {'config_drive': False,
                    'cpu_details': [{'time': 15340000000},
                                    {'time': 1640000000},
                                    {'time': 3040000000},
                                    {'time': 1420000000}],
                    'disk_details': [],
                    'driver': 'libvirt',
                    'hypervisor_os': 'linux',
                    'memory_details': {'maximum': 2048, 'used': 1234},
                    'nic_details': [{'mac_address': '52:54:00:a4:38:38',
                                     'rx_drop': 0,
                                     'rx_errors': 0,
                                     'rx_octets': 4408,
                                     'rx_packets': 82,
                                     'tx_drop': 0,
                                     'tx_errors': 0,
                                     'tx_octets': 0,
                                     'tx_packets': 0}],
                    'state': 'running',
                    'uptime': 10,
                    'version': '1.0'}
        self.assertEqual(expected, actual.serialize())
    def test_diagnostic_interfacestats_exception(self):
        """An interfaceStats() failure is swallowed: no NIC stats appear."""
        xml = """
                <domain type='kvm'>
                    <devices>
                        <disk type='file'>
                            <source file='filename'/>
                            <target dev='vda' bus='virtio'/>
                        </disk>
                        <disk type='block'>
                            <source dev='/path/to/dev/1'/>
                            <target dev='vdb' bus='virtio'/>
                        </disk>
                        <interface type='network'>
                            <mac address='52:54:00:a4:38:38'/>
                            <source network='default'/>
                            <target dev='vnet0'/>
                        </interface>
                    </devices>
                </domain>
            """
        class DiagFakeDomain(FakeVirtDomain):
            def __init__(self):
                super(DiagFakeDomain, self).__init__(fake_xml=xml)
            def vcpus(self):
                return ([(0, 1, 15340000000, 0),
                         (1, 1, 1640000000, 0),
                         (2, 1, 3040000000, 0),
                         (3, 1, 1420000000, 0)],
                        [(True, False),
                         (True, False),
                         (True, False),
                         (True, False)])
            def blockStats(self, path):
                return (169, 688640, 0, 0, -1)
            def interfaceStats(self, path):
                # Simulate libvirt failing the per-NIC stats query.
                raise fakelibvirt.libvirtError('interfaceStat missing')
            def memoryStats(self):
                return {'actual': 220160, 'rss': 200164}
            def maxMemory(self):
                return 280160
        def fake_get_domain(self, instance):
            return DiagFakeDomain()
        self.stubs.Set(host.Host, "get_domain", fake_get_domain)
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        instance = objects.Instance(**self.test_instance)
        actual = drvr.get_diagnostics(instance)
        # Note: no vnet0_* keys because interfaceStats() raised.
        expect = {'cpu0_time': 15340000000,
                  'cpu1_time': 1640000000,
                  'cpu2_time': 3040000000,
                  'cpu3_time': 1420000000,
                  'vda_read': 688640,
                  'vda_read_req': 169,
                  'vda_write': 0,
                  'vda_write_req': 0,
                  'vda_errors': -1,
                  'vdb_read': 688640,
                  'vdb_read_req': 169,
                  'vdb_write': 0,
                  'vdb_write_req': 0,
                  'vdb_errors': -1,
                  'memory': 280160,
                  'memory-actual': 220160,
                  'memory-rss': 200164,
                  }
        self.assertEqual(actual, expect)
        lt = datetime.datetime(2012, 11, 22, 12, 00, 00)
        diags_time = datetime.datetime(2012, 11, 22, 12, 00, 10)
        timeutils.set_time_override(diags_time)
        instance.launched_at = lt
        actual = drvr.get_instance_diagnostics(instance)
        # The versioned diagnostics likewise report empty nic_details.
        expected = {'config_drive': False,
                    'cpu_details': [{'time': 15340000000},
                                    {'time': 1640000000},
                                    {'time': 3040000000},
                                    {'time': 1420000000}],
                    'disk_details': [{'errors_count': 0,
                                      'id': '',
                                      'read_bytes': 688640,
                                      'read_requests': 169,
                                      'write_bytes': 0,
                                      'write_requests': 0},
                                     {'errors_count': 0,
                                      'id': '',
                                      'read_bytes': 688640,
                                      'read_requests': 169,
                                      'write_bytes': 0,
                                      'write_requests': 0}],
                    'driver': 'libvirt',
                    'hypervisor_os': 'linux',
                    'memory_details': {'maximum': 2048, 'used': 1234},
                    'nic_details': [],
                    'state': 'running',
                    'uptime': 10,
                    'version': '1.0'}
        self.assertEqual(expected, actual.serialize())
    def test_diagnostic_memorystats_exception(self):
        """A memoryStats() failure drops memory-actual/memory-rss only."""
        xml = """
                <domain type='kvm'>
                    <devices>
                        <disk type='file'>
                            <source file='filename'/>
                            <target dev='vda' bus='virtio'/>
                        </disk>
                        <disk type='block'>
                            <source dev='/path/to/dev/1'/>
                            <target dev='vdb' bus='virtio'/>
                        </disk>
                        <interface type='network'>
                            <mac address='52:54:00:a4:38:38'/>
                            <source network='default'/>
                            <target dev='vnet0'/>
                        </interface>
                    </devices>
                </domain>
            """
        class DiagFakeDomain(FakeVirtDomain):
            def __init__(self):
                super(DiagFakeDomain, self).__init__(fake_xml=xml)
            def vcpus(self):
                return ([(0, 1, 15340000000, 0),
                         (1, 1, 1640000000, 0),
                         (2, 1, 3040000000, 0),
                         (3, 1, 1420000000, 0)],
                        [(True, False),
                         (True, False),
                         (True, False),
                         (True, False)])
            def blockStats(self, path):
                return (169, 688640, 0, 0, -1)
            def interfaceStats(self, path):
                return (4408, 82, 0, 0, 0, 0, 0, 0)
            def memoryStats(self):
                # Simulate libvirt failing the memory stats query; the
                # maxMemory() value is still reported as 'memory'.
                raise fakelibvirt.libvirtError('memoryStats missing')
            def maxMemory(self):
                return 280160
        def fake_get_domain(self, instance):
            return DiagFakeDomain()
        self.stubs.Set(host.Host, "get_domain", fake_get_domain)
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        instance = objects.Instance(**self.test_instance)
        actual = drvr.get_diagnostics(instance)
        expect = {'cpu0_time': 15340000000,
                  'cpu1_time': 1640000000,
                  'cpu2_time': 3040000000,
                  'cpu3_time': 1420000000,
                  'vda_read': 688640,
                  'vda_read_req': 169,
                  'vda_write': 0,
                  'vda_write_req': 0,
                  'vda_errors': -1,
                  'vdb_read': 688640,
                  'vdb_read_req': 169,
                  'vdb_write': 0,
                  'vdb_write_req': 0,
                  'vdb_errors': -1,
                  'memory': 280160,
                  'vnet0_rx': 4408,
                  'vnet0_rx_drop': 0,
                  'vnet0_rx_errors': 0,
                  'vnet0_rx_packets': 82,
                  'vnet0_tx': 0,
                  'vnet0_tx_drop': 0,
                  'vnet0_tx_errors': 0,
                  'vnet0_tx_packets': 0,
                  }
        self.assertEqual(actual, expect)
        lt = datetime.datetime(2012, 11, 22, 12, 00, 00)
        diags_time = datetime.datetime(2012, 11, 22, 12, 00, 10)
        timeutils.set_time_override(diags_time)
        instance.launched_at = lt
        actual = drvr.get_instance_diagnostics(instance)
        expected = {'config_drive': False,
                    'cpu_details': [{'time': 15340000000},
                                    {'time': 1640000000},
                                    {'time': 3040000000},
                                    {'time': 1420000000}],
                    'disk_details': [{'errors_count': 0,
                                      'id': '',
                                      'read_bytes': 688640,
                                      'read_requests': 169,
                                      'write_bytes': 0,
                                      'write_requests': 0},
                                     {'errors_count': 0,
                                      'id': '',
                                      'read_bytes': 688640,
                                      'read_requests': 169,
                                      'write_bytes': 0,
                                      'write_requests': 0}],
                    'driver': 'libvirt',
                    'hypervisor_os': 'linux',
                    'memory_details': {'maximum': 2048, 'used': 1234},
                    'nic_details': [{'mac_address': '52:54:00:a4:38:38',
                                     'rx_drop': 0,
                                     'rx_errors': 0,
                                     'rx_octets': 4408,
                                     'rx_packets': 82,
                                     'tx_drop': 0,
                                     'tx_errors': 0,
                                     'tx_octets': 0,
                                     'tx_packets': 0}],
                    'state': 'running',
                    'uptime': 10,
                    'version': '1.0'}
        self.assertEqual(expected, actual.serialize())
    def test_diagnostic_full(self):
        """All stats queries succeed: every field appears in both the
        legacy and versioned diagnostics formats."""
        xml = """
                <domain type='kvm'>
                    <devices>
                        <disk type='file'>
                            <source file='filename'/>
                            <target dev='vda' bus='virtio'/>
                        </disk>
                        <disk type='block'>
                            <source dev='/path/to/dev/1'/>
                            <target dev='vdb' bus='virtio'/>
                        </disk>
                        <interface type='network'>
                            <mac address='52:54:00:a4:38:38'/>
                            <source network='default'/>
                            <target dev='vnet0'/>
                        </interface>
                    </devices>
                </domain>
            """
        class DiagFakeDomain(FakeVirtDomain):
            def __init__(self):
                super(DiagFakeDomain, self).__init__(fake_xml=xml)
            def vcpus(self):
                return ([(0, 1, 15340000000, 0),
                         (1, 1, 1640000000, 0),
                         (2, 1, 3040000000, 0),
                         (3, 1, 1420000000, 0)],
                        [(True, False),
                         (True, False),
                         (True, False),
                         (True, False)])
            def blockStats(self, path):
                return (169, 688640, 0, 0, -1)
            def interfaceStats(self, path):
                return (4408, 82, 0, 0, 0, 0, 0, 0)
            def memoryStats(self):
                return {'actual': 220160, 'rss': 200164}
            def maxMemory(self):
                return 280160
        def fake_get_domain(self, instance):
            return DiagFakeDomain()
        self.stubs.Set(host.Host, "get_domain", fake_get_domain)
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        instance = objects.Instance(**self.test_instance)
        actual = drvr.get_diagnostics(instance)
        expect = {'cpu0_time': 15340000000,
                  'cpu1_time': 1640000000,
                  'cpu2_time': 3040000000,
                  'cpu3_time': 1420000000,
                  'vda_read': 688640,
                  'vda_read_req': 169,
                  'vda_write': 0,
                  'vda_write_req': 0,
                  'vda_errors': -1,
                  'vdb_read': 688640,
                  'vdb_read_req': 169,
                  'vdb_write': 0,
                  'vdb_write_req': 0,
                  'vdb_errors': -1,
                  'memory': 280160,
                  'memory-actual': 220160,
                  'memory-rss': 200164,
                  'vnet0_rx': 4408,
                  'vnet0_rx_drop': 0,
                  'vnet0_rx_errors': 0,
                  'vnet0_rx_packets': 82,
                  'vnet0_tx': 0,
                  'vnet0_tx_drop': 0,
                  'vnet0_tx_errors': 0,
                  'vnet0_tx_packets': 0,
                  }
        self.assertEqual(actual, expect)
        lt = datetime.datetime(2012, 11, 22, 12, 00, 00)
        diags_time = datetime.datetime(2012, 11, 22, 12, 00, 10)
        timeutils.set_time_override(diags_time)
        instance.launched_at = lt
        actual = drvr.get_instance_diagnostics(instance)
        expected = {'config_drive': False,
                    'cpu_details': [{'time': 15340000000},
                                    {'time': 1640000000},
                                    {'time': 3040000000},
                                    {'time': 1420000000}],
                    'disk_details': [{'errors_count': 0,
                                      'id': '',
                                      'read_bytes': 688640,
                                      'read_requests': 169,
                                      'write_bytes': 0,
                                      'write_requests': 0},
                                     {'errors_count': 0,
                                      'id': '',
                                      'read_bytes': 688640,
                                      'read_requests': 169,
                                      'write_bytes': 0,
                                      'write_requests': 0}],
                    'driver': 'libvirt',
                    'hypervisor_os': 'linux',
                    'memory_details': {'maximum': 2048, 'used': 1234},
                    'nic_details': [{'mac_address': '52:54:00:a4:38:38',
                                     'rx_drop': 0,
                                     'rx_errors': 0,
                                     'rx_octets': 4408,
                                     'rx_packets': 82,
                                     'tx_drop': 0,
                                     'tx_errors': 0,
                                     'tx_octets': 0,
                                     'tx_packets': 0}],
                    'state': 'running',
                    'uptime': 10,
                    'version': '1.0'}
        self.assertEqual(expected, actual.serialize())
    @mock.patch.object(timeutils, 'utcnow')
    @mock.patch.object(host.Host, 'get_domain')
    def test_diagnostic_full_with_multiple_interfaces(self, mock_get_domain,
                                                      mock_utcnow):
        """Each interface in the domain XML (vnet0 and br0) gets its own
        set of stats in both diagnostics formats."""
        xml = """
                <domain type='kvm'>
                    <devices>
                        <disk type='file'>
                            <source file='filename'/>
                            <target dev='vda' bus='virtio'/>
                        </disk>
                        <disk type='block'>
                            <source dev='/path/to/dev/1'/>
                            <target dev='vdb' bus='virtio'/>
                        </disk>
                        <interface type='network'>
                            <mac address='52:54:00:a4:38:38'/>
                            <source network='default'/>
                            <target dev='vnet0'/>
                        </interface>
                        <interface type="bridge">
                            <mac address="53:55:00:a5:39:39"/>
                            <model type="virtio"/>
                            <target dev="br0"/>
                        </interface>
                    </devices>
                </domain>
            """
        class DiagFakeDomain(FakeVirtDomain):
            def __init__(self):
                super(DiagFakeDomain, self).__init__(fake_xml=xml)
            def vcpus(self):
                return ([(0, 1, 15340000000, 0),
                         (1, 1, 1640000000, 0),
                         (2, 1, 3040000000, 0),
                         (3, 1, 1420000000, 0)],
                        [(True, False),
                         (True, False),
                         (True, False),
                         (True, False)])
            def blockStats(self, path):
                return (169, 688640, 0, 0, -1)
            def interfaceStats(self, path):
                return (4408, 82, 0, 0, 0, 0, 0, 0)
            def memoryStats(self):
                return {'actual': 220160, 'rss': 200164}
            def maxMemory(self):
                return 280160
        def fake_get_domain(self):
            return DiagFakeDomain()
        mock_get_domain.side_effect = fake_get_domain
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        instance = objects.Instance(**self.test_instance)
        actual = drvr.get_diagnostics(instance)
        expect = {'cpu0_time': 15340000000,
                  'cpu1_time': 1640000000,
                  'cpu2_time': 3040000000,
                  'cpu3_time': 1420000000,
                  'vda_read': 688640,
                  'vda_read_req': 169,
                  'vda_write': 0,
                  'vda_write_req': 0,
                  'vda_errors': -1,
                  'vdb_read': 688640,
                  'vdb_read_req': 169,
                  'vdb_write': 0,
                  'vdb_write_req': 0,
                  'vdb_errors': -1,
                  'memory': 280160,
                  'memory-actual': 220160,
                  'memory-rss': 200164,
                  'vnet0_rx': 4408,
                  'vnet0_rx_drop': 0,
                  'vnet0_rx_errors': 0,
                  'vnet0_rx_packets': 82,
                  'vnet0_tx': 0,
                  'vnet0_tx_drop': 0,
                  'vnet0_tx_errors': 0,
                  'vnet0_tx_packets': 0,
                  'br0_rx': 4408,
                  'br0_rx_drop': 0,
                  'br0_rx_errors': 0,
                  'br0_rx_packets': 82,
                  'br0_tx': 0,
                  'br0_tx_drop': 0,
                  'br0_tx_errors': 0,
                  'br0_tx_packets': 0,
                  }
        self.assertEqual(actual, expect)
        lt = datetime.datetime(2012, 11, 22, 12, 00, 00)
        diags_time = datetime.datetime(2012, 11, 22, 12, 00, 10)
        mock_utcnow.return_value = diags_time
        instance.launched_at = lt
        actual = drvr.get_instance_diagnostics(instance)
        expected = {'config_drive': False,
                    'cpu_details': [{'time': 15340000000},
                                    {'time': 1640000000},
                                    {'time': 3040000000},
                                    {'time': 1420000000}],
                    'disk_details': [{'errors_count': 0,
                                      'id': '',
                                      'read_bytes': 688640,
                                      'read_requests': 169,
                                      'write_bytes': 0,
                                      'write_requests': 0},
                                     {'errors_count': 0,
                                      'id': '',
                                      'read_bytes': 688640,
                                      'read_requests': 169,
                                      'write_bytes': 0,
                                      'write_requests': 0}],
                    'driver': 'libvirt',
                    'hypervisor_os': 'linux',
                    'memory_details': {'maximum': 2048, 'used': 1234},
                    'nic_details': [{'mac_address': '52:54:00:a4:38:38',
                                     'rx_drop': 0,
                                     'rx_errors': 0,
                                     'rx_octets': 4408,
                                     'rx_packets': 82,
                                     'tx_drop': 0,
                                     'tx_errors': 0,
                                     'tx_octets': 0,
                                     'tx_packets': 0},
                                    {'mac_address': '53:55:00:a5:39:39',
                                     'rx_drop': 0,
                                     'rx_errors': 0,
                                     'rx_octets': 4408,
                                     'rx_packets': 82,
                                     'tx_drop': 0,
                                     'tx_errors': 0,
                                     'tx_octets': 0,
                                     'tx_packets': 0}],
                    'state': 'running',
                    'uptime': 10.,
                    'version': '1.0'}
        self.assertEqual(expected, actual.serialize())
    @mock.patch.object(host.Host, "list_instance_domains")
    def test_failing_vcpu_count(self, mock_list):
        """Domain can fail to return the vcpu description in case it's
        just starting up or shutting down. Make sure None is handled
        gracefully.
        """
        class DiagFakeDomain(object):
            def __init__(self, vcpus):
                self._vcpus = vcpus
            def vcpus(self):
                if self._vcpus is None:
                    raise fakelibvirt.libvirtError("fake-error")
                else:
                    return ([[1, 2, 3, 4]] * self._vcpus, [True] * self._vcpus)
            def ID(self):
                return 1
            def name(self):
                return "instance000001"
            def UUIDString(self):
                return "19479fee-07a5-49bb-9138-d3738280d63c"
        # The first domain raises; only the 5-vcpu domain is counted.
        mock_list.return_value = [
            DiagFakeDomain(None), DiagFakeDomain(5)]
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        self.assertEqual(5, drvr._get_vcpu_used())
        mock_list.assert_called_with()
    @mock.patch.object(host.Host, "list_instance_domains")
    def test_failing_vcpu_count_none(self, mock_list):
        """Domain will return zero if the current number of vcpus used
        is None. This is in case of VM state starting up or shutting
        down. None type returned is counted as zero.
        """
        class DiagFakeDomain(object):
            def __init__(self):
                pass
            def vcpus(self):
                # None instead of the (cpu info, cpu map) tuple.
                return None
            def ID(self):
                return 1
            def name(self):
                return "instance000001"
        mock_list.return_value = [DiagFakeDomain()]
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        self.assertEqual(0, drvr._get_vcpu_used())
        mock_list.assert_called_with()
    def test_get_instance_capabilities(self):
        """Each (arch, domain type) pair in the caps yields one tuple."""
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        # Two guests: x86_64 with kvm+qemu domains, i686 with kvm only.
        def get_host_capabilities_stub(self):
            caps = vconfig.LibvirtConfigCaps()
            guest = vconfig.LibvirtConfigGuest()
            guest.ostype = 'hvm'
            guest.arch = arch.X86_64
            guest.domtype = ['kvm', 'qemu']
            caps.guests.append(guest)
            guest = vconfig.LibvirtConfigGuest()
            guest.ostype = 'hvm'
            guest.arch = arch.I686
            guest.domtype = ['kvm']
            caps.guests.append(guest)
            return caps
        self.stubs.Set(host.Host, "get_capabilities",
                       get_host_capabilities_stub)
        want = [(arch.X86_64, 'kvm', 'hvm'),
                (arch.X86_64, 'qemu', 'hvm'),
                (arch.I686, 'kvm', 'hvm')]
        got = drvr._get_instance_capabilities()
        self.assertEqual(want, got)
def test_set_cache_mode(self):
self.flags(disk_cachemodes=['file=directsync'], group='libvirt')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
fake_conf = FakeConfigGuestDisk()
fake_conf.source_type = 'file'
drvr._set_cache_mode(fake_conf)
self.assertEqual(fake_conf.driver_cache, 'directsync')
def test_set_cache_mode_invalid_mode(self):
self.flags(disk_cachemodes=['file=FAKE'], group='libvirt')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
fake_conf = FakeConfigGuestDisk()
fake_conf.source_type = 'file'
drvr._set_cache_mode(fake_conf)
self.assertIsNone(fake_conf.driver_cache)
def test_set_cache_mode_invalid_object(self):
self.flags(disk_cachemodes=['file=directsync'], group='libvirt')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
fake_conf = FakeConfigGuest()
fake_conf.driver_cache = 'fake'
drvr._set_cache_mode(fake_conf)
self.assertEqual(fake_conf.driver_cache, 'fake')
    @mock.patch('os.unlink')
    @mock.patch.object(os.path, 'exists')
    def _test_shared_storage_detection(self, is_same,
                                       mock_exists, mock_unlink):
        """Drive _is_storage_shared_with with a remote 'touch' whose file
        does (is_same=True) or does not appear locally."""
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        drvr.get_host_ip_addr = mock.MagicMock(return_value='bar')
        mock_exists.return_value = is_same
        with mock.patch('nova.utils.ssh_execute') as mock_ssh_method:
            result = drvr._is_storage_shared_with('foo', '/path')
        mock_ssh_method.assert_any_call('foo', 'touch', mock.ANY)
        if is_same:
            # Shared: the local copy of the touched file is removed.
            mock_unlink.assert_called_once_with(mock.ANY)
        else:
            # Not shared: the remote file is cleaned up over ssh.
            self.assertEqual(2, mock_ssh_method.call_count)
            mock_ssh_method.assert_called_with('foo', 'rm', mock.ANY)
        return result
def test_shared_storage_detection_same_host(self):
self.assertTrue(self._test_shared_storage_detection(True))
def test_shared_storage_detection_different_host(self):
self.assertFalse(self._test_shared_storage_detection(False))
    def test_shared_storage_detection_easy(self):
        """Same host IP short-circuits: shared without any file probing."""
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        self.mox.StubOutWithMock(drvr, 'get_host_ip_addr')
        self.mox.StubOutWithMock(utils, 'execute')
        self.mox.StubOutWithMock(os.path, 'exists')
        self.mox.StubOutWithMock(os, 'unlink')
        # Only get_host_ip_addr is expected; no expectations are recorded
        # for execute/exists/unlink, so any call to them would fail replay.
        drvr.get_host_ip_addr().AndReturn('foo')
        self.mox.ReplayAll()
        self.assertTrue(drvr._is_storage_shared_with('foo', '/path'))
    @mock.patch('nova.virt.libvirt.host.Host.get_domain')
    def test_get_domain_info_with_more_return(self, mock_get_domain):
        """Extra trailing elements from dom.info() are tolerated; only the
        first five are mapped into InstanceInfo."""
        instance = objects.Instance(**self.test_instance)
        dom_mock = mock.MagicMock()
        # state, maxMem, memory, nrVirtCpu, cpuTime + one extra element.
        dom_mock.info.return_value = [
            1, 2048, 737, 8, 12345, 888888
        ]
        dom_mock.ID.return_value = mock.sentinel.instance_id
        mock_get_domain.return_value = dom_mock
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        info = drvr.get_info(instance)
        self.assertEqual(1, info.state)
        self.assertEqual(2048, info.max_mem_kb)
        self.assertEqual(737, info.mem_kb)
        self.assertEqual(8, info.num_cpu)
        self.assertEqual(12345, info.cpu_time_ns)
        self.assertEqual(mock.sentinel.instance_id, info.id)
        dom_mock.info.assert_called_once_with()
        dom_mock.ID.assert_called_once_with()
        mock_get_domain.assert_called_once_with(instance)
    def test_create_domain(self):
        """_create_domain wraps the domain in a Guest and launches it
        with flags=0 by default.
        """
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        mock_domain = mock.MagicMock()
        guest = drvr._create_domain(domain=mock_domain)
        self.assertEqual(mock_domain, guest._domain)
        mock_domain.createWithFlags.assert_has_calls([mock.call(0)])
    @mock.patch('nova.virt.disk.api.clean_lxc_namespace')
    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver.get_info')
    @mock.patch('nova.virt.disk.api.setup_container')
    @mock.patch('nova.openstack.common.fileutils.ensure_tree')
    @mock.patch.object(fake_libvirt_utils, 'get_instance_path')
    def test_create_domain_lxc(self, mock_get_inst_path, mock_ensure_tree,
                               mock_setup_container, mock_get_info, mock_clean):
        """LXC spawn: the container rootfs is set up from the disk image,
        the device name is recorded in system_metadata, and the namespace
        is cleaned once the guest reports RUNNING.
        """
        self.flags(virt_type='lxc', group='libvirt')
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        mock_instance = mock.MagicMock()
        inst_sys_meta = dict()
        mock_instance.system_metadata = inst_sys_meta
        mock_get_inst_path.return_value = '/tmp/'
        mock_image_backend = mock.MagicMock()
        drvr.image_backend = mock_image_backend
        mock_image = mock.MagicMock()
        mock_image.path = '/tmp/test.img'
        drvr.image_backend.image.return_value = mock_image
        mock_setup_container.return_value = '/dev/nbd0'
        mock_get_info.return_value = hardware.InstanceInfo(
            state=power_state.RUNNING)
        with contextlib.nested(
            mock.patch.object(drvr, '_create_images_and_backing'),
            mock.patch.object(drvr, '_is_booted_from_volume',
                              return_value=False),
            mock.patch.object(drvr, '_create_domain'),
            mock.patch.object(drvr, 'plug_vifs'),
            mock.patch.object(drvr.firewall_driver, 'setup_basic_filtering'),
            mock.patch.object(drvr.firewall_driver, 'prepare_instance_filter'),
            mock.patch.object(drvr.firewall_driver, 'apply_instance_filter')):
            drvr._create_domain_and_network(self.context, 'xml',
                                            mock_instance, [], None)

        # The nbd device backing the rootfs is recorded for later teardown.
        self.assertEqual('/dev/nbd0', inst_sys_meta['rootfs_device_name'])
        self.assertFalse(mock_instance.called)
        mock_get_inst_path.assert_has_calls([mock.call(mock_instance)])
        mock_ensure_tree.assert_has_calls([mock.call('/tmp/rootfs')])
        drvr.image_backend.image.assert_has_calls([mock.call(mock_instance,
                                                             'disk')])
        # The image format passed to setup_container follows use_cow_images.
        fmt = imgmodel.FORMAT_RAW
        if CONF.use_cow_images:
            fmt = imgmodel.FORMAT_QCOW2
        setup_container_call = mock.call(
            imgmodel.LocalFileImage('/tmp/test.img', fmt),
            container_dir='/tmp/rootfs')
        mock_setup_container.assert_has_calls([setup_container_call])
        mock_get_info.assert_has_calls([mock.call(mock_instance)])
        # RUNNING guest -> namespace is cleaned, not torn down.
        mock_clean.assert_has_calls([mock.call(container_dir='/tmp/rootfs')])
    @mock.patch('nova.virt.disk.api.clean_lxc_namespace')
    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver.get_info')
    @mock.patch.object(fake_libvirt_utils, 'chown_for_id_maps')
    @mock.patch('nova.virt.disk.api.setup_container')
    @mock.patch('nova.openstack.common.fileutils.ensure_tree')
    @mock.patch.object(fake_libvirt_utils, 'get_instance_path')
    def test_create_domain_lxc_id_maps(self, mock_get_inst_path,
                                       mock_ensure_tree, mock_setup_container,
                                       mock_chown, mock_get_info, mock_clean):
        """LXC spawn with uid/gid maps: the rootfs ownership is shifted
        via chown_for_id_maps using the configured 0:1000:100 mappings.
        """
        self.flags(virt_type='lxc', uid_maps=["0:1000:100"],
                   gid_maps=["0:1000:100"], group='libvirt')

        def chown_side_effect(path, id_maps):
            # Validate the parsed uid/gid map objects passed to chown.
            self.assertEqual('/tmp/rootfs', path)
            self.assertIsInstance(id_maps[0], vconfig.LibvirtConfigGuestUIDMap)
            self.assertEqual(0, id_maps[0].start)
            self.assertEqual(1000, id_maps[0].target)
            self.assertEqual(100, id_maps[0].count)
            self.assertIsInstance(id_maps[1], vconfig.LibvirtConfigGuestGIDMap)
            self.assertEqual(0, id_maps[1].start)
            self.assertEqual(1000, id_maps[1].target)
            self.assertEqual(100, id_maps[1].count)

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        mock_instance = mock.MagicMock()
        inst_sys_meta = dict()
        mock_instance.system_metadata = inst_sys_meta
        mock_get_inst_path.return_value = '/tmp/'
        mock_image_backend = mock.MagicMock()
        drvr.image_backend = mock_image_backend
        mock_image = mock.MagicMock()
        mock_image.path = '/tmp/test.img'
        drvr.image_backend.image.return_value = mock_image
        mock_setup_container.return_value = '/dev/nbd0'
        mock_chown.side_effect = chown_side_effect
        mock_get_info.return_value = hardware.InstanceInfo(
            state=power_state.RUNNING)
        with contextlib.nested(
            mock.patch.object(drvr, '_create_images_and_backing'),
            mock.patch.object(drvr, '_is_booted_from_volume',
                              return_value=False),
            mock.patch.object(drvr, '_create_domain'),
            mock.patch.object(drvr, 'plug_vifs'),
            mock.patch.object(drvr.firewall_driver, 'setup_basic_filtering'),
            mock.patch.object(drvr.firewall_driver, 'prepare_instance_filter'),
            mock.patch.object(drvr.firewall_driver, 'apply_instance_filter')):
            drvr._create_domain_and_network(self.context, 'xml',
                                            mock_instance, [], None)

        self.assertEqual('/dev/nbd0', inst_sys_meta['rootfs_device_name'])
        self.assertFalse(mock_instance.called)
        mock_get_inst_path.assert_has_calls([mock.call(mock_instance)])
        mock_ensure_tree.assert_has_calls([mock.call('/tmp/rootfs')])
        drvr.image_backend.image.assert_has_calls([mock.call(mock_instance,
                                                             'disk')])
        fmt = imgmodel.FORMAT_RAW
        if CONF.use_cow_images:
            fmt = imgmodel.FORMAT_QCOW2
        setup_container_call = mock.call(
            imgmodel.LocalFileImage('/tmp/test.img', fmt),
            container_dir='/tmp/rootfs')
        mock_setup_container.assert_has_calls([setup_container_call])
        mock_get_info.assert_has_calls([mock.call(mock_instance)])
        mock_clean.assert_has_calls([mock.call(container_dir='/tmp/rootfs')])
    @mock.patch('nova.virt.disk.api.teardown_container')
    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver.get_info')
    @mock.patch('nova.virt.disk.api.setup_container')
    @mock.patch('nova.openstack.common.fileutils.ensure_tree')
    @mock.patch.object(fake_libvirt_utils, 'get_instance_path')
    def test_create_domain_lxc_not_running(self, mock_get_inst_path,
                                           mock_ensure_tree,
                                           mock_setup_container,
                                           mock_get_info, mock_teardown):
        """LXC spawn where the guest ends up SHUTDOWN: the container is
        torn down instead of just having its namespace cleaned.
        """
        self.flags(virt_type='lxc', group='libvirt')
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        mock_instance = mock.MagicMock()
        inst_sys_meta = dict()
        mock_instance.system_metadata = inst_sys_meta
        mock_get_inst_path.return_value = '/tmp/'
        mock_image_backend = mock.MagicMock()
        drvr.image_backend = mock_image_backend
        mock_image = mock.MagicMock()
        mock_image.path = '/tmp/test.img'
        drvr.image_backend.image.return_value = mock_image
        mock_setup_container.return_value = '/dev/nbd0'
        # SHUTDOWN (not RUNNING) is the trigger for the teardown path.
        mock_get_info.return_value = hardware.InstanceInfo(
            state=power_state.SHUTDOWN)
        with contextlib.nested(
            mock.patch.object(drvr, '_create_images_and_backing'),
            mock.patch.object(drvr, '_is_booted_from_volume',
                              return_value=False),
            mock.patch.object(drvr, '_create_domain'),
            mock.patch.object(drvr, 'plug_vifs'),
            mock.patch.object(drvr.firewall_driver, 'setup_basic_filtering'),
            mock.patch.object(drvr.firewall_driver, 'prepare_instance_filter'),
            mock.patch.object(drvr.firewall_driver, 'apply_instance_filter')):
            drvr._create_domain_and_network(self.context, 'xml',
                                            mock_instance, [], None)

        self.assertEqual('/dev/nbd0', inst_sys_meta['rootfs_device_name'])
        self.assertFalse(mock_instance.called)
        mock_get_inst_path.assert_has_calls([mock.call(mock_instance)])
        mock_ensure_tree.assert_has_calls([mock.call('/tmp/rootfs')])
        drvr.image_backend.image.assert_has_calls([mock.call(mock_instance,
                                                             'disk')])
        fmt = imgmodel.FORMAT_RAW
        if CONF.use_cow_images:
            fmt = imgmodel.FORMAT_QCOW2
        setup_container_call = mock.call(
            imgmodel.LocalFileImage('/tmp/test.img', fmt),
            container_dir='/tmp/rootfs')
        mock_setup_container.assert_has_calls([setup_container_call])
        mock_get_info.assert_has_calls([mock.call(mock_instance)])
        teardown_call = mock.call(container_dir='/tmp/rootfs')
        mock_teardown.assert_has_calls([teardown_call])
    def test_create_domain_define_xml_fails(self):
        """Tests that the xml is logged when defining the domain fails."""
        fake_xml = "<test>this is a test</test>"

        def fake_defineXML(xml):
            # The driver must pass the XML through unmodified.
            self.assertEqual(fake_xml, xml)
            raise fakelibvirt.libvirtError('virDomainDefineXML() failed')

        def fake_safe_decode(text, *args, **kwargs):
            # Marker suffix proves the logged XML went through safe_decode.
            return text + 'safe decoded'

        self.log_error_called = False

        def fake_error(msg, *args, **kwargs):
            self.log_error_called = True
            self.assertIn(fake_xml, msg % args)
            self.assertIn('safe decoded', msg % args)

        self.stubs.Set(encodeutils, 'safe_decode', fake_safe_decode)
        self.stubs.Set(nova.virt.libvirt.guest.LOG, 'error', fake_error)

        self.create_fake_libvirt_mock(defineXML=fake_defineXML)
        self.mox.ReplayAll()
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        # The libvirt error must propagate after being logged.
        self.assertRaises(fakelibvirt.libvirtError, drvr._create_domain,
                          fake_xml)
        self.assertTrue(self.log_error_called)
    def test_create_domain_with_flags_fails(self):
        """Tests that the xml is logged when creating the domain with flags
        fails
        """
        fake_xml = "<test>this is a test</test>"
        fake_domain = FakeVirtDomain(fake_xml)

        def fake_createWithFlags(launch_flags):
            raise fakelibvirt.libvirtError('virDomainCreateWithFlags() failed')

        self.log_error_called = False

        def fake_error(msg, *args, **kwargs):
            self.log_error_called = True
            # The domain XML must appear in the error log message.
            self.assertIn(fake_xml, msg % args)

        self.stubs.Set(fake_domain, 'createWithFlags', fake_createWithFlags)
        self.stubs.Set(nova.virt.libvirt.guest.LOG, 'error', fake_error)

        self.create_fake_libvirt_mock()
        self.mox.ReplayAll()
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        # The libvirt error must propagate after being logged.
        self.assertRaises(fakelibvirt.libvirtError, drvr._create_domain,
                          domain=fake_domain)
        self.assertTrue(self.log_error_called)
    def test_create_domain_enable_hairpin_fails(self):
        """Tests that the xml is logged when enabling hairpin mode for the
        domain fails.
        """
        fake_xml = "<test>this is a test</test>"
        fake_domain = FakeVirtDomain(fake_xml)

        def fake_execute(*args, **kwargs):
            # Simulate failure of the hairpin sysfs write command.
            raise processutils.ProcessExecutionError('error')

        def fake_get_interfaces(*args):
            return ["dev"]

        self.log_error_called = False

        def fake_error(msg, *args, **kwargs):
            self.log_error_called = True
            self.assertIn(fake_xml, msg % args)

        self.stubs.Set(nova.virt.libvirt.guest.LOG, 'error', fake_error)

        self.create_fake_libvirt_mock()
        self.mox.ReplayAll()
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        self.stubs.Set(nova.utils, 'execute', fake_execute)
        self.stubs.Set(
            nova.virt.libvirt.guest.Guest, 'get_interfaces',
            fake_get_interfaces)
        # power_on=False exercises hairpin setup without a domain launch.
        self.assertRaises(processutils.ProcessExecutionError,
                          drvr._create_domain,
                          domain=fake_domain,
                          power_on=False)
        self.assertTrue(self.log_error_called)
    def test_get_vnc_console(self):
        """get_vnc_console extracts the port from the domain's VNC
        graphics element.
        """
        instance = objects.Instance(**self.test_instance)
        dummyxml = ("<domain type='kvm'><name>instance-0000000a</name>"
                    "<devices>"
                    "<graphics type='vnc' port='5900'/>"
                    "</devices></domain>")

        vdmock = self.mox.CreateMock(fakelibvirt.virDomain)
        self.mox.StubOutWithMock(vdmock, "XMLDesc")
        vdmock.XMLDesc(flags=0).AndReturn(dummyxml)

        def fake_lookup(instance_name):
            if instance_name == instance['name']:
                return vdmock
        self.create_fake_libvirt_mock(lookupByName=fake_lookup)

        self.mox.ReplayAll()
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        vnc_dict = drvr.get_vnc_console(self.context, instance)
        self.assertEqual(vnc_dict.port, '5900')
    def test_get_vnc_console_unavailable(self):
        """A domain with no graphics devices raises ConsoleTypeUnavailable."""
        instance = objects.Instance(**self.test_instance)
        dummyxml = ("<domain type='kvm'><name>instance-0000000a</name>"
                    "<devices></devices></domain>")

        vdmock = self.mox.CreateMock(fakelibvirt.virDomain)
        self.mox.StubOutWithMock(vdmock, "XMLDesc")
        vdmock.XMLDesc(flags=0).AndReturn(dummyxml)

        def fake_lookup(instance_name):
            if instance_name == instance['name']:
                return vdmock
        self.create_fake_libvirt_mock(lookupByName=fake_lookup)

        self.mox.ReplayAll()
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        self.assertRaises(exception.ConsoleTypeUnavailable,
                          drvr.get_vnc_console, self.context, instance)
    def test_get_spice_console(self):
        """get_spice_console extracts the port from the domain's SPICE
        graphics element.
        """
        instance = objects.Instance(**self.test_instance)
        dummyxml = ("<domain type='kvm'><name>instance-0000000a</name>"
                    "<devices>"
                    "<graphics type='spice' port='5950'/>"
                    "</devices></domain>")

        vdmock = self.mox.CreateMock(fakelibvirt.virDomain)
        self.mox.StubOutWithMock(vdmock, "XMLDesc")
        vdmock.XMLDesc(flags=0).AndReturn(dummyxml)

        def fake_lookup(instance_name):
            if instance_name == instance['name']:
                return vdmock
        self.create_fake_libvirt_mock(lookupByName=fake_lookup)

        self.mox.ReplayAll()
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        spice_dict = drvr.get_spice_console(self.context, instance)
        self.assertEqual(spice_dict.port, '5950')
    def test_get_spice_console_unavailable(self):
        """A domain with no graphics devices raises ConsoleTypeUnavailable."""
        instance = objects.Instance(**self.test_instance)
        dummyxml = ("<domain type='kvm'><name>instance-0000000a</name>"
                    "<devices></devices></domain>")

        vdmock = self.mox.CreateMock(fakelibvirt.virDomain)
        self.mox.StubOutWithMock(vdmock, "XMLDesc")
        vdmock.XMLDesc(flags=0).AndReturn(dummyxml)

        def fake_lookup(instance_name):
            if instance_name == instance['name']:
                return vdmock
        self.create_fake_libvirt_mock(lookupByName=fake_lookup)

        self.mox.ReplayAll()
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        self.assertRaises(exception.ConsoleTypeUnavailable,
                          drvr.get_spice_console, self.context, instance)
    def test_detach_volume_with_instance_not_found(self):
        # Test that detach_volume() method does not raise exception,
        # if the instance does not exist.

        instance = objects.Instance(**self.test_instance)
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)

        with contextlib.nested(
            mock.patch.object(host.Host, 'get_domain',
                              side_effect=exception.InstanceNotFound(
                                  instance_id=instance.name)),
            mock.patch.object(drvr, '_disconnect_volume')
        ) as (_get_domain, _disconnect_volume):
            connection_info = {'driver_volume_type': 'fake'}
            drvr.detach_volume(connection_info, instance, '/dev/sda')
            _get_domain.assert_called_once_with(instance)
            # Even without a domain, the volume itself is disconnected
            # (device name is passed without the '/dev/' prefix).
            _disconnect_volume.assert_called_once_with(connection_info,
                                                       'sda')
    def _test_attach_detach_interface_get_config(self, method_name):
        """Tests that the get_config() method is properly called in
        attach_interface() and detach_interface().

        method_name: either "attach_interface" or "detach_interface"
        depending on the method to test.
        """
        self.stubs.Set(host.Host, "get_domain", lambda a, b: FakeVirtDomain())

        instance = objects.Instance(**self.test_instance)
        network_info = _fake_network_info(self.stubs, 1)
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)

        # attach passes image metadata to the vif driver; detach does not.
        if method_name == "attach_interface":
            fake_image_meta = {'id': instance['image_ref']}
        elif method_name == "detach_interface":
            fake_image_meta = None
        else:
            raise ValueError("Unhandled method %s" % method_name)

        if method_name == "attach_interface":
            self.mox.StubOutWithMock(drvr.firewall_driver,
                                     'setup_basic_filtering')
            drvr.firewall_driver.setup_basic_filtering(instance, network_info)

        # Record the expected config before stubbing get_config itself.
        expected = drvr.vif_driver.get_config(instance, network_info[0],
                                              fake_image_meta,
                                              instance.get_flavor(),
                                              CONF.libvirt.virt_type)
        self.mox.StubOutWithMock(drvr.vif_driver, 'get_config')
        drvr.vif_driver.get_config(instance, network_info[0],
                                   fake_image_meta,
                                   mox.IsA(objects.Flavor),
                                   CONF.libvirt.virt_type).\
                                   AndReturn(expected)

        self.mox.ReplayAll()

        if method_name == "attach_interface":
            drvr.attach_interface(instance, fake_image_meta,
                                  network_info[0])
        elif method_name == "detach_interface":
            drvr.detach_interface(instance, network_info[0])
        else:
            raise ValueError("Unhandled method %s" % method_name)
    @mock.patch.object(lockutils, "external_lock")
    def test_attach_interface_get_config(self, mock_lock):
        """Tests that the get_config() method is properly called in
        attach_interface().
        """
        # Replace the external file lock with an in-memory semaphore.
        mock_lock.return_value = threading.Semaphore()

        self._test_attach_detach_interface_get_config("attach_interface")
    def test_detach_interface_get_config(self):
        """Tests that the get_config() method is properly called in
        detach_interface().
        """
        self._test_attach_detach_interface_get_config("detach_interface")
def test_default_root_device_name(self):
instance = {'uuid': 'fake_instance'}
image_meta = {'id': 'fake'}
root_bdm = {'source_type': 'image',
'detination_type': 'volume',
'image_id': 'fake_id'}
self.flags(virt_type='fake_libvirt_type', group='libvirt')
self.mox.StubOutWithMock(blockinfo, 'get_disk_bus_for_device_type')
self.mox.StubOutWithMock(blockinfo, 'get_root_info')
blockinfo.get_disk_bus_for_device_type('fake_libvirt_type',
image_meta,
'disk').InAnyOrder().\
AndReturn('virtio')
blockinfo.get_disk_bus_for_device_type('fake_libvirt_type',
image_meta,
'cdrom').InAnyOrder().\
AndReturn('ide')
blockinfo.get_root_info('fake_libvirt_type',
image_meta, root_bdm,
'virtio', 'ide').AndReturn({'dev': 'vda'})
self.mox.ReplayAll()
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.assertEqual(drvr.default_root_device_name(instance, image_meta,
root_bdm), '/dev/vda')
    @mock.patch.object(driver, "get_block_device_info")
    @mock.patch.object(blockinfo, "default_device_names")
    @mock.patch.object(utils, "get_image_from_system_metadata")
    def test_default_device_names_for_instance(
            self, mock_meta, mock_devnames, mock_blockinfo):
        """default_device_names_for_instance delegates to
        blockinfo.default_device_names with the assembled block device info.
        """
        instance = objects.Instance(**self.test_instance)
        image_meta = {}
        instance.root_device_name = '/dev/vda'
        ephemerals = [{'device_name': 'vdb'}]
        swap = [{'device_name': 'vdc'}]
        block_device_mapping = [{'device_name': 'vdc'}]
        self.flags(virt_type='fake_libvirt_type', group='libvirt')

        mock_meta.return_value = image_meta
        mock_blockinfo.return_value = 'fake-block-device-info'

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        drvr.default_device_names_for_instance(instance,
                                               instance.root_device_name,
                                               ephemerals, swap,
                                               block_device_mapping,
                                               image_meta)
        mock_devnames.assert_called_once_with(
            "fake_libvirt_type", mock.ANY,
            instance, 'fake-block-device-info',
            image_meta)
def test_is_supported_fs_format(self):
supported_fs = [disk.FS_FORMAT_EXT2, disk.FS_FORMAT_EXT3,
disk.FS_FORMAT_EXT4, disk.FS_FORMAT_XFS]
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
for fs in supported_fs:
self.assertTrue(drvr.is_supported_fs_format(fs))
supported_fs = ['', 'dummy']
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
for fs in supported_fs:
self.assertFalse(drvr.is_supported_fs_format(fs))
    def test_post_live_migration_at_destination_with_block_device_info(self):
        """post_live_migration_at_destination regenerates the guest XML
        from the instance plus its (volume) block device info and saves
        the updated connection info.
        """
        # Preparing mocks
        mock_domain = self.mox.CreateMock(fakelibvirt.virDomain)
        self.resultXML = None

        def fake_getLibVersion():
            return 9011

        def fake_getCapabilities():
            return """
            <capabilities>
                <host>
                    <uuid>cef19ce0-0ca2-11df-855d-b19fbce37686</uuid>
                    <cpu>
                      <arch>x86_64</arch>
                      <model>Penryn</model>
                      <vendor>Intel</vendor>
                      <topology sockets='1' cores='2' threads='1'/>
                      <feature name='xtpr'/>
                    </cpu>
                </host>
            </capabilities>
            """

        def fake_to_xml(context, instance, network_info, disk_info,
                        image_meta=None, rescue=None,
                        block_device_info=None, write_to_disk=False):
            # Capture the generated XML so the test can inspect it below.
            if image_meta is None:
                image_meta = {}
            conf = drvr._get_guest_config(instance, network_info, image_meta,
                                          disk_info, rescue, block_device_info)
            self.resultXML = conf.to_xml()
            return self.resultXML

        def fake_get_domain(instance):
            return mock_domain

        def fake_baselineCPU(cpu, flag):
            return """<cpu mode='custom' match='exact'>
                        <model fallback='allow'>Westmere</model>
                        <vendor>Intel</vendor>
                        <feature policy='require' name='aes'/>
                      </cpu>
                   """

        network_info = _fake_network_info(self.stubs, 1)
        self.create_fake_libvirt_mock(getLibVersion=fake_getLibVersion,
                                      getCapabilities=fake_getCapabilities,
                                      getVersion=lambda: 1005001,
                                      listDefinedDomains=lambda: [],
                                      numOfDomains=lambda: 0,
                                      baselineCPU=fake_baselineCPU)
        instance_ref = self.test_instance
        instance_ref['image_ref'] = 123456  # we send an int to test sha1 call
        instance = objects.Instance(**instance_ref)

        self.mox.ReplayAll()

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        self.stubs.Set(drvr,
                       '_get_guest_xml',
                       fake_to_xml)
        self.stubs.Set(host.Host,
                       'get_domain',
                       fake_get_domain)
        # A single bootable volume attached at /dev/vda.
        block_device_info = {'block_device_mapping':
                driver_block_device.convert_volumes([
                    fake_block_device.FakeDbBlockDeviceDict(
                              {'id': 1, 'guest_format': None,
                               'boot_index': 0,
                               'source_type': 'volume',
                               'destination_type': 'volume',
                               'device_name': '/dev/vda',
                               'disk_bus': 'virtio',
                               'device_type': 'disk',
                               'delete_on_termination': False}),
                    ])}
        block_device_info['block_device_mapping'][0]['connection_info'] = (
            {'driver_volume_type': 'iscsi'})
        with contextlib.nested(
                mock.patch.object(
                    driver_block_device.DriverVolumeBlockDevice, 'save'),
                mock.patch.object(objects.Instance, 'save')
        ) as (mock_volume_save, mock_instance_save):
            drvr.post_live_migration_at_destination(
                    self.context, instance, network_info, True,
                    block_device_info=block_device_info)
            self.assertIn('fake', self.resultXML)
            mock_volume_save.assert_called_once_with()
    def test_create_propagates_exceptions(self):
        """A failure in _create_domain must propagate out of
        _create_domain_and_network (cleanup is mocked so only the raise
        path is exercised).
        """
        self.flags(virt_type='lxc', group='libvirt')
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        instance = objects.Instance(id=1, uuid='fake-uuid',
                                    image_ref='my_fake_image')

        with contextlib.nested(
              mock.patch.object(drvr, '_create_domain_setup_lxc'),
              mock.patch.object(drvr, '_create_domain_cleanup_lxc'),
              mock.patch.object(drvr, '_is_booted_from_volume',
                                return_value=False),
              mock.patch.object(drvr, 'plug_vifs'),
              mock.patch.object(drvr, 'firewall_driver'),
              mock.patch.object(drvr, '_create_domain',
                                side_effect=exception.NovaException),
              mock.patch.object(drvr, 'cleanup')):
            self.assertRaises(exception.NovaException,
                              drvr._create_domain_and_network,
                              self.context,
                              'xml',
                              instance, None, None)
def test_create_without_pause(self):
self.flags(virt_type='lxc', group='libvirt')
@contextlib.contextmanager
def fake_lxc_disk_handler(*args, **kwargs):
yield
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
instance = objects.Instance(**self.test_instance)
with contextlib.nested(
mock.patch.object(drvr, '_lxc_disk_handler',
side_effect=fake_lxc_disk_handler),
mock.patch.object(drvr, 'plug_vifs'),
mock.patch.object(drvr, 'firewall_driver'),
mock.patch.object(drvr, '_create_domain'),
mock.patch.object(drvr, 'cleanup')) as (
_handler, cleanup, firewall_driver, create, plug_vifs):
domain = drvr._create_domain_and_network(self.context, 'xml',
instance, None, None)
self.assertEqual(0, create.call_args_list[0][1]['pause'])
self.assertEqual(0, domain.resume.call_count)
    def _test_create_with_network_events(self, neutron_failure=None,
                                         power_on=True):
        """Helper: create a domain while simulating neutron vif-plugged
        events.

        ``neutron_failure`` selects the event outcome: None (success),
        'timeout' (wait raises eventlet Timeout), or 'error' (event
        status 'failed').  ``power_on`` controls whether the guest is
        started, and therefore whether events are waited on at all.
        """
        generated_events = []

        def wait_timeout():
            event = mock.MagicMock()
            if neutron_failure == 'timeout':
                raise eventlet.timeout.Timeout()
            elif neutron_failure == 'error':
                event.status = 'failed'
            else:
                event.status = 'completed'
            return event

        def fake_prepare(instance, event_name):
            # Record each prepared event so its wait() can be inspected.
            m = mock.MagicMock()
            m.instance = instance
            m.event_name = event_name
            m.wait.side_effect = wait_timeout
            generated_events.append(m)
            return m

        virtapi = manager.ComputeVirtAPI(mock.MagicMock())
        prepare = virtapi._compute.instance_events.prepare_for_instance_event
        prepare.side_effect = fake_prepare
        drvr = libvirt_driver.LibvirtDriver(virtapi, False)

        instance = objects.Instance(**self.test_instance)
        vifs = [{'id': 'vif1', 'active': False},
                {'id': 'vif2', 'active': False}]

        @mock.patch.object(drvr, 'plug_vifs')
        @mock.patch.object(drvr, 'firewall_driver')
        @mock.patch.object(drvr, '_create_domain')
        @mock.patch.object(drvr, 'cleanup')
        def test_create(cleanup, create, fw_driver, plug_vifs):
            domain = drvr._create_domain_and_network(self.context, 'xml',
                                                     instance, vifs, None,
                                                     power_on=power_on)
            plug_vifs.assert_called_with(instance, vifs)

            pause = self._get_pause_flag(drvr, vifs, power_on=power_on)
            self.assertEqual(pause,
                             create.call_args_list[0][1]['pause'])
            if pause:
                domain.resume.assert_called_once_with()
            if neutron_failure and CONF.vif_plugging_is_fatal:
                cleanup.assert_called_once_with(self.context,
                                                instance, network_info=vifs,
                                                block_device_info=None)

        test_create()

        if utils.is_neutron() and CONF.vif_plugging_timeout and power_on:
            prepare.assert_has_calls([
                mock.call(instance, 'network-vif-plugged-vif1'),
                mock.call(instance, 'network-vif-plugged-vif2')])
            for event in generated_events:
                if neutron_failure and generated_events.index(event) != 0:
                    # NOTE(review): event.call_count counts calls to the
                    # event mock itself, which is never called, so this
                    # is trivially 0; presumably event.wait.call_count
                    # was intended -- confirm before changing.
                    self.assertEqual(0, event.call_count)
                elif (neutron_failure == 'error' and
                          not CONF.vif_plugging_is_fatal):
                    event.wait.assert_called_once_with()
        else:
            # No neutron / no timeout / not powered on: no events prepared.
            self.assertEqual(0, prepare.call_count)
    @mock.patch('nova.utils.is_neutron', return_value=True)
    def test_create_with_network_events_neutron(self, is_neutron):
        """Happy path: neutron events are prepared and waited on."""
        self._test_create_with_network_events()
    @mock.patch('nova.utils.is_neutron', return_value=True)
    def test_create_with_network_events_neutron_power_off(self,
                                                          is_neutron):
        # Tests that we don't wait for events if we don't start the instance.
        self._test_create_with_network_events(power_on=False)
    @mock.patch('nova.utils.is_neutron', return_value=True)
    def test_create_with_network_events_neutron_nowait(self, is_neutron):
        """vif_plugging_timeout=0 disables event waiting entirely."""
        self.flags(vif_plugging_timeout=0)
        self._test_create_with_network_events()
    @mock.patch('nova.utils.is_neutron', return_value=True)
    def test_create_with_network_events_neutron_failed_nonfatal_timeout(
            self, is_neutron):
        """Event timeout with vif_plugging_is_fatal=False does not raise."""
        self.flags(vif_plugging_is_fatal=False)
        self._test_create_with_network_events(neutron_failure='timeout')
    @mock.patch('nova.utils.is_neutron', return_value=True)
    def test_create_with_network_events_neutron_failed_fatal_timeout(
            self, is_neutron):
        """Event timeout with fatal vif plugging raises
        VirtualInterfaceCreateException.
        """
        self.assertRaises(exception.VirtualInterfaceCreateException,
                          self._test_create_with_network_events,
                          neutron_failure='timeout')
    @mock.patch('nova.utils.is_neutron', return_value=True)
    def test_create_with_network_events_neutron_failed_nonfatal_error(
            self, is_neutron):
        """A 'failed' event with vif_plugging_is_fatal=False does not raise."""
        self.flags(vif_plugging_is_fatal=False)
        self._test_create_with_network_events(neutron_failure='error')
    @mock.patch('nova.utils.is_neutron', return_value=True)
    def test_create_with_network_events_neutron_failed_fatal_error(
            self, is_neutron):
        """A 'failed' event with fatal vif plugging raises
        VirtualInterfaceCreateException.
        """
        self.assertRaises(exception.VirtualInterfaceCreateException,
                          self._test_create_with_network_events,
                          neutron_failure='error')
    @mock.patch('nova.utils.is_neutron', return_value=False)
    def test_create_with_network_events_non_neutron(self, is_neutron):
        """Without neutron, no events are prepared or waited on."""
        self._test_create_with_network_events()
    @mock.patch('nova.volume.encryptors.get_encryption_metadata')
    @mock.patch('nova.virt.libvirt.blockinfo.get_info_from_bdm')
    def test_create_with_bdm(self, get_info_from_bdm, get_encryption_metadata):
        """Creating a domain with an attached volume fetches encryption
        metadata, builds the encryptor, and plugs/filters VIFs before
        launching.
        """
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        instance = objects.Instance(**self.test_instance)
        mock_dom = mock.MagicMock()
        mock_encryption_meta = mock.MagicMock()
        get_encryption_metadata.return_value = mock_encryption_meta

        fake_xml = """
            <domain>
                <name>instance-00000001</name>
                <memory>1048576</memory>
                <vcpu>1</vcpu>
                <devices>
                    <disk type='file' device='disk'>
                        <driver name='qemu' type='raw' cache='none'/>
                        <source file='/path/fake-volume1'/>
                        <target dev='vda' bus='virtio'/>
                    </disk>
                </devices>
            </domain>
        """
        fake_volume_id = "fake-volume-id"
        connection_info = {"driver_volume_type": "fake",
                           "data": {"access_mode": "rw",
                                    "volume_id": fake_volume_id}}

        def fake_getitem(*args, **kwargs):
            fake_bdm = {'connection_info': connection_info,
                        'mount_device': '/dev/vda'}
            return fake_bdm.get(args[0])

        mock_volume = mock.MagicMock()
        mock_volume.__getitem__.side_effect = fake_getitem
        block_device_info = {'block_device_mapping': [mock_volume]}
        network_info = [network_model.VIF(id='1'),
                        network_model.VIF(id='2', active=True)]

        with contextlib.nested(
            mock.patch.object(drvr, '_get_volume_encryptor'),
            mock.patch.object(drvr, 'plug_vifs'),
            mock.patch.object(drvr.firewall_driver, 'setup_basic_filtering'),
            mock.patch.object(drvr.firewall_driver,
                              'prepare_instance_filter'),
            mock.patch.object(drvr, '_create_domain'),
            mock.patch.object(drvr.firewall_driver, 'apply_instance_filter'),
        ) as (get_volume_encryptor, plug_vifs, setup_basic_filtering,
              prepare_instance_filter, create_domain, apply_instance_filter):
            create_domain.return_value = libvirt_guest.Guest(mock_dom)

            guest = drvr._create_domain_and_network(
                    self.context, fake_xml, instance, network_info, None,
                    block_device_info=block_device_info)

            get_encryption_metadata.assert_called_once_with(self.context,
                drvr._volume_api, fake_volume_id, connection_info)
            get_volume_encryptor.assert_called_once_with(connection_info,
                                                         mock_encryption_meta)
            plug_vifs.assert_called_once_with(instance, network_info)
            setup_basic_filtering.assert_called_once_with(instance,
                                                          network_info)
            prepare_instance_filter.assert_called_once_with(instance,
                                                          network_info)
            # Pause depends on whether there are inactive VIFs to wait on.
            pause = self._get_pause_flag(drvr, network_info)
            create_domain.assert_called_once_with(
                fake_xml, pause=pause, power_on=True)
            self.assertEqual(mock_dom, guest._domain)
    def test_get_guest_storage_config(self):
        """_get_guest_storage_config yields root + ephemeral + the
        attached volume, connects the volume, and applies cache modes to
        every device.
        """
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)

        test_instance = copy.deepcopy(self.test_instance)
        test_instance["default_swap_device"] = None
        instance = objects.Instance(**test_instance)
        image_meta = {}
        flavor = instance.get_flavor()
        conn_info = {'driver_volume_type': 'fake', 'data': {}}
        bdi = {'block_device_mapping':
               driver_block_device.convert_volumes([
                   fake_block_device.FakeDbBlockDeviceDict({
                       'id': 1,
                       'source_type': 'volume',
                       'destination_type': 'volume',
                       'device_name': '/dev/vdc'})
                ])}
        bdm = bdi['block_device_mapping'][0]
        bdm['connection_info'] = conn_info
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance,
                                            image_meta,
                                            bdi)
        mock_conf = mock.MagicMock(source_path='fake')

        with contextlib.nested(
                mock.patch.object(driver_block_device.DriverVolumeBlockDevice,
                                  'save'),
                mock.patch.object(drvr, '_connect_volume'),
                mock.patch.object(drvr, '_get_volume_config',
                                  return_value=mock_conf),
                mock.patch.object(drvr, '_set_cache_mode')
        ) as (volume_save, connect_volume, get_volume_config, set_cache_mode):
            devices = drvr._get_guest_storage_config(instance, None,
                disk_info, False, bdi, flavor, "hvm")

            # root disk, ephemeral disk and the attached volume.
            self.assertEqual(3, len(devices))
            self.assertEqual('/dev/vdb', instance.default_ephemeral_device)
            self.assertIsNone(instance.default_swap_device)
            connect_volume.assert_called_with(bdm['connection_info'],
                {'bus': 'virtio', 'type': 'disk', 'dev': 'vdc'})
            get_volume_config.assert_called_with(bdm['connection_info'],
                {'bus': 'virtio', 'type': 'disk', 'dev': 'vdc'})
            volume_save.assert_called_once_with()
            # Cache mode is applied once per device.
            self.assertEqual(3, set_cache_mode.call_count)
def test_get_neutron_events(self):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
network_info = [network_model.VIF(id='1'),
network_model.VIF(id='2', active=True)]
events = drvr._get_neutron_events(network_info)
self.assertEqual([('network-vif-plugged', '1')], events)
    def test_unplug_vifs_ignores_errors(self):
        """_unplug_vifs with ignore_errors=True swallows vif driver errors."""
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI())
        with mock.patch.object(drvr, 'vif_driver') as vif_driver:
            vif_driver.unplug.side_effect = exception.AgentError(
                method='unplug')
            drvr._unplug_vifs('inst', [1], ignore_errors=True)
            vif_driver.unplug.assert_called_once_with('inst', 1)
    def test_unplug_vifs_reports_errors(self):
        """The public unplug_vifs propagates vif driver errors."""
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI())
        with mock.patch.object(drvr, 'vif_driver') as vif_driver:
            vif_driver.unplug.side_effect = exception.AgentError(
                method='unplug')
            self.assertRaises(exception.AgentError,
                              drvr.unplug_vifs, 'inst', [1])
            vif_driver.unplug.assert_called_once_with('inst', 1)
    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver._unplug_vifs')
    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver._undefine_domain')
    def test_cleanup_pass_with_no_mount_device(self, undefine, unplug):
        """cleanup still disconnects a volume whose BDM has no
        mount_device.
        """
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI())
        drvr.firewall_driver = mock.Mock()
        drvr._disconnect_volume = mock.Mock()
        fake_inst = {'name': 'foo'}
        fake_bdms = [{'connection_info': 'foo',
                      'mount_device': None}]
        with mock.patch('nova.virt.driver'
                        '.block_device_info_get_mapping',
                        return_value=fake_bdms):
            drvr.cleanup('ctxt', fake_inst, 'netinfo', destroy_disks=False)
        self.assertTrue(drvr._disconnect_volume.called)
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver._unplug_vifs')
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver._undefine_domain')
def test_cleanup_wants_vif_errors_ignored(self, undefine, unplug):
    # cleanup() must invoke _unplug_vifs with ignore_errors=True
    # (the third positional argument).
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI())
    fake_inst = {'name': 'foo'}
    with mock.patch.object(drvr._conn, 'lookupByName') as lookup:
        lookup.return_value = fake_inst
        # NOTE(danms): Make unplug cause us to bail early, since
        # we only care about how it was called
        unplug.side_effect = test.TestingException
        self.assertRaises(test.TestingException,
                          drvr.cleanup, 'ctxt', fake_inst, 'netinfo')
        unplug.assert_called_once_with(fake_inst, 'netinfo', True)
@mock.patch('nova.virt.driver.block_device_info_get_mapping')
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver.'
            '_get_serial_ports_from_instance')
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver._undefine_domain')
def test_cleanup_serial_console_enabled(
        self, undefine, get_ports,
        block_device_info_get_mapping):
    # With serial consoles enabled, cleanup() must release the
    # instance's serial ports before the domain is undefined.
    self.flags(enabled="True", group='serial_console')
    instance = 'i1'
    network_info = {}
    bdm_info = {}
    firewall_driver = mock.MagicMock()

    get_ports.return_value = iter([('127.0.0.1', 10000)])
    block_device_info_get_mapping.return_value = ()

    # We want to ensure undefine_domain is called after
    # lookup_domain.
    def undefine_domain(instance):
        # After the domain is undefined, any further port lookup
        # would raise - this proves get_ports ran first.
        get_ports.side_effect = Exception("domain undefined")
    undefine.side_effect = undefine_domain

    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI())
    drvr.firewall_driver = firewall_driver
    drvr.cleanup(
        'ctx', instance, network_info,
        block_device_info=bdm_info,
        destroy_disks=False, destroy_vifs=False)

    get_ports.assert_called_once_with(instance)
    undefine.assert_called_once_with(instance)
    firewall_driver.unfilter_instance.assert_called_once_with(
        instance, network_info=network_info)
    block_device_info_get_mapping.assert_called_once_with(bdm_info)
def test_swap_volume(self):
    # _swap_volume should rebase the disk onto the new path, resize
    # it to the requested size and re-define the persistent domain.
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI())

    mock_dom = mock.MagicMock()

    with mock.patch.object(drvr._conn, 'defineXML',
                           create=True) as mock_define:
        xmldoc = "<domain/>"
        srcfile = "/first/path"
        dstfile = "/second/path"

        mock_dom.XMLDesc.return_value = xmldoc
        mock_dom.isPersistent.return_value = True
        # An empty blockJobInfo dict signals the copy job finished.
        mock_dom.blockJobInfo.return_value = {}

        drvr._swap_volume(mock_dom, srcfile, dstfile, 1)

        mock_dom.XMLDesc.assert_called_once_with(
            flags=(fakelibvirt.VIR_DOMAIN_XML_INACTIVE |
                   fakelibvirt.VIR_DOMAIN_XML_SECURE))
        mock_dom.blockRebase.assert_called_once_with(
            srcfile, dstfile, 0,
            fakelibvirt.VIR_DOMAIN_BLOCK_REBASE_COPY |
            fakelibvirt.VIR_DOMAIN_BLOCK_REBASE_REUSE_EXT)
        # Size is requested in GiB; blockResize is given KiB.
        mock_dom.blockResize.assert_called_once_with(
            srcfile, 1 * units.Gi / units.Ki)
        mock_define.assert_called_once_with(xmldoc)
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver._disconnect_volume')
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver._swap_volume')
@mock.patch('nova.virt.block_device.DriverVolumeBlockDevice.save')
@mock.patch('nova.objects.block_device.BlockDeviceMapping.'
            'get_by_volume_id')
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver._get_volume_config')
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver._connect_volume')
@mock.patch('nova.virt.libvirt.host.Host.get_domain')
def test_swap_volume_driver_bdm_save(self, get_domain,
                                     connect_volume, get_volume_config,
                                     get_by_volume_id, volume_save,
                                     swap_volume, disconnect_volume):
    # End-to-end swap_volume(): connect new volume, swap, disconnect
    # old volume, and persist the updated BDM record.
    conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI())
    instance = objects.Instance(**self.test_instance)
    old_connection_info = {'driver_volume_type': 'fake',
                           'serial': 'old-volume-id',
                           'data': {'device_path': '/fake-old-volume',
                                    'access_mode': 'rw'}}
    new_connection_info = {'driver_volume_type': 'fake',
                           'serial': 'new-volume-id',
                           'data': {'device_path': '/fake-new-volume',
                                    'access_mode': 'rw'}}
    mock_dom = mock.MagicMock()

    # Domain XML declaring the disk being swapped out (target vdb).
    mock_dom.XMLDesc.return_value = """<domain>
      <devices>
        <disk type='file'>
           <source file='/fake-old-volume'/>
           <target dev='vdb' bus='virtio'/>
        </disk>
      </devices>
    </domain>
    """

    get_domain.return_value = mock_dom
    disk_info = {'bus': 'virtio', 'type': 'disk', 'dev': 'vdb'}
    get_volume_config.return_value = mock.MagicMock(
        source_path='/fake-new-volume')

    bdm = objects.BlockDeviceMapping(self.context,
        **fake_block_device.FakeDbBlockDeviceDict(
            {'id': 2, 'instance_uuid': 'fake-instance',
             'device_name': '/dev/vdb',
             'source_type': 'volume',
             'destination_type': 'volume',
             'volume_id': 'fake-volume-id-2',
             'boot_index': 0}))
    get_by_volume_id.return_value = bdm

    conn.swap_volume(old_connection_info, new_connection_info, instance,
                     '/dev/vdb', 1)

    get_domain.assert_called_once_with(instance)
    connect_volume.assert_called_once_with(new_connection_info, disk_info)

    swap_volume.assert_called_once_with(mock_dom, 'vdb',
                                        '/fake-new-volume', 1)
    disconnect_volume.assert_called_once_with(old_connection_info, 'vdb')
    volume_save.assert_called_once_with()
def test_live_snapshot(self):
    # _live_snapshot: shallow-rebase the disk onto a throwaway delta
    # qcow2, extract the snapshot from it, then restore the domain XML.
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI())

    mock_dom = mock.MagicMock()

    with contextlib.nested(
            mock.patch.object(drvr._conn, 'defineXML', create=True),
            mock.patch.object(fake_libvirt_utils, 'get_disk_size'),
            mock.patch.object(fake_libvirt_utils, 'get_disk_backing_file'),
            mock.patch.object(fake_libvirt_utils, 'create_cow_image'),
            mock.patch.object(fake_libvirt_utils, 'chown'),
            mock.patch.object(fake_libvirt_utils, 'extract_snapshot'),
    ) as (mock_define, mock_size, mock_backing, mock_create_cow,
          mock_chown, mock_snapshot):

        xmldoc = "<domain/>"
        srcfile = "/first/path"
        dstfile = "/second/path"
        bckfile = "/other/path"
        dltfile = dstfile + ".delta"

        mock_dom.XMLDesc.return_value = xmldoc
        mock_dom.isPersistent.return_value = True
        mock_size.return_value = 1004009
        mock_backing.return_value = bckfile

        drvr._live_snapshot(self.context, self.test_instance, mock_dom,
                            srcfile, dstfile, "qcow2", {})

        mock_dom.XMLDesc.assert_called_once_with(flags=(
            fakelibvirt.VIR_DOMAIN_XML_INACTIVE |
            fakelibvirt.VIR_DOMAIN_XML_SECURE))
        mock_dom.blockRebase.assert_called_once_with(
            srcfile, dltfile, 0,
            fakelibvirt.VIR_DOMAIN_BLOCK_REBASE_COPY |
            fakelibvirt.VIR_DOMAIN_BLOCK_REBASE_REUSE_EXT |
            fakelibvirt.VIR_DOMAIN_BLOCK_REBASE_SHALLOW)

        mock_size.assert_called_once_with(srcfile)
        mock_backing.assert_called_once_with(srcfile, basename=False)
        # The delta image is a COW child of the source's backing file.
        mock_create_cow.assert_called_once_with(bckfile, dltfile, 1004009)
        mock_chown.assert_called_once_with(dltfile, os.getuid())
        mock_snapshot.assert_called_once_with(dltfile, "qcow2",
                                              dstfile, "qcow2")
        mock_define.assert_called_once_with(xmldoc)
@mock.patch.object(greenthread, "spawn")
def test_live_migration_hostname_valid(self, mock_spawn):
    # A well-formed destination hostname lets the migration greenthread
    # be spawned.
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    drvr.live_migration(self.context, self.test_instance,
                        "host1.example.com",
                        lambda x: x,
                        lambda x: x)
    self.assertEqual(1, mock_spawn.call_count)
@mock.patch.object(greenthread, "spawn")
@mock.patch.object(fake_libvirt_utils, "is_valid_hostname")
def test_live_migration_hostname_invalid(self, mock_hostname, mock_spawn):
    # An invalid destination hostname (e.g. shell metacharacters) must
    # raise InvalidHostname before the migration is spawned.
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    mock_hostname.return_value = False
    self.assertRaises(exception.InvalidHostname,
                      drvr.live_migration,
                      self.context, self.test_instance,
                      "foo/?com=/bin/sh",
                      lambda x: x,
                      lambda x: x)
@mock.patch('os.path.exists', return_value=True)
@mock.patch('tempfile.mkstemp')
@mock.patch('os.close', return_value=None)
def test_check_instance_shared_storage_local_raw(self,
                                                 mock_close,
                                                 mock_mkstemp,
                                                 mock_exists):
    # For raw file-backed instances the check creates a temp file in
    # the instance directory and returns its name to the caller.
    instance_uuid = str(uuid.uuid4())
    self.flags(images_type='raw', group='libvirt')
    self.flags(instances_path='/tmp')
    # fd -1 is safe here because os.close is mocked out.
    mock_mkstemp.return_value = (-1,
                                 '/tmp/{0}/file'.format(instance_uuid))
    driver = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    instance = objects.Instance(**self.test_instance)
    temp_file = driver.check_instance_shared_storage_local(self.context,
                                                           instance)
    self.assertEqual('/tmp/{0}/file'.format(instance_uuid),
                     temp_file['filename'])
def test_check_instance_shared_storage_local_rbd(self):
    """RBD-backed instances have no local disk to probe, so the
    shared-storage check returns None.
    """
    self.flags(images_type='rbd', group='libvirt')
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    instance = objects.Instance(**self.test_instance)
    result = drvr.check_instance_shared_storage_local(self.context,
                                                      instance)
    self.assertIsNone(result)
def test_version_to_string(self):
    """_version_to_string joins version components with dots."""
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    self.assertEqual("4.33.173",
                     drvr._version_to_string((4, 33, 173)))
def test_parallels_min_version_fail(self):
    # A libvirt version just below the parallels minimum must make
    # init_host fail.  (Version is encoded as major*1e6+minor*1e3+micro,
    # so 1002011 presumably means 1.2.11 - one below the 1.2.12 minimum.)
    self.flags(virt_type='parallels', group='libvirt')
    driver = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    with mock.patch.object(driver._conn, 'getLibVersion',
                           return_value=1002011):
        self.assertRaises(exception.NovaException,
                          driver.init_host, 'wibble')
def test_parallels_min_version_ok(self):
    # At exactly the minimum libvirt version (1002012 == 1.2.12,
    # presumably) init_host must succeed for parallels.
    self.flags(virt_type='parallels', group='libvirt')
    driver = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    with mock.patch.object(driver._conn, 'getLibVersion',
                           return_value=1002012):
        driver.init_host('wibble')
def test_get_guest_config_parallels_vm(self):
    # A parallels HVM guest with ploop images should be configured with
    # two disks, one interface, input, graphics and video devices.
    self.flags(virt_type='parallels', group='libvirt')
    self.flags(images_type='ploop', group='libvirt')
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
    instance_ref = objects.Instance(**self.test_instance)
    image_meta = {}

    disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                        instance_ref,
                                        image_meta)

    cfg = drvr._get_guest_config(instance_ref,
                                 _fake_network_info(self.stubs, 1),
                                 None, disk_info)
    self.assertEqual("parallels", cfg.virt_type)
    self.assertEqual(instance_ref["uuid"], cfg.uuid)
    self.assertEqual(2 * units.Mi, cfg.memory)
    self.assertEqual(1, cfg.vcpus)
    self.assertEqual(vm_mode.HVM, cfg.os_type)
    self.assertIsNone(cfg.os_root)
    self.assertEqual(6, len(cfg.devices))
    # Root disk must use the ploop driver format.
    self.assertIsInstance(cfg.devices[0],
                          vconfig.LibvirtConfigGuestDisk)
    self.assertEqual(cfg.devices[0].driver_format, "ploop")
    self.assertIsInstance(cfg.devices[1],
                          vconfig.LibvirtConfigGuestDisk)
    self.assertIsInstance(cfg.devices[2],
                          vconfig.LibvirtConfigGuestInterface)
    self.assertIsInstance(cfg.devices[3],
                          vconfig.LibvirtConfigGuestInput)
    self.assertIsInstance(cfg.devices[4],
                          vconfig.LibvirtConfigGuestGraphics)
    self.assertIsInstance(cfg.devices[5],
                          vconfig.LibvirtConfigGuestVideo)
def test_get_guest_config_parallels_ct(self):
    # A parallels container (vm_mode=exe) boots from a ploop-backed
    # filesystem mounted at '/' instead of a disk device.
    self.flags(virt_type='parallels', group='libvirt')
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
    ct_instance = self.test_instance.copy()
    ct_instance["vm_mode"] = vm_mode.EXE
    instance_ref = objects.Instance(**ct_instance)

    cfg = drvr._get_guest_config(instance_ref,
                                 _fake_network_info(self.stubs, 1),
                                 None, {'mapping': {}})
    self.assertEqual("parallels", cfg.virt_type)
    self.assertEqual(instance_ref["uuid"], cfg.uuid)
    self.assertEqual(2 * units.Mi, cfg.memory)
    self.assertEqual(1, cfg.vcpus)
    self.assertEqual(vm_mode.EXE, cfg.os_type)
    self.assertEqual("/sbin/init", cfg.os_init_path)
    self.assertIsNone(cfg.os_root)
    self.assertEqual(4, len(cfg.devices))
    self.assertIsInstance(cfg.devices[0],
                          vconfig.LibvirtConfigGuestFilesys)
    fs = cfg.devices[0]
    self.assertEqual(fs.source_type, "file")
    self.assertEqual(fs.driver_type, "ploop")
    self.assertEqual(fs.target_dir, "/")
    self.assertIsInstance(cfg.devices[1],
                          vconfig.LibvirtConfigGuestInterface)
    self.assertIsInstance(cfg.devices[2],
                          vconfig.LibvirtConfigGuestGraphics)
    self.assertIsInstance(cfg.devices[3],
                          vconfig.LibvirtConfigGuestVideo)
class HostStateTestCase(test.NoDBTestCase):
    """Verify get_available_resource() reports host resources correctly.

    Uses a FakeConnection subclass of LibvirtDriver whose resource
    probes return canned values.
    """

    # Canned host CPU description returned by _get_cpu_info().
    cpu_info = {"vendor": "Intel", "model": "pentium", "arch": "i686",
                "features": ["ssse3", "monitor", "pni", "sse2", "sse",
                             "fxsr", "clflush", "pse36", "pat", "cmov", "mca", "pge",
                             "mtrr", "sep", "apic"],
                "topology": {"cores": "1", "threads": "1", "sockets": "1"}}

    instance_caps = [(arch.X86_64, "kvm", "hvm"),
                     (arch.I686, "kvm", "hvm")]

    pci_devices = [{
        "dev_id": "pci_0000_04_00_3",
        "address": "0000:04:10.3",
        "product_id": '1521',
        "vendor_id": '8086',
        "dev_type": 'type-PF',
        "phys_function": None}]

    # Two-cell NUMA layout with no usage or pinning.
    numa_topology = objects.NUMATopology(
        cells=[objects.NUMACell(
            id=1, cpuset=set([1, 2]), memory=1024,
            cpu_usage=0, memory_usage=0,
            mempages=[], siblings=[],
            pinned_cpus=set([])),
               objects.NUMACell(
            id=2, cpuset=set([3, 4]), memory=1024,
            cpu_usage=0, memory_usage=0,
            mempages=[], siblings=[],
            pinned_cpus=set([]))])

    class FakeConnection(libvirt_driver.LibvirtDriver):
        """Fake connection object."""
        def __init__(self):
            super(HostStateTestCase.FakeConnection,
                  self).__init__(fake.FakeVirtAPI(), True)

            self._host = host.Host("qemu:///system")

            # Replace the Host memory probes with fixed values.
            def _get_memory_mb_total():
                return 497

            def _get_memory_mb_used():
                return 88

            self._host.get_memory_mb_total = _get_memory_mb_total
            self._host.get_memory_mb_used = _get_memory_mb_used

        def _get_vcpu_total(self):
            return 1

        def _get_vcpu_used(self):
            return 0

        def _get_cpu_info(self):
            return HostStateTestCase.cpu_info

        def _get_disk_over_committed_size_total(self):
            return 0

        def _get_local_gb_info(self):
            return {'total': 100, 'used': 20, 'free': 80}

        def get_host_uptime(self):
            return ('10:01:16 up 1:36, 6 users, '
                    'load average: 0.21, 0.16, 0.19')

        def _get_disk_available_least(self):
            return 13091

        def _get_instance_capabilities(self):
            return HostStateTestCase.instance_caps

        def _get_pci_passthrough_devices(self):
            return jsonutils.dumps(HostStateTestCase.pci_devices)

        def _get_host_numa_topology(self):
            return HostStateTestCase.numa_topology

    @mock.patch.object(fakelibvirt, "openAuth")
    def test_update_status(self, mock_open):
        # All values returned by the fake probes must appear verbatim
        # in the stats dict produced by get_available_resource().
        mock_open.return_value = fakelibvirt.Connection("qemu:///system")

        drvr = HostStateTestCase.FakeConnection()

        stats = drvr.get_available_resource("compute1")
        self.assertEqual(stats["vcpus"], 1)
        self.assertEqual(stats["memory_mb"], 497)
        self.assertEqual(stats["local_gb"], 100)
        self.assertEqual(stats["vcpus_used"], 0)
        self.assertEqual(stats["memory_mb_used"], 88)
        self.assertEqual(stats["local_gb_used"], 20)
        self.assertEqual(stats["hypervisor_type"], 'QEMU')
        self.assertEqual(stats["hypervisor_version"], 1001000)
        self.assertEqual(stats["hypervisor_hostname"], 'compute1')
        cpu_info = jsonutils.loads(stats["cpu_info"])
        self.assertEqual(cpu_info,
                         {"vendor": "Intel", "model": "pentium",
                          "arch": arch.I686,
                          "features": ["ssse3", "monitor", "pni", "sse2", "sse",
                                       "fxsr", "clflush", "pse36", "pat", "cmov",
                                       "mca", "pge", "mtrr", "sep", "apic"],
                          "topology": {"cores": "1", "threads": "1", "sockets": "1"}
                          })
        self.assertEqual(stats["disk_available_least"], 80)
        self.assertEqual(jsonutils.loads(stats["pci_passthrough_devices"]),
                         HostStateTestCase.pci_devices)
        self.assertThat(objects.NUMATopology.obj_from_db_obj(
                            stats['numa_topology'])._to_dict(),
                        matchers.DictMatches(
                            HostStateTestCase.numa_topology._to_dict()))
class LibvirtDriverTestCase(test.NoDBTestCase):
"""Test for nova.virt.libvirt.libvirt_driver.LibvirtDriver."""
def setUp(self):
    """Build a read-only driver instance and an admin context."""
    super(LibvirtDriverTestCase, self).setUp()
    self.drvr = libvirt_driver.LibvirtDriver(
        fake.FakeVirtAPI(), read_only=True)
    self.context = context.get_admin_context()
def _create_instance(self, params=None):
    """Create a test instance.

    :param params: optional dict of field overrides merged on top of
                   the defaults below.
    :returns: an objects.Instance with an m1.tiny-style flavor
              (10G root, 20G ephemeral).
    """
    if not params:
        params = {}

    flavor = objects.Flavor(memory_mb=512,
                            swap=0,
                            vcpu_weight=None,
                            root_gb=10,
                            id=2,
                            name=u'm1.tiny',
                            ephemeral_gb=20,
                            rxtx_factor=1.0,
                            flavorid=u'1',
                            vcpus=1)

    inst = {}
    inst['id'] = 1
    inst['uuid'] = '52d3b512-1152-431f-a8f7-28f0288a622b'
    inst['os_type'] = 'linux'
    inst['image_ref'] = '1'
    inst['reservation_id'] = 'r-fakeres'
    inst['user_id'] = 'fake'
    inst['project_id'] = 'fake'
    inst['instance_type_id'] = 2
    inst['ami_launch_index'] = 0
    inst['host'] = 'host1'
    inst['root_gb'] = flavor.root_gb
    inst['ephemeral_gb'] = flavor.ephemeral_gb
    inst['config_drive'] = True
    inst['kernel_id'] = 2
    inst['ramdisk_id'] = 3
    inst['key_data'] = 'ABCDEFG'
    inst['system_metadata'] = {}
    inst['metadata'] = {}

    # Caller-supplied overrides win over the defaults above.
    inst.update(params)

    return objects.Instance(flavor=flavor,
                            old_flavor=None, new_flavor=None,
                            **inst)
@staticmethod
def _disk_info():
    """Return a JSON-encoded two-disk layout used by the resize tests."""
    # 10G root and 512M swap disk
    disk_info = [{'disk_size': 1, 'type': 'qcow2',
                  'virt_disk_size': 10737418240, 'path': '/test/disk',
                  'backing_file': '/base/disk'},
                 {'disk_size': 1, 'type': 'qcow2',
                  'virt_disk_size': 536870912, 'path': '/test/disk.swap',
                  'backing_file': '/base/swap_512'}]
    return jsonutils.dumps(disk_info)
def test_migrate_disk_and_power_off_exception(self):
    """Test for nova.virt.libvirt.libvirt_driver.LibvirtConnection
    .migrate_disk_and_power_off.
    """
    self.counter = 0
    self.checked_shared_storage = False

    def fake_get_instance_disk_info(instance,
                                    block_device_info=None):
        return '[]'

    def fake_destroy(instance):
        pass

    def fake_get_host_ip_addr():
        return '10.0.0.1'

    def fake_execute(*args, **kwargs):
        # Fail the very first shell command to exercise the error path.
        self.counter += 1
        if self.counter == 1:
            assert False, "intentional failure"

    def fake_os_path_exists(path):
        return True

    def fake_is_storage_shared(dest, inst_base):
        self.checked_shared_storage = True
        return False

    self.stubs.Set(self.drvr, 'get_instance_disk_info',
                   fake_get_instance_disk_info)
    self.stubs.Set(self.drvr, '_destroy', fake_destroy)
    self.stubs.Set(self.drvr, 'get_host_ip_addr',
                   fake_get_host_ip_addr)
    self.stubs.Set(self.drvr, '_is_storage_shared_with',
                   fake_is_storage_shared)
    self.stubs.Set(utils, 'execute', fake_execute)
    self.stubs.Set(os.path, 'exists', fake_os_path_exists)

    ins_ref = self._create_instance()
    flavor = {'root_gb': 10, 'ephemeral_gb': 20}
    flavor_obj = objects.Flavor(**flavor)

    self.assertRaises(AssertionError,
                      self.drvr.migrate_disk_and_power_off,
                      context.get_admin_context(), ins_ref, '10.0.0.2',
                      flavor_obj, None)
def _test_migrate_disk_and_power_off(self, flavor_obj,
                                     block_device_info=None,
                                     params_for_instance=None):
    """Test for nova.virt.libvirt.libvirt_driver.LibvirtConnection
    .migrate_disk_and_power_off.

    Exercises both the remote-destination and same-host code paths
    and expects the disk-info JSON to be passed through unchanged.
    """

    disk_info = self._disk_info()

    def fake_get_instance_disk_info(instance,
                                    block_device_info=None):
        return disk_info

    def fake_destroy(instance):
        pass

    def fake_get_host_ip_addr():
        return '10.0.0.1'

    def fake_execute(*args, **kwargs):
        pass

    self.stubs.Set(self.drvr, 'get_instance_disk_info',
                   fake_get_instance_disk_info)
    self.stubs.Set(self.drvr, '_destroy', fake_destroy)
    self.stubs.Set(self.drvr, 'get_host_ip_addr',
                   fake_get_host_ip_addr)
    self.stubs.Set(utils, 'execute', fake_execute)

    ins_ref = self._create_instance(params=params_for_instance)

    # dest is different host case
    out = self.drvr.migrate_disk_and_power_off(
        context.get_admin_context(), ins_ref, '10.0.0.2',
        flavor_obj, None, block_device_info=block_device_info)
    self.assertEqual(out, disk_info)

    # dest is same host case
    out = self.drvr.migrate_disk_and_power_off(
        context.get_admin_context(), ins_ref, '10.0.0.1',
        flavor_obj, None, block_device_info=block_device_info)
    self.assertEqual(out, disk_info)
def test_migrate_disk_and_power_off(self):
    """Plain resize with the default 10G root / 20G ephemeral flavor."""
    flavor_obj = objects.Flavor(root_gb=10, ephemeral_gb=20)
    self._test_migrate_disk_and_power_off(flavor_obj)
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver._disconnect_volume')
def test_migrate_disk_and_power_off_boot_from_volume(self,
                                                     disconnect_volume):
    # Boot-from-volume resize: the root volume (boot_index 0 on vda)
    # must be disconnected as part of the migration.
    info = {'block_device_mapping': [{'boot_index': None,
                                      'mount_device': '/dev/vdd',
                                      'connection_info': None},
                                     {'boot_index': 0,
                                      'mount_device': '/dev/vda',
                                      'connection_info': None}]}
    flavor = {'root_gb': 1, 'ephemeral_gb': 0}
    flavor_obj = objects.Flavor(**flavor)
    # Note(Mike_D): The size of instance's ephemeral_gb is 0 gb.
    self._test_migrate_disk_and_power_off(
        flavor_obj, block_device_info=info,
        params_for_instance={'image_ref': None, 'ephemeral_gb': 0})
    disconnect_volume.assert_called_with(
        info['block_device_mapping'][1]['connection_info'], 'vda')
@mock.patch('nova.utils.execute')
@mock.patch('nova.virt.libvirt.utils.copy_image')
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver._destroy')
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver.get_host_ip_addr')
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver'
            '.get_instance_disk_info')
def test_migrate_disk_and_power_off_swap(self, mock_get_disk_info,
                                         get_host_ip_addr,
                                         mock_destroy,
                                         mock_copy_image,
                                         mock_execute):
    """Test for nova.virt.libvirt.libvirt_driver.LibvirtConnection
    .migrate_disk_and_power_off.

    The swap disk must never be copied or moved during a resize;
    it is recreated on the destination.
    """
    self.copy_or_move_swap_called = False

    disk_info = self._disk_info()
    mock_get_disk_info.return_value = disk_info
    get_host_ip_addr.return_value = '10.0.0.1'

    def fake_copy_image(*args, **kwargs):
        # disk.swap should not be touched since it is skipped over
        if '/test/disk.swap' in list(args):
            self.copy_or_move_swap_called = True

    def fake_execute(*args, **kwargs):
        # disk.swap should not be touched since it is skipped over
        if set(['mv', '/test/disk.swap']).issubset(list(args)):
            self.copy_or_move_swap_called = True

    mock_copy_image.side_effect = fake_copy_image
    mock_execute.side_effect = fake_execute

    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)

    # Original instance config
    instance = self._create_instance({'root_gb': 10,
                                      'ephemeral_gb': 0})

    # Re-size fake instance to 20G root and 1024M swap disk
    flavor = {'root_gb': 20, 'ephemeral_gb': 0, 'swap': 1024}
    flavor_obj = objects.Flavor(**flavor)

    # Destination is same host
    out = drvr.migrate_disk_and_power_off(context.get_admin_context(),
                                          instance, '10.0.0.1',
                                          flavor_obj, None)

    mock_get_disk_info.assert_called_once_with(instance,
                                               block_device_info=None)
    self.assertTrue(get_host_ip_addr.called)
    mock_destroy.assert_called_once_with(instance)
    self.assertFalse(self.copy_or_move_swap_called)
    self.assertEqual(disk_info, out)
def _test_migrate_disk_and_power_off_resize_check(self, expected_exc):
    """Test for nova.virt.libvirt.libvirt_driver.LibvirtConnection
    .migrate_disk_and_power_off.

    :param expected_exc: exception class the migration must raise.
    """

    def fake_get_instance_disk_info(instance, xml=None,
                                    block_device_info=None):
        return self._disk_info()

    def fake_destroy(instance):
        pass

    def fake_get_host_ip_addr():
        return '10.0.0.1'

    self.stubs.Set(self.drvr, 'get_instance_disk_info',
                   fake_get_instance_disk_info)
    self.stubs.Set(self.drvr, '_destroy', fake_destroy)
    self.stubs.Set(self.drvr, 'get_host_ip_addr',
                   fake_get_host_ip_addr)

    ins_ref = self._create_instance()
    flavor = {'root_gb': 10, 'ephemeral_gb': 20}
    flavor_obj = objects.Flavor(**flavor)

    # Migration is not implemented for LVM backed instances
    self.assertRaises(expected_exc,
                      self.drvr.migrate_disk_and_power_off,
                      None, ins_ref, '10.0.0.1', flavor_obj, None)
def test_migrate_disk_and_power_off_lvm(self):
    # LVM-backed instances cannot be resized; the driver must roll back.
    self.flags(images_type='lvm', group='libvirt')

    def fake_execute(*args, **kwargs):
        pass

    self.stubs.Set(utils, 'execute', fake_execute)

    expected_exc = exception.InstanceFaultRollback
    self._test_migrate_disk_and_power_off_resize_check(expected_exc)
def test_migrate_disk_and_power_off_resize_cannot_ssh(self):
    # If the destination is unreachable (every exec fails) and storage
    # is not shared, the resize must roll back.
    def fake_execute(*args, **kwargs):
        raise processutils.ProcessExecutionError()

    def fake_is_storage_shared(dest, inst_base):
        self.checked_shared_storage = True
        return False

    self.stubs.Set(self.drvr, '_is_storage_shared_with',
                   fake_is_storage_shared)
    self.stubs.Set(utils, 'execute', fake_execute)

    expected_exc = exception.InstanceFaultRollback
    self._test_migrate_disk_and_power_off_resize_check(expected_exc)
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver'
            '.get_instance_disk_info')
def test_migrate_disk_and_power_off_resize_error(self, mock_get_disk_info):
    # Shrinking the root disk (10G -> 5G) is not allowed and must
    # trigger a rollback.
    instance = self._create_instance()
    flavor = {'root_gb': 5, 'ephemeral_gb': 10}
    flavor_obj = objects.Flavor(**flavor)
    mock_get_disk_info.return_value = self._disk_info()

    self.assertRaises(
        exception.InstanceFaultRollback,
        self.drvr.migrate_disk_and_power_off,
        'ctx', instance, '10.0.0.1', flavor_obj, None)
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver'
            '.get_instance_disk_info')
def test_migrate_disk_and_power_off_resize_error_default_ephemeral(
        self, mock_get_disk_info):
    # Note(Mike_D): The size of this instance's ephemeral_gb is 20 gb.
    # Shrinking ephemeral (20G -> 0G) must trigger a rollback.
    instance = self._create_instance()
    flavor = {'root_gb': 10, 'ephemeral_gb': 0}
    flavor_obj = objects.Flavor(**flavor)
    mock_get_disk_info.return_value = self._disk_info()

    self.assertRaises(exception.InstanceFaultRollback,
                      self.drvr.migrate_disk_and_power_off,
                      'ctx', instance, '10.0.0.1', flavor_obj, None)
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver'
            '.get_instance_disk_info')
@mock.patch('nova.virt.driver.block_device_info_get_ephemerals')
def test_migrate_disk_and_power_off_resize_error_eph(self, mock_get,
                                                     mock_get_disk_info):
    # With explicit ephemeral BDMs, the largest real ephemeral disk
    # (3G below) bounds how small the new flavor's ephemeral can be.
    mappings = [
        {
            'device_name': '/dev/sdb4',
            'source_type': 'blank',
            'destination_type': 'local',
            'device_type': 'disk',
            'guest_format': 'swap',
            'boot_index': -1,
            'volume_size': 1
        },
        {
            'device_name': '/dev/sda1',
            'source_type': 'volume',
            'destination_type': 'volume',
            'device_type': 'disk',
            'volume_id': 1,
            'guest_format': None,
            'boot_index': 1,
            'volume_size': 6
        },
        {
            'device_name': '/dev/sda2',
            'source_type': 'snapshot',
            'destination_type': 'volume',
            'snapshot_id': 1,
            'device_type': 'disk',
            'guest_format': None,
            'boot_index': 0,
            'volume_size': 4
        },
        {
            'device_name': '/dev/sda3',
            'source_type': 'blank',
            'destination_type': 'local',
            'device_type': 'disk',
            'guest_format': None,
            'boot_index': -1,
            'volume_size': 3
        }
    ]
    mock_get.return_value = mappings
    instance = self._create_instance()

    # Old flavor, eph is 20, real disk is 3, target is 2, fail
    flavor = {'root_gb': 10, 'ephemeral_gb': 2}
    flavor_obj = objects.Flavor(**flavor)
    mock_get_disk_info.return_value = self._disk_info()

    self.assertRaises(
        exception.InstanceFaultRollback,
        self.drvr.migrate_disk_and_power_off,
        'ctx', instance, '10.0.0.1', flavor_obj, None)

    # Old flavor, eph is 20, real disk is 3, target is 4
    flavor = {'root_gb': 10, 'ephemeral_gb': 4}
    flavor_obj = objects.Flavor(**flavor)
    self._test_migrate_disk_and_power_off(flavor_obj)
def test_wait_for_running(self):
    # _wait_for_running: missing instance raises, running instance
    # signals LoopingCallDone, any other state keeps polling (no-op).
    def fake_get_info(instance):
        if instance['name'] == "not_found":
            raise exception.InstanceNotFound(instance_id=instance['uuid'])
        elif instance['name'] == "running":
            return hardware.InstanceInfo(state=power_state.RUNNING)
        else:
            return hardware.InstanceInfo(state=power_state.SHUTDOWN)

    self.stubs.Set(self.drvr, 'get_info',
                   fake_get_info)

    # instance not found case
    self.assertRaises(exception.InstanceNotFound,
                      self.drvr._wait_for_running,
                      {'name': 'not_found',
                       'uuid': 'not_found_uuid'})

    # instance is running case
    self.assertRaises(loopingcall.LoopingCallDone,
                      self.drvr._wait_for_running,
                      {'name': 'running',
                       'uuid': 'running_uuid'})

    # else case
    self.drvr._wait_for_running({'name': 'else',
                                 'uuid': 'other_uuid'})
def test_disk_size_from_instance_disk_info(self):
    # Disk path suffix selects the flavor field: bare 'disk' -> root_gb,
    # '.local' -> ephemeral_gb; '.swap' yields 0 (presumably so swap is
    # skipped during resize - TODO confirm against the driver).
    instance_data = {'root_gb': 10, 'ephemeral_gb': 20, 'swap_gb': 30}
    inst = objects.Instance(**instance_data)
    info = {'path': '/path/disk'}
    self.assertEqual(10 * units.Gi,
                     self.drvr._disk_size_from_instance(inst, info))

    info = {'path': '/path/disk.local'}
    self.assertEqual(20 * units.Gi,
                     self.drvr._disk_size_from_instance(inst, info))

    info = {'path': '/path/disk.swap'}
    self.assertEqual(0,
                     self.drvr._disk_size_from_instance(inst, info))
@mock.patch('nova.utils.execute')
def test_disk_raw_to_qcow2(self, mock_execute):
    # Conversion writes to a temp '<path>_qcow' then moves it in place.
    path = '/test/disk'
    _path_qcow = path + '_qcow'

    self.drvr._disk_raw_to_qcow2(path)
    mock_execute.assert_has_calls([
        mock.call('qemu-img', 'convert', '-f', 'raw',
                  '-O', 'qcow2', path, _path_qcow),
        mock.call('mv', _path_qcow, path)])
@mock.patch('nova.utils.execute')
def test_disk_qcow2_to_raw(self, mock_execute):
    # Conversion writes to a temp '<path>_raw' then moves it in place.
    path = '/test/disk'
    _path_raw = path + '_raw'

    self.drvr._disk_qcow2_to_raw(path)
    mock_execute.assert_has_calls([
        mock.call('qemu-img', 'convert', '-f', 'qcow2',
                  '-O', 'raw', path, _path_raw),
        mock.call('mv', _path_raw, path)])
@mock.patch('nova.virt.disk.api.extend')
def test_disk_resize_raw(self, mock_extend):
    # Raw images are extended in place, with no format conversion.
    image = imgmodel.LocalFileImage("/test/disk",
                                    imgmodel.FORMAT_RAW)

    self.drvr._disk_resize(image, 50)
    mock_extend.assert_called_once_with(image, 50)
@mock.patch('nova.virt.disk.api.can_resize_image')
@mock.patch('nova.virt.disk.api.is_image_extendable')
@mock.patch('nova.virt.disk.api.extend')
def test_disk_resize_qcow2(
        self, mock_extend, mock_can_resize, mock_is_image_extendable):
    # qcow2 images are converted to raw, extended, then converted back.
    with contextlib.nested(
            mock.patch.object(
                self.drvr, '_disk_qcow2_to_raw'),
            mock.patch.object(
                self.drvr, '_disk_raw_to_qcow2'))\
            as (mock_disk_qcow2_to_raw, mock_disk_raw_to_qcow2):

        mock_can_resize.return_value = True
        mock_is_image_extendable.return_value = True

        imageqcow2 = imgmodel.LocalFileImage("/test/disk",
                                             imgmodel.FORMAT_QCOW2)
        imageraw = imgmodel.LocalFileImage("/test/disk",
                                           imgmodel.FORMAT_RAW)
        self.drvr._disk_resize(imageqcow2, 50)

        mock_disk_qcow2_to_raw.assert_called_once_with(imageqcow2.path)
        mock_extend.assert_called_once_with(imageraw, 50)
        mock_disk_raw_to_qcow2.assert_called_once_with(imageqcow2.path)
def _test_finish_migration(self, power_on, resize_instance=False):
    """Test for nova.virt.libvirt.libvirt_driver.LibvirtConnection
    .finish_migration.

    :param power_on: expected power_on flag passed through to the
                     domain-creation call.
    :param resize_instance: whether _disk_resize is expected to run.
    """
    powered_on = power_on

    self.fake_create_domain_called = False
    self.fake_disk_resize_called = False

    def fake_to_xml(context, instance, network_info, disk_info,
                    image_meta=None, rescue=None,
                    block_device_info=None, write_to_disk=False):
        return ""

    def fake_plug_vifs(instance, network_info):
        pass

    def fake_create_image(context, inst,
                          disk_mapping, suffix='',
                          disk_images=None, network_info=None,
                          block_device_info=None, inject_files=True,
                          fallback_from_host=None):
        # Files are injected earlier in the flow; finish_migration
        # must not re-inject.
        self.assertFalse(inject_files)

    def fake_create_domain_and_network(
            context, xml, instance, network_info, disk_info,
            block_device_info=None, power_on=True, reboot=False,
            vifs_already_plugged=False):
        self.fake_create_domain_called = True
        self.assertEqual(powered_on, power_on)
        self.assertTrue(vifs_already_plugged)

    def fake_enable_hairpin():
        pass

    def fake_execute(*args, **kwargs):
        pass

    def fake_get_info(instance):
        if powered_on:
            return hardware.InstanceInfo(state=power_state.RUNNING)
        else:
            return hardware.InstanceInfo(state=power_state.SHUTDOWN)

    def fake_disk_resize(image, size):
        self.fake_disk_resize_called = True

    self.flags(use_cow_images=True)
    self.stubs.Set(self.drvr, '_disk_resize',
                   fake_disk_resize)
    self.stubs.Set(self.drvr, '_get_guest_xml', fake_to_xml)
    self.stubs.Set(self.drvr, 'plug_vifs', fake_plug_vifs)
    self.stubs.Set(self.drvr, '_create_image',
                   fake_create_image)
    self.stubs.Set(self.drvr, '_create_domain_and_network',
                   fake_create_domain_and_network)
    self.stubs.Set(nova.virt.libvirt.guest.Guest, 'enable_hairpin',
                   fake_enable_hairpin)
    self.stubs.Set(utils, 'execute', fake_execute)
    fw = base_firewall.NoopFirewallDriver()
    self.stubs.Set(self.drvr, 'firewall_driver', fw)
    self.stubs.Set(self.drvr, 'get_info',
                   fake_get_info)

    ins_ref = self._create_instance()
    image_meta = {}

    migration = objects.Migration()
    migration.source_compute = 'fake-source-compute'
    migration.dest_compute = 'fake-dest-compute'
    migration.source_node = 'fake-source-node'
    migration.dest_node = 'fake-dest-node'
    self.drvr.finish_migration(
        context.get_admin_context(), migration, ins_ref,
        self._disk_info(), [], image_meta,
        resize_instance, None, power_on)
    self.assertTrue(self.fake_create_domain_called)
    # Disk resize runs exactly when a resize was requested.
    self.assertEqual(
        resize_instance, self.fake_disk_resize_called)
def test_finish_migration_resize(self):
    """Powered-on finish_migration with a disk resize."""
    self._test_finish_migration(power_on=True, resize_instance=True)
def test_finish_migration_power_on(self):
    """Powered-on finish_migration without a disk resize."""
    self._test_finish_migration(power_on=True)
def test_finish_migration_power_off(self):
    """Powered-off finish_migration without a disk resize."""
    self._test_finish_migration(power_on=False)
def _test_finish_revert_migration(self, power_on):
    """Test for nova.virt.libvirt.libvirt_driver.LibvirtConnection
    .finish_revert_migration.

    :param power_on: expected power_on flag passed through to the
                     domain-creation call.
    """
    powered_on = power_on

    self.fake_create_domain_called = False

    def fake_execute(*args, **kwargs):
        pass

    def fake_plug_vifs(instance, network_info):
        pass

    def fake_create_domain(context, xml, instance, network_info,
                           disk_info, block_device_info=None,
                           power_on=None,
                           vifs_already_plugged=None):
        self.fake_create_domain_called = True
        self.assertEqual(powered_on, power_on)
        self.assertTrue(vifs_already_plugged)
        return mock.MagicMock()

    def fake_enable_hairpin():
        pass

    def fake_get_info(instance):
        if powered_on:
            return hardware.InstanceInfo(state=power_state.RUNNING)
        else:
            return hardware.InstanceInfo(state=power_state.SHUTDOWN)

    def fake_to_xml(context, instance, network_info, disk_info,
                    image_meta=None, rescue=None,
                    block_device_info=None):
        return ""

    self.stubs.Set(self.drvr, '_get_guest_xml', fake_to_xml)
    self.stubs.Set(self.drvr, 'plug_vifs', fake_plug_vifs)
    self.stubs.Set(utils, 'execute', fake_execute)
    fw = base_firewall.NoopFirewallDriver()
    self.stubs.Set(self.drvr, 'firewall_driver', fw)
    self.stubs.Set(self.drvr, '_create_domain_and_network',
                   fake_create_domain)
    self.stubs.Set(nova.virt.libvirt.guest.Guest, 'enable_hairpin',
                   fake_enable_hairpin)
    self.stubs.Set(self.drvr, 'get_info',
                   fake_get_info)
    self.stubs.Set(utils, 'get_image_from_system_metadata', lambda *a: {})

    with utils.tempdir() as tmpdir:
        self.flags(instances_path=tmpdir)
        ins_ref = self._create_instance()
        os.mkdir(os.path.join(tmpdir, ins_ref['name']))
        libvirt_xml_path = os.path.join(tmpdir,
                                        ins_ref['name'],
                                        'libvirt.xml')
        # Touch an empty libvirt.xml; the context manager guarantees
        # the handle is closed even if an error interposes (the old
        # open()/close() pair could leak the descriptor).
        with open(libvirt_xml_path, 'w'):
            pass

        self.drvr.finish_revert_migration(
            context.get_admin_context(), ins_ref,
            [], None, power_on)
        self.assertTrue(self.fake_create_domain_called)
def test_finish_revert_migration_power_on(self):
self._test_finish_revert_migration(True)
def test_finish_revert_migration_power_off(self):
self._test_finish_revert_migration(False)
    def _test_finish_revert_migration_after_crash(self, backup_made=True,
                                                  del_inst_failed=False):
        """Exercise finish_revert_migration recovery of the instance dir.

        :param backup_made: whether the '<path>_resize' backup directory
            exists (i.e. the crash happened after the backup was taken)
        :param del_inst_failed: simulate rmtree of the current instance
            directory failing with ENOENT; the revert should tolerate it
        """
        class FakeLoopingCall(object):
            # Short-circuits the power-on wait loop.
            def start(self, *a, **k):
                return self
            def wait(self):
                return None
        context = 'fake_context'
        instance = self._create_instance()
        self.mox.StubOutWithMock(libvirt_utils, 'get_instance_path')
        self.mox.StubOutWithMock(os.path, 'exists')
        self.mox.StubOutWithMock(shutil, 'rmtree')
        self.mox.StubOutWithMock(utils, 'execute')
        self.stubs.Set(blockinfo, 'get_disk_info', lambda *a: None)
        self.stubs.Set(self.drvr, '_get_guest_xml',
                       lambda *a, **k: None)
        self.stubs.Set(self.drvr, '_create_domain_and_network',
                       lambda *a, **kw: None)
        self.stubs.Set(loopingcall, 'FixedIntervalLoopingCall',
                       lambda *a, **k: FakeLoopingCall())
        libvirt_utils.get_instance_path(instance).AndReturn('/fake/foo')
        os.path.exists('/fake/foo_resize').AndReturn(backup_made)
        if backup_made:
            # The half-written current dir is removed (failure tolerated)
            # and the pre-resize backup is moved back into place.
            if del_inst_failed:
                os_error = OSError(errno.ENOENT, 'No such file or directory')
                shutil.rmtree('/fake/foo').AndRaise(os_error)
            else:
                shutil.rmtree('/fake/foo')
            utils.execute('mv', '/fake/foo_resize', '/fake/foo')
        self.mox.ReplayAll()
        self.drvr.finish_revert_migration(context, instance, [])
    def test_finish_revert_migration_after_crash(self):
        self._test_finish_revert_migration_after_crash(backup_made=True)
    def test_finish_revert_migration_after_crash_before_new(self):
        # NOTE(review): identical to the previous test (same backup_made
        # value); presumably meant to exercise a different crash point --
        # worth confirming the intended scenario.
        self._test_finish_revert_migration_after_crash(backup_made=True)
    def test_finish_revert_migration_after_crash_before_backup(self):
        self._test_finish_revert_migration_after_crash(backup_made=False)
    def test_finish_revert_migration_after_crash_delete_failed(self):
        self._test_finish_revert_migration_after_crash(backup_made=True,
                                                       del_inst_failed=True)
    def test_finish_revert_migration_preserves_disk_bus(self):
        """The hw_disk_bus image property must survive a revert."""
        def fake_get_guest_xml(context, instance, network_info, disk_info,
                               image_meta, block_device_info=None):
            # The assertion that actually verifies the preserved bus.
            self.assertEqual('ide', disk_info['disk_bus'])
        image_meta = {"properties": {"hw_disk_bus": "ide"}}
        instance = self._create_instance()
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        with contextlib.nested(
                mock.patch.object(drvr, '_create_domain_and_network'),
                mock.patch.object(utils, 'get_image_from_system_metadata',
                                  return_value=image_meta),
                mock.patch.object(drvr, '_get_guest_xml',
                                  side_effect=fake_get_guest_xml)):
            drvr.finish_revert_migration('', instance, None, power_on=False)
    def test_cleanup_failed_migration(self):
        # A failed migration cleanup simply removes the instance directory.
        self.mox.StubOutWithMock(shutil, 'rmtree')
        shutil.rmtree('/fake/inst')
        self.mox.ReplayAll()
        self.drvr._cleanup_failed_migration('/fake/inst')
    def test_confirm_migration(self):
        # confirm_migration is expected to delegate to _cleanup_resize.
        ins_ref = self._create_instance()
        self.mox.StubOutWithMock(self.drvr, "_cleanup_resize")
        self.drvr._cleanup_resize(ins_ref,
                                  _fake_network_info(self.stubs, 1))
        self.mox.ReplayAll()
        self.drvr.confirm_migration("migration_ref", ins_ref,
                                    _fake_network_info(self.stubs, 1))
    def test_cleanup_resize_same_host(self):
        """On the source host, only the '_resize' backup dir is removed."""
        # NOTE(review): policy_dirs is cleared with set_override here,
        # presumably to avoid loading policy files -- confirm.
        CONF.set_override('policy_dirs', [])
        ins_ref = self._create_instance({'host': CONF.host})
        def fake_os_path_exists(path):
            return True
        self.stubs.Set(os.path, 'exists', fake_os_path_exists)
        self.mox.StubOutWithMock(libvirt_utils, 'get_instance_path')
        self.mox.StubOutWithMock(utils, 'execute')
        libvirt_utils.get_instance_path(ins_ref,
                forceold=True).AndReturn('/fake/inst')
        utils.execute('rm', '-rf', '/fake/inst_resize', delay_on_retry=True,
                      attempts=5)
        self.mox.ReplayAll()
        self.drvr._cleanup_resize(ins_ref,
                                  _fake_network_info(self.stubs, 1))
    def test_cleanup_resize_not_same_host(self):
        """When the instance moved hosts, cleanup also undefines the
        domain, unplugs vifs and unfilters the instance locally.
        """
        CONF.set_override('policy_dirs', [])
        host = 'not' + CONF.host
        ins_ref = self._create_instance({'host': host})
        def fake_os_path_exists(path):
            return True
        def fake_undefine_domain(instance):
            pass
        def fake_unplug_vifs(instance, network_info, ignore_errors=False):
            pass
        def fake_unfilter_instance(instance, network_info):
            pass
        self.stubs.Set(os.path, 'exists', fake_os_path_exists)
        self.stubs.Set(self.drvr, '_undefine_domain',
                       fake_undefine_domain)
        self.stubs.Set(self.drvr, 'unplug_vifs',
                       fake_unplug_vifs)
        self.stubs.Set(self.drvr.firewall_driver,
                       'unfilter_instance', fake_unfilter_instance)
        self.mox.StubOutWithMock(libvirt_utils, 'get_instance_path')
        self.mox.StubOutWithMock(utils, 'execute')
        libvirt_utils.get_instance_path(ins_ref,
                forceold=True).AndReturn('/fake/inst')
        utils.execute('rm', '-rf', '/fake/inst_resize', delay_on_retry=True,
                      attempts=5)
        self.mox.ReplayAll()
        self.drvr._cleanup_resize(ins_ref,
                                  _fake_network_info(self.stubs, 1))
    def test_get_instance_disk_info_exception(self):
        """A libvirtError from XMLDesc is surfaced as InstanceNotFound."""
        instance = self._create_instance()
        class FakeExceptionDomain(FakeVirtDomain):
            def __init__(self):
                super(FakeExceptionDomain, self).__init__()
            def XMLDesc(self, flags):
                # Simulate the domain disappearing mid-query.
                raise fakelibvirt.libvirtError("Libvirt error")
        def fake_get_domain(self, instance):
            return FakeExceptionDomain()
        self.stubs.Set(host.Host, 'get_domain',
                       fake_get_domain)
        self.assertRaises(exception.InstanceNotFound,
                          self.drvr.get_instance_disk_info,
                          instance)
    @mock.patch('os.path.exists')
    @mock.patch('nova.virt.libvirt.lvm.list_volumes')
    def test_lvm_disks(self, listlvs, exists):
        """_lvm_disks returns only the volumes prefixed by this
        instance's uuid, as full /dev/<vg>/ paths.
        """
        instance = objects.Instance(uuid='fake-uuid', id=1)
        self.flags(images_volume_group='vols', group='libvirt')
        exists.return_value = True
        # 'other-uuid_foo' belongs to a different instance and must be
        # filtered out.
        listlvs.return_value = ['fake-uuid_foo',
                                'other-uuid_foo']
        disks = self.drvr._lvm_disks(instance)
        self.assertEqual(['/dev/vols/fake-uuid_foo'], disks)
def test_is_booted_from_volume(self):
func = libvirt_driver.LibvirtDriver._is_booted_from_volume
instance, disk_mapping = {}, {}
self.assertTrue(func(instance, disk_mapping))
disk_mapping['disk'] = 'map'
self.assertTrue(func(instance, disk_mapping))
instance['image_ref'] = 'uuid'
self.assertFalse(func(instance, disk_mapping))
    @mock.patch('nova.virt.netutils.get_injected_network_template')
    @mock.patch('nova.virt.disk.api.inject_data')
    @mock.patch.object(libvirt_driver.LibvirtDriver, "_conn")
    def _test_inject_data(self, driver_params, path, disk_params,
                          mock_conn, disk_inject_data, inj_network,
                          called=True):
        """Common driver for the inject_data tests.

        Patch decorators map bottom-up: mock_conn <- _conn,
        disk_inject_data <- disk.api.inject_data,
        inj_network <- get_injected_network_template.

        :param driver_params: kwargs passed to drvr._inject_data
        :param path: backing image path ('/fail/path' simulates a
            missing image, in which case no injection should happen)
        :param disk_params: positional args expected on
            disk.api.inject_data
        :param called: whether inject_data is expected to be invoked
        """
        class ImageBackend(object):
            path = '/path'
            def check_image_exists(self):
                # '/fail/path' models a backing image that is gone.
                if self.path == '/fail/path':
                    return False
                return True
            def get_model(self, connection):
                return imgmodel.LocalFileImage(self.path,
                                               imgmodel.FORMAT_RAW)
        def fake_inj_network(*args, **kwds):
            return args[0] or None
        inj_network.side_effect = fake_inj_network
        image_backend = ImageBackend()
        image_backend.path = path
        with mock.patch.object(
                self.drvr.image_backend,
                'image',
                return_value=image_backend):
            self.flags(inject_partition=0, group='libvirt')
            self.drvr._inject_data(**driver_params)
            if called:
                disk_inject_data.assert_called_once_with(
                    mock.ANY,
                    *disk_params,
                    partition=None, mandatory=('files',))
            self.assertEqual(disk_inject_data.called, called)
def _test_inject_data_default_driver_params(self, **params):
return {
'instance': self._create_instance(params=params),
'network_info': None,
'admin_pass': None,
'files': None,
'suffix': ''
}
    def test_inject_data_adminpass(self):
        """Admin password is injected only when inject_password is on."""
        self.flags(inject_password=True, group='libvirt')
        driver_params = self._test_inject_data_default_driver_params()
        driver_params['admin_pass'] = 'foobar'
        disk_params = [
            None,  # key
            None,  # net
            {},  # metadata
            'foobar',  # admin_pass
            None,  # files
        ]
        self._test_inject_data(driver_params, "/path", disk_params)
        # Test with the configuration set to false.
        self.flags(inject_password=False, group='libvirt')
        self._test_inject_data(driver_params, "/path",
                               disk_params, called=False)
    def test_inject_data_key(self):
        """SSH key is injected only when inject_key is on."""
        driver_params = self._test_inject_data_default_driver_params()
        driver_params['instance']['key_data'] = 'key-content'
        self.flags(inject_key=True, group='libvirt')
        disk_params = [
            'key-content',  # key
            None,  # net
            {},  # metadata
            None,  # admin_pass
            None,  # files
        ]
        self._test_inject_data(driver_params, "/path", disk_params)
        # Test with the configuration set to false.
        self.flags(inject_key=False, group='libvirt')
        self._test_inject_data(driver_params, "/path",
                               disk_params, called=False)
    def test_inject_data_metadata(self):
        # Instance metadata is always forwarded to the injector.
        instance_metadata = {'metadata': {'data': 'foo'}}
        driver_params = self._test_inject_data_default_driver_params(
            **instance_metadata
        )
        disk_params = [
            None,  # key
            None,  # net
            {'data': 'foo'},  # metadata
            None,  # admin_pass
            None,  # files
        ]
        self._test_inject_data(driver_params, "/path", disk_params)
    def test_inject_data_files(self):
        # Personality files are forwarded to the injector verbatim.
        driver_params = self._test_inject_data_default_driver_params()
        driver_params['files'] = ['file1', 'file2']
        disk_params = [
            None,  # key
            None,  # net
            {},  # metadata
            None,  # admin_pass
            ['file1', 'file2'],  # files
        ]
        self._test_inject_data(driver_params, "/path", disk_params)
    def test_inject_data_net(self):
        # Network info is rendered and injected when provided.
        driver_params = self._test_inject_data_default_driver_params()
        driver_params['network_info'] = {'net': 'eno1'}
        disk_params = [
            None,  # key
            {'net': 'eno1'},  # net
            {},  # metadata
            None,  # admin_pass
            None,  # files
        ]
        self._test_inject_data(driver_params, "/path", disk_params)
    def test_inject_not_exist_image(self):
        # When the backing image does not exist ('/fail/path'), no
        # injection call should be made at all.
        driver_params = self._test_inject_data_default_driver_params()
        disk_params = [
            'key-content',  # key
            None,  # net
            None,  # metadata
            None,  # admin_pass
            None,  # files
        ]
        self._test_inject_data(driver_params, "/fail/path",
                               disk_params, called=False)
def _test_attach_detach_interface(self, method, power_state,
expected_flags):
instance = self._create_instance()
network_info = _fake_network_info(self.stubs, 1)
domain = FakeVirtDomain()
self.mox.StubOutWithMock(host.Host, 'get_domain')
self.mox.StubOutWithMock(self.drvr.firewall_driver,
'setup_basic_filtering')
self.mox.StubOutWithMock(domain, 'attachDeviceFlags')
self.mox.StubOutWithMock(domain, 'info')
host.Host.get_domain(instance).AndReturn(domain)
if method == 'attach_interface':
self.drvr.firewall_driver.setup_basic_filtering(
instance, [network_info[0]])
if method == 'attach_interface':
fake_image_meta = {'id': instance.image_ref}
elif method == 'detach_interface':
fake_image_meta = None
expected = self.drvr.vif_driver.get_config(
instance, network_info[0], fake_image_meta, instance.flavor,
CONF.libvirt.virt_type)
self.mox.StubOutWithMock(self.drvr.vif_driver,
'get_config')
self.drvr.vif_driver.get_config(
instance, network_info[0],
fake_image_meta,
mox.IsA(objects.Flavor),
CONF.libvirt.virt_type).AndReturn(expected)
domain.info().AndReturn([power_state])
if method == 'attach_interface':
domain.attachDeviceFlags(expected.to_xml(), flags=expected_flags)
elif method == 'detach_interface':
domain.detachDeviceFlags(expected.to_xml(), expected_flags)
self.mox.ReplayAll()
if method == 'attach_interface':
self.drvr.attach_interface(
instance, fake_image_meta, network_info[0])
elif method == 'detach_interface':
self.drvr.detach_interface(
instance, network_info[0])
self.mox.VerifyAll()
    def test_attach_interface_with_running_instance(self):
        # Running guest: change both the persistent config and live state.
        self._test_attach_detach_interface(
            'attach_interface', power_state.RUNNING,
            expected_flags=(fakelibvirt.VIR_DOMAIN_AFFECT_CONFIG |
                            fakelibvirt.VIR_DOMAIN_AFFECT_LIVE))
    def test_attach_interface_with_pause_instance(self):
        # Paused guest: still both config and live.
        self._test_attach_detach_interface(
            'attach_interface', power_state.PAUSED,
            expected_flags=(fakelibvirt.VIR_DOMAIN_AFFECT_CONFIG |
                            fakelibvirt.VIR_DOMAIN_AFFECT_LIVE))
    def test_attach_interface_with_shutdown_instance(self):
        # Shut-down guest: only the persistent config can be changed.
        self._test_attach_detach_interface(
            'attach_interface', power_state.SHUTDOWN,
            expected_flags=(fakelibvirt.VIR_DOMAIN_AFFECT_CONFIG))
    def test_detach_interface_with_running_instance(self):
        self._test_attach_detach_interface(
            'detach_interface', power_state.RUNNING,
            expected_flags=(fakelibvirt.VIR_DOMAIN_AFFECT_CONFIG |
                            fakelibvirt.VIR_DOMAIN_AFFECT_LIVE))
    def test_detach_interface_with_pause_instance(self):
        self._test_attach_detach_interface(
            'detach_interface', power_state.PAUSED,
            expected_flags=(fakelibvirt.VIR_DOMAIN_AFFECT_CONFIG |
                            fakelibvirt.VIR_DOMAIN_AFFECT_LIVE))
    def test_detach_interface_with_shutdown_instance(self):
        self._test_attach_detach_interface(
            'detach_interface', power_state.SHUTDOWN,
            expected_flags=(fakelibvirt.VIR_DOMAIN_AFFECT_CONFIG))
    def test_rescue(self):
        """Rescue saves the current domain XML, builds rescue images via
        the image backend, then destroys and recreates the guest.
        """
        instance = self._create_instance({'config_drive': None})
        dummyxml = ("<domain type='kvm'><name>instance-0000000a</name>"
                    "<devices>"
                    "<disk type='file'><driver name='qemu' type='raw'/>"
                    "<source file='/test/disk'/>"
                    "<target dev='vda' bus='virtio'/></disk>"
                    "<disk type='file'><driver name='qemu' type='qcow2'/>"
                    "<source file='/test/disk.local'/>"
                    "<target dev='vdb' bus='virtio'/></disk>"
                    "</devices></domain>")
        network_info = _fake_network_info(self.stubs, 1)
        self.mox.StubOutWithMock(self.drvr,
                                     '_get_existing_domain_xml')
        self.mox.StubOutWithMock(libvirt_utils, 'write_to_file')
        self.mox.StubOutWithMock(imagebackend.Backend, 'image')
        self.mox.StubOutWithMock(imagebackend.Image, 'cache')
        self.mox.StubOutWithMock(self.drvr, '_get_guest_xml')
        self.mox.StubOutWithMock(self.drvr, '_destroy')
        self.mox.StubOutWithMock(self.drvr, '_create_domain')
        # The pre-rescue XML is captured and written out for unrescue.
        self.drvr._get_existing_domain_xml(mox.IgnoreArg(),
                        mox.IgnoreArg()).MultipleTimes().AndReturn(dummyxml)
        libvirt_utils.write_to_file(mox.IgnoreArg(), mox.IgnoreArg())
        libvirt_utils.write_to_file(mox.IgnoreArg(), mox.IgnoreArg(),
                                    mox.IgnoreArg())
        # Rescue kernel, ramdisk and disk come from the image backend.
        imagebackend.Backend.image(instance, 'kernel.rescue', 'raw'
                                        ).AndReturn(fake_imagebackend.Raw())
        imagebackend.Backend.image(instance, 'ramdisk.rescue', 'raw'
                                        ).AndReturn(fake_imagebackend.Raw())
        imagebackend.Backend.image(instance, 'disk.rescue', 'default'
                                        ).AndReturn(fake_imagebackend.Raw())
        imagebackend.Image.cache(context=mox.IgnoreArg(),
                                fetch_func=mox.IgnoreArg(),
                                filename=mox.IgnoreArg(),
                                image_id=mox.IgnoreArg(),
                                project_id=mox.IgnoreArg(),
                                user_id=mox.IgnoreArg()).MultipleTimes()
        imagebackend.Image.cache(context=mox.IgnoreArg(),
                                fetch_func=mox.IgnoreArg(),
                                filename=mox.IgnoreArg(),
                                image_id=mox.IgnoreArg(),
                                project_id=mox.IgnoreArg(),
                                size=None, user_id=mox.IgnoreArg())
        image_meta = {'id': 'fake', 'name': 'fake'}
        self.drvr._get_guest_xml(mox.IgnoreArg(), instance,
                                network_info, mox.IgnoreArg(),
                                image_meta, rescue=mox.IgnoreArg(),
                                write_to_disk=mox.IgnoreArg()
                                ).AndReturn(dummyxml)
        # Finally the guest is torn down and relaunched from rescue XML.
        self.drvr._destroy(instance)
        self.drvr._create_domain(mox.IgnoreArg())
        self.mox.ReplayAll()
        rescue_password = 'fake_password'
        self.drvr.rescue(self.context, instance,
                    network_info, image_meta, rescue_password)
        self.mox.VerifyAll()
    @mock.patch.object(libvirt_utils, 'get_instance_path')
    @mock.patch.object(libvirt_utils, 'load_file')
    @mock.patch.object(host.Host, "get_domain")
    def test_unrescue(self, mock_get_domain, mock_load_file,
                                           mock_get_instance_path):
        """Unrescue restores the saved pre-rescue XML and removes all
        rescue artifacts (unrescue.xml, rescue.* files, lvm volumes).
        """
        dummyxml = ("<domain type='kvm'><name>instance-0000000a</name>"
                    "<devices>"
                    "<disk type='block' device='disk'>"
                    "<source dev='/dev/some-vg/some-lv'/>"
                    "<target dev='vda' bus='virtio'/></disk>"
                    "</devices></domain>")
        mock_get_instance_path.return_value = '/path'
        # NOTE(review): 'fake=uuid' looks like a typo of 'fake-uuid';
        # the value is arbitrary for this test, so behavior is unaffected.
        instance = objects.Instance(uuid='fake=uuid', id=1)
        fake_dom = FakeVirtDomain(fake_xml=dummyxml)
        mock_get_domain.return_value = fake_dom
        mock_load_file.return_value = "fake_unrescue_xml"
        unrescue_xml_path = os.path.join('/path', 'unrescue.xml')
        rescue_file = os.path.join('/path', 'rescue.file')
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        with contextlib.nested(
                mock.patch.object(drvr, '_destroy'),
                mock.patch.object(drvr, '_create_domain'),
                mock.patch.object(libvirt_utils, 'file_delete'),
                mock.patch.object(drvr, '_lvm_disks',
                                  return_value=['lvm.rescue']),
                mock.patch.object(lvm, 'remove_volumes'),
                mock.patch.object(glob, 'iglob', return_value=[rescue_file])
                ) as (mock_destroy, mock_create, mock_del, mock_lvm_disks,
                      mock_remove_volumes, mock_glob):
            drvr.unrescue(instance, None)
            mock_destroy.assert_called_once_with(instance)
            # The guest is recreated from the saved unrescue XML.
            mock_create.assert_called_once_with("fake_unrescue_xml",
                                                fake_dom)
            # Both unrescue.xml and the globbed rescue file are deleted.
            self.assertEqual(2, mock_del.call_count)
            self.assertEqual(unrescue_xml_path,
                             mock_del.call_args_list[0][0][0])
            self.assertEqual(rescue_file, mock_del.call_args_list[1][0][0])
            mock_remove_volumes.assert_called_once_with(['lvm.rescue'])
    @mock.patch(
        'nova.virt.configdrive.ConfigDriveBuilder.add_instance_metadata')
    @mock.patch('nova.virt.configdrive.ConfigDriveBuilder.make_drive')
    def test_rescue_config_drive(self, mock_make, mock_add):
        """Like test_rescue, but the instance has a config drive, so a
        rescue config drive must be built alongside the rescue images.
        """
        instance = self._create_instance()
        uuid = instance.uuid
        configdrive_path = uuid + '/disk.config.rescue'
        dummyxml = ("<domain type='kvm'><name>instance-0000000a</name>"
                    "<devices>"
                    "<disk type='file'><driver name='qemu' type='raw'/>"
                    "<source file='/test/disk'/>"
                    "<target dev='vda' bus='virtio'/></disk>"
                    "<disk type='file'><driver name='qemu' type='qcow2'/>"
                    "<source file='/test/disk.local'/>"
                    "<target dev='vdb' bus='virtio'/></disk>"
                    "</devices></domain>")
        network_info = _fake_network_info(self.stubs, 1)
        self.mox.StubOutWithMock(self.drvr,
                                    '_get_existing_domain_xml')
        self.mox.StubOutWithMock(libvirt_utils, 'write_to_file')
        self.mox.StubOutWithMock(imagebackend.Backend, 'image')
        self.mox.StubOutWithMock(imagebackend.Image, 'cache')
        self.mox.StubOutWithMock(instance_metadata.InstanceMetadata,
                                                            '__init__')
        self.mox.StubOutWithMock(self.drvr, '_get_guest_xml')
        self.mox.StubOutWithMock(self.drvr, '_destroy')
        self.mox.StubOutWithMock(self.drvr, '_create_domain')
        self.drvr._get_existing_domain_xml(mox.IgnoreArg(),
                    mox.IgnoreArg()).MultipleTimes().AndReturn(dummyxml)
        libvirt_utils.write_to_file(mox.IgnoreArg(), mox.IgnoreArg())
        libvirt_utils.write_to_file(mox.IgnoreArg(), mox.IgnoreArg(),
                                    mox.IgnoreArg())
        imagebackend.Backend.image(instance, 'kernel.rescue', 'raw'
                                    ).AndReturn(fake_imagebackend.Raw())
        imagebackend.Backend.image(instance, 'ramdisk.rescue', 'raw'
                                    ).AndReturn(fake_imagebackend.Raw())
        imagebackend.Backend.image(instance, 'disk.rescue', 'default'
                                    ).AndReturn(fake_imagebackend.Raw())
        imagebackend.Image.cache(context=mox.IgnoreArg(),
                                fetch_func=mox.IgnoreArg(),
                                filename=mox.IgnoreArg(),
                                image_id=mox.IgnoreArg(),
                                project_id=mox.IgnoreArg(),
                                user_id=mox.IgnoreArg()).MultipleTimes()
        imagebackend.Image.cache(context=mox.IgnoreArg(),
                                fetch_func=mox.IgnoreArg(),
                                filename=mox.IgnoreArg(),
                                image_id=mox.IgnoreArg(),
                                project_id=mox.IgnoreArg(),
                                size=None, user_id=mox.IgnoreArg())
        # Config-drive metadata is rebuilt for the rescue environment.
        instance_metadata.InstanceMetadata.__init__(mox.IgnoreArg(),
                                            content=mox.IgnoreArg(),
                                            extra_md=mox.IgnoreArg(),
                                            network_info=mox.IgnoreArg())
        image_meta = {'id': 'fake', 'name': 'fake'}
        self.drvr._get_guest_xml(mox.IgnoreArg(), instance,
                                network_info, mox.IgnoreArg(),
                                image_meta, rescue=mox.IgnoreArg(),
                                write_to_disk=mox.IgnoreArg()
                                ).AndReturn(dummyxml)
        self.drvr._destroy(instance)
        self.drvr._create_domain(mox.IgnoreArg())
        self.mox.ReplayAll()
        rescue_password = 'fake_password'
        self.drvr.rescue(self.context, instance, network_info,
                                        image_meta, rescue_password)
        self.mox.VerifyAll()
        mock_add.assert_any_call(mock.ANY)
        # The rescue config drive must land at
        # <instances_path>/<uuid>/disk.config.rescue.
        expected_call = [mock.call(os.path.join(CONF.instances_path,
                                                configdrive_path))]
        mock_make.assert_has_calls(expected_call)
    @mock.patch('shutil.rmtree')
    @mock.patch('nova.utils.execute')
    @mock.patch('os.path.exists')
    @mock.patch('nova.virt.libvirt.utils.get_instance_path')
    def test_delete_instance_files(self, get_instance_path, exists, exe,
                                   shutil):
        """Happy path: dir renamed to '_del', removed, and gone after.

        NOTE: the ``shutil`` parameter is the mock for shutil.rmtree and
        shadows the shutil module name inside these tests.
        """
        get_instance_path.return_value = '/path'
        instance = objects.Instance(uuid='fake-uuid', id=1)
        # Values feed the driver's successive os.path.exists checks --
        # assumed order: path / path_resize / path_del pre-check /
        # path_del post-rmtree; TODO confirm against delete_instance_files.
        exists.side_effect = [False, False, True, False]
        result = self.drvr.delete_instance_files(instance)
        get_instance_path.assert_called_with(instance)
        exe.assert_called_with('mv', '/path', '/path_del')
        shutil.assert_called_with('/path_del')
        self.assertTrue(result)
    @mock.patch('shutil.rmtree')
    @mock.patch('nova.utils.execute')
    @mock.patch('os.path.exists')
    @mock.patch('nova.virt.libvirt.utils.get_instance_path')
    def test_delete_instance_files_resize(self, get_instance_path, exists,
                                          exe, shutil):
        """If moving '/path' fails, the '_resize' dir is moved instead."""
        get_instance_path.return_value = '/path'
        instance = objects.Instance(uuid='fake-uuid', id=1)
        # nova.utils.execute *is* the patched mock bound to ``exe``;
        # first mv raises, the fallback mv of '_resize' succeeds.
        nova.utils.execute.side_effect = [Exception(), None]
        exists.side_effect = [False, False, True, False]
        result = self.drvr.delete_instance_files(instance)
        get_instance_path.assert_called_with(instance)
        expected = [mock.call('mv', '/path', '/path_del'),
                    mock.call('mv', '/path_resize', '/path_del')]
        self.assertEqual(expected, exe.mock_calls)
        shutil.assert_called_with('/path_del')
        self.assertTrue(result)
    @mock.patch('shutil.rmtree')
    @mock.patch('nova.utils.execute')
    @mock.patch('os.path.exists')
    @mock.patch('nova.virt.libvirt.utils.get_instance_path')
    def test_delete_instance_files_failed(self, get_instance_path, exists, exe,
                                          shutil):
        """If '_del' still exists after rmtree, deletion reports failure."""
        get_instance_path.return_value = '/path'
        instance = objects.Instance(uuid='fake-uuid', id=1)
        exists.side_effect = [False, False, True, True]
        result = self.drvr.delete_instance_files(instance)
        get_instance_path.assert_called_with(instance)
        exe.assert_called_with('mv', '/path', '/path_del')
        shutil.assert_called_with('/path_del')
        self.assertFalse(result)
    @mock.patch('shutil.rmtree')
    @mock.patch('nova.utils.execute')
    @mock.patch('os.path.exists')
    @mock.patch('nova.virt.libvirt.utils.get_instance_path')
    def test_delete_instance_files_mv_failed(self, get_instance_path, exists,
                                             exe, shutil):
        """Both mv attempts failing twice leaves the dirs behind: failure."""
        get_instance_path.return_value = '/path'
        instance = objects.Instance(uuid='fake-uuid', id=1)
        nova.utils.execute.side_effect = Exception()
        exists.side_effect = [True, True]
        result = self.drvr.delete_instance_files(instance)
        get_instance_path.assert_called_with(instance)
        # Each of the two passes tries both mv commands.
        expected = [mock.call('mv', '/path', '/path_del'),
                    mock.call('mv', '/path_resize', '/path_del')] * 2
        self.assertEqual(expected, exe.mock_calls)
        self.assertFalse(result)
    @mock.patch('shutil.rmtree')
    @mock.patch('nova.utils.execute')
    @mock.patch('os.path.exists')
    @mock.patch('nova.virt.libvirt.utils.get_instance_path')
    def test_delete_instance_files_resume(self, get_instance_path, exists,
                                          exe, shutil):
        """mv fails but '_del' exists from a prior attempt: cleanup
        resumes and succeeds.
        """
        get_instance_path.return_value = '/path'
        instance = objects.Instance(uuid='fake-uuid', id=1)
        nova.utils.execute.side_effect = Exception()
        exists.side_effect = [False, False, True, False]
        result = self.drvr.delete_instance_files(instance)
        get_instance_path.assert_called_with(instance)
        expected = [mock.call('mv', '/path', '/path_del'),
                    mock.call('mv', '/path_resize', '/path_del')] * 2
        self.assertEqual(expected, exe.mock_calls)
        self.assertTrue(result)
    @mock.patch('shutil.rmtree')
    @mock.patch('nova.utils.execute')
    @mock.patch('os.path.exists')
    @mock.patch('nova.virt.libvirt.utils.get_instance_path')
    def test_delete_instance_files_none(self, get_instance_path, exists,
                                        exe, shutil):
        """Nothing on disk at all: success without ever calling rmtree."""
        get_instance_path.return_value = '/path'
        instance = objects.Instance(uuid='fake-uuid', id=1)
        nova.utils.execute.side_effect = Exception()
        exists.side_effect = [False, False, False, False]
        result = self.drvr.delete_instance_files(instance)
        get_instance_path.assert_called_with(instance)
        expected = [mock.call('mv', '/path', '/path_del'),
                    mock.call('mv', '/path_resize', '/path_del')] * 2
        self.assertEqual(expected, exe.mock_calls)
        self.assertEqual(0, len(shutil.mock_calls))
        self.assertTrue(result)
    @mock.patch('shutil.rmtree')
    @mock.patch('nova.utils.execute')
    @mock.patch('os.path.exists')
    @mock.patch('nova.virt.libvirt.utils.get_instance_path')
    def test_delete_instance_files_concurrent(self, get_instance_path, exists,
                                              exe, shutil):
        """A concurrent delete makes the first two mv calls fail; the
        retried mv of '/path' then succeeds.
        """
        get_instance_path.return_value = '/path'
        instance = objects.Instance(uuid='fake-uuid', id=1)
        nova.utils.execute.side_effect = [Exception(), Exception(), None]
        exists.side_effect = [False, False, True, False]
        result = self.drvr.delete_instance_files(instance)
        get_instance_path.assert_called_with(instance)
        expected = [mock.call('mv', '/path', '/path_del'),
                    mock.call('mv', '/path_resize', '/path_del')]
        expected.append(expected[0])
        self.assertEqual(expected, exe.mock_calls)
        shutil.assert_called_with('/path_del')
        self.assertTrue(result)
def _assert_on_id_map(self, idmap, klass, start, target, count):
self.assertIsInstance(idmap, klass)
self.assertEqual(start, idmap.start)
self.assertEqual(target, idmap.target)
self.assertEqual(count, idmap.count)
def test_get_id_maps(self):
self.flags(virt_type="lxc", group="libvirt")
CONF.libvirt.virt_type = "lxc"
CONF.libvirt.uid_maps = ["0:10000:1", "1:20000:10"]
CONF.libvirt.gid_maps = ["0:10000:1", "1:20000:10"]
idmaps = self.drvr._get_guest_idmaps()
self.assertEqual(len(idmaps), 4)
self._assert_on_id_map(idmaps[0],
vconfig.LibvirtConfigGuestUIDMap,
0, 10000, 1)
self._assert_on_id_map(idmaps[1],
vconfig.LibvirtConfigGuestUIDMap,
1, 20000, 10)
self._assert_on_id_map(idmaps[2],
vconfig.LibvirtConfigGuestGIDMap,
0, 10000, 1)
self._assert_on_id_map(idmaps[3],
vconfig.LibvirtConfigGuestGIDMap,
1, 20000, 10)
def test_get_id_maps_not_lxc(self):
CONF.libvirt.uid_maps = ["0:10000:1", "1:20000:10"]
CONF.libvirt.gid_maps = ["0:10000:1", "1:20000:10"]
idmaps = self.drvr._get_guest_idmaps()
self.assertEqual(0, len(idmaps))
def test_get_id_maps_only_uid(self):
self.flags(virt_type="lxc", group="libvirt")
CONF.libvirt.uid_maps = ["0:10000:1", "1:20000:10"]
CONF.libvirt.gid_maps = []
idmaps = self.drvr._get_guest_idmaps()
self.assertEqual(2, len(idmaps))
self._assert_on_id_map(idmaps[0],
vconfig.LibvirtConfigGuestUIDMap,
0, 10000, 1)
self._assert_on_id_map(idmaps[1],
vconfig.LibvirtConfigGuestUIDMap,
1, 20000, 10)
def test_get_id_maps_only_gid(self):
self.flags(virt_type="lxc", group="libvirt")
CONF.libvirt.uid_maps = []
CONF.libvirt.gid_maps = ["0:10000:1", "1:20000:10"]
idmaps = self.drvr._get_guest_idmaps()
self.assertEqual(2, len(idmaps))
self._assert_on_id_map(idmaps[0],
vconfig.LibvirtConfigGuestGIDMap,
0, 10000, 1)
self._assert_on_id_map(idmaps[1],
vconfig.LibvirtConfigGuestGIDMap,
1, 20000, 10)
    def test_instance_on_disk(self):
        # Default (file-backed) storage is local to the host.
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        instance = objects.Instance(uuid='fake-uuid', id=1)
        self.assertFalse(drvr.instance_on_disk(instance))
    def test_instance_on_disk_rbd(self):
        # rbd-backed storage is shared, so the disk is reachable anywhere.
        self.flags(images_type='rbd', group='libvirt')
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        instance = objects.Instance(uuid='fake-uuid', id=1)
        self.assertTrue(drvr.instance_on_disk(instance))
    def test_get_interfaces(self):
        """_get_interfaces extracts the target dev names, in document
        order, from the domain XML.
        """
        dom_xml = """
              <domain type="qemu">
                  <devices>
                      <interface type="ethernet">
                          <mac address="fe:eb:da:ed:ef:ac"/>
                          <model type="virtio"/>
                          <target dev="eth0"/>
                      </interface>
                      <interface type="bridge">
                          <mac address="ca:fe:de:ad:be:ef"/>
                          <model type="virtio"/>
                          <target dev="br0"/>
                      </interface>
                  </devices>
              </domain>"""
        list_interfaces = ['eth0', 'br0']
        drv = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        self.assertEqual(list_interfaces, drv._get_interfaces(dom_xml))
    def test_get_disk_xml(self):
        """Guest.get_disk returns the per-device disk config by target
        dev name, or None for an unknown device.
        """
        dom_xml = """
              <domain type="kvm">
                <devices>
                  <disk type="file">
                     <source file="disk1_file"/>
                     <target dev="vda" bus="virtio"/>
                     <serial>0e38683e-f0af-418f-a3f1-6b67ea0f919d</serial>
                  </disk>
                  <disk type="block">
                    <source dev="/path/to/dev/1"/>
                    <target dev="vdb" bus="virtio" serial="1234"/>
                  </disk>
                </devices>
              </domain>
              """
        diska_xml = """<disk type="file" device="disk">
  <source file="disk1_file"/>
  <target bus="virtio" dev="vda"/>
  <serial>0e38683e-f0af-418f-a3f1-6b67ea0f919d</serial>
</disk>"""
        diskb_xml = """<disk type="block" device="disk">
  <source dev="/path/to/dev/1"/>
  <target bus="virtio" dev="vdb"/>
</disk>"""
        dom = mock.MagicMock()
        dom.XMLDesc.return_value = dom_xml
        guest = libvirt_guest.Guest(dom)
        # NOTE(gcb): etree.tostring(node) returns an extra line with
        # some white spaces, need to strip it.
        actual_diska_xml = guest.get_disk('vda').to_xml()
        self.assertEqual(diska_xml.strip(), actual_diska_xml.strip())
        actual_diskb_xml = guest.get_disk('vdb').to_xml()
        self.assertEqual(diskb_xml.strip(), actual_diskb_xml.strip())
        self.assertIsNone(guest.get_disk('vdc'))
    def test_vcpu_model_from_config(self):
        """_cpu_config_to_vcpu_model maps a libvirt CPU config to a
        VirtCPUModel object; a None config yields None, and passing an
        existing model updates it in place.
        """
        drv = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        vcpu_model = drv._cpu_config_to_vcpu_model(None, None)
        self.assertIsNone(vcpu_model)
        cpu = vconfig.LibvirtConfigGuestCPU()
        feature1 = vconfig.LibvirtConfigGuestCPUFeature()
        feature2 = vconfig.LibvirtConfigGuestCPUFeature()
        feature1.name = 'sse'
        feature1.policy = cpumodel.POLICY_REQUIRE
        feature2.name = 'aes'
        feature2.policy = cpumodel.POLICY_REQUIRE
        cpu.features = set([feature1, feature2])
        cpu.mode = cpumodel.MODE_CUSTOM
        cpu.sockets = 1
        cpu.cores = 2
        cpu.threads = 4
        vcpu_model = drv._cpu_config_to_vcpu_model(cpu, None)
        self.assertEqual(cpumodel.MATCH_EXACT, vcpu_model.match)
        self.assertEqual(cpumodel.MODE_CUSTOM, vcpu_model.mode)
        self.assertEqual(4, vcpu_model.topology.threads)
        self.assertEqual(set(['sse', 'aes']),
                         set([f.name for f in vcpu_model.features]))
        # Passing the existing model mutates it rather than recreating.
        cpu.mode = cpumodel.MODE_HOST_MODEL
        vcpu_model_1 = drv._cpu_config_to_vcpu_model(cpu, vcpu_model)
        self.assertEqual(cpumodel.MODE_HOST_MODEL, vcpu_model.mode)
        self.assertEqual(vcpu_model, vcpu_model_1)
    def test_vcpu_model_to_config(self):
        """_vcpu_model_to_cpu_config is the inverse mapping: VirtCPUModel
        (mode, features, topology) back to a libvirt CPU config.
        """
        drv = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        feature = objects.VirtCPUFeature(policy=cpumodel.POLICY_REQUIRE,
                                         name='sse')
        feature_1 = objects.VirtCPUFeature(policy=cpumodel.POLICY_FORBID,
                                           name='aes')
        topo = objects.VirtCPUTopology(sockets=1, cores=2, threads=4)
        vcpu_model = objects.VirtCPUModel(mode=cpumodel.MODE_HOST_MODEL,
                                          features=[feature, feature_1],
                                          topology=topo)
        cpu = drv._vcpu_model_to_cpu_config(vcpu_model)
        self.assertEqual(cpumodel.MODE_HOST_MODEL, cpu.mode)
        self.assertEqual(1, cpu.sockets)
        self.assertEqual(4, cpu.threads)
        self.assertEqual(2, len(cpu.features))
        self.assertEqual(set(['sse', 'aes']),
                         set([f.name for f in cpu.features]))
        self.assertEqual(set([cpumodel.POLICY_REQUIRE,
                              cpumodel.POLICY_FORBID]),
                         set([f.policy for f in cpu.features]))
class LibvirtVolumeUsageTestCase(test.NoDBTestCase):
    """Test for LibvirtDriver.get_all_volume_usage."""
    def setUp(self):
        super(LibvirtVolumeUsageTestCase, self).setUp()
        self.drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        self.c = context.get_admin_context()
        self.ins_ref = objects.Instance(
            id=1729,
            uuid='875a8070-d0b9-4949-8b31-104d125c9a64'
        )
        # verify bootable volume device path also
        self.bdms = [{'volume_id': 1,
                      'device_name': '/dev/vde'},
                     {'volume_id': 2,
                      'device_name': 'vda'}]
    def test_get_all_volume_usage(self):
        # block_stats tuple: (rd_req, rd_bytes, wr_req, wr_bytes, errs).
        def fake_block_stats(instance_name, disk):
            return (169, 688640, 0, 0, -1)
        self.stubs.Set(self.drvr, 'block_stats', fake_block_stats)
        vol_usage = self.drvr.get_all_volume_usage(self.c,
              [dict(instance=self.ins_ref, instance_bdms=self.bdms)])
        expected_usage = [{'volume': 1,
                           'instance': self.ins_ref,
                           'rd_bytes': 688640, 'wr_req': 0,
                           'rd_req': 169, 'wr_bytes': 0},
                           {'volume': 2,
                            'instance': self.ins_ref,
                            'rd_bytes': 688640, 'wr_req': 0,
                            'rd_req': 169, 'wr_bytes': 0}]
        self.assertEqual(vol_usage, expected_usage)
    def test_get_all_volume_usage_device_not_found(self):
        # A vanished domain yields no usage rather than an error.
        def fake_get_domain(self, instance):
            raise exception.InstanceNotFound(instance_id="fakedom")
        self.stubs.Set(host.Host, 'get_domain', fake_get_domain)
        vol_usage = self.drvr.get_all_volume_usage(self.c,
              [dict(instance=self.ins_ref, instance_bdms=self.bdms)])
        self.assertEqual(vol_usage, [])
class LibvirtNonblockingTestCase(test.NoDBTestCase):
    """Test libvirtd calls are nonblocking."""
    def setUp(self):
        super(LibvirtNonblockingTestCase, self).setUp()
        self.flags(connection_uri="test:///default",
                   group='libvirt')
    def test_connection_to_primitive(self):
        # Test bug 962840: serializing the driver must not choke on the
        # live libvirt connection object.
        import nova.virt.libvirt.driver as libvirt_driver
        drvr = libvirt_driver.LibvirtDriver('')
        drvr.set_host_enabled = mock.Mock()
        jsonutils.to_primitive(drvr._conn, convert_instances=True)
    def test_tpool_execute_calls_libvirt(self):
        # Every libvirt call must be routed through eventlet.tpool so it
        # cannot block the event loop.
        conn = fakelibvirt.virConnect()
        conn.is_expected = True
        self.mox.StubOutWithMock(eventlet.tpool, 'execute')
        eventlet.tpool.execute(
            fakelibvirt.openAuth,
            'test:///default',
            mox.IgnoreArg(),
            mox.IgnoreArg()).AndReturn(conn)
        eventlet.tpool.execute(
            conn.domainEventRegisterAny,
            None,
            fakelibvirt.VIR_DOMAIN_EVENT_ID_LIFECYCLE,
            mox.IgnoreArg(),
            mox.IgnoreArg())
        # Older libvirt bindings may lack registerCloseCallback.
        if hasattr(fakelibvirt.virConnect, 'registerCloseCallback'):
            eventlet.tpool.execute(
                conn.registerCloseCallback,
                mox.IgnoreArg(),
                mox.IgnoreArg())
        self.mox.ReplayAll()
        driver = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        c = driver._get_connection()
        self.assertEqual(True, c.is_expected)
class LibvirtVolumeSnapshotTestCase(test.NoDBTestCase):
    """Tests for libvirtDriver.volume_snapshot_create/delete.

    The fixtures model two kinds of snapshot chains: a plain file/block
    backed domain (self.dom_xml) and a gluster network-backed chain with
    nested <backingStore> elements (self.dom_netdisk_xml).  Most tests use
    mox record/replay, so the expectation order below is significant.
    """

    def setUp(self):
        super(LibvirtVolumeSnapshotTestCase, self).setUp()

        self.drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        self.c = context.get_admin_context()

        self.flags(instance_name_template='instance-%s')
        self.flags(qemu_allowed_storage_drivers=[], group='libvirt')

        # creating instance
        self.inst = {}
        self.inst['uuid'] = uuidutils.generate_uuid()
        self.inst['id'] = '1'

        # create domain info
        self.dom_xml = """
              <domain type='kvm'>
                <devices>
                  <disk type='file'>
                     <source file='disk1_file'/>
                     <target dev='vda' bus='virtio'/>
                     <serial>0e38683e-f0af-418f-a3f1-6b67ea0f919d</serial>
                  </disk>
                  <disk type='block'>
                    <source dev='/path/to/dev/1'/>
                    <target dev='vdb' bus='virtio' serial='1234'/>
                  </disk>
                </devices>
              </domain>"""

        # alternate domain info with network-backed snapshot chain
        self.dom_netdisk_xml = """
              <domain type='kvm'>
                <devices>
                  <disk type='file'>
                    <source file='disk1_file'/>
                    <target dev='vda' bus='virtio'/>
                    <serial>0e38683e-f0af-418f-a3f1-6b67eaffffff</serial>
                  </disk>
                  <disk type='network' device='disk'>
                    <driver name='qemu' type='qcow2'/>
                    <source protocol='gluster' name='vol1/root.img'>
                      <host name='server1' port='24007'/>
                    </source>
                    <backingStore type='network' index='1'>
                      <driver name='qemu' type='qcow2'/>
                      <source protocol='gluster' name='vol1/snap.img'>
                        <host name='server1' port='24007'/>
                      </source>
                      <backingStore type='network' index='2'>
                        <driver name='qemu' type='qcow2'/>
                        <source protocol='gluster' name='vol1/snap-b.img'>
                          <host name='server1' port='24007'/>
                        </source>
                        <backingStore/>
                      </backingStore>
                    </backingStore>
                    <target dev='vdb' bus='virtio'/>
                    <serial>0e38683e-f0af-418f-a3f1-6b67ea0f919d</serial>
                  </disk>
                </devices>
              </domain>
              """

        self.create_info = {'type': 'qcow2',
                            'snapshot_id': '1234-5678',
                            'new_file': 'new-file'}

        self.volume_uuid = '0e38683e-f0af-418f-a3f1-6b67ea0f919d'
        self.snapshot_id = '9c3ca9f4-9f4e-4dba-bedd-5c5e4b52b162'

        # delete_info_1: merge the newest snapshot down (no merge target).
        self.delete_info_1 = {'type': 'qcow2',
                              'file_to_merge': 'snap.img',
                              'merge_target_file': None}

        # delete_info_2: merge an older snapshot into a named target.
        self.delete_info_2 = {'type': 'qcow2',
                              'file_to_merge': 'snap.img',
                              'merge_target_file': 'other-snap.img'}

        self.delete_info_netdisk = {'type': 'qcow2',
                                    'file_to_merge': 'snap.img',
                                    'merge_target_file': 'root.img'}

        # An unrecognized 'type' must be rejected by the driver.
        self.delete_info_invalid_type = {'type': 'made_up_type',
                                         'file_to_merge': 'some_file',
                                         'merge_target_file':
                                             'some_other_file'}

    def tearDown(self):
        super(LibvirtVolumeSnapshotTestCase, self).tearDown()

    @mock.patch('nova.virt.block_device.DriverVolumeBlockDevice.'
                'refresh_connection_info')
    @mock.patch('nova.objects.block_device.BlockDeviceMapping.'
                'get_by_volume_id')
    def test_volume_refresh_connection_info(self, mock_get_by_volume_id,
                                            mock_refresh_connection_info):
        # Refreshing must look up the BDM by volume id and then refresh
        # its connection info through the volume API.
        fake_bdm = fake_block_device.FakeDbBlockDeviceDict({
            'id': 123,
            'instance_uuid': 'fake-instance',
            'device_name': '/dev/sdb',
            'source_type': 'volume',
            'destination_type': 'volume',
            'volume_id': 'fake-volume-id-1',
            'connection_info': '{"fake": "connection_info"}'})
        mock_get_by_volume_id.return_value = fake_bdm

        self.drvr._volume_refresh_connection_info(self.c, self.inst,
                                                  self.volume_uuid)

        mock_get_by_volume_id.assert_called_once_with(self.c, self.volume_uuid)
        mock_refresh_connection_info.assert_called_once_with(self.c, self.inst,
            self.drvr._volume_api, self.drvr)

    def test_volume_snapshot_create(self, quiesce=True):
        """Test snapshot creation with file-based disk."""
        self.flags(instance_name_template='instance-%s')
        self.mox.StubOutWithMock(self.drvr._host, 'get_domain')
        self.mox.StubOutWithMock(self.drvr, '_volume_api')

        instance = objects.Instance(**self.inst)

        new_file = 'new-file'

        domain = FakeVirtDomain(fake_xml=self.dom_xml)
        self.mox.StubOutWithMock(domain, 'XMLDesc')
        self.mox.StubOutWithMock(domain, 'snapshotCreateXML')
        domain.XMLDesc(flags=0).AndReturn(self.dom_xml)

        # Only the target disk is snapshotted externally; the other disk
        # is explicitly excluded with snapshot="no".
        snap_xml_src = (
           '<domainsnapshot>\n'
           '  <disks>\n'
           '    <disk name="disk1_file" snapshot="external" type="file">\n'
           '      <source file="new-file"/>\n'
           '    </disk>\n'
           '    <disk name="vdb" snapshot="no"/>\n'
           '  </disks>\n'
           '</domainsnapshot>\n')

        # Older versions of libvirt may be missing these.
        fakelibvirt.VIR_DOMAIN_SNAPSHOT_CREATE_REUSE_EXT = 32
        fakelibvirt.VIR_DOMAIN_SNAPSHOT_CREATE_QUIESCE = 64

        snap_flags = (fakelibvirt.VIR_DOMAIN_SNAPSHOT_CREATE_DISK_ONLY |
                      fakelibvirt.VIR_DOMAIN_SNAPSHOT_CREATE_NO_METADATA |
                      fakelibvirt.VIR_DOMAIN_SNAPSHOT_CREATE_REUSE_EXT)

        snap_flags_q = (snap_flags |
                        fakelibvirt.VIR_DOMAIN_SNAPSHOT_CREATE_QUIESCE)

        if quiesce:
            domain.snapshotCreateXML(snap_xml_src, snap_flags_q)
        else:
            # When quiescing fails (no qemu guest agent), the driver is
            # expected to retry without the QUIESCE flag.
            domain.snapshotCreateXML(snap_xml_src, snap_flags_q).\
                AndRaise(fakelibvirt.libvirtError(
                    'quiescing failed, no qemu-ga'))
            domain.snapshotCreateXML(snap_xml_src, snap_flags).AndReturn(0)

        self.mox.ReplayAll()

        self.drvr._volume_snapshot_create(self.c, instance, domain,
                                          self.volume_uuid, new_file)

        self.mox.VerifyAll()

    def test_volume_snapshot_create_libgfapi(self, quiesce=True):
        """Test snapshot creation with libgfapi network disk."""
        self.flags(instance_name_template = 'instance-%s')
        self.flags(qemu_allowed_storage_drivers = ['gluster'], group='libvirt')
        self.mox.StubOutWithMock(self.drvr._host, 'get_domain')
        self.mox.StubOutWithMock(self.drvr, '_volume_api')

        # Replace the file/block fixture with a gluster-backed second disk.
        self.dom_xml = """
              <domain type='kvm'>
                <devices>
                  <disk type='file'>
                    <source file='disk1_file'/>
                    <target dev='vda' bus='virtio'/>
                    <serial>0e38683e-f0af-418f-a3f1-6b67ea0f919d</serial>
                  </disk>
                  <disk type='block'>
                    <source protocol='gluster' name='gluster1/volume-1234'>
                      <host name='127.3.4.5' port='24007'/>
                    </source>
                    <target dev='vdb' bus='virtio' serial='1234'/>
                  </disk>
                </devices>
              </domain>"""

        instance = objects.Instance(**self.inst)

        new_file = 'new-file'

        domain = FakeVirtDomain(fake_xml=self.dom_xml)
        self.mox.StubOutWithMock(domain, 'XMLDesc')
        self.mox.StubOutWithMock(domain, 'snapshotCreateXML')
        domain.XMLDesc(flags=0).AndReturn(self.dom_xml)

        snap_xml_src = (
           '<domainsnapshot>\n'
           '  <disks>\n'
           '    <disk name="disk1_file" snapshot="external" type="file">\n'
           '      <source file="new-file"/>\n'
           '    </disk>\n'
           '    <disk name="vdb" snapshot="no"/>\n'
           '  </disks>\n'
           '</domainsnapshot>\n')

        # Older versions of libvirt may be missing these.
        fakelibvirt.VIR_DOMAIN_SNAPSHOT_CREATE_REUSE_EXT = 32
        fakelibvirt.VIR_DOMAIN_SNAPSHOT_CREATE_QUIESCE = 64

        snap_flags = (fakelibvirt.VIR_DOMAIN_SNAPSHOT_CREATE_DISK_ONLY |
                      fakelibvirt.VIR_DOMAIN_SNAPSHOT_CREATE_NO_METADATA |
                      fakelibvirt.VIR_DOMAIN_SNAPSHOT_CREATE_REUSE_EXT)

        snap_flags_q = (snap_flags |
                        fakelibvirt.VIR_DOMAIN_SNAPSHOT_CREATE_QUIESCE)

        if quiesce:
            domain.snapshotCreateXML(snap_xml_src, snap_flags_q)
        else:
            domain.snapshotCreateXML(snap_xml_src, snap_flags_q).\
                AndRaise(fakelibvirt.libvirtError(
                    'quiescing failed, no qemu-ga'))
            domain.snapshotCreateXML(snap_xml_src, snap_flags).AndReturn(0)

        self.mox.ReplayAll()

        self.drvr._volume_snapshot_create(self.c, instance, domain,
                                          self.volume_uuid, new_file)

        self.mox.VerifyAll()

    def test_volume_snapshot_create_noquiesce(self):
        # Exercise the fallback path where quiescing raises.
        self.test_volume_snapshot_create(quiesce=False)

    def test_volume_snapshot_create_outer_success(self):
        # The public volume_snapshot_create wrapper must call the internal
        # helper, mark the snapshot 'creating', poll it to 'available' and
        # refresh the volume's connection info.
        instance = objects.Instance(**self.inst)

        domain = FakeVirtDomain(fake_xml=self.dom_xml)

        self.mox.StubOutWithMock(self.drvr._host, 'get_domain')
        self.mox.StubOutWithMock(self.drvr, '_volume_api')
        self.mox.StubOutWithMock(self.drvr, '_volume_snapshot_create')

        self.drvr._host.get_domain(instance).AndReturn(domain)
        self.drvr._volume_snapshot_create(self.c,
                                          instance,
                                          domain,
                                          self.volume_uuid,
                                          self.create_info['new_file'])

        self.drvr._volume_api.update_snapshot_status(
            self.c, self.create_info['snapshot_id'], 'creating')

        self.mox.StubOutWithMock(self.drvr._volume_api, 'get_snapshot')
        self.drvr._volume_api.get_snapshot(self.c,
            self.create_info['snapshot_id']).AndReturn({'status': 'available'})
        self.mox.StubOutWithMock(self.drvr, '_volume_refresh_connection_info')
        self.drvr._volume_refresh_connection_info(self.c, instance,
                                                  self.volume_uuid)

        self.mox.ReplayAll()

        self.drvr.volume_snapshot_create(self.c, instance, self.volume_uuid,
                                         self.create_info)

    def test_volume_snapshot_create_outer_failure(self):
        # On helper failure the snapshot status must be set to 'error'
        # and the exception propagated.
        instance = objects.Instance(**self.inst)

        domain = FakeVirtDomain(fake_xml=self.dom_xml)

        self.mox.StubOutWithMock(self.drvr._host, 'get_domain')
        self.mox.StubOutWithMock(self.drvr, '_volume_api')
        self.mox.StubOutWithMock(self.drvr, '_volume_snapshot_create')

        self.drvr._host.get_domain(instance).AndReturn(domain)
        self.drvr._volume_snapshot_create(self.c,
                                          instance,
                                          domain,
                                          self.volume_uuid,
                                          self.create_info['new_file']).\
            AndRaise(exception.NovaException('oops'))

        self.drvr._volume_api.update_snapshot_status(
            self.c, self.create_info['snapshot_id'], 'error')

        self.mox.ReplayAll()

        self.assertRaises(exception.NovaException,
                          self.drvr.volume_snapshot_create,
                          self.c,
                          instance,
                          self.volume_uuid,
                          self.create_info)

    def test_volume_snapshot_delete_1(self):
        """Deleting newest snapshot -- blockRebase."""

        # libvirt lib doesn't have VIR_DOMAIN_BLOCK_REBASE_RELATIVE flag
        fakelibvirt.__dict__.pop('VIR_DOMAIN_BLOCK_REBASE_RELATIVE')
        self.stubs.Set(libvirt_driver, 'libvirt', fakelibvirt)

        instance = objects.Instance(**self.inst)
        snapshot_id = 'snapshot-1234'

        domain = FakeVirtDomain(fake_xml=self.dom_xml)
        self.mox.StubOutWithMock(domain, 'XMLDesc')
        domain.XMLDesc(flags=0).AndReturn(self.dom_xml)

        self.mox.StubOutWithMock(self.drvr._host, 'get_domain')
        self.mox.StubOutWithMock(self.drvr._host, 'has_min_version')
        self.mox.StubOutWithMock(domain, 'blockRebase')
        self.mox.StubOutWithMock(domain, 'blockCommit')
        self.mox.StubOutWithMock(domain, 'blockJobInfo')

        self.drvr._host.get_domain(instance).AndReturn(domain)
        self.drvr._host.has_min_version(mox.IgnoreArg()).AndReturn(True)

        domain.blockRebase('vda', 'snap.img', 0, 0)

        # Two polls: job in progress, then complete (cur == end).
        domain.blockJobInfo('vda', 0).AndReturn({'cur': 1, 'end': 1000})
        domain.blockJobInfo('vda', 0).AndReturn({'cur': 1000, 'end': 1000})

        self.mox.ReplayAll()

        self.drvr._volume_snapshot_delete(self.c, instance, self.volume_uuid,
                                          snapshot_id, self.delete_info_1)

        self.mox.VerifyAll()
        # Restore the flag removed at the start of the test.
        fakelibvirt.__dict__.update({'VIR_DOMAIN_BLOCK_REBASE_RELATIVE': 8})

    def test_volume_snapshot_delete_relative_1(self):
        """Deleting newest snapshot -- blockRebase using relative flag"""

        self.stubs.Set(libvirt_driver, 'libvirt', fakelibvirt)

        instance = objects.Instance(**self.inst)
        snapshot_id = 'snapshot-1234'

        domain = FakeVirtDomain(fake_xml=self.dom_xml)
        self.mox.StubOutWithMock(domain, 'XMLDesc')
        domain.XMLDesc(flags=0).AndReturn(self.dom_xml)

        self.mox.StubOutWithMock(self.drvr._host, 'get_domain')
        self.mox.StubOutWithMock(self.drvr._host, 'has_min_version')
        self.mox.StubOutWithMock(domain, 'blockRebase')
        self.mox.StubOutWithMock(domain, 'blockCommit')
        self.mox.StubOutWithMock(domain, 'blockJobInfo')

        self.drvr._host.get_domain(instance).AndReturn(domain)
        self.drvr._host.has_min_version(mox.IgnoreArg()).AndReturn(True)

        domain.blockRebase('vda', 'snap.img', 0,
                           fakelibvirt.VIR_DOMAIN_BLOCK_REBASE_RELATIVE)

        domain.blockJobInfo('vda', 0).AndReturn({'cur': 1, 'end': 1000})
        domain.blockJobInfo('vda', 0).AndReturn({'cur': 1000, 'end': 1000})

        self.mox.ReplayAll()

        self.drvr._volume_snapshot_delete(self.c, instance, self.volume_uuid,
                                          snapshot_id, self.delete_info_1)

        self.mox.VerifyAll()

    def test_volume_snapshot_delete_2(self):
        """Deleting older snapshot -- blockCommit."""

        # libvirt lib doesn't have VIR_DOMAIN_BLOCK_COMMIT_RELATIVE
        fakelibvirt.__dict__.pop('VIR_DOMAIN_BLOCK_COMMIT_RELATIVE')
        self.stubs.Set(libvirt_driver, 'libvirt', fakelibvirt)

        instance = objects.Instance(**self.inst)
        snapshot_id = 'snapshot-1234'

        domain = FakeVirtDomain(fake_xml=self.dom_xml)
        self.mox.StubOutWithMock(domain, 'XMLDesc')
        domain.XMLDesc(flags=0).AndReturn(self.dom_xml)

        self.mox.StubOutWithMock(self.drvr._host, 'get_domain')
        self.mox.StubOutWithMock(self.drvr._host, 'has_min_version')
        self.mox.StubOutWithMock(domain, 'blockRebase')
        self.mox.StubOutWithMock(domain, 'blockCommit')
        self.mox.StubOutWithMock(domain, 'blockJobInfo')

        self.drvr._host.get_domain(instance).AndReturn(domain)
        self.drvr._host.has_min_version(mox.IgnoreArg()).AndReturn(True)

        self.mox.ReplayAll()

        # Without the relative-commit capability the delete must fail.
        self.assertRaises(exception.Invalid,
                          self.drvr._volume_snapshot_delete,
                          self.c,
                          instance,
                          self.volume_uuid,
                          snapshot_id,
                          self.delete_info_2)

        fakelibvirt.__dict__.update({'VIR_DOMAIN_BLOCK_COMMIT_RELATIVE': 4})

    def test_volume_snapshot_delete_relative_2(self):
        """Deleting older snapshot -- blockCommit using relative flag"""

        self.stubs.Set(libvirt_driver, 'libvirt', fakelibvirt)

        instance = objects.Instance(**self.inst)
        snapshot_id = 'snapshot-1234'

        domain = FakeVirtDomain(fake_xml=self.dom_xml)
        self.mox.StubOutWithMock(domain, 'XMLDesc')
        domain.XMLDesc(flags=0).AndReturn(self.dom_xml)

        self.mox.StubOutWithMock(self.drvr._host, 'get_domain')
        self.mox.StubOutWithMock(self.drvr._host, 'has_min_version')
        self.mox.StubOutWithMock(domain, 'blockRebase')
        self.mox.StubOutWithMock(domain, 'blockCommit')
        self.mox.StubOutWithMock(domain, 'blockJobInfo')

        self.drvr._host.get_domain(instance).AndReturn(domain)
        self.drvr._host.has_min_version(mox.IgnoreArg()).AndReturn(True)

        domain.blockCommit('vda', 'other-snap.img', 'snap.img', 0,
                           fakelibvirt.VIR_DOMAIN_BLOCK_COMMIT_RELATIVE)

        # Second poll returns an empty dict: job gone, treated as done.
        domain.blockJobInfo('vda', 0).AndReturn({'cur': 1, 'end': 1000})
        domain.blockJobInfo('vda', 0).AndReturn({})

        self.mox.ReplayAll()

        self.drvr._volume_snapshot_delete(self.c, instance, self.volume_uuid,
                                          snapshot_id, self.delete_info_2)

        self.mox.VerifyAll()

    def test_volume_snapshot_delete_outer_success(self):
        # Wrapper must delete, mark snapshot 'deleting' and refresh
        # connection info.
        instance = objects.Instance(**self.inst)
        snapshot_id = 'snapshot-1234'

        FakeVirtDomain(fake_xml=self.dom_xml)

        self.mox.StubOutWithMock(self.drvr._host, 'get_domain')
        self.mox.StubOutWithMock(self.drvr, '_volume_api')
        self.mox.StubOutWithMock(self.drvr, '_volume_snapshot_delete')

        self.drvr._volume_snapshot_delete(self.c,
                                          instance,
                                          self.volume_uuid,
                                          snapshot_id,
                                          delete_info=self.delete_info_1)

        self.drvr._volume_api.update_snapshot_status(
            self.c, snapshot_id, 'deleting')

        self.mox.StubOutWithMock(self.drvr, '_volume_refresh_connection_info')
        self.drvr._volume_refresh_connection_info(self.c, instance,
                                                  self.volume_uuid)

        self.mox.ReplayAll()

        self.drvr.volume_snapshot_delete(self.c, instance, self.volume_uuid,
                                         snapshot_id,
                                         self.delete_info_1)

        self.mox.VerifyAll()

    def test_volume_snapshot_delete_outer_failure(self):
        # On helper failure the snapshot must be flagged 'error_deleting'
        # and the exception re-raised.
        instance = objects.Instance(**self.inst)
        snapshot_id = '1234-9876'

        FakeVirtDomain(fake_xml=self.dom_xml)

        self.mox.StubOutWithMock(self.drvr._host, 'get_domain')
        self.mox.StubOutWithMock(self.drvr, '_volume_api')
        self.mox.StubOutWithMock(self.drvr, '_volume_snapshot_delete')

        self.drvr._volume_snapshot_delete(self.c,
                                          instance,
                                          self.volume_uuid,
                                          snapshot_id,
                                          delete_info=self.delete_info_1).\
            AndRaise(exception.NovaException('oops'))

        self.drvr._volume_api.update_snapshot_status(
            self.c, snapshot_id, 'error_deleting')

        self.mox.ReplayAll()

        self.assertRaises(exception.NovaException,
                          self.drvr.volume_snapshot_delete,
                          self.c,
                          instance,
                          self.volume_uuid,
                          snapshot_id,
                          self.delete_info_1)

        self.mox.VerifyAll()

    def test_volume_snapshot_delete_invalid_type(self):
        # An unknown delete_info 'type' is rejected before any block job.
        instance = objects.Instance(**self.inst)

        FakeVirtDomain(fake_xml=self.dom_xml)

        self.mox.StubOutWithMock(self.drvr._host, 'get_domain')
        self.mox.StubOutWithMock(self.drvr, '_volume_api')
        self.mox.StubOutWithMock(self.drvr._host, 'has_min_version')

        self.drvr._host.has_min_version(mox.IgnoreArg()).AndReturn(True)

        self.drvr._volume_api.update_snapshot_status(
            self.c, self.snapshot_id, 'error_deleting')

        self.mox.ReplayAll()

        self.assertRaises(exception.NovaException,
                          self.drvr.volume_snapshot_delete,
                          self.c,
                          instance,
                          self.volume_uuid,
                          self.snapshot_id,
                          self.delete_info_invalid_type)

    def test_volume_snapshot_delete_netdisk_1(self):
        """Delete newest snapshot -- blockRebase for libgfapi/network disk."""

        class FakeNetdiskDomain(FakeVirtDomain):
            def __init__(self, *args, **kwargs):
                super(FakeNetdiskDomain, self).__init__(*args, **kwargs)

            def XMLDesc(self, flags):
                return self.dom_netdisk_xml

        # libvirt lib doesn't have VIR_DOMAIN_BLOCK_REBASE_RELATIVE
        fakelibvirt.__dict__.pop('VIR_DOMAIN_BLOCK_REBASE_RELATIVE')
        self.stubs.Set(libvirt_driver, 'libvirt', fakelibvirt)

        instance = objects.Instance(**self.inst)
        snapshot_id = 'snapshot-1234'

        domain = FakeNetdiskDomain(fake_xml=self.dom_netdisk_xml)
        self.mox.StubOutWithMock(domain, 'XMLDesc')
        domain.XMLDesc(flags=0).AndReturn(self.dom_netdisk_xml)

        self.mox.StubOutWithMock(self.drvr._host, 'get_domain')
        self.mox.StubOutWithMock(self.drvr._host, 'has_min_version')
        self.mox.StubOutWithMock(domain, 'blockRebase')
        self.mox.StubOutWithMock(domain, 'blockCommit')
        self.mox.StubOutWithMock(domain, 'blockJobInfo')

        self.drvr._host.get_domain(instance).AndReturn(domain)
        self.drvr._host.has_min_version(mox.IgnoreArg()).AndReturn(True)

        # 'vdb[1]' addresses the backing-chain element by index.
        domain.blockRebase('vdb', 'vdb[1]', 0, 0)

        domain.blockJobInfo('vdb', 0).AndReturn({'cur': 1, 'end': 1000})
        domain.blockJobInfo('vdb', 0).AndReturn({'cur': 1000, 'end': 1000})

        self.mox.ReplayAll()

        self.drvr._volume_snapshot_delete(self.c, instance, self.volume_uuid,
                                          snapshot_id, self.delete_info_1)

        self.mox.VerifyAll()
        fakelibvirt.__dict__.update({'VIR_DOMAIN_BLOCK_REBASE_RELATIVE': 8})

    def test_volume_snapshot_delete_netdisk_relative_1(self):
        """Delete newest snapshot -- blockRebase for libgfapi/network disk."""

        class FakeNetdiskDomain(FakeVirtDomain):
            def __init__(self, *args, **kwargs):
                super(FakeNetdiskDomain, self).__init__(*args, **kwargs)

            def XMLDesc(self, flags):
                return self.dom_netdisk_xml

        self.stubs.Set(libvirt_driver, 'libvirt', fakelibvirt)

        instance = objects.Instance(**self.inst)
        snapshot_id = 'snapshot-1234'

        domain = FakeNetdiskDomain(fake_xml=self.dom_netdisk_xml)
        self.mox.StubOutWithMock(domain, 'XMLDesc')
        domain.XMLDesc(flags=0).AndReturn(self.dom_netdisk_xml)

        self.mox.StubOutWithMock(self.drvr._host, 'get_domain')
        self.mox.StubOutWithMock(self.drvr._host, 'has_min_version')
        self.mox.StubOutWithMock(domain, 'blockRebase')
        self.mox.StubOutWithMock(domain, 'blockCommit')
        self.mox.StubOutWithMock(domain, 'blockJobInfo')

        self.drvr._host.get_domain(instance).AndReturn(domain)
        self.drvr._host.has_min_version(mox.IgnoreArg()).AndReturn(True)

        domain.blockRebase('vdb', 'vdb[1]', 0,
                           fakelibvirt.VIR_DOMAIN_BLOCK_REBASE_RELATIVE)

        domain.blockJobInfo('vdb', 0).AndReturn({'cur': 1, 'end': 1000})
        domain.blockJobInfo('vdb', 0).AndReturn({'cur': 1000, 'end': 1000})

        self.mox.ReplayAll()

        self.drvr._volume_snapshot_delete(self.c, instance, self.volume_uuid,
                                          snapshot_id, self.delete_info_1)

        self.mox.VerifyAll()

    def test_volume_snapshot_delete_netdisk_2(self):
        """Delete older snapshot -- blockCommit for libgfapi/network disk."""

        class FakeNetdiskDomain(FakeVirtDomain):
            def __init__(self, *args, **kwargs):
                super(FakeNetdiskDomain, self).__init__(*args, **kwargs)

            def XMLDesc(self, flags):
                return self.dom_netdisk_xml

        # libvirt lib doesn't have VIR_DOMAIN_BLOCK_COMMIT_RELATIVE
        fakelibvirt.__dict__.pop('VIR_DOMAIN_BLOCK_COMMIT_RELATIVE')
        self.stubs.Set(libvirt_driver, 'libvirt', fakelibvirt)

        instance = objects.Instance(**self.inst)
        snapshot_id = 'snapshot-1234'

        domain = FakeNetdiskDomain(fake_xml=self.dom_netdisk_xml)
        self.mox.StubOutWithMock(domain, 'XMLDesc')
        domain.XMLDesc(flags=0).AndReturn(self.dom_netdisk_xml)

        self.mox.StubOutWithMock(self.drvr._host, 'get_domain')
        self.mox.StubOutWithMock(self.drvr._host, 'has_min_version')
        self.mox.StubOutWithMock(domain, 'blockRebase')
        self.mox.StubOutWithMock(domain, 'blockCommit')
        self.mox.StubOutWithMock(domain, 'blockJobInfo')

        self.drvr._host.get_domain(instance).AndReturn(domain)
        self.drvr._host.has_min_version(mox.IgnoreArg()).AndReturn(True)

        self.mox.ReplayAll()

        # Without relative-commit support the operation must be refused.
        self.assertRaises(exception.Invalid,
                          self.drvr._volume_snapshot_delete,
                          self.c,
                          instance,
                          self.volume_uuid,
                          snapshot_id,
                          self.delete_info_netdisk)

        fakelibvirt.__dict__.update({'VIR_DOMAIN_BLOCK_COMMIT_RELATIVE': 4})

    def test_volume_snapshot_delete_netdisk_relative_2(self):
        """Delete older snapshot -- blockCommit for libgfapi/network disk."""

        class FakeNetdiskDomain(FakeVirtDomain):
            def __init__(self, *args, **kwargs):
                super(FakeNetdiskDomain, self).__init__(*args, **kwargs)

            def XMLDesc(self, flags):
                return self.dom_netdisk_xml

        self.stubs.Set(libvirt_driver, 'libvirt', fakelibvirt)

        instance = objects.Instance(**self.inst)
        snapshot_id = 'snapshot-1234'

        domain = FakeNetdiskDomain(fake_xml=self.dom_netdisk_xml)
        self.mox.StubOutWithMock(domain, 'XMLDesc')
        domain.XMLDesc(flags=0).AndReturn(self.dom_netdisk_xml)

        self.mox.StubOutWithMock(self.drvr._host, 'get_domain')
        self.mox.StubOutWithMock(self.drvr._host, 'has_min_version')
        self.mox.StubOutWithMock(domain, 'blockRebase')
        self.mox.StubOutWithMock(domain, 'blockCommit')
        self.mox.StubOutWithMock(domain, 'blockJobInfo')

        self.drvr._host.get_domain(instance).AndReturn(domain)
        self.drvr._host.has_min_version(mox.IgnoreArg()).AndReturn(True)

        # Commit the chain element at index 1 into the base at index 0.
        domain.blockCommit('vdb', 'vdb[0]', 'vdb[1]', 0,
                           fakelibvirt.VIR_DOMAIN_BLOCK_COMMIT_RELATIVE)

        domain.blockJobInfo('vdb', 0).AndReturn({'cur': 1, 'end': 1000})
        domain.blockJobInfo('vdb', 0).AndReturn({'cur': 1000, 'end': 1000})

        self.mox.ReplayAll()

        self.drvr._volume_snapshot_delete(self.c, instance, self.volume_uuid,
                                          snapshot_id,
                                          self.delete_info_netdisk)

        self.mox.VerifyAll()
def _fake_convert_image(source, dest, out_format,
                        run_as_root=True):
    """Stand-in for images.convert_image that just records *dest*."""
    fake_files = libvirt_driver.libvirt_utils.files
    fake_files[dest] = ''
class _BaseSnapshotTests(test.NoDBTestCase):
    """Shared fixtures and helpers for the snapshot test variants."""

    def setUp(self):
        super(_BaseSnapshotTests, self).setUp()
        self.flags(snapshots_directory='./', group='libvirt')
        self.context = context.get_admin_context()

        # Route both the driver's and imagebackend's libvirt_utils through
        # the in-memory fake so no real disk operations happen.
        self.useFixture(fixtures.MonkeyPatch(
            'nova.virt.libvirt.driver.libvirt_utils',
            fake_libvirt_utils))
        self.useFixture(fixtures.MonkeyPatch(
            'nova.virt.libvirt.imagebackend.libvirt_utils',
            fake_libvirt_utils))

        self.mock_update_task_state = mock.Mock()
        self.image_service = nova.tests.unit.image.fake.stub_out_image_service(
            self.stubs)

        instance_values = _create_test_instance()
        self.instance_ref = objects.Instance(**instance_values)
        self.instance_ref.info_cache = objects.InstanceInfoCache(
            network_info=None)

    def _assert_snapshot(self, snapshot, disk_format,
                         expected_properties=None):
        # The snapshot flow reports PENDING_UPLOAD and then UPLOADING.
        expected_calls = [
            mock.call(task_state=task_states.IMAGE_PENDING_UPLOAD),
            mock.call(task_state=task_states.IMAGE_UPLOADING,
                      expected_state=task_states.IMAGE_PENDING_UPLOAD)]
        self.mock_update_task_state.assert_has_calls(expected_calls)

        props = snapshot['properties']
        self.assertEqual(props['image_state'], 'available')
        self.assertEqual(snapshot['status'], 'active')
        self.assertEqual(snapshot['disk_format'], disk_format)
        self.assertEqual(snapshot['name'], 'test-snap')

        if expected_properties:
            for key in expected_properties:
                self.assertEqual(expected_properties[key], props[key])

    def _create_image(self, extra_properties=None):
        props = {'instance_id': self.instance_ref['id'],
                 'user_id': str(self.context.user_id)}
        if extra_properties:
            props.update(extra_properties)

        metadata = {'name': 'test-snap',
                    'is_public': False,
                    'status': 'creating',
                    'properties': props}

        # Create new image. It will be updated in snapshot method
        # To work with it from snapshot, the single image_service is needed
        return self.image_service.create(self.context, metadata)

    @mock.patch.object(imagebackend.Image, 'resolve_driver_format')
    @mock.patch.object(host.Host, 'get_domain')
    def _snapshot(self, image_id, mock_get_domain, mock_resolve):
        mock_get_domain.return_value = FakeVirtDomain()
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        drvr.snapshot(self.context, self.instance_ref, image_id,
                      self.mock_update_task_state)
        return self.image_service.show(self.context, image_id)

    def _test_snapshot(self, disk_format, extra_properties=None):
        image = self._create_image(extra_properties=extra_properties)
        snapshot = self._snapshot(image['id'])
        self._assert_snapshot(snapshot, disk_format=disk_format,
                              expected_properties=extra_properties)
class LibvirtSnapshotTests(_BaseSnapshotTests):
    """Snapshot tests against the default libvirt configuration."""

    def test_ami(self):
        # Assign different image_ref from nova/images/fakes for testing ami
        self.instance_ref.image_ref = 'c905cedb-7281-47e4-8a62-f26bc5fc4c77'
        self.instance_ref.system_metadata = (
            utils.get_system_metadata_from_image({'disk_format': 'ami'}))
        self._test_snapshot(disk_format='ami')

    @mock.patch.object(fake_libvirt_utils, 'disk_type', new='raw')
    @mock.patch.object(libvirt_driver.imagebackend.images,
                       'convert_image',
                       side_effect=_fake_convert_image)
    def test_raw(self, mock_convert_image):
        self._test_snapshot(disk_format='raw')

    def test_qcow2(self):
        self._test_snapshot(disk_format='qcow2')

    def test_no_image_architecture(self):
        self.instance_ref.image_ref = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
        self._test_snapshot(disk_format='qcow2')

    def test_no_original_image(self):
        self.instance_ref.image_ref = '661122aa-1234-dede-fefe-babababababa'
        self._test_snapshot(disk_format='qcow2')

    def test_snapshot_metadata_image(self):
        # Assign an image with an architecture defined (x86_64)
        self.instance_ref.image_ref = 'a440c04b-79fa-479c-bed1-0b816eaec379'

        extra = {'architecture': 'fake_arch',
                 'key_a': 'value_a',
                 'key_b': 'value_b',
                 'os_type': 'linux'}
        self._test_snapshot(disk_format='qcow2', extra_properties=extra)
class LXCSnapshotTests(LibvirtSnapshotTests):
    """Re-run every libvirt snapshot test with the LXC virt type enabled."""

    def setUp(self):
        super(LXCSnapshotTests, self).setUp()
        self.flags(virt_type='lxc', group='libvirt')
class LVMSnapshotTests(_BaseSnapshotTests):
    """Snapshot tests with LVM-backed instance disks."""

    @mock.patch.object(fake_libvirt_utils, 'disk_type', new='lvm')
    @mock.patch.object(libvirt_driver.imagebackend.images,
                       'convert_image',
                       side_effect=_fake_convert_image)
    @mock.patch.object(libvirt_driver.imagebackend.lvm, 'volume_info')
    def _test_lvm_snapshot(self, disk_format, mock_volume_info,
                           mock_convert_image):
        self.flags(images_type='lvm',
                   images_volume_group='nova-vg', group='libvirt')

        self._test_snapshot(disk_format=disk_format)

        # The LVM path must have been inspected and then converted
        # to the requested snapshot format.
        lv_path = '/dev/nova-vg/lv'
        mock_volume_info.assert_has_calls([mock.call(lv_path)])
        mock_convert_image.assert_called_once_with(
            lv_path, mock.ANY, disk_format, run_as_root=True)

    def test_raw(self):
        self._test_lvm_snapshot('raw')

    def test_qcow2(self):
        self.flags(snapshot_image_format='qcow2', group='libvirt')
        self._test_lvm_snapshot('qcow2')
| double12gzh/nova | nova/tests/unit/virt/libvirt/test_driver.py | Python | apache-2.0 | 606,600 |
"""Extract, format and print information about Python stack traces."""
import linecache
import sys
import types
__all__ = ['extract_stack', 'extract_tb', 'format_exception',
'format_exception_only', 'format_list', 'format_stack',
'format_tb', 'print_exc', 'format_exc', 'print_exception',
'print_last', 'print_stack', 'print_tb', 'tb_lineno']
def _print(file, str='', terminator='\n'):
    """Write *str* followed by *terminator* to the file-like *file*."""
    output = str + terminator
    file.write(output)
def print_list(extracted_list, file=None):
    """Print the list of tuples as returned by extract_tb() or
    extract_stack() as a formatted stack trace to the given file
    (sys.stderr when *file* is None)."""
    out = sys.stderr if file is None else file
    for entry in extracted_list:
        filename, lineno, name, line = entry
        _print(out,
               '  File "%s", line %d, in %s' % (filename, lineno, name))
        if line:
            _print(out, '    %s' % line.strip())
def format_list(extracted_list):
    """Format a list of traceback entry tuples for printing.

    Given a list of tuples as returned by extract_tb() or
    extract_stack(), return a list of strings ready for printing.
    Each string in the resulting list corresponds to the item with the
    same index in the argument list.  Each string ends in a newline;
    the strings may contain internal newlines as well, for those items
    whose source text line is not None.
    """
    # Fix: the accumulator used to be named 'list', shadowing the builtin.
    formatted = []
    for filename, lineno, name, line in extracted_list:
        item = '  File "%s", line %d, in %s\n' % (filename, lineno, name)
        if line:
            # Indent the stripped source line under its location header.
            item = item + '    %s\n' % line.strip()
        formatted.append(item)
    return formatted
def print_tb(tb, limit=None, file=None):
    """Print up to 'limit' stack trace entries from the traceback 'tb'.

    If 'limit' is omitted or None, all entries are printed.  If 'file'
    is omitted or None, the output goes to sys.stderr; otherwise
    'file' should be an open file or file-like object with a write()
    method.
    """
    if file is None:
        file = sys.stderr
    if limit is None and hasattr(sys, 'tracebacklimit'):
        limit = sys.tracebacklimit
    count = 0
    current = tb
    while current is not None:
        if limit is not None and count >= limit:
            break
        frame = current.tb_frame
        lineno = current.tb_lineno
        code = frame.f_code
        _print(file,
               '  File "%s", line %d, in %s' % (code.co_filename, lineno,
                                                code.co_name))
        # Refresh linecache in case the source changed on disk.
        linecache.checkcache(code.co_filename)
        source = linecache.getline(code.co_filename, lineno, frame.f_globals)
        if source:
            _print(file, '    ' + source.strip())
        current = current.tb_next
        count = count + 1
def format_tb(tb, limit=None):
    """A shorthand for 'format_list(extract_tb(tb, limit))'."""
    entries = extract_tb(tb, limit)
    return format_list(entries)
def extract_tb(tb, limit=None):
    """Return list of up to limit pre-processed entries from traceback.

    This is useful for alternate formatting of stack traces.  If
    'limit' is omitted or None, all entries are extracted.  A
    pre-processed stack trace entry is a quadruple (filename, line
    number, function name, text) representing the information that is
    usually printed for a stack trace.  The text is a string with
    leading and trailing whitespace stripped; if the source is not
    available it is None.
    """
    if limit is None:
        if hasattr(sys, 'tracebacklimit'):
            limit = sys.tracebacklimit
    # Fix: the accumulator used to be named 'list', shadowing the builtin.
    entries = []
    n = 0
    while tb is not None and (limit is None or n < limit):
        f = tb.tb_frame
        lineno = tb.tb_lineno
        co = f.f_code
        filename = co.co_filename
        name = co.co_name
        # Refresh linecache in case the source changed on disk.
        linecache.checkcache(filename)
        line = linecache.getline(filename, lineno, f.f_globals)
        if line:
            line = line.strip()
        else:
            line = None
        entries.append((filename, lineno, name, line))
        tb = tb.tb_next
        n = n + 1
    return entries
def print_exception(etype, value, tb, limit=None, file=None):
    """Print exception up to 'limit' stack trace entries from 'tb' to 'file'.

    This differs from print_tb() in the following ways: (1) if
    traceback is not None, it prints a header "Traceback (most recent
    call last):"; (2) it prints the exception type and value after the
    stack trace; (3) if type is SyntaxError and value has the
    appropriate format, it prints the line where the syntax error
    occurred with a caret on the next line indicating the approximate
    position of the error.
    """
    if file is None:
        file = sys.stderr
    if tb:
        _print(file, 'Traceback (most recent call last):')
        print_tb(tb, limit, file)
    # The formatted lines already end in newlines, so suppress the
    # default terminator.
    for line in format_exception_only(etype, value):
        _print(file, line, '')
def format_exception(etype, value, tb, limit=None):
    """Format a stack trace and the exception information.

    The arguments have the same meaning as the corresponding arguments
    to print_exception().  The return value is a list of strings, each
    ending in a newline and some containing internal newlines.  When
    these lines are concatenated and printed, exactly the same text is
    printed as does print_exception().
    """
    # Fix: the result used to be named 'list', shadowing the builtin,
    # and was grown with repeated '+' concatenation.
    if tb:
        result = ['Traceback (most recent call last):\n']
        result.extend(format_tb(tb, limit))
    else:
        result = []
    result.extend(format_exception_only(etype, value))
    return result
def format_exception_only(etype, value):
    """Format the exception part of a traceback.

    The arguments are the exception type and value such as given by
    sys.last_type and sys.last_value. The return value is a list of
    strings, each ending in a newline.

    Normally, the list contains a single string; however, for
    SyntaxError exceptions, it contains several lines that (when
    printed) display detailed information about where the syntax
    error occurred.

    The message indicating which exception occurred is always the last
    string in the list.
    """
    # An instance should not have a meaningful value parameter, but
    # sometimes does, particularly for string exceptions, such as
    # >>> raise string1, string2 # deprecated
    #
    # Clear these out first because issubtype(string1, SyntaxError)
    # would raise another exception and mask the original problem.
    #
    # NOTE: types.InstanceType covers Python 2 old-style class instances;
    # 'type(etype) is str' covers deprecated string exceptions.
    if (isinstance(etype, BaseException) or
        isinstance(etype, types.InstanceType) or
        etype is None or type(etype) is str):
        return [_format_final_exc_line(etype, value)]
    stype = etype.__name__
    # Non-syntax errors are a single "ExcName: message" line.
    if not issubclass(etype, SyntaxError):
        return [_format_final_exc_line(stype, value)]
    # It was a syntax error; show exactly where the problem was found.
    lines = []
    try:
        # SyntaxError.args carries (msg, (filename, lineno, offset, badline)).
        msg, (filename, lineno, offset, badline) = value.args
    except Exception:
        # Malformed args: fall through and just print the final line.
        pass
    else:
        filename = filename or "<string>"
        lines.append(' File "%s", line %d\n' % (filename, lineno))
        if badline is not None:
            lines.append(' %s\n' % badline.strip())
            if offset is not None:
                caretspace = badline.rstrip('\n')[:offset].lstrip()
                # non-space whitespace (likes tabs) must be kept for alignment
                caretspace = ((c.isspace() and c or ' ') for c in caretspace)
                # only three spaces to account for offset1 == pos 0
                lines.append(' %s^\n' % ''.join(caretspace))
        value = msg
    lines.append(_format_final_exc_line(stype, value))
    return lines
def _format_final_exc_line(etype, value):
    """Return a single newline-terminated string -- the normal case for
    format_exception_only."""
    # A None or empty value prints just the type name, otherwise "Type: value".
    text = _some_str(value)
    if value is None or not text:
        return "%s\n" % etype
    return "%s: %s\n" % (etype, text)
def _some_str(value):
try:
return str(value)
except Exception:
pass
try:
value = unicode(value)
return value.encode("ascii", "backslashreplace")
except Exception:
pass
return '<unprintable %s object>' % type(value).__name__
def print_exc(limit=None, file=None):
    """Shorthand for 'print_exception(sys.exc_type, sys.exc_value, sys.exc_traceback, limit, file)'.

    (In fact, it uses sys.exc_info() to retrieve the same information
    in a thread-safe way.)"""
    out = sys.stderr if file is None else file
    etype, value, tb = sys.exc_info()
    try:
        print_exception(etype, value, tb, limit, out)
    finally:
        # Drop the references to avoid keeping frames alive via the traceback.
        etype = value = tb = None
def format_exc(limit=None):
    """Like print_exc() but return a string."""
    etype, value, tb = sys.exc_info()
    try:
        return ''.join(format_exception(etype, value, tb, limit))
    finally:
        # Break the frame reference cycle created by holding the traceback.
        etype = value = tb = None
def print_last(limit=None, file=None):
    """This is a shorthand for 'print_exception(sys.last_type,
    sys.last_value, sys.last_traceback, limit, file)'."""
    # sys.last_* only exist after an unhandled exception in interactive mode.
    if not hasattr(sys, "last_type"):
        raise ValueError("no last exception")
    out = file if file is not None else sys.stderr
    print_exception(sys.last_type, sys.last_value, sys.last_traceback,
                    limit, out)
def print_stack(f=None, limit=None, file=None):
    """Print a stack trace from its invocation point.

    The optional 'f' argument can be used to specify an alternate
    stack frame at which to start. The optional 'limit' and 'file'
    arguments have the same meaning as for print_exception().
    """
    frame = f
    if frame is None:
        # Raise and catch a throwaway exception to get at the caller's frame.
        try:
            raise ZeroDivisionError
        except ZeroDivisionError:
            frame = sys.exc_info()[2].tb_frame.f_back
    print_list(extract_stack(frame, limit), file)
def format_stack(f=None, limit=None):
    """Shorthand for 'format_list(extract_stack(f, limit))'."""
    frame = f
    if frame is None:
        # Grab the caller's frame via a throwaway exception.
        try:
            raise ZeroDivisionError
        except ZeroDivisionError:
            frame = sys.exc_info()[2].tb_frame.f_back
    return format_list(extract_stack(frame, limit))
def extract_stack(f=None, limit=None):
    """Extract the raw traceback from the current stack frame.

    The return value has the same format as for extract_tb(). The
    optional 'f' and 'limit' arguments have the same meaning as for
    print_stack(). Each item in the list is a quadruple (filename,
    line number, function name, text), and the entries are in order
    from oldest to newest stack frame.
    """
    frame = f
    if frame is None:
        # Use a throwaway exception to reach the caller's frame.
        try:
            raise ZeroDivisionError
        except ZeroDivisionError:
            frame = sys.exc_info()[2].tb_frame.f_back
    if limit is None and hasattr(sys, 'tracebacklimit'):
        limit = sys.tracebacklimit
    entries = []
    depth = 0
    # Walk outward (newest to oldest), then reverse for oldest-first order.
    while frame is not None and (limit is None or depth < limit):
        lineno = frame.f_lineno
        code = frame.f_code
        filename = code.co_filename
        name = code.co_name
        linecache.checkcache(filename)
        source_line = linecache.getline(filename, lineno, frame.f_globals)
        source_line = source_line.strip() if source_line else None
        entries.append((filename, lineno, name, source_line))
        frame = frame.f_back
        depth += 1
    entries.reverse()
    return entries
def tb_lineno(tb):
    """Calculate correct line number of traceback given in tb.

    Obsolete in 2.3: since then the traceback object's own tb_lineno
    attribute is already correct, so this is a plain accessor kept for
    backward compatibility.
    """
    return tb.tb_lineno
| jt6562/XX-Net | python27/1.0/lib/traceback.py | Python | bsd-2-clause | 11,573 |
from urllib import quote
from urlparse import urlparse
import glob
import inspect
import os.path
import re
import time
import traceback
from couchpotato.core.event import fireEvent, addEvent
from couchpotato.core.helpers.encoding import ss, toSafeString, \
toUnicode, sp
from couchpotato.core.helpers.variable import getExt, md5, isLocalIP, scanForPassword, tryInt, getIdentifier
from couchpotato.core.logger import CPLog
from couchpotato.environment import Env
import requests
from requests.packages.urllib3 import Timeout
from requests.packages.urllib3.exceptions import MaxRetryError
from tornado import template
from tornado.web import StaticFileHandler
log = CPLog(__name__)
class Plugin(object):
    """Base class for all CouchPotato plugins.

    Provides settings access, static-asset registration with Tornado,
    rate-limited HTTP fetching with per-host failure tracking, response
    caching, and assorted file-system helpers shared by every plugin.
    """

    _class_name = None  # optional override used by getName()/setName()
    _database = None  # mapping of index name -> index class, registered on 'database.setup'
    plugin_path = None  # directory of the plugin module; set by registerStatic()
    enabled_option = 'enabled'  # settings key consulted by isEnabled()

    auto_register_static = True  # auto-register the plugin's static/ folder on creation

    _needs_shutdown = False  # flipped to True when the app starts shutting down
    _running = None  # list of '<Plugin>.<handler>' markers for in-flight calls

    user_agent = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.8; rv:24.0) Gecko/20130519 Firefox/24.0'

    # Per-host HTTP bookkeeping. NOTE(review): these are class-level dicts,
    # so they are shared across all plugin instances -- presumably intentional
    # (one global rate limiter / failure tracker per host).
    http_last_use = {}  # host -> timestamp of last request
    http_time_between_calls = 0  # minimum seconds between calls to the same host
    http_failed_request = {}  # host -> consecutive failure count
    http_failed_disabled = {}  # host -> time when the host was temporarily disabled

    def __new__(cls, *args, **kwargs):
        # Register events before (and regardless of) any subclass __init__.
        new_plugin = super(Plugin, cls).__new__(cls)
        new_plugin.registerPlugin()
        return new_plugin

    def registerPlugin(self):
        """Hook this plugin into the global event system."""
        addEvent('app.do_shutdown', self.doShutdown)
        addEvent('plugin.running', self.isRunning)
        self._running = []
        if self.auto_register_static:
            self.registerStatic(inspect.getfile(self.__class__))
        # Setup database
        if self._database:
            addEvent('database.setup', self.databaseSetup)

    def databaseSetup(self):
        """Register each declared database index via the event system."""
        for index_name in self._database:
            klass = self._database[index_name]
            fireEvent('database.setup_index', index_name, klass)

    def conf(self, attr, value = None, default = None, section = None):
        """Get (or set, when 'value' is given) a setting for this plugin.

        The section defaults to the plugin name lower-cased, stripped of
        any ':suffix' part.
        """
        class_name = self.getName().lower().split(':')[0].lower()
        return Env.setting(attr, section = section if section else class_name, value = value, default = default)

    def deleteConf(self, attr):
        """Remove a setting from this plugin's section."""
        return Env._settings.delete(attr, section = self.getName().lower().split(':')[0].lower())

    def getName(self):
        # Explicit name set via setName() wins over the class name.
        return self._class_name or self.__class__.__name__

    def setName(self, name):
        self._class_name = name

    def renderTemplate(self, parent_file, templ, **params):
        """Render a Tornado template located next to 'parent_file'."""
        t = template.Template(open(os.path.join(os.path.dirname(parent_file), templ), 'r').read())
        return t.generate(**params)

    def registerStatic(self, plugin_file, add_to_head = True):
        """Expose the plugin's static/ folder through Tornado and optionally
        register its js/css files for inclusion in the page <head>."""
        # Register plugin path
        self.plugin_path = os.path.dirname(plugin_file)
        static_folder = toUnicode(os.path.join(self.plugin_path, 'static'))
        if not os.path.isdir(static_folder):
            return
        # Get plugin_name from PluginName
        s1 = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', self.__class__.__name__)
        class_name = re.sub('([a-z0-9])([A-Z])', r'\1_\2', s1).lower()
        # View path
        path = 'static/plugin/%s/' % class_name
        # Add handler to Tornado
        Env.get('app').add_handlers(".*$", [(Env.get('web_base') + path + '(.*)', StaticFileHandler, {'path': static_folder})])
        # Register for HTML <HEAD>
        if add_to_head:
            for f in glob.glob(os.path.join(self.plugin_path, 'static', '*')):
                ext = getExt(f)
                if ext in ['js', 'css']:
                    fireEvent('register_%s' % ('script' if ext in 'js' else 'style'), path + os.path.basename(f), f)

    def createFile(self, path, content, binary = False):
        """Write 'content' to 'path', creating parent dirs; on failure the
        (possibly partial) file is removed."""
        path = sp(path)
        self.makeDir(os.path.dirname(path))
        if os.path.exists(path):
            log.debug('%s already exists, overwriting file with new version', path)
        try:
            f = open(path, 'w+' if not binary else 'w+b')
            f.write(content)
            f.close()
            os.chmod(path, Env.getPermission('file'))
        except:
            log.error('Unable writing to file "%s": %s', (path, traceback.format_exc()))
            if os.path.isfile(path):
                os.remove(path)

    def makeDir(self, path):
        """Create 'path' (and parents) if missing; return True on success."""
        path = sp(path)
        try:
            if not os.path.isdir(path):
                os.makedirs(path, Env.getPermission('folder'))
            return True
        except Exception as e:
            log.error('Unable to create folder "%s": %s', (path, e))
        return False

    def deleteEmptyFolder(self, folder, show_error = True, only_clean = None):
        """Remove empty sub-directories of 'folder' (optionally restricted to
        names in 'only_clean'), then try to remove 'folder' itself."""
        folder = sp(folder)
        for item in os.listdir(folder):
            full_folder = os.path.join(folder, item)
            if not only_clean or (item in only_clean and os.path.isdir(full_folder)):
                for root, dirs, files in os.walk(full_folder):
                    for dir_name in dirs:
                        full_path = os.path.join(root, dir_name)
                        if len(os.listdir(full_path)) == 0:
                            try:
                                os.rmdir(full_path)
                            except:
                                if show_error:
                                    log.error('Couldn\'t remove empty directory %s: %s', (full_path, traceback.format_exc()))
        # Finally remove the root folder itself if it ended up empty.
        try:
            os.rmdir(folder)
        except:
            if show_error:
                log.error('Couldn\'t remove empty directory %s: %s', (folder, traceback.format_exc()))

    # http request
    def urlopen(self, url, timeout = 30, data = None, headers = None, files = None, show_error = True):
        """Fetch 'url' (POST when data/files given, otherwise GET).

        Applies default headers, per-host rate limiting (see wait()) and
        per-host failure tracking: after more than 5 consecutive failures a
        non-local host is disabled for 15 minutes. Returns the response body,
        or '' when the host is currently disabled and show_error is True.
        """
        url = quote(ss(url), safe = "%/:=&?~#+!$,;'@()*[]")
        if not headers: headers = {}
        if not data: data = {}
        # Fill in some headers
        parsed_url = urlparse(url)
        host = '%s%s' % (parsed_url.hostname, (':' + str(parsed_url.port) if parsed_url.port else ''))
        headers['Referer'] = headers.get('Referer', '%s://%s' % (parsed_url.scheme, host))
        headers['Host'] = headers.get('Host', host)
        headers['User-Agent'] = headers.get('User-Agent', self.user_agent)
        headers['Accept-encoding'] = headers.get('Accept-encoding', 'gzip')
        headers['Connection'] = headers.get('Connection', 'keep-alive')
        headers['Cache-Control'] = headers.get('Cache-Control', 'max-age=0')
        r = Env.get('http_opener')
        # Don't try for failed requests
        if self.http_failed_disabled.get(host, 0) > 0:
            # 900 seconds = the 15-minute cool-down window.
            if self.http_failed_disabled[host] > (time.time() - 900):
                log.info2('Disabled calls to %s for 15 minutes because so many failed requests.', host)
                if not show_error:
                    raise Exception('Disabled calls to %s for 15 minutes because so many failed requests')
                else:
                    return ''
            else:
                del self.http_failed_request[host]
                del self.http_failed_disabled[host]
        self.wait(host)
        try:
            kwargs = {
                'headers': headers,
                'data': data if len(data) > 0 else None,
                'timeout': timeout,
                'files': files,
                'verify': False, #verify_ssl, Disable for now as to many wrongly implemented certificates..
            }
            method = 'post' if len(data) > 0 or files else 'get'
            log.info('Opening url: %s %s, data: %s', (method, url, [x for x in data.keys()] if isinstance(data, dict) else 'with data'))
            response = r.request(method, url, **kwargs)
            if response.status_code == requests.codes.ok:
                data = response.content
            else:
                response.raise_for_status()
            self.http_failed_request[host] = 0
        except (IOError, MaxRetryError, Timeout):
            if show_error:
                log.error('Failed opening url in %s: %s %s', (self.getName(), url, traceback.format_exc(0)))
            # Save failed requests by hosts
            try:
                if not self.http_failed_request.get(host):
                    self.http_failed_request[host] = 1
                else:
                    self.http_failed_request[host] += 1
                    # Disable temporarily
                    if self.http_failed_request[host] > 5 and not isLocalIP(host):
                        self.http_failed_disabled[host] = time.time()
            except:
                log.debug('Failed logging failed requests for %s: %s', (url, traceback.format_exc()))
            raise
        self.http_last_use[host] = time.time()
        return data

    def wait(self, host = ''):
        """Sleep as needed to honour http_time_between_calls for 'host'."""
        if self.http_time_between_calls == 0:
            return
        now = time.time()
        last_use = self.http_last_use.get(host, 0)
        if last_use > 0:
            # (last_use - now) is negative elapsed time; add the interval to
            # get the remaining wait.
            wait = (last_use - now) + self.http_time_between_calls
            if wait > 0:
                log.debug('Waiting for %s, %d seconds', (self.getName(), wait))
                time.sleep(wait)

    def beforeCall(self, handler):
        # Mark the handler as running for shutdown bookkeeping.
        self.isRunning('%s.%s' % (self.getName(), handler.__name__))

    def afterCall(self, handler):
        # Clear the running marker set by beforeCall().
        self.isRunning('%s.%s' % (self.getName(), handler.__name__), False)

    def doShutdown(self):
        self.shuttingDown(True)
        return True

    def shuttingDown(self, value = None):
        """Get (value is None) or set the shutting-down flag."""
        if value is None:
            return self._needs_shutdown
        self._needs_shutdown = value

    def isRunning(self, value = None, boolean = True):
        """Query (value is None), add (boolean True) or remove (boolean
        False) a running-handler marker."""
        if value is None:
            return self._running
        if boolean:
            self._running.append(value)
        else:
            try:
                self._running.remove(value)
            except:
                log.error("Something went wrong when finishing the plugin function. Could not find the 'is_running' key")

    def getCache(self, cache_key, url = None, **kwargs):
        """Return a cached value for 'cache_key'; on a miss optionally fetch
        'url' via urlopen() and cache the result.

        Caching is skipped for requests that carry POST data or files.
        """
        use_cache = not len(kwargs.get('data', {})) > 0 and not kwargs.get('files')
        if use_cache:
            cache_key_md5 = md5(cache_key)
            cache = Env.get('cache').get(cache_key_md5)
            if cache:
                if not Env.get('dev'): log.debug('Getting cache %s', cache_key)
                return cache
        if url:
            try:
                cache_timeout = 300
                if 'cache_timeout' in kwargs:
                    cache_timeout = kwargs.get('cache_timeout')
                    # Remove it: urlopen() doesn't accept this keyword.
                    del kwargs['cache_timeout']
                data = self.urlopen(url, **kwargs)
                if data and cache_timeout > 0 and use_cache:
                    self.setCache(cache_key, data, timeout = cache_timeout)
                return data
            except:
                if not kwargs.get('show_error', True):
                    raise
                log.debug('Failed getting cache: %s', (traceback.format_exc(0)))
                return ''

    def setCache(self, cache_key, value, timeout = 300):
        """Store 'value' in the cache under the md5 of 'cache_key'."""
        cache_key_md5 = md5(cache_key)
        log.debug('Setting cache %s', cache_key)
        Env.get('cache').set(cache_key_md5, value, timeout)
        return value

    def createNzbName(self, data, media):
        """Build a safe release filename (without extension), embedding the
        cp() identifier tag and any detected password."""
        release_name = data.get('name')
        tag = self.cpTag(media)
        # Check if password is filename
        name_password = scanForPassword(data.get('name'))
        if name_password:
            release_name, password = name_password
            tag += '{{%s}}' % password
        elif data.get('password'):
            tag += '{{%s}}' % data.get('password')
        max_length = 127 - len(tag) # Some filesystems don't support 128+ long filenames
        return '%s%s' % (toSafeString(toUnicode(release_name)[:max_length]), tag)

    def createFileName(self, data, filedata, media):
        """Return the download filename including extension; NZB payloads
        that don't look like XML are assumed to be rar archives."""
        name = self.createNzbName(data, media)
        if data.get('protocol') == 'nzb' and 'DOCTYPE nzb' not in filedata and '</nzb>' not in filedata:
            return '%s.%s' % (name, 'rar')
        return '%s.%s' % (name, data.get('protocol'))

    def cpTag(self, media):
        # Only tag releases when the renamer is enabled.
        if Env.setting('enabled', 'renamer'):
            identifier = getIdentifier(media)
            return '.cp(' + identifier + ')' if identifier else ''
        return ''

    def checkFilesChanged(self, files, unchanged_for = 60):
        """Check whether any of 'files' changed within 'unchanged_for'
        seconds (or disappeared). Returns (seconds_since_change_or_now,
        time_string) when too new, else (False, None)."""
        now = time.time()
        file_too_new = False
        file_time = []
        for cur_file in files:
            # File got removed while checking
            if not os.path.isfile(cur_file):
                file_too_new = now
                break
            # File has changed in last 60 seconds
            file_time = self.getFileTimes(cur_file)
            for t in file_time:
                if t > now - unchanged_for:
                    file_too_new = tryInt(time.time() - t)
                    break
            if file_too_new:
                break
        if file_too_new:
            try:
                time_string = time.ctime(file_time[0])
            except:
                try:
                    time_string = time.ctime(file_time[1])
                except:
                    time_string = 'unknown'
            return file_too_new, time_string
        return False, None

    def getFileTimes(self, file_path):
        # ctime is only meaningful off POSIX; use 0 there so it never wins.
        return [os.path.getmtime(file_path), os.path.getctime(file_path) if os.name != 'posix' else 0]

    def isDisabled(self):
        return not self.isEnabled()

    def isEnabled(self):
        # Treat a missing setting as enabled by default.
        return self.conf(self.enabled_option) or self.conf(self.enabled_option) is None
| adaxi/couchpotato | couchpotato/core/plugins/base.py | Python | gpl-3.0 | 13,577 |
#!/usr/local/bin/python
import argparse
import sys
import os
import time
import datetime
import tempfile
import commands
from jira.client import JIRA
def get_jira_config():
    """Read JIRA credentials from ~/jira.ini.

    The file holds simple 'key=value' lines (typically 'user' and
    'password').  Returns the options as a dict.
    """
    # read the config file
    home = os.getenv('HOME')
    home = home.rstrip('/')
    jira_config = {}
    # Split on the first '=' only so values may themselves contain '=',
    # and skip blank lines; the old dict(line.split('=')) crashed on both.
    # Use 'with' so the file handle is closed deterministically.
    with open(home + '/jira.ini') as config_file:
        for line in config_file:
            line = line.strip()
            if not line:
                continue
            key, _, value = line.partition('=')
            jira_config[key] = value
    return jira_config
def get_jira():
    """Build an authenticated JIRA client for issues.apache.org."""
    jira_config = get_jira_config()
    options = {
        'server': 'https://issues.apache.org/jira'
    }
    # Authenticate with the credentials read from ~/jira.ini.
    return JIRA(options, basic_auth=(jira_config['user'], jira_config['password']))
def cmd_exists(cmd):
    # Run 'cmd' through the shell and return its exit status.
    # NOTE(review): despite the name this returns a status code, not a
    # boolean -- 0 means the command ran successfully, and callers
    # compare the result with '== 0'.
    status, result = commands.getstatusoutput(cmd)
    return status
def main():
    """Create/update a ReviewBoard request for a JIRA issue and attach
    the corresponding git patch to the issue.

    Workflow: parse args, locate an RBTools client, refuse to run on a
    stale branch, post/update the review, generate the patch with
    'git format-patch', upload it to JIRA and transition the issue to
    Patch Available.
    """
    popt = argparse.ArgumentParser(description='Sqoop patch review tool')
    popt.add_argument('-b', '--branch', action='store', dest='branch', required=True, help='Tracking branch to create diff against')
    popt.add_argument('-j', '--jira', action='store', dest='jira', required=True, help='JIRA corresponding to the reviewboard')
    popt.add_argument('-s', '--summary', action='store', dest='summary', required=False, help='Summary for the reviewboard')
    popt.add_argument('-d', '--description', action='store', dest='description', required=False, help='Description for reviewboard')
    popt.add_argument('-r', '--rb', action='store', dest='reviewboard', required=False, help='Review board that needs to be updated')
    popt.add_argument('-t', '--testing-done', action='store', dest='testing', required=False, help='Text for the Testing Done section of the reviewboard')
    popt.add_argument('-db', '--debug', action='store_true', required=False, help='Enable debug mode')
    opt = popt.parse_args()
    # Pick whichever RBTools front-end is installed (status 0 == found).
    post_review_tool = None
    if (cmd_exists("post-review") == 0):
        post_review_tool = "post-review"
    elif (cmd_exists("rbt") == 0):
        post_review_tool = "rbt post"
    else:
        print "please install RBTools"
        sys.exit(1)
    # Updates of an existing review get a timestamped patch name so the
    # JIRA attachments stay distinguishable.
    patch_file=tempfile.gettempdir() + "/" + opt.jira + ".patch"
    if opt.reviewboard:
        ts = time.time()
        st = datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d_%H:%M:%S')
        patch_file=tempfile.gettempdir() + "/" + opt.jira + '_' + st + '.patch'
    # first check if rebase is needed
    git_branch_hash="git rev-parse " + opt.branch
    p_now=os.popen(git_branch_hash)
    branch_now=p_now.read()
    p_now.close()
    git_common_ancestor="git merge-base " + opt.branch + " HEAD"
    p_then=os.popen(git_common_ancestor)
    branch_then=p_then.read()
    p_then.close()
    # If the merge-base differs from the branch tip, HEAD is based on an
    # older revision and needs a rebase before posting.
    if branch_now != branch_then:
        print 'ERROR: Your current working branch is from an older version of ' + opt.branch + '. Please rebase first by using git pull --rebase'
        sys.exit(1)
    git_configure_reviewboard="git config reviewboard.url https://reviews.apache.org"
    print "Configuring reviewboard url to https://reviews.apache.org"
    p=os.popen(git_configure_reviewboard)
    p.close()
    git_remote_update="git remote update"
    print "Updating your remote branches to pull the latest changes"
    p=os.popen(git_remote_update)
    p.close()
    # Assemble the RBTools command line from the options.
    rb_command= post_review_tool + " --publish --tracking-branch " + opt.branch + " --target-groups=Sqoop --bugs-closed=" + opt.jira
    if opt.debug:
        rb_command=rb_command + " --debug"
    summary="Patch for " + opt.jira
    if opt.summary:
        summary=opt.summary
    rb_command=rb_command + " --summary \"" + summary + "\""
    if opt.description:
        rb_command=rb_command + " --description \"" + opt.description + "\""
    if opt.reviewboard:
        rb_command=rb_command + " -r " + opt.reviewboard
    if opt.testing:
        rb_command=rb_command + " --testing-done=" + opt.testing
    if opt.debug:
        print rb_command
    # Run RBTools and scrape its output for the review URL / error text.
    p=os.popen(rb_command)
    rb_url=""
    for line in p:
        print line
        if line.startswith('http'):
            rb_url = line
        elif line.startswith("There don't seem to be any diffs"):
            print 'ERROR: Your reviewboard was not created/updated since there was no diff to upload. The reasons that can cause this issue are 1) Your diff is not checked into your local branch. Please check in the diff to the local branch and retry 2) You are not specifying the local branch name as part of the --branch option. Please specify the remote branch name obtained from git branch -r'
            p.close()
            sys.exit(1)
        elif line.startswith("Your review request still exists, but the diff is not attached") and not opt.debug:
            print 'ERROR: Your reviewboard was not created/updated. Please run the script with the --debug option to troubleshoot the problem'
            p.close()
            sys.exit(1)
    if p.close() != None:
        print 'ERROR: reviewboard update failed. Exiting.'
        sys.exit(1)
    if opt.debug:
        print 'rb url=',rb_url
    # Generate the patch against the tracking branch and attach it to JIRA.
    git_command="git format-patch " + opt.branch + " --stdout > " + patch_file
    if opt.debug:
        print git_command
    p=os.popen(git_command)
    p.close()
    print 'Creating diff against', opt.branch, 'and uploading patch to JIRA',opt.jira
    jira=get_jira()
    issue = jira.issue(opt.jira)
    attachment=open(patch_file)
    jira.add_attachment(issue,attachment)
    attachment.close()
    comment="Created reviewboard "
    if not opt.reviewboard:
        print 'Created a new reviewboard',rb_url,
    else:
        print 'Updated reviewboard',rb_url
        comment="Updated reviewboard "
    comment = comment + rb_url + ' against branch ' + opt.branch
    jira.add_comment(opt.jira, comment)
    #update the JIRA status to PATCH AVAILABLE
    transitions = jira.transitions(issue)
    jira_transitions ={}
    for t in transitions:
        jira_transitions[t['name']] = t['id']
    jira_config = get_jira_config()
    if('Submit Patch' in jira_transitions):
        jira.transition_issue(issue, jira_transitions['Submit Patch'] , assignee={'name': jira_config['user']} )
# Script entry point: propagate main()'s return value as the exit status.
if __name__ == '__main__':
    sys.exit(main())
| dlanza1/parquet-mr | sqoop-patch-review.py | Python | apache-2.0 | 5,767 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import json
import logging
import re
from babelfish import Language, language_converters
from guessit import guessit
from requests import Session
from subliminal.cache import EPISODE_EXPIRATION_TIME, region
from subliminal.exceptions import ProviderError
from subliminal.matches import guess_matches
from subliminal.providers import ParserBeautifulSoup, Provider
from subliminal.score import get_equivalent_release_groups
from subliminal.subtitle import Subtitle, fix_line_ending
from subliminal.utils import sanitize, sanitize_release_group
from subliminal.video import Episode
logger = logging.getLogger(__name__)
language_converters.register('subtitulamos = medusa.subtitle_providers.converters.subtitulamos:SubtitulamosConverter')
class SubtitulamosSubtitle(Subtitle):
    """Subtitulamos Subtitle."""
    provider_name = 'subtitulamos'

    def __init__(self, language, hearing_impaired, page_link, series, season, episode, title, year, version,
                 download_link):
        """Store the episode metadata and the direct download link.

        'version' is the release/version string shown on the site and is
        matched against the video's release group and resolution.
        """
        super(SubtitulamosSubtitle, self).__init__(language, hearing_impaired, page_link)
        self.page_link = page_link
        self.series = series
        self.season = season
        self.episode = episode
        self.title = title
        self.year = year
        self.version = version
        self.download_link = download_link

    @property
    def id(self):
        # The download URL is unique per subtitle, so it doubles as the id.
        return self.download_link

    def get_matches(self, video):
        """Return the set of attributes of 'video' this subtitle matches."""
        matches = set()
        # series name
        if video.series and sanitize(self.series) == sanitize(video.series):
            matches.add('series')
        # season
        if video.season and self.season == video.season:
            matches.add('season')
        # episode
        if video.episode and self.episode == video.episode:
            matches.add('episode')
        # title of the episode
        if video.title and sanitize(self.title) == sanitize(video.title):
            matches.add('title')
        # year
        if video.original_series and self.year is None or video.year and video.year == self.year:
            matches.add('year')
        # release_group
        if (video.release_group and self.version
                and any(r in sanitize_release_group(self.version)
                        for r in get_equivalent_release_groups(
                            sanitize_release_group(video.release_group)))):
            matches.add('release_group')
        # resolution
        if video.resolution and self.version and video.resolution in self.version.lower():
            matches.add('resolution')
        # other properties
        matches |= guess_matches(video, guessit(self.version), partial=True)
        return matches
class SubtitulamosProvider(Provider):
    """Subtitulamos Provider."""
    languages = {Language('por', 'BR')} | {Language(l) for l in [
        'cat', 'eng', 'glg', 'por', 'spa'
    ]}
    video_types = (Episode,)
    server_url = 'https://www.subtitulamos.tv/'
    search_url = server_url + 'search/query'

    def initialize(self):
        """Create the HTTP session used for all provider requests."""
        self.session = Session()
        self.session.headers['User-Agent'] = self.user_agent

    def terminate(self):
        """Close the HTTP session."""
        self.session.close()

    @region.cache_on_arguments(expiration_time=EPISODE_EXPIRATION_TIME)
    def _search_url_titles(self, series, season, episode, year=None):
        """Search the URL titles by kind for the given `title`, `season` and `episode`.

        :param str series: series to search for.
        :param int season: season to search for.
        :param int episode: episode to search for.
        :param int year: year to search for.
        :return: the episode URL.
        :rtype: str
        """
        # make the search
        logger.info('Searching episode url for %s, season %d, episode %d', series, season, episode)
        episode_url = None

        search = '{} {}x{}'.format(series, season, episode)
        r = self.session.get(self.search_url, headers={'Referer': self.server_url}, params={'q': search}, timeout=10)
        r.raise_for_status()

        if r.status_code != 200:
            logger.warning('Error getting episode url')
            # Format the message ourselves: unlike the logging calls above,
            # ProviderError does not apply %-style arguments, so the original
            # two-argument form left the placeholder unformatted.
            raise ProviderError('%s: Error getting episode url' % self.__class__.__name__.upper())

        results = json.loads(r.text)
        for result in results:
            title = sanitize(result['name'])

            # attempt series with year
            if sanitize('{} ({})'.format(series, year)) in title:
                for episode_data in result['episodes']:
                    if season == episode_data['season'] and episode == episode_data['number']:
                        episode_url = self.server_url + 'episodes/{}'.format(episode_data['id'])
                        logger.info('Episode url found with year %s', episode_url)
                        return episode_url
            # attempt series without year
            elif sanitize(series) in title:
                for episode_data in result['episodes']:
                    if season == episode_data['season'] and episode == episode_data['number']:
                        episode_url = self.server_url + 'episodes/{}'.format(episode_data['id'])
                        logger.info('Episode url found without year %s', episode_url)
                        return episode_url

        return episode_url

    def query(self, series, season, episode, year=None):
        """Scrape the episode page and return its list of subtitles."""
        # get the episode url
        episode_url = self._search_url_titles(series, season, episode, year)
        if episode_url is None:
            logger.info('No episode url found for %s, season %d, episode %d', series, season, episode)
            return []

        r = self.session.get(episode_url, headers={'Referer': self.server_url}, timeout=10)
        r.raise_for_status()
        soup = ParserBeautifulSoup(r.content, ['lxml', 'html.parser'])

        # get episode title
        logger.debug('Getting episode title')
        title_pattern = re.compile('{}x{:02d} - (.+)'.format(season, episode))
        title = title_pattern.search(soup.select('.episode-name')[0].get_text(strip=True).lower()).group(1)
        logger.debug('Episode title found: "%s"', title.upper())

        subtitles = []
        for sub in soup.find_all('div', attrs={'id': 'progress_buttons_row'}):
            # read the language
            language = Language.fromsubtitulamos(
                sub.find_previous('div', class_='subtitle_language').get_text(strip=True)
            )
            hearing_impaired = False

            # modify spanish latino subtitle language to only spanish and set hearing_impaired = True
            # because if exists spanish and spanish latino subtitle for the same episode, the score will be
            # higher with spanish subtitle. Spanish subtitle takes priority.
            if language == Language('spa', 'MX'):
                language = Language('spa')
                hearing_impaired = True

            # read the release subtitle
            release = sub.find_next('div', class_='version_name').get_text(strip=True)

            # ignore incomplete subtitles
            status = sub.find_next('div', class_='subtitle_buttons').contents[1]
            # if there isn't <a> tag, subtitle not finished and no link available to download it
            if status.name != 'a':
                logger.info('Ignoring subtitle in [%s] because it is not finished', language)
                continue

            # read the subtitle url
            subtitle_url = self.server_url + status['href'][1:]
            subtitle = SubtitulamosSubtitle(language, hearing_impaired, episode_url, series, season, episode, title,
                                            year, release, subtitle_url)
            logger.info('Found subtitle %r', subtitle)
            subtitles.append(subtitle)

        return subtitles

    def list_subtitles(self, video, languages):
        """Return the subtitles for 'video' restricted to 'languages'."""
        return [s for s in self.query(video.series, video.season, video.episode,
                                      video.year)
                if s.language in languages]

    def download_subtitle(self, subtitle):
        """Fetch the subtitle content and store it on the subtitle object."""
        # download the subtitle
        logger.info('Downloading subtitle %s', subtitle.download_link)
        r = self.session.get(subtitle.download_link, headers={'Referer': subtitle.page_link},
                             timeout=10)
        r.raise_for_status()

        subtitle.content = fix_line_ending(r.content)
| pymedusa/Medusa | medusa/subtitle_providers/subtitulamos.py | Python | gpl-3.0 | 8,471 |
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union
from google.api_core import gapic_v1
from google.api_core import grpc_helpers_async
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
import grpc # type: ignore
from grpc.experimental import aio # type: ignore
from google.cloud.servicedirectory_v1beta1.types import endpoint
from google.cloud.servicedirectory_v1beta1.types import endpoint as gcs_endpoint
from google.cloud.servicedirectory_v1beta1.types import namespace
from google.cloud.servicedirectory_v1beta1.types import namespace as gcs_namespace
from google.cloud.servicedirectory_v1beta1.types import registration_service
from google.cloud.servicedirectory_v1beta1.types import service
from google.cloud.servicedirectory_v1beta1.types import service as gcs_service
from google.iam.v1 import iam_policy_pb2 # type: ignore
from google.iam.v1 import policy_pb2 # type: ignore
from google.protobuf import empty_pb2 # type: ignore
from .base import RegistrationServiceTransport, DEFAULT_CLIENT_INFO
from .grpc import RegistrationServiceGrpcTransport
class RegistrationServiceGrpcAsyncIOTransport(RegistrationServiceTransport):
"""gRPC AsyncIO backend transport for RegistrationService.
Service Directory API for registering services. It defines the
following resource model:
- The API has a collection of
[Namespace][google.cloud.servicedirectory.v1beta1.Namespace]
resources, named ``projects/*/locations/*/namespaces/*``.
- Each Namespace has a collection of
[Service][google.cloud.servicedirectory.v1beta1.Service]
resources, named
``projects/*/locations/*/namespaces/*/services/*``.
- Each Service has a collection of
[Endpoint][google.cloud.servicedirectory.v1beta1.Endpoint]
resources, named
``projects/*/locations/*/namespaces/*/services/*/endpoints/*``.
This class defines the same methods as the primary client, so the
primary client can load the underlying transport implementation
and call it.
It sends protocol buffers over the wire using gRPC (which is built on
top of HTTP/2); the ``grpcio`` package must be installed.
"""
_grpc_channel: aio.Channel
_stubs: Dict[str, Callable] = {}
@classmethod
def create_channel(
cls,
host: str = "servicedirectory.googleapis.com",
credentials: ga_credentials.Credentials = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
**kwargs,
) -> aio.Channel:
"""Create and return a gRPC AsyncIO channel object.
Args:
host (Optional[str]): The host for the channel to use.
credentials (Optional[~.Credentials]): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If
none are specified, the client will attempt to ascertain
the credentials from the environment.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
scopes (Optional[Sequence[str]]): A optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
kwargs (Optional[dict]): Keyword arguments, which are passed to the
channel creation.
Returns:
aio.Channel: A gRPC AsyncIO channel object.
"""
return grpc_helpers_async.create_channel(
host,
credentials=credentials,
credentials_file=credentials_file,
quota_project_id=quota_project_id,
default_scopes=cls.AUTH_SCOPES,
scopes=scopes,
default_host=cls.DEFAULT_HOST,
**kwargs,
)
    def __init__(
        self,
        *,
        host: str = "servicedirectory.googleapis.com",
        credentials: ga_credentials.Credentials = None,
        credentials_file: Optional[str] = None,
        scopes: Optional[Sequence[str]] = None,
        channel: aio.Channel = None,
        api_mtls_endpoint: str = None,
        client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
        ssl_channel_credentials: grpc.ChannelCredentials = None,
        client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
        quota_project_id=None,
        client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
        always_use_jwt_access: Optional[bool] = False,
    ) -> None:
        """Instantiate the transport.
        Args:
            host (Optional[str]):
                 The hostname to connect to.
            credentials (Optional[google.auth.credentials.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify the application to the service; if none
                are specified, the client will attempt to ascertain the
                credentials from the environment.
                This argument is ignored if ``channel`` is provided.
            credentials_file (Optional[str]): A file with credentials that can
                be loaded with :func:`google.auth.load_credentials_from_file`.
                This argument is ignored if ``channel`` is provided.
            scopes (Optional[Sequence[str]]): A optional list of scopes needed for this
                service. These are only used when credentials are not specified and
                are passed to :func:`google.auth.default`.
            channel (Optional[aio.Channel]): A ``Channel`` instance through
                which to make calls.
            api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
                If provided, it overrides the ``host`` argument and tries to create
                a mutual TLS channel with client SSL credentials from
                ``client_cert_source`` or application default SSL credentials.
            client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
                Deprecated. A callback to provide client SSL certificate bytes and
                private key bytes, both in PEM format. It is ignored if
                ``api_mtls_endpoint`` is None.
            ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
                for the grpc channel. It is ignored if ``channel`` is provided.
            client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
                A callback to provide client certificate bytes and private key bytes,
                both in PEM format. It is used to configure a mutual TLS channel. It is
                ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
            quota_project_id (Optional[str]): An optional project to use for billing
                and quota.
            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
                The client info used to send a user-agent string along with
                API requests. If ``None``, then default info will be used.
                Generally, you only need to set this if you're developing
                your own client library.
            always_use_jwt_access (Optional[bool]): Whether self signed JWT should
                be used for service account credentials.
        Raises:
            google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport
                creation failed for any reason.
            google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
                and ``credentials_file`` are passed.
        """
        self._grpc_channel = None
        self._ssl_channel_credentials = ssl_channel_credentials
        self._stubs: Dict[str, Callable] = {}
        if api_mtls_endpoint:
            warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
        if client_cert_source:
            warnings.warn("client_cert_source is deprecated", DeprecationWarning)
        # Channel/TLS decision tree: an explicit channel wins outright;
        # otherwise the deprecated mTLS args, then the modern mTLS callback,
        # decide which SSL credentials the new channel will use.
        if channel:
            # Ignore credentials if a channel was passed.
            credentials = False
            # If a channel was explicitly provided, set it.
            self._grpc_channel = channel
            self._ssl_channel_credentials = None
        else:
            if api_mtls_endpoint:
                host = api_mtls_endpoint
                # Create SSL credentials with client_cert_source or application
                # default SSL credentials.
                if client_cert_source:
                    cert, key = client_cert_source()
                    self._ssl_channel_credentials = grpc.ssl_channel_credentials(
                        certificate_chain=cert, private_key=key
                    )
                else:
                    self._ssl_channel_credentials = SslCredentials().ssl_credentials
            else:
                if client_cert_source_for_mtls and not ssl_channel_credentials:
                    cert, key = client_cert_source_for_mtls()
                    self._ssl_channel_credentials = grpc.ssl_channel_credentials(
                        certificate_chain=cert, private_key=key
                    )
        # The base transport sets the host, credentials and scopes
        super().__init__(
            host=host,
            credentials=credentials,
            credentials_file=credentials_file,
            scopes=scopes,
            quota_project_id=quota_project_id,
            client_info=client_info,
            always_use_jwt_access=always_use_jwt_access,
        )
        if not self._grpc_channel:
            self._grpc_channel = type(self).create_channel(
                self._host,
                # use the credentials which are saved
                credentials=self._credentials,
                # Set ``credentials_file`` to ``None`` here as
                # the credentials that we saved earlier should be used.
                credentials_file=None,
                scopes=self._scopes,
                ssl_credentials=self._ssl_channel_credentials,
                quota_project_id=quota_project_id,
                options=[
                    # Unlimited message sizes in both directions.
                    ("grpc.max_send_message_length", -1),
                    ("grpc.max_receive_message_length", -1),
                ],
            )
        # Wrap messages. This must be done after self._grpc_channel exists
        self._prep_wrapped_messages(client_info)
    @property
    def grpc_channel(self) -> aio.Channel:
        """Return the channel used to connect to this service.
        The channel is created once in ``__init__``; this property only
        returns the cached instance, so repeated calls return the same
        channel.
        """
        # Return the channel from cache.
        return self._grpc_channel
@property
def create_namespace(
self,
) -> Callable[
[registration_service.CreateNamespaceRequest],
Awaitable[gcs_namespace.Namespace],
]:
r"""Return a callable for the create namespace method over gRPC.
Creates a namespace, and returns the new namespace.
Returns:
Callable[[~.CreateNamespaceRequest],
Awaitable[~.Namespace]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "create_namespace" not in self._stubs:
self._stubs["create_namespace"] = self.grpc_channel.unary_unary(
"/google.cloud.servicedirectory.v1beta1.RegistrationService/CreateNamespace",
request_serializer=registration_service.CreateNamespaceRequest.serialize,
response_deserializer=gcs_namespace.Namespace.deserialize,
)
return self._stubs["create_namespace"]
@property
def list_namespaces(
self,
) -> Callable[
[registration_service.ListNamespacesRequest],
Awaitable[registration_service.ListNamespacesResponse],
]:
r"""Return a callable for the list namespaces method over gRPC.
Lists all namespaces.
Returns:
Callable[[~.ListNamespacesRequest],
Awaitable[~.ListNamespacesResponse]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "list_namespaces" not in self._stubs:
self._stubs["list_namespaces"] = self.grpc_channel.unary_unary(
"/google.cloud.servicedirectory.v1beta1.RegistrationService/ListNamespaces",
request_serializer=registration_service.ListNamespacesRequest.serialize,
response_deserializer=registration_service.ListNamespacesResponse.deserialize,
)
return self._stubs["list_namespaces"]
@property
def get_namespace(
self,
) -> Callable[
[registration_service.GetNamespaceRequest], Awaitable[namespace.Namespace]
]:
r"""Return a callable for the get namespace method over gRPC.
Gets a namespace.
Returns:
Callable[[~.GetNamespaceRequest],
Awaitable[~.Namespace]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "get_namespace" not in self._stubs:
self._stubs["get_namespace"] = self.grpc_channel.unary_unary(
"/google.cloud.servicedirectory.v1beta1.RegistrationService/GetNamespace",
request_serializer=registration_service.GetNamespaceRequest.serialize,
response_deserializer=namespace.Namespace.deserialize,
)
return self._stubs["get_namespace"]
@property
def update_namespace(
self,
) -> Callable[
[registration_service.UpdateNamespaceRequest],
Awaitable[gcs_namespace.Namespace],
]:
r"""Return a callable for the update namespace method over gRPC.
Updates a namespace.
Returns:
Callable[[~.UpdateNamespaceRequest],
Awaitable[~.Namespace]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "update_namespace" not in self._stubs:
self._stubs["update_namespace"] = self.grpc_channel.unary_unary(
"/google.cloud.servicedirectory.v1beta1.RegistrationService/UpdateNamespace",
request_serializer=registration_service.UpdateNamespaceRequest.serialize,
response_deserializer=gcs_namespace.Namespace.deserialize,
)
return self._stubs["update_namespace"]
@property
def delete_namespace(
self,
) -> Callable[
[registration_service.DeleteNamespaceRequest], Awaitable[empty_pb2.Empty]
]:
r"""Return a callable for the delete namespace method over gRPC.
Deletes a namespace. This also deletes all services
and endpoints in the namespace.
Returns:
Callable[[~.DeleteNamespaceRequest],
Awaitable[~.Empty]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "delete_namespace" not in self._stubs:
self._stubs["delete_namespace"] = self.grpc_channel.unary_unary(
"/google.cloud.servicedirectory.v1beta1.RegistrationService/DeleteNamespace",
request_serializer=registration_service.DeleteNamespaceRequest.serialize,
response_deserializer=empty_pb2.Empty.FromString,
)
return self._stubs["delete_namespace"]
@property
def create_service(
self,
) -> Callable[
[registration_service.CreateServiceRequest], Awaitable[gcs_service.Service]
]:
r"""Return a callable for the create service method over gRPC.
Creates a service, and returns the new service.
Returns:
Callable[[~.CreateServiceRequest],
Awaitable[~.Service]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "create_service" not in self._stubs:
self._stubs["create_service"] = self.grpc_channel.unary_unary(
"/google.cloud.servicedirectory.v1beta1.RegistrationService/CreateService",
request_serializer=registration_service.CreateServiceRequest.serialize,
response_deserializer=gcs_service.Service.deserialize,
)
return self._stubs["create_service"]
@property
def list_services(
self,
) -> Callable[
[registration_service.ListServicesRequest],
Awaitable[registration_service.ListServicesResponse],
]:
r"""Return a callable for the list services method over gRPC.
Lists all services belonging to a namespace.
Returns:
Callable[[~.ListServicesRequest],
Awaitable[~.ListServicesResponse]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "list_services" not in self._stubs:
self._stubs["list_services"] = self.grpc_channel.unary_unary(
"/google.cloud.servicedirectory.v1beta1.RegistrationService/ListServices",
request_serializer=registration_service.ListServicesRequest.serialize,
response_deserializer=registration_service.ListServicesResponse.deserialize,
)
return self._stubs["list_services"]
@property
def get_service(
self,
) -> Callable[[registration_service.GetServiceRequest], Awaitable[service.Service]]:
r"""Return a callable for the get service method over gRPC.
Gets a service.
Returns:
Callable[[~.GetServiceRequest],
Awaitable[~.Service]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "get_service" not in self._stubs:
self._stubs["get_service"] = self.grpc_channel.unary_unary(
"/google.cloud.servicedirectory.v1beta1.RegistrationService/GetService",
request_serializer=registration_service.GetServiceRequest.serialize,
response_deserializer=service.Service.deserialize,
)
return self._stubs["get_service"]
@property
def update_service(
self,
) -> Callable[
[registration_service.UpdateServiceRequest], Awaitable[gcs_service.Service]
]:
r"""Return a callable for the update service method over gRPC.
Updates a service.
Returns:
Callable[[~.UpdateServiceRequest],
Awaitable[~.Service]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "update_service" not in self._stubs:
self._stubs["update_service"] = self.grpc_channel.unary_unary(
"/google.cloud.servicedirectory.v1beta1.RegistrationService/UpdateService",
request_serializer=registration_service.UpdateServiceRequest.serialize,
response_deserializer=gcs_service.Service.deserialize,
)
return self._stubs["update_service"]
@property
def delete_service(
self,
) -> Callable[
[registration_service.DeleteServiceRequest], Awaitable[empty_pb2.Empty]
]:
r"""Return a callable for the delete service method over gRPC.
Deletes a service. This also deletes all endpoints
associated with the service.
Returns:
Callable[[~.DeleteServiceRequest],
Awaitable[~.Empty]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "delete_service" not in self._stubs:
self._stubs["delete_service"] = self.grpc_channel.unary_unary(
"/google.cloud.servicedirectory.v1beta1.RegistrationService/DeleteService",
request_serializer=registration_service.DeleteServiceRequest.serialize,
response_deserializer=empty_pb2.Empty.FromString,
)
return self._stubs["delete_service"]
@property
def create_endpoint(
self,
) -> Callable[
[registration_service.CreateEndpointRequest], Awaitable[gcs_endpoint.Endpoint]
]:
r"""Return a callable for the create endpoint method over gRPC.
Creates an endpoint, and returns the new endpoint.
Returns:
Callable[[~.CreateEndpointRequest],
Awaitable[~.Endpoint]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "create_endpoint" not in self._stubs:
self._stubs["create_endpoint"] = self.grpc_channel.unary_unary(
"/google.cloud.servicedirectory.v1beta1.RegistrationService/CreateEndpoint",
request_serializer=registration_service.CreateEndpointRequest.serialize,
response_deserializer=gcs_endpoint.Endpoint.deserialize,
)
return self._stubs["create_endpoint"]
@property
def list_endpoints(
self,
) -> Callable[
[registration_service.ListEndpointsRequest],
Awaitable[registration_service.ListEndpointsResponse],
]:
r"""Return a callable for the list endpoints method over gRPC.
Lists all endpoints.
Returns:
Callable[[~.ListEndpointsRequest],
Awaitable[~.ListEndpointsResponse]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "list_endpoints" not in self._stubs:
self._stubs["list_endpoints"] = self.grpc_channel.unary_unary(
"/google.cloud.servicedirectory.v1beta1.RegistrationService/ListEndpoints",
request_serializer=registration_service.ListEndpointsRequest.serialize,
response_deserializer=registration_service.ListEndpointsResponse.deserialize,
)
return self._stubs["list_endpoints"]
@property
def get_endpoint(
self,
) -> Callable[
[registration_service.GetEndpointRequest], Awaitable[endpoint.Endpoint]
]:
r"""Return a callable for the get endpoint method over gRPC.
Gets an endpoint.
Returns:
Callable[[~.GetEndpointRequest],
Awaitable[~.Endpoint]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "get_endpoint" not in self._stubs:
self._stubs["get_endpoint"] = self.grpc_channel.unary_unary(
"/google.cloud.servicedirectory.v1beta1.RegistrationService/GetEndpoint",
request_serializer=registration_service.GetEndpointRequest.serialize,
response_deserializer=endpoint.Endpoint.deserialize,
)
return self._stubs["get_endpoint"]
@property
def update_endpoint(
self,
) -> Callable[
[registration_service.UpdateEndpointRequest], Awaitable[gcs_endpoint.Endpoint]
]:
r"""Return a callable for the update endpoint method over gRPC.
Updates an endpoint.
Returns:
Callable[[~.UpdateEndpointRequest],
Awaitable[~.Endpoint]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "update_endpoint" not in self._stubs:
self._stubs["update_endpoint"] = self.grpc_channel.unary_unary(
"/google.cloud.servicedirectory.v1beta1.RegistrationService/UpdateEndpoint",
request_serializer=registration_service.UpdateEndpointRequest.serialize,
response_deserializer=gcs_endpoint.Endpoint.deserialize,
)
return self._stubs["update_endpoint"]
@property
def delete_endpoint(
self,
) -> Callable[
[registration_service.DeleteEndpointRequest], Awaitable[empty_pb2.Empty]
]:
r"""Return a callable for the delete endpoint method over gRPC.
Deletes an endpoint.
Returns:
Callable[[~.DeleteEndpointRequest],
Awaitable[~.Empty]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "delete_endpoint" not in self._stubs:
self._stubs["delete_endpoint"] = self.grpc_channel.unary_unary(
"/google.cloud.servicedirectory.v1beta1.RegistrationService/DeleteEndpoint",
request_serializer=registration_service.DeleteEndpointRequest.serialize,
response_deserializer=empty_pb2.Empty.FromString,
)
return self._stubs["delete_endpoint"]
@property
def get_iam_policy(
self,
) -> Callable[[iam_policy_pb2.GetIamPolicyRequest], Awaitable[policy_pb2.Policy]]:
r"""Return a callable for the get iam policy method over gRPC.
Gets the IAM Policy for a resource (namespace or
service only).
Returns:
Callable[[~.GetIamPolicyRequest],
Awaitable[~.Policy]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "get_iam_policy" not in self._stubs:
self._stubs["get_iam_policy"] = self.grpc_channel.unary_unary(
"/google.cloud.servicedirectory.v1beta1.RegistrationService/GetIamPolicy",
request_serializer=iam_policy_pb2.GetIamPolicyRequest.SerializeToString,
response_deserializer=policy_pb2.Policy.FromString,
)
return self._stubs["get_iam_policy"]
@property
def set_iam_policy(
self,
) -> Callable[[iam_policy_pb2.SetIamPolicyRequest], Awaitable[policy_pb2.Policy]]:
r"""Return a callable for the set iam policy method over gRPC.
Sets the IAM Policy for a resource (namespace or
service only).
Returns:
Callable[[~.SetIamPolicyRequest],
Awaitable[~.Policy]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "set_iam_policy" not in self._stubs:
self._stubs["set_iam_policy"] = self.grpc_channel.unary_unary(
"/google.cloud.servicedirectory.v1beta1.RegistrationService/SetIamPolicy",
request_serializer=iam_policy_pb2.SetIamPolicyRequest.SerializeToString,
response_deserializer=policy_pb2.Policy.FromString,
)
return self._stubs["set_iam_policy"]
@property
def test_iam_permissions(
self,
) -> Callable[
[iam_policy_pb2.TestIamPermissionsRequest],
Awaitable[iam_policy_pb2.TestIamPermissionsResponse],
]:
r"""Return a callable for the test iam permissions method over gRPC.
Tests IAM permissions for a resource (namespace or
service only).
Returns:
Callable[[~.TestIamPermissionsRequest],
Awaitable[~.TestIamPermissionsResponse]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "test_iam_permissions" not in self._stubs:
self._stubs["test_iam_permissions"] = self.grpc_channel.unary_unary(
"/google.cloud.servicedirectory.v1beta1.RegistrationService/TestIamPermissions",
request_serializer=iam_policy_pb2.TestIamPermissionsRequest.SerializeToString,
response_deserializer=iam_policy_pb2.TestIamPermissionsResponse.FromString,
)
return self._stubs["test_iam_permissions"]
    def close(self):
        """Shut down the transport by closing the cached gRPC channel."""
        return self.grpc_channel.close()
__all__ = ("RegistrationServiceGrpcAsyncIOTransport",)
| googleapis/python-service-directory | google/cloud/servicedirectory_v1beta1/services/registration_service/transports/grpc_asyncio.py | Python | apache-2.0 | 33,087 |
# -*- coding: utf-8 -*-
from django import template
from ..models.news import News
from ..models.news import Author
register = template.Library()
@register.inclusion_tag("django_newspaper/sidebar/authors.html")
def show_authors():
    """Render the sidebar author list with every Author flagged as an author."""
    # NOTE(review): is_author is evaluated in Python for each row; if it is a
    # model field (not a property), Author.objects.filter(is_author=True)
    # would push this filter into the database — confirm before changing.
    authors = [author for author in Author.objects.all() if author.is_author]
    return {'authors': authors}
| PaulWebbster/django-newspaper | newspaper/templatetags/django_newspaper_extras.py | Python | mit | 461 |
class MyMetaClass(type):
    """Metaclass that injects a ``kw_created_by_metaclass`` method into the
    namespace of every class it creates."""

    def __new__(cls, name, bases, ns):
        # Equivalent to the lambda version: uppercase the single argument.
        def kw_created_by_metaclass(self, arg):
            return arg.upper()
        ns['kw_created_by_metaclass'] = kw_created_by_metaclass
        return type.__new__(cls, name, bases, ns)

    def method_in_metaclass(cls):
        """Defined on the metaclass, so callable on classes but not instances."""
        pass
class MetaClassLibrary(metaclass=MyMetaClass):
    """Library class built through MyMetaClass (gains its injected method)."""

    def greet(self, name):
        """Return a greeting string for *name*."""
        message = 'Hello %s!' % name
        return message
| moto-timo/robotframework | atest/testresources/testlibs/newstyleclasses3.py | Python | apache-2.0 | 344 |
# copied from http://www.roguebasin.com/index.php?title=Bresenham%27s_Line_Algorithm#Python
def get_line(x1, y1, x2, y2):
    """Bresenham's line: every integer grid point from (x1, y1) to (x2, y2),
    inclusive, in order from the start point to the end point."""
    # Walk along y instead of x when the line is steeper than 45 degrees.
    steep = abs(y2 - y1) > abs(x2 - x1)
    if steep:
        x1, y1, x2, y2 = y1, x1, y2, x2
    # Always iterate left-to-right; remember if we must flip the result back.
    flipped = x1 > x2
    if flipped:
        x1, x2 = x2, x1
        y1, y2 = y2, y1
    deltax = x2 - x1
    deltay = abs(y2 - y1)
    error = int(deltax / 2)
    step = 1 if y1 < y2 else -1
    y = y1
    points = []
    for x in range(x1, x2 + 1):
        # Undo the steep-axis swap when recording the point.
        points.append((y, x) if steep else (x, y))
        error -= deltay
        if error < 0:
            y += step
            error += deltax
    if flipped:
        points.reverse()
    return points
def draw_dots(chars, dots):
    """Rasterize *dots* into six 5x8 LCD character cells (mutates *chars*).

    chars: list of six rows of eight ints, each int a 5-bit column mask.
    Cells 0-2 cover the top half (pixel rows 0-7) at columns 0-4, 6-10 and
    12-16; cells 3-5 cover the bottom half (pixel rows 9-16).  Pixel row 8
    and columns 5/11 are the invisible gaps between cells; a dot on row 8
    is nudged toward the half that currently has fewer pixels.
    """
    # Count pixels per half to decide which way row-8 dots should be pushed.
    bottom = top = 0
    for dot in dots:
        if dot[1] > 8:
            bottom += 1
        if dot[1] < 8:
            top += 1
    # Parenthesized form prints identically under Python 2 and Python 3.
    print("We have %d bottom pixels and %d top pixels" % (bottom, top))
    for dot in dots:
        if dot[1] == 8 and dot[0] != 8:  # on the invisible middle line
            # bugfix suitable for clock (pointer is rotated around the center)
            dot = (dot[0], dot[1] - 1 if bottom < top or (bottom == top and dot[0] < 8) else dot[1] + 1)
        if dot[0] < 5 and dot[1] < 8:
            chars[0][dot[1]] |= (1 << (4 - dot[0]))
        elif 5 < dot[0] < 11 and dot[1] < 8:
            chars[1][dot[1]] |= (1 << (4 - (dot[0] - 6)))
        elif 11 < dot[0] < 17 and dot[1] < 8:
            chars[2][dot[1]] |= (1 << (4 - (dot[0] - 12)))
        elif dot[0] < 5 and 8 < dot[1]:
            chars[3][dot[1] - 9] |= (1 << (4 - dot[0]))
        elif 5 < dot[0] < 11 and 8 < dot[1]:
            chars[4][dot[1] - 9] |= (1 << (4 - (dot[0] - 6)))
        elif 11 < dot[0] < 17 and 8 < dot[1]:
            # Was "10 < dot[0]": inconsistent with the top half and it let
            # column 11 set bit 5, outside the 5-bit character width.
            chars[5][dot[1] - 9] |= (1 << (4 - (dot[0] - 12)))
# -*- coding: utf-8 -*-
try:
from setuptools import setup, find_packages
except ImportError:
from ez_setup import use_setuptools
use_setuptools()
from setuptools import setup, find_packages
# Package metadata for the crdppf (public law restriction portal) core.
setup(
    name='crdppf',
    version='3.1.0',
    description='SITN, public law restriction portal core',
    author='sitn',
    author_email='sitn@ne.ch',
    url='http://www.ne.ch/sitn',
    # Runtime dependencies installed alongside the package.
    install_requires=[
        'c2c.template',
        'dogpile.cache',
        'httplib2',
        'jstools',
        'papyrus',
        'pyramid_tm',
        'sqlahelper',
    ],
    packages=find_packages(exclude=['ez_setup']),
    include_package_data=True,
    zip_safe=False,
    entry_points={
        # Command-line helper shipped with the package.
        'console_scripts': [
            "iconizer = crdppf.utilities.iconizer:main",
        ],
        # WSGI application factory used by Paste/pyramid to start the app.
        'paste.app_factory': [
            'main = crdppf_core:main',
        ],
    },
)
| voisardf/crdppf_core | setup.py | Python | gpl-3.0 | 923 |
import sys
from mmse15project.views.Administration import Administration
from mmse15project.views.CustomerService import CustomerService
from mmse15project.views.Financial import Financial
from mmse15project.views.HR import HR
from mmse15project.views.Marketing import Marketing
from mmse15project.views.Production import Production
from mmse15project.views.Service import Service
from mmse15project.views.TopManagement import TopManagement
from mmse15project.model.Account import AccountTeam
from mmse15project.model.Account import AccountType
from mmse15project.model.Account import Account
from mmse15project.model.Client import Client
from mmse15project.model.Request import Request
from mmse15project.model.RequestDetails import RequestDetails
from mmse15project.model.Discount import Discount
from mmse15project.model.Task import Task
from mmse15project.model.RecruitmentRequest import RecruitmentRequest
from mmse15project.model.FinancialRequest import FinancialRequest
import tkinter as tk
class MainController:
    def __init__(self, model, view):
        """Wire the controller to its model (DB facade) and its root view."""
        self.model = model
        self.view = view
    def set_frame(self, frame_class, account=None):
        """Replace the view container's content with a new *frame_class* frame.
        When *account* is given, its team name, type name and e-mail are
        passed to the frame constructor (used by the per-team dashboards).
        """
        self.clear_frame(self.view.container)  # clear container
        if account is None:
            frame = frame_class(self.view.container, self.model, self)
        else:
            frame = frame_class(self.view.container, self.model, self,
                                AccountTeam(account.getAccountTeam()).name,
                                AccountType(account.getAccountType()).name,
                                account.getEmail())
        frame.pack()
def clear_frame(self, frame):
for widget in frame.winfo_children():
widget.destroy()
    def login_auth(self, login):
        """Validate the login form; on success open the user's team dashboard."""
        # login() returns False on bad credentials, otherwise an Account.
        # The form asks only for the local part; the domain is appended here.
        account = self.model.account_db.login(login.e1.get()+"@sep.se", login.e2.get())
        if account is False:
            login.fail()
        else:
            team_name = AccountTeam(account.getAccountTeam()).name
            # Resolve the dashboard class (e.g. HR, Financial) by team name
            # from this module's imports.
            self.set_frame(getattr(sys.modules[__name__], team_name), account)
    def login_try_again(self, login):
        """Reset the login view so the user can retry after a failure."""
        login.auth()
    def login_quit(self):
        """Terminate the application from the login screen."""
        sys.exit()
###########################################################################
# Submit forms
###########################################################################
def str_to_enum(self, str):
if str == "Administration":
return AccountTeam.Administration
elif str == "HR":
return AccountTeam.HR
elif str == "Customer Service":
return AccountTeam.CustomerService
elif str == "Marketing":
return AccountTeam.Marketing
elif str == "Financial":
return AccountTeam.Financial
elif str == "Production":
return AccountTeam.Production
elif str == "Service":
return AccountTeam.Service
elif str == "Top Management":
return AccountTeam.TopManagement
elif str == "Employee":
return AccountType.Employee
elif str == "Senior":
return AccountType.Senior
elif str == "Manager":
return AccountType.Manager
else:
return None
    def new_client_create(self, subview):
        """Open the new-client form only if the typed e-mail is not taken."""
        test = Client()
        test.setEmail(subview.e1.get())
        # get() returns False when no client with this e-mail exists yet.
        exist = self.model.client_db.get(test)
        if exist is False:
            subview.form.create_widgets()
        else:
            subview.form.already_exist()
    def new_client_submit(self, subview):
        """Persist a new client from the form values, then close the form."""
        # get_all() order: name, email, address, postal code, city, birth date.
        data = subview.get_all()
        new= Client()
        new.setName(data[0])
        new.setEmail(data[1])
        new.setAddress(data[2])
        new.setPostalCode(data[3])
        new.setCity(data[4])
        new.setBirthDate(data[5])
        self.model.client_db.add(new)
        self.clear_frame(subview)
    def new_request_create(self, subview):
        """Open the new-request form only if the typed client ID exists."""
        if self.model.client_db.getByID(int(subview.e1.get())) is False:
            subview.form.not_found()
        else:
            subview.form.create_widgets()
    def new_request_submit(self, subview):
        """Persist a new event request from the form values, then close the form."""
        # get_all() order: client ID, event type, start/end date,
        # expected participants, preferences, expected budget, comment.
        data = subview.get_all()
        new = Request()
        new.setClientID(data[0])
        new.setEventType(data[1])
        new.setStartDate(data[2])
        new.setEndDate(data[3])
        new.setExpectedParticipants(data[4])
        new.setPreferences(data[5])
        new.setExpectedBudget(data[6])
        new.comment = data[7]
        self.model.request_db.add(new)
        self.clear_frame(subview)
    def new_request_details_create(self, subview):
        """Open the request-details form only for requests in status 5.
        Status 5 appears to be the workflow state that allows adding details
        (it is bumped to 6 on submit) — TODO confirm against the Request model.
        """
        request = self.model.request_db.getByID(subview.e1.get())
        if request is not False and request.getStatus() == 5:
            subview.form.create_widgets()
        else:
            subview.form.no_request_found()
    def new_request_details_submit(self, subview):
        """Persist the request details and advance the request's status to 6."""
        data = subview.get_all()
        # add request details
        new = RequestDetails()
        new.setAll(data)
        self.model.request_details_db.add(new)
        # update request status
        request = self.model.request_db.getByID(new.getID())
        request.setStatus(6)
        self.model.request_db.update(request)
        self.clear_frame(subview)
    def new_task_create(self, subview):
        """Open the new-task form only if the typed request ID exists."""
        if self.model.request_db.getByID(int(subview.e1.get())) is False:
            subview.form.not_found()
        else:
            subview.form.create_widgets()
    def new_task_submit(self, subview):
        """Persist a new task from the form values, then close the form."""
        # get_all() order: request ID, description, operator (local part),
        # priority, deadline, comment.
        data = subview.get_all()
        new = Task()
        new.requestID = data[0]
        new.description = data[1]
        # Operator is entered as the local part; domain appended here.
        new.operator = data[2] + "@sep.se"
        new.priority = data[3]
        new.deadline= data[4]
        new.comment = data[5]
        self.model.task_db.add(new)
        self.clear_frame(subview)
    def new_recruitment_requests_submit(self, subview):
        """Persist a new recruitment request, then reset the form in place."""
        # get_all() order: date, department, title, description.
        data = subview.get_all()
        new = RecruitmentRequest()
        new.date = data[0]
        new.department = data[1]
        new.title = data[2]
        new.description = data[3]
        self.model.recruitment_request_db.add(new)
        # Unlike most submit handlers, this one rebuilds the same form.
        self.clear_frame(subview)
        subview.create_widgets()
    def new_financial_request_create(self, subview):
        """Open the financial-request form only if the typed request ID exists."""
        if self.model.request_db.getByID(int(subview.e1.get())) is False:
            subview.form.not_found()
        else:
            subview.form.create_widgets()
    def new_financial_request_submit(self, subview):
        """Persist a financial request, then rebuild the parent view."""
        # get_all() order: department, request ID, amount, reason.
        data = subview.get_all()
        new = FinancialRequest()
        new.department = data[0]
        new.requestID = data[1]
        new.amount = int(data[2])
        new.reason = data[3]
        self.model.financial_request_db.add(new)
        # Clears and recreates the enclosing view, not the subview itself.
        self.clear_frame(subview.master)
        subview.master.create_widgets()
    def new_discount_create(self, subview):
        """Open the new-discount form (no precondition check)."""
        subview.form.create_widgets()
    def new_discount_submit(self,subview):
        """Persist a new discount from the form values, then close the form."""
        # get_all() order: request ID, amount, comment, date.
        data = subview.get_all()
        new = Discount()
        new.requestID = int(data[0])
        new.amount = int(data[1])
        new.comment = data[2]
        new.date = data[3]
        self.model.discount_db.add(new)
        self.clear_frame(subview)
###########################################################################
# Search
###########################################################################
    def search_client_get(self,subview):
        """Render the client search results panel."""
        subview.result.create_widgets()
    def search_request_get(self,subview):
        """Render the request search results panel."""
        subview.result.create_widgets()
    def search_request_details_get(self,subview):
        """Render the request-details search results panel."""
        subview.result.create_widgets()
    def search_tasks_get(self, subview):
        """Render the task search results panel."""
        subview.result.create_widgets()
    def search_recruitment_request_get(self,subview):
        """Render the recruitment-request search results panel."""
        subview.result.create_widgets()
    def search_financial_request_get(self,subview):
        """Render the financial-request search results panel."""
        subview.result.create_widgets()
    def search_discount_get(self,subview):
        """Render the discount search results panel."""
        subview.result.create_widgets()
    ###########################################################################
    # Update
    ###########################################################################
    # "Pending" handlers reveal the update form for an item awaiting action.
    def pending_requests_update(self, subview):
        """Show the update form for a pending request."""
        subview.update.create_widgets()
    def pending_tasks_update(self, subview):
        """Show the update form for a pending task."""
        subview.update.create_widgets()
    def pending_recruitment_request_update(self, subview):
        """Show the update form for a pending recruitment request."""
        subview.update.create_widgets()
    def pending_financial_request_update(self, subview):
        """Show the update form for a pending financial request."""
        subview.update.create_widgets()
    ###########################################################################
    # Approve/reject
    ###########################################################################
    def search_request_approve(self, subview):
        """Advance a request one approval stage, saving the reviewer's comment."""
        status = subview.request.getStatus()
        # [:-1] drops the last character of the Text widget content
        # (presumably the trailing newline Tk appends -- confirm).
        subview.request.comment = subview.e8.get(1.0, tk.END)[:-1]
        # Approval is a numeric pipeline: each approve bumps the stage by one.
        subview.request.setStatus(status+1)
        self.model.request_db.update(subview.request)
        self.clear_frame(subview)
        subview.create_widgets()
    def search_request_reject(self, subview):
        """Mark a request rejected (status 0) with the reviewer's comment."""
        # NOTE(review): 0 appears to encode "rejected" -- verify against the status enum.
        subview.request.setStatus(0)
        subview.request.comment = subview.e8.get(1.0, tk.END)[:-1]
        self.model.request_db.update(subview.request)
        self.clear_frame(subview)
        subview.create_widgets()
    def search_task_update(self, subview):
        """Persist an edited task comment without changing its status."""
        subview.t.comment = subview.e8.get(1.0, tk.END)[:-1]
        self.model.task_db.update(subview.t)
        self.clear_frame(subview)
        subview.create_widgets()
    def search_tasks_approve(self, subview):
        """Advance a task's status by one stage and persist it."""
        subview.t.status += 1
        self.model.task_db.update(subview.t)
        self.clear_frame(subview)
        subview.create_widgets()
    def search_recruitment_request_approve(self, subview):
        """Set a recruitment request to status 2 (approved -- confirm encoding)."""
        subview.r.status = 2
        self.model.recruitment_request_db.update(subview.r)
        self.clear_frame(subview)
        subview.create_widgets()
    def search_recruitment_request_reject(self, subview):
        """Set a recruitment request to status 3 (rejected -- confirm encoding)."""
        subview.r.status = 3
        self.model.recruitment_request_db.update(subview.r)
        self.clear_frame(subview)
        subview.create_widgets()
    def search_financial_request_decide(self, subview, new_status):
        """Apply an arbitrary status decision to a financial request and persist it."""
        subview.f.status = new_status
        self.model.financial_request_db.update(subview.f)
        self.clear_frame(subview)
        subview.create_widgets()
    ###########################################################################
    # Edit account
    ###########################################################################
    def new_account_create(self, subview):
        """Show the account form unless an account with that email already exists."""
        # Probe the account database with a throwaway Account keyed by email.
        test = Account()
        # All account emails live in the fixed @sep.se domain.
        test.setEmail(subview.e1.get() + "@sep.se")
        exist = self.model.account_db.get(test)
        if exist is False:
            subview.form.create_widgets()
        else:
            subview.form.already_exist()
    def new_account_submit(self, subview):
        """Build an Account from the form's positional values and persist it."""
        data = subview.get_all()
        # Field order: 0=name, 1=email (local part), 2=password, 3=team,
        # 4=type, 5=department, 6=comment.
        data[1] += "@sep.se"
        # Team/type arrive as display strings; convert to their enum values.
        data[3] = self.str_to_enum(data[3]).value
        data[4] = self.str_to_enum(data[4]).value
        new = Account()
        new.setName(data[0])
        new.setEmail(data[1])
        new.setPassword(data[2])
        new.setAccountTeam(data[3])
        new.setAccountType(data[4])
        new.setDepartment(data[5])
        new.setComment(data[6])
        self.model.account_db.add(new)
        self.clear_frame(subview)
def config_save(self, subview):
data = subview.get_all()
new = Account()
new.setName(data[0])
new.setEmail(data[1])
new.setPassword(data[2])
new.setAccountTeam(data[3])
new.setAccountType(data[4])
new.setDepartment(data[5])
new.setComment(data[6])
self.model.account_db.update(new) | rssalessio/MMSE15Project-RussoJohansson | mmse15project/ctrls/MainController.py | Python | gpl-2.0 | 11,737 |
"""
TESTS is a dict with all you tests.
Keys for this will be categories' names.
Each test is dict with
"input" -- input data for user function
"answer" -- your right answer
"explanation" -- not necessary key, it's using for additional info in animation.
"""
TESTS = {
"Basics": [
{"input": [[0, 1, 1, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0],
[0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0],
[0, 0, 1, 0, 0, 1, 1, 1, 0, 1, 1, 1, 0],
[0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0],
[0, 1, 1, 1, 0, 1, 1, 0, 0, 0, 0, 1, 0]],
"answer": 394,
"explanation": ""},
{"input": [[0, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0],
[0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0],
[0, 0, 1, 0, 0, 1, 1, 1, 0, 1, 1, 1, 0],
[0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 1, 0],
[0, 1, 1, 1, 0, 1, 1, 0, 0, 0, 0, 1, 0]],
"answer": 394,
"explanation": " 3,1 3,5 0,10 "},
],
"Clear": [
{"input": [[0, 0, 1, 0, 0, 1, 1, 1, 0, 1, 1, 1, 0],
[0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0],
[0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0],
[0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0],
[0, 0, 1, 0, 0, 1, 1, 1, 0, 1, 1, 1, 0]],
"answer": 123,
"explanation": ""},
{"input": [[0, 1, 0, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0],
[0, 1, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0],
[0, 1, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 0],
[0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 1, 0],
[0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 1, 1, 0]],
"answer": 456,
"explanation": ""},
{"input": [[0, 1, 1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0],
[0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0],
[0, 0, 1, 0, 0, 1, 1, 1, 0, 1, 1, 1, 0],
[0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0],
[0, 1, 0, 0, 0, 1, 1, 1, 0, 1, 1, 0, 0]],
"answer": 789,
"explanation": ""},
{"input": [[0, 0, 1, 0, 0, 1, 1, 0, 0, 1, 1, 1, 0, 1, 0, 1, 0],
[0, 1, 1, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 1, 0, 1, 0],
[0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 1, 1, 1, 0],
[0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0],
[0, 0, 1, 0, 0, 0, 1, 1, 0, 1, 1, 1, 0, 0, 0, 1, 0]],
"answer": 1034,
"explanation": ""},
{"input": [[0, 1, 1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0],
[0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0],
[0, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1, 1, 0, 0, 1, 0, 0, 1, 1, 1, 0],
[0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 1, 0, 1, 0],
[0, 1, 1, 0, 0, 1, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 0, 1, 1, 1, 0]],
"answer": 52678,
"explanation": ""},
{"input": [[0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0],
[0, 1, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 0],
[0, 1, 1, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0],
[0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0],
[0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0]],
"answer": 911,
"explanation": ""},
{"input": [[0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0],
[0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0],
[0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0],
[0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0],
[0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0]],
"answer": 777,
"explanation": ""},
{"input": [[0, 1, 1, 1, 0, 0, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 0, 1, 1, 1, 0],
[0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 1, 0],
[0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0],
[0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0],
[0, 1, 1, 1, 0, 0, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 0, 1, 1, 1, 0]],
"answer": 21312,
"explanation": ""},
{"input": [[0, 1, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 0],
[0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0],
[0, 1, 1, 1, 0, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0, 1, 0, 1, 1, 1, 0],
[0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0],
[0, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0]],
"answer": 80808,
"explanation": ""},
],
"Noise": [
{"input": [[0, 0, 1, 0, 0, 1, 1, 1, 0, 1, 1, 1, 0],
[0, 1, 1, 1, 0, 1, 0, 1, 0, 0, 0, 1, 0],
[0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 1, 1, 0],
[0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0],
[0, 0, 1, 0, 0, 1, 1, 1, 0, 1, 1, 1, 0]],
"answer": 123,
"explanation": " 1,3 1,5 2,11 "},
{"input": [[0, 1, 0, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0],
[0, 1, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0],
[0, 1, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 0],
[0, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 1, 0],
[0, 0, 1, 1, 0, 1, 1, 0, 0, 0, 1, 1, 0]],
"answer": 456,
"explanation": " 4,2 3,5 3,9 "},
{"input": [[0, 1, 1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0],
[0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 0],
[0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 1, 1, 0],
[0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0],
[0, 1, 0, 0, 0, 1, 1, 1, 0, 1, 1, 0, 0]],
"answer": 789,
"explanation": " 2,2 2,6 1,10 "},
{"input": [[0, 0, 1, 0, 0, 1, 1, 0, 0, 1, 1, 1, 0, 1, 0, 1, 0],
[0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0],
[0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 1, 0, 1, 0],
[0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0],
[0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 1, 1, 0, 0, 0, 1, 0]],
"answer": 1034,
"explanation": " 1,1 4,7 1,10 2,14 "},
{"input": [[0, 1, 1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0],
[0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0],
[0, 0, 1, 0, 0, 1, 1, 1, 0, 1, 1, 1, 0, 0, 1, 0, 0, 0, 1, 1, 0],
[0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0],
[0, 1, 1, 0, 0, 1, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 0, 1, 1, 1, 0]],
"answer": 52678,
"explanation": " 2,1 2,5 1,9 3,15 2,17 "},
{"input": [[0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 0],
[0, 1, 1, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0],
[0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0]],
"answer": 911,
"explanation": " 4,1 4,6 0,10 "},
{"input": [[0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0],
[0, 0, 1, 1, 0, 1, 0, 1, 0, 0, 0, 1, 0],
[0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0],
[0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0],
[0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0]],
"answer": 777,
"explanation": " 1,2 1,5 2,9 "},
{"input": [[0, 1, 1, 1, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 1, 1, 1, 0],
[0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0],
[0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0],
[0, 1, 0, 1, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0],
[0, 1, 1, 1, 0, 0, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 0, 0, 1, 1, 0]],
"answer": 21312,
"explanation": " 3,3 3,5 0,11 1,14 4,17 "},
{"input": [[0, 1, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0],
[0, 1, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0],
[0, 1, 1, 1, 0, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0],
[0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0],
[0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0]],
"answer": 80808,
"explanation": " 1,2 4,5 3,10 0,15 2,18 "},
]
}
| Bryukh-Checkio-Tasks/checkio-task-mono-captcha | verification/tests.py | Python | gpl-2.0 | 8,473 |
# -*- coding: utf-8 -*-
############################################################################
#
# Copyright (C) 2012-2015
# Xavier Izard
#
# This file is part of DXF2GCODE.
#
# DXF2GCODE is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# DXF2GCODE is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with DXF2GCODE. If not, see <http://www.gnu.org/licenses/>.
#
############################################################################
class CustomGCode(object):
    """A user-defined block of GCode attached to a layer.

    Custom GCode objects are part of a layer (layercontent.py) and are
    emitted verbatim into the generated file.  The available snippets are
    declared in the config file.
    @purpose: store user defined GCode
    """
    def __init__(self, name, nr, gcode, parentLayer):
        """Store the snippet's identity and content.

        @param name: the name of the GCode, as defined in the config file
        @param nr: ordinal number of this snippet
        @param gcode: the user defined gcode text
        @param parentLayer: the parent layer class of the shape
        """
        self.name = name
        self.nr = nr
        self.gcode = gcode
        self.parentLayer = parentLayer
        self.disabled = False
        # Path optimization (TSP) never applies to raw GCode blocks.
        self.send_to_TSP = False
    def __str__(self):
        """Return a multi-line debug representation of the snippet."""
        lines = ["\nCustomGCode",
                 "\nname: %s" % self.name,
                 "\nnr: %i" % self.nr,
                 "\ngcode: %s" % self.gcode]
        return "".join(lines)
    def setDisable(self, flag=False):
        """Enable or disable this snippet for export.

        @param flag: True to disable, False to enable
        """
        self.disabled = flag
    def isDisabled(self):
        """Return whether this snippet is currently disabled."""
        return self.disabled
    def Write_GCode(self, PostPro):
        """Return the raw gcode text to be exported for this snippet.

        @param PostPro: postprocessor instance; unused here but kept for
            the common shape-export interface
        """
        return self.gcode
| hehongyu1995/Dxf2GCode | core/customgcode.py | Python | gpl-3.0 | 2,598 |
#!/usr/bin/env python3
import argparse
import json
from copy import deepcopy
def main(input_file, output_file, plz_range=range(40210, 40721)):
    """Filter a GeoJSON file down to features whose "plz" property
    (Postleitzahl / zip code) is in *plz_range*, and write the result.

    :param input_file: path of the GeoJSON file to read
    :param output_file: path of the filtered GeoJSON file to write
    :param plz_range: iterable of zip codes (ints or strings); None falls
        back to the default Duesseldorf range 40210-40720
    """
    # Bug fix: the CLI passes args.plz_range, which is None when the
    # --plz_range flag is omitted; that used to override the default and
    # crash when iterated.
    if plz_range is None:
        plz_range = range(40210, 40721)
    # Normalize once to a set of strings for O(1) membership tests
    # ("plz" values in the file are strings).
    wanted = {str(p) for p in plz_range}
    geodata = read_jsonfile(input_file)
    output = {"type": "FeatureCollection", "crs": None, "features": []}
    for feature in geodata["features"]:
        try:
            if feature["properties"]["plz"] in wanted:
                output["features"].append(deepcopy(feature))
        except (KeyError, TypeError):
            # Feature without properties/plz -- skip it (best effort).
            pass
    write_jsonfile(output_file, output)
def read_jsonfile(file):
    """Read *file* (UTF-8) and return its parsed JSON content.

    Errors are reported on stdout and re-raised.

    :param file: path of the JSON file to read
    :return: the decoded JSON value (usually a dict)
    """
    try:
        # `with` closes the file; the old explicit close() inside the
        # block was redundant.
        with open(file, encoding="utf-8") as data_file:
            return json.load(data_file)
    except (OSError, ValueError):
        # OSError: unreadable path; ValueError covers JSONDecodeError.
        print("Couldn't read json file")
        raise
def write_jsonfile(file, content):
    """Serialize *content* as JSON and write it to *file* as UTF-8.

    Non-ASCII characters are written verbatim (ensure_ascii=False).
    Errors are reported on stdout and re-raised.

    :param file: path of the JSON file to write
    :param content: JSON-serializable value
    """
    try:
        # `with` closes the file; no explicit close() needed.
        with open(file, "w", encoding="utf-8") as data_file:
            data_file.write(json.dumps(content, ensure_ascii=False))
    except (OSError, TypeError, ValueError):
        # OSError: unwritable path; TypeError/ValueError: unserializable content.
        print("Couldn't write json file")
        raise
def parse_args():
    """Parse the command-line options of the zip-code filter.

    Returns an argparse.Namespace with input_file, output_file and
    plz_range (plz_range is None when the flag is omitted).
    """
    parser = argparse.ArgumentParser(
        description="filter geoip file for range of zip codes (Postleitzahl)")
    # Required I/O paths.
    parser.add_argument(
        "--input_file", type=str, required=True,
        help="json file to read from")
    parser.add_argument(
        "--output_file", type=str, required=True,
        help="json file to write into")
    # Optional explicit list of zip codes (collected as strings).
    parser.add_argument(
        "--plz_range", nargs="+", required=False,
        help="list of zip codes to filter (space seperated)")
    return parser.parse_args()
if __name__ == "__main__":
args = parse_args()
main(input_file=args.input_file, output_file=args.output_file, plz_range=args.plz_range )
| ffrl/ff-tools | tunneldigger/geojson-plz-filter.py | Python | gpl-2.0 | 1,806 |
import os
basedir = os.path.abspath(os.path.dirname(__file__))
class Config:
    """Base configuration shared by all environments."""
    # Flask session/signing key; read from the environment (may be None
    # if SECRET_KEY is unset -- set it in production).
    SECRET_KEY = os.environ.get('SECRET_KEY')
    SQLALCHEMY_COMMIT_ON_TEARDOWN = True
    SQLALCHEMY_TRACK_MODIFICATIONS = True
    SQLALCHEMY_RECORD_QUERIES = True
    # URL path under which infographic images are served.
    INFOGRAPHICS_DIR = '/static/img/infographics/'
    @staticmethod
    def init_app(app):
        """Hook for environment-specific app initialization (no-op here)."""
        pass
class DevelopmentConfig(Config):
    """Local development: debug on, SQLite fallback database."""
    DEBUG = True
    # DEV_DATABASE_URL wins; otherwise use a local SQLite file.
    SQLALCHEMY_DATABASE_URI = os.environ.get('DEV_DATABASE_URL') or \
        'sqlite:///' + os.path.join(basedir, 'data-dev.sqlite')
class TestingConfig(Config):
    """Test runs: TESTING flag on, separate SQLite fallback database."""
    TESTING = True
    SQLALCHEMY_DATABASE_URI = os.environ.get('TEST_DATABASE_URL') or \
        'sqlite:///' + os.path.join(basedir, 'data-test.sqlite')
class ProductionConfig(Config):
    """Production: DATABASE_URL with a local SQLite fallback."""
    SQLALCHEMY_DATABASE_URI = os.environ.get('DATABASE_URL') or \
        'sqlite:///' + os.path.join(basedir, 'data.sqlite')

    @classmethod
    def init_app(cls, app):
        """Run production-specific initialization on *app*.

        Bug fix: the decorated method previously omitted the ``cls``
        parameter, so ``ProductionConfig.init_app(app)`` bound the class
        itself to ``app`` and the real application object was lost.
        """
        Config.init_app(app)
# Registry mapping a config name (e.g. FLASK_CONFIG) to its class;
# 'default' is used when no name is given.
config = {
    'development': DevelopmentConfig,
    'testing': TestingConfig,
    'production': ProductionConfig,
    'default': DevelopmentConfig
}
| marcoagner/bitcoinfographics | config.py | Python | mit | 1,107 |
import time
import gobject
import gtk
import pango
from bot_procman.printf_t import printf_t
# Default per-command console output cap (kilobytes per second).
DEFAULT_MAX_KB_PER_SECOND = 500
# Maps an ANSI SGR escape code to the (gtk.TextTag property, value) pair
# used to render it in the console's text buffers.
ANSI_CODES_TO_TEXT_TAG_PROPERTIES = { \
    "1" : ("weight", pango.WEIGHT_BOLD),
    "2" : ("weight", pango.WEIGHT_LIGHT),
    "4" : ("underline", pango.UNDERLINE_SINGLE),
    # 30-37: foreground colors
    "30" : ("foreground", "black"),
    "31" : ("foreground", "red"),
    "32" : ("foreground", "green"),
    "33" : ("foreground", "yellow"),
    "34" : ("foreground", "blue"),
    "35" : ("foreground", "magenta"),
    "36" : ("foreground", "cyan"),
    "37" : ("foreground", "white"),
    # 40-47: background colors
    "40" : ("background", "black"),
    "41" : ("background", "red"),
    "42" : ("background", "green"),
    "43" : ("background", "yellow"),
    "44" : ("background", "blue"),
    "45" : ("background", "magenta"),
    "46" : ("background", "cyan"),
    "47" : ("background", "white"),
    }
def now_str():
    """Return the current wall-clock time as a "[HH:MM:SS] " log prefix."""
    return time.strftime("[%H:%M:%S] ")
class CommandExtraData(object):
    """Per-command console state: an output buffer plus the counters the
    console's rate limiter maintains for that command."""
    def __init__ (self, text_tag_table):
        # Dedicated text buffer holding this command's captured output.
        self.tb = gtk.TextBuffer (text_tag_table)
        # Sliding window of characters kept per upkeep interval (six buckets,
        # consumed by the console's rate-limit upkeep).
        self.printf_keep_count = [0] * 6
        # Characters discarded since the last upkeep pass.
        self.printf_drop_count = 0
self.printf_drop_count = 0
class SheriffCommandConsole(gtk.ScrolledWindow):
    """Scrollable console showing sheriff events and per-command output.

    One text buffer is kept for sheriff-level events (add/remove/status)
    and one per command (via CommandExtraData); output is rate-limited
    and ANSI SGR color codes are rendered with gtk text tags.
    """
    def __init__(self, _sheriff, lc):
        """Build the console widget.

        :param _sheriff: sheriff object providing command signals/queries
        :param lc: message bus used to subscribe to PMD_PRINTF output
        """
        super(SheriffCommandConsole, self).__init__()
        # Cap on retained lines per buffer; older lines are chopped.
        self.stdout_maxlines = 2000
        self.max_kb_per_sec = 0
        self.max_chars_per_2500_ms = 0
        self.sheriff = _sheriff
        # stdout textview
        self.stdout_textview = gtk.TextView ()
        self.stdout_textview.set_property ("editable", False)
        self.sheriff_tb = self.stdout_textview.get_buffer ()
        self.add (self.stdout_textview)
        # Track whether the view is scrolled to the bottom so new output
        # only auto-scrolls when the user hasn't scrolled up.
        stdout_adj = self.get_vadjustment ()
        stdout_adj.set_data ("scrolled-to-end", 1)
        stdout_adj.connect ("changed", self.on_adj_changed)
        stdout_adj.connect ("value-changed", self.on_adj_value_changed)
        # add callback so we can add a clear option to the default right click popup
        self.stdout_textview.connect ("populate-popup", self.on_tb_populate_menu)
        # set some default appearance parameters
        self.font_str = "Monospace 10"
        self.set_font(self.font_str)
        self.base_color = gtk.gdk.Color(65535, 65535, 65535)
        self.text_color = gtk.gdk.Color(0, 0, 0)
        self.set_background_color(self.base_color)
        self.set_text_color(self.text_color)
        # stdout rate limit maintenance events
        gobject.timeout_add (500, self._stdout_rate_limit_upkeep)
        self.sheriff.command_added.connect(self._on_sheriff_command_added)
        self.sheriff.command_removed.connect(self._on_sheriff_command_removed)
        self.sheriff.command_status_changed.connect(self._on_sheriff_command_status_changed)
        # command -> CommandExtraData
        self._cmd_extradata = {}
        lc.subscribe ("PMD_PRINTF", self.on_procman_printf)
        # ANSI tag cache; "normal" is the untagged default.
        self.text_tags = { "normal" : gtk.TextTag("normal") }
        for tt in self.text_tags.values():
            self.sheriff_tb.get_tag_table().add(tt)
        self.set_output_rate_limit(DEFAULT_MAX_KB_PER_SECOND)
    def get_background_color(self):
        """Return the current background color."""
        return self.base_color
    def get_text_color(self):
        """Return the current text color."""
        return self.text_color
    def get_font(self):
        """Return the current font description string."""
        return self.font_str
    def set_background_color(self, color):
        """Apply *color* as the textview background in all widget states."""
        self.base_color = color
        self.stdout_textview.modify_base(gtk.STATE_NORMAL, color)
        self.stdout_textview.modify_base(gtk.STATE_ACTIVE, color)
        self.stdout_textview.modify_base(gtk.STATE_PRELIGHT, color)
    def set_text_color(self, color):
        """Apply *color* as the textview foreground in all widget states."""
        self.text_color = color
        self.stdout_textview.modify_text(gtk.STATE_NORMAL, color)
        self.stdout_textview.modify_text(gtk.STATE_ACTIVE, color)
        self.stdout_textview.modify_text(gtk.STATE_PRELIGHT, color)
    def set_font(self, font_str):
        """Apply the Pango font description string *font_str*."""
        self.font_str = font_str
        self.stdout_textview.modify_font(pango.FontDescription(font_str))
    def _stdout_rate_limit_upkeep (self):
        """Periodic (500 ms) timer: report dropped output and slide the
        per-command keep-count window.  Returns True to stay scheduled."""
        for cmd in self.sheriff.get_all_commands ():
            extradata = self._cmd_extradata.get(cmd, None)
            if not extradata:
                continue
            if extradata.printf_drop_count:
                deputy = self.sheriff.get_command_deputy (cmd)
                self._add_text_to_buffer (extradata.tb, now_str() +
                    "\nSHERIFF RATE LIMIT: Ignored %d bytes of output\n" %
                    (extradata.printf_drop_count))
                self._add_text_to_buffer (self.sheriff_tb, now_str() +
                    "Ignored %d bytes of output from [%s] [%s]\n" % \
                    (extradata.printf_drop_count, deputy.name, cmd.command_id))
            # Slide the window: retire the oldest bucket, open a new one.
            extradata.printf_keep_count.pop (0)
            extradata.printf_keep_count.append (0)
            extradata.printf_drop_count = 0
        return True
    def _tag_from_seg(self, seg):
        """Split an ANSI escape segment into (text tag, remaining text).

        *seg* is the text following an "\\x1b[" introducer; the part up to
        the first "m" is the SGR code list.  Tags are cached per sorted
        code key and created on demand."""
        esc_seq, seg = seg.split("m", 1)
        if not esc_seq:
            # Empty code list means reset ("\x1b[m" == "\x1b[0m").
            esc_seq = "0"
        key = esc_seq
        codes = esc_seq.split(";")
        if len(codes) > 0:
            # Sort so equivalent code combinations share one cached tag.
            codes.sort()
            key = ";".join(codes)
        if key not in self.text_tags:
            tag = gtk.TextTag(key)
            for code in codes:
                if code in ANSI_CODES_TO_TEXT_TAG_PROPERTIES:
                    propname, propval = ANSI_CODES_TO_TEXT_TAG_PROPERTIES[code]
                    tag.set_property(propname, propval)
            self.sheriff_tb.get_tag_table().add(tag)
            self.text_tags[key] = tag
        return self.text_tags[key], seg
    def _add_text_to_buffer (self, tb, text):
        """Append *text* to buffer *tb*, rendering ANSI colors and
        trimming the buffer to stdout_maxlines."""
        if not text:
            return
        # interpret text as ANSI escape sequences? Try to format colors...
        tag = self.text_tags["normal"]
        for segnum, seg in enumerate(text.split("\x1b[")):
            if not seg:
                continue
            if segnum > 0:
                try:
                    tag, seg = self._tag_from_seg(seg)
                except ValueError:
                    # Malformed escape (no "m"); emit the segment as-is.
                    pass
            end_iter = tb.get_end_iter()
            tb.insert_with_tags(end_iter, seg, tag)
        # toss out old text if the buffer is getting too big
        num_lines = tb.get_line_count ()
        if num_lines > self.stdout_maxlines:
            start_iter = tb.get_start_iter ()
            chop_iter = tb.get_iter_at_line (num_lines - self.stdout_maxlines)
            tb.delete (start_iter, chop_iter)
    # Sheriff event handlers
    def _on_sheriff_command_added (self, deputy, command):
        """Create per-command state and log the addition to the sheriff buffer."""
        extradata = CommandExtraData (self.sheriff_tb.get_tag_table())
        self._cmd_extradata[command] = extradata
        self._add_text_to_buffer (self.sheriff_tb, now_str() +
                "Added [%s] [%s] [%s]\n" % (deputy.name, command.command_id, command.exec_str))
    def _on_sheriff_command_removed (self, deputy, command):
        """Drop per-command state and log the removal."""
        del self._cmd_extradata[command]
        self._add_text_to_buffer (self.sheriff_tb, now_str() +
                "[%s] removed [%s] [%s]\n" % (deputy.name, command.command_id, command.exec_str))
    def _on_sheriff_command_status_changed (self, cmd,
            old_status, new_status):
        """Log a command status transition to the sheriff buffer."""
        self._add_text_to_buffer (self.sheriff_tb, now_str() +
                "[%s] new status: %s\n" % (cmd.command_id, new_status))
    def on_tb_populate_menu(self,textview, menu):
        """Append a separator and a "Clear" item to the right-click popup."""
        sep = gtk.SeparatorMenuItem()
        menu.append (sep)
        sep.show()
        mi = gtk.MenuItem ("_Clear")
        menu.append(mi)
        mi.connect ("activate", self._tb_clear)
        mi.show()
    def _tb_clear(self,menu):
        """Erase the currently displayed buffer ("Clear" menu action)."""
        tb = self.stdout_textview.get_buffer ()
        start_iter = tb.get_start_iter ()
        end_iter = tb.get_end_iter ()
        tb.delete (start_iter, end_iter)
    def set_output_rate_limit(self, max_kb_per_sec):
        """Set the per-command output cap in KB/s.

        The derived cap is per 2.5 s window (five 500 ms buckets):
        kb/s * 1000 bytes * 2.5 s."""
        self.max_kb_per_sec = max_kb_per_sec
        self.max_chars_per_2500_ms = int(max_kb_per_sec * 1000 * 2.5)
    def get_output_rate_limit(self):
        """Return the configured cap in KB/s."""
        return self.max_kb_per_sec
    def load_settings(self, save_map):
        """Restore console appearance/limit settings from *save_map*
        (keys are optional; missing keys leave the current value)."""
        if "console_rate_limit" in save_map:
            self.set_output_rate_limit(save_map["console_rate_limit"])
        if "console_background_color" in save_map:
            self.set_background_color(gtk.gdk.Color(save_map["console_background_color"]))
        if "console_text_color" in save_map:
            self.set_text_color(gtk.gdk.Color(save_map["console_text_color"]))
        if "console_font" in save_map:
            self.set_font(save_map["console_font"])
    def save_settings(self, save_map):
        """Write the console's current settings into *save_map*."""
        save_map["console_rate_limit"] = self.max_kb_per_sec
        save_map["console_background_color"] = self.base_color.to_string()
        save_map["console_text_color"] = self.text_color.to_string()
        save_map["console_font"] = self.font_str
    def on_adj_changed (self, adj):
        """Keep the view pinned to the bottom while it was at the bottom."""
        if adj.get_data ("scrolled-to-end"):
            adj.set_value (adj.upper - adj.page_size)
    def on_adj_value_changed (self, adj):
        """Record whether the user has the view scrolled to the end."""
        adj.set_data ("scrolled-to-end", adj.value == adj.upper-adj.page_size)
    def on_procman_printf (self, channel, data):
        """Handle a PMD_PRINTF message: append the command's output,
        enforcing the per-2.5 s character budget (excess is counted and
        reported by the upkeep timer)."""
        msg = printf_t.decode (data)
        if msg.sheriff_id:
            try:
                cmd = self.sheriff.get_command_by_sheriff_id(msg.sheriff_id)
            except KeyError:
                # TODO
                return
            extradata = self._cmd_extradata.get(cmd, None)
            if not extradata:
                return
            # rate limit
            msg_count = sum (extradata.printf_keep_count)
            if msg_count >= self.max_chars_per_2500_ms:
                extradata.printf_drop_count += len (msg.text)
                return
            tokeep = min (self.max_chars_per_2500_ms - msg_count, len (msg.text))
            extradata.printf_keep_count[-1] += tokeep
            if len (msg.text) > tokeep:
                toadd = msg.text[:tokeep]
            else:
                toadd = msg.text
            self._add_text_to_buffer (extradata.tb, toadd)
    def show_command_buffer(self, cmd):
        """Switch the textview to *cmd*'s output buffer (if known)."""
        extradata = self._cmd_extradata.get(cmd, None)
        if extradata:
            self.stdout_textview.set_buffer (extradata.tb)
    def show_sheriff_buffer(self):
        """Switch the textview back to the sheriff event buffer."""
        self.stdout_textview.set_buffer (self.sheriff_tb)
| Morbotic/pronto-distro | externals/libbot-drc/bot2-procman/python/src/bot_procman/sheriff_gtk/command_console.py | Python | lgpl-2.1 | 10,381 |
__author__ = 'lorcat'
import re
import imp
from PyQt4 import QtGui, QtCore
from app.gui.ui.ui_profiledialog import Ui_ProfileDialog
from app.common import Tester
from app.config.keys import *
from app.config import configuration as config
# class enabling profile selection
class ProfileDialog(QtGui.QDialog, Tester):
    """Modal dialog that scans the profiles directory for profile*.py
    files, lets the user pick one, and loads it as a Python module
    (available via the .module property after the dialog finishes)."""
    # icon
    PROGRAM_ICON = 'program_icon.png'
    def __init__(self, parent=None):
        super(ProfileDialog, self).__init__(parent=parent)
        Tester.__init__(self)
        self.debug("Initialization")
        self.__init_variables()
        self.__init_ui()
        self.__init_events()
    def __init_variables(self):
        """
        Initializes variables used within the class and as output
        :return:
        """
        self.debug("Initialization of variables")
        # Set when a profile fails to import (see error_state).
        self.__error = False
        # store profiles as (QFileInfo)
        self._profiles = []
        # gui
        self._ui = None
        # result - loaded module
        self._module = None
    @property
    def module(self):
        # Module selected and loaded on dialog exit, or None.
        return self._module
    @property
    def error_state(self):
        # True if a profile failed to import during scanning.
        return self.__error
    def __init_ui(self, path=None):
        """
        Initialization of the gui, fills elements with data
        :return:
        """
        self.debug("Initialization of UI")
        # path for the profiles
        # NOTE(review): the `path` parameter is immediately overwritten
        # here -- it is effectively unused.
        path = config.PROFILES[PROFILE_DIR]
        self.debug("Profile path ({})".format(path))
        # make an icon
        image_path = self._provideImagePath()
        if image_path is not None:
            pixmap = QtGui.QPixmap(image_path.absoluteFilePath())
            self.setWindowIcon(QtGui.QIcon(pixmap))
        self._ui = Ui_ProfileDialog()
        self._ui.setupUi(self)
        # NOTE(review): `dir` and `next` below shadow Python builtins.
        dir = QtCore.QDir(path)
        # get iterator on files
        dirit = QtCore.QDirIterator(dir, QtCore.QDirIterator.NoIteratorFlags)
        # get string list to process profiles
        sl = QtCore.QStringList()
        # parse directory structure, find passing files profile*.py
        # get information from these files
        while dirit.hasNext():
            next = dirit.next()
            finfo = QtCore.QFileInfo(next)
            # Only plain files whose name matches profile*.py (case-insensitive).
            if finfo.isFile() and re.match(".*profile[^\\\/]*.py$", str(finfo.filePath()).lower()):
                mod = self._loadModule(finfo)
                if self.test(mod):
                    self.info("Found a profile ({})".format(finfo.absoluteFilePath()))
                    try:
                        # Prefer the display name declared in the profile.
                        sl.append(mod.START[PROFILE_NAME])
                    except KeyError:
                        # Fall back to the file's base name.
                        sl.append(finfo.baseName())
                    self._profiles.append(finfo)
        if type(sl) is QtCore.QStringList:
            if len(sl):
                self._ui.lbPath.setText(dir.absolutePath())
                self._ui.lbPath.setToolTip("Path: {}".format(dir.absolutePath()))
                self._ui.lwFiles.insertItems(0, sl)
                self._ui.lwFiles.setCurrentRow(0)
    def __init_events(self):
        """
        Initialization of events working inside gui
        :return:
        """
        self.debug("Initialization of events")
        self.connect(self._ui.lwFiles, QtCore.SIGNAL("currentRowChanged(int)"), self.processProfileSelection)
        self.connect(self, QtCore.SIGNAL("finished(int)"), self.processExit)
    def processProfileSelection(self, index):
        """
        Processes selection of loaded module name
        :param index: int('index of self._profiles')
        :return:
        """
        # Currently only logs the selection; loading happens on exit.
        self.debug("Processes profile selection by index ({})".format(index))
    def _loadModule(self, finfo):
        """
        Loads specific modules
        :param finfo: QFileInfo()
        :return: module('loaded')
        """
        self.debug("Loads a module based on QFileInfo ({})".format(finfo.absolutePath()))
        res = None
        name = str(finfo.baseName())
        fp = pathname = desc = None
        try:
            fp, pathname, desc = imp.find_module(name, [str(finfo.absolutePath())])
        except ImportError:
            self.error("Error: cannot load profile '{}', please check path '{}'".format(name, finfo.absolutePath()))
            # Remember the failure so error_state reports it.
            self.__error = True
            return
        if self.test(fp):
            try:
                res = imp.load_module(name, fp, pathname, desc)
            finally:
                # imp.find_module opens the file; always close it.
                if fp:
                    fp.close()
        return res
    def processExit(self, code):
        """
        Function to load specific module on exit
        :param code: int('index of profile to load')
        :return:
        """
        berror = False
        if self.test(code):
            index = int(self._ui.lwFiles.currentIndex().row())
            if index >-1:
                mod = self._loadModule(self._profiles[index])
                if self.test(mod):
                    self._module = mod
                else:
                    berror = True
            else:
                berror = True
        if berror:
            # Without a profile the application cannot continue.
            self.error("No profile has been selected. Aborting..")
            QtGui.QApplication.instance().quit()
        self.deleteLater()
    def _provideImagePath(self):
        """
        Provides a reference to QFileInfo containing image file path for icon
        :param path: str()
        :return: None or QFileInfo()
        """
        res = None
        path = config.RESOURCES[RESOURCE_IMAGES]
        dir = QtCore.QDir(path)
        temp = QtCore.QFileInfo()
        temp.setFile(dir, self.PROGRAM_ICON)
        if temp.isFile():
            res = temp
        else:
            self.error("{}. No image file is present at the path ({})".format(self.__class__.__name__, path))
        return res
| lorcat/GEPACE5000 | app/gui/gui_profiledialog.py | Python | gpl-2.0 | 5,746 |
from __future__ import unicode_literals, print_function
import io
from decimal import Decimal
import simplejson
# Parser backend name passed to BeautifulSoup elsewhere in the package.
BS4_PARSER = 'html5lib'
def read_json(path):
    """Read *path* and parse it as JSON, decoding numbers as Decimal."""
    with open(path, 'r') as handle:
        raw = handle.read()
    return simplejson.loads(raw, use_decimal=True)
def write_json(path, data, dumps_params):
    """Serialize *data* with simplejson (using **dumps_params) and write
    it to *path* as UTF-8.  Returns the number of characters written."""
    serialized = simplejson.dumps(data, **dumps_params)
    with io.open(path, 'w', encoding='utf-8') as handle:
        handle.write(serialized)
    return len(serialized)
def parse_decimal(s):
    """Parse a decimal-comma number string (e.g. "3,14") into a Decimal."""
    normalized = s.replace(',', '.')
    return Decimal(normalized)
class ParseResultFix(object):
    """A proposed fix for a parsed route: a description shown to the user
    plus an action callable that mutates the routes collection."""
    def __init__(self, route_name, text, action):
        self.route_name = route_name
        self.text = text
        self.action = action
    def interact(self):
        """Print the fix description and ask for confirmation.

        Loops until 'y' or 'n' is entered; returns True for 'y'."""
        print(self.text)
        answer = None
        while answer not in ('y', 'n'):
            answer = input('Apply (y/n): ')
        return answer == 'y'
    def apply(self, routes):
        """Run the fix's action on *routes* unconditionally."""
        self.action(routes)
    def interact_apply(self, routes):
        """Prompt the user and apply only on confirmation.

        Returns True if the fix was applied, False otherwise."""
        if not self.interact():
            return False
        self.apply(routes)
        return True
| karamanolev/mtb-index | mtbbg/utils.py | Python | apache-2.0 | 1,119 |
#####################################################################
# -*- coding: utf-8 -*- #
# #
# Frets on Fire #
# Copyright (C) 2006 Sami Kyöstilä #
# 2008 myfingershurt #
# 2008 Glorandwarf #
# 2008 evilynux <evilynux@gmail.com> #
# #
# This program is free software; you can redistribute it and/or #
# modify it under the terms of the GNU General Public License #
# as published by the Free Software Foundation; either version 2 #
# of the License, or (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with this program; if not, write to the Free Software #
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, #
# MA 02110-1301, USA. #
#####################################################################
import logging
import os
import glob
import random
from fretwork.audio import Sound
from fofix.core.Font import Font
from fofix.core.Image import ImgDrawing
from fofix.core import Config
from fofix.core import Version
from fofix.core import Player
log = logging.getLogger(__name__)
# these constants define a few customized letters in the default font
# MFH - with the new simplified Font.py, no more custom glyphs... let's do
# a simple replacement here for now...
# Plain-ASCII stand-ins for the old custom font glyphs (the unicode
# control-character versions are kept below, commented out, for reference).
STAR1 = ' '
STAR2 = '*'
LEFT = '<'
RIGHT = '>'
STAR3 = STAR1
STAR4 = STAR2
#-STAR1 = unicode('\x10')
#-STAR2 = unicode('\x11')
#-LEFT = unicode('\x12')
#-RIGHT = unicode('\x13')
#-STAR3 = unicode('\x14') #Worldrave - Added new Star3
#-STAR4 = unicode('\x15') #Worldrave - Added new Star4
class Data(object):
"""A collection of globally used data resources such as fonts and sound effects."""
    def __init__(self, resource, svg):
        """
        Load all globally shared resources (images, fonts, sounds) for the
        currently configured theme.

        :param resource: asynchronous resource loader providing ``fileName``
            (path resolution) and ``load`` (deferred/synchronous loading).
        :param svg: image rendering context handed to every ImgDrawing.
        """
        self.logClassInits = Config.get("game", "log_class_inits")
        if self.logClassInits == 1:
            log.debug("Data class init (Data.py)...")
        self.logLoadings = Config.get("game", "log_loadings")
        self.logImageNotFound = Config.get("log", "log_image_not_found")
        self.resource = resource
        self.svg = svg
        self.sfxVolume = Config.get("audio", "SFX_volume")
        self.crowdVolume = Config.get("audio", "crowd_volume")
        # Get theme
        themename = Config.get("coffee", "themename")
        self.themeLabel = themename
        self.themeCoOp = False
        self.players = None
        self.players = Player.loadPlayers()
        # myfingershurt: check for existence of theme path
        themepath = os.path.join(Version.dataPath(), "themes")
        self.themepath = themepath
        self.path = Version.dataPath()
        # A theme is considered valid only if it ships notes/notes.png.
        if not self.checkImgDrawing(os.path.join("themes", themename, "notes", "notes.png")):
            # myfingershurt: here need to ensure an existing theme is selected
            themes = []
            defaultTheme = None # myfingershurt
            allthemes = os.listdir(themepath)
            for name in allthemes:
                if self.checkImgDrawing(os.path.join("themes", name, "notes", "notes.png")):
                    themes.append(name)
                    if name == "MegaLight V4": # myfingershurt
                        defaultTheme = name # myfingershurt
            # NOTE(review): assumes at least one valid theme exists; an empty
            # `themes` list would raise IndexError here — confirm.
            if defaultTheme != "MegaLight V4": # myfingershurt
                defaultTheme = themes[0] # myfingershurt
            # not a valid theme if notes.png isn't there! Force default theme:
            Config.set("coffee", "themename", defaultTheme)
            # re-init Data with new default
            themename = defaultTheme
            self.themeLabel = themename
        if not os.path.exists(os.path.join(Version.dataPath(), "themes", themename, "vocals")):
            self.vocalPath = "vocals"
        else:
            self.vocalPath = os.path.join("themes", themename, "vocals")
        self.theme = 2
        self.themeCoOp = True
        # from our current viewport's constant 3:4 aspect ratio (which is
        # always stretched to fill the video resolution)
        self.fontScreenBottom = 0.75
        self.loadPartImages()
        # myfingershurt: multi-OS compatibility file access fixes using os.path.join()
        # load font customization images
        # Worldrave - Use new defined Star3 and star4. Using star1 and star2 as
        # a fallback.
        # MFH - no more custom glyphs, these are wasting memory.
        # MFH - but we do need these star1-4 images anyway. Leaving them
        # loaded here in the Data object.
        self.loadImgDrawing(self, "star1", os.path.join(
            "themes", themename, "star1.png"), textureSize=(128, 128))
        self.loadImgDrawing(self, "star2", os.path.join(
            "themes", themename, "star2.png"), textureSize=(128, 128))
        # MFH - let's not rely on errors here if we don't have to...
        if not self.loadImgDrawing(self, "star3", os.path.join("themes", themename, "star3.png"), textureSize=(128, 128)):
            self.star3 = self.star1
        if not self.loadImgDrawing(self, "star4", os.path.join("themes", themename, "star4.png"), textureSize=(128, 128)):
            self.star4 = self.star2
        # Optional "perfect"/"full combo" star art; fall back to plain stars.
        if self.loadImgDrawing(self, "starPerfect", os.path.join("themes", themename, "starperfect.png"), textureSize=(128, 128)):
            self.perfectStars = True
            self.maskStars = False
        else:
            self.starPerfect = self.star2
            self.fcStars = False
            self.starFC = self.star2
            self.maskStars = True
            self.perfectStars = False
        if self.perfectStars:
            if self.loadImgDrawing(self, "starFC", os.path.join("themes", themename, "starfc.png"), textureSize=(128, 128)):
                self.fcStars = True
            else:
                self.starFC = self.starPerfect
                self.fcStars = False
        # load misc images
        self.loadImgDrawing(self, "loadingImage", os.path.join(
            "themes", themename, "loadingbg.png"), textureSize=(256, 256))
        self.loadImgDrawing(self, "optionsBG", os.path.join(
            "themes", themename, "menu", "optionsbg.png"))
        if self.loadImgDrawing(self, "submenuSelect", os.path.join("themes", themename, "submenuselect.png")):
            subSelectImgW = self.submenuSelect.width1()
            self.submenuSelectFound = True
            self.subSelectWFactor = 640.000 / subSelectImgW
            self.subSelectImgH = self.submenuSelect.height1()
        else:
            self.submenuSelectFound = False
            self.loadImgDrawing(self, "submenuSelect", os.path.join(
                "themes", themename, "menu", "selected.png"))
            self.subSelectWFactor = 0
        # load all the data in parallel
        # asciiOnly = not bool(Language.language) or Language.language == "Custom"
        # reversed = _("__lefttoright__") == "__righttoleft__" and True or False
        asciiOnly = True
        reversed = False
        scale = 1
        # evilynux - Load bigger fonts so they're nicer when scaled, scaling
        # readjusted
        fontSize = [44, 132, 34, 32, 30]
        w, h = [int(s) for s in Config.get("video", "resolution").split("x")]
        aspectRatio = float(w) / float(h)
        # Each entry: [loader slot, attribute name, font file, size index].
        self.fontList = [
            ["font1", "font", "default.ttf", fontSize[4]],
            ["font2", "bigFont", "title.ttf", fontSize[1]],
            ["font3", "pauseFont", "pause.ttf", fontSize[2]],
            ["font4", "scoreFont", "score.ttf", fontSize[3]],
            ["font5", "streakFont", "streak.ttf", fontSize[3]],
            ["font6", "loadingFont", "loading.ttf", fontSize[3]],
            ["font7", "songFont", "song.ttf", fontSize[4]],
            ["font8", "songListFont", "songlist.ttf", fontSize[3]],
            ["font9", "shadowFont", "songlist.ttf", fontSize[3]],
            ["font10", "streakFont2", "streakphrase.ttf", fontSize[2]]
        ]
        # Per font: prefer the theme's own file, then the theme's default.ttf,
        # then the built-in data/fonts/default.ttf.  Loaded synchronously, so
        # the lambdas run immediately within each iteration.
        for f in self.fontList:
            if self.fileExists(os.path.join("themes", themename, "fonts", f[2])):
                fn = resource.fileName(os.path.join(
                    "themes", themename, "fonts", f[2]))
                f[0] = lambda: Font(fn, f[3], scale=scale * .5, reversed=reversed,
                                    systemFont=not asciiOnly, outline=False, aspectRatio=aspectRatio)
                resource.load(self, f[1], f[0], synch=True)
            elif self.fileExists(os.path.join("themes", themename, "fonts", "default.ttf")):
                log.debug("Theme font not found: " + f[2])
                fn = resource.fileName(os.path.join(
                    "themes", themename, "fonts", "default.ttf"))
                f[0] = lambda: Font(fn, f[3], scale=scale * .5, reversed=reversed,
                                    systemFont=not asciiOnly, outline=False, aspectRatio=aspectRatio)
                resource.load(self, f[1], f[0], synch=True)
            else:
                log.debug(
                    "Default theme font not found: %s - using built-in default" % str(f[2]))
                fn = resource.fileName(os.path.join("fonts", "default.ttf"))
                f[0] = lambda: Font(fn, f[3], scale=scale * .5, reversed=reversed,
                                    systemFont=not asciiOnly, outline=False, aspectRatio=aspectRatio)
                resource.load(self, f[1], f[0], synch=True)
        self.fontDict = {"font": self.font, "bigFont": self.bigFont, "pauseFont": self.pauseFont, "scoreFont": self.scoreFont,
                         "streakFont": self.streakFont, "songFont": self.songFont, "streakFont2": self.streakFont2,
                         "songListFont": self.songListFont, "shadowFont": self.shadowFont, "loadingFont": self.loadingFont}
        assert self.fontDict['font'] == self.font
        # load sounds asynchronously
        resource.load(self, "screwUpsounds", self.loadScrewUpsounds)
        resource.load(self, "screwUpsoundsBass", self.loadScrewUpsoundsBass)
        resource.load(self, "screwUpsoundsDrums", self.loadScrewUpsoundsDrums)
        resource.load(self, "acceptSounds", self.loadAcceptSounds)
        resource.load(self, "cancelSounds", self.loadBackSounds)
        # loadSoundEffect asynchronously
        self.syncSounds = [
            ["bassDrumSound", "bassdrum.ogg"],
            ["battleUsedSound", "battleused.ogg"],
            ["CDrumSound", "crash.ogg"],
            ["clapSound", "clapsound.ogg"],
            ["coOpFailSound", "coopfail.ogg"],
            #["crowdSound","crowdcheers.ogg"],
            ["failSound", "failsound.ogg"],
            ["rescueSound", "rescue.ogg"],
            ["rockSound", "rocksound.ogg"],
            ["selectSound1", "select1.ogg"],
            ["selectSound2", "select2.ogg"],
            ["selectSound3", "select3.ogg"],
            ["starActivateSound", "staractivate.ogg"],
            ["starDeActivateSound", "stardeactivate.ogg"],
            ["starDingSound", "starding.ogg"],
            ["starLostSound", "starlost.ogg"],
            ["starReadySound", "starpowerready.ogg"],
            ["starSound", "starpower.ogg"],
            ["startSound", "start.ogg"],
            ["T1DrumSound", "tom01.ogg"],
            ["T2DrumSound", "tom02.ogg"],
            ["T3DrumSound", "tom03.ogg"]
        ]
        # NOTE(review): the loop variable is an *instance attribute*
        # (self.sounds) — works, but looks accidental; left as-is.
        for self.sounds in self.syncSounds:
            if self.fileExists(os.path.join("themes", themename, "sounds", self.sounds[1])):
                self.loadSoundEffect(self, self.sounds[0], os.path.join(
                    "themes", themename, "sounds", self.sounds[1]))
            elif self.fileExists(os.path.join("sounds", self.sounds[1])):
                log.debug("Theme sound not found: " + self.sounds[1])
                self.loadSoundEffect(
                    self, self.sounds[0], os.path.join("sounds", self.sounds[1]))
            else:
                log.warn("File " + self.sounds[1] +
                         " not found using default instead.")
                self.loadSoundEffect(
                    self, self.sounds[0], os.path.join("sounds", "default.ogg"))
        # TODO: Simplify crowdSound stuff so it can join the rest of us.
        # MFH - fallback on sounds/crowdcheers.ogg, and then starpower.ogg.
        # Note if the fallback crowdcheers was used or not.
        if self.fileExists(os.path.join("themes", themename, "sounds", "crowdcheers.ogg")):
            self.loadSoundEffect(self, "crowdSound", os.path.join(
                "themes", themename, "sounds", "crowdcheers.ogg"), crowd=True)
            self.cheerSoundFound = 2
        elif self.fileExists(os.path.join("sounds", "crowdcheers.ogg")):
            self.loadSoundEffect(self, "crowdSound", os.path.join(
                "sounds", "crowdcheers.ogg"), crowd=True)
            self.cheerSoundFound = 1
            log.warn(
                themename + "/sounds/crowdcheers.ogg not found -- using data/sounds/crowdcheers.ogg instead.")
        else:
            self.cheerSoundFound = 0
            log.warn("crowdcheers.ogg not found -- no crowd cheering.")
def loadPartImages(self):
self.partImages = []
self.partImages.append(self.loadImgDrawing(None, "guitar", os.path.join(
"themes", self.themeLabel, "common", "guitar.png")))
self.partImages.append(self.loadImgDrawing(None, "rhythm", os.path.join(
"themes", self.themeLabel, "common", "rhythm.png")))
self.partImages.append(self.loadImgDrawing(None, "bass", os.path.join(
"themes", self.themeLabel, "common", "bass.png")))
self.partImages.append(self.loadImgDrawing(None, "lead", os.path.join(
"themes", self.themeLabel, "common", "lead.png")))
self.partImages.append(self.loadImgDrawing(None, "drum", os.path.join(
"themes", self.themeLabel, "common", "drum.png")))
self.partImages.append(self.loadImgDrawing(None, "vocal", os.path.join(
"themes", self.themeLabel, "common", "vocal.png")))
# MFH - single function to go through all screwup sound objects and set
# object volume to the given volume
def SetAllScrewUpSoundFxObjectVolumes(self, volume):
for s in self.screwUpsounds:
s.setVolume(volume)
for s in self.screwUpsoundsBass:
s.setVolume(volume)
for s in self.screwUpsoundsDrums:
s.setVolume(volume)
# MFH - single function to go through all sound objects (and iterate
# through all sound lists) and set object volume to the given volume
def SetAllSoundFxObjectVolumes(self, volume=None):
# MFH TODO - set every sound object's volume here...
if volume is None:
self.sfxVolume = Config.get("audio", "SFX_volume")
self.crowdVolume = Config.get("audio", "crowd_volume")
volume = self.sfxVolume
self.starDingSound.setVolume(volume)
self.bassDrumSound.setVolume(volume)
self.T1DrumSound.setVolume(volume)
self.T2DrumSound.setVolume(volume)
self.T3DrumSound.setVolume(volume)
self.CDrumSound.setVolume(volume)
for s in self.acceptSounds:
s.setVolume(volume)
for s in self.cancelSounds:
s.setVolume(volume)
self.rockSound.setVolume(volume)
self.starDeActivateSound.setVolume(volume)
self.starActivateSound.setVolume(volume)
self.battleUsedSound.setVolume(volume)
self.rescueSound.setVolume(volume)
self.coOpFailSound.setVolume(volume)
self.crowdSound.setVolume(self.crowdVolume)
self.starReadySound.setVolume(volume)
self.clapSound.setVolume(volume)
self.failSound.setVolume(volume)
self.starSound.setVolume(volume)
self.startSound.setVolume(volume)
self.selectSound1.setVolume(volume)
self.selectSound2.setVolume(volume)
self.selectSound3.setVolume(volume)
def loadSoundEffect(self, target, name, fileName, crowd=False):
volume = self.sfxVolume
if crowd:
volume = self.crowdVolume
fileName = self.resource.fileName(fileName)
self.resource.load(target, name, lambda: Sound(
fileName), onLoad=lambda s: s.setVolume(volume))
# MFH - auto random sound enumeration
def determineNumSounds(self, soundPath, soundPrefix, soundExtension=".ogg"):
soundNum = 1
while self.fileExists(os.path.join(soundPath, "%s%d%s" % (soundPrefix, soundNum, soundExtension))):
soundNum += 1
return soundNum - 1
def getSoundObjectList(self, soundPath, soundPrefix, numSounds, soundExtension=".ogg"): # MFH
log.debug("{0}1{2} - {0}{1}{2} found in {3}".format(soundPrefix,
numSounds, soundExtension, soundPath))
sounds = []
for i in xrange(1, numSounds + 1):
filePath = os.path.join(soundPath, "%s%d%s" %
(soundPrefix, i, soundExtension))
soundObject = Sound(self.resource.fileName(filePath))
sounds.append(soundObject)
return sounds
# MFH - adding optional support for random choice between two back sounds
def loadBackSounds(self):
soundPathTheme = os.path.join("themes", self.themeLabel, "sounds")
soundPath = soundPathTheme
soundPrefix = "back"
numSounds = self.determineNumSounds(soundPath, soundPrefix)
if numSounds > 0:
return self.getSoundObjectList(soundPath, soundPrefix, numSounds)
else:
return [Sound(self.resource.fileName(os.path.join("themes", self.themeLabel, "sounds", "out.ogg")))]
def loadAcceptSounds(self):
soundPathTheme = os.path.join("themes", self.themeLabel, "sounds")
soundPath = soundPathTheme
soundPrefix = "accept"
numSounds = self.determineNumSounds(soundPath, soundPrefix)
if numSounds > 0:
return self.getSoundObjectList(soundPath, soundPrefix, numSounds)
else:
if self.theme == 0 or self.theme == 1: # GH2 or GH3
return [Sound(self.resource.fileName(os.path.join("themes", self.themeLabel, "sounds", "in.ogg")))]
elif self.theme == 2:
return [Sound(self.resource.fileName(os.path.join("themes", self.themeLabel, "sounds", "action.ogg")))]
def loadScrewUpsounds(self):
soundPathTheme = os.path.join("themes", self.themeLabel, "sounds")
soundPathData = "sounds"
soundPath = soundPathTheme
soundPrefix = "guitscw"
numSounds = self.determineNumSounds(soundPath, soundPrefix)
if numSounds == 0:
soundPath = soundPathData
numSounds = self.determineNumSounds(soundPath, soundPrefix)
return self.getSoundObjectList(soundPath, soundPrefix, numSounds)
def loadScrewUpsoundsBass(self):
soundPathTheme = os.path.join("themes", self.themeLabel, "sounds")
soundPathData = "sounds"
soundPath = soundPathTheme
soundPrefix = "bassscw"
numSounds = self.determineNumSounds(soundPath, soundPrefix)
if numSounds == 0:
soundPath = soundPathData
numSounds = self.determineNumSounds(soundPath, soundPrefix)
return self.getSoundObjectList(soundPath, soundPrefix, numSounds)
def loadScrewUpsoundsDrums(self):
soundPathTheme = os.path.join("themes", self.themeLabel, "sounds")
soundPathData = "sounds"
soundPath = soundPathTheme
soundPrefix = "drumscw"
numSounds = self.determineNumSounds(soundPath, soundPrefix)
if numSounds == 0:
soundPath = soundPathData
numSounds = self.determineNumSounds(soundPath, soundPrefix)
return self.getSoundObjectList(soundPath, soundPrefix, numSounds)
def loadSyncsounds(self):
return [Sound(self.resource.fileName("sync%d.ogg" % i)) for i in range(1, 2)]
def checkImgDrawing(self, fileName):
return self.getImgDrawing(fileName, False)
    def getImgDrawing(self, fileName, openImage=True):
        """Locate (and optionally load) an image in any of the data paths.

        :param fileName: path relative to a data path; the extension may be
            omitted, in which case any extension is matched via glob.
        :param openImage: when True, return an ImgDrawing (or False if no file
            could be found/decoded); when False, only report existence.
        :return: ImgDrawing on success, True for a bare existence check,
            False when nothing usable was found.
        """
        imgDrawing = None
        for dataPath in self.resource.dataPaths:
            fileName1 = os.path.join(dataPath, fileName)
            if self.logLoadings == 1:
                if openImage:
                    log.info("Trying to load image: %s" % fileName1)
                else:
                    log.info("Checking image: %s" % fileName1)
            # check if fileName1 exists (has extension)
            if os.path.exists(fileName1):
                if openImage:
                    try:
                        imgDrawing = ImgDrawing(self.svg, fileName1)
                        return imgDrawing
                    except IOError:
                        log.warn("Unable to load image file: %s" % fileName1)
                    except OverflowError:
                        log.warn("Unable to read image file: %s" % fileName1)
                else:
                    return True
            else:
                # find extension
                fileName1 = os.path.splitext(fileName1)[0]
                # glob parses [] but those are legal chars on Windows, so we must escape them.
                # it must be done like this so replacements are not mangled
                # by other replacements.
                replacements = {
                    "[": "[[]",
                    "]": "[]]"
                }
                fileName1 = "".join([replacements.get(c, c)
                                     for c in fileName1])
                files = glob.glob('%s.*' % fileName1)
                if openImage:
                    # try every extension match until one decodes successfully
                    for f in files:
                        try:
                            imgDrawing = ImgDrawing(self.svg, f)
                            return imgDrawing
                        except IOError:
                            log.warn("Unable to load image file: %s" % f)
                elif len(files) > 0:
                    return True
        # image not found
        if self.logImageNotFound:
            log.warn("Image not found: %s" % fileName)
        return False
def loadImgDrawing(self, target, name, fileName, textureSize=None):
"""
Load an SVG drawing synchronously.
@param target: An object that will own the drawing
@param name: The name of the attribute the drawing will be assigned to
@param fileName: The name of the file in the data directory
@param textureSize: Either None or (x, y), in which case the file will
be rendered to an x by y texture
@return: L{ImgDrawing} instance
"""
imgDrawing = self.getImgDrawing(fileName)
if not imgDrawing:
if target and name:
setattr(target, name, None)
else:
log.error("Image not found: " + fileName)
return None
if target:
drawing = self.resource.load(
target, name, lambda: imgDrawing, synch=True)
else:
drawing = imgDrawing
return drawing
def loadAllImages(self, target, directory, prefix="img_", textureSize=None): # akedrou
"""
Loads all images found in a folder to a given target.
@param target: An object that will own the drawings
@param directory: The directory that will be searched for image files.
@param textureSize: Either None or (x, y), in which case the files will
be rendered to an x by y texture
"""
if not os.path.isdir(os.path.join(self.path, directory)):
return None
imgDict = {}
for file in os.listdir(os.path.join(self.path, directory)):
if file == "thumbs.db" or file == "Thumbs.db":
continue
elif file[0] == ".":
continue
elif os.path.isdir(os.path.join(self.path, directory, file)):
continue
name = os.path.splitext(file)[0]
name = prefix + name
img = self.loadImgDrawing(
target, name, os.path.join(directory, file), textureSize)
if img and target is None:
imgDict[name] = img
if target is None and len(imgDict) > 0:
return imgDict
# glorandwarf: changed name to getPath
def getPath(self, fileName):
return self.resource.fileName(fileName)
# myfingershurt: still need this fileexists function:
def fileExists(self, fileName):
fileName = self.resource.fileName(fileName)
return os.path.exists(fileName)
# MFH - acceptSound and selectSound will now be merged into either 10
# random sounds or just the acceptSound as a fallback:
def getAcceptSound(self):
"""@return: A randomly chosen selection sound."""
return random.choice(self.acceptSounds)
acceptSound = property(getAcceptSound)
def getBackSound(self):
"""@return: A randomly chosen selection sound."""
return random.choice(self.cancelSounds)
cancelSound = property(getBackSound)
def getSelectSound(self):
"""@return: A randomly chosen selection sound."""
return random.choice([self.selectSound1, self.selectSound2, self.selectSound3])
selectSound = property(getSelectSound)
def getScrewUpSound(self):
"""@return: A randomly chosen screw-up sound."""
return random.choice(self.screwUpsounds)
def getScrewUpSoundBass(self):
"""@return: A randomly chosen screw-up sound."""
return random.choice(self.screwUpsoundsBass)
# myfingershurt: drums screw up sounds
def getScrewUpSoundDrums(self):
"""@return: A randomly chosen screw-up sound."""
return random.choice(self.screwUpsoundsDrums)
screwUpSound = property(getScrewUpSound)
screwUpSoundBass = property(getScrewUpSoundBass)
# myfingershurt: drum screw up sounds
screwUpSoundDrums = property(getScrewUpSoundDrums)
def essentialResourcesLoaded(self):
"""return: True if essential resources such as the font have been loaded."""
return bool(self.font and self.bigFont)
def resourcesLoaded(self):
"""return: True if all the resources have been loaded."""
return None not in self.__dict__.values()
| wrzwicky/fofix | fofix/core/Data.py | Python | gpl-2.0 | 27,183 |
# NOTE(review): removed two stray, unresolved "<<<<<<< HEAD" merge-conflict
# markers that preceded the module docstring (they are Python syntax errors).
"""This module tests SyntaxErrors.
Here's an example of the sort of thing that is tested.
>>> def f(x):
... global x
Traceback (most recent call last):
SyntaxError: name 'x' is parameter and global
The tests are all raise SyntaxErrors. They were created by checking
each C call that raises SyntaxError. There are several modules that
raise these exceptions-- ast.c, compile.c, future.c, pythonrun.c, and
symtable.c.
The parser itself outlaws a lot of invalid syntax. None of these
errors are tested here at the moment. We should add some tests; since
there are infinitely many programs with invalid syntax, we would need
to be judicious in selecting some.
The compiler generates a synthetic module name for code executed by
doctest. Since all the code comes from the same module, a suffix like
[1] is appended to the module name, As a consequence, changing the
order of tests in this module means renumbering all the errors after
it. (Maybe we should enable the ellipsis option for these tests.)
In ast.c, syntax errors are raised by calling ast_error().
Errors from set_context():
>>> obj.None = 1
Traceback (most recent call last):
SyntaxError: invalid syntax
>>> None = 1
Traceback (most recent call last):
SyntaxError: can't assign to keyword
It's a syntax error to assign to the empty tuple. Why isn't it an
error to assign to the empty list? It will always raise some error at
runtime.
>>> () = 1
Traceback (most recent call last):
SyntaxError: can't assign to ()
>>> f() = 1
Traceback (most recent call last):
SyntaxError: can't assign to function call
>>> del f()
Traceback (most recent call last):
SyntaxError: can't delete function call
>>> a + 1 = 2
Traceback (most recent call last):
SyntaxError: can't assign to operator
>>> (x for x in x) = 1
Traceback (most recent call last):
SyntaxError: can't assign to generator expression
>>> 1 = 1
Traceback (most recent call last):
SyntaxError: can't assign to literal
>>> "abc" = 1
Traceback (most recent call last):
SyntaxError: can't assign to literal
>>> b"" = 1
Traceback (most recent call last):
SyntaxError: can't assign to literal
>>> `1` = 1
Traceback (most recent call last):
SyntaxError: invalid syntax
If the left-hand side of an assignment is a list or tuple, an illegal
expression inside that contain should still cause a syntax error.
This test just checks a couple of cases rather than enumerating all of
them.
>>> (a, "b", c) = (1, 2, 3)
Traceback (most recent call last):
SyntaxError: can't assign to literal
>>> [a, b, c + 1] = [1, 2, 3]
Traceback (most recent call last):
SyntaxError: can't assign to operator
>>> a if 1 else b = 1
Traceback (most recent call last):
SyntaxError: can't assign to conditional expression
From compiler_complex_args():
>>> def f(None=1):
... pass
Traceback (most recent call last):
SyntaxError: invalid syntax
From ast_for_arguments():
>>> def f(x, y=1, z):
... pass
Traceback (most recent call last):
SyntaxError: non-default argument follows default argument
>>> def f(x, None):
... pass
Traceback (most recent call last):
SyntaxError: invalid syntax
>>> def f(*None):
... pass
Traceback (most recent call last):
SyntaxError: invalid syntax
>>> def f(**None):
... pass
Traceback (most recent call last):
SyntaxError: invalid syntax
From ast_for_funcdef():
>>> def None(x):
... pass
Traceback (most recent call last):
SyntaxError: invalid syntax
From ast_for_call():
>>> def f(it, *varargs):
... return list(it)
>>> L = range(10)
>>> f(x for x in L)
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
>>> f(x for x in L, 1)
Traceback (most recent call last):
SyntaxError: Generator expression must be parenthesized if not sole argument
>>> f((x for x in L), 1)
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
>>> f(i0, i1, i2, i3, i4, i5, i6, i7, i8, i9, i10, i11,
... i12, i13, i14, i15, i16, i17, i18, i19, i20, i21, i22,
... i23, i24, i25, i26, i27, i28, i29, i30, i31, i32, i33,
... i34, i35, i36, i37, i38, i39, i40, i41, i42, i43, i44,
... i45, i46, i47, i48, i49, i50, i51, i52, i53, i54, i55,
... i56, i57, i58, i59, i60, i61, i62, i63, i64, i65, i66,
... i67, i68, i69, i70, i71, i72, i73, i74, i75, i76, i77,
... i78, i79, i80, i81, i82, i83, i84, i85, i86, i87, i88,
... i89, i90, i91, i92, i93, i94, i95, i96, i97, i98, i99,
... i100, i101, i102, i103, i104, i105, i106, i107, i108,
... i109, i110, i111, i112, i113, i114, i115, i116, i117,
... i118, i119, i120, i121, i122, i123, i124, i125, i126,
... i127, i128, i129, i130, i131, i132, i133, i134, i135,
... i136, i137, i138, i139, i140, i141, i142, i143, i144,
... i145, i146, i147, i148, i149, i150, i151, i152, i153,
... i154, i155, i156, i157, i158, i159, i160, i161, i162,
... i163, i164, i165, i166, i167, i168, i169, i170, i171,
... i172, i173, i174, i175, i176, i177, i178, i179, i180,
... i181, i182, i183, i184, i185, i186, i187, i188, i189,
... i190, i191, i192, i193, i194, i195, i196, i197, i198,
... i199, i200, i201, i202, i203, i204, i205, i206, i207,
... i208, i209, i210, i211, i212, i213, i214, i215, i216,
... i217, i218, i219, i220, i221, i222, i223, i224, i225,
... i226, i227, i228, i229, i230, i231, i232, i233, i234,
... i235, i236, i237, i238, i239, i240, i241, i242, i243,
... i244, i245, i246, i247, i248, i249, i250, i251, i252,
... i253, i254, i255)
Traceback (most recent call last):
SyntaxError: more than 255 arguments
The actual error cases counts positional arguments, keyword arguments,
and generator expression arguments separately. This test combines the
three.
>>> f(i0, i1, i2, i3, i4, i5, i6, i7, i8, i9, i10, i11,
... i12, i13, i14, i15, i16, i17, i18, i19, i20, i21, i22,
... i23, i24, i25, i26, i27, i28, i29, i30, i31, i32, i33,
... i34, i35, i36, i37, i38, i39, i40, i41, i42, i43, i44,
... i45, i46, i47, i48, i49, i50, i51, i52, i53, i54, i55,
... i56, i57, i58, i59, i60, i61, i62, i63, i64, i65, i66,
... i67, i68, i69, i70, i71, i72, i73, i74, i75, i76, i77,
... i78, i79, i80, i81, i82, i83, i84, i85, i86, i87, i88,
... i89, i90, i91, i92, i93, i94, i95, i96, i97, i98, i99,
... i100, i101, i102, i103, i104, i105, i106, i107, i108,
... i109, i110, i111, i112, i113, i114, i115, i116, i117,
... i118, i119, i120, i121, i122, i123, i124, i125, i126,
... i127, i128, i129, i130, i131, i132, i133, i134, i135,
... i136, i137, i138, i139, i140, i141, i142, i143, i144,
... i145, i146, i147, i148, i149, i150, i151, i152, i153,
... i154, i155, i156, i157, i158, i159, i160, i161, i162,
... i163, i164, i165, i166, i167, i168, i169, i170, i171,
... i172, i173, i174, i175, i176, i177, i178, i179, i180,
... i181, i182, i183, i184, i185, i186, i187, i188, i189,
... i190, i191, i192, i193, i194, i195, i196, i197, i198,
... i199, i200, i201, i202, i203, i204, i205, i206, i207,
... i208, i209, i210, i211, i212, i213, i214, i215, i216,
... i217, i218, i219, i220, i221, i222, i223, i224, i225,
... i226, i227, i228, i229, i230, i231, i232, i233, i234,
... i235, i236, i237, i238, i239, i240, i241, i242, i243,
... (x for x in i244), i245, i246, i247, i248, i249, i250, i251,
... i252=1, i253=1, i254=1, i255=1)
Traceback (most recent call last):
SyntaxError: more than 255 arguments
>>> f(lambda x: x[0] = 3)
Traceback (most recent call last):
SyntaxError: lambda cannot contain assignment
The grammar accepts any test (basically, any expression) in the
keyword slot of a call site. Test a few different options.
>>> f(x()=2)
Traceback (most recent call last):
SyntaxError: keyword can't be an expression
>>> f(a or b=1)
Traceback (most recent call last):
SyntaxError: keyword can't be an expression
>>> f(x.y=1)
Traceback (most recent call last):
SyntaxError: keyword can't be an expression
More set_context():
>>> (x for x in x) += 1
Traceback (most recent call last):
SyntaxError: can't assign to generator expression
>>> None += 1
Traceback (most recent call last):
SyntaxError: can't assign to keyword
>>> f() += 1
Traceback (most recent call last):
SyntaxError: can't assign to function call
Test continue in finally in weird combinations.
continue in for loop under finally should be ok.
>>> def test():
... try:
... pass
... finally:
... for abc in range(10):
... continue
... print(abc)
>>> test()
9
Start simple, a continue in a finally should not be allowed.
>>> def test():
... for abc in range(10):
... try:
... pass
... finally:
... continue
Traceback (most recent call last):
...
SyntaxError: 'continue' not supported inside 'finally' clause
This is essentially a continue in a finally which should not be allowed.
>>> def test():
... for abc in range(10):
... try:
... pass
... finally:
... try:
... continue
... except:
... pass
Traceback (most recent call last):
...
SyntaxError: 'continue' not supported inside 'finally' clause
>>> def foo():
... try:
... pass
... finally:
... continue
Traceback (most recent call last):
...
SyntaxError: 'continue' not supported inside 'finally' clause
>>> def foo():
... for a in ():
... try:
... pass
... finally:
... continue
Traceback (most recent call last):
...
SyntaxError: 'continue' not supported inside 'finally' clause
>>> def foo():
... for a in ():
... try:
... pass
... finally:
... try:
... continue
... finally:
... pass
Traceback (most recent call last):
...
SyntaxError: 'continue' not supported inside 'finally' clause
>>> def foo():
... for a in ():
... try: pass
... finally:
... try:
... pass
... except:
... continue
Traceback (most recent call last):
...
SyntaxError: 'continue' not supported inside 'finally' clause
There is one test for a break that is not in a loop. The compiler
uses a single data structure to keep track of try-finally and loops,
so we need to be sure that a break is actually inside a loop. If it
isn't, there should be a syntax error.
>>> try:
... print(1)
... break
... print(2)
... finally:
... print(3)
Traceback (most recent call last):
...
SyntaxError: 'break' outside loop
This should probably raise a better error than a SystemError (or none at all).
In 2.5 there was a missing exception and an assert was triggered in a debug
build. The number of blocks must be greater than CO_MAXBLOCKS. SF #1565514
>>> while 1:
... while 2:
... while 3:
... while 4:
... while 5:
... while 6:
... while 8:
... while 9:
... while 10:
... while 11:
... while 12:
... while 13:
... while 14:
... while 15:
... while 16:
... while 17:
... while 18:
... while 19:
... while 20:
... while 21:
... while 22:
... break
Traceback (most recent call last):
...
SystemError: too many statically nested blocks
Misuse of the nonlocal statement can lead to a few unique syntax errors.
>>> def f(x):
... nonlocal x
Traceback (most recent call last):
...
SyntaxError: name 'x' is parameter and nonlocal
>>> def f():
... global x
... nonlocal x
Traceback (most recent call last):
...
SyntaxError: name 'x' is nonlocal and global
>>> def f():
... nonlocal x
Traceback (most recent call last):
...
SyntaxError: no binding for nonlocal 'x' found
From SF bug #1705365
>>> nonlocal x
Traceback (most recent call last):
...
SyntaxError: nonlocal declaration not allowed at module level
TODO(jhylton): Figure out how to test SyntaxWarning with doctest.
## >>> def f(x):
## ... def f():
## ... print(x)
## ... nonlocal x
## Traceback (most recent call last):
## ...
## SyntaxWarning: name 'x' is assigned to before nonlocal declaration
## >>> def f():
## ... x = 1
## ... nonlocal x
## Traceback (most recent call last):
## ...
## SyntaxWarning: name 'x' is assigned to before nonlocal declaration
This tests assignment-context; there was a bug in Python 2.5 where compiling
a complex 'if' (one with 'elif') would fail to notice an invalid suite,
leading to spurious errors.
>>> if 1:
... x() = 1
... elif 1:
... pass
Traceback (most recent call last):
...
SyntaxError: can't assign to function call
>>> if 1:
... pass
... elif 1:
... x() = 1
Traceback (most recent call last):
...
SyntaxError: can't assign to function call
>>> if 1:
... x() = 1
... elif 1:
... pass
... else:
... pass
Traceback (most recent call last):
...
SyntaxError: can't assign to function call
>>> if 1:
... pass
... elif 1:
... x() = 1
... else:
... pass
Traceback (most recent call last):
...
SyntaxError: can't assign to function call
>>> if 1:
... pass
... elif 1:
... pass
... else:
... x() = 1
Traceback (most recent call last):
...
SyntaxError: can't assign to function call
Make sure that the old "raise X, Y[, Z]" form is gone:
>>> raise X, Y
Traceback (most recent call last):
...
SyntaxError: invalid syntax
>>> raise X, Y, Z
Traceback (most recent call last):
...
SyntaxError: invalid syntax
>>> f(a=23, a=234)
Traceback (most recent call last):
...
SyntaxError: keyword argument repeated
>>> del ()
Traceback (most recent call last):
SyntaxError: can't delete ()
>>> {1, 2, 3} = 42
Traceback (most recent call last):
SyntaxError: can't assign to literal
Corner-cases that used to fail to raise the correct error:
>>> def f(*, x=lambda __debug__:0): pass
Traceback (most recent call last):
SyntaxError: assignment to keyword
>>> def f(*args:(lambda __debug__:0)): pass
Traceback (most recent call last):
SyntaxError: assignment to keyword
>>> def f(**kwargs:(lambda __debug__:0)): pass
Traceback (most recent call last):
SyntaxError: assignment to keyword
>>> with (lambda *:0): pass
Traceback (most recent call last):
SyntaxError: named arguments must follow bare *
Corner-cases that used to crash:
>>> def f(**__debug__): pass
Traceback (most recent call last):
SyntaxError: assignment to keyword
>>> def f(*xx, __debug__): pass
Traceback (most recent call last):
SyntaxError: assignment to keyword
"""
import re
import unittest
import warnings
from test import support
class SyntaxTestCase(unittest.TestCase):
def _check_error(self, code, errtext,
filename="<testcase>", mode="exec", subclass=None):
"""Check that compiling code raises SyntaxError with errtext.
errtest is a regular expression that must be present in the
test of the exception raised. If subclass is specified it
is the expected subclass of SyntaxError (e.g. IndentationError).
"""
try:
compile(code, filename, mode)
except SyntaxError as err:
if subclass and not isinstance(err, subclass):
self.fail("SyntaxError is not a %s" % subclass.__name__)
mo = re.search(errtext, str(err))
if mo is None:
self.fail("SyntaxError did not contain '%r'" % (errtext,))
else:
self.fail("compile() did not raise SyntaxError")
def test_assign_call(self):
self._check_error("f() = 1", "assign")
def test_assign_del(self):
self._check_error("del f()", "delete")
def test_global_err_then_warn(self):
# Bug tickler: The SyntaxError raised for one global statement
# shouldn't be clobbered by a SyntaxWarning issued for a later one.
source = """if 1:
def error(a):
global a # SyntaxError
def warning():
b = 1
global b # SyntaxWarning
"""
warnings.filterwarnings(action='ignore', category=SyntaxWarning)
self._check_error(source, "global")
warnings.filters.pop(0)
def test_break_outside_loop(self):
self._check_error("break", "outside loop")
def test_unexpected_indent(self):
self._check_error("foo()\n bar()\n", "unexpected indent",
subclass=IndentationError)
def test_no_indent(self):
self._check_error("if 1:\nfoo()", "expected an indented block",
subclass=IndentationError)
def test_bad_outdent(self):
self._check_error("if 1:\n foo()\n bar()",
"unindent does not match .* level",
subclass=IndentationError)
def test_kwargs_last(self):
self._check_error("int(base=10, '2')", "non-keyword arg")
def test_main():
    # Run the unit tests first, then exercise the doctest corpus held in
    # this module's docstring.  The import is done via the test package
    # so run_doctest can scan the installed module object.
    support.run_unittest(SyntaxTestCase)
    from test import test_syntax
    support.run_doctest(test_syntax, verbosity=True)
if __name__ == "__main__":
    test_main()
# =======  (unresolved merge-conflict separator — everything below this line
# duplicates the module above; resolve the conflict and keep a single copy)
"""This module tests SyntaxErrors.
Here's an example of the sort of thing that is tested.
>>> def f(x):
... global x
Traceback (most recent call last):
SyntaxError: name 'x' is parameter and global
The tests are all raise SyntaxErrors. They were created by checking
each C call that raises SyntaxError. There are several modules that
raise these exceptions-- ast.c, compile.c, future.c, pythonrun.c, and
symtable.c.
The parser itself outlaws a lot of invalid syntax. None of these
errors are tested here at the moment. We should add some tests; since
there are infinitely many programs with invalid syntax, we would need
to be judicious in selecting some.
The compiler generates a synthetic module name for code executed by
doctest. Since all the code comes from the same module, a suffix like
[1] is appended to the module name, As a consequence, changing the
order of tests in this module means renumbering all the errors after
it. (Maybe we should enable the ellipsis option for these tests.)
In ast.c, syntax errors are raised by calling ast_error().
Errors from set_context():
>>> obj.None = 1
Traceback (most recent call last):
SyntaxError: invalid syntax
>>> None = 1
Traceback (most recent call last):
SyntaxError: can't assign to keyword
It's a syntax error to assign to the empty tuple. Why isn't it an
error to assign to the empty list? It will always raise some error at
runtime.
>>> () = 1
Traceback (most recent call last):
SyntaxError: can't assign to ()
>>> f() = 1
Traceback (most recent call last):
SyntaxError: can't assign to function call
>>> del f()
Traceback (most recent call last):
SyntaxError: can't delete function call
>>> a + 1 = 2
Traceback (most recent call last):
SyntaxError: can't assign to operator
>>> (x for x in x) = 1
Traceback (most recent call last):
SyntaxError: can't assign to generator expression
>>> 1 = 1
Traceback (most recent call last):
SyntaxError: can't assign to literal
>>> "abc" = 1
Traceback (most recent call last):
SyntaxError: can't assign to literal
>>> b"" = 1
Traceback (most recent call last):
SyntaxError: can't assign to literal
>>> `1` = 1
Traceback (most recent call last):
SyntaxError: invalid syntax
If the left-hand side of an assignment is a list or tuple, an illegal
expression inside that contain should still cause a syntax error.
This test just checks a couple of cases rather than enumerating all of
them.
>>> (a, "b", c) = (1, 2, 3)
Traceback (most recent call last):
SyntaxError: can't assign to literal
>>> [a, b, c + 1] = [1, 2, 3]
Traceback (most recent call last):
SyntaxError: can't assign to operator
>>> a if 1 else b = 1
Traceback (most recent call last):
SyntaxError: can't assign to conditional expression
From compiler_complex_args():
>>> def f(None=1):
... pass
Traceback (most recent call last):
SyntaxError: invalid syntax
From ast_for_arguments():
>>> def f(x, y=1, z):
... pass
Traceback (most recent call last):
SyntaxError: non-default argument follows default argument
>>> def f(x, None):
... pass
Traceback (most recent call last):
SyntaxError: invalid syntax
>>> def f(*None):
... pass
Traceback (most recent call last):
SyntaxError: invalid syntax
>>> def f(**None):
... pass
Traceback (most recent call last):
SyntaxError: invalid syntax
From ast_for_funcdef():
>>> def None(x):
... pass
Traceback (most recent call last):
SyntaxError: invalid syntax
From ast_for_call():
>>> def f(it, *varargs):
... return list(it)
>>> L = range(10)
>>> f(x for x in L)
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
>>> f(x for x in L, 1)
Traceback (most recent call last):
SyntaxError: Generator expression must be parenthesized if not sole argument
>>> f((x for x in L), 1)
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
>>> f(i0, i1, i2, i3, i4, i5, i6, i7, i8, i9, i10, i11,
... i12, i13, i14, i15, i16, i17, i18, i19, i20, i21, i22,
... i23, i24, i25, i26, i27, i28, i29, i30, i31, i32, i33,
... i34, i35, i36, i37, i38, i39, i40, i41, i42, i43, i44,
... i45, i46, i47, i48, i49, i50, i51, i52, i53, i54, i55,
... i56, i57, i58, i59, i60, i61, i62, i63, i64, i65, i66,
... i67, i68, i69, i70, i71, i72, i73, i74, i75, i76, i77,
... i78, i79, i80, i81, i82, i83, i84, i85, i86, i87, i88,
... i89, i90, i91, i92, i93, i94, i95, i96, i97, i98, i99,
... i100, i101, i102, i103, i104, i105, i106, i107, i108,
... i109, i110, i111, i112, i113, i114, i115, i116, i117,
... i118, i119, i120, i121, i122, i123, i124, i125, i126,
... i127, i128, i129, i130, i131, i132, i133, i134, i135,
... i136, i137, i138, i139, i140, i141, i142, i143, i144,
... i145, i146, i147, i148, i149, i150, i151, i152, i153,
... i154, i155, i156, i157, i158, i159, i160, i161, i162,
... i163, i164, i165, i166, i167, i168, i169, i170, i171,
... i172, i173, i174, i175, i176, i177, i178, i179, i180,
... i181, i182, i183, i184, i185, i186, i187, i188, i189,
... i190, i191, i192, i193, i194, i195, i196, i197, i198,
... i199, i200, i201, i202, i203, i204, i205, i206, i207,
... i208, i209, i210, i211, i212, i213, i214, i215, i216,
... i217, i218, i219, i220, i221, i222, i223, i224, i225,
... i226, i227, i228, i229, i230, i231, i232, i233, i234,
... i235, i236, i237, i238, i239, i240, i241, i242, i243,
... i244, i245, i246, i247, i248, i249, i250, i251, i252,
... i253, i254, i255)
Traceback (most recent call last):
SyntaxError: more than 255 arguments
The actual error cases counts positional arguments, keyword arguments,
and generator expression arguments separately. This test combines the
three.
>>> f(i0, i1, i2, i3, i4, i5, i6, i7, i8, i9, i10, i11,
... i12, i13, i14, i15, i16, i17, i18, i19, i20, i21, i22,
... i23, i24, i25, i26, i27, i28, i29, i30, i31, i32, i33,
... i34, i35, i36, i37, i38, i39, i40, i41, i42, i43, i44,
... i45, i46, i47, i48, i49, i50, i51, i52, i53, i54, i55,
... i56, i57, i58, i59, i60, i61, i62, i63, i64, i65, i66,
... i67, i68, i69, i70, i71, i72, i73, i74, i75, i76, i77,
... i78, i79, i80, i81, i82, i83, i84, i85, i86, i87, i88,
... i89, i90, i91, i92, i93, i94, i95, i96, i97, i98, i99,
... i100, i101, i102, i103, i104, i105, i106, i107, i108,
... i109, i110, i111, i112, i113, i114, i115, i116, i117,
... i118, i119, i120, i121, i122, i123, i124, i125, i126,
... i127, i128, i129, i130, i131, i132, i133, i134, i135,
... i136, i137, i138, i139, i140, i141, i142, i143, i144,
... i145, i146, i147, i148, i149, i150, i151, i152, i153,
... i154, i155, i156, i157, i158, i159, i160, i161, i162,
... i163, i164, i165, i166, i167, i168, i169, i170, i171,
... i172, i173, i174, i175, i176, i177, i178, i179, i180,
... i181, i182, i183, i184, i185, i186, i187, i188, i189,
... i190, i191, i192, i193, i194, i195, i196, i197, i198,
... i199, i200, i201, i202, i203, i204, i205, i206, i207,
... i208, i209, i210, i211, i212, i213, i214, i215, i216,
... i217, i218, i219, i220, i221, i222, i223, i224, i225,
... i226, i227, i228, i229, i230, i231, i232, i233, i234,
... i235, i236, i237, i238, i239, i240, i241, i242, i243,
... (x for x in i244), i245, i246, i247, i248, i249, i250, i251,
... i252=1, i253=1, i254=1, i255=1)
Traceback (most recent call last):
SyntaxError: more than 255 arguments
>>> f(lambda x: x[0] = 3)
Traceback (most recent call last):
SyntaxError: lambda cannot contain assignment
The grammar accepts any test (basically, any expression) in the
keyword slot of a call site. Test a few different options.
>>> f(x()=2)
Traceback (most recent call last):
SyntaxError: keyword can't be an expression
>>> f(a or b=1)
Traceback (most recent call last):
SyntaxError: keyword can't be an expression
>>> f(x.y=1)
Traceback (most recent call last):
SyntaxError: keyword can't be an expression
More set_context():
>>> (x for x in x) += 1
Traceback (most recent call last):
SyntaxError: can't assign to generator expression
>>> None += 1
Traceback (most recent call last):
SyntaxError: can't assign to keyword
>>> f() += 1
Traceback (most recent call last):
SyntaxError: can't assign to function call
Test continue in finally in weird combinations.
continue in for loop under finally should be ok.
>>> def test():
... try:
... pass
... finally:
... for abc in range(10):
... continue
... print(abc)
>>> test()
9
Start simple, a continue in a finally should not be allowed.
>>> def test():
... for abc in range(10):
... try:
... pass
... finally:
... continue
Traceback (most recent call last):
...
SyntaxError: 'continue' not supported inside 'finally' clause
This is essentially a continue in a finally which should not be allowed.
>>> def test():
... for abc in range(10):
... try:
... pass
... finally:
... try:
... continue
... except:
... pass
Traceback (most recent call last):
...
SyntaxError: 'continue' not supported inside 'finally' clause
>>> def foo():
... try:
... pass
... finally:
... continue
Traceback (most recent call last):
...
SyntaxError: 'continue' not supported inside 'finally' clause
>>> def foo():
... for a in ():
... try:
... pass
... finally:
... continue
Traceback (most recent call last):
...
SyntaxError: 'continue' not supported inside 'finally' clause
>>> def foo():
... for a in ():
... try:
... pass
... finally:
... try:
... continue
... finally:
... pass
Traceback (most recent call last):
...
SyntaxError: 'continue' not supported inside 'finally' clause
>>> def foo():
... for a in ():
... try: pass
... finally:
... try:
... pass
... except:
... continue
Traceback (most recent call last):
...
SyntaxError: 'continue' not supported inside 'finally' clause
There is one test for a break that is not in a loop. The compiler
uses a single data structure to keep track of try-finally and loops,
so we need to be sure that a break is actually inside a loop. If it
isn't, there should be a syntax error.
>>> try:
... print(1)
... break
... print(2)
... finally:
... print(3)
Traceback (most recent call last):
...
SyntaxError: 'break' outside loop
This should probably raise a better error than a SystemError (or none at all).
In 2.5 there was a missing exception and an assert was triggered in a debug
build. The number of blocks must be greater than CO_MAXBLOCKS. SF #1565514
>>> while 1:
... while 2:
... while 3:
... while 4:
... while 5:
... while 6:
... while 8:
... while 9:
... while 10:
... while 11:
... while 12:
... while 13:
... while 14:
... while 15:
... while 16:
... while 17:
... while 18:
... while 19:
... while 20:
... while 21:
... while 22:
... break
Traceback (most recent call last):
...
SystemError: too many statically nested blocks
Misuse of the nonlocal statement can lead to a few unique syntax errors.
>>> def f(x):
... nonlocal x
Traceback (most recent call last):
...
SyntaxError: name 'x' is parameter and nonlocal
>>> def f():
... global x
... nonlocal x
Traceback (most recent call last):
...
SyntaxError: name 'x' is nonlocal and global
>>> def f():
... nonlocal x
Traceback (most recent call last):
...
SyntaxError: no binding for nonlocal 'x' found
From SF bug #1705365
>>> nonlocal x
Traceback (most recent call last):
...
SyntaxError: nonlocal declaration not allowed at module level
TODO(jhylton): Figure out how to test SyntaxWarning with doctest.
## >>> def f(x):
## ... def f():
## ... print(x)
## ... nonlocal x
## Traceback (most recent call last):
## ...
## SyntaxWarning: name 'x' is assigned to before nonlocal declaration
## >>> def f():
## ... x = 1
## ... nonlocal x
## Traceback (most recent call last):
## ...
## SyntaxWarning: name 'x' is assigned to before nonlocal declaration
This tests assignment-context; there was a bug in Python 2.5 where compiling
a complex 'if' (one with 'elif') would fail to notice an invalid suite,
leading to spurious errors.
>>> if 1:
... x() = 1
... elif 1:
... pass
Traceback (most recent call last):
...
SyntaxError: can't assign to function call
>>> if 1:
... pass
... elif 1:
... x() = 1
Traceback (most recent call last):
...
SyntaxError: can't assign to function call
>>> if 1:
... x() = 1
... elif 1:
... pass
... else:
... pass
Traceback (most recent call last):
...
SyntaxError: can't assign to function call
>>> if 1:
... pass
... elif 1:
... x() = 1
... else:
... pass
Traceback (most recent call last):
...
SyntaxError: can't assign to function call
>>> if 1:
... pass
... elif 1:
... pass
... else:
... x() = 1
Traceback (most recent call last):
...
SyntaxError: can't assign to function call
Make sure that the old "raise X, Y[, Z]" form is gone:
>>> raise X, Y
Traceback (most recent call last):
...
SyntaxError: invalid syntax
>>> raise X, Y, Z
Traceback (most recent call last):
...
SyntaxError: invalid syntax
>>> f(a=23, a=234)
Traceback (most recent call last):
...
SyntaxError: keyword argument repeated
>>> del ()
Traceback (most recent call last):
SyntaxError: can't delete ()
>>> {1, 2, 3} = 42
Traceback (most recent call last):
SyntaxError: can't assign to literal
Corner-cases that used to fail to raise the correct error:
>>> def f(*, x=lambda __debug__:0): pass
Traceback (most recent call last):
SyntaxError: assignment to keyword
>>> def f(*args:(lambda __debug__:0)): pass
Traceback (most recent call last):
SyntaxError: assignment to keyword
>>> def f(**kwargs:(lambda __debug__:0)): pass
Traceback (most recent call last):
SyntaxError: assignment to keyword
>>> with (lambda *:0): pass
Traceback (most recent call last):
SyntaxError: named arguments must follow bare *
Corner-cases that used to crash:
>>> def f(**__debug__): pass
Traceback (most recent call last):
SyntaxError: assignment to keyword
>>> def f(*xx, __debug__): pass
Traceback (most recent call last):
SyntaxError: assignment to keyword
"""
import re
import unittest
import warnings
from test import support
# NOTE(review): duplicate definition of SyntaxTestCase left behind by an
# unresolved merge conflict; identical to the earlier copy in this file.
class SyntaxTestCase(unittest.TestCase):
    # Each test feeds a bad source fragment to _check_error and asserts
    # that compile() rejects it with the expected SyntaxError message.
    def _check_error(self, code, errtext,
                     filename="<testcase>", mode="exec", subclass=None):
        """Check that compiling code raises SyntaxError with errtext.
        errtext is a regular expression that must be present in the
        text of the exception raised. If subclass is specified it
        is the expected subclass of SyntaxError (e.g. IndentationError).
        """
        # errtext is matched with re.search, so a plain substring of the
        # expected message works as well as a full regular expression.
        try:
            compile(code, filename, mode)
        except SyntaxError as err:
            if subclass and not isinstance(err, subclass):
                self.fail("SyntaxError is not a %s" % subclass.__name__)
            mo = re.search(errtext, str(err))
            if mo is None:
                self.fail("SyntaxError did not contain '%r'" % (errtext,))
        else:
            # Compiling successfully is itself a failure for these inputs.
            self.fail("compile() did not raise SyntaxError")
    def test_assign_call(self):
        self._check_error("f() = 1", "assign")
    def test_assign_del(self):
        self._check_error("del f()", "delete")
    def test_global_err_then_warn(self):
        # Bug tickler: The SyntaxError raised for one global statement
        # shouldn't be clobbered by a SyntaxWarning issued for a later one.
        source = """if 1:
            def error(a):
                global a # SyntaxError
            def warning():
                b = 1
                global b # SyntaxWarning
            """
        # NOTE(review): the ignore filter pushed here is only popped on
        # success; if _check_error raises, it leaks into later tests.
        warnings.filterwarnings(action='ignore', category=SyntaxWarning)
        self._check_error(source, "global")
        warnings.filters.pop(0)
    def test_break_outside_loop(self):
        self._check_error("break", "outside loop")
    def test_unexpected_indent(self):
        self._check_error("foo()\n bar()\n", "unexpected indent",
                          subclass=IndentationError)
    def test_no_indent(self):
        self._check_error("if 1:\nfoo()", "expected an indented block",
                          subclass=IndentationError)
    def test_bad_outdent(self):
        self._check_error("if 1:\n  foo()\n bar()",
                          "unindent does not match .* level",
                          subclass=IndentationError)
    def test_kwargs_last(self):
        self._check_error("int(base=10, '2')", "non-keyword arg")
# NOTE(review): duplicate test_main from the unresolved merge conflict.
def test_main():
    # Run the unit tests first, then the doctest corpus in this module's
    # docstring; the import is done via the test package so run_doctest
    # can scan the installed module object.
    support.run_unittest(SyntaxTestCase)
    from test import test_syntax
    support.run_doctest(test_syntax, verbosity=True)
if __name__ == "__main__":
    test_main()
# >>>>>>> b875702c9c06ab5012e52ff4337439b03918f453  (merge-conflict marker)
# =======  (merge-conflict separator — a third duplicate copy of the module
# follows; resolve the conflict and keep a single copy)
"""This module tests SyntaxErrors.
Here's an example of the sort of thing that is tested.
>>> def f(x):
... global x
Traceback (most recent call last):
SyntaxError: name 'x' is parameter and global
The tests are all raise SyntaxErrors. They were created by checking
each C call that raises SyntaxError. There are several modules that
raise these exceptions-- ast.c, compile.c, future.c, pythonrun.c, and
symtable.c.
The parser itself outlaws a lot of invalid syntax. None of these
errors are tested here at the moment. We should add some tests; since
there are infinitely many programs with invalid syntax, we would need
to be judicious in selecting some.
The compiler generates a synthetic module name for code executed by
doctest. Since all the code comes from the same module, a suffix like
[1] is appended to the module name, As a consequence, changing the
order of tests in this module means renumbering all the errors after
it. (Maybe we should enable the ellipsis option for these tests.)
In ast.c, syntax errors are raised by calling ast_error().
Errors from set_context():
>>> obj.None = 1
Traceback (most recent call last):
SyntaxError: invalid syntax
>>> None = 1
Traceback (most recent call last):
SyntaxError: can't assign to keyword
It's a syntax error to assign to the empty tuple. Why isn't it an
error to assign to the empty list? It will always raise some error at
runtime.
>>> () = 1
Traceback (most recent call last):
SyntaxError: can't assign to ()
>>> f() = 1
Traceback (most recent call last):
SyntaxError: can't assign to function call
>>> del f()
Traceback (most recent call last):
SyntaxError: can't delete function call
>>> a + 1 = 2
Traceback (most recent call last):
SyntaxError: can't assign to operator
>>> (x for x in x) = 1
Traceback (most recent call last):
SyntaxError: can't assign to generator expression
>>> 1 = 1
Traceback (most recent call last):
SyntaxError: can't assign to literal
>>> "abc" = 1
Traceback (most recent call last):
SyntaxError: can't assign to literal
>>> b"" = 1
Traceback (most recent call last):
SyntaxError: can't assign to literal
>>> `1` = 1
Traceback (most recent call last):
SyntaxError: invalid syntax
If the left-hand side of an assignment is a list or tuple, an illegal
expression inside that contain should still cause a syntax error.
This test just checks a couple of cases rather than enumerating all of
them.
>>> (a, "b", c) = (1, 2, 3)
Traceback (most recent call last):
SyntaxError: can't assign to literal
>>> [a, b, c + 1] = [1, 2, 3]
Traceback (most recent call last):
SyntaxError: can't assign to operator
>>> a if 1 else b = 1
Traceback (most recent call last):
SyntaxError: can't assign to conditional expression
From compiler_complex_args():
>>> def f(None=1):
... pass
Traceback (most recent call last):
SyntaxError: invalid syntax
From ast_for_arguments():
>>> def f(x, y=1, z):
... pass
Traceback (most recent call last):
SyntaxError: non-default argument follows default argument
>>> def f(x, None):
... pass
Traceback (most recent call last):
SyntaxError: invalid syntax
>>> def f(*None):
... pass
Traceback (most recent call last):
SyntaxError: invalid syntax
>>> def f(**None):
... pass
Traceback (most recent call last):
SyntaxError: invalid syntax
From ast_for_funcdef():
>>> def None(x):
... pass
Traceback (most recent call last):
SyntaxError: invalid syntax
From ast_for_call():
>>> def f(it, *varargs):
... return list(it)
>>> L = range(10)
>>> f(x for x in L)
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
>>> f(x for x in L, 1)
Traceback (most recent call last):
SyntaxError: Generator expression must be parenthesized if not sole argument
>>> f((x for x in L), 1)
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
>>> f(i0, i1, i2, i3, i4, i5, i6, i7, i8, i9, i10, i11,
... i12, i13, i14, i15, i16, i17, i18, i19, i20, i21, i22,
... i23, i24, i25, i26, i27, i28, i29, i30, i31, i32, i33,
... i34, i35, i36, i37, i38, i39, i40, i41, i42, i43, i44,
... i45, i46, i47, i48, i49, i50, i51, i52, i53, i54, i55,
... i56, i57, i58, i59, i60, i61, i62, i63, i64, i65, i66,
... i67, i68, i69, i70, i71, i72, i73, i74, i75, i76, i77,
... i78, i79, i80, i81, i82, i83, i84, i85, i86, i87, i88,
... i89, i90, i91, i92, i93, i94, i95, i96, i97, i98, i99,
... i100, i101, i102, i103, i104, i105, i106, i107, i108,
... i109, i110, i111, i112, i113, i114, i115, i116, i117,
... i118, i119, i120, i121, i122, i123, i124, i125, i126,
... i127, i128, i129, i130, i131, i132, i133, i134, i135,
... i136, i137, i138, i139, i140, i141, i142, i143, i144,
... i145, i146, i147, i148, i149, i150, i151, i152, i153,
... i154, i155, i156, i157, i158, i159, i160, i161, i162,
... i163, i164, i165, i166, i167, i168, i169, i170, i171,
... i172, i173, i174, i175, i176, i177, i178, i179, i180,
... i181, i182, i183, i184, i185, i186, i187, i188, i189,
... i190, i191, i192, i193, i194, i195, i196, i197, i198,
... i199, i200, i201, i202, i203, i204, i205, i206, i207,
... i208, i209, i210, i211, i212, i213, i214, i215, i216,
... i217, i218, i219, i220, i221, i222, i223, i224, i225,
... i226, i227, i228, i229, i230, i231, i232, i233, i234,
... i235, i236, i237, i238, i239, i240, i241, i242, i243,
... i244, i245, i246, i247, i248, i249, i250, i251, i252,
... i253, i254, i255)
Traceback (most recent call last):
SyntaxError: more than 255 arguments
The actual error cases counts positional arguments, keyword arguments,
and generator expression arguments separately. This test combines the
three.
>>> f(i0, i1, i2, i3, i4, i5, i6, i7, i8, i9, i10, i11,
... i12, i13, i14, i15, i16, i17, i18, i19, i20, i21, i22,
... i23, i24, i25, i26, i27, i28, i29, i30, i31, i32, i33,
... i34, i35, i36, i37, i38, i39, i40, i41, i42, i43, i44,
... i45, i46, i47, i48, i49, i50, i51, i52, i53, i54, i55,
... i56, i57, i58, i59, i60, i61, i62, i63, i64, i65, i66,
... i67, i68, i69, i70, i71, i72, i73, i74, i75, i76, i77,
... i78, i79, i80, i81, i82, i83, i84, i85, i86, i87, i88,
... i89, i90, i91, i92, i93, i94, i95, i96, i97, i98, i99,
... i100, i101, i102, i103, i104, i105, i106, i107, i108,
... i109, i110, i111, i112, i113, i114, i115, i116, i117,
... i118, i119, i120, i121, i122, i123, i124, i125, i126,
... i127, i128, i129, i130, i131, i132, i133, i134, i135,
... i136, i137, i138, i139, i140, i141, i142, i143, i144,
... i145, i146, i147, i148, i149, i150, i151, i152, i153,
... i154, i155, i156, i157, i158, i159, i160, i161, i162,
... i163, i164, i165, i166, i167, i168, i169, i170, i171,
... i172, i173, i174, i175, i176, i177, i178, i179, i180,
... i181, i182, i183, i184, i185, i186, i187, i188, i189,
... i190, i191, i192, i193, i194, i195, i196, i197, i198,
... i199, i200, i201, i202, i203, i204, i205, i206, i207,
... i208, i209, i210, i211, i212, i213, i214, i215, i216,
... i217, i218, i219, i220, i221, i222, i223, i224, i225,
... i226, i227, i228, i229, i230, i231, i232, i233, i234,
... i235, i236, i237, i238, i239, i240, i241, i242, i243,
... (x for x in i244), i245, i246, i247, i248, i249, i250, i251,
... i252=1, i253=1, i254=1, i255=1)
Traceback (most recent call last):
SyntaxError: more than 255 arguments
>>> f(lambda x: x[0] = 3)
Traceback (most recent call last):
SyntaxError: lambda cannot contain assignment
The grammar accepts any test (basically, any expression) in the
keyword slot of a call site. Test a few different options.
>>> f(x()=2)
Traceback (most recent call last):
SyntaxError: keyword can't be an expression
>>> f(a or b=1)
Traceback (most recent call last):
SyntaxError: keyword can't be an expression
>>> f(x.y=1)
Traceback (most recent call last):
SyntaxError: keyword can't be an expression
More set_context():
>>> (x for x in x) += 1
Traceback (most recent call last):
SyntaxError: can't assign to generator expression
>>> None += 1
Traceback (most recent call last):
SyntaxError: can't assign to keyword
>>> f() += 1
Traceback (most recent call last):
SyntaxError: can't assign to function call
Test continue in finally in weird combinations.
continue in for loop under finally should be ok.
>>> def test():
... try:
... pass
... finally:
... for abc in range(10):
... continue
... print(abc)
>>> test()
9
Start simple, a continue in a finally should not be allowed.
>>> def test():
... for abc in range(10):
... try:
... pass
... finally:
... continue
Traceback (most recent call last):
...
SyntaxError: 'continue' not supported inside 'finally' clause
This is essentially a continue in a finally which should not be allowed.
>>> def test():
... for abc in range(10):
... try:
... pass
... finally:
... try:
... continue
... except:
... pass
Traceback (most recent call last):
...
SyntaxError: 'continue' not supported inside 'finally' clause
>>> def foo():
... try:
... pass
... finally:
... continue
Traceback (most recent call last):
...
SyntaxError: 'continue' not supported inside 'finally' clause
>>> def foo():
... for a in ():
... try:
... pass
... finally:
... continue
Traceback (most recent call last):
...
SyntaxError: 'continue' not supported inside 'finally' clause
>>> def foo():
... for a in ():
... try:
... pass
... finally:
... try:
... continue
... finally:
... pass
Traceback (most recent call last):
...
SyntaxError: 'continue' not supported inside 'finally' clause
>>> def foo():
... for a in ():
... try: pass
... finally:
... try:
... pass
... except:
... continue
Traceback (most recent call last):
...
SyntaxError: 'continue' not supported inside 'finally' clause
There is one test for a break that is not in a loop. The compiler
uses a single data structure to keep track of try-finally and loops,
so we need to be sure that a break is actually inside a loop. If it
isn't, there should be a syntax error.
>>> try:
... print(1)
... break
... print(2)
... finally:
... print(3)
Traceback (most recent call last):
...
SyntaxError: 'break' outside loop
This should probably raise a better error than a SystemError (or none at all).
In 2.5 there was a missing exception and an assert was triggered in a debug
build. The number of blocks must be greater than CO_MAXBLOCKS. SF #1565514
>>> while 1:
... while 2:
... while 3:
... while 4:
... while 5:
... while 6:
... while 8:
... while 9:
... while 10:
... while 11:
... while 12:
... while 13:
... while 14:
... while 15:
... while 16:
... while 17:
... while 18:
... while 19:
... while 20:
... while 21:
... while 22:
... break
Traceback (most recent call last):
...
SystemError: too many statically nested blocks
Misuse of the nonlocal statement can lead to a few unique syntax errors.
>>> def f(x):
... nonlocal x
Traceback (most recent call last):
...
SyntaxError: name 'x' is parameter and nonlocal
>>> def f():
... global x
... nonlocal x
Traceback (most recent call last):
...
SyntaxError: name 'x' is nonlocal and global
>>> def f():
... nonlocal x
Traceback (most recent call last):
...
SyntaxError: no binding for nonlocal 'x' found
From SF bug #1705365
>>> nonlocal x
Traceback (most recent call last):
...
SyntaxError: nonlocal declaration not allowed at module level
TODO(jhylton): Figure out how to test SyntaxWarning with doctest.
## >>> def f(x):
## ... def f():
## ... print(x)
## ... nonlocal x
## Traceback (most recent call last):
## ...
## SyntaxWarning: name 'x' is assigned to before nonlocal declaration
## >>> def f():
## ... x = 1
## ... nonlocal x
## Traceback (most recent call last):
## ...
## SyntaxWarning: name 'x' is assigned to before nonlocal declaration
This tests assignment-context; there was a bug in Python 2.5 where compiling
a complex 'if' (one with 'elif') would fail to notice an invalid suite,
leading to spurious errors.
>>> if 1:
... x() = 1
... elif 1:
... pass
Traceback (most recent call last):
...
SyntaxError: can't assign to function call
>>> if 1:
... pass
... elif 1:
... x() = 1
Traceback (most recent call last):
...
SyntaxError: can't assign to function call
>>> if 1:
... x() = 1
... elif 1:
... pass
... else:
... pass
Traceback (most recent call last):
...
SyntaxError: can't assign to function call
>>> if 1:
... pass
... elif 1:
... x() = 1
... else:
... pass
Traceback (most recent call last):
...
SyntaxError: can't assign to function call
>>> if 1:
... pass
... elif 1:
... pass
... else:
... x() = 1
Traceback (most recent call last):
...
SyntaxError: can't assign to function call
Make sure that the old "raise X, Y[, Z]" form is gone:
>>> raise X, Y
Traceback (most recent call last):
...
SyntaxError: invalid syntax
>>> raise X, Y, Z
Traceback (most recent call last):
...
SyntaxError: invalid syntax
>>> f(a=23, a=234)
Traceback (most recent call last):
...
SyntaxError: keyword argument repeated
>>> del ()
Traceback (most recent call last):
SyntaxError: can't delete ()
>>> {1, 2, 3} = 42
Traceback (most recent call last):
SyntaxError: can't assign to literal
Corner-cases that used to fail to raise the correct error:
>>> def f(*, x=lambda __debug__:0): pass
Traceback (most recent call last):
SyntaxError: assignment to keyword
>>> def f(*args:(lambda __debug__:0)): pass
Traceback (most recent call last):
SyntaxError: assignment to keyword
>>> def f(**kwargs:(lambda __debug__:0)): pass
Traceback (most recent call last):
SyntaxError: assignment to keyword
>>> with (lambda *:0): pass
Traceback (most recent call last):
SyntaxError: named arguments must follow bare *
Corner-cases that used to crash:
>>> def f(**__debug__): pass
Traceback (most recent call last):
SyntaxError: assignment to keyword
>>> def f(*xx, __debug__): pass
Traceback (most recent call last):
SyntaxError: assignment to keyword
"""
import re
import unittest
import warnings
from test import support
# NOTE(review): third duplicate definition of SyntaxTestCase left behind by
# an unresolved merge conflict; identical to the earlier copies in this file.
class SyntaxTestCase(unittest.TestCase):
    # Each test feeds a bad source fragment to _check_error and asserts
    # that compile() rejects it with the expected SyntaxError message.
    def _check_error(self, code, errtext,
                     filename="<testcase>", mode="exec", subclass=None):
        """Check that compiling code raises SyntaxError with errtext.
        errtext is a regular expression that must be present in the
        text of the exception raised. If subclass is specified it
        is the expected subclass of SyntaxError (e.g. IndentationError).
        """
        # errtext is matched with re.search, so a plain substring of the
        # expected message works as well as a full regular expression.
        try:
            compile(code, filename, mode)
        except SyntaxError as err:
            if subclass and not isinstance(err, subclass):
                self.fail("SyntaxError is not a %s" % subclass.__name__)
            mo = re.search(errtext, str(err))
            if mo is None:
                self.fail("SyntaxError did not contain '%r'" % (errtext,))
        else:
            # Compiling successfully is itself a failure for these inputs.
            self.fail("compile() did not raise SyntaxError")
    def test_assign_call(self):
        self._check_error("f() = 1", "assign")
    def test_assign_del(self):
        self._check_error("del f()", "delete")
    def test_global_err_then_warn(self):
        # Bug tickler: The SyntaxError raised for one global statement
        # shouldn't be clobbered by a SyntaxWarning issued for a later one.
        source = """if 1:
            def error(a):
                global a # SyntaxError
            def warning():
                b = 1
                global b # SyntaxWarning
            """
        # NOTE(review): the ignore filter pushed here is only popped on
        # success; if _check_error raises, it leaks into later tests.
        warnings.filterwarnings(action='ignore', category=SyntaxWarning)
        self._check_error(source, "global")
        warnings.filters.pop(0)
    def test_break_outside_loop(self):
        self._check_error("break", "outside loop")
    def test_unexpected_indent(self):
        self._check_error("foo()\n bar()\n", "unexpected indent",
                          subclass=IndentationError)
    def test_no_indent(self):
        self._check_error("if 1:\nfoo()", "expected an indented block",
                          subclass=IndentationError)
    def test_bad_outdent(self):
        self._check_error("if 1:\n  foo()\n bar()",
                          "unindent does not match .* level",
                          subclass=IndentationError)
    def test_kwargs_last(self):
        self._check_error("int(base=10, '2')", "non-keyword arg")
def test_main():
    """Run the unittest cases plus this module's own doctests."""
    support.run_unittest(SyntaxTestCase)
    # Import the module itself so its module docstring doctests are executed.
    from test import test_syntax
    support.run_doctest(test_syntax, verbosity=True)
if __name__ == "__main__":
    test_main()
>>>>>>> b875702c9c06ab5012e52ff4337439b03918f453
| ArcherSys/ArcherSys | Lib/test/test_syntax.py | Python | mit | 54,344 |
#!/usr/bin/env python2
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:fdm=marker:ai
from __future__ import (unicode_literals, division, absolute_import,
print_function)
__license__ = 'GPL v3'
__copyright__ = '2012, Kovid Goyal <kovid at kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
import os, sys, subprocess
plat = 'amd64' if sys.maxsize > 2**32 else 'x86'
def distutils_vcvars():
    """Locate the vcvarsall.bat matching the MSVC used to build this Python.

    Windows-only: relies on distutils' MSVC 9 compiler discovery.
    """
    from distutils.msvc9compiler import find_vcvarsall, get_build_version
    return find_vcvarsall(get_build_version())
def remove_dups(variable):
    """Remove duplicate entries from an os.pathsep-separated path string.

    Keeps the first occurrence of each entry, preserving order.

    :param variable: a PATH-like string, e.g. ``'a;b;a'`` on Windows
    :return: the same string with later duplicates dropped
    """
    seen = set()  # O(1) membership test instead of scanning the list each time
    new_list = []
    for i in variable.split(os.pathsep):
        if i not in seen:
            seen.add(i)
            new_list.append(i)
    return os.pathsep.join(new_list)
def query_process(cmd):
    """Run *cmd*, parse ``KEY=VALUE`` lines from its stdout and return a dict.

    Keys are lower-cased; the ``path`` value has a trailing separator stripped
    and duplicate entries removed. Raises RuntimeError (with the decoded
    stderr) when the command exits non-zero. Windows-only: output is decoded
    with the 'mbcs' codec.
    """
    # The 64-bit vcvars batch file reads PROGRAMFILES(x86); synthesize it if absent.
    if plat == 'amd64' and 'PROGRAMFILES(x86)' not in os.environ:
        os.environ['PROGRAMFILES(x86)'] = os.environ['PROGRAMFILES'] + ' (x86)'
    result = {}
    popen = subprocess.Popen(cmd, stdout=subprocess.PIPE,
                stderr=subprocess.PIPE)
    try:
        stdout, stderr = popen.communicate()
        if popen.wait() != 0:
            raise RuntimeError(stderr.decode("mbcs"))
        stdout = stdout.decode("mbcs")
        for line in stdout.splitlines():
            # Skip banner/blank lines that are not environment assignments.
            if '=' not in line:
                continue
            line = line.strip()
            key, value = line.split('=', 1)
            key = key.lower()
            if key == 'path':
                if value.endswith(os.pathsep):
                    value = value[:-1]
                value = remove_dups(value)
            result[key] = value
    finally:
        popen.stdout.close()
        popen.stderr.close()
    return result
def query_vcvarsall():
    """Run vcvarsall.bat for the current platform and capture the resulting
    environment via ``set``."""
    vcvarsall = distutils_vcvars()
    return query_process('"%s" %s & set' % (vcvarsall, plat))
# Capture the MSVC environment once at import time; the pieces below are
# interpolated into the shell script template at the end of this file.
env = query_vcvarsall()
paths = env['path'].split(';')
lib = env['lib']
include = env['include']
libpath = env['libpath']
sdkdir = env['windowssdkdir']
def unix(paths):
    """Convert Windows-style paths to cygwin mount form and join with ':'.

    ``C:/foo/bar`` (or ``C:\\foo\\bar``) becomes ``/cygdrive/c/foo/bar``.
    """
    def to_cygdrive(path):
        normalized = path.replace(os.sep, '/')
        drive, _, rest = normalized.partition('/')
        return '/cygdrive/%s/%s' % (drive[0].lower(), rest)
    return ':'.join(to_cygdrive(p) for p in paths)
# Shell script template that replays the captured MSVC environment inside a
# cygwin shell. Backslashes in the Windows values are escaped for sh.
raw = '''\
#!/bin/sh
export PATH="%s:$PATH"
export LIB="%s"
export INCLUDE="%s"
export LIBPATH="%s"
export WindowsSdkDir="%s"
'''%(unix(paths), lib.replace('\\', r'\\'), include.replace('\\', r'\\'), libpath.replace('\\', r'\\'), sdkdir.replace('\\', r'\\'))
# Emit UTF-8 bytes so redirection to a file is encoding-safe (Python 2 script).
print(raw.encode('utf-8'))
| jelly/calibre | setup/vcvars.py | Python | gpl-3.0 | 2,481 |
"""
Manage AWS ElastiCache
ElastiCache provides Memcached and Redis as a service
"""
import getpass
import logging
from ConfigParser import ConfigParser
import boto3
import botocore
from semantic_version import Spec, Version
from . import normalize_path
from .disco_route53 import DiscoRoute53
from .exceptions import CommandError
from .resource_helper import throttled_call
class DiscoElastiCache(object):
    """
    A simple class to manage ElastiCache
    """
    def __init__(self, vpc, config_file='disco_elasticache.ini', aws=None, route53=None):
        self.vpc = vpc
        self.conn = boto3.client('elasticache')
        self.config_file = config_file
        self._config = None # lazily initialized
        self.route53 = route53 or DiscoRoute53()
        self.aws = aws
    @property
    def config(self):
        """lazy load config"""
        # NOTE(review): any failure while reading the file is swallowed and
        # None is returned; callers see a CommandError from _get_option later.
        if not self._config:
            try:
                config = ConfigParser()
                config.read(normalize_path(self.config_file))
                self._config = config
            except Exception:
                return None
        return self._config
    def list(self):
        """List all cache clusters in environment"""
        # Environment membership is encoded as an 'env-' prefix on the group id.
        response = throttled_call(self.conn.describe_replication_groups)
        groups = [group for group in response.get('ReplicationGroups', [])
                  if group['ReplicationGroupId'].startswith(self.vpc.environment_name + '-')]
        return sorted(groups, key=lambda group: (group['ReplicationGroupId']))
    def update(self, cluster_name):
        """
        Create a new cluster or modify an existing one based on the config file
        Modifying tags, number of nodes, instance type, engine type, and port is not supported
        Args:
            cluster_name (str): name of cluster
        """
        meta_network = self._get_option(cluster_name, 'meta_network') or self.aws.get_default_meta_network()
        # The subnet group is a prerequisite for cluster creation.
        if not self._get_subnet_group(meta_network):
            self._create_subnet_group(meta_network)
        engine_version = self._get_option(cluster_name, 'engine_version')
        instance_type = self._get_option(cluster_name, 'instance_type')
        parameter_group = self._get_option(cluster_name, 'parameter_group')
        num_nodes = int(self._get_option(cluster_name, 'num_nodes'))
        port = int(self._get_option(cluster_name, 'port'))
        auto_failover = self._has_auto_failover(engine_version, instance_type, num_nodes)
        domain_name = self._get_option(cluster_name, 'domain_name') or self.aws.get_default_domain_name()
        tags = [{
            'Key': 'product_line',
            'Value': self._get_option(cluster_name, 'product_line') or self.aws.get_default_product_line('')
        }, {
            'Key': 'owner',
            'Value': getpass.getuser()
        }, {
            'Key': 'name',
            'Value': cluster_name
        }]
        # Create if missing, otherwise modify in place (limited set of fields).
        cache_cluster = self._get_cache_cluster(cluster_name)
        if not cache_cluster:
            self._create_redis_cluster(cluster_name, engine_version, num_nodes, instance_type,
                                       parameter_group, port, meta_network, auto_failover, domain_name, tags)
        else:
            self._modify_redis_cluster(cluster_name, engine_version,
                                       parameter_group, auto_failover, domain_name)
    def update_all(self):
        """Update all clusters in environment to match config"""
        # Config sections are named '<environment>:<cluster_name>'.
        sections = [section for section in self.config.sections()
                    if section.startswith(self.vpc.environment_name + ':')]
        for section in sections:
            cluster_name = section.split(':')[1]
            self.update(cluster_name)
    def delete(self, cluster_name, wait=False):
        """
        Delete a cache cluster
        Args:
            cluster_name (str): name of cluster
            wait (bool): block until cluster is deleted
        """
        cluster = self._get_cache_cluster(cluster_name)
        if not cluster:
            logging.info('Cache cluster %s does not exist. Nothing to delete', cluster_name)
            return
        logging.info('Deleting cache cluster %s', cluster_name)
        throttled_call(self.conn.delete_replication_group, ReplicationGroupId=cluster['ReplicationGroupId'])
        # Also remove the DNS CNAME that points at the primary endpoint.
        self.route53.delete_records_by_value('CNAME', cluster['NodeGroups'][0]['PrimaryEndpoint']['Address'])
        if wait:
            self.conn.get_waiter('replication_group_deleted').wait(
                ReplicationGroupId=cluster['ReplicationGroupId'])
    def delete_all_cache_clusters(self, wait=False):
        """
        Delete all cache clusters in environment
        Args:
            wait (bool): block until all cache clusters are deleted
        """
        clusters = self.list()
        for cluster in clusters:
            logging.info('Deleting cache cluster %s', cluster['ReplicationGroupId'])
            throttled_call(self.conn.delete_replication_group,
                           ReplicationGroupId=cluster['ReplicationGroupId'])
            address = cluster['NodeGroups'][0]['PrimaryEndpoint']['Address']
            self.route53.delete_records_by_value('CNAME', address)
        # Deletions are issued first, then waited on, so they run in parallel.
        if wait:
            for cluster in clusters:
                self.conn.get_waiter('replication_group_deleted').wait(
                    ReplicationGroupId=cluster['ReplicationGroupId'])
    def delete_all_subnet_groups(self):
        """Delete all subnet groups in environment"""
        response = throttled_call(self.conn.describe_cache_subnet_groups)
        subnet_groups = [group for group in response.get('CacheSubnetGroups', [])
                         if group['CacheSubnetGroupName'].startswith(self.vpc.environment_name + '-')]
        for group in subnet_groups:
            logging.info('Deleting cache subnet group %s', group['CacheSubnetGroupName'])
            throttled_call(self.conn.delete_cache_subnet_group,
                           CacheSubnetGroupName=group['CacheSubnetGroupName'])
    def _get_cache_cluster(self, cluster_name):
        # Returns the replication group dict, or None when it does not exist
        # (describe_replication_groups raises for unknown ids).
        cluster_id = self._get_cluster_id(cluster_name)
        try:
            response = throttled_call(self.conn.describe_replication_groups,
                                      ReplicationGroupId=cluster_id)
            clusters = response.get('ReplicationGroups', [])
            return clusters[0] if clusters else None
        except Exception:
            return None
    # too many arguments and local variables for pylint
    # pylint: disable=R0913, R0914
    def _create_redis_cluster(self, cluster_name, engine_version, num_nodes, instance_type,
                              parameter_group,
                              port, meta_network_name, auto_failover, domain_name, tags):
        """
        Create a redis cache cluster
        Redis clusters are actually 'Replication Groups' in ElastiCache.
        Each Replication Group is a set of single node Redis Cache Clusters with one read/write cluster and
        the rest as read only.
        Waits until cluster is created
        Args:
            cluster_name (str): name of cluster
            engine_version (str): redis version to use
            num_nodes (int): number of nodes in replication group. must be at least 2 if auto_failover is on
            instance_type (str): instance types. only allowed to use instance types that start with 'cache.'
            parameter_group (str): name of parameter group to use
            port (int): port to make cache available on
            meta_network_name (str): meta network to use (intranet, tunnel, etc)
            auto_failover (bool): enable automatic promotion of read only cluster when primary fails.
                                  only supported for redis versions>2.8.6.
                                  not allowed for T1 and T2 instance types.
            domain_name (str): hosted zone id to use for Route53 domain name
            tags (List[dict]): list of tags to add to replication group
        """
        cluster_id = self._get_cluster_id(cluster_name)
        meta_network = self.vpc.networks[meta_network_name]
        subnet_group = self._get_subnet_group_name(meta_network_name)
        logging.info('Creating "%s" Redis cache', cluster_id)
        throttled_call(self.conn.create_replication_group,
                       ReplicationGroupId=cluster_id,
                       ReplicationGroupDescription=cluster_id,
                       NumCacheClusters=num_nodes,
                       CacheNodeType=instance_type,
                       Engine='redis',
                       EngineVersion=engine_version,
                       CacheParameterGroupName=parameter_group,
                       CacheSubnetGroupName=subnet_group,
                       SecurityGroupIds=[meta_network.security_group.id],
                       Port=port,
                       AutomaticFailoverEnabled=auto_failover,
                       Tags=tags)
        # Block until AWS reports the group available before touching DNS.
        self.conn.get_waiter('replication_group_available').wait(
            ReplicationGroupId=cluster_id
        )
        cluster = self._get_cache_cluster(cluster_name)
        if domain_name:
            address = cluster['NodeGroups'][0]['PrimaryEndpoint']['Address']
            subdomain = self._get_subdomain(cluster_name, domain_name)
            self.route53.create_record(domain_name, subdomain, 'CNAME', address)
    def _modify_redis_cluster(self, cluster_name, engine_version, parameter_group,
                              auto_failover, domain_name, apply_immediately=True):
        """
        Modify an existing Redis replication group
        Args:
            cluster_name (str): name of cluster
            engine_version (str): redis version to use
            parameter_group (str): name of parameter group to use
            auto_failover (bool): True to enable automatic promotion of read only cluster after primary fails
            domain_name (str): Hosted zone where to create subdomain for cluster
            apply_immediately (bool): True to immediately update the cluster
                                      False to schedule update at next cluster maintenance window or restart
        """
        cluster_id = self._get_cluster_id(cluster_name)
        cluster = self._get_cache_cluster(cluster_name)
        throttled_call(self.conn.modify_replication_group,
                       ReplicationGroupId=cluster_id,
                       AutomaticFailoverEnabled=auto_failover,
                       CacheParameterGroupName=parameter_group,
                       ApplyImmediately=apply_immediately,
                       EngineVersion=engine_version)
        if domain_name:
            # Recreate the CNAME so the record reflects the current endpoint.
            address = cluster['NodeGroups'][0]['PrimaryEndpoint']['Address']
            self.route53.delete_records_by_value('CNAME', address)
            subdomain = self._get_subdomain(cluster_name, domain_name)
            self.route53.create_record(domain_name, subdomain, 'CNAME', address)
    def _create_subnet_group(self, meta_network_name):
        # One subnet group per (environment, meta network) pair.
        subnet_group_name = self._get_subnet_group_name(meta_network_name)
        meta_network = self.vpc.networks[meta_network_name]
        logging.info('Creating cache subnet group %s', subnet_group_name)
        throttled_call(self.conn.create_cache_subnet_group,
                       CacheSubnetGroupName=subnet_group_name,
                       CacheSubnetGroupDescription=subnet_group_name,
                       SubnetIds=[subnet.id for subnet in meta_network.subnets])
    def _get_subnet_group(self, meta_network_name):
        # Returns the subnet group dict, or None when it does not exist.
        try:
            response = throttled_call(self.conn.describe_cache_subnet_groups,
                                      CacheSubnetGroupName=self._get_subnet_group_name(meta_network_name))
            groups = response.get('CacheSubnetGroups', [])
            return groups[0] if groups else None
        except botocore.exceptions.ClientError:
            return None
    def _get_cluster_id(self, cluster_name):
        # AWS limits ElastiCache replication group ids to 20 characters.
        cluster_id = self.vpc.environment_name + '-' + cluster_name
        if len(cluster_id) > 20:
            raise CommandError('Cache cluster name ' + cluster_id + ' is over 20 characters')
        return cluster_id
    def _get_subnet_group_name(self, meta_network_name):
        return self.vpc.environment_name + '-' + meta_network_name
    def _get_subdomain(self, cluster, domain_name):
        """Get the expected subdomain for a cache cluster"""
        return cluster + '-' + self.vpc.environment_name + '.' + domain_name
    def _get_option(self, cluster_name, option_name):
        """Get a config option for a cluster"""
        if not self.config:
            raise CommandError('ElastiCache config file missing')
        section_name = self.vpc.environment_name + ':' + cluster_name
        if not self.config.has_section(section_name):
            raise CommandError('%s section missing in ElastiCache config' % section_name)
        if self.config.has_option(section_name, option_name):
            return self.config.get(section_name, option_name)
        return None
    def _has_auto_failover(self, engine_version, instance_type, num_nodes):
        """auto failover is only supported for Redis versions >= 2.8.6 and not for t1, t2 instance types"""
        return ('t1.' not in instance_type and
                't2.' not in instance_type and
                Spec('>=2.8.6').match(Version(engine_version)) and
                num_nodes > 1)
| Angakkuit/asiaq-aws | disco_aws_automation/disco_elasticache.py | Python | bsd-2-clause | 13,511 |
import unittest
import numpy
from templevel import TempLevel
__author__ = 'Rio'
class TestPocket(unittest.TestCase):
    """Round-trip lighting test for a Pocket Edition world (Python 2 code)."""
    def setUp(self):
        # self.alphaLevel = TempLevel("Dojo_64_64_128.dat")
        # TempLevel copies the fixture so the original world is not modified.
        self.level = TempLevel("PocketWorld")
        self.alphalevel = TempLevel("AnvilWorld")
    def testPocket(self):
        level = self.level.level
        # alphalevel = self.alphalevel.level
        print "Chunk count", len(level.allChunks)
        chunk = level.getChunk(1, 5)
        # Snapshot SkyLight, then force a relight and save; the recomputed
        # light data must match what was stored in the world.
        a = numpy.array(chunk.SkyLight)
        chunk.dirty = True
        chunk.needsLighting = True
        level.generateLights()
        level.saveInPlace()
        assert (a == chunk.SkyLight).all()
        # level.copyBlocksFrom(alphalevel, BoundingBox((0, 0, 0), (64, 64, 64,)), (0, 0, 0))
        # assert((level.Blocks[0:64, 0:64, 0:64] == alphalevel.Blocks[0:64, 0:64, 0:64]).all())
| arruda/pymclevel | test/pocket_test.py | Python | isc | 880 |
"""
W0711
Exception to catch is the result of a binary operation
"""
| landscape-test/all-messages | messages/pylint/W0711.py | Python | unlicense | 70 |
#!/usr/bin/env python
# Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Authors: Qiming Sun <osirpt.sun@gmail.com>
# James D. McClain
# Timothy Berkelbach <tim.berkelbach@gmail.com>
#
import itertools
from collections import OrderedDict
from numbers import Number
import numpy as np
import scipy.linalg
from pyscf import lib
from pyscf import __config__
KPT_DIFF_TOL = getattr(__config__, 'pbc_lib_kpts_helper_kpt_diff_tol', 1e-6)
def is_zero(kpt):
    """Whether `kpt` is numerically the Gamma point (all components ~ 0)."""
    return np.abs(np.asarray(kpt)).sum() < KPT_DIFF_TOL
# Alias kept for readability at call sites.
gamma_point = is_zero
def member(kpt, kpts):
    """Indices of entries in `kpts` equal to `kpt` within KPT_DIFF_TOL."""
    flat = np.reshape(kpts, (len(kpts), kpt.size))
    diff = np.abs(flat - kpt.ravel()).sum(axis=1)
    return np.where(diff < KPT_DIFF_TOL)[0]
def unique(kpts):
    """Deduplicate k-points within KPT_DIFF_TOL.

    Returns (unique_kpts, first_occurrence_indices, inverse_mapping) where
    inverse_mapping[i] is the index of kpts[i] in unique_kpts.
    """
    kpts = np.asarray(kpts)
    nkpts = len(kpts)
    uniq_kpts = []
    uniq_index = []
    uniq_inverse = np.zeros(nkpts, dtype=int)
    seen = np.zeros(nkpts, dtype=bool)
    n = 0
    for i, kpt in enumerate(kpts):
        if seen[i]:
            continue
        uniq_kpts.append(kpt)
        uniq_index.append(i)
        # Mark every k-point that coincides with this one (within tolerance).
        match = abs(kpt - kpts).sum(axis=1) < KPT_DIFF_TOL
        uniq_inverse[match] = n
        seen[match] = True
        n += 1
    return np.asarray(uniq_kpts), np.asarray(uniq_index), uniq_inverse
def loop_kkk(nkpts):
    """Iterate over all (kp, kq, kr) k-point index triplets."""
    return itertools.product(range(nkpts), repeat=3)
def get_kconserv(cell, kpts):
    r'''Get the momentum conservation array for a set of k-points.
    Given k-point indices (k, l, m) the array kconserv[k,l,m] returns
    the index n that satifies momentum conservation,
       (k(k) - k(l) + k(m) - k(n)) \dot a = 2n\pi
    This is used for symmetry e.g. integrals of the form
        [\phi*[k](1) \phi[l](1) | \phi*[m](2) \phi[n](2)]
    are zero unless n satisfies the above.
    '''
    nkpts = kpts.shape[0]
    # Lattice vectors scaled so k.a becomes an integer test below.
    a = cell.lattice_vectors() / (2*np.pi)
    kconserv = np.zeros((nkpts,nkpts,nkpts), dtype=int)
    # kvKLM[K,L,M,:] = k(K) - k(L) + k(M), built by broadcasting.
    kvKLM = kpts[:,None,None,:] - kpts[:,None,:] + kpts
    for N, kvN in enumerate(kpts):
        kvKLMN = np.einsum('wx,klmx->wklm', a, kvKLM - kvN)
        # check whether (1/(2pi) k_{KLMN} dot a) is an integer
        kvKLMN_int = np.rint(kvKLMN)
        mask = np.einsum('wklm->klm', abs(kvKLMN - kvKLMN_int)) < 1e-9
        kconserv[mask] = N
    return kconserv
if kconserv is None:
kconserv = get_kconserv(cell, kpts)
arr_offset = []
arr_size = []
offset = 0
for kk, kl, km in loop_kkk(nkpts):
kn = kconserv[kk, kl, km]
# Get array size for these k-points and add offset
size = np.prod([norb_per_kpt[x] for x in [kk, kl, km, kn]])
arr_size.append(size)
arr_offset.append(offset)
offset += size
return arr_offset, arr_size, (arr_size[-1] + arr_offset[-1])
def check_kpt_antiperm_symmetry(array, idx1, idx2, tolerance=1e-8):
    '''Checks antipermutational symmetry for k-point array.

    Checks whether an array with k-point symmetry has antipermutational symmetry
    with respect to switching the particle indices `idx1`, `idx2`. The particle
    indices switches both the orbital index and k-point index associated with
    the two indices.

    Note:
        One common reason for not obeying antipermutational symmetry in a calculation
        involving FFTs is that the grid to perform the FFT may be too coarse. This
        symmetry is present in operators in spin-orbital form and 'spin-free'
        operators.

    Args:
        array (:obj:`ndarray`): array to test permutational symmetry, where for
            an n-particle array, the first (2n-1) array elements are kpoint indices
            while the final 2n array elements are orbital indices.
        idx1 (int): first index
        idx2 (int): second index
        tolerance (float): norm threshold below which the array counts as
            antisymmetric.

    Returns:
        bool: True if array == -array with particles idx1/idx2 swapped.

    Examples:
        For a 3-particle array, such as the T3 amplitude
            t3[ki, kj, kk, ka, kb, i, j, a, b, c],
        setting `idx1 = 0` and `idx2 = 1` would switch the orbital indices i, j as well
        as the kpoint indices ki, kj.

        >>> nkpts, nocc, nvir = 3, 4, 5
        >>> t2 = numpy.random.random_sample((nkpts, nkpts, nkpts, nocc, nocc, nvir, nvir))
        >>> t2 = t2 - t2.transpose(1,0,2,4,3,5,6)
        >>> check_kpt_antiperm_symmetry(t2, 0, 1)
        True
    '''
    # Checking to make sure bounds of idx1 and idx2 are O.K.
    # (bug fix: the old 'assert(cond and "msg")' form never displayed the message)
    assert idx1 >= 0 and idx2 >= 0, 'indices to swap must be non-negative!'

    array_shape_len = len(array.shape)
    # An n-particle k-point array has (2n - 1) k-point axes followed by 2n
    # orbital axes; the last k-point is fixed by momentum conservation.
    # Bug fix: must be floor division; '/' returns a float under Python 3 and
    # float indices crash the np.arange swaps below.
    nparticles = (array_shape_len + 1) // 4
    assert idx1 < (2 * nparticles - 1) and idx2 < (2 * nparticles - 1), (
        'This function does not support the swapping of the last k-point index '
        '(This k-point is implicitly not indexed due to conservation of momentum '
        'between k-points.).')

    if nparticles > 3:
        # Bug fix: the '%d' placeholder was never filled in.
        raise NotImplementedError('Currently set up for only up to 3 particle '
                                  'arrays. Input array has %d particles.' % nparticles)

    kpt_idx1 = idx1
    kpt_idx2 = idx2
    # Start of the orbital indices, located after the k-point indices.
    orb_idx1 = (2 * nparticles - 1) + idx1
    orb_idx2 = (2 * nparticles - 1) + idx2

    # Build the axis permutation that swaps both the k-point and orbital axes
    # of the two particles.
    out_array_indices = np.arange(array_shape_len)
    out_array_indices[kpt_idx1], out_array_indices[kpt_idx2] = \
        out_array_indices[kpt_idx2], out_array_indices[kpt_idx1]
    out_array_indices[orb_idx1], out_array_indices[orb_idx2] = \
        out_array_indices[orb_idx2], out_array_indices[orb_idx1]
    antisymmetric = (np.linalg.norm(array + array.transpose(out_array_indices)) <
                     tolerance)
    return antisymmetric
def get_kconserv3(cell, kpts, kijkab):
    r'''Get the momentum conservation array for a set of k-points.

    This function is similar to get_kconserv, but instead finds the 'kc'
    that satisfies momentum conservation for 5 k-points,
        (ki + kj + kk - ka - kb - kc) dot a = 2n\pi
    where these kpoints are stored in kijkab[ki, kj, kk, ka, kb].

    Entries of `kijkab` may be arrays/lists of k-point indices or single
    integers; axes corresponding to scalar entries are squeezed out of the
    returned array.
    '''
    nkpts = kpts.shape[0]
    # Lattice vectors scaled so k.a becomes an integer test below.
    a = cell.lattice_vectors() / (2*np.pi)

    kpts_i, kpts_j, kpts_k, kpts_a, kpts_b = \
            [kpts[x].reshape(-1,3) for x in kijkab]
    shape = [np.size(x) for x in kijkab]
    kconserv = np.zeros(shape, dtype=int)

    # kv_kab[k,a,b,:] = k(k) - k(a) - k(b), built by broadcasting.
    kv_kab = kpts_k[:,None,None,:] - kpts_a[:,None,:] - kpts_b
    for i, kpti in enumerate(kpts_i):
        for j, kptj in enumerate(kpts_j):
            kv_ijkab = kv_kab + kpti + kptj
            for c, kptc in enumerate(kpts):
                s = np.einsum('kabx,wx->kabw', kv_ijkab - kptc, a)
                s_int = np.rint(s)
                mask = np.einsum('kabw->kab', abs(s - s_int)) < 1e-9
                kconserv[i,j,mask] = c

    # Squeeze axes that came from scalar entries in kijkab.
    # Bug fix: np.int was removed in NumPy >= 1.24; np.integer is the proper
    # abstract base class for numpy integer scalars.
    new_shape = [shape[i] for i, x in enumerate(kijkab)
                 if not isinstance(x, (int, np.integer))]
    kconserv = kconserv.reshape(new_shape)
    return kconserv
class VectorComposer(object):
    def __init__(self, dtype):
        """
        Composes vectors.
        Args:
            dtype (type): array data type;
        """
        self.__dtype__ = dtype
        self.__transactions__ = []  # arrays queued since the last flush
        self.__total_size__ = 0     # total element count of the queued arrays
        self.__data__ = None        # everything composed by previous flushes
    def put(self, a):
        """
        Puts array into vector.
        Args:
            a (ndarray): array to put;
        """
        if a.dtype != self.__dtype__:
            # Bug fix: previously formatted with 'self.dtype', an attribute
            # that does not exist, turning this into an AttributeError.
            raise ValueError("dtype mismatch: passed %s vs expected %s" % (a.dtype, self.__dtype__))
        self.__transactions__.append(a)
        self.__total_size__ += a.size
    def flush(self):
        """
        Composes the vector.
        Returns:
            The composed vector: everything put so far, including data from
            previous flushes, flattened and concatenated in put() order.
        """
        if self.__data__ is None:
            offset = 0
            result = np.empty(self.__total_size__, dtype=self.__dtype__)
        else:
            offset = self.__data__.size
            result = np.empty(offset + self.__total_size__, dtype=self.__dtype__)
            # Bug fix: previously flushed data was dropped, leaving the head
            # of the new buffer uninitialized.
            result[:offset] = self.__data__
        self.__data__ = result
        for i in self.__transactions__:
            s = i.size
            result[offset:offset + s] = i.reshape(-1)
            offset += s
        self.__transactions__ = []
        # Bug fix: reset the pending size so a later flush does not
        # double-count already-composed elements.
        self.__total_size__ = 0
        return result
class VectorSplitter(object):
    def __init__(self, vector):
        """
        Splits vectors into pieces.
        Args:
            vector (ndarray): vector to split;
        """
        self.__data__ = vector
        self.__offset__ = 0  # read position within the vector
    def get(self, destination, slc=None):
        """
        Retrieves the next array.
        Args:
            destination: the shape of the destination array or the destination array itself;
                an int allocates a zeroed 1D array, a tuple a zeroed ndarray of
                that shape, and an existing ndarray is filled in place.
            slc: an optional slice (a sequence of per-axis index sequences,
                combined with numpy.ix_); only that sub-block is filled, the
                rest of `destination` keeps its zeros/previous values.
        Returns:
            The array.
        """
        if isinstance(destination, Number):
            destination = np.zeros((destination,), dtype=self.__data__.dtype)
        elif isinstance(destination, tuple):
            destination = np.zeros(destination, dtype=self.__data__.dtype)
        elif isinstance(destination, np.ndarray):
            pass
        else:
            raise ValueError("Unknown destination: %s" % str(destination))
        if slc is None:
            take_size = np.prod(destination.shape)
            take_shape = destination.shape
        else:
            slc = np.ix_(*slc)
            take_size = destination[slc].size
            take_shape = destination[slc].shape
        avail = self.__data__.size - self.__offset__
        if take_size > avail:
            raise ValueError("Insufficient # of elements: required %d %s, found %d" % (take_size, take_shape, avail))
        # Consume the next `take_size` elements and reshape into the target.
        if slc is None:
            destination[:] = self.__data__[self.__offset__:self.__offset__ + take_size].reshape(take_shape)
        else:
            destination[slc] = self.__data__[self.__offset__:self.__offset__ + take_size].reshape(take_shape)
        self.__offset__ += take_size
        return destination
    def truncate(self):
        """
        Truncates the data vector.
        """
        # Drops the already-consumed prefix and rewinds the read position.
        self.__data__ = self.__data__[self.__offset__:].copy()
        self.__offset__ = 0
class KptsHelper(lib.StreamObject):
    def __init__(self, cell, kpts):
        '''Helper class for handling k-points in correlated calculations.

        Attributes:
            kconserv : (nkpts,nkpts,nkpts) ndarray
                The index of the fourth momentum-conserving k-point, given
                indices of three k-points
            symm_map : OrderedDict of list of (3,) tuples
                Keys are (3,) tuples of symmetry-unique k-point indices and
                values are lists of (3,) tuples, enumerating all
                symmetry-related k-point indices for ERI generation
        '''
        self.kconserv = get_kconserv(cell, kpts)
        nkpts = len(kpts)
        temp = range(0,nkpts)
        kptlist = lib.cartesian_prod((temp,temp,temp))
        completed = np.zeros((nkpts,nkpts,nkpts), dtype=bool)
        # _operation[kp,kq,kr] records which of the 4 ERI permutation
        # operations maps the symmetry-unique block onto (kp,kq,kr);
        # see transform_symm for the meaning of each code.
        self._operation = np.zeros((nkpts,nkpts,nkpts), dtype=int)
        self.symm_map = OrderedDict()
        for kpt in kptlist:
            kpt = tuple(kpt)
            kp,kq,kr = kpt
            if not completed[kp,kq,kr]:
                self.symm_map[kpt] = list()
                ks = self.kconserv[kp,kq,kr]
                # The four permutations of an ERI (pq|rs) that leave it
                # invariant (up to complex conjugation):
                completed[kp,kq,kr] = True
                self._operation[kp,kq,kr] = 0
                self.symm_map[kpt].append((kp,kq,kr))
                completed[kr,ks,kp] = True
                self._operation[kr,ks,kp] = 1 #.transpose(2,3,0,1)
                self.symm_map[kpt].append((kr,ks,kp))
                completed[kq,kp,ks] = True
                self._operation[kq,kp,ks] = 2 #np.conj(.transpose(1,0,3,2))
                self.symm_map[kpt].append((kq,kp,ks))
                completed[ks,kr,kq] = True
                self._operation[ks,kr,kq] = 3 #np.conj(.transpose(3,2,1,0))
                self.symm_map[kpt].append((ks,kr,kq))
    def transform_symm(self, eri_kpt, kp, kq, kr):
        '''Return the symmetry-related ERI at any set of k-points.

        Args:
            eri_kpt : (nmo,nmo,nmo,nmo) ndarray
                An in-cell ERI calculated with a set of symmetry-unique k-points.
            kp, kq, kr : int
                The indices of the k-points at which the ERI is desired.
        '''
        operation = self._operation[kp,kq,kr]
        if operation == 0:
            return eri_kpt
        if operation == 1:
            return eri_kpt.transpose(2,3,0,1)
        if operation == 2:
            return np.conj(eri_kpt.transpose(1,0,3,2))
        if operation == 3:
            return np.conj(eri_kpt.transpose(3,2,1,0))
| gkc1000/pyscf | pyscf/pbc/lib/kpts_helper.py | Python | apache-2.0 | 13,133 |
"""Home Assistant command line scripts."""
from __future__ import annotations
import argparse
import asyncio
import importlib
import logging
import os
import sys
from typing import Sequence
from homeassistant import runner
from homeassistant.bootstrap import async_mount_local_lib_path
from homeassistant.config import get_default_config_dir
from homeassistant.requirements import pip_kwargs
from homeassistant.util.package import install_package, is_installed, is_virtual_env
# mypy: allow-untyped-defs, no-warn-return-any
def run(args: list) -> int:
    """Run a script.

    Discovers the scripts shipped next to this package (each .py file or
    sub-directory is a runnable script), installs the script's declared
    REQUIREMENTS, then delegates to its run() with the remaining args.
    Returns a process exit code (0 on success, 1 on usage errors).
    """
    scripts = []
    path = os.path.dirname(__file__)
    for fil in os.listdir(path):
        if fil == "__pycache__":
            continue
        if os.path.isdir(os.path.join(path, fil)):
            scripts.append(fil)
        elif fil != "__init__.py" and fil.endswith(".py"):
            scripts.append(fil[:-3])
    if not args:
        print("Please specify a script to run.")
        print("Available scripts:", ", ".join(scripts))
        return 1
    if args[0] not in scripts:
        print("Invalid script specified.")
        print("Available scripts:", ", ".join(scripts))
        return 1
    script = importlib.import_module(f"homeassistant.scripts.{args[0]}")
    config_dir = extract_config_dir()
    loop = asyncio.get_event_loop()
    # Outside a venv the deps dir inside the config dir must be importable.
    if not is_virtual_env():
        loop.run_until_complete(async_mount_local_lib_path(config_dir))
    _pip_kwargs = pip_kwargs(config_dir)
    logging.basicConfig(stream=sys.stdout, level=logging.INFO)
    # Install any requirements the target script declares before running it.
    for req in getattr(script, "REQUIREMENTS", []):
        if is_installed(req):
            continue
        if not install_package(req, **_pip_kwargs):
            print("Aborting script, could not install dependency", req)
            return 1
    asyncio.set_event_loop_policy(runner.HassEventLoopPolicy(False))
    # Remaining CLI arguments are passed through to the script.
    return script.run(args[1:])  # type: ignore
def extract_config_dir(args: Sequence[str] | None = None) -> str:
    """Extract the config dir from the arguments or get the default."""
    parser = argparse.ArgumentParser(add_help=False)
    parser.add_argument("-c", "--config", default=None)
    parsed, _ = parser.parse_known_args(args)
    if parsed.config:
        return os.path.join(os.getcwd(), parsed.config)
    return get_default_config_dir()
| w1ll1am23/home-assistant | homeassistant/scripts/__init__.py | Python | apache-2.0 | 2,358 |
from __future__ import unicode_literals
def test_textx_issue155():
    """Regression test: boolean expressions with parenthesized sub-expressions
    must parse into the expected BaseExpr/RawCondition tree (textX issue #155)."""
    import textx
    METAMODEL = textx.metamodel_from_str("""
    Model: expression=Expr ;
    ParenExpr: '(' Expr ')';
    Expr: ParenExpr | BaseExpr;
    BaseExpr: left=Condition operations*=Operation;
    Operation: op=BoolOperator remaining=Condition;
    Condition: ParenExpr | RawCondition;
    RawCondition: id=Identifier op=MathOperator val=INT;
    Identifier: id=/[a-zA-Z0-9_-]+/;
    MathOperator: op=/=|>|</;
    BoolOperator: op=/AND|OR/;
    """)
    from textx import textx_isinstance
    # A fully parenthesized expression unwraps to a plain BaseExpr.
    m1 = METAMODEL.model_from_str('(b=3 OR c=4)')
    assert textx_isinstance(m1.expression, METAMODEL['BaseExpr'])
    assert textx_isinstance(m1.expression.left, METAMODEL['RawCondition'])
    assert len(m1.expression.operations) == 1
    assert m1.expression.operations[0].op.op == 'OR'
    assert textx_isinstance(m1.expression.operations[0].remaining,
                            METAMODEL['RawCondition'])
    # A parenthesized right-hand operand stays a nested BaseExpr.
    m2 = METAMODEL.model_from_str('a=2 AND (b=3 OR c=4)')
    assert textx_isinstance(m2.expression, METAMODEL['BaseExpr'])
    assert textx_isinstance(m2.expression.left, METAMODEL['RawCondition'])
    assert len(m2.expression.operations) == 1
    assert m2.expression.operations[0].op.op == 'AND'
    assert textx_isinstance(m2.expression.operations[0].remaining,
                            METAMODEL['BaseExpr'])
    assert textx_isinstance(m2.expression.operations[0].remaining.left,
                            METAMODEL['RawCondition'])
    assert len(m2.expression.operations[0].remaining.operations) == 1
    assert m2.expression.operations[0].remaining.operations[0].op.op == 'OR'
    assert textx_isinstance(m2.expression.operations[0].remaining.
                            operations[0].remaining,
                            METAMODEL['RawCondition'])
| igordejanovic/textX | tests/functional/regressions/test_issue155.py | Python | mit | 1,857 |
#!/usr/bin/env python
#
# Copyright 2015 BMC Software, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import json
from unittest import TestCase
from boundary import HostgroupCreate
from boundary import HostgroupDelete
from cli_runner import CLIRunner
from cli_test import CLITest
class HostgroupDeleteTest(TestCase):
    """CLI tests for the 'hostgroup delete' command (runs against a live API)."""
    def setUp(self):
        self.cli = HostgroupDelete()
    def test_cli_description(self):
        CLITest.check_description(self, self.cli)
    def test_cli_help(self):
        CLITest.check_cli_help(self, self.cli)
    def test_create_curl(self):
        # '-z' makes the CLI print the equivalent curl command instead of
        # executing the request.
        runner = CLIRunner(self.cli)
        filter_id = 1024
        curl = runner.get_output(['-i', str(filter_id),
                                  '-z'])
        CLITest.check_curl(self, self.cli, curl)
    def test_delete_filter(self):
        # Create a uniquely named host group first, then delete it by id and
        # check the API reports success.
        runner_create = CLIRunner(HostgroupCreate())
        filter_name = 'Filter' + CLITest.random_string(6)
        sources = 'foo,bar,red,green'
        create = runner_create.get_output(['-n', filter_name,
                                           '-s', sources])
        filter_create = json.loads(create)
        filter = filter_create['result']
        filter_id = filter['id']
        runner_delete = CLIRunner(HostgroupDelete())
        delete = runner_delete.get_output(['-i', str(filter_id)])
        delete_result = json.loads(delete)
        self.assertTrue(delete_result['result']['success'])
| boundary/pulse-api-cli | tests/unit/boundary/hostgroup_delete_test.py | Python | apache-2.0 | 1,940 |
from django.shortcuts import render_to_response, get_object_or_404
from django.template import Context, loader
from stories.models import Story, Sprint
from django.http import HttpResponse
from django.views.generic import DetailView, ListView
class SprintView(DetailView):
    """Detail page for a sprint with burndown chart data (Python 2 code:
    the lambdas below use tuple-parameter unpacking, removed in Python 3)."""
    # X-axis labels: 4 samples per day, Mon-Fri => 17 data points.
    days = ["", "","Mon", "", "", "", "Tue", "", "", "", "Wed", "", "", "", "Thu", "", "Fri"]
    model = Sprint
    def get_context_data(self, **kwargs):
        context = super(SprintView, self).get_context_data(**kwargs)
        # Finished sprints show the actual burndown; running ones only the
        # ideal schema lines.
        if self.object.is_finished:
            context['burndown'] = self.burndown()
        else:
            context['burndown_schema'] = self.burndown_schema()
        return context
    def burndown(self):
        # Rows: (label, ideal remaining, ideal +20%, ideal -20%, actual remaining).
        # NOTE(review): the ideal lines divide by 4 although burnup() appears
        # to yield 17 samples (see self.days) - confirm intended slope.
        total = self.object.original_commitment()
        burn = map(lambda (i,e): (self.days[i], total-total*i/4, total*1.2-total*i/4*1.2, total*0.8-total*i/4*0.8,total-e),enumerate(self.object.burnup()))
        return burn
    def burndown_schema(self):
        # Rows: (label, ideal remaining, ideal +20%, ideal -20%) over 17 points.
        total = self.object.original_commitment()
        burn = map(lambda (i,e): (
                self.days[i],
                total-total*i/17,
                total*1.2-total*i/17*1.2,
                total*0.8-total*i/17*0.8)
            ,enumerate(range(17)))
        return burn
class SprintListView(ListView):
    """Listing of all sprints (newest first) plus aggregate chart series."""
    queryset = Sprint.objects.all().order_by('-start_date')
    def get_context_data(self, **kwargs):
        # Expose per-sprint metric series for the overview charts.
        context = super(SprintListView, self).get_context_data(**kwargs)
        context['TVI'] = self.getTVI()
        context['Points'] = self.getPoints()
        context['Pct'] = self.getPct()
        return context
    def getTVI(self):
        # (sprint number, targeted value increase) for finished sprints,
        # oldest first.
        return map(lambda s: (s.number, s.targeted_value_increase()), self.object_list.order_by('start_date').filter(is_finished=True).all())
    def getPoints(self):
        # Capacity / velocity / commitment, normalised by member dedication
        # (scaled to a per-100%-dedication figure).
        return map(lambda s: (s.number, s.work_capacity()*100/s.member_dedication, s.velocity()*100/s.member_dedication, s.original_commitment()*100/s.member_dedication),
            self.object_list.order_by('start_date').filter(is_finished=True).all())
    def getPct(self):
        # Percentage-style metrics per finished sprint.
        return map(lambda s: (s.number, s.focus_factor(), s.accuracy_of_estimation(), s.accuracy_of_commit()),
            self.object_list.order_by('start_date').filter(is_finished=True).all())
| alexsiri7/RoboScrum | stories/views.py | Python | gpl-3.0 | 2,140 |
########
# Copyright (c) 2015 GigaSpaces Technologies Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# * See the License for the specific language governing permissions and
# * limitations under the License.
# Built in Imports
import requests
import platform
import tempfile
# Cloudify Imports
from utils import run
from cloudify import ctx
from cloudify import exceptions
from cloudify.decorators import operation
from constants import (
ELATIC_CO_BASE_URL,
DEFAULT_DEB_URL,
DEFAULT_RPM_URL,
INSTALLED_UBUNTU,
INSTALLED_CENTOS
)
@operation
def configure(conf, **_):
    """Resolve the Logstash configuration source and ship it to its target.

    ``conf`` carries ``type`` ('template' or 'static'), the ``path`` /
    ``inline`` inputs, and the ``destination_path`` on the host.
    """
    conf_type = conf.get('type')
    if 'template' in conf_type:
        # Template mode requires a template path to render from.
        if not conf.get('path'):
            raise exceptions.NonRecoverableError(
                'logstash property conf.path '
                'cannot be empty if conf.type is "template".')
        static_config = generate_static_config(conf.get('path'))
    elif 'static' in conf_type:
        # Static mode accepts either a path or an inline configuration.
        if not (conf.get('path') or conf.get('inline')):
            raise exceptions.NonRecoverableError(
                'either logstash property conf.path '
                'or conf.inline are required when conf.type is "static".')
        static_config = conf.get('path')
    else:
        raise exceptions.NonRecoverableError(
            'logstash property conf.type '
            'can only be "template" or "static".')
    upload_static_config(static_config, conf.get('destination_path'))
def generate_static_config(template_conf):
    # Placeholder: rendering a static config from a template is not
    # implemented yet; configure() will propagate the NotImplementedError.
    ctx.logger.info('Generating static conf from template')
    raise NotImplementedError
def upload_static_config(static_conf, conf_path):
    """Fetch the blueprint resource *static_conf* and copy it to *conf_path*."""
    ctx.logger.info('Copying config to {0}'.format(conf_path))
    try:
        # download_resource writes the blueprint resource to a temp file.
        local_copy = ctx.download_resource(static_conf, tempfile.mktemp())
    except Exception as e:
        raise exceptions.NonRecoverableError(
            'failed to download. Error: {0}.'.format(str(e)))
    copy_command = 'sudo cp {0} {1}'.format(local_copy, conf_path)
    run(copy_command)
@operation
def start(command, **_):
    """Start the logstash daemon via the configured service command."""
    ctx.logger.debug('Attempting to start log transport service.')
    result = run(command)
    if result.returncode == 0:
        return
    raise exceptions.NonRecoverableError(
        'Unable to start log transport service: {0}'.format(result))
@operation
def stop(command, **_):
    """Stop the logstash daemon via the configured service command."""
    ctx.logger.debug('Attempting to stop log transport service.')
    result = run(command)
    if result.returncode == 0:
        return
    raise exceptions.NonRecoverableError(
        'Unable to stop log transport service: {0}'.format(result))
@operation
def install(package_url, **_):
    """Install Logstash from a platform-appropriate package.

    :param package_url: optional explicit package URL; when falsy, the
        default Elastic URL for the detected distribution is used.
    """
    ctx.logger.debug('Attempting to install log transport service.')
    # BUG FIX: platform.linux_distribution() returns a
    # (distname, version, id) tuple; the previous code called .lower()
    # directly on the tuple, which raises AttributeError.  Only the
    # distribution name is relevant for _install()'s dispatch.
    distname = platform.linux_distribution(full_distribution_name=False)[0]
    _install(distname.lower(), package_url)
def _install(platform, url):
    """Install logstash from a .deb (Ubuntu) or .rpm (Centos) package.

    ``platform`` is the lower-cased distro identifier; ``url`` may be
    empty, in which case the default Elastic package URL is used.
    NOTE(review): the parameter name shadows the stdlib ``platform``
    module imported at the top of the file -- harmless here, but
    confusing; consider renaming.
    """
    # Temporary file that will receive the downloaded package.
    _, package_file = tempfile.mkstemp()
    if 'ubuntu' in platform:
        install_command = 'sudo dpkg -i {0}'.format(package_file)
        # NOTE(review): membership test on run()'s return value -- assumes
        # run() yields the probe command's output text; confirm.  Also,
        # 'install' is a substring of 'not installed', so verify the probe
        # output cannot contain that phrase here.
        if 'install' in run(INSTALLED_UBUNTU):
            ctx.logger.info('Logstash already installed.')
            return
        if not url:
            url = ELATIC_CO_BASE_URL \
                + DEFAULT_DEB_URL
    elif 'centos' in platform:
        install_command = 'sudo yum install -y {0}'.format(package_file)
        if 'not installed' not in run(INSTALLED_CENTOS):
            ctx.logger.info('Logstash already installed.')
            return
        if not url:
            url = ELATIC_CO_BASE_URL \
                + DEFAULT_RPM_URL
    else:
        raise exceptions.NonRecoverableError(
            'Only Centos and Ubuntu supported.')
    # Fetch the package into the temp file, then run the installer.
    _download_package(package_file, url)
    run(install_command)
def _download_package(package_file, url):
    """Stream *url* into *package_file* in 1 KiB chunks."""
    ctx.logger.debug('Downloading: {0}'.format(url))
    response = requests.get(url, stream=True)
    with open(package_file, 'wb') as handle:
        for chunk in response.iter_content(chunk_size=1024):
            # Skip keep-alive chunks, which arrive as empty bytes.
            if not chunk:
                continue
            handle.write(chunk)
            handle.flush()
| EarthmanT/cloudify-logstash-plugin | logstash_plugin/tasks.py | Python | apache-2.0 | 4,699 |
# -*- coding: utf-8 -*-
#
# This file is part of INSPIRE.
# Copyright (C) 2014, 2015 CERN.
#
# INSPIRE is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# INSPIRE is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with INSPIRE; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Tests for robotupload."""
from __future__ import print_function, absolute_import
from invenio.testsuite import make_test_suite, run_test_suite, InvenioTestCase
import httpretty
class RobotUploadTests(InvenioTestCase):
    """Test the robotupload functions.

    Each test stubs the Invenio batchuploader HTTP endpoint with
    httpretty and checks how ``make_robotupload_marcxml`` surfaces the
    canned server reply.  The registration/upload boilerplate shared by
    all four tests lives in the two private helpers below.
    """

    # Base URL of the stubbed Invenio instance.
    base_url = "http://localhost:4000"

    def _register_robotupload(self, mode, body, status):
        """Register a canned HTTP response for the given upload mode."""
        httpretty.register_uri(
            httpretty.POST,
            "{0}/batchuploader/robotupload/{1}".format(self.base_url, mode),
            body=body,
            status=status
        )

    def _upload(self, marcxml, mode, **kwargs):
        """Run make_robotupload_marcxml against the stubbed server."""
        from inspire.utils.robotupload import make_robotupload_marcxml
        return make_robotupload_marcxml(
            self.base_url,
            marcxml,
            mode=mode,
            **kwargs
        )

    @httpretty.activate
    def test_robotupload_bad_xml(self):
        """Test proper handling when bad MARCXML is sent."""
        self._register_robotupload(
            "insert",
            "[ERROR] MARCXML is not valid.\n",
            400
        )
        response = self._upload("record></record>", "insert")
        self.assertEqual(response.status_code, 400)
        self.assertTrue("not valid" in response.text)

    @httpretty.activate
    def test_robotupload_success(self):
        """Test proper handling when good MARCXML is sent."""
        self._register_robotupload(
            "insert",
            "[INFO] bibupload batchupload --insert /dummy/file/path\n",
            200
        )
        response = self._upload("<record></record>", "insert")
        self.assertEqual(response.status_code, 200)
        self.assertTrue("[INFO] bibupload batchupload" in response.text)

    @httpretty.activate
    def test_robotupload_success_append(self):
        """Test proper handling when good MARCXML is sent."""
        self._register_robotupload(
            "append",
            "[INFO] bibupload batchupload --append /dummy/file/path\n",
            200
        )
        response = self._upload("<record></record>", "append")
        self.assertEqual(response.status_code, 200)
        self.assertTrue("[INFO] bibupload batchupload" in response.text)

    @httpretty.activate
    def test_robotupload_callback_url(self):
        """Test passing of a callback URL."""
        body = (
            "[INFO] bibupload batchupload --insert /some/path"
            "--callback-url http://localhost"
        )
        self._register_robotupload("insert", body, 200)
        response = self._upload(
            "<record></record>",
            "insert",
            callback_url="http://localhost",
        )
        self.assertEqual(response.status_code, 200)
        self.assertTrue("--callback-url http://localhost" in response.text)
# Register the test cases with Invenio's test harness; allow running this
# module directly as a script.
TEST_SUITE = make_test_suite(RobotUploadTests)
if __name__ == "__main__":
    run_test_suite(TEST_SUITE)
| ioannistsanaktsidis/inspire-next | inspire/testsuite/test_robotupload.py | Python | gpl-2.0 | 4,251 |
import logging
import requests
import dateutil.parser
from datetime import datetime, timedelta
from django.conf import settings
from django.db.models import Max
from treeherder.model.models import Bugscache, BugJobMap
from treeherder.utils.github import fetch_json
from treeherder.utils.http import make_request
logger = logging.getLogger(__name__)
def reopen_request(url, method, headers, json):
    # Thin indirection over make_request.  NOTE(review): presumably split
    # out so tests can patch this symbol -- confirm before inlining.  The
    # `json` parameter intentionally shadows the module-level `json` import
    # within this function.
    make_request(url, method=method, headers=headers, json=json)
def reopen_intermittent_bugs():
    """Reopen INCOMPLETE intermittent bugs that sheriffs classified
    against within the last RECENT_DAYS, quoting the latest failure log.
    """
    # Don't reopen bugs from non-production deployments.
    if settings.BUGFILER_API_KEY is None:
        return
    incomplete_bugs = set(
        Bugscache.objects.filter(resolution='INCOMPLETE').values_list('id', flat=True)
    )
    # Intermittent bugs get closed after 3 weeks of inactivity if other conditions don't apply:
    # https://github.com/mozilla/relman-auto-nag/blob/c7439e247677333c1cd8c435234b3ef3adc49680/auto_nag/scripts/close_intermittents.py#L17
    RECENT_DAYS = 7
    recently_used_bugs = set(
        BugJobMap.objects.filter(created__gt=datetime.now() - timedelta(RECENT_DAYS)).values_list(
            'bug_id', flat=True
        )
    )
    bugs_to_reopen = incomplete_bugs & recently_used_bugs
    for bug_id in bugs_to_reopen:
        # The most recent job classified with this bug supplies the log
        # viewer link embedded in the reopen comment.
        bug_data = (
            BugJobMap.objects.filter(bug_id=bug_id)
            .select_related('job__repository')
            .order_by('-created')
            .values('job_id', 'job__repository__name')[0]
        )
        job_id = bug_data.get('job_id')
        repository = bug_data.get('job__repository__name')
        log_url = f"https://treeherder.mozilla.org/logviewer?job_id={job_id}&repo={repository}"
        comment = {'body': "New failure instance: " + log_url}
        url = settings.BUGFILER_API_URL + "/rest/bug/" + str(bug_id)
        headers = {'x-bugzilla-api-key': settings.BUGFILER_API_KEY, 'Accept': 'application/json'}
        data = {
            'status': 'REOPENED',
            'comment': comment,
            'comment_tags': "treeherder",
        }
        try:
            reopen_request(url, method='PUT', headers=headers, json=data)
        except requests.exceptions.HTTPError as e:
            # Prefer Bugzilla's structured error message when the response
            # body is JSON; fall back to the raw text otherwise.
            try:
                message = e.response.json()['message']
            except (ValueError, KeyError):
                message = e.response.text
            logger.error(f"Reopening bug {str(bug_id)} failed: {message}")
def fetch_intermittent_bugs(additional_params, limit, duplicate_chain_length):
    """Fetch one page of bugs from the Bugzilla REST API.

    ``additional_params`` narrows the query (keywords/offset or explicit
    ids); ``limit`` caps the page size.  ``duplicate_chain_length`` is
    accepted for the caller's convenience but not used here.
    """
    fields = (
        'id',
        'summary',
        'status',
        'resolution',
        'dupe_of',
        'duplicates',
        'cf_crash_signature',
        'keywords',
        'last_change_time',
        'whiteboard',
    )
    query = {'include_fields': ','.join(fields), 'limit': limit}
    query.update(additional_params)
    response = fetch_json(settings.BZ_API_URL + '/rest/bug', params=query)
    return response.get('bugs', [])
class BzApiBugProcess:
    """Synchronise Treeherder's Bugscache table with Bugzilla.

    Ingests intermittent-failure bugs (plus bugs used for classification
    and chains of duplicate bugs) from the Bugzilla REST API, resolves
    duplicate chains to their open bug, migrates classifications from
    duplicates to the open bug, and prunes entries older than a year.
    """

    def run(self):
        """Run one full synchronisation pass (see class docstring)."""
        year_ago = datetime.utcnow() - timedelta(days=365)
        # Resume from the newest modification time already ingested,
        # backed off by 10 minutes to tolerate in-flight Bugzilla updates.
        last_change_time_max = (
            Bugscache.objects.all().aggregate(Max('modified'))['modified__max'] or None
        )
        if last_change_time_max:
            last_change_time_max -= timedelta(minutes=10)
        else:
            last_change_time_max = year_ago
        max_summary_length = Bugscache._meta.get_field('summary').max_length
        max_whiteboard_length = Bugscache._meta.get_field('whiteboard').max_length
        last_change_time_string = last_change_time_max.strftime('%Y-%m-%dT%H:%M:%SZ')
        # Maps: open bug id -> set of its duplicate ids, and the reverse.
        bugs_to_duplicates = {}
        duplicates_to_bugs = {}
        insert_errors_observed = False
        duplicates_to_check = set()
        # The bugs are ingested in different phases:
        # 1. Intermittent bugs with activity in the bug in the last year
        #    (Bugzilla seed). Iteration 0.
        # 2. Bugs used for classification (classification seed). They will be
        #    part of the previous phase once a report about the classification
        #    has been posted in the bug (schedule weekly or daily).
        #    Processed as part of iteration 1.
        # 3. For bugs which have been resolved as duplicates, the bugs as whose
        #    duplicates they have been set will be fetched. The open bugs will
        #    be used to store the classifications. Iterations 1-5.
        # 4. Duplicates of the bugs from previous phases get fetched. Duplicate
        #    bugs included in those eventually end up here due to inactivity but
        #    are still needed for matching failure lines against bug summaries.
        #    Iterations 6-10.
        duplicate_chain_length = -1
        # make flake8 happy
        bugs_to_process = []
        while duplicate_chain_length < 10:
            duplicate_chain_length += 1
            if duplicate_chain_length > 0:
                # Drop bugs already refreshed during this run.
                bugs_to_process = list(
                    bugs_to_process
                    - set(
                        Bugscache.objects.filter(processed_update=True).values_list('id', flat=True)
                    )
                )
                if len(bugs_to_process) == 0:
                    break
            bug_list = []
            bugs_count_limit = 500
            bugs_offset = 0
            # Keep querying Bugzilla until there are no more results.
            while True:
                if duplicate_chain_length == 0:
                    additional_params = {
                        'keywords': 'intermittent-failure',
                        'last_change_time': last_change_time_string,
                        'offset': bugs_offset,
                    }
                else:
                    additional_params = {
                        'id': ','.join(
                            list(
                                map(
                                    str,
                                    bugs_to_process[bugs_offset : bugs_offset + bugs_count_limit],
                                )
                            )
                        ),
                    }
                bug_results_chunk = fetch_intermittent_bugs(
                    additional_params, bugs_count_limit, duplicate_chain_length
                )
                bug_list += bug_results_chunk
                bugs_offset += bugs_count_limit
                if duplicate_chain_length == 0 and len(bug_results_chunk) < bugs_count_limit:
                    break
                if duplicate_chain_length > 0 and bugs_offset >= len(bugs_to_process):
                    break
            bugs_to_process_next = set()
            if bug_list:
                if duplicate_chain_length == 0:
                    # First iteration: prune stale rows and reset the
                    # per-run processed flag on everything else.
                    Bugscache.objects.filter(modified__lt=year_ago).delete()
                    Bugscache.objects.all().update(processed_update=False)
                for bug in bug_list:
                    # we currently don't support timezones in treeherder, so
                    # just ignore it when importing/updating the bug to avoid
                    # a ValueError
                    try:
                        dupe_of = bug.get('dupe_of', None)
                        Bugscache.objects.update_or_create(
                            id=bug['id'],
                            defaults={
                                'status': bug.get('status', ''),
                                'resolution': bug.get('resolution', ''),
                                'summary': bug.get('summary', '')[:max_summary_length],
                                'dupe_of': dupe_of,
                                'crash_signature': bug.get('cf_crash_signature', ''),
                                'keywords': ",".join(bug['keywords']),
                                'modified': dateutil.parser.parse(
                                    bug['last_change_time'], ignoretz=True
                                ),
                                'whiteboard': bug.get('whiteboard', '')[:max_whiteboard_length],
                                'processed_update': True,
                            },
                        )
                    except Exception as e:
                        logger.error("error inserting bug '%s' into db: %s", bug, e)
                        insert_errors_observed = True
                        continue
                    if dupe_of is not None:
                        # Resolve this duplicate to the open end of the
                        # chain seen so far and merge chain bookkeeping.
                        openish = (
                            duplicates_to_bugs[dupe_of]
                            if dupe_of in duplicates_to_bugs
                            else dupe_of
                        )
                        duplicates_to_bugs[bug['id']] = openish
                        if openish not in bugs_to_duplicates:
                            bugs_to_process_next.add(openish)
                            bugs_to_duplicates[openish] = set()
                        bugs_to_duplicates[openish].add(bug['id'])
                        if bug['id'] in bugs_to_duplicates:
                            for duplicate_id in bugs_to_duplicates[bug['id']]:
                                duplicates_to_bugs[duplicate_id] = openish
                            bugs_to_duplicates[openish] |= bugs_to_duplicates[bug['id']]
                    duplicates = bug.get('duplicates')
                    if len(duplicates) > 0:
                        duplicates_to_check |= set(duplicates)
            if duplicate_chain_length == 0:
                # Phase 2: Bugs used for classification should be kept.
                # Can return invalid bug numbers (e.g. too large because of
                # typo) but they don't cause issues.
                # distinct('bug_id') is not supported by Django + MySQL 5.7
                bugs_to_process_next |= set(
                    BugJobMap.objects.all().values_list('bug_id', flat=True)
                )
            bugs_to_process = bugs_to_process_next - set(
                Bugscache.objects.filter(processed_update=True).values_list('id', flat=True)
            )
            if duplicate_chain_length == 5 and len(bugs_to_process):
                logger.warn(
                    "Found a chain of duplicate bugs longer than 6 bugs, stopped following chain. Bugscache's 'dupe_of' column contains duplicates instead of non-duplicate bugs. Unprocessed bugs: "
                    + (" ".join(list(map(str, bugs_to_process))))
                )
            if 0 <= duplicate_chain_length < 6 and len(bugs_to_process) == 0:
                # phase 3: looking for open bugs based on duplicates
                duplicate_chain_length = 5
            if duplicate_chain_length >= 5:
                # phase 4: fetching duplicates
                bugs_to_process_next = duplicates_to_check
                duplicates_to_check = set()
                bugs_to_process = bugs_to_process_next - set(
                    Bugscache.objects.filter(processed_update=True).values_list('id', flat=True)
                )
                if len(bugs_to_process) == 0:
                    break
                elif duplicate_chain_length == 10 and len(bugs_to_process):
                    logger.warn(
                        "Found a chain of duplicate bugs longer than 6 bugs, stopped following chain. Not all duplicates have been loaded. Unprocessed bugs: "
                        + (" ".join(list(map(str, bugs_to_process))))
                    )
        # Duplicate bugs don't see any activity. Use the modification date of
        # the bug against which they have been set as duplicate to prevent them
        # from getting dropped - they are still needed to match the failure line
        # against the bug summary.
        for (bug_duplicate, bug_openish) in duplicates_to_bugs.items():
            bug_openish_object = Bugscache.objects.filter(id=bug_openish)
            if len(bug_openish_object) == 0:
                # Script does not have access to open bug but to duplicate
                continue
            Bugscache.objects.filter(id=bug_duplicate).update(
                dupe_of=bug_openish, modified=bug_openish_object[0].modified
            )
        # Switch classifications from duplicate bugs to open ones.
        duplicates_db = set(
            Bugscache.objects.filter(dupe_of__isnull=False).values_list('id', flat=True)
        )
        bugs_used = set(BugJobMap.objects.all().values_list('bug_id', flat=True))
        duplicates_used = duplicates_db & bugs_used
        for bug_id in duplicates_used:
            dupe_of = Bugscache.objects.get(id=bug_id).dupe_of
            # Jobs both already classified with new duplicate and its open bug.
            jobs_openish = list(
                BugJobMap.objects.filter(bug_id=dupe_of).values_list('job_id', flat=True)
            )
            BugJobMap.objects.filter(bug_id=bug_id, job_id__in=jobs_openish).delete()
            BugJobMap.objects.filter(bug_id=bug_id).update(bug_id=dupe_of)
        # Delete open bugs and related duplicates if modification date (of open
        # bug) is too old.
        Bugscache.objects.filter(modified__lt=year_ago).delete()
        if insert_errors_observed:
            logger.error(
                "error inserting some bugs, bugscache is incomplete, bugs updated during run will be ingested again during the next run"
            )
            # Move modification date of bugs inserted/updated during this
            # run back to attempt to ingest bug data which failed during
            # this insert/update in the next run.
            Bugscache.objects.filter(modified__gt=last_change_time_max).update(
                modified=last_change_time_max
            )
        reopen_intermittent_bugs()
| jmaher/treeherder | treeherder/etl/bugzilla.py | Python | mpl-2.0 | 13,803 |
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A template to define composite ops."""
# pylint: disable=g-direct-tensorflow-import
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
from absl import app
from tensorflow.compiler.mlir.tfr.python.composite import Composite
from tensorflow.compiler.mlir.tfr.python.op_reg_gen import gen_register_op
from tensorflow.compiler.mlir.tfr.python.tfr_gen import tfr_gen_from_module
from tensorflow.python.platform import flags
# Command-line configuration: where to write the generated artifact and
# which artifact (C++ op registration vs. TFR MLIR) to produce.
FLAGS = flags.FLAGS

flags.DEFINE_string(
    'output', None,
    # Typo fix in user-facing help text: "genereated" -> "generated".
    'Path to write the generated register op file and MLIR file.')

flags.DEFINE_bool('gen_register_op', True,
                  'Generate register op cc file or tfr mlir file.')

flags.mark_flag_as_required('output')
@Composite('TestRandom', derived_attrs=['T: numbertype'], outputs=['o: T'])
def _composite_random_op():
  """Composite op stub for 'TestRandom' (body intentionally empty).

  The @Composite decorator attaches the op signature; main() discovers this
  function via its '_composite_' prefix when generating code.
  """
  pass
def main(_):
  """Generate the op-registration C++ file or the TFR MLIR file.

  FLAGS.gen_register_op selects the artifact; FLAGS.output must carry the
  matching extension (.cc or .mlir).
  """
  if FLAGS.gen_register_op:
    assert FLAGS.output.endswith('.cc')
    generated_code = gen_register_op(sys.modules[__name__], '_composite_')
  else:
    assert FLAGS.output.endswith('.mlir')
    generated_code = tfr_gen_from_module(sys.modules[__name__], '_composite_')
  dirname = os.path.dirname(FLAGS.output)
  # Guard on a non-empty dirname: a bare filename yields '' and
  # os.makedirs('') raises.  exist_ok=True also removes the
  # check-then-create race of the previous os.path.exists() guard.
  if dirname:
    os.makedirs(dirname, exist_ok=True)
  with open(FLAGS.output, 'w') as f:
    f.write(generated_code)
# Script entry point: absl parses the flags, then dispatches to main().
if __name__ == '__main__':
  app.run(main=main)
| annarev/tensorflow | tensorflow/compiler/mlir/tfr/define_op_template.py | Python | apache-2.0 | 2,013 |
#!/usr/bin/python
# Copyright (c) 2016-2021 Julien Peloton, Giulio Fabbian.
#
# This file is part of s4cmb
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Script to simulate and handle input sky maps to be scanned.
Default file format is .fits containing healpix maps, and it comes with a
class HealpixFitsMap to handle it easily.
If you have a different I/O in your pipeline, just add a new class.
Author: Julien Peloton, peloton@lal.in2p3.fr
Giulio Fabbian, g.fabbian@sussex.ac.uk
"""
from __future__ import division, absolute_import, print_function
import glob
import os
import warnings
import healpy as hp
import numpy as np
from s4cmb.tools import alm2map_spin_der1
class HealpixFitsMap:
""" Class to handle fits file containing healpix maps """
def __init__(
self,
input_filename,
do_pol=True,
verbose=False,
fwhm_in=0.0,
fwhm_in2=None,
nside_in=16,
lmax=None,
map_seed=53543,
no_ileak=False,
no_quleak=False,
compute_derivatives=None,
derivatives_type="T",
ext_map_gal=False,
):
"""
Parameters
----------
input_filename : string, or list of strings
Either fits file containing the sky maps (data will just be
loaded), or CAMB lensed cl file (.dat) containing lensed
power spectra with order ell, TT, EE, BB, TE (maps will be
created on-the-fly), or a list of 3 fits files containing alms
(maps will be created on-the-fly).
do_pol : bool, optional
If True, load temperature and polarisation. Temperature only
otherwise. Default is True.
verbose : bool, optional
If True, print out plenty of useless messages.
fwhm_in : float, optional
If input_filename is a CAMB lensed cl file, the generated maps will
be convolved with a beam having this fwhm_in. In arcmin.
No effect if you provide maps directly.
fwhm_in2 : float, optional
If provided, will generate another set of I, Q, U with this
resolution (useful for dichroic detectors). Default is None.
No effect if you provide maps directly.
nside_in : int, optional
If input_filename is a CAMB lensed cl file, the maps will be
generated at a resolution nside_in. No effect if you provide maps
directly.
lmax : None or int, optional
Maximum multipole when creating a map from cl. If none, it
is set automatically to 2*nside_in
map_seed : int, optional
If input_filename is a CAMB lensed cl file, this is the seed used
to create the maps. No effect if you provide maps directly.
no_ileak : bool, optional
If True, load temperature and polarisation, but set the temperature
to zero to avoid leakages.
no_quleak : bool, optional
If True, load temperature and polarisation, but set the
polarisation to zero to avoid leakages.
ext_map_gal : bool, optional
Set it to True if you are reading a map in Galactic coordinate.
(Planck maps for example).
compute_derivatives : bool, optional
If True, return derivatives of relevant Stokes component according
to the derivatives_type option.
Be sure that you have enough memory!
derivatives_type : str, optional
If 'T' ('P') present in the string return 1st and 2nd derivatives
of the input temperature (Q and U) map (t=theta, p=phi).
Note that d/dp is already divided by sin(theta).
Options:
- 'T1' will compute 1st derivatives dI/dt, dI/dp alone
- 'P1' will compute 1st derivatives dQ/dt, dU/dt, dQ/dp, dU/dp.
Be sure that you have enough memory!
"""
self.input_filename = input_filename
self.do_pol = do_pol
self.verbose = verbose
self.no_ileak = no_ileak
self.no_quleak = no_quleak
self.ext_map_gal = ext_map_gal
self.fwhm_in = fwhm_in
self.fwhm_in2 = fwhm_in2
self.nside_in = nside_in
if lmax is None:
self.lmax = 2 * self.nside_in
else:
self.lmax = lmax
self.map_seed = map_seed
self.compute_derivatives = compute_derivatives
self.derivatives_type = derivatives_type
self.I = None
self.Q = None
self.U = None
self.I2 = None
self.Q2 = None
self.U2 = None
fromalms = False
if type(self.input_filename) == list:
if self.verbose:
print("Reading sky maps from alms file...")
self.load_healpix_fits_map_from_alms()
fromalms = True
elif self.input_filename[-4:] == ".dat":
if self.verbose:
print("Creating sky maps from cl file...")
self.create_healpix_fits_map()
elif self.input_filename[-5:] == ".fits":
if self.verbose:
print("Reading sky maps from fits file...")
self.load_healpix_fits_map()
else:
raise IOError(
"""Input file not understood! Should be either a
fits file containing the sky maps
(data will be loaded), or a CAMB lensed cl file
(.dat) containing lensed power spectra with
order ell, TT, EE, BB, TE
(maps will be created on-the-fly)."""
)
self.set_leakage_to_zero()
if self.compute_derivatives:
if "T" in self.derivatives_type:
self.compute_intensity_derivatives(fromalm=fromalms)
if "P" in self.derivatives_type:
self.compute_pol_derivatives(fromalm=fromalms)
    def load_healpix_fits_map(self, force=False):
        """
        Load from disk into memory a sky map.
        Not updated for dichroic for the moment.
        Parameters
        ----------
        force : bool
            If true, force to load the maps in memory even if it is already
            loaded. Default is False.
        Examples
        ----------
        Let's generate fake data
        >>> filename = 'myfits_to_test_.fits'
        >>> write_dummy_map(filename, nside=16)
        Let's now read the data
        >>> hpmap = HealpixFitsMap(input_filename=filename)
        >>> print(hpmap.nside)
        16
        If the data is already loaded, it won't reload it by default
        >>> hpmap.load_healpix_fits_map()
        External data already present in memory
        But you can force it
        >>> hpmap.load_healpix_fits_map(force=True)
        """
        if self.I is None or force:
            if self.do_pol:
                # Fields (0, 1, 2) are the I, Q, U Stokes maps.
                self.I, self.Q, self.U = hp.read_map(
                    self.input_filename, (0, 1, 2), verbose=self.verbose
                )
            else:
                self.I = hp.read_map(
                    self.input_filename, field=0, verbose=self.verbose
                )
            # Map resolution inferred from the pixel count.
            self.nside = hp.npix2nside(len(self.I))
        else:
            print("External data already present in memory")
    def load_healpix_fits_map_from_alms(self, force=False):
        """
        Load from disk into memory alms and make sky maps.
        Parameters
        ----------
        force : bool
            If true, force to load the maps in memory even if it is already
            loaded. Default is False.
        Examples
        ----------
        Let's generate fake data
        >>> np.random.seed(548397)
        >>> sky_maps = create_sky_map('s4cmb/data/test_data_set_lensedCls.dat')
        >>> alms = hp.map2alm(sky_maps)
        >>> filenames = ['myalms_to_test_tlm.fits', 'myalms_to_test_elm.fits',
        ...     'myalms_to_test_blm.fits']
        >>> for fn, alm in zip(filenames, alms):
        ...     hp.write_alm(fn, alm)
        Let's now read the data
        >>> hpmap = HealpixFitsMap(input_filename=filenames)
        >>> print(hpmap.nside)
        16
        If the data is already loaded, it won't reload it by default
        >>> hpmap.load_healpix_fits_map_from_alms()
        External data already present in memory
        But you can force it
        >>> hpmap.load_healpix_fits_map_from_alms(force=True)
        You can also generate 2 sets of maps with different resolution which
        is useful for dichroic detectors
        >>> hpmap = HealpixFitsMap(input_filename=filenames, fwhm_in=3.5,
        ...     fwhm_in2=1.8, nside_in=16,)
        >>> hasattr(hpmap, 'Q2')
        True
        """
        if self.I is None or force:
            if self.do_pol:
                # The three alm files are expected in (T, E, B) order.
                tlm = hp.read_alm(self.input_filename[0])
                elm = hp.read_alm(self.input_filename[1])
                blm = hp.read_alm(self.input_filename[2])
                # fwhm is converted from arcmin to radians for healpy.
                self.I, self.Q, self.U = hp.alm2map(
                    [tlm, elm, blm],
                    nside=self.nside_in,
                    pixwin=False,
                    fwhm=self.fwhm_in / 60.0 * np.pi / 180.0,
                    sigma=None,
                    pol=True,
                    inplace=False,
                    verbose=self.verbose,
                )
                if self.fwhm_in2 is not None:
                    # Second set of maps at the second beam resolution
                    # (dichroic detectors).
                    self.I2, self.Q2, self.U2 = hp.alm2map(
                        [tlm, elm, blm],
                        nside=self.nside_in,
                        pixwin=False,
                        fwhm=self.fwhm_in2 / 60.0 * np.pi / 180.0,
                        sigma=None,
                        pol=True,
                        inplace=False,
                        verbose=self.verbose,
                    )
            else:
                tlm = hp.read_alm(self.input_filename[0])
                self.I = hp.alm2map(
                    tlm,
                    nside=self.nside_in,
                    pixwin=False,
                    fwhm=self.fwhm_in / 60.0 * np.pi / 180.0,
                    sigma=None,
                    pol=False,
                    inplace=False,
                    verbose=self.verbose,
                )
                if self.fwhm_in2 is not None:
                    self.I2 = hp.alm2map(
                        tlm,
                        nside=self.nside_in,
                        pixwin=False,
                        fwhm=self.fwhm_in2 / 60.0 * np.pi / 180.0,
                        sigma=None,
                        pol=False,
                        inplace=False,
                        verbose=self.verbose,
                    )
            self.nside = hp.npix2nside(len(self.I))
        else:
            print("External data already present in memory")
    def create_healpix_fits_map(self, force=False):
        """
        Create sky maps from cl file.
        Do nothing if data already presents in the memory.
        Parameters
        ----------
        force : bool
            If true, force to recreate the maps in memory even
            if it is already loaded. Default is False.
        Examples
        ----------
        Let's generate the map from a CAMB file
        >>> filename = 's4cmb/data/test_data_set_lensedCls.dat'
        >>> hpmap = HealpixFitsMap(input_filename=filename, fwhm_in=3.5,
        ...     nside_in=16, map_seed=489237)
        >>> print(hpmap.nside)
        16
        If the data is already loaded, it won't reload it by default
        >>> hpmap.create_healpix_fits_map()
        External data already present in memory
        But you can force it
        >>> hpmap.create_healpix_fits_map(force=True)
        You can also load 2 sets of maps with different resolution, which
        is useful for dichroic detectors
        >>> filename = 's4cmb/data/test_data_set_lensedCls.dat'
        >>> hpmap = HealpixFitsMap(input_filename=filename, fwhm_in=3.5,
        ...     fwhm_in2=1.8, nside_in=16, map_seed=489237)
        >>> hasattr(hpmap, 'I2')
        True
        """
        if self.I is None or force:
            if self.do_pol:
                self.I, self.Q, self.U = create_sky_map(
                    self.input_filename,
                    nside=self.nside_in,
                    FWHM=self.fwhm_in,
                    seed=self.map_seed,
                    lmax=self.lmax,
                )
                if self.fwhm_in2 is not None:
                    # Second set at the second beam width (dichroic case).
                    self.I2, self.Q2, self.U2 = create_sky_map(
                        self.input_filename,
                        nside=self.nside_in,
                        FWHM=self.fwhm_in2,
                        seed=self.map_seed,
                        lmax=self.lmax,
                    )
            else:
                self.I = create_sky_map(
                    self.input_filename,
                    nside=self.nside_in,
                    FWHM=self.fwhm_in,
                    seed=self.map_seed,
                    lmax=self.lmax,
                )
                if self.fwhm_in2 is not None:
                    self.I2 = create_sky_map(
                        self.input_filename,
                        nside=self.nside_in,
                        FWHM=self.fwhm_in2,
                        seed=self.map_seed,
                        lmax=self.lmax,
                    )
            self.nside = hp.npix2nside(len(self.I))
        else:
            print("External data already present in memory")
def set_leakage_to_zero(self):
"""
Remove either I, Q or U to remove possible leakages
Examples
----------
Test with no input intensity
>>> write_dummy_map('myfits_to_test_.fits')
>>> hpmap = HealpixFitsMap('myfits_to_test_.fits', no_ileak=True)
>>> print(hpmap.I)
[ 0. 0. 0. ..., 0. 0. 0.]
Test with no input polarisation
>>> write_dummy_map('myfits_to_test_.fits')
>>> hpmap = HealpixFitsMap('myfits_to_test_.fits', no_quleak=True)
>>> print(hpmap.Q, hpmap.U)
[ 0. 0. 0. ..., 0. 0. 0.] [ 0. 0. 0. ..., 0. 0. 0.]
If you have two sets of maps, it will remove leakages from the two sets
>>> filename = 's4cmb/data/test_data_set_lensedCls.dat'
>>> hpmap = HealpixFitsMap(input_filename=filename, fwhm_in=3.5,
... fwhm_in2=1.8, nside_in=16, map_seed=489237,
... no_ileak=True, no_quleak=True)
>>> print(hpmap.I, hpmap.I2)
[ 0. 0. 0. ..., 0. 0. 0.] [ 0. 0. 0. ..., 0. 0. 0.]
"""
# Set temperature to zero to avoid I->QU leakage
if self.no_ileak:
self.I[:] = 0.0
if self.I2 is not None:
self.I2[:] = 0.0
# Set polarisation to zero to avoid QU leakage
if self.no_quleak:
if self.Q is not None:
self.Q[:] = 0.0
if self.U is not None:
self.U[:] = 0.0
if self.Q2 is not None:
self.Q2[:] = 0.0
if self.U2 is not None:
self.U2[:] = 0.0
    def compute_intensity_derivatives(self, fromalm=False):
        """
        Compute derivatives of the input temperature map (healpix).
        Not updated for dichroic for the moment.
        Parameters
        ----------
        fromalm : bool, optional
            If True, loads alm file from disk instead of fourier
            transform the input map. Automatically turns True if you input
            alm files. False otherwise.
        Examples
        ----------
        >>> filename = 's4cmb/data/test_data_set_lensedCls.dat'
        >>> hpmap = HealpixFitsMap(input_filename=filename, fwhm_in=3.5,
        ...     nside_in=16, compute_derivatives=True, map_seed=489237)
        >>> hasattr(hpmap, 'dIdp')
        True
        """
        # Obtain the temperature alm, either precomputed on disk or by
        # transforming the in-memory intensity map.
        if fromalm:
            alm = hp.read_alm(self.input_filename[0])
        else:
            alm = hp.map2alm(self.I, self.lmax)
        # lmax = hp.Alm.getlmax(alm.size)
        if "T1" in self.derivatives_type:
            # First derivatives only, straight from healpy (the first output
            # is the map itself and is discarded).
            junk, self.dIdt, self.dIdp = hp.alm2map_der1(
                alm, self.nside_in, self.lmax
            )
        else:
            # computes first and second derivative as derivatives of spin-1
            # transform of a scalar field with _1Elm=sqrt(l(l+1))Ilm _1Blm=0
            l = np.arange(self.lmax + 1)
            grad = np.sqrt(l * (l + 1))
            curl = np.zeros_like(alm)
            dervs = alm2map_spin_der1([hp.almxfl(alm, grad), curl], self.nside_in, 1)
            # Unpack: dervs[0] = first derivatives, dervs[1]/dervs[2] hold
            # the second-derivative components.
            self.dIdt = dervs[0][0]
            self.dIdp = dervs[0][1]
            self.d2Id2t = dervs[1][0]
            self.d2Id2p = dervs[2][1]
            self.d2Idpdt = dervs[2][0]
    def compute_pol_derivatives(self, fromalm=False):
        """
        Compute derivatives of the input polarization components (healpix).
        Not updated for dichroic for the moment.
        Parameters
        ----------
        fromalm : bool, optional
            If True, loads alm file from disk instead of fourier
            transform the input map. Automatically turns True if you input
            alm files. False otherwise.
        Examples
        ----------
        >>> filename = 's4cmb/data/test_data_set_lensedCls.dat'
        >>> hpmap = HealpixFitsMap(input_filename=filename, fwhm_in=3.5,
        ...     nside_in=16, compute_derivatives=True, map_seed=489237)
        >>> hasattr(hpmap, 'dIdp')
        True
        """
        # Obtain the E/B alm, either from disk (indices 1 and 2 of the input
        # filename list) or by transforming the in-memory I, Q, U maps.
        if fromalm:
            Elm = hp.read_alm(self.input_filename[1])
            Blm = hp.read_alm(self.input_filename[2])
        else:
            alm = hp.map2alm([self.I, self.Q, self.U], self.lmax)
            Elm = alm[1]
            Blm = alm[2]
        # lmax = hp.Alm.getlmax(Elm.size)
        if "P1" in self.derivatives_type:
            # First derivatives of the spin-2 field (Q, U).
            out = alm2map_spin_der1([Elm, Blm], self.nside_in, 2)
            self.dQdt = out[1][0]
            self.dUdt = out[1][1]
            self.dQdp = out[2][0]
            self.dUdp = out[2][1]
        else:
            warnings.warn("""
                Computation of second order polarization derivatives not
                implemented yet. Set to 0.
                """)
            # Fall back to first derivatives and zero-filled placeholders
            # for the (unimplemented) second derivatives.
            out = alm2map_spin_der1([Elm, Blm], self.nside_in, 2)
            self.dQdt = out[1][0]
            self.dUdt = out[1][1]
            self.dQdp = out[2][0]
            self.dUdp = out[2][1]
            self.d2Qd2t = np.zeros_like(self.dQdt)
            self.d2Qd2p = np.zeros_like(self.dQdt)
            self.d2Qdpdt = np.zeros_like(self.dQdt)
            self.d2Ud2t = np.zeros_like(self.dQdt)
            self.d2Ud2p = np.zeros_like(self.dQdt)
            self.d2Udpdt = np.zeros_like(self.dQdt)
def add_hierarch(lis):
    """
    Rewrite header entries into the pyfits HIERARCH convention, in place.
    Parameters
    ----------
    lis: list of tuples
        Contains tuples (keyword, value [, comment]).
    Returns
    ----------
    lis : list of strings
        Contains strings in the pyfits header format.
    Examples
    ----------
    >>> lis = [['toto', 3, 'I am a comment']]
    >>> add_hierarch(lis)
    [('HIERARCH toto', 3, 'I am a comment')]
    """
    for index, entry in enumerate(lis):
        keyword = "HIERARCH " + entry[0]
        if len(entry) == 3:
            # Keep the optional comment field when present.
            lis[index] = (keyword, entry[1], entry[2])
        else:
            lis[index] = (keyword, entry[1])
    return lis
def get_obspix(xmin, xmax, ymin, ymax, nside):
    """
    Given RA/Dec boundaries, return the observed pixels in the healpix scheme.
    Parameters
    ----------
    xmin : float
        x coordinate of the bottom left corner in radian. (RA min)
    xmax : float
        x coordinate of the bottom right corner in radian. (RA max)
    ymin : float
        y coordinate of the top left corner in radian. (Dec min)
    ymax : float
        y coordinate of the top right corner in radian. (Dec max)
    nside : int
        Resolution of the healpix map.
    Returns
    ----------
    obspix : 1d array of int
        The list of observed pixels.
    Examples
    ----------
    >>> get_obspix(-np.pi/2, np.pi/2,
    ...     -np.pi/2, np.pi/2, nside=2) # doctest: +NORMALIZE_WHITESPACE
    array([ 0, 3, 4, 5, 10, 11, 12, 13, 14,
            18, 19, 20, 21, 26, 27, 28, 29,
            30, 34, 35, 36, 37, 42, 43, 44])
    """
    # Convert Dec bounds into healpix colatitudes (theta = pi/2 - dec).
    theta_min = np.pi / 2.0 - ymax
    theta_max = np.pi / 2.0 - ymin
    # Candidate pixel range spanning the two bounding iso-latitude rings.
    fpix, lpix = hp.ang2pix(nside, [theta_min, theta_max], [0.0, 2.0 * np.pi])
    # Bug fix: `np.int` was a deprecated alias removed in NumPy >= 1.24;
    # the builtin `int` gives the identical platform integer dtype.
    pixs = np.arange(fpix, lpix + 1, dtype=int)
    theta, phi = hp.pix2ang(nside, pixs)
    if xmin < 0:
        # Wrap longitudes into [-pi, pi] when the patch straddles RA = 0.
        phi[phi > np.pi] = phi[phi > np.pi] - 2 * np.pi
    # Keep only the pixels whose centre falls inside the RA/Dec box.
    good = (theta >= theta_min) * (theta <= theta_max) * (phi <= xmax) * (phi >= xmin)
    obspix = pixs[good]
    obspix.sort()
    return obspix
def LamCyl(ra, dec):
    """
    Lambert cylindrical equal-area projection (the "cylindrical equal-area"
    of the USGS report), with the parallel of true scale at zero.
    """
    x = ra
    y = np.sin(dec)
    return x, y
def SFL(ra, dec):
    """Sanson-Flamsteed (Sinusoidal) projection: equal-area, with equally
    spaced straight parallels. Scale is true along the central meridian
    and along all parallels."""
    x = np.cos(dec) * ra
    return x, dec
def deSFL(x, y):
    """Inverse Sanson-Flamsteed projection: recover (ra, dec) from (x, y)."""
    ra = x / np.cos(y)
    return ra, y
def deLamCyl(x, y):
    """Inverse Lambert cylindrical projection: recover (ra, dec) from (x, y)."""
    dec = np.arcsin(y)
    return x, dec
def create_sky_map(cl_fn, nside=16, FWHM=0.0, seed=548397, lmax=None):
    """
    Create full sky map from input cl.
    Parameters
    ----------
    cl_fn : string
        Name of the file containing cl (CAMB lensed cl format)
    nside : int, optional
        Resolution for the output map.
    FWHM : float
        The fwhm of the Gaussian used to smooth the map (applied on alm).
        In arcmin.
    Returns
    ----------
    maps : ndarray
        Maps of the sky (I, Q, U) of size 12 * nside**2.
    Examples
    ----------
    Create a sky map. Seed is fixed for testing purposes.
    >>> np.random.seed(548397)
    >>> sky_maps = create_sky_map('s4cmb/data/test_data_set_lensedCls.dat')
    >>> print(sky_maps[0])
    [ 55.51567033 50.94330727 39.69851524 ..., 36.2265932 107.64964085
      80.8613084 ]
    """
    if lmax is None:
        lmax = 2 * nside
    # CAMB lensed-cl columns: ell, TT, EE, BB, TE.
    ell, TT, EE, BB, TE = np.loadtxt(cl_fn).T
    # Take out the normalisation...
    llp = ell * (ell + 1.0) / (2 * np.pi)
    # Arcmin to rad
    FWHM_rad = FWHM / 60.0 * np.pi / 180.0
    # Seed the RNG so that map realisations are reproducible.
    np.random.seed(seed)
    # Draw a Gaussian realisation of the spectra, smoothed by the beam.
    I, Q, U = hp.synfast(
        [TT / llp, EE / llp, BB / llp, TE / llp],
        nside,
        lmax=lmax,
        mmax=None,
        alm=False,
        pol=True,
        pixwin=False,
        fwhm=FWHM_rad,
        sigma=None,
        new=True,
        verbose=False,
    )
    return I, Q, U
def write_healpix_cmbmap(
    output_filename,
    data,
    fits_IDL=False,
    coord=None,
    colnames=["I", "Q", "U"],
    partial=True,
    nest=False,
):
    """
    Write healpix fits map in full sky mode or partial sky,
    Input data have to be a list with n fields to be written.
    Parameters
    ----------
    output_filename : string
        Name of the output file (.fits).
    data : list of 1d array(s)
        Data to save on disk.
    fits_IDL : bool
        If True, store the data reshaped in row of 1024 (IDL style).
        Default is False.
    coord : string
        The system of coordinates in which the data are
        (G(alactic), C(elestial), and so on). Default is None.
    colnames : list of strings
        The name of each data vector to be saved.
    partial : bool
        If True, store only non-zero pixels. Default is True.
    nest : bool, optional
        If True, save the data in the nest scheme. Default is False (i.e.
        data are saved in the RING format).
    Examples
    ----------
    >>> nside = 16
    >>> I, Q, U = np.random.rand(3, hp.nside2npix(nside))
    >>> colnames = ['I', 'Q', 'U']
    >>> write_healpix_cmbmap('myfits_to_test_.fits',
    ...     data=[I, Q, U], colnames=colnames)
    """
    # Write the header
    # Column names are stored as HIERARCH keywords rather than via
    # healpy's column_names argument.
    extra_header = []
    for c in colnames:
        extra_header.append(("column_names", c))
    extra_header = add_hierarch(extra_header)
    hp.write_map(
        output_filename,
        data,
        fits_IDL=fits_IDL,
        coord=coord,
        column_names=None,
        partial=partial,
        extra_header=extra_header,
        overwrite=True,
    )
def write_dummy_map(filename="myfits_to_test_.fits", nside=16):
    """
    Write dummy file on disk for test purposes.
    Parameters
    ----------
    filename : string, optional
        Name of the output file (.fits)
    nside : int
        Resolution of the maps.
    Examples
    ----------
    >>> write_dummy_map()
    """
    # Bug fix: `nside` used to be unconditionally reset to 16 here, which
    # silently ignored any caller-supplied resolution.
    I, Q, U = np.random.rand(3, hp.nside2npix(nside))
    colnames = ["I", "Q", "U"]
    write_healpix_cmbmap(filename, data=[I, Q, U], colnames=colnames)
def remove_test_data(has_id="_to_test_", silent=True):
    """
    Delete every file in the working directory whose name contains `has_id`.
    Parameters
    ----------
    has_id : string
        String included in filename(s) to remove.
    Examples
    ----------
    >>> file = open('file_to_erase_.txt', 'w')
    >>> file.close()
    >>> remove_test_data(has_id='_to_erase_', silent=False)
    Removing files:  ['file_to_erase_.txt']
    """
    matches = glob.glob("*{}*".format(has_id))
    if not silent:
        print("Removing files: ", matches)
    for path in matches:
        os.remove(path)
if __name__ == "__main__":
    # Run the module doctests, then clean up the temporary FITS files they
    # leave behind.
    import doctest
    # numpy >= 1.14 changed array printing; force the legacy 1.13 format so
    # the doctest outputs above keep matching.
    # NOTE(review): this is a lexicographic string comparison, not a proper
    # version compare (e.g. "1.9.0" >= "1.14.0" is True) -- acceptable for
    # any modern numpy, but worth confirming.
    if np.__version__ >= "1.14.0":
        np.set_printoptions(legacy="1.13")
    doctest.testmod()
    remove_test_data(has_id="_to_test_", silent=True)
| JulienPeloton/s4cmb | s4cmb/input_sky.py | Python | gpl-3.0 | 26,317 |
#
# Copyright (c) SAS Institute Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import gzip
from StringIO import StringIO
from conary_test import rephelp
from conary import errors, files, trove, versions
from conary.deps import deps
from conary.lib import sha1helper, util
from conary.repository import changeset, filecontainer, filecontents, netclient
from conary.repository import datastore
class ChangesetTest(rephelp.RepositoryHelper):
    """Exercise changeset creation, serialization, merging and dumping."""
    def testBadChangeset(self):
        # Opening a missing or unreadable changeset must raise a ConaryError
        # carrying the OS-level explanation, not a raw IOError.
        csFile = self.workDir + '/foo.ccs'
        try:
            changeset.ChangeSetFromFile(csFile)
        except errors.ConaryError, err:
            assert(str(err) == "Error opening changeset '%s': No such file or directory" % csFile)
        else:
            assert(0)
        open(csFile, 'w').close()
        os.chmod(csFile, 0000)
        try:
            changeset.ChangeSetFromFile(csFile)
        except errors.ConaryError, err:
            assert(str(err) == "Error opening changeset '%s': Permission denied" % csFile)
        else:
            assert(0)
        os.chmod(csFile, 0666)
    def testChangeSetFromFile(self):
        # ensure that absolute changesets that are read from disk
        # that contain config files write out changesets to a file
        # that do not change the file type to a diff.
        # set up a file with some contents
        cont = self.workDir + '/contents'
        f = open(cont, 'w')
        f.write('hello, world!\n')
        f.close()
        pathId = sha1helper.md5FromString('0' * 32)
        f = files.FileFromFilesystem(cont, pathId)
        f.flags.isConfig(1)
        # create an absolute changeset
        cs = changeset.ChangeSet()
        # add a pkg diff
        v = versions.VersionFromString('/localhost@rpl:devel/1.0-1-1',
                                       timeStamps = [1.000])
        flavor = deps.parseFlavor('')
        t = trove.Trove('test', v, flavor, None)
        t.addFile(pathId, '/contents', v, f.fileId())
        diff = t.diff(None, absolute = 1)[0]
        cs.newTrove(diff)
        # add the file and file contents
        cs.addFile(None, f.fileId(), f.freeze())
        cs.addFileContents(pathId, f.fileId(), changeset.ChangedFileTypes.file,
                           filecontents.FromFilesystem(cont),
                           f.flags.isConfig())
        # write out the changeset
        cs.writeToFile(self.workDir + '/foo.ccs')
        # read it back in
        cs2 = changeset.ChangeSetFromFile(self.workDir + '/foo.ccs')
        # write it out again (there was a bug where all config files
        # became diffs)
        cs2.writeToFile(self.workDir + '/bar.ccs')
        # read it again
        cs3 = changeset.ChangeSetFromFile(self.workDir + '/bar.ccs')
        # verify that the file is a file, not a diff
        ctype, contents = cs3.getFileContents(pathId, f.fileId())
        assert(ctype == changeset.ChangedFileTypes.file)
    def testIndexByPathIdConversion(self):
        # Verify that changeset files index entries by pathId+fileId (36
        # bytes) for modern clients and by pathId alone (16 bytes) for old
        # protocol versions, including PTR-type contents.
        def _testCs(repos, troves, idxLength, fileCount):
            # Create a changeset file for `troves` and walk its container,
            # checking every entry's index length (and any PTR targets).
            job = [ (x.getName(), (None, None),
                     (x.getVersion(), x.getFlavor() ), True) for x in troves ]
            repos.createChangeSetFile(job, self.workDir + '/foo.ccs')
            fc = filecontainer.FileContainer(
                        util.ExtendedFile(self.workDir + '/foo.ccs', "r",
                                          buffering = False))
            info = fc.getNextFile()
            assert(info[0] == 'CONARYCHANGESET')
            info = fc.getNextFile()
            while info is not None:
                assert(len(info[0]) == idxLength)
                fileCount -= 1
                if 'ptr' in info[1]:
                    # PTR contents embed the index of the entry they
                    # reference; it must use the same index length.
                    s = info[2].read()
                    s = gzip.GzipFile(None, "r", fileobj = StringIO(s)).read()
                    assert(len(s) == idxLength)
                info = fc.getNextFile()
            assert(fileCount == 0)
        f1 = rephelp.RegularFile(pathId = '1', contents = '1')
        f2 = rephelp.RegularFile(pathId = '1', contents = '2')
        t1 = self.addComponent('foo:runtime', fileContents = [ ( '/1', f1 ) ] )
        t2 = self.addComponent('bar:runtime', fileContents = [ ( '/2', f2 ) ] )
        repos = self.openRepository()
        _testCs(repos, [ t1 ], 36, 1)
        _testCs(repos, [ t1, t2 ], 36, 2)
        # Old protocol (41) falls back to 16-byte pathId indexing; two troves
        # sharing a pathId cannot be represented and must conflict.
        repos.c['localhost'].setProtocolVersion(41)
        _testCs(repos, [ t1 ], 16, 1)
        self.assertRaises(changeset.PathIdsConflictError,
                          _testCs, repos, [ t1, t2 ], 16, 1)
        # now test PTR types to make sure they get converted
        self.resetRepository()
        repos = self.openRepository()
        f1 = rephelp.RegularFile(pathId = '1', contents = '1')
        f2 = rephelp.RegularFile(pathId = '2', contents = '1')
        t1 = self.addComponent('foo:runtime',
                               fileContents = [ ( '/1', f1 ), ( '/2', f2) ] )
        _testCs(repos, [ t1 ], 36, 2)
        repos.c['localhost'].setProtocolVersion(41)
        _testCs(repos, [ t1 ], 16, 2)
        # make sure we can install old-format changesets with PTRs
        self.updatePkg([ 'foo:runtime' ])
        self.verifyFile(self.rootDir + '/1', '1')
        self.verifyFile(self.rootDir + '/2', '1')
    def testGetNativeChangesetVersion(self):
        # When adding things here, make sure you update netclient's
        # FILE_CONTAINER_* constants too
        self.assertEqual(changeset.getNativeChangesetVersion(37),
                         filecontainer.FILE_CONTAINER_VERSION_NO_REMOVES)
        self.assertEqual(changeset.getNativeChangesetVersion(38),
                         filecontainer.FILE_CONTAINER_VERSION_WITH_REMOVES)
        self.assertEqual(changeset.getNativeChangesetVersion(42),
                         filecontainer.FILE_CONTAINER_VERSION_WITH_REMOVES)
        self.assertEqual(changeset.getNativeChangesetVersion(43),
                         filecontainer.FILE_CONTAINER_VERSION_FILEID_IDX)
        # The newest client protocol must map to the newest container format.
        current = netclient.CLIENT_VERSIONS[-1]
        self.assertEqual(changeset.getNativeChangesetVersion(current),
                         filecontainer.FILE_CONTAINER_VERSION_FILEID_IDX)
    def testDictAsCsf(self):
        self.mock(changeset.DictAsCsf, 'maxMemSize', 256)
        def testOne(s):
            # test compression of large files for CNY-1896
            d = changeset.DictAsCsf(
                    { 'id' : ( changeset.ChangedFileTypes.file,
                               filecontents.FromString(s), False ) } )
            f = d.getNextFile()[2]
            gzf = gzip.GzipFile('', 'r', fileobj = f)
            assert(gzf.read() == s)
            return f
        # this doesn't need to open any files
        fobj = testOne('short contents')
        self.assertEqual(fobj.getBackendType(), 'memory')
        # Contents above maxMemSize must spill to a file-backed object.
        fobj = testOne('0123456789' * 20000)
        self.assertEqual(fobj.getBackendType(), 'file')
    def testChangeSetMerge(self):
        # Merging changesets (and re-merging merged ones) must not change
        # the serialized size, even after reset() and re-write.
        os.chdir(self.workDir)
        cs1 = changeset.ChangeSet()
        p1 = '0' * 16; f1 = '0' * 20
        cs1.addFileContents(p1, f1, changeset.ChangedFileTypes.file,
                            filecontents.FromString('zero'), False)
        assert(cs1.writeToFile('foo.ccs') == 129)
        cs2 = changeset.ReadOnlyChangeSet()
        cs2.merge(cs1)
        assert(cs2.writeToFile('foo.ccs') == 129)
        cs2.reset()
        assert(cs2.writeToFile('foo.ccs') == 129)
        cs2.reset()
        cs3 = changeset.ReadOnlyChangeSet()
        cs3.merge(cs2)
        assert(cs3.writeToFile('foo.ccs') == 129)
        cs3.reset()
        assert(cs3.writeToFile('foo.ccs') == 129)
    def testChangeSetFilter(self):
        # removeCommitted() must strip troves that already exist in the
        # repository so the remainder can be committed cleanly.
        def addFirst():
            return self.addComponent('first:run')
        def addSecond():
            return self.addComponent('second:run')
        def job(trv):
            return (trv.getName(), (None, None),
                    trv.getNameVersionFlavor()[1:], True)
        first = addFirst()
        second = addSecond()
        repos = self.openRepository()
        cs = repos.createChangeSet([ job(first), job(second) ])
        self.resetRepository()
        repos = self.openRepository()
        addFirst()
        cs.removeCommitted(repos)
        repos.commitChangeSet(cs)
        cs = repos.createChangeSet([ job(first), job(second) ])
    def testChangeSetDumpOffset(self):
        """Stress test offset arg to dumpIter"""
        # Make a changeset with one regular file
        cs = changeset.ChangeSet()
        pathId = '0' * 16
        fileId = '0' * 20
        contents = 'contents'
        store = datastore.FlatDataStore(self.workDir)
        sha1 = sha1helper.sha1String(contents)
        store.addFile(StringIO(contents), sha1)
        rawFile = store.openRawFile(sha1)
        rawSize = os.fstat(rawFile.fileno()).st_size
        contObj = filecontents.CompressedFromDataStore(store, sha1)
        cs.addFileContents(pathId, fileId, changeset.ChangedFileTypes.file,
                contObj, cfgFile=False, compressed=True)
        # Test dumping a fully populated changeset with every possible resume
        # point
        path = os.path.join(self.workDir, 'full.ccs')
        size = cs.writeToFile(path)
        expected = open(path).read()
        self.assertEqual(len(expected), size)
        fc = filecontainer.FileContainer(util.ExtendedFile(path,
            'r', buffering=False))
        def noop(name, tag, size, subfile):
            # Pass-through callback: contents must never be references here.
            assert tag[2:] != changeset.ChangedFileTypes.refr[4:]
            return tag, size, subfile
        for offset in range(size + 1):
            fc.reset()
            actual = ''.join(fc.dumpIter(noop, (), offset))
            self.assertEqual(actual, expected[offset:])
        # Test dumping a changeset with contents stripped out
        path = os.path.join(self.workDir, 'stubby.ccs')
        size2 = cs.writeToFile(path, withReferences=True)
        self.assertEqual(size2, size)
        fc = filecontainer.FileContainer(util.ExtendedFile(path,
            'r', buffering=False))
        expect_reference = '%s %d' % (sha1.encode('hex'), rawSize)
        def addfile(name, tag, size, subfile, dummy):
            # Substitution callback: swap each contents reference for the
            # actual raw file from the datastore.
            self.assertEqual(dummy, 'dummy')
            if name == 'CONARYCHANGESET':
                return tag, size, subfile
            elif name == pathId + fileId:
                self.assertEqual(tag[2:], changeset.ChangedFileTypes.refr[4:])
                self.assertEqual(subfile.read(), expect_reference)
                tag = tag[0:2] + changeset.ChangedFileTypes.file[4:]
                rawFile.seek(0)
                return tag, rawSize, rawFile
            else:
                assert False
        for offset in range(size + 1):
            fc.reset()
            actual = ''.join(fc.dumpIter(addfile, ('dummy',), offset))
            self.assertEqual(actual, expected[offset:])
| sassoftware/conary | conary_test/repositorytest/changesettest.py | Python | apache-2.0 | 11,409 |
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
import os
import re
import webapp2
import jinja2
import logging
import StringIO
from markupsafe import Markup, escape # https://pypi.python.org/pypi/MarkupSafe
import parsers
from google.appengine.ext import ndb
from google.appengine.ext import blobstore
from google.appengine.api import users
from google.appengine.ext.webapp import blobstore_handlers
from api import inLayer, read_file, full_path, read_schemas, read_extensions, read_examples, namespaces, DataCache
from api import Unit, GetTargets, GetSources
from api import GetComment, all_terms, GetAllTypes, GetAllProperties, GetAllEnumerationValues
from api import GetParentList, GetImmediateSubtypes, HasMultipleBaseTypes
from api import GetJsonLdContext, ShortenOnSentence, StripHtmlTags
logging.basicConfig(level=logging.INFO) # dev_appserver.py --log_level debug .
log = logging.getLogger(__name__)
SCHEMA_VERSION=2.2
FEEDBACK_FORM_BASE_URL='https://docs.google.com/a/google.com/forms/d/1krxHlWJAO3JgvHRZV9Rugkr9VYnMdrI10xbGsWt733c/viewform?entry.1174568178&entry.41124795={0}&entry.882602760={1}'
# {0}: term URL, {1} category of term.
sitemode = "mainsite" # whitespaced list for CSS tags,
    # e.g. "mainsite testsite" when off expected domains
    # "extensionsite" when in an extension (e.g. blue?)
# Version -> publication date of past schema.org releases.
releaselog = { "2.0": "2015-05-13", "2.1": "2015-08-06" }
#
silent_skip_list =  [ "favicon.ico" ] # Do nothing for now
all_layers = {}
ext_re = re.compile(r'([^\w,])+')
# In-memory cache of rendered term pages, keyed "layers:termId"
# (see ShowUnit.GetCachedText / AddCachedText).
PageCache = {}
#TODO: Modes:
# mainsite
# webschemadev
# known extension (not skiplist'd, eg. demo1 on schema.org)
JINJA_ENVIRONMENT = jinja2.Environment(
    loader=jinja2.FileSystemLoader(os.path.join(os.path.dirname(__file__), 'templates')),
    extensions=['jinja2.ext.autoescape'], autoescape=True, cache_size=0)
ENABLE_JSONLD_CONTEXT = True
ENABLE_CORS = True
ENABLE_HOSTED_EXTENSIONS = True
INTESTHARNESS = False #Used to indicate we are being called from tests - use setInTestHarness() & getInTestHarness() to manage value
EXTENSION_SUFFIX = "" # e.g. "*"
#ENABLED_EXTENSIONS = [ 'admin', 'auto', 'bib' ]
ENABLED_EXTENSIONS = [ 'auto', 'bib' ]
ALL_LAYERS = [ 'core', 'auto', 'bib' ]
FORCEDEBUGGING = False
# FORCEDEBUGGING = True
# FORCEDEBUGGING = True
def cleanPath(node):
    """Strip every character not approved for use in our URL paths."""
    approved = r'[^a-zA-Z0-9\-/,\.]'
    return re.sub(approved, '', str(node), flags=re.DOTALL)
class HTMLOutput:
    """Stand-in for an HTTP response, accumulating HTML for the template engine."""
    def __init__(self):
        # Fragments are buffered here and only joined in toHTML().
        self.outputStrings = []
    def write(self, str):
        """Append one HTML fragment to the buffer."""
        self.outputStrings.append(str)
    def toHTML(self):
        """Join the buffered fragments into a single Markup-safe string."""
        joined = "".join(self.outputStrings)
        return Markup(joined)
    def __str__(self):
        return self.toHTML()
# Core API: we have a single schema graph built from triples and units.
# now in api.py
class TypeHierarchyTree:
    """Builds a tree rendering (HTML list or JSON-LD) of the type hierarchy."""
    def __init__(self, prefix=""):
        # Accumulated output text; `visited` guards against revisiting types
        # reachable through multiple supertypes.
        self.txt = ""
        self.visited = {}
        self.prefix = prefix
    def emit(self, s):
        self.txt += s + "\n"
    def emit2buff(self, buff, s):
        # Like emit(), but writes into a caller-supplied buffer.
        buff.write(s + "\n")
    def toHTML(self):
        return '%s<ul>%s</ul>' % (self.prefix, self.txt)
    def toJSON(self):
        return self.txt
    def traverseForHTML(self, node, depth = 1, hashorslash="/", layers='core', buff=None):
        """Generate a hierarchical tree view of the types. hashorslash is used for relative link prefixing."""
        log.debug("traverseForHTML: node=%s hashorslash=%s" % ( node.id, hashorslash ))
        # When called without a buffer, create one locally and flush it into
        # self.txt at the end of this call.
        localBuff = False
        if buff == None:
            localBuff = True
            buff = StringIO.StringIO()
        urlprefix = ""
        home = node.getHomeLayer()
        gotOutput = False
        if home in layers:
            gotOutput = True
            if home in ENABLED_EXTENSIONS and home != getHostExt():
                urlprefix = makeUrl(home)
            extclass = ""
            extflag = ""
            tooltip=""
            if home != "core" and home != "":
                extclass = "class=\"ext ext-%s\"" % home
                extflag = EXTENSION_SUFFIX
                tooltip = "title=\"Extended schema: %s.schema.org\" " % home
            # we are a supertype of some kind
            subTypes = node.GetImmediateSubtypes(layers=ALL_LAYERS)
            if len(subTypes) > 0:
                # and we haven't been here before
                if node.id not in self.visited:
                    self.visited[node.id] = True # remember our visit
                    self.emit2buff(buff, ' %s<li class="tbranch" id="%s"><a %s %s href="%s%s%s">%s</a>%s' % (" " * 4 * depth, node.id, tooltip, extclass, urlprefix, hashorslash, node.id, node.id, extflag) )
                    self.emit2buff(buff, ' %s<ul>' % (" " * 4 * depth))
                    # handle our subtypes
                    for item in subTypes:
                        subBuff = StringIO.StringIO()
                        got = self.traverseForHTML(item, depth + 1, hashorslash=hashorslash, layers=layers, buff=subBuff)
                        if got:
                            gotOutput = True
                            self.emit2buff(buff,subBuff.getvalue())
                        subBuff.close()
                    self.emit2buff(buff, ' %s</ul>' % (" " * 4 * depth))
                else:
                    # we are a supertype but we visited this type before, e.g. saw Restaurant via Place then via Organization
                    seen = ' <a href="#%s">+</a> ' % node.id
                    self.emit2buff(buff, ' %s<li class="tbranch" id="%s"><a %s %s href="%s%s%s">%s</a>%s%s' % (" " * 4 * depth, node.id, tooltip, extclass, urlprefix, hashorslash, node.id, node.id, extflag, seen) )
            # leaf nodes
            if len(subTypes) == 0:
                if node.id not in self.visited:
                    self.emit2buff(buff, '%s<li class="tleaf" id="%s"><a %s %s href="%s%s%s">%s</a>%s%s' % (" " * depth, node.id, tooltip, extclass, urlprefix, hashorslash, node.id, node.id, extflag, "" ))
                #else:
                    #self.visited[node.id] = True # never...
                    # we tolerate "VideoGame" appearing under both Game and SoftwareApplication
                    # and would only suppress it if it had its own subtypes. Seems legit.
            self.emit2buff(buff, ' %s</li>' % (" " * 4 * depth) )
        if localBuff:
            self.emit(buff.getvalue())
            buff.close()
        return gotOutput
    # based on http://danbri.org/2013/SchemaD3/examples/4063550/hackathon-schema.js - thanks @gregg, @sandro
    def traverseForJSONLD(self, node, depth = 0, last_at_this_level = True, supertype="None", layers='core'):
        # Emits a nested JSON-LD object for `node` and (recursively) any
        # subtypes not yet visited; commas depend on last_at_this_level.
        emit_debug = False
        if node.id in self.visited:
            # self.emit("skipping %s - already visited" % node.id)
            return
        self.visited[node.id] = True
        p1 = " " * 4 * depth
        if emit_debug:
            self.emit("%s# @id: %s last_at_this_level: %s" % (p1, node.id, last_at_this_level))
        global namespaces;
        # The @context is only emitted once, on the root object.
        ctx = "{}".format(""""@context": {
    "rdfs": "http://www.w3.org/2000/01/rdf-schema#",
    "schema": "http://schema.org/",
    "rdfs:subClassOf": { "@type": "@id" },
    "name": "rdfs:label",
    "description": "rdfs:comment",
    "children": { "@reverse": "rdfs:subClassOf" }
  },\n""" if last_at_this_level and depth==0 else '' )
        unseen_subtypes = []
        for st in node.GetImmediateSubtypes(layers=layers):
            if not st.id in self.visited:
                unseen_subtypes.append(st)
        unvisited_subtype_count = len(unseen_subtypes)
        subtype_count = len( node.GetImmediateSubtypes(layers=layers) )
        supertx = "{}".format( '"rdfs:subClassOf": "schema:%s", ' % supertype.id if supertype != "None" else '' )
        maybe_comma = "{}".format("," if unvisited_subtype_count > 0 else "")
        # Comments are flattened to plain text and shortened for display.
        comment = GetComment(node, layers).strip()
        comment = comment.replace('"',"'")
        comment = ShortenOnSentence(StripHtmlTags(comment),60)
        self.emit('\n%s{\n%s\n%s"@type": "rdfs:Class", %s "description": "%s",\n%s"name": "%s",\n%s"@id": "schema:%s"%s'
                  % (p1, ctx, p1, supertx, comment, p1, node.id, p1, node.id, maybe_comma))
        i = 1
        if unvisited_subtype_count > 0:
            self.emit('%s"children": ' % p1 )
            self.emit("  %s[" % p1 )
            inner_lastness = False
            for t in unseen_subtypes:
                if emit_debug:
                    self.emit("%s  # In %s > %s i: %s unvisited_subtype_count: %s" %(p1, node.id, t.id, i, unvisited_subtype_count))
                if i == unvisited_subtype_count:
                    inner_lastness = True
                i = i + 1
                self.traverseForJSONLD(t, depth + 1, inner_lastness, supertype=node, layers=layers)
            self.emit("%s  ]%s" % (p1, "{}".format( "" if not last_at_this_level else '' ) ) )
        maybe_comma = "{}".format( ',' if not last_at_this_level else '' )
        self.emit('\n%s}%s\n' % (p1, maybe_comma))
def GetExamples(node, layers='core'):
    """Return the examples attached to a Unit node (may be an empty list)."""
    examples = node.examples
    return examples
def GetExtMappingsRDFa(node, layers='core'):
    """Self-contained chunk of RDFa HTML markup with mappings for this term."""
    if node.isClass():
        equivs = GetTargets(Unit.GetUnit("owl:equivalentClass"), node, layers=layers)
        if len(equivs) > 0:
            fragments = []
            for c in equivs:
                # Absolute URIs are emitted as href, local terms as resource.
                if c.id.startswith('http'):
                    fragments.append("<link property=\"owl:equivalentClass\" href=\"%s\"/>\n" % c.id)
                else:
                    fragments.append("<link property=\"owl:equivalentClass\" resource=\"%s\"/>\n" % c.id)
            return "".join(fragments)
    if node.isAttribute():
        equivs = GetTargets(Unit.GetUnit("owl:equivalentProperty"), node, layers)
        if len(equivs) > 0:
            fragments = ["<link property=\"owl:equivalentProperty\" href=\"%s\"/>\n" % c.id
                         for c in equivs]
            return "".join(fragments)
    return "<!-- no external mappings noted for this term. -->"
class ShowUnit (webapp2.RequestHandler):
"""ShowUnit exposes schema.org terms via Web RequestHandler
(HTML/HTTP etc.).
"""
# def __init__(self):
# self.outputStrings = []
def emitCacheHeaders(self):
"""Send cache-related headers via HTTP."""
self.response.headers['Cache-Control'] = "public, max-age=43200" # 12h
self.response.headers['Vary'] = "Accept, Accept-Encoding"
def GetCachedText(self, node, layers='core'):
"""Return page text from node.id cache (if found, otherwise None)."""
global PageCache
cachekey = "%s:%s" % ( layers, node.id ) # was node.id
if (cachekey in PageCache):
return PageCache[cachekey]
else:
return None
def AddCachedText(self, node, textStrings, layers='core'):
"""Cache text of our page for this node via its node.id.
We can be passed a text string or an array of text strings.
"""
global PageCache
cachekey = "%s:%s" % ( layers, node.id ) # was node.id
outputText = "".join(textStrings)
log.debug("CACHING: %s" % node.id)
PageCache[cachekey] = outputText
return outputText
    def write(self, str):
        """Write some text to Web server's output stream."""
        # NOTE(review): appends to self.outputStrings, which is only created
        # in the commented-out __init__ above -- presumably (re)initialised
        # per request elsewhere; confirm before relying on a fresh buffer.
        self.outputStrings.append(str)
    def moreInfoBlock(self, node, layer='core'):
        # if we think we have more info on this term, show a bulleted list of extra items.
        # defaults
        bugs = ["No known open issues."]
        mappings = ["No recorded schema mappings."]
        items = bugs + mappings
        # Classify the term so the feedback form gets the right category.
        nodetype="Misc"
        if node.isEnumeration():
            nodetype = "enumeration"
        elif node.isDataType(layers=layer):
            nodetype = "datatype"
        elif node.isClass(layers=layer):
            nodetype = "type"
        elif node.isAttribute(layers=layer):
            nodetype = "property"
        elif node.isEnumerationValue(layers=layer):
            nodetype = "enumeratedvalue"
        feedback_url = FEEDBACK_FORM_BASE_URL.format("http://schema.org/{0}".format(node.id), nodetype)
        # The defaults above are replaced: always show feedback + issues links.
        items = [
         "<a href='{0}'>Leave public feedback on this term 💬</a>".format(feedback_url),
         "<a href='https://github.com/schemaorg/schemaorg/issues?q=is%3Aissue+is%3Aopen+{0}'>Check for open issues.</a>".format(node.id)
        ]
        # One entry per layer (core or extension) that mentions this term.
        for l in all_terms[node.id]:
            l = l.replace("#","")
            if l == "core":
                ext = ""
            else:
                ext = "extension "
            if ENABLE_HOSTED_EXTENSIONS:
                items.append("'{0}' is mentioned in {1}layer: <a href='{2}'>{3}</a>".format( node.id, ext, makeUrl(l,node.id), l ))
        moreinfo = """<div>
        <div id='infobox' style='text-align: right;'><label role="checkbox" for=morecheck><b><span style="cursor: pointer;">[more...]</span></b></label></div>
        <input type='checkbox' checked="checked" style='display: none' id=morecheck><div id='infomsg' style='background-color: #EEEEEE; text-align: left; padding: 0.5em;'>
        <ul>"""
        for i in items:
            moreinfo += "<li>%s</li>" % i
    #          <li>mappings to other terms.</li>
    #          <li>or links to open issues.</li>
        moreinfo += "</ul>\n</div>\n</div>\n"
        return moreinfo
    def GetParentStack(self, node, layers='core'):
        """Returns a hiearchical structured used for site breadcrumbs."""
        # Results accumulate into self.parentStack (recursive walk upwards).
        thing = Unit.GetUnit("Thing")
        if (node not in self.parentStack):
            self.parentStack.append(node)
        if (Unit.isAttribute(node, layers=layers)):
            # Properties hang off Property -> Thing rather than subClassOf.
            self.parentStack.append(Unit.GetUnit("Property"))
            self.parentStack.append(thing)
        sc = Unit.GetUnit("rdfs:subClassOf")
        if GetTargets(sc, node, layers=layers):
            for p in GetTargets(sc, node, layers=layers):
                self.GetParentStack(p, layers=layers)
        else:
            # Enumerations are classes that have no declared subclasses
            sc = Unit.GetUnit("typeOf")
            for p in GetTargets(sc, node, layers=layers):
                self.GetParentStack(p, layers=layers)
        #Put 'Thing' to the end for multiple inheritance classes
        if(thing in self.parentStack):
            self.parentStack.remove(thing)
            self.parentStack.append(thing)
    def ml(self, node, label='', title='', prop='', hashorslash='/'):
        """ml ('make link')
        Returns an HTML-formatted link to the class or property URL
        * label = optional anchor text label for the link
        * title = optional title attribute on the link
        * prop = an optional property value to apply to the A element
        """
        if(node.id == "DataType"):  #Special case
            return "<a href=\"%s\">%s</a>" % (node.id, node.id)
        if label=='':
            label = node.id
        if title != '':
            title = " title=\"%s\"" % (title)
        if prop:
            prop = " property=\"%s\"" % (prop)
        # Terms living in another enabled extension link out to that
        # extension's host (e.g. bib.schema.org).
        urlprefix = ""
        home = node.getHomeLayer()
        if home in ENABLED_EXTENSIONS and home != getHostExt():
            # NOTE(review): `port` is computed but unused here -- makeUrl()
            # presumably handles the port itself; confirm before removing.
            port = ""
            if getHostPort() != "80":
                port = ":%s" % getHostPort()
            urlprefix = makeUrl(home)
        extclass = ""
        extflag = ""
        tooltip = ""
        if home != "core" and home != "":
            # Mark extension terms with a CSS class, suffix and tooltip.
            extclass = "class=\"ext ext-%s\" " % home
            extflag = EXTENSION_SUFFIX
            tooltip = "title=\"Extended schema: %s.schema.org\" " % home
        return "<a %s %s href=\"%s%s%s\"%s%s>%s</a>%s" % (tooltip, extclass, urlprefix, hashorslash, node.id, prop, title, label, extflag)
def makeLinksFromArray(self, nodearray, tooltip=''):
"""Make a comma separate list of links via ml() function.
* tooltip - optional text to use as title of all links
"""
hyperlinks = []
for f in nodearray:
hyperlinks.append(self.ml(f, f.id, tooltip))
return (", ".join(hyperlinks))
    def emitUnitHeaders(self, node, layers='core'):
        """Write out the HTML page headers for this node.

        Emits the page title, an extension notice for non-core terms,
        canonical URL, breadcrumbs, rdfs:comment and usage count. For
        (non-DataType) classes it also OPENS the per-type property table;
        that <table> is closed later by the caller (emitExactTermPage).
        """
        self.write("<h1 class=\"page-title\">\n")
        self.write(node.id)
        self.write("</h1>")
        home = node.home
        if home != "core" and home != "":
            self.write("Defined in the %s.schema.org extension." % home)
            self.write(" (This is an initial exploratory release.)<br/>")
        self.emitCanonicalURL(node)
        self.BreadCrumbs(node, layers=layers)
        comment = GetComment(node, layers)
        self.write(" <div property=\"rdfs:comment\">%s</div>\n\n" % (comment) + "\n")
        self.write(" <br/><div>Usage: %s</div>\n\n" % (node.UsageStr()) + "\n")
        #was: self.write(self.moreInfoBlock(node))
        if (node.isClass(layers=layers) and not node.isDataType(layers=layers) and node.id != "DataType"):
            # Table is intentionally left open; rows come from ClassProperties().
            self.write("<table class=\"definition-table\">\n <thead>\n <tr><th>Property</th><th>Expected Type</th><th>Description</th> \n </tr>\n </thead>\n\n")
def emitCanonicalURL(self,node):
cURL = "http://schema.org/" + node.id
self.write(" <span class=\"canonicalUrl\">Canonical URL: <a href=\"%s\">%s</a></span>" % (cURL, cURL))
# Stacks to support multiple inheritance
crumbStacks = []
    def BreadCrumbs(self, node, layers):
        """Render breadcrumb rows for this node, one <span> per inheritance chain.

        Builds self.crumbStacks via WalkCrumbs(), then pops each stack from the
        root down to the node, joining with " > " (or " :: " before the final
        element for enumeration values). Chains whose root id contains ':'
        (external/namespaced units) are suppressed.
        """
        self.crumbStacks = []
        cstack = []
        self.crumbStacks.append(cstack)
        self.WalkCrumbs(node,cstack,layers=layers)
        if (node.isAttribute(layers=layers)):
            # Properties always crumb through Property > Thing.
            cstack.append(Unit.GetUnit("Property"))
            cstack.append(Unit.GetUnit("Thing"))
        enuma = node.isEnumerationValue(layers=layers)
        crumbsout = []
        for row in range(len(self.crumbStacks)):
            thisrow = ""
            # Skip chains rooted in namespaced (external) units.
            if(":" in self.crumbStacks[row][len(self.crumbStacks[row])-1].id):
                continue
            count = 0
            while(len(self.crumbStacks[row]) > 0):
                n = self.crumbStacks[row].pop()
                if(count > 0):
                    if((len(self.crumbStacks[row]) == 0) and enuma):
                        thisrow += " :: "
                    else:
                        thisrow += " > "
                elif n.id == "Class": # If Class is first breadcrum suppress it
                    continue
                count += 1
                thisrow += "%s" % (self.ml(n))
            crumbsout.append(thisrow)
        self.write("<h4>")
        rowcount = 0
        for crumb in sorted(crumbsout):
            if rowcount > 0:
                self.write("<br/>")
            self.write("<span class='breadcrumbs'>%s</span>\n" % crumb)
            rowcount += 1
        self.write("</h4>\n")
    #Walk up the stack, appending crumbs & create new (duplicating crumbs already identified) if more than one parent found
    def WalkCrumbs(self, node, cstack, layers):
        """Recursively fill cstack (and clones of it) with node's ancestors.

        When a node has N parents, the current chain is copied N-1 times so
        each inheritance path gets its own breadcrumb row in self.crumbStacks.
        """
        if "http://" in node.id or "https://" in node.id: #Suppress external class references
            return
        cstack.append(node)
        tmpStacks = []
        tmpStacks.append(cstack)
        subs = []
        if(node.isDataType(layers=layers)):
            # DataTypes can have both typeOf and subClassOf parents.
            subs = GetTargets(Unit.GetUnit("typeOf"), node, layers=layers)
            subs += GetTargets(Unit.GetUnit("rdfs:subClassOf"), node, layers=layers)
        elif node.isClass(layers=layers):
            subs = GetTargets(Unit.GetUnit("rdfs:subClassOf"), node, layers=layers)
        elif(node.isAttribute(layers=layers)):
            subs = GetTargets(Unit.GetUnit("rdfs:subPropertyOf"), node, layers=layers)
        else:
            subs = GetTargets(Unit.GetUnit("typeOf"), node, layers=layers)# Enumerations are classes that have no declared subclasses
        for i in range(len(subs)):
            if(i > 0):
                # Clone the chain so far for each additional parent.
                t = cstack[:]
                tmpStacks.append(t)
                self.crumbStacks.append(t)
        x = 0
        for p in subs:
            self.WalkCrumbs(p,tmpStacks[x],layers=layers)
            x += 1
def emitSimplePropertiesPerType(self, cl, layers="core", out=None, hashorslash="/"):
"""Emits a simple list of properties applicable to the specified type."""
if not out:
out = self
out.write("<ul class='props4type'>")
for prop in sorted(GetSources( Unit.GetUnit("domainIncludes"), cl, layers=layers), key=lambda u: u.id):
if (prop.superseded(layers=layers)):
continue
out.write("<li><a href='%s%s'>%s</a></li>" % ( hashorslash, prop.id, prop.id ))
out.write("</ul>\n\n")
def emitSimplePropertiesIntoType(self, cl, layers="core", out=None, hashorslash="/"):
"""Emits a simple list of properties whose values are the specified type."""
if not out:
out = self
out.write("<ul class='props2type'>")
for prop in sorted(GetSources( Unit.GetUnit("rangeIncludes"), cl, layers=layers), key=lambda u: u.id):
if (prop.superseded(layers=layers)):
continue
out.write("<li><a href='%s%s'>%s</a></li>" % ( hashorslash, prop.id, prop.id ))
out.write("</ul>\n\n")
    def ClassProperties (self, cl, subclass=False, layers="core", out=None, hashorslash="/"):
        """Write out a table of properties for a per-type page.

        * cl - the class whose direct (domainIncludes) properties are listed
        * subclass - True when cl is a supertype of the page's main type;
          adds rdfs:subClassOf RDFa markup to the section header row
        Returns the number of property rows written.
        """
        if not out:
            out = self
        propcount = 0
        headerPrinted = False
        di = Unit.GetUnit("domainIncludes")
        ri = Unit.GetUnit("rangeIncludes")
        for prop in sorted(GetSources(di, cl, layers=layers), key=lambda u: u.id):
            if (prop.superseded(layers=layers)):
                continue
            # NOTE(review): supersedes / subprops / superprops are computed but
            # never used in this method.
            supersedes = prop.supersedes(layers=layers)
            olderprops = prop.supersedes_all(layers=layers)
            inverseprop = prop.inverseproperty(layers=layers)
            subprops = prop.subproperties(layers=layers)
            superprops = prop.superproperties(layers=layers)
            ranges = GetTargets(ri, prop, layers=layers)
            comment = GetComment(prop, layers=layers)
            if (not headerPrinted):
                # Section header emitted lazily so empty classes produce nothing.
                class_head = self.ml(cl)
                if subclass:
                    class_head = self.ml(cl, prop="rdfs:subClassOf")
                out.write("<tr class=\"supertype\">\n <th class=\"supertype-name\" colspan=\"3\">Properties from %s</th>\n \n</tr>\n\n<tbody class=\"supertype\">\n " % (class_head))
                headerPrinted = True
            out.write("<tr typeof=\"rdfs:Property\" resource=\"http://schema.org/%s\">\n \n <th class=\"prop-nam\" scope=\"row\">\n\n<code property=\"rdfs:label\">%s</code>\n </th>\n " % (prop.id, self.ml(prop)))
            out.write("<td class=\"prop-ect\">\n")
            first_range = True
            for r in ranges:
                if (not first_range):
                    out.write(" or <br/> ")
                first_range = False
                out.write(self.ml(r, prop='rangeIncludes'))
                out.write(" ")
            out.write("</td>")
            out.write("<td class=\"prop-desc\" property=\"rdfs:comment\">%s" % (comment))
            if (len(olderprops) > 0):
                olderlinks = ", ".join([self.ml(o) for o in olderprops])
                out.write(" Supersedes %s." % olderlinks )
            if (inverseprop != None):
                out.write("<br/> Inverse property: %s." % (self.ml(inverseprop)))
            out.write("</td></tr>")
            # Once a row is written the subClassOf fallback below is not needed.
            subclass = False
            propcount += 1
        if subclass: # in case the superclass has no defined attributes
            out.write("<tr><td colspan=\"3\"><meta property=\"rdfs:subClassOf\" content=\"%s\"></td></tr>" % (cl.id))
        return propcount
def emitClassExtensionSuperclasses (self, cl, layers="core", out=None):
first = True
count = 0
if not out:
out = self
buff = StringIO.StringIO()
sc = Unit.GetUnit("rdfs:subClassOf")
for p in GetTargets(sc, cl, ALL_LAYERS):
if inLayer(layers,p):
continue
if p.id == "http://www.w3.org/2000/01/rdf-schema#Class": #Special case for "DataType"
p.id = "Class"
sep = ", "
if first:
sep = "<li>"
first = False
buff.write("%s%s" % (sep,self.ml(p)))
count += 1
if(count > 0):
buff.write("</li>\n")
content = buff.getvalue()
if(len(content) > 0):
if cl.id == "DataType":
self.write("<h4>Subclass of:<h4>")
else:
self.write("<h4>Available supertypes defined in extensions</h4>")
self.write("<ul>")
self.write(content)
self.write("</ul>")
buff.close()
def emitClassExtensionProperties (self, cl, layers="core", out=None):
if not out:
out = self
buff = StringIO.StringIO()
for p in self.parentStack:
self._ClassExtensionProperties(buff, p, layers=layers)
content = buff.getvalue()
if(len(content) > 0):
self.write("<h4>Available properties in extensions</h4>")
self.write("<ul>")
self.write(content)
self.write("</ul>")
buff.close()
    def _ClassExtensionProperties (self, out, cl, layers="core"):
        """Write out a list of properties not displayed as they are in extensions for a per-type page.

        Appends one "<li>From <class>: a, b, c</li>" entry to 'out' for the
        domainIncludes properties of cl that are hidden from 'layers'.
        """
        di = Unit.GetUnit("domainIncludes")
        first = True
        count = 0
        for prop in sorted(GetSources(di, cl, ALL_LAYERS), key=lambda u: u.id):
            if (prop.superseded(layers=layers)):
                continue
            if inLayer(layers,prop):
                continue
            log.debug("ClassExtensionfFound %s " % (prop))
            sep = ", "
            if first:
                # NOTE(review): interpolates the Unit itself ("%s" % cl), so the
                # label depends on Unit's __str__/__repr__ — confirm intended.
                out.write("<li>From %s: " % cl)
                sep = ""
                first = False
            out.write("%s%s" % (sep,self.ml(prop)))
            count += 1
        if(count > 0):
            out.write("</li>\n")
    def emitClassIncomingProperties (self, cl, layers="core", out=None, hashorslash="/"):
        """Write out a table of incoming properties for a per-type page.

        'Incoming' means properties whose rangeIncludes covers cl, i.e. the
        places where instances of cl may appear as values. The second column
        therefore lists each property's domains.
        """
        if not out:
            out = self
        headerPrinted = False
        di = Unit.GetUnit("domainIncludes")
        ri = Unit.GetUnit("rangeIncludes")
        for prop in sorted(GetSources(ri, cl, layers=layers), key=lambda u: u.id):
            if (prop.superseded(layers=layers)):
                continue
            # NOTE(review): subprops / superprops are computed but unused here.
            supersedes = prop.supersedes(layers=layers)
            inverseprop = prop.inverseproperty(layers=layers)
            subprops = prop.subproperties(layers=layers)
            superprops = prop.superproperties(layers=layers)
            ranges = GetTargets(di, prop, layers=layers)
            comment = GetComment(prop, layers=layers)
            if (not headerPrinted):
                # Header emitted lazily so the section vanishes when empty.
                self.write("<br/><br/>Instances of %s may appear as values for the following properties<br/>" % (self.ml(cl)))
                self.write("<table class=\"definition-table\">\n \n \n<thead>\n <tr><th>Property</th><th>On Types</th><th>Description</th> \n </tr>\n</thead>\n\n")
                headerPrinted = True
            self.write("<tr>\n<th class=\"prop-nam\" scope=\"row\">\n <code>%s</code>\n</th>\n " % (self.ml(prop)) + "\n")
            self.write("<td class=\"prop-ect\">\n")
            first_range = True
            for r in ranges:
                if (not first_range):
                    self.write(" or<br/> ")
                first_range = False
                self.write(self.ml(r))
                self.write(" ")
            self.write("</td>")
            self.write("<td class=\"prop-desc\">%s " % (comment))
            if (supersedes != None):
                self.write(" Supersedes %s." % (self.ml(supersedes)))
            if (inverseprop != None):
                self.write("<br/> inverse property: %s." % (self.ml(inverseprop)) )
            self.write("</td></tr>")
        if (headerPrinted):
            self.write("</table>\n")
def emitRangeTypesForProperty(self, node, layers="core", out=None, hashorslash="/"):
"""Write out simple HTML summary of this property's expected types."""
if not out:
out = self
out.write("<ul class='attrrangesummary'>")
for rt in sorted(GetTargets(Unit.GetUnit("rangeIncludes"), node, layers=layers), key=lambda u: u.id):
out.write("<li><a href='%s%s'>%s</a></li>" % ( hashorslash, rt.id, rt.id ))
out.write("</ul>\n\n")
def emitDomainTypesForProperty(self, node, layers="core", out=None, hashorslash="/"):
"""Write out simple HTML summary of types that expect this property."""
if not out:
out = self
out.write("<ul class='attrdomainsummary'>")
for dt in sorted(GetTargets(Unit.GetUnit("domainIncludes"), node, layers=layers), key=lambda u: u.id):
out.write("<li><a href='%s%s'>%s</a></li>" % ( hashorslash, dt.id, dt.id ))
out.write("</ul>\n\n")
def emitAttributeProperties(self, node, layers="core", out=None, hashorslash="/"):
"""Write out properties of this property, for a per-property page."""
if not out:
out = self
di = Unit.GetUnit("domainIncludes")
ri = Unit.GetUnit("rangeIncludes")
ranges = sorted(GetTargets(ri, node, layers=layers), key=lambda u: u.id)
domains = sorted(GetTargets(di, node, layers=layers), key=lambda u: u.id)
first_range = True
newerprop = node.supersededBy(layers=layers) # None of one. e.g. we're on 'seller'(new) page, we get 'vendor'(old)
olderprop = node.supersedes(layers=layers) # None or one
olderprops = node.supersedes_all(layers=layers) # list, e.g. 'seller' has 'vendor', 'merchant'.
inverseprop = node.inverseproperty(layers=layers)
subprops = node.subproperties(layers=layers)
superprops = node.superproperties(layers=layers)
if (inverseprop != None):
tt = "This means the same thing, but with the relationship direction reversed."
out.write("<p>Inverse-property: %s.</p>" % (self.ml(inverseprop, inverseprop.id,tt, prop=False, hashorslash=hashorslash)) )
out.write("<table class=\"definition-table\">\n")
out.write("<thead>\n <tr>\n <th>Values expected to be one of these types</th>\n </tr>\n</thead>\n\n <tr>\n <td>\n ")
for r in ranges:
if (not first_range):
out.write("<br/>")
first_range = False
tt = "The '%s' property has values that include instances of the '%s' type." % (node.id, r.id)
out.write(" <code>%s</code> " % (self.ml(r, r.id, tt, prop="rangeIncludes", hashorslash=hashorslash) +"\n"))
out.write(" </td>\n </tr>\n</table>\n\n")
first_domain = True
out.write("<table class=\"definition-table\">\n")
out.write(" <thead>\n <tr>\n <th>Used on these types</th>\n </tr>\n</thead>\n<tr>\n <td>")
for d in domains:
if (not first_domain):
out.write("<br/>")
first_domain = False
tt = "The '%s' property is used on the '%s' type." % (node.id, d.id)
out.write("\n <code>%s</code> " % (self.ml(d, d.id, tt, prop="domainIncludes",hashorslash=hashorslash)+"\n" ))
out.write(" </td>\n </tr>\n</table>\n\n")
if (subprops != None and len(subprops) > 0):
out.write("<table class=\"definition-table\">\n")
out.write(" <thead>\n <tr>\n <th>Sub-properties</th>\n </tr>\n</thead>\n")
for sbp in subprops:
c = GetComment(sbp,layers=layers)
tt = "%s: ''%s''" % ( sbp.id, c)
out.write("\n <tr><td><code>%s</code></td></tr>\n" % (self.ml(sbp, sbp.id, tt, hashorslash=hashorslash)))
out.write("\n</table>\n\n")
# Super-properties
if (superprops != None and len(superprops) > 0):
out.write("<table class=\"definition-table\">\n")
out.write(" <thead>\n <tr>\n <th>Super-properties</th>\n </tr>\n</thead>\n")
for spp in superprops:
c = GetComment(spp, layers=layers) # markup needs to be stripped from c, e.g. see 'logo', 'photo'
c = re.sub(r'<[^>]*>', '', c) # This is not a sanitizer, we trust our input.
tt = "%s: ''%s''" % ( spp.id, c)
out.write("\n <tr><td><code>%s</code></td></tr>\n" % (self.ml(spp, spp.id, tt,hashorslash)))
out.write("\n</table>\n\n")
# Supersedes
if (olderprops != None and len(olderprops) > 0):
out.write("<table class=\"definition-table\">\n")
out.write(" <thead>\n <tr>\n <th>Supersedes</th>\n </tr>\n</thead>\n")
for o in olderprops:
c = GetComment(o, layers=layers)
tt = "%s: ''%s''" % ( o.id, c)
out.write("\n <tr><td><code>%s</code></td></tr>\n" % (self.ml(o, o.id, tt, hashorslash)))
out.write("\n</table>\n\n")
# supersededBy (at most one direct successor)
if (newerprop != None):
out.write("<table class=\"definition-table\">\n")
out.write(" <thead>\n <tr>\n <th><a href=\"/supersededBy\">supersededBy</a></th>\n </tr>\n</thead>\n")
tt="supersededBy: %s" % newerprop.id
out.write("\n <tr><td><code>%s</code></td></tr>\n" % (self.ml(newerprop, newerprop.id, tt,hashorslash)))
out.write("\n</table>\n\n")
def rep(self, markup):
"""Replace < and > with HTML escape chars."""
m1 = re.sub("<", "<", markup)
m2 = re.sub(">", ">", m1)
# TODO: Ampersand? Check usage with examples.
return m2
    def handleHomepage(self, node):
        """Send the homepage, or if no HTML accept header received and JSON-LD was requested, send JSON-LD context file.

        typical browser accept list: ('Accept', 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8')
        # e.g. curl -H "Accept: application/ld+json" http://localhost:8080/
        see also http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html
        https://github.com/rvguha/schemaorg/issues/5
        https://github.com/rvguha/schemaorg/wiki/JsonLd
        Returns True when a response was written.
        """
        accept_header = self.request.headers.get('Accept').split(',')
        logging.info("accepts: %s" % self.request.headers.get('Accept'))
        if ENABLE_JSONLD_CONTEXT:
            jsonldcontext = GetJsonLdContext(layers=ALL_LAYERS)
        # Homepage is content-negotiated. HTML or JSON-LD.
        mimereq = {}
        for ah in accept_header:
            # Strip quality suffixes (";q=0.9") before recording the type.
            ah = re.sub( r";q=\d?\.\d+", '', ah).rstrip()
            mimereq[ah] = 1
        # Lower score wins; types absent from the Accept header default high.
        html_score = mimereq.get('text/html', 5)
        xhtml_score = mimereq.get('application/xhtml+xml', 5)
        jsonld_score = mimereq.get('application/ld+json', 10)
        # print "accept_header: " + str(accept_header) + " mimereq: "+str(mimereq) + "Scores H:{0} XH:{1} J:{2} ".format(html_score,xhtml_score,jsonld_score)
        if (ENABLE_JSONLD_CONTEXT and (jsonld_score < html_score and jsonld_score < xhtml_score)):
            # Client explicitly prefers JSON-LD: serve the context file.
            self.response.headers['Content-Type'] = "application/ld+json"
            self.emitCacheHeaders()
            self.response.out.write( jsonldcontext )
            return True
        else:
            # Serve a homepage from template
            # the .tpl has responsibility for extension homepages
            # TODO: pass in extension, base_domain etc.
            sitekeyedhomepage = "homepage %s" % getSiteName()
            hp = DataCache.get(sitekeyedhomepage)
            if hp != None:
                self.response.out.write( hp )
                #log.info("Served datacache homepage.tpl key: %s" % sitekeyedhomepage)
                log.debug("Served datacache homepage.tpl key: %s" % sitekeyedhomepage)
            else:
                template = JINJA_ENVIRONMENT.get_template('homepage.tpl')
                template_values = {
                    'ENABLE_HOSTED_EXTENSIONS': ENABLE_HOSTED_EXTENSIONS,
                    'SCHEMA_VERSION': SCHEMA_VERSION,
                    'sitename': getSiteName(),
                    'staticPath': makeUrl("",""),
                    'myhost': getHost(),
                    'myport': getHostPort(),
                    'mybasehost': getBaseHost(),
                    'host_ext': getHostExt(),
                    'ext_contents': self.handleExtensionContents(getHostExt()),
                    'home_page': "True",
                    'debugging': getAppVar('debugging')
                }
                # We don't want JINJA2 doing any cachine of included sub-templates.
                page = template.render(template_values)
                self.response.out.write( page )
                log.debug("Served and cached fresh homepage.tpl key: %s " % sitekeyedhomepage)
                #log.info("Served and cached fresh homepage.tpl key: %s " % sitekeyedhomepage)
                DataCache.put(sitekeyedhomepage, page)
                # self.response.out.write( open("static/index.html", 'r').read() )
            return True
        # NOTE(review): unreachable — both branches above return.
        log.info("Warning: got here how?")
        return False
def getExtendedSiteName(self, layers):
"""Returns site name (domain name), informed by the list of active layers."""
if layers==["core"]:
return "schema.org"
if len(layers)==0:
return "schema.org"
return (getHostExt() + ".schema.org")
    def emitSchemaorgHeaders(self, node, is_class=False, ext_mappings='', sitemode="default", sitename="schema.org", layers="core"):
        """
        Generates, caches and emits HTML headers for class, property and enumeration pages. Leaves <body> open.

        * node = a Unit for the term, or a plain string (e.g. "404 Missing")
        * is_class = selects rdfs:Class vs rdfs:Property typeof markup
        Rendered output is cached in DataCache keyed on (term, site name).
        """
        anode = True
        if isinstance(node, str):
            # Plain-string callers (e.g. handle404Failure) have no Unit.
            entry = node
            anode = False
        else:
            entry = node.id
        rdfs_type = 'rdfs:Property'
        if is_class:
            rdfs_type = 'rdfs:Class'
        generated_page_id = "genericTermPageHeader-%s-%s" % ( str(entry), getSiteName() )
        gtp = DataCache.get( generated_page_id )
        if gtp != None:
            self.response.out.write( gtp )
            log.debug("Served recycled genericTermPageHeader.tpl for %s" % generated_page_id )
        else:
            desc = entry
            if anode:
                # Meta description only available when we have a real Unit.
                desc = self.getMetaDescription(node, layers=layers, lengthHint=200)
            template = JINJA_ENVIRONMENT.get_template('genericTermPageHeader.tpl')
            template_values = {
                'entry': str(entry),
                'desc' : desc,
                'sitemode': sitemode,
                'sitename': getSiteName(),
                'staticPath': makeUrl("",""),
                'menu_sel': "Schemas",
                'rdfs_type': rdfs_type,
                'ext_mappings': ext_mappings
            }
            out = template.render(template_values)
            DataCache.put(generated_page_id,out)
            log.debug("Served and cached fresh genericTermPageHeader.tpl for %s" % generated_page_id )
            self.response.write(out)
def getMetaDescription(self, node, layers="core",lengthHint=250):
ins = ""
if node.isEnumeration():
ins += " Enumeration Type"
elif node.isClass():
ins += " Type"
elif node.isAttribute():
ins += " Property"
elif node.isEnumerationValue():
ins += " Enumeration Value"
desc = "Schema.org%s: %s - " % (ins, node.id)
lengthHint -= len(desc)
comment = GetComment(node, layers)
desc += ShortenOnSentence(StripHtmlTags(comment),lengthHint)
return desc
    def emitExactTermPage(self, node, layers="core"):
        """Emit a Web page that exactly matches this node.

        Top-level renderer for a term page: headers, breadcrumbs, property
        tables, subtypes / enumeration members, acknowledgements, examples
        and footer. Output accumulates in self.outputStrings and is cached
        via AddCachedText at the end.
        """
        log.debug("EXACT PAGE: %s" % node.id)
        self.outputStrings = [] # blank slate
        ext_mappings = GetExtMappingsRDFa(node, layers=layers)
        global sitemode #,sitename
        if ("schema.org" not in self.request.host and sitemode == "mainsite"):
            sitemode = "mainsite testsite"
        self.emitSchemaorgHeaders(node, node.isClass(), ext_mappings, sitemode, getSiteName(), layers)
        if ( ENABLE_HOSTED_EXTENSIONS and ("core" not in layers or len(layers)>1) ):
            # Banner naming the active extension(s), with a link back to core.
            ll = " ".join(layers).replace("core","")
            target=""
            if inLayer("core", node):
                target = node.id
            s = "<p id='lli' class='layerinfo %s'><a href=\"https://github.com/schemaorg/schemaorg/wiki/ExtensionList\">extension shown</a>: %s [<a href='%s'>x</a>]</p>\n" % (ll, ll, makeUrl("",target))
            self.write(s)
        cached = self.GetCachedText(node, layers)
        if (cached != None):
            self.response.write(cached)
            return
        self.parentStack = []
        self.GetParentStack(node, layers=layers)
        self.emitUnitHeaders(node, layers=layers) # writes <h1><table>...
        if (node.isEnumerationValue(layers=layers)):
            self.write(self.moreInfoBlock(node))
        if (node.isClass(layers=layers)):
            # NOTE(review): this local is never read; ClassProperties derives
            # the subclass flag per-parent below.
            subclass = True
            self.write(self.moreInfoBlock(node))
            for p in self.parentStack:
                self.ClassProperties(p, p==self.parentStack[0], layers=layers)
            if (not node.isDataType(layers=layers) and node.id != "DataType"):
                # Closes the table opened by emitUnitHeaders().
                self.write("\n\n</table>\n\n")
            self.emitClassIncomingProperties(node, layers=layers)
            self.emitClassExtensionSuperclasses(node,layers)
            # NOTE(review): passes loop variable 'p' (last parent) rather than
            # 'node'; harmless today because the callee ignores its argument
            # and walks self.parentStack — confirm before relying on it.
            self.emitClassExtensionProperties(p,layers)
        elif (Unit.isAttribute(node, layers=layers)):
            self.emitAttributeProperties(node, layers=layers)
            self.write(self.moreInfoBlock(node))
        if (node.isClass(layers=layers)):
            # Subtypes section: in-layer children as a list, extension-only
            # children as a comma-separated run.
            children = []
            children = GetSources(Unit.GetUnit("rdfs:subClassOf"), node, ALL_LAYERS)# Normal subclasses
            if(node.isDataType() or node.id == "DataType"):
                children += GetSources(Unit.GetUnit("typeOf"), node, ALL_LAYERS)# Datatypes
            children = sorted(children, key=lambda u: u.id)
            if (len(children) > 0):
                buff = StringIO.StringIO()
                extbuff = StringIO.StringIO()
                firstext=True
                for c in children:
                    if inLayer(layers, c):
                        buff.write("<li> %s </li>" % (self.ml(c)))
                    else:
                        sep = ", "
                        if firstext:
                            sep = ""
                            firstext=False
                        extbuff.write("%s%s" % (sep,self.ml(c)) )
                if (len(buff.getvalue()) > 0):
                    if node.isDataType():
                        self.write("<br/><b>More specific DataTypes</b><ul>")
                    else:
                        self.write("<br/><b>More specific Types</b><ul>")
                    self.write(buff.getvalue())
                    self.write("</ul>")
                if (len(extbuff.getvalue()) > 0):
                    self.write("<h4>More specific Types available in extensions</h4><ul><li>")
                    self.write(extbuff.getvalue())
                    self.write("</li></ul>")
                buff.close()
                extbuff.close()
        if (node.isEnumeration(layers=layers)):
            # Enumeration members (typeOf this enumeration), split the same
            # way between in-layer and extension-only.
            self.write(self.moreInfoBlock(node))
            children = sorted(GetSources(Unit.GetUnit("typeOf"), node, ALL_LAYERS), key=lambda u: u.id)
            if (len(children) > 0):
                buff = StringIO.StringIO()
                extbuff = StringIO.StringIO()
                firstext=True
                for c in children:
                    if inLayer(layers, c):
                        buff.write("<li> %s </li>" % (self.ml(c)))
                    else:
                        sep = ","
                        if firstext:
                            sep = ""
                            firstext=False
                        extbuff.write("%s%s" % (sep,self.ml(c)) )
                if (len(buff.getvalue()) > 0):
                    self.write("<br/><br/><b>Enumeration members</b><ul>")
                    self.write(buff.getvalue())
                    self.write("</ul>")
                if (len(extbuff.getvalue()) > 0):
                    self.write("<h4>Enumeration members available in extensions</h4><ul><li>")
                    self.write(extbuff.getvalue())
                    self.write("</li></ul>")
                buff.close()
                extbuff.close()
        ackorgs = GetTargets(Unit.GetUnit("dc:source"), node, layers=layers)
        if (len(ackorgs) > 0):
            self.write("<h4 id=\"acks\">Acknowledgements</h4>\n")
            for ao in ackorgs:
                acks = sorted(GetTargets(Unit.GetUnit("rdfs:comment"), ao, layers))
                for ack in acks:
                    self.write(str(ack+"<br/>"))
        examples = GetExamples(node, layers=layers)
        log.debug("Rendering n=%s examples" % len(examples))
        if (len(examples) > 0):
            # Tabbed example viewer: one tab per markup flavour.
            example_labels = [
              ('Without Markup', 'original_html', 'selected'),
              ('Microdata', 'microdata', ''),
              ('RDFa', 'rdfa', ''),
              ('JSON-LD', 'jsonld', ''),
            ]
            self.write("<br/><br/><b><a id=\"examples\">Examples</a></b><br/><br/>\n\n")
            for ex in examples:
                if "id" in ex.egmeta:
                    self.write('<span id="%s"></span>' % ex.egmeta["id"])
                self.write("<div class='ds-selector-tabs ds-selector'>\n")
                self.write(" <div class='selectors'>\n")
                for label, example_type, selected in example_labels:
                    self.write(" <a data-selects='%s' class='%s'>%s</a>\n"
                               % (example_type, selected, label))
                self.write("</div>\n\n")
                for label, example_type, selected in example_labels:
                    self.write("<pre class=\"prettyprint lang-html linenums %s %s\">%s</pre>\n\n"
                               % (example_type, selected, self.rep(ex.get(example_type))))
                self.write("</div>\n\n")
        self.write("<p class=\"version\"><b>Schema Version %s</b></p>\n\n" % SCHEMA_VERSION)
        # TODO: add some version info regarding the extension
        # Analytics
        self.write("""<script>(function(i,s,o,g,r,a,m){i['GoogleAnalyticsObject']=r;i[r]=i[r]||function(){
        (i[r].q=i[r].q||[]).push(arguments)},i[r].l=1*new Date();a=s.createElement(o),
        m=s.getElementsByTagName(o)[0];a.async=1;a.src=g;m.parentNode.insertBefore(a,m)
        })(window,document,'script','//www.google-analytics.com/analytics.js','ga');
        ga('create', 'UA-52672119-1', 'auto');ga('send', 'pageview');</script>""")
        self.write(" \n\n</div>\n</body>\n</html>")
        self.response.write(self.AddCachedText(node, self.outputStrings, layers))
def emitHTTPHeaders(self, node):
if ENABLE_CORS:
self.response.headers.add_header("Access-Control-Allow-Origin", "*") # entire site is public.
# see http://en.wikipedia.org/wiki/Cross-origin_resource_sharing
    def setupExtensionLayerlist(self, node):
        """Return the de-duplicated list of schema layers active for this request.

        Always contains 'core'; adds any ?ext=foo,bar query values and the
        host-derived extension (e.g. 'bib' from bib.schema.org).
        """
        # Identify which extension layer(s) are requested
        # TODO: add subdomain support e.g. bib.schema.org/Globe
        # instead of Globe?ext=bib which is more for debugging.
        # 1. get a comma list from ?ext=foo,bar URL notation
        extlist = cleanPath( self.request.get("ext") )# for debugging
        extlist = re.sub(ext_re, '', extlist).split(',')
        log.debug("?ext= extension list: %s " % ", ".join(extlist))
        # 2. Ignore ?ext=, start with 'core' only.
        layerlist = [ "core"]
        # 3. Use host_ext if set, e.g. 'bib' from bib.schema.org
        if getHostExt() != None:
            log.debug("Host: %s host_ext: %s" % ( self.request.host , getHostExt() ) )
            extlist.append(getHostExt())
        # Report domain-requested extensions
        for x in extlist:
            log.debug("Ext filter found: %s" % str(x))
            if x in ["core", "localhost", ""]:
                continue
            layerlist.append("%s" % str(x))
        layerlist = list(set(layerlist)) # dedup
        log.debug("layerlist: %s" % layerlist)
        return layerlist
def handleJSONContext(self, node):
"""Handle JSON-LD Context non-homepage requests (including refuse if not enabled)."""
if not ENABLE_JSONLD_CONTEXT:
self.error(404)
self.response.out.write('<title>404 Not Found.</title><a href="/">404 Not Found (JSON-LD Context not enabled.)</a><br/><br/>')
return True
if (node=="docs/jsonldcontext.json.txt"):
jsonldcontext = GetJsonLdContext(layers=ALL_LAYERS)
self.response.headers['Content-Type'] = "text/plain"
self.emitCacheHeaders()
self.response.out.write( jsonldcontext )
return True
if (node=="docs/jsonldcontext.json"):
jsonldcontext = GetJsonLdContext(layers=ALL_LAYERS)
self.response.headers['Content-Type'] = "application/ld+json"
self.emitCacheHeaders()
self.response.out.write( jsonldcontext )
return True
return False
    # see also handleHomepage for conneg'd version.
    def handleSchemasPage(self, node, layerlist='core'):
        """Serve the schemas overview page (term counts plus extension links).

        Rendered output is cached in DataCache under 'SchemasPage'.
        Always returns True.
        """
        self.response.headers['Content-Type'] = "text/html"
        self.emitCacheHeaders()
        if DataCache.get('SchemasPage'):
            self.response.out.write( DataCache.get('SchemasPage') )
            log.debug("Serving recycled SchemasPage.")
            return True
        else:
            extensions = []
            for ex in sorted(ENABLED_EXTENSIONS):
                extensions.append("<a href=\"%s\">%s.schema.org</a>" % (makeUrl(ex,""),ex))
            template = JINJA_ENVIRONMENT.get_template('schemas.tpl')
            page = template.render({'sitename': getSiteName(),
                                    'staticPath': makeUrl("",""),
                                    'counts': self.getCounts(),
                                    'extensions': extensions,
                                    'menu_sel': "Schemas"})
            self.response.out.write( page )
            log.debug("Serving fresh SchemasPage.")
            DataCache.put("SchemasPage",page)
            return True
def getCounts(self):
text = ""
text += "The core vocabulary currently consists of %s Types, " % len(GetAllTypes("core"))
text += " %s Properties, " % len(GetAllProperties("core"))
text += "and %s Enumeration values." % len(GetAllEnumerationValues("core"))
return text
def handleFullHierarchyPage(self, node, layerlist='core'):
self.response.headers['Content-Type'] = "text/html"
self.emitCacheHeaders()
if DataCache.get('FullTreePage'):
self.response.out.write( DataCache.get('FullTreePage') )
log.debug("Serving recycled FullTreePage.")
return True
else:
template = JINJA_ENVIRONMENT.get_template('full.tpl')
extlist=""
extonlylist=[]
count=0
for i in layerlist:
if i != "core":
sep = ""
if count > 0:
sep = ", "
extlist += "'%s'%s" % (i, sep)
extonlylist.append(i)
count += 1
local_button = ""
local_label = "<h3>Core plus %s extension vocabularies</h3>" % extlist
if count == 0:
local_button = "Core vocabulary"
elif count == 1:
local_button = "Core plus %s extension" % extlist
else:
local_button = "Core plus %s extensions" % extlist
ext_button = ""
if count == 1:
ext_button = "Extension %s" % extlist
elif count > 1:
ext_button = "Extensions %s" % extlist
uThing = Unit.GetUnit("Thing")
uDataType = Unit.GetUnit("DataType")
mainroot = TypeHierarchyTree(local_label)
mainroot.traverseForHTML(uThing, layers=layerlist)
thing_tree = mainroot.toHTML()
#az_enums = GetAllEnumerationValues(layerlist)
#az_enums.sort( key = lambda u: u.id)
#thing_tree += self.listTerms(az_enums,"<br/><strong>Enumeration Values</strong><br/>")
fullmainroot = TypeHierarchyTree("<h3>Core plus all extension vocabularies</h3>")
fullmainroot.traverseForHTML(uThing, layers=ALL_LAYERS)
full_thing_tree = fullmainroot.toHTML()
#az_enums = GetAllEnumerationValues(ALL_LAYERS)
#az_enums.sort( key = lambda u: u.id)
#full_thing_tree += self.listTerms(az_enums,"<br/><strong>Enumeration Values</strong><br/>")
ext_thing_tree = None
if len(extonlylist) > 0:
extroot = TypeHierarchyTree("<h3>Extension: %s</h3>" % extlist)
extroot.traverseForHTML(uThing, layers=extonlylist)
ext_thing_tree = extroot.toHTML()
#az_enums = GetAllEnumerationValues(extonlylist)
#az_enums.sort( key = lambda u: u.id)
#ext_thing_tree += self.listTerms(az_enums,"<br/><strong>Enumeration Values</strong><br/>")
dtroot = TypeHierarchyTree("<h4>Data Types</h4>")
dtroot.traverseForHTML(uDataType, layers=layerlist)
datatype_tree = dtroot.toHTML()
full_button = "Core plus all extensions"
page = template.render({ 'thing_tree': thing_tree,
'full_thing_tree': full_thing_tree,
'ext_thing_tree': ext_thing_tree,
'datatype_tree': datatype_tree,
'local_button': local_button,
'full_button': full_button,
'ext_button': ext_button,
'sitename': getSiteName(),
'staticPath': makeUrl("",""),
'menu_sel': "Schemas"})
self.response.out.write( page )
log.debug("Serving fresh FullTreePage.")
DataCache.put("FullTreePage",page)
return True
    def handleJSONSchemaTree(self, node, layerlist='core'):
        """Handle a request for a JSON-LD tree representation of the schemas (RDFS-based).

        NOTE(review): an identical handleJSONSchemaTree definition appears
        later in this class; that later one takes effect, making this one
        dead code — candidate for removal.
        """
        self.response.headers['Content-Type'] = "application/ld+json"
        self.emitCacheHeaders()
        if DataCache.get('JSONLDThingTree'):
            self.response.out.write( DataCache.get('JSONLDThingTree') )
            log.debug("Serving recycled JSONLDThingTree.")
            return True
        else:
            uThing = Unit.GetUnit("Thing")
            mainroot = TypeHierarchyTree()
            mainroot.traverseForJSONLD(Unit.GetUnit("Thing"), layers=layerlist)
            thing_tree = mainroot.toJSON()
            self.response.out.write( thing_tree )
            log.debug("Serving fresh JSONLDThingTree.")
            DataCache.put("JSONLDThingTree",thing_tree)
            return True
        return False
    def handleExactTermPage(self, node, layers='core'):
        """Handle with requests for specific terms like /Person, /fooBar.

        Serves the term page when the term is visible in 'layers'; otherwise,
        if hosted extensions are enabled and the term exists in other layers,
        serves a 'wrong extension' pointer page. Returns True when a response
        was written, False when the term is unknown.
        """
        #self.outputStrings = [] # blank slate
        schema_node = Unit.GetUnit(node) # e.g. "Person", "CreativeWork".
        log.debug("Layers: %s",layers)
        if inLayer(layers, schema_node):
            self.emitExactTermPage(schema_node, layers=layers)
            return True
        else:
            # log.info("Looking for node: %s in layers: %s" % (node.id, ",".join(all_layers.keys() )) )
            if not ENABLE_HOSTED_EXTENSIONS:
                return False
            if schema_node is not None and schema_node.id in all_terms:# look for it in other layers
                log.debug("TODO: layer toc: %s" % all_terms[schema_node.id] )
                # self.response.out.write("Layers should be listed here. %s " % all_terms[node.id] )
                extensions = []
                for x in all_terms[schema_node.id]:
                    x = x.replace("#","")
                    ext = {}
                    ext['href'] = makeUrl(x,schema_node.id)
                    ext['text'] = x
                    extensions.append(ext)
                    #self.response.out.write("<li><a href='%s'>%s</a></li>" % (makeUrl(x,schema_node.id), x) )
                template = JINJA_ENVIRONMENT.get_template('wrongExt.tpl')
                page = template.render({ 'target': schema_node.id,
                                        'extensions': extensions,
                                        'sitename': "schema.org",
                                        'staticPath': makeUrl("","")})
                self.response.out.write( page )
                log.debug("Serving fresh wrongExtPage.")
                return True
        return False
def handle404Failure(self, node, layers="core"):
    """Emit a 404 page, suggesting near-miss terms derived from the path.

    Always returns True (the 404 response itself was served).
    """
    self.error(404)
    self.emitSchemaorgHeaders("404 Missing")
    self.response.out.write('<h3>404 Not Found.</h3><p><br/>Page not found. Please <a href="/">try the homepage.</a><br/><br/></p>')
    clean_node = cleanPath(node)
    log.debug("404: clean_node: clean_node: %s node: %s" % (clean_node, node))
    # Suggest the first path segment if it names a known term (e.g. /Person/xyz).
    base_term = Unit.GetUnit( node.rsplit('/')[0] )
    if base_term != None :
        self.response.out.write('<div>Perhaps you meant: <a href="/%s">%s</a></div> <br/><br/> ' % ( base_term.id, base_term.id ))
    # Suggest the base property for Action -input/-output style names.
    base_actionprop = Unit.GetUnit( node.rsplit('-')[0] )
    if base_actionprop != None :
        self.response.out.write('<div>Looking for an <a href="/Action">Action</a>-related property? Note that xyz-input and xyz-output have <a href="/docs/actions.html">special meaning</a>. See also: <a href="/%s">%s</a></div> <br/><br/> ' % ( base_actionprop.id, base_actionprop.id ))
    return True
def handleJSONSchemaTree(self, node, layerlist='core'):
    """Handle a request for a JSON-LD tree representation of the schemas (RDFS-based)."""
    # NOTE(review): this is a byte-for-byte duplicate of an earlier
    # handleJSONSchemaTree definition in this class; this later one is the
    # one Python keeps. One of the two should be removed.
    self.response.headers['Content-Type'] = "application/ld+json"
    self.emitCacheHeaders()
    if DataCache.get('JSONLDThingTree'):
        self.response.out.write( DataCache.get('JSONLDThingTree') )
        log.debug("Serving recycled JSONLDThingTree.")
        return True
    else:
        uThing = Unit.GetUnit("Thing")  # NOTE(review): unused local
        mainroot = TypeHierarchyTree()
        mainroot.traverseForJSONLD(Unit.GetUnit("Thing"), layers=layerlist)
        thing_tree = mainroot.toJSON()
        self.response.out.write( thing_tree )
        log.debug("Serving fresh JSONLDThingTree.")
        DataCache.put("JSONLDThingTree",thing_tree)
        return True
    return False  # unreachable: both branches above return True
# if (node == "version/2.0/" or node == "version/latest/" or "version/" in node) ...
def handleFullReleasePage(self, node, layerlist='core'):
    """Deal with a request for a full release summary page. Lists all terms and their descriptions inline in one long page.
    version/latest/ is from current schemas, others will need to be loaded and emitted from stored HTML snapshots (for now)."""
    # http://jinja.pocoo.org/docs/dev/templates/
    global releaselog
    clean_node = cleanPath(node)
    self.response.headers['Content-Type'] = "text/html"
    self.emitCacheHeaders()
    # Path shape is version/<version>[/<format>].
    requested_version = clean_node.rsplit('/')[1]
    requested_format = clean_node.rsplit('/')[-1]
    if len( clean_node.rsplit('/') ) == 2:
        requested_format=""
    log.info("Full release page for: node: '%s' cleannode: '%s' requested_version: '%s' requested_format: '%s' l: %s" % (node, clean_node, requested_version, requested_format, len(clean_node.rsplit('/')) ) )
    # Full release page for: node: 'version/' cleannode: 'version/' requested_version: '' requested_format: '' l: 2
    # /version/ -> serve a table-of-contents page listing the releases.
    if (clean_node=="version/" or clean_node=="version") and requested_version=="" and requested_format=="":
        log.info("Table of contents should be sent instead, then succeed.")
        if DataCache.get('tocVersionPage'):
            self.response.out.write( DataCache.get('tocVersionPage'))
            return True
        else:
            template = JINJA_ENVIRONMENT.get_template('tocVersionPage.tpl')
            page = template.render({ "releases": releaselog.keys(),
                                     "menu_sel": "Schemas",
                                     "sitename": getSiteName(),
                                     'staticPath': makeUrl("","")})
            self.response.out.write( page )
            log.debug("Serving fresh tocVersionPage.")
            DataCache.put("tocVersionPage",page)
            return True
    if requested_version in releaselog:
        # Released versions are served from filesystem snapshots.
        log.info("Version '%s' was released on %s. Serving from filesystem." % ( node, releaselog[requested_version] ))
        version_rdfa = "data/releases/%s/schema.rdfa" % requested_version
        version_allhtml = "data/releases/%s/schema-all.html" % requested_version
        version_nt = "data/releases/%s/schema.nt" % requested_version
        if requested_format=="":
            self.response.out.write( open(version_allhtml, 'r').read() )
            return True
        # log.info("Skipping filesystem for now.")
        if requested_format=="schema.rdfa":
            self.response.headers['Content-Type'] = "application/octet-stream" # It is HTML but ... not really.
            self.response.headers['Content-Disposition']= "attachment; filename=schemaorg_%s.rdfa.html" % requested_version
            self.response.out.write( open(version_rdfa, 'r').read() )
            return True
        if requested_format=="schema.nt":
            self.response.headers['Content-Type'] = "application/n-triples" # It is HTML but ... not really.
            self.response.headers['Content-Disposition']= "attachment; filename=schemaorg_%s.rdfa.nt" % requested_version
            self.response.out.write( open(version_nt, 'r').read() )
            return True
        if requested_format != "":
            return False # Turtle, csv etc.
    else:
        log.info("Unreleased version requested. We only understand requests for latest if unreleased.")
        if requested_version != "latest":
            return False
            log.info("giving up to 404.")  # NOTE(review): unreachable (after return)
        else:
            # Generate (and cache) a live view of the current schemas.
            log.info("generating a live view of this latest release.")
            if DataCache.get('FullReleasePage'):
                self.response.out.write( DataCache.get('FullReleasePage') )
                log.debug("Serving recycled FullReleasePage.")
                return True
            else:
                template = JINJA_ENVIRONMENT.get_template('fullReleasePage.tpl')
                mainroot = TypeHierarchyTree()
                mainroot.traverseForHTML(Unit.GetUnit("Thing"), hashorslash="#term_", layers=layerlist)
                thing_tree = mainroot.toHTML()
                base_href = "/version/%s/" % requested_version
                az_types = GetAllTypes()
                az_types.sort( key=lambda u: u.id)
                az_type_meta = {}
                az_props = GetAllProperties()
                az_props.sort( key = lambda u: u.id)
                az_prop_meta = {}
                #TODO: ClassProperties (self, cl, subclass=False, layers="core", out=None, hashorslash="/"):
                # TYPES
                for t in az_types:
                    props4type = HTMLOutput() # properties applicable for a type
                    props2type = HTMLOutput() # properties that go into a type
                    self.emitSimplePropertiesPerType(t, out=props4type, hashorslash="#term_" )
                    self.emitSimplePropertiesIntoType(t, out=props2type, hashorslash="#term_" )
                    #self.ClassProperties(t, out=typeInfo, hashorslash="#term_" )
                    tcmt = Markup(GetComment(t))
                    az_type_meta[t]={}
                    az_type_meta[t]['comment'] = tcmt
                    az_type_meta[t]['props4type'] = props4type.toHTML()
                    az_type_meta[t]['props2type'] = props2type.toHTML()
                # PROPERTIES
                for pt in az_props:
                    attrInfo = HTMLOutput()
                    rangeList = HTMLOutput()
                    domainList = HTMLOutput()
                    # self.emitAttributeProperties(pt, out=attrInfo, hashorslash="#term_" )
                    # self.emitSimpleAttributeProperties(pt, out=rangedomainInfo, hashorslash="#term_" )
                    self.emitRangeTypesForProperty(pt, out=rangeList, hashorslash="#term_" )
                    self.emitDomainTypesForProperty(pt, out=domainList, hashorslash="#term_" )
                    cmt = Markup(GetComment(pt))
                    az_prop_meta[pt] = {}
                    az_prop_meta[pt]['comment'] = cmt
                    az_prop_meta[pt]['attrinfo'] = attrInfo.toHTML()
                    az_prop_meta[pt]['rangelist'] = rangeList.toHTML()
                    az_prop_meta[pt]['domainlist'] = domainList.toHTML()
                page = template.render({ "base_href": base_href, 'thing_tree': thing_tree,
                                         'liveversion': SCHEMA_VERSION,
                                         'requested_version': requested_version,
                                         'releasedate': releaselog[str(SCHEMA_VERSION)],
                                         'az_props': az_props, 'az_types': az_types,
                                         'az_prop_meta': az_prop_meta, 'az_type_meta': az_type_meta,
                                         'sitename': getSiteName(),
                                         'staticPath': makeUrl("",""),
                                         'menu_sel': "Documentation"})
                self.response.out.write( page )
                log.debug("Serving fresh FullReleasePage.")
                DataCache.put("FullReleasePage",page)
                return True
def handleExtensionContents(self,ext):
    """Return an HTML fragment listing the types, properties and enumeration
    values defined or referenced in extension *ext* ("" if not enabled)."""
    if not ext in ENABLED_EXTENSIONS:
        log.info("cannot list ext %s",ext)
        return ""
    buff = StringIO.StringIO()
    az_types = GetAllTypes(ext)
    az_types.sort( key=lambda u: u.id)
    az_props = GetAllProperties(ext)
    az_props.sort( key = lambda u: u.id)
    az_enums = GetAllEnumerationValues(ext)
    az_enums.sort( key = lambda u: u.id)
    buff.write("<br/><h3>Terms defined or referenced in the '%s' extension.</h3>" % ext)
    buff.write(self.listTerms(az_types,"<br/><strong>Types</strong> (%s)<br/>" % len(az_types)))
    buff.write(self.listTerms(az_props,"<br/><br/><strong>Properties</strong> (%s)<br/>" % len(az_props)))
    buff.write(self.listTerms(az_enums,"<br/><br/><strong>Enumeration values</strong> (%s)<br/>" % len(az_enums)))
    ret = buff.getvalue()
    buff.close()
    return ret
def listTerms(self,terms,prefix=""):
    """Render *terms* as a comma-separated list of term links (via self.ml),
    preceded by *prefix*; returns "" for an empty list."""
    buff = StringIO.StringIO()
    if(len(terms) > 0):
        buff.write(prefix)
        first = True
        sep = ""
        for term in terms:
            # Emit ", " before every term except the first.
            if not first:
                sep = ", "
            else:
                first = False
            buff.write("%s%s" % (sep,self.ml(term)))
    ret = buff.getvalue()
    buff.close()
    return ret
def setupHostinfo(self, node, test=""):
    """Record per-request host state (scheme, extension subdomain, base host,
    port, debug flag) in app variables.

    *test* overrides the request's host string for test harness use.
    Returns False when the request was redirected (www -> base host),
    True otherwise.
    """
    hostString = test
    if test == "":
        hostString = self.request.host
    scheme = "http" #Default for tests
    if not getInTestHarness(): #Get the actual scheme from the request
        scheme = self.request.scheme
    # First label of the host, e.g. "bib" from bib.schema.org.
    host_ext = re.match( r'([\w\-_]+)[\.:]?', hostString).group(1)
    log.info("setupHostinfo: scheme=%s hoststring=%s host_ext?=%s" % (scheme, hostString, str(host_ext) ))
    setHttpScheme(scheme)
    split = hostString.rsplit(':')
    myhost = split[0]
    mybasehost = myhost
    myport = "80"
    if len(split) > 1:
        myport = split[1]
    if host_ext != None:
        # e.g. "bib"
        log.debug("HOST: Found %s in %s" % ( host_ext, hostString ))
        if host_ext == "www":
            # www is special case that cannot be an extension - need to redirect to basehost
            mybasehost = mybasehost[4:]
            return self.redirectToBase(node)
        elif not host_ext in ENABLED_EXTENSIONS:
            host_ext = ""
        else:
            # Strip "<ext>." from the front to get the base host.
            mybasehost = mybasehost[len(host_ext) + 1:]
    setHostExt(host_ext)
    setBaseHost(mybasehost)
    setHostPort(myport)
    # Select the per-extension data cache ("core" when no extension).
    dcn = host_ext
    if dcn == None or dcn == "" or dcn =="core":
        dcn = "core"
    log.debug("sdoapp.py setting current datacache to: %s " % dcn)
    DataCache.setCurrent(dcn)
    debugging = False
    if "localhost" in hostString or "sdo-phobos.appspot.com" in hostString or FORCEDEBUGGING:
        debugging = True
    setAppVar('debugging',debugging)
    return True
def redirectToBase(self,node=""):
    """Issue a permanent (301) redirect to the same path on the base host.

    Returns False so callers (e.g. setupHostinfo/get) stop processing.
    """
    uri = makeUrl("",node)
    self.response = webapp2.redirect(uri, True, 301)
    log.info("Redirecting [301] to: %s" % uri)
    return False
def get(self, node):
    """Get a schema.org site page generated for this node/term.
    Web content is written directly via self.response.
    CORS enabled all URLs - we assume site entirely public.
    See http://en.wikipedia.org/wiki/Cross-origin_resource_sharing
    These should give a JSON version of schema.org:
        curl --verbose -H "Accept: application/ld+json" http://localhost:8080/docs/jsonldcontext.json
        curl --verbose -H "Accept: application/ld+json" http://localhost:8080/docs/jsonldcontext.json.txt
        curl --verbose -H "Accept: application/ld+json" http://localhost:8080/
    Per-term pages vary for type, property and enumeration.
    Last resort is a 404 error if we do not exactly match a term's id.
    See also https://webapp-improved.appspot.com/guide/request.html#guide-request
    """
    # setupHostinfo returns False when it already redirected the request.
    if not self.setupHostinfo(node):
        return
    self.callCount()
    self.emitHTTPHeaders(node)
    if (node in silent_skip_list):
        return
    if ENABLE_HOSTED_EXTENSIONS:
        layerlist = self.setupExtensionLayerlist(node) # e.g. ['core', 'bib']
    else:
        layerlist = ["core"]
    setSiteName(self.getExtendedSiteName(layerlist)) # e.g. 'bib.schema.org', 'schema.org'
    log.debug("EXT: set sitename to %s " % getSiteName())
    # Dispatch: each handler returns True when it served the request.
    if (node in ["", "/"]):
        if self.handleHomepage(node):
            return
        else:
            log.info("Error handling homepage: %s" % node)
            return
    if node in ["docs/jsonldcontext.json.txt", "docs/jsonldcontext.json"]:
        if self.handleJSONContext(node):
            return
        else:
            log.info("Error handling JSON-LD context: %s" % node)
            return
    if (node == "docs/full.html"): # DataCache.getDataCache.get
        if self.handleFullHierarchyPage(node, layerlist=layerlist):
            return
        else:
            log.info("Error handling full.html : %s " % node)
            return
    if (node == "docs/schemas.html"): # DataCache.getDataCache.get
        if self.handleSchemasPage(node, layerlist=layerlist):
            return
        else:
            log.info("Error handling schemas.html : %s " % node)
            return
    if (node == "docs/tree.jsonld" or node == "docs/tree.json"):
        if self.handleJSONSchemaTree(node, layerlist=layerlist):
            return
        else:
            log.info("Error handling JSON-LD schema tree: %s " % node)
            return
    if (node == "version/2.0/" or node == "version/latest/" or "version/" in node):
        if self.handleFullReleasePage(node, layerlist=layerlist):
            return
        else:
            log.info("Error handling full release page: %s " % node)
            if self.handle404Failure(node):
                return
            else:
                log.info("Error handling 404 under /version/")
                return
    if(node == "_siteDebug"):
        self.siteDebug()
        return
    # Pages based on request path matching a Unit in the term graph:
    if self.handleExactTermPage(node, layers=layerlist):
        return
    else:
        log.info("Error handling exact term page. Assuming a 404: %s" % node)
        # Drop through to 404 as default exit.
        if self.handle404Failure(node):
            return
        else:
            log.info("Error handling 404.")
            return
def siteDebug(self):
    """Render the /_siteDebug page: current host/app settings, data cache
    sizes and call-count statistics as an HTML table."""
    global STATS
    template = JINJA_ENVIRONMENT.get_template('siteDebug.tpl')
    page = template.render({'sitename': getSiteName(),
                            'staticPath': makeUrl("","")})
    self.response.out.write( page )
    self.response.out.write("<table style=\"width: 50%; border: solid 1px #CCCCCC; border-collapse: collapse;\"><tbody>\n")
    self.writeDebugRow("Setting","Value",True)
    self.writeDebugRow("httpScheme",getHttpScheme())
    self.writeDebugRow("host_ext",getHostExt())
    self.writeDebugRow("basehost",getBaseHost())
    self.writeDebugRow("hostport",getHostPort())
    self.writeDebugRow("sitename",getSiteName())
    self.writeDebugRow("debugging",getAppVar('debugging'))
    self.writeDebugRow("intestharness",getInTestHarness())
    self.writeDebugRow("Current DataCache",DataCache.getCurrent())
    self.writeDebugRow("DataCaches",len(DataCache.keys()))
    for c in DataCache.keys():
        self.writeDebugRow("DataCache[%s] size" % c, len(DataCache.getCache(c)))
    for s in STATS.keys():
        self.writeDebugRow("%s" % s, STATS[s])
    # NOTE(review): the two literals below emit "</tbody><table>" and
    # "<body>" where "</table>" / "</body>" look intended - malformed HTML
    # is written as-is. Cannot be changed in a doc-only pass; flagging.
    self.response.out.write("</tbody><table><br/>\n")
    self.response.out.write( "</div>\n<body>\n</html>" )
def writeDebugRow(self, term, value, head=False):
    """Write one two-column HTML table row to the response.

    *term* and *value* become the two cells; when *head* is true the cells
    are emitted as styled <th> header cells instead of <td>.
    """
    tag = "td"
    cell_style = "border: solid 1px #CCCCCC; border-collapse: collapse;"
    if head:
        tag = "th"
        cell_style += " color: #FFFFFF; background: #888888;"
    cells = "".join(
        "<%s style=\"%s\">%s</%s>" % (tag, cell_style, content, tag)
        for content in (term, value))
    self.response.out.write("<tr>%s</tr>\n" % cells)
def callCount(self):
    """Bump the global request counters: total, per-scheme, and per-extension
    (or "core" when no extension subdomain is active)."""
    statInc("total calls")
    statInc(getHttpScheme() + " calls")
    if getHostExt() != "":
        statInc(getHostExt() + " calls")
    else:
        statInc("core calls")
#: In-process counters keyed by statistic name (rendered by siteDebug()).
STATS = {}


def statInc(stat):
    """Increment the global counter named *stat*, creating it at zero first.

    Args:
        stat: counter name, e.g. "total calls".
    """
    # dict.get with a default replaces the original manual
    # "if stat in STATS: val += ..." dance; same behavior, one lookup fewer.
    STATS[stat] = STATS.get(stat, 0) + 1
def setInTestHarness(val):
    """Record whether this process is running under the test harness."""
    global INTESTHARNESS
    INTESTHARNESS = val


def getInTestHarness():
    """Return the flag most recently recorded by setInTestHarness()."""
    # Reading a module global needs no 'global' declaration.
    return INTESTHARNESS
# Fallback storage for app-scoped variables when running outside webapp2
# (i.e. under the test harness). Not thread-safe.
TestAppIndex = {}

def getAppVar(index):
    """Fetch an app-scoped variable, from the webapp2 registry normally or
    from TestAppIndex under the test harness."""
    global TestAppIndex
    reg = None
    if not getInTestHarness():
        app = webapp2.get_app()
        reg = app.registry
    else:
        log.debug("getAppVar(): Using non-threadsafe session variables for test only")
        reg = TestAppIndex
    return reg.get(index)
def setAppVar(index,val):
    """Store an app-scoped variable, in the webapp2 registry normally or in
    TestAppIndex under the test harness."""
    global TestAppIndex
    reg = None
    if not getInTestHarness():
        app = webapp2.get_app()
        reg = app.registry
    else:
        log.debug("setAppVar(): Using non-threadsafe session variables for test only")
        reg = TestAppIndex
    reg[index] = val
# Thin convenience wrappers around setAppVar()/getAppVar() for the
# request-scoped values used throughout this module.

def setHttpScheme(val):
    setAppVar('httpScheme',val)

def getHttpScheme():
    return getAppVar('httpScheme')

def setHostExt(val):
    setAppVar('host_ext',val)

def getHostExt():
    return getAppVar('host_ext')

def setSiteName(val):
    setAppVar('sitename',val)

def getSiteName():
    return getAppVar('sitename')

def setHost(val):
    setAppVar('myhost',val)

def getHost():
    return getAppVar('myhost')

def setBaseHost(val):
    setAppVar('mybasehost',val)

def getBaseHost():
    return getAppVar('mybasehost')

def setHostPort(val):
    setAppVar('myport',val)

def getHostPort():
    return getAppVar('myport')
def makeUrl(ext="", path=""):
    """Build an absolute URL for the current request's scheme/host/port.

    *ext* ("" or "core" means none) becomes a subdomain prefix; *path* is
    made absolute with a leading slash when non-empty.
    """
    prefix = "" if ext in ("", "core") else "%s." % ext
    port_suffix = "" if getHostPort() == "80" else ":%s" % getHostPort()
    abs_path = path
    if abs_path != "" and not abs_path.startswith("/"):
        abs_path = "/%s" % abs_path
    return "%s://%s%s%s%s" % (
        getHttpScheme(), prefix, getBaseHost(), port_suffix, abs_path)
#log.info("STARTING UP... reading schemas.")
# Module import time: load the core schemas (and extensions when enabled)
# once, then expose the WSGI application routing everything to ShowUnit.
read_schemas(loadExtensions=ENABLE_HOSTED_EXTENSIONS)
if ENABLE_HOSTED_EXTENSIONS:
    read_extensions(ENABLED_EXTENSIONS)
schemasInitialized = True

app = ndb.toplevel(webapp2.WSGIApplication([("/(.*)", ShowUnit)]))
| pwz3n0/schemaorg | sdoapp.py | Python | apache-2.0 | 78,319 |
"""Version information for Review Board dependencies.
This contains constants that other parts of Review Board (primarily packaging)
can use to look up information on major dependencies of Review Board.
The contents in this file might change substantially between releases. If
you're going to make use of data from this file, code defensively.
"""
import sys
import textwrap
#: The minimum supported version of Python 2.x.
PYTHON_2_MIN_VERSION = (2, 7)

#: The minimum supported version of Python 3.x.
PYTHON_3_MIN_VERSION = (3, 6)

#: A string representation of the minimum supported version of Python 2.x.
PYTHON_2_MIN_VERSION_STR = '.'.join(str(part) for part in PYTHON_2_MIN_VERSION)

#: A string representation of the minimum supported version of Python 3.x.
PYTHON_3_MIN_VERSION_STR = '.'.join(str(part) for part in PYTHON_3_MIN_VERSION)

#: A dependency version range for Python 2.x.
PYTHON_2_RANGE = "=='{0}.*'".format(PYTHON_2_MIN_VERSION_STR)

#: A dependency version range for Python 3.x.
PYTHON_3_RANGE = ">='{0}'".format(PYTHON_3_MIN_VERSION_STR)
# NOTE: This file may not import other (non-Python) modules! (Except for
# the parent reviewboard module, which must be importable anyway). This
# module is used for packaging and be needed before any dependencies
# have been installed.


#: The major version of Django we're using for documentation.
django_doc_major_version = '1.11'

#: The major version of Djblets we're using for documentation.
djblets_doc_major_version = '2.x'

#: The version of Django required for the current version of Python.
#: (Referenced below in package_dependencies.)
django_version = '>=1.11.29,<1.11.999'

#: The version range required for Djblets.
#: (Referenced below in package_dependencies.)
djblets_version = '>=2.3,<=2.999'
#: All dependencies required to install Review Board.
#:
#: Values are either a version-specifier string, or a list of
#: {'python': <range>, 'version': <specifier>} dicts when the requirement
#: differs between Python 2 and 3 (see build_dependency_list()).
package_dependencies = {
    'bleach': '>=3.3',
    'cryptography': [
        {
            'python': PYTHON_2_RANGE,
            'version': '>=1.8.1,<3.3.999',
        },
        {
            'python': PYTHON_3_RANGE,
            'version': '>=1.8.1',
        },
    ],
    'Django': django_version,
    'django-cors-headers': '>=3.0.2,<3.1.0',
    'django_evolution': '>=2.1.4,<2.999',
    'django-haystack': '>=2.8.1,<2.999',
    'django-multiselectfield': '>=0.1.12,<=0.1.999',
    'django-oauth-toolkit': '>=0.9.0,<0.9.999',
    'Djblets': djblets_version,
    'docutils': '',
    'pydiffx': '>=1.0,<=1.999',

    # Markdown 3.2 dropped support for Python 2.
    'markdown': [
        {
            'python': PYTHON_2_RANGE,
            'version': '>=3.1.1,<3.1.999',
        },
        {
            'python': PYTHON_3_RANGE,
            'version': '>=3.3.3',
        },
    ],
    'mimeparse': '>=0.1.3',
    'paramiko': '>=1.12',
    'Pygments': [
        {
            'python': PYTHON_2_RANGE,
            'version': '>=2.1,<=2.5.999',
        },
        {
            'python': PYTHON_3_RANGE,
            'version': '>=2.1',
        },
    ],

    # To keep behavior consistent between Python 2 and 3 installs, we're
    # sticking with the pymdown-extensions 6.x range. At the time of this
    # writing (November 14, 2020), the latest version is 8.0.1, and 6.3+
    # all require Python-Markdown 3.2+, which requires Python 3.
    #
    # Once we drop Python 2 support, we can migrate to the latest
    # pymdown-extensions release.
    'pymdown-extensions': [
        {
            'python': PYTHON_2_RANGE,
            'version': '>=6.2,<6.2.999',
        },
        {
            'python': PYTHON_3_RANGE,
            'version': '>=6.3,<6.3.999',
        },
    ],
    'python-memcached': '',
    'pytz': '>=2015.2',
    'Whoosh': '>=2.6',

    # The following are pinned versions/ranges needed to satisfy dependency
    # conflicts between multiple projects. We are not using these directly.
    # These should be removed in future versions of Review Board as
    # dependencies change.

    # asana dependencies:
    'requests-oauthlib': '>=0.8,<=1.0',

    # django-oauth-toolkit dependencies:
    'django-braces': '==1.13.0',
    'oauthlib': '==1.0.1',

    # cryptography and paramiko dependencies:
    'bcrypt': [
        {
            'python': PYTHON_2_RANGE,
            'version': '>=3.1.7,<3.1.999',
        },
    ],

    # The core "packaging" dependency dropped Python 2.7 support in 21.0
    # (released July 3, 2021), so we need to pin it.
    'packaging': [
        {
            'python': PYTHON_2_RANGE,
            'version': '<21.0',
        },
    ],

    # setuptools and other modules need pyparsing, but 3.0+ won't support
    # Python 2.7.
    'pyparsing': [
        {
            'python': PYTHON_2_RANGE,
            'version': '>=2.4,<2.4.999',
        },
    ],
}
#: Dependencies only specified during the packaging process.
#:
#: These dependencies are not used when simply developing Review Board.
#: The dependencies here are generally intended to be those that themselves
#: require Review Board.
package_only_dependencies = {
    'rbintegrations': '>=2.0.2,<2.999',
}


# Module-level tallies of messages reported via dependency_error() /
# dependency_warning(); consulted by fail_if_missing_dependencies().
_dependency_error_count = 0
_dependency_warning_count = 0
def build_dependency_list(deps, version_prefix=''):
    """Build a list of dependency specifiers from a dependency map.

    This can be used along with :py:data:`package_dependencies`,
    :py:data:`npm_dependencies`, or other dependency dictionaries to build a
    list of dependency specifiers for use on the command line or in
    :file:`setup.py`.

    Args:
        deps (dict):
            A dictionary of dependencies.

    Returns:
        list of unicode:
        A list of dependency specifiers, sorted case-insensitively.
    """
    specifiers = []

    for dep_name, dep_details in deps.items():
        if isinstance(dep_details, list):
            # Python-version-specific requirement: one specifier per entry,
            # qualified with an environment marker.
            for entry in dep_details:
                specifiers.append(
                    '%s%s%s; python_version%s'
                    % (dep_name, version_prefix, entry['version'],
                       entry['python']))
        else:
            specifiers.append('%s%s%s'
                              % (dep_name, version_prefix, dep_details))

    specifiers.sort(key=str.lower)

    return specifiers
def _dependency_message(message, prefix=''):
"""Utility function to print and track a dependency-related message.
This will track that a message was printed, allowing us to determine if
any messages were shown to the user.
Args:
message (unicode):
The dependency-related message to display. This will be wrapped,
but long strings (like paths) will not contain line breaks.
prefix (unicode, optional):
The prefix for the message. All text will be aligned after this.
"""
sys.stderr.write('\n%s\n'
% textwrap.fill(message,
initial_indent=prefix,
subsequent_indent=' ' * len(prefix),
break_long_words=False,
break_on_hyphens=False))
def dependency_error(message):
    """Print a dependency error.

    This will track that a message was printed, allowing us to determine if
    any messages were shown to the user.

    Args:
        message (unicode):
            The dependency error to display. This will be wrapped, but long
            strings (like paths) will not contain line breaks.
    """
    global _dependency_error_count

    _dependency_message(message, prefix='ERROR: ')
    # Counted so fail_if_missing_dependencies() can exit non-zero.
    _dependency_error_count += 1
def dependency_warning(message):
    """Print a dependency warning.

    This will track that a message was printed, allowing us to determine if
    any messages were shown to the user.

    Args:
        message (unicode):
            The dependency warning to display. This will be wrapped, but long
            strings (like paths) will not contain line breaks.
    """
    global _dependency_warning_count

    _dependency_message(message, prefix='WARNING: ')
    # Counted so fail_if_missing_dependencies() can point users at the docs.
    _dependency_warning_count += 1
def fail_if_missing_dependencies():
    """Exit the process with an error if dependency messages were shown.

    If :py:func:`dependency_error` or :py:func:`dependency_warning` were
    called, this will print some help information with a link to the manual
    and then exit the process (status 1 only when errors, not just
    warnings, were reported).
    """
    if _dependency_warning_count > 0 or _dependency_error_count > 0:
        # Imported lazily: this module must stay importable before Review
        # Board's own dependencies are installed.
        from reviewboard import get_manual_url

        _dependency_message('Please see %s for help setting up Review Board.'
                            % get_manual_url())

        if _dependency_error_count > 0:
            sys.exit(1)
| reviewboard/reviewboard | reviewboard/dependencies.py | Python | mit | 8,444 |
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
    def sumOfLeftLeaves(self, root):
        """Sum the values of every left leaf in the binary tree at *root*.

        Iterative depth-first traversal: a node's left child is either a
        leaf (counted) or pushed for further exploration, as is any right
        child.

        :type root: TreeNode
        :rtype: int
        """
        total = 0
        stack = [root] if root else []
        while stack:
            node = stack.pop()
            left = node.left
            if left is not None:
                if left.left is None and left.right is None:
                    # Left child with no children: a left leaf.
                    total += left.val
                else:
                    stack.append(left)
            if node.right is not None:
                stack.append(node.right)
        return total
| ckclark/leetcode | py/sum-of-left-leaves.py | Python | apache-2.0 | 628 |
###
# Copyright (c) 2005, Jeremiah Fincher
# Copyright (c) 2009, James Vega
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions, and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the author of this software nor the name of
# contributors to this software may be used to endorse or promote products
# derived from this software without specific prior written consent.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
###
import supybot.conf as conf
import supybot.registry as registry
def configure(advanced):
    """Interactive supybot-wizard hook: register the plugin and optionally
    enable the URL-shrinking snarfer.

    *advanced* indicates an advanced user; unused here.
    """
    from supybot.questions import output, expect, anything, something, yn
    conf.registerPlugin('ShrinkUrl', True)
    if yn("""This plugin offers a snarfer that will go retrieve a shorter
             version of long URLs that are sent to the channel.  Would you
             like this snarfer to be enabled?""", default=False):
        conf.supybot.plugins.ShrinkUrl.shrinkSnarfer.setValue(True)
class ShrinkService(registry.OnlySomeStrings):
    """Registry value restricted to the supported shrinking services."""
    # 'ln' -> ln-s.net, 'tiny' -> tinyurl.com (see the 'default' setting below).
    validStrings = ('ln', 'tiny')
ShrinkUrl = conf.registerPlugin('ShrinkUrl')
conf.registerChannelValue(ShrinkUrl, 'shrinkSnarfer',
registry.Boolean(False, """Determines whether the
shrink snarfer is enabled. This snarfer will watch for URLs in the
channel, and if they're sufficiently long (as determined by
supybot.plugins.ShrinkUrl.minimumLength) it will post a
smaller URL from either ln-s.net or tinyurl.com, as denoted in
supybot.plugins.ShrinkUrl.default."""))
conf.registerChannelValue(ShrinkUrl.shrinkSnarfer, 'showDomain',
registry.Boolean(True, """Determines whether the snarfer will show the
domain of the URL being snarfed along with the shrunken URL."""))
conf.registerChannelValue(ShrinkUrl, 'minimumLength',
registry.PositiveInteger(48, """The minimum length a URL must be before
the bot will shrink it."""))
conf.registerChannelValue(ShrinkUrl, 'nonSnarfingRegexp',
registry.Regexp(None, """Determines what URLs are to be snarfed; URLs
matching the regexp given will not be snarfed. Give the empty string if
you have no URLs that you'd like to exclude from being snarfed."""))
conf.registerChannelValue(ShrinkUrl, 'outFilter',
registry.Boolean(False, """Determines whether the bot will shrink the URLs
of outgoing messages if those URLs are longer than
supybot.plugins.ShrinkUrl.minimumLength."""))
conf.registerChannelValue(ShrinkUrl, 'default',
ShrinkService('ln', """Determines what website the bot will use when
shrinking a URL."""))
conf.registerGlobalValue(ShrinkUrl, 'bold',
registry.Boolean(True, """Determines whether this plugin will bold certain
portions of its replies."""))
# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79:
| tecan/xchat-rt | plugins/scripts/Supybot-0.83.4.1-bitcoinotc-bot/plugins/ShrinkUrl/config.py | Python | gpl-2.0 | 3,906 |
import aquests
def test_postgres ():
    """Fire 100 identical SELECTs at a local PostgreSQL through aquests and
    wait for all responses.

    Requires a reachable server at 127.0.0.1:5432 with database "mydb" and
    credentials test/1111.
    """
    dbo = aquests.postgresql ("127.0.0.1:5432", "mydb", ("test", "1111"))
    for i in range (100):
        dbo.do ("SELECT * FROM weather;")
    aquests.fetchall ()
| hansroh/aquests | tests/test_postgres.py | Python | mit | 192 |
#!/usr/bin/env python
'''
Command to send dynamic filesystem information to Zagg
'''
# vim: expandtab:tabstop=4:shiftwidth=4
#
# Copyright 2015 Red Hat Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#This is not a module, but pylint thinks it is. This is a command.
#pylint: disable=invalid-name,import-error
import argparse
import re
from openshift_tools.monitoring.metric_sender import MetricSender
from openshift_tools.monitoring import pminfo
def parse_args(args=None):
    """Parse the command-line options for the disk metric sender.

    Args:
        args: optional list of argument strings; ``None`` (the default)
            parses ``sys.argv[1:]``. Added for unit-testability; existing
            ``parse_args()`` callers are unaffected.

    Returns:
        argparse.Namespace with verbose/debug/filter_pod_pv/force_send_zeros
        attributes (None when the flag is absent, True when given).
    """
    parser = argparse.ArgumentParser(description='Disk metric sender')
    parser.add_argument('-v', '--verbose', action='store_true', default=None, help='Verbose?')
    parser.add_argument('--debug', action='store_true', default=None, help='Debug?')
    parser.add_argument('--filter-pod-pv', action='store_true', default=None,
                        help="Filter out OpenShift Pod PV mounts")
    # Bug fix: argparse %-formats help text, so the bare '%' in "0% full"
    # made --help raise "ValueError: unsupported format character". It must
    # be escaped as '%%'.
    parser.add_argument('--force-send-zeros', action='store_true', default=None,
                        help="Send 0%% full for mounts, useful for clearing existing bad alerts")
    return parser.parse_args(args)
def filter_out_key_name_chars(metric_dict, filesystem_filter):
    """Return a copy of *metric_dict* with *filesystem_filter* stripped from
    each key (e.g. drop the 'filesys.full.' metric-name prefix).

    Portability fix: uses ``.items()`` instead of the Python-2-only
    ``.iteritems()``; behavior is identical under Python 2 as well.
    """
    return {key.replace(filesystem_filter, ''): value
            for (key, value) in metric_dict.items()}
def filter_out_container_root(metric_dict):
    """Return a copy of *metric_dict* without the docker container-root
    filesystem entries (/dev/mapper/docker-<maj>:<min>-<ino>-<id>).

    Portability fix: uses ``.items()`` instead of the Python-2-only
    ``.iteritems()``; behavior is identical under Python 2 as well.
    """
    container_root_regex = r'^/dev/mapper/docker-\d+:\d+-\d+-[0-9a-f]+$'
    return {key: value
            for (key, value) in metric_dict.items()
            if not re.match(container_root_regex, key)}
def filter_out_customer_pv_filesystems(metric_dict):
    """Remove customer PV-backed devices from *metric_dict*.

    Filters two-letter xvd devices (``/dev/xvdaa``..) and NVMe devices
    numbered 2-9 or with two or more digits, keeping system devices such as
    ``/dev/xvda`` and ``/dev/nvme0n1``/``/dev/nvme1n1``.

    Fixes: raw string for the pattern (the plain-string ``\\d`` escapes are
    invalid escape sequences in modern Python), and ``.items()`` instead of
    the Python-2-only ``.iteritems()``.
    """
    pattern = re.compile(r"^/dev/(?:xvd[a-z]{2}|nvme(?:[2-9].*|\d{2,}.*))$")
    return {device: value
            for (device, value) in metric_dict.items()
            if not pattern.match(device)}
def zero_mount_percentages(metric_dict):
    """Return a copy of *metric_dict* with every value forced to 0 (0% used).

    Iterates keys directly (the original values are unused), replacing the
    Python-2-only ``.iteritems()`` so this also runs under Python 3.
    """
    return {mount: 0 for mount in metric_dict}
def main():
    """ Main function to run the check.

    Gathers filesystem fullness and inode-usage metrics via pminfo, applies
    the optional filters/zeroing from the CLI flags, and ships both the
    dynamic-item discovery keys and the per-filesystem values through
    MetricSender.
    """
    args = parse_args()
    metric_sender = MetricSender(verbose=args.verbose, debug=args.debug)

    filesys_full_metric = ['filesys.full']
    filesys_inode_derived_metrics = {'filesys.inodes.pused' :
                                     'filesys.usedfiles / (filesys.usedfiles + filesys.freefiles) * 100'
                                    }
    discovery_key_fs = 'disc.filesys'
    item_prototype_macro_fs = '#OSO_FILESYS'
    item_prototype_key_full = 'disc.filesys.full'
    item_prototype_key_inode = 'disc.filesys.inodes.pused'

    # Get the disk space
    filesys_full_metrics = pminfo.get_metrics(filesys_full_metric)
    filtered_filesys_metrics = filter_out_key_name_chars(filesys_full_metrics, 'filesys.full.')
    filtered_filesys_metrics = filter_out_container_root(filtered_filesys_metrics)
    if args.filter_pod_pv:
        filtered_filesys_metrics = filter_out_customer_pv_filesystems(filtered_filesys_metrics)
    if args.force_send_zeros:
        filtered_filesys_metrics = zero_mount_percentages(filtered_filesys_metrics)

    metric_sender.add_dynamic_metric(discovery_key_fs, item_prototype_macro_fs, filtered_filesys_metrics.keys())
    for filesys_name, filesys_full in filtered_filesys_metrics.iteritems():
        metric_sender.add_metric({'%s[%s]' % (item_prototype_key_full, filesys_name): filesys_full})

    # Get filesytem inode metrics, with the same filtering as above.
    filesys_inode_metrics = pminfo.get_metrics(derived_metrics=filesys_inode_derived_metrics)
    filtered_filesys_inode_metrics = filter_out_key_name_chars(filesys_inode_metrics, 'filesys.inodes.pused.')
    filtered_filesys_inode_metrics = filter_out_container_root(filtered_filesys_inode_metrics)
    if args.filter_pod_pv:
        filtered_filesys_inode_metrics = filter_out_customer_pv_filesystems(filtered_filesys_inode_metrics)
    if args.force_send_zeros:
        filtered_filesys_inode_metrics = zero_mount_percentages(filtered_filesys_inode_metrics)
    for filesys_name, filesys_inodes in filtered_filesys_inode_metrics.iteritems():
        metric_sender.add_metric({'%s[%s]' % (item_prototype_key_inode, filesys_name): filesys_inodes})

    metric_sender.send_metrics()

if __name__ == '__main__':
    main()
| blrm/openshift-tools | scripts/monitoring/cron-send-filesystem-metrics.py | Python | apache-2.0 | 5,045 |
#!/usr/bin/env python3
'''
Command line wallpaper selection for an arbitrary number of screens.
Settings contained in ~/.wallpaper.json
'''
import json, os, subprocess, sys
def load_data():
    """Return the settings dict from ~/.wallpaper.json.

    If the file does not exist yet, write a default configuration and
    load it again.
    """
    config_path = os.path.join(os.path.expanduser('~'), '.wallpaper.json')
    try:
        with open(config_path, 'r') as jfile:
            return json.load(jfile)
    except FileNotFoundError:
        # First run: persist a default config, then retry the load.
        save_data({
            'active': [],
            'command': 'feh --bg-fill',
            'wallpapers': {},
        })
        return load_data()
def save_data(data):
    """Serialize the settings dict *data* to ~/.wallpaper.json."""
    config_path = os.path.join(os.path.expanduser('~'), '.wallpaper.json')
    with open(config_path, 'w') as jfile:
        json.dump(data, jfile)
def apply_wallpaper(screen, data):
    """Run the configured wallpaper command for X display :0.<screen>.

    Uses the wallpaper name stored in data['active'][screen] to look up
    the image path in data['wallpapers'].

    NOTE(review): the command line is assembled as a single shell string
    (shell=True); a path containing double quotes or shell metacharacters
    would break or be interpreted by the shell. Fine for a trusted local
    config file, but worth hardening if input ever comes from elsewhere.
    """
    subprocess.call(
        'DISPLAY=:0.' + format(screen) + ' ' + data['command'] + ' "' +
        data['wallpapers'][data['active'][screen]] + '"',
        shell=True)
def set_wallpaper(screen, wallpaper, data):
    """Record *wallpaper* as the active wallpaper for *screen* and save.

    Bug fix: the original body read sys.argv[2] everywhere instead of the
    *wallpaper* parameter, which broke the 'set' subcommand (run() passes
    the wallpaper name as sys.argv[3] in that branch) and coupled the
    function to the command line.
    """
    if wallpaper in data['wallpapers']:
        if len(data['active']) > screen:
            # Screen already has an entry: replace it.
            data['active'][screen] = wallpaper
        else:
            # New screen slot: append. (Assumes screens are set in order;
            # matches the original behavior.)
            data['active'].append(wallpaper)
        save_data(data)
    else:
        print('Error: wallpaper ' + wallpaper + ' does not exist.')
def add_wallpaper(label, wallpaper, data):
    """Register *wallpaper* under *label*, storing its absolute path."""
    resolved = os.path.abspath(wallpaper)
    data['wallpapers'][label] = resolved
    save_data(data)
def del_wallpaper(label, data):
    """Remove the wallpaper registered under *label* and save.

    Bug fix: the original except clause printed an undefined name
    ``wallpaper`` (the parameter is ``label``), so a missing label raised
    NameError instead of printing the intended error message.
    """
    try:
        del data['wallpapers'][label]
        save_data(data)
    except KeyError:
        print('Error: wallpaper ' + label + ' does not exist.')
def rename_wallpaper(old, new, data):
    """Rename the wallpaper entry *old* to *new* and save.

    Robustness fix: the original crashed with an uncaught KeyError when
    *old* did not exist; now it prints an error message, consistent with
    the other subcommand handlers in this module.
    """
    try:
        path = data['wallpapers'][old]
    except KeyError:
        print('Error: wallpaper ' + old + ' does not exist.')
        return
    add_wallpaper(new, path, data)
    del_wallpaper(old, data)
def output_wallpaper(wallpaper, data):
    """Print the stored path for *wallpaper*, or an error if unknown."""
    path = data['wallpapers'].get(wallpaper)
    if path is None:
        print('Error: wallpaper ' + wallpaper + ' does not exist.')
    else:
        print(path)
def print_help(data):
    """Print CLI usage text (*data* is accepted for dispatch symmetry
    with the other handlers but is not used)."""
    print('''# wallpaper.py
Command line wallpaper management, with multidesktop support.
To set a screen's wallpaper,
screen# wallpaper_name
omit wallpaper_name to apply an already set wallpaper.
add
wallpaper_name path
del
wallpaper_name
rename
old_name new_name
out
wallpaper_name
list
''')
def run():
    """Dispatch on sys.argv: apply/set a screen's wallpaper when the
    first argument is numeric, otherwise run a management subcommand
    (set / add / del / rename / out / list); anything else prints help."""
    data = load_data()
    if len(sys.argv) > 1:
        # A leading digit means "screen number": optionally record a new
        # wallpaper for it, then (re)apply whatever is active.
        if sys.argv[1][0].isdigit():
            screen = int(sys.argv[1])
            if len(sys.argv) > 2: set_wallpaper(screen, sys.argv[2], data)
            apply_wallpaper(screen, data)
        elif sys.argv[1] == 'set':
            # 'set <screen> <wallpaper>' records without applying.
            if sys.argv[2].isdigit():
                set_wallpaper(int(sys.argv[2]), sys.argv[3], data)
        elif sys.argv[1] == 'add':
            if len(sys.argv) > 2:
                add_wallpaper(sys.argv[2], sys.argv[3], data)
        elif sys.argv[1] == 'del':
            if len(sys.argv) > 2:
                del_wallpaper(sys.argv[2], data)
        elif sys.argv[1] == 'rename':
            if len(sys.argv) > 3:
                rename_wallpaper(sys.argv[2], sys.argv[3], data)
        elif sys.argv[1] == 'out':
            if len(sys.argv) > 2:
                output_wallpaper(sys.argv[2], data)
        elif sys.argv[1] == 'list':
            for wallpaper in data['wallpapers']:
                print(wallpaper + ': ' + data['wallpapers'][wallpaper] + '\n')
        else: print_help(data)
    else: print_help(data)
# Module runs unconditionally as a script.
run()
| PrincessTeruko/wallpaper.py | wallpaper.py | Python | mit | 2,946 |
from __future__ import unicode_literals
from django.contrib.contenttypes.models import ContentType
from django.core.exceptions import ObjectDoesNotExist
from django.db import connection
from django.db.models import Prefetch
from django.db.models.query import get_prefetcher
from django.test import TestCase, override_settings
from django.utils import six
from django.utils.encoding import force_text
from .models import (
Author, Author2, AuthorAddress, AuthorWithAge, Bio, Book, Bookmark,
BookReview, BookWithYear, Comment, Department, Employee, FavoriteAuthors,
House, LessonEntry, Person, Qualification, Reader, Room, TaggedItem,
Teacher, WordEntry,
)
class PrefetchRelatedTests(TestCase):
    """Core prefetch_related() behavior: forward/reverse m2m and FK
    traversal, exact query-count guarantees (via assertNumQueries),
    chained lookups, and error reporting for invalid lookups."""
    def setUp(self):
        # Fixture: four books, four authors (m2m via Book.authors),
        # two readers (m2m via Reader.books_read).
        self.book1 = Book.objects.create(title="Poems")
        self.book2 = Book.objects.create(title="Jane Eyre")
        self.book3 = Book.objects.create(title="Wuthering Heights")
        self.book4 = Book.objects.create(title="Sense and Sensibility")
        self.author1 = Author.objects.create(name="Charlotte",
                                             first_book=self.book1)
        self.author2 = Author.objects.create(name="Anne",
                                             first_book=self.book1)
        self.author3 = Author.objects.create(name="Emily",
                                             first_book=self.book1)
        self.author4 = Author.objects.create(name="Jane",
                                             first_book=self.book4)
        self.book1.authors.add(self.author1, self.author2, self.author3)
        self.book2.authors.add(self.author1)
        self.book3.authors.add(self.author3)
        self.book4.authors.add(self.author4)
        self.reader1 = Reader.objects.create(name="Amy")
        self.reader2 = Reader.objects.create(name="Belinda")
        self.reader1.books_read.add(self.book1, self.book4)
        self.reader2.books_read.add(self.book2, self.book4)
    def test_m2m_forward(self):
        with self.assertNumQueries(2):
            lists = [list(b.authors.all()) for b in Book.objects.prefetch_related('authors')]
        normal_lists = [list(b.authors.all()) for b in Book.objects.all()]
        self.assertEqual(lists, normal_lists)
    def test_m2m_reverse(self):
        with self.assertNumQueries(2):
            lists = [list(a.books.all()) for a in Author.objects.prefetch_related('books')]
        normal_lists = [list(a.books.all()) for a in Author.objects.all()]
        self.assertEqual(lists, normal_lists)
    def test_foreignkey_forward(self):
        with self.assertNumQueries(2):
            books = [a.first_book for a in Author.objects.prefetch_related('first_book')]
        normal_books = [a.first_book for a in Author.objects.all()]
        self.assertEqual(books, normal_books)
    def test_foreignkey_reverse(self):
        with self.assertNumQueries(2):
            [list(b.first_time_authors.all())
             for b in Book.objects.prefetch_related('first_time_authors')]
        self.assertQuerysetEqual(self.book2.authors.all(), ["<Author: Charlotte>"])
    def test_onetoone_reverse_no_match(self):
        # Regression for #17439
        with self.assertNumQueries(2):
            book = Book.objects.prefetch_related('bookwithyear').all()[0]
        # The prefetched "no match" result must not trigger extra queries.
        with self.assertNumQueries(0):
            with self.assertRaises(BookWithYear.DoesNotExist):
                book.bookwithyear
    def test_survives_clone(self):
        with self.assertNumQueries(2):
            [list(b.first_time_authors.all())
             for b in Book.objects.prefetch_related('first_time_authors').exclude(id=1000)]
    def test_len(self):
        with self.assertNumQueries(2):
            qs = Book.objects.prefetch_related('first_time_authors')
            len(qs)
            [list(b.first_time_authors.all()) for b in qs]
    def test_bool(self):
        with self.assertNumQueries(2):
            qs = Book.objects.prefetch_related('first_time_authors')
            bool(qs)
            [list(b.first_time_authors.all()) for b in qs]
    def test_count(self):
        with self.assertNumQueries(2):
            qs = Book.objects.prefetch_related('first_time_authors')
            [b.first_time_authors.count() for b in qs]
    def test_exists(self):
        with self.assertNumQueries(2):
            qs = Book.objects.prefetch_related('first_time_authors')
            [b.first_time_authors.exists() for b in qs]
    def test_in_and_prefetch_related(self):
        """
        Regression test for #20242 - QuerySet "in" didn't work the first time
        when using prefetch_related. This was fixed by the removal of chunked
        reads from QuerySet iteration in
        70679243d1786e03557c28929f9762a119e3ac14.
        """
        qs = Book.objects.prefetch_related('first_time_authors')
        self.assertIn(qs[0], qs)
    def test_clear(self):
        """
        Test that we can clear the behavior by calling prefetch_related()
        """
        with self.assertNumQueries(5):
            with_prefetch = Author.objects.prefetch_related('books')
            without_prefetch = with_prefetch.prefetch_related(None)
            [list(a.books.all()) for a in without_prefetch]
    def test_m2m_then_m2m(self):
        """
        Test we can follow a m2m and another m2m
        """
        with self.assertNumQueries(3):
            qs = Author.objects.prefetch_related('books__read_by')
            lists = [[[six.text_type(r) for r in b.read_by.all()]
                      for b in a.books.all()]
                     for a in qs]
            self.assertEqual(lists,
            [
                [["Amy"], ["Belinda"]],  # Charlotte - Poems, Jane Eyre
                [["Amy"]],               # Anne - Poems
                [["Amy"], []],           # Emily - Poems, Wuthering Heights
                [["Amy", "Belinda"]],    # Jane - Sense and Sense
            ])
    def test_overriding_prefetch(self):
        with self.assertNumQueries(3):
            qs = Author.objects.prefetch_related('books', 'books__read_by')
            lists = [[[six.text_type(r) for r in b.read_by.all()]
                      for b in a.books.all()]
                     for a in qs]
            self.assertEqual(lists,
            [
                [["Amy"], ["Belinda"]],  # Charlotte - Poems, Jane Eyre
                [["Amy"]],               # Anne - Poems
                [["Amy"], []],           # Emily - Poems, Wuthering Heights
                [["Amy", "Belinda"]],    # Jane - Sense and Sense
            ])
        with self.assertNumQueries(3):
            qs = Author.objects.prefetch_related('books__read_by', 'books')
            lists = [[[six.text_type(r) for r in b.read_by.all()]
                      for b in a.books.all()]
                     for a in qs]
            self.assertEqual(lists,
            [
                [["Amy"], ["Belinda"]],  # Charlotte - Poems, Jane Eyre
                [["Amy"]],               # Anne - Poems
                [["Amy"], []],           # Emily - Poems, Wuthering Heights
                [["Amy", "Belinda"]],    # Jane - Sense and Sense
            ])
    def test_get(self):
        """
        Test that objects retrieved with .get() get the prefetch behavior.
        """
        # Need a double
        with self.assertNumQueries(3):
            author = Author.objects.prefetch_related('books__read_by').get(name="Charlotte")
            lists = [[six.text_type(r) for r in b.read_by.all()]
                     for b in author.books.all()]
            self.assertEqual(lists, [["Amy"], ["Belinda"]])  # Poems, Jane Eyre
    def test_foreign_key_then_m2m(self):
        """
        Test we can follow an m2m relation after a relation like ForeignKey
        that doesn't have many objects
        """
        with self.assertNumQueries(2):
            qs = Author.objects.select_related('first_book').prefetch_related('first_book__read_by')
            lists = [[six.text_type(r) for r in a.first_book.read_by.all()]
                     for a in qs]
            self.assertEqual(lists, [["Amy"],
                                     ["Amy"],
                                     ["Amy"],
                                     ["Amy", "Belinda"]])
    def test_reverse_one_to_one_then_m2m(self):
        """
        Test that we can follow a m2m relation after going through
        the select_related reverse of an o2o.
        """
        qs = Author.objects.prefetch_related('bio__books').select_related('bio')
        with self.assertNumQueries(1):
            list(qs.all())
        Bio.objects.create(author=self.author1)
        with self.assertNumQueries(2):
            list(qs.all())
    def test_attribute_error(self):
        qs = Reader.objects.all().prefetch_related('books_read__xyz')
        with self.assertRaises(AttributeError) as cm:
            list(qs)
        self.assertIn('prefetch_related', str(cm.exception))
    def test_invalid_final_lookup(self):
        qs = Book.objects.prefetch_related('authors__name')
        with self.assertRaises(ValueError) as cm:
            list(qs)
        self.assertIn('prefetch_related', str(cm.exception))
        self.assertIn("name", str(cm.exception))
    def test_forward_m2m_to_attr_conflict(self):
        msg = 'to_attr=authors conflicts with a field on the Book model.'
        authors = Author.objects.all()
        with self.assertRaisesMessage(ValueError, msg):
            list(Book.objects.prefetch_related(
                Prefetch('authors', queryset=authors, to_attr='authors'),
            ))
        # Without the ValueError, an author was deleted due to the implicit
        # save of the relation assignment.
        self.assertEqual(self.book1.authors.count(), 3)
    def test_reverse_m2m_to_attr_conflict(self):
        msg = 'to_attr=books conflicts with a field on the Author model.'
        poems = Book.objects.filter(title='Poems')
        with self.assertRaisesMessage(ValueError, msg):
            list(Author.objects.prefetch_related(
                Prefetch('books', queryset=poems, to_attr='books'),
            ))
        # Without the ValueError, a book was deleted due to the implicit
        # save of reverse relation assignment.
        self.assertEqual(self.author1.books.count(), 2)
class CustomPrefetchTests(TestCase):
    """Tests for Prefetch() objects: custom querysets, to_attr targets,
    nested/chained prefetches, GFK traversal, and ambiguity detection."""
    @classmethod
    def traverse_qs(cls, obj_iter, path):
        """
        Helper method that returns a list containing a list of the objects in the
        obj_iter. Then for each object in the obj_iter, the path will be
        recursively travelled and the found objects are added to the return value.
        """
        ret_val = []
        # Normalize: managers/querysets expose .all(); scalars become 1-lists.
        if hasattr(obj_iter, 'all'):
            obj_iter = obj_iter.all()
        try:
            iter(obj_iter)
        except TypeError:
            obj_iter = [obj_iter]
        for obj in obj_iter:
            rel_objs = []
            for part in path:
                if not part:
                    continue
                try:
                    related = getattr(obj, part[0])
                except ObjectDoesNotExist:
                    continue
                if related is not None:
                    rel_objs.extend(cls.traverse_qs(related, [part[1:]]))
            ret_val.append((obj, rel_objs))
        return ret_val
    def setUp(self):
        self.person1 = Person.objects.create(name="Joe")
        self.person2 = Person.objects.create(name="Mary")
        # Set main_room for each house before creating the next one for
        # databases where supports_nullable_unique_constraints is False.
        self.house1 = House.objects.create(name='House 1', address="123 Main St", owner=self.person1)
        self.room1_1 = Room.objects.create(name="Dining room", house=self.house1)
        self.room1_2 = Room.objects.create(name="Lounge", house=self.house1)
        self.room1_3 = Room.objects.create(name="Kitchen", house=self.house1)
        self.house1.main_room = self.room1_1
        self.house1.save()
        self.person1.houses.add(self.house1)
        self.house2 = House.objects.create(name='House 2', address="45 Side St", owner=self.person1)
        self.room2_1 = Room.objects.create(name="Dining room", house=self.house2)
        self.room2_2 = Room.objects.create(name="Lounge", house=self.house2)
        self.room2_3 = Room.objects.create(name="Kitchen", house=self.house2)
        self.house2.main_room = self.room2_1
        self.house2.save()
        self.person1.houses.add(self.house2)
        self.house3 = House.objects.create(name='House 3', address="6 Downing St", owner=self.person2)
        self.room3_1 = Room.objects.create(name="Dining room", house=self.house3)
        self.room3_2 = Room.objects.create(name="Lounge", house=self.house3)
        self.room3_3 = Room.objects.create(name="Kitchen", house=self.house3)
        self.house3.main_room = self.room3_1
        self.house3.save()
        self.person2.houses.add(self.house3)
        self.house4 = House.objects.create(name='house 4', address="7 Regents St", owner=self.person2)
        self.room4_1 = Room.objects.create(name="Dining room", house=self.house4)
        self.room4_2 = Room.objects.create(name="Lounge", house=self.house4)
        self.room4_3 = Room.objects.create(name="Kitchen", house=self.house4)
        self.house4.main_room = self.room4_1
        self.house4.save()
        self.person2.houses.add(self.house4)
    def test_traverse_qs(self):
        qs = Person.objects.prefetch_related('houses')
        # NOTE: the trailing comma makes this a 1-tuple; the assertEqual
        # below wraps the other side in a tuple to compensate.
        related_objs_normal = [list(p.houses.all()) for p in qs],
        related_objs_from_traverse = [[inner[0] for inner in o[1]]
                                      for o in self.traverse_qs(qs, [['houses']])]
        self.assertEqual(related_objs_normal, (related_objs_from_traverse,))
    def test_ambiguous(self):
        # Ambiguous: Lookup was already seen with a different queryset.
        with self.assertRaises(ValueError):
            self.traverse_qs(
                Person.objects.prefetch_related('houses__rooms', Prefetch('houses', queryset=House.objects.all())),
                [['houses', 'rooms']]
            )
        # Ambiguous: Lookup houses_lst doesn't yet exist when performing houses_lst__rooms.
        with self.assertRaises(AttributeError):
            self.traverse_qs(
                Person.objects.prefetch_related('houses_lst__rooms', Prefetch('houses', queryset=House.objects.all(), to_attr='houses_lst')),
                [['houses', 'rooms']]
            )
        # Not ambiguous.
        self.traverse_qs(
            Person.objects.prefetch_related('houses__rooms', 'houses'),
            [['houses', 'rooms']]
        )
        self.traverse_qs(
            Person.objects.prefetch_related('houses__rooms', Prefetch('houses', queryset=House.objects.all(), to_attr='houses_lst')),
            [['houses', 'rooms']]
        )
    def test_m2m(self):
        # Control lookups.
        with self.assertNumQueries(2):
            lst1 = self.traverse_qs(
                Person.objects.prefetch_related('houses'),
                [['houses']]
            )
        # Test lookups.
        with self.assertNumQueries(2):
            lst2 = self.traverse_qs(
                Person.objects.prefetch_related(Prefetch('houses')),
                [['houses']]
            )
        self.assertEqual(lst1, lst2)
        with self.assertNumQueries(2):
            lst2 = self.traverse_qs(
                Person.objects.prefetch_related(Prefetch('houses', to_attr='houses_lst')),
                [['houses_lst']]
            )
        self.assertEqual(lst1, lst2)
    def test_reverse_m2m(self):
        # Control lookups.
        with self.assertNumQueries(2):
            lst1 = self.traverse_qs(
                House.objects.prefetch_related('occupants'),
                [['occupants']]
            )
        # Test lookups.
        with self.assertNumQueries(2):
            lst2 = self.traverse_qs(
                House.objects.prefetch_related(Prefetch('occupants')),
                [['occupants']]
            )
        self.assertEqual(lst1, lst2)
        with self.assertNumQueries(2):
            lst2 = self.traverse_qs(
                House.objects.prefetch_related(Prefetch('occupants', to_attr='occupants_lst')),
                [['occupants_lst']]
            )
        self.assertEqual(lst1, lst2)
    def test_m2m_through_fk(self):
        # Control lookups.
        with self.assertNumQueries(3):
            lst1 = self.traverse_qs(
                Room.objects.prefetch_related('house__occupants'),
                [['house', 'occupants']]
            )
        # Test lookups.
        with self.assertNumQueries(3):
            lst2 = self.traverse_qs(
                Room.objects.prefetch_related(Prefetch('house__occupants')),
                [['house', 'occupants']]
            )
        self.assertEqual(lst1, lst2)
        with self.assertNumQueries(3):
            lst2 = self.traverse_qs(
                Room.objects.prefetch_related(Prefetch('house__occupants', to_attr='occupants_lst')),
                [['house', 'occupants_lst']]
            )
        self.assertEqual(lst1, lst2)
    def test_m2m_through_gfk(self):
        TaggedItem.objects.create(tag="houses", content_object=self.house1)
        TaggedItem.objects.create(tag="houses", content_object=self.house2)
        # Control lookups.
        with self.assertNumQueries(3):
            lst1 = self.traverse_qs(
                TaggedItem.objects.filter(tag='houses').prefetch_related('content_object__rooms'),
                [['content_object', 'rooms']]
            )
        # Test lookups.
        with self.assertNumQueries(3):
            lst2 = self.traverse_qs(
                TaggedItem.objects.prefetch_related(
                    Prefetch('content_object'),
                    Prefetch('content_object__rooms', to_attr='rooms_lst')
                ),
                [['content_object', 'rooms_lst']]
            )
        self.assertEqual(lst1, lst2)
    def test_o2m_through_m2m(self):
        # Control lookups.
        with self.assertNumQueries(3):
            lst1 = self.traverse_qs(
                Person.objects.prefetch_related('houses', 'houses__rooms'),
                [['houses', 'rooms']]
            )
        # Test lookups.
        with self.assertNumQueries(3):
            lst2 = self.traverse_qs(
                Person.objects.prefetch_related(Prefetch('houses'), 'houses__rooms'),
                [['houses', 'rooms']]
            )
        self.assertEqual(lst1, lst2)
        with self.assertNumQueries(3):
            lst2 = self.traverse_qs(
                Person.objects.prefetch_related(Prefetch('houses'), Prefetch('houses__rooms')),
                [['houses', 'rooms']]
            )
        self.assertEqual(lst1, lst2)
        with self.assertNumQueries(3):
            lst2 = self.traverse_qs(
                Person.objects.prefetch_related(Prefetch('houses', to_attr='houses_lst'), 'houses_lst__rooms'),
                [['houses_lst', 'rooms']]
            )
        self.assertEqual(lst1, lst2)
        with self.assertNumQueries(3):
            lst2 = self.traverse_qs(
                Person.objects.prefetch_related(
                    Prefetch('houses', to_attr='houses_lst'),
                    Prefetch('houses_lst__rooms', to_attr='rooms_lst')
                ),
                [['houses_lst', 'rooms_lst']]
            )
        self.assertEqual(lst1, lst2)
    def test_generic_rel(self):
        bookmark = Bookmark.objects.create(url='http://www.djangoproject.com/')
        TaggedItem.objects.create(content_object=bookmark, tag='django')
        TaggedItem.objects.create(content_object=bookmark, favorite=bookmark, tag='python')
        # Control lookups.
        with self.assertNumQueries(4):
            lst1 = self.traverse_qs(
                Bookmark.objects.prefetch_related('tags', 'tags__content_object', 'favorite_tags'),
                [['tags', 'content_object'], ['favorite_tags']]
            )
        # Test lookups.
        with self.assertNumQueries(4):
            lst2 = self.traverse_qs(
                Bookmark.objects.prefetch_related(
                    Prefetch('tags', to_attr='tags_lst'),
                    Prefetch('tags_lst__content_object'),
                    Prefetch('favorite_tags'),
                ),
                [['tags_lst', 'content_object'], ['favorite_tags']]
            )
        self.assertEqual(lst1, lst2)
    def test_traverse_single_item_property(self):
        # Control lookups.
        with self.assertNumQueries(5):
            lst1 = self.traverse_qs(
                Person.objects.prefetch_related(
                    'houses__rooms',
                    'primary_house__occupants__houses',
                ),
                [['primary_house', 'occupants', 'houses']]
            )
        # Test lookups.
        with self.assertNumQueries(5):
            lst2 = self.traverse_qs(
                Person.objects.prefetch_related(
                    'houses__rooms',
                    Prefetch('primary_house__occupants', to_attr='occupants_lst'),
                    'primary_house__occupants_lst__houses',
                ),
                [['primary_house', 'occupants_lst', 'houses']]
            )
        self.assertEqual(lst1, lst2)
    def test_traverse_multiple_items_property(self):
        # Control lookups.
        with self.assertNumQueries(4):
            lst1 = self.traverse_qs(
                Person.objects.prefetch_related(
                    'houses',
                    'all_houses__occupants__houses',
                ),
                [['all_houses', 'occupants', 'houses']]
            )
        # Test lookups.
        with self.assertNumQueries(4):
            lst2 = self.traverse_qs(
                Person.objects.prefetch_related(
                    'houses',
                    Prefetch('all_houses__occupants', to_attr='occupants_lst'),
                    'all_houses__occupants_lst__houses',
                ),
                [['all_houses', 'occupants_lst', 'houses']]
            )
        self.assertEqual(lst1, lst2)
    def test_custom_qs(self):
        # Test basic.
        with self.assertNumQueries(2):
            lst1 = list(Person.objects.prefetch_related('houses'))
        with self.assertNumQueries(2):
            lst2 = list(Person.objects.prefetch_related(
                Prefetch('houses', queryset=House.objects.all(), to_attr='houses_lst')))
        self.assertEqual(
            self.traverse_qs(lst1, [['houses']]),
            self.traverse_qs(lst2, [['houses_lst']])
        )
        # Test queryset filtering.
        with self.assertNumQueries(2):
            lst2 = list(Person.objects.prefetch_related(
                Prefetch('houses', queryset=House.objects.filter(pk__in=[self.house1.pk, self.house3.pk]), to_attr='houses_lst')))
        self.assertEqual(len(lst2[0].houses_lst), 1)
        self.assertEqual(lst2[0].houses_lst[0], self.house1)
        self.assertEqual(len(lst2[1].houses_lst), 1)
        self.assertEqual(lst2[1].houses_lst[0], self.house3)
        # Test flattened.
        with self.assertNumQueries(3):
            lst1 = list(Person.objects.prefetch_related('houses__rooms'))
        with self.assertNumQueries(3):
            lst2 = list(Person.objects.prefetch_related(
                Prefetch('houses__rooms', queryset=Room.objects.all(), to_attr='rooms_lst')))
        self.assertEqual(
            self.traverse_qs(lst1, [['houses', 'rooms']]),
            self.traverse_qs(lst2, [['houses', 'rooms_lst']])
        )
        # Test inner select_related.
        with self.assertNumQueries(3):
            lst1 = list(Person.objects.prefetch_related('houses__owner'))
        with self.assertNumQueries(2):
            lst2 = list(Person.objects.prefetch_related(
                Prefetch('houses', queryset=House.objects.select_related('owner'))))
        self.assertEqual(
            self.traverse_qs(lst1, [['houses', 'owner']]),
            self.traverse_qs(lst2, [['houses', 'owner']])
        )
        # Test inner prefetch.
        inner_rooms_qs = Room.objects.filter(pk__in=[self.room1_1.pk, self.room1_2.pk])
        houses_qs_prf = House.objects.prefetch_related(
            Prefetch('rooms', queryset=inner_rooms_qs, to_attr='rooms_lst'))
        with self.assertNumQueries(4):
            lst2 = list(Person.objects.prefetch_related(
                Prefetch('houses', queryset=houses_qs_prf.filter(pk=self.house1.pk), to_attr='houses_lst'),
                Prefetch('houses_lst__rooms_lst__main_room_of')
            ))
        self.assertEqual(len(lst2[0].houses_lst[0].rooms_lst), 2)
        self.assertEqual(lst2[0].houses_lst[0].rooms_lst[0], self.room1_1)
        self.assertEqual(lst2[0].houses_lst[0].rooms_lst[1], self.room1_2)
        self.assertEqual(lst2[0].houses_lst[0].rooms_lst[0].main_room_of, self.house1)
        self.assertEqual(len(lst2[1].houses_lst), 0)
        # Test ReverseSingleRelatedObjectDescriptor.
        houses = House.objects.select_related('owner')
        with self.assertNumQueries(6):
            rooms = Room.objects.all().prefetch_related('house')
            lst1 = self.traverse_qs(rooms, [['house', 'owner']])
        with self.assertNumQueries(2):
            rooms = Room.objects.all().prefetch_related(Prefetch('house', queryset=houses.all()))
            lst2 = self.traverse_qs(rooms, [['house', 'owner']])
        self.assertEqual(lst1, lst2)
        with self.assertNumQueries(2):
            houses = House.objects.select_related('owner')
            rooms = Room.objects.all().prefetch_related(Prefetch('house', queryset=houses.all(), to_attr='house_attr'))
            lst2 = self.traverse_qs(rooms, [['house_attr', 'owner']])
        self.assertEqual(lst1, lst2)
        room = Room.objects.all().prefetch_related(Prefetch('house', queryset=houses.filter(address='DoesNotExist'))).first()
        with self.assertRaises(ObjectDoesNotExist):
            getattr(room, 'house')
        room = Room.objects.all().prefetch_related(Prefetch('house', queryset=houses.filter(address='DoesNotExist'), to_attr='house_attr')).first()
        self.assertIsNone(room.house_attr)
        rooms = Room.objects.all().prefetch_related(Prefetch('house', queryset=House.objects.only('name')))
        with self.assertNumQueries(2):
            getattr(rooms.first().house, 'name')
        with self.assertNumQueries(3):
            getattr(rooms.first().house, 'address')
        # Test SingleRelatedObjectDescriptor.
        houses = House.objects.select_related('owner')
        with self.assertNumQueries(6):
            rooms = Room.objects.all().prefetch_related('main_room_of')
            lst1 = self.traverse_qs(rooms, [['main_room_of', 'owner']])
        with self.assertNumQueries(2):
            rooms = Room.objects.all().prefetch_related(Prefetch('main_room_of', queryset=houses.all()))
            lst2 = self.traverse_qs(rooms, [['main_room_of', 'owner']])
        self.assertEqual(lst1, lst2)
        with self.assertNumQueries(2):
            rooms = list(Room.objects.all().prefetch_related(Prefetch('main_room_of', queryset=houses.all(), to_attr='main_room_of_attr')))
            lst2 = self.traverse_qs(rooms, [['main_room_of_attr', 'owner']])
        self.assertEqual(lst1, lst2)
        room = Room.objects.filter(main_room_of__isnull=False).prefetch_related(Prefetch('main_room_of', queryset=houses.filter(address='DoesNotExist'))).first()
        with self.assertRaises(ObjectDoesNotExist):
            getattr(room, 'main_room_of')
        room = Room.objects.filter(main_room_of__isnull=False).prefetch_related(Prefetch('main_room_of', queryset=houses.filter(address='DoesNotExist'), to_attr='main_room_of_attr')).first()
        self.assertIsNone(room.main_room_of_attr)
class DefaultManagerTests(TestCase):
    """Ensure prefetch_related() does not let a custom default manager
    trigger one extra query per related object."""
    def setUp(self):
        self.qual1 = Qualification.objects.create(name="BA")
        self.qual2 = Qualification.objects.create(name="BSci")
        self.qual3 = Qualification.objects.create(name="MA")
        self.qual4 = Qualification.objects.create(name="PhD")
        self.teacher1 = Teacher.objects.create(name="Mr Cleese")
        self.teacher2 = Teacher.objects.create(name="Mr Idle")
        self.teacher3 = Teacher.objects.create(name="Mr Chapman")
        self.teacher1.qualifications.add(self.qual1, self.qual2, self.qual3, self.qual4)
        self.teacher2.qualifications.add(self.qual1)
        self.teacher3.qualifications.add(self.qual2)
        self.dept1 = Department.objects.create(name="English")
        self.dept2 = Department.objects.create(name="Physics")
        self.dept1.teachers.add(self.teacher1, self.teacher2)
        self.dept2.teachers.add(self.teacher1, self.teacher3)
    def test_m2m_then_m2m(self):
        with self.assertNumQueries(3):
            # When we prefetch the teachers, and force the query, we don't want
            # the default manager on teachers to immediately get all the related
            # qualifications, since this will do one query per teacher.
            qs = Department.objects.prefetch_related('teachers')
            depts = "".join("%s department: %s\n" %
                            (dept.name, ", ".join(six.text_type(t) for t in dept.teachers.all()))
                            for dept in qs)
            self.assertEqual(depts,
                             "English department: Mr Cleese (BA, BSci, MA, PhD), Mr Idle (BA)\n"
                             "Physics department: Mr Cleese (BA, BSci, MA, PhD), Mr Chapman (BSci)\n")
class GenericRelationTests(TestCase):
    """prefetch_related() across GenericForeignKey / GenericRelation,
    including non-integer PKs, nullable GFKs, and traversal beyond the
    content_object."""
    def setUp(self):
        book1 = Book.objects.create(title="Winnie the Pooh")
        book2 = Book.objects.create(title="Do you like green eggs and spam?")
        book3 = Book.objects.create(title="Three Men In A Boat")
        reader1 = Reader.objects.create(name="me")
        reader2 = Reader.objects.create(name="you")
        reader3 = Reader.objects.create(name="someone")
        book1.read_by.add(reader1, reader2)
        book2.read_by.add(reader2)
        book3.read_by.add(reader3)
        self.book1, self.book2, self.book3 = book1, book2, book3
        self.reader1, self.reader2, self.reader3 = reader1, reader2, reader3
    def test_prefetch_GFK(self):
        TaggedItem.objects.create(tag="awesome", content_object=self.book1)
        TaggedItem.objects.create(tag="great", content_object=self.reader1)
        TaggedItem.objects.create(tag="outstanding", content_object=self.book2)
        TaggedItem.objects.create(tag="amazing", content_object=self.reader3)
        # 1 for TaggedItem table, 1 for Book table, 1 for Reader table
        with self.assertNumQueries(3):
            qs = TaggedItem.objects.prefetch_related('content_object')
            list(qs)
    def test_prefetch_GFK_nonint_pk(self):
        Comment.objects.create(comment="awesome", content_object=self.book1)
        # 1 for Comment table, 1 for Book table
        with self.assertNumQueries(2):
            qs = Comment.objects.prefetch_related('content_object')
            [c.content_object for c in qs]
    def test_traverse_GFK(self):
        """
        Test that we can traverse a 'content_object' with prefetch_related() and
        get to related objects on the other side (assuming it is suitably
        filtered)
        """
        TaggedItem.objects.create(tag="awesome", content_object=self.book1)
        TaggedItem.objects.create(tag="awesome", content_object=self.book2)
        TaggedItem.objects.create(tag="awesome", content_object=self.book3)
        TaggedItem.objects.create(tag="awesome", content_object=self.reader1)
        TaggedItem.objects.create(tag="awesome", content_object=self.reader2)
        ct = ContentType.objects.get_for_model(Book)
        # We get 3 queries - 1 for main query, 1 for content_objects since they
        # all use the same table, and 1 for the 'read_by' relation.
        with self.assertNumQueries(3):
            # If we limit to books, we know that they will have 'read_by'
            # attributes, so the following makes sense:
            qs = TaggedItem.objects.filter(content_type=ct, tag='awesome').prefetch_related('content_object__read_by')
            readers_of_awesome_books = {r.name for tag in qs
                                        for r in tag.content_object.read_by.all()}
        self.assertEqual(readers_of_awesome_books, {"me", "you", "someone"})
    def test_nullable_GFK(self):
        TaggedItem.objects.create(tag="awesome", content_object=self.book1,
                                  created_by=self.reader1)
        TaggedItem.objects.create(tag="great", content_object=self.book2)
        TaggedItem.objects.create(tag="rubbish", content_object=self.book3)
        with self.assertNumQueries(2):
            result = [t.created_by for t in TaggedItem.objects.prefetch_related('created_by')]
        self.assertEqual(result,
                         [t.created_by for t in TaggedItem.objects.all()])
    def test_generic_relation(self):
        bookmark = Bookmark.objects.create(url='http://www.djangoproject.com/')
        TaggedItem.objects.create(content_object=bookmark, tag='django')
        TaggedItem.objects.create(content_object=bookmark, tag='python')
        with self.assertNumQueries(2):
            tags = [t.tag for b in Bookmark.objects.prefetch_related('tags')
                    for t in b.tags.all()]
            self.assertEqual(sorted(tags), ["django", "python"])
    def test_charfield_GFK(self):
        b = Bookmark.objects.create(url='http://www.djangoproject.com/')
        TaggedItem.objects.create(content_object=b, tag='django')
        TaggedItem.objects.create(content_object=b, favorite=b, tag='python')
        with self.assertNumQueries(3):
            bookmark = Bookmark.objects.filter(pk=b.pk).prefetch_related('tags', 'favorite_tags')[0]
            self.assertEqual(sorted([i.tag for i in bookmark.tags.all()]), ["django", "python"])
            self.assertEqual([i.tag for i in bookmark.favorite_tags.all()], ["python"])
class MultiTableInheritanceTest(TestCase):
    """prefetch_related() across multi-table-inheritance boundaries:
    FKs to/from child models, m2m on inheriting models, and parent/child
    link traversal."""
    def setUp(self):
        self.book1 = BookWithYear.objects.create(
            title="Poems", published_year=2010)
        self.book2 = BookWithYear.objects.create(
            title="More poems", published_year=2011)
        self.author1 = AuthorWithAge.objects.create(
            name='Jane', first_book=self.book1, age=50)
        self.author2 = AuthorWithAge.objects.create(
            name='Tom', first_book=self.book1, age=49)
        self.author3 = AuthorWithAge.objects.create(
            name='Robert', first_book=self.book2, age=48)
        self.authorAddress = AuthorAddress.objects.create(
            author=self.author1, address='SomeStreet 1')
        self.book2.aged_authors.add(self.author2, self.author3)
        self.br1 = BookReview.objects.create(
            book=self.book1, notes="review book1")
        self.br2 = BookReview.objects.create(
            book=self.book2, notes="review book2")
    def test_foreignkey(self):
        with self.assertNumQueries(2):
            qs = AuthorWithAge.objects.prefetch_related('addresses')
            addresses = [[six.text_type(address) for address in obj.addresses.all()]
                         for obj in qs]
        self.assertEqual(addresses, [[six.text_type(self.authorAddress)], [], []])
    def test_foreignkey_to_inherited(self):
        with self.assertNumQueries(2):
            qs = BookReview.objects.prefetch_related('book')
            titles = [obj.book.title for obj in qs]
        self.assertEqual(titles, ["Poems", "More poems"])
    def test_m2m_to_inheriting_model(self):
        qs = AuthorWithAge.objects.prefetch_related('books_with_year')
        with self.assertNumQueries(2):
            lst = [[six.text_type(book) for book in author.books_with_year.all()]
                   for author in qs]
        qs = AuthorWithAge.objects.all()
        lst2 = [[six.text_type(book) for book in author.books_with_year.all()]
                for author in qs]
        self.assertEqual(lst, lst2)
        qs = BookWithYear.objects.prefetch_related('aged_authors')
        with self.assertNumQueries(2):
            lst = [[six.text_type(author) for author in book.aged_authors.all()]
                   for book in qs]
        qs = BookWithYear.objects.all()
        lst2 = [[six.text_type(author) for author in book.aged_authors.all()]
                for book in qs]
        self.assertEqual(lst, lst2)
    def test_parent_link_prefetch(self):
        with self.assertNumQueries(2):
            [a.author for a in AuthorWithAge.objects.prefetch_related('author')]
    @override_settings(DEBUG=True)
    def test_child_link_prefetch(self):
        with self.assertNumQueries(2):
            l = [a.authorwithage for a in Author.objects.prefetch_related('authorwithage')]
        # Regression for #18090: the prefetching query must include an IN clause.
        # Note that on Oracle the table name is upper case in the generated SQL,
        # thus the .lower() call.
        self.assertIn('authorwithage', connection.queries[-1]['sql'].lower())
        self.assertIn(' IN ', connection.queries[-1]['sql'])
        self.assertEqual(l, [a.authorwithage for a in Author.objects.all()])
class ForeignKeyToFieldTest(TestCase):
    """Tests for prefetch_related over relations that target a non-pk field
    (AuthorAddress.author and FavoriteAuthors both point at Author.name)."""

    def setUp(self):
        self.book = Book.objects.create(title="Poems")
        self.author1 = Author.objects.create(name='Jane', first_book=self.book)
        self.author2 = Author.objects.create(name='Tom', first_book=self.book)
        self.author3 = Author.objects.create(name='Robert', first_book=self.book)
        self.authorAddress = AuthorAddress.objects.create(
            author=self.author1, address='SomeStreet 1'
        )
        # Circular "likes" chain: 1 -> 2 -> 3 -> 1.
        FavoriteAuthors.objects.create(author=self.author1,
                                       likes_author=self.author2)
        FavoriteAuthors.objects.create(author=self.author2,
                                       likes_author=self.author3)
        FavoriteAuthors.objects.create(author=self.author3,
                                       likes_author=self.author1)

    def test_foreignkey(self):
        # Reverse FK keyed on a to_field still prefetches in two queries.
        with self.assertNumQueries(2):
            qs = Author.objects.prefetch_related('addresses')
            addresses = [[six.text_type(address) for address in obj.addresses.all()]
                         for obj in qs]
        self.assertEqual(addresses, [[six.text_type(self.authorAddress)], [], []])

    def test_m2m(self):
        # Both directions of the through-model M2M in a single prefetch:
        # one query for authors + one per prefetched relation = 3 total.
        with self.assertNumQueries(3):
            qs = Author.objects.all().prefetch_related('favorite_authors', 'favors_me')
            favorites = [(
                [six.text_type(i_like) for i_like in author.favorite_authors.all()],
                [six.text_type(likes_me) for likes_me in author.favors_me.all()]
            ) for author in qs]
            self.assertEqual(
                favorites,
                [
                    ([six.text_type(self.author2)], [six.text_type(self.author3)]),
                    ([six.text_type(self.author3)], [six.text_type(self.author1)]),
                    ([six.text_type(self.author1)], [six.text_type(self.author2)])
                ]
            )
class LookupOrderingTest(TestCase):
    """
    Test cases that demonstrate that ordering of lookups is important, and
    ensure it is preserved.
    """

    def setUp(self):
        self.person1 = Person.objects.create(name="Joe")
        self.person2 = Person.objects.create(name="Mary")

        # Set main_room for each house before creating the next one for
        # databases where supports_nullable_unique_constraints is False.

        # Joe: two houses (3 rooms + 2 rooms).
        self.house1 = House.objects.create(address="123 Main St")
        self.room1_1 = Room.objects.create(name="Dining room", house=self.house1)
        self.room1_2 = Room.objects.create(name="Lounge", house=self.house1)
        self.room1_3 = Room.objects.create(name="Kitchen", house=self.house1)
        self.house1.main_room = self.room1_1
        self.house1.save()
        self.person1.houses.add(self.house1)

        self.house2 = House.objects.create(address="45 Side St")
        self.room2_1 = Room.objects.create(name="Dining room", house=self.house2)
        self.room2_2 = Room.objects.create(name="Lounge", house=self.house2)
        self.house2.main_room = self.room2_1
        self.house2.save()
        self.person1.houses.add(self.house2)

        # Mary: two houses (3 rooms + 2 rooms).
        self.house3 = House.objects.create(address="6 Downing St")
        self.room3_1 = Room.objects.create(name="Dining room", house=self.house3)
        self.room3_2 = Room.objects.create(name="Lounge", house=self.house3)
        self.room3_3 = Room.objects.create(name="Kitchen", house=self.house3)
        self.house3.main_room = self.room3_1
        self.house3.save()
        self.person2.houses.add(self.house3)

        self.house4 = House.objects.create(address="7 Regents St")
        self.room4_1 = Room.objects.create(name="Dining room", house=self.house4)
        self.room4_2 = Room.objects.create(name="Lounge", house=self.house4)
        self.house4.main_room = self.room4_1
        self.house4.save()
        self.person2.houses.add(self.house4)

    def test_order(self):
        with self.assertNumQueries(4):
            # The following two queries must be done in the same order as written,
            # otherwise 'primary_house' will cause non-prefetched lookups
            qs = Person.objects.prefetch_related('houses__rooms',
                                                 'primary_house__occupants')
            [list(p.primary_house.occupants.all()) for p in qs]
class NullableTest(TestCase):
    """Prefetching across a nullable FK (Employee.boss) must tolerate rows
    where the FK is NULL."""

    def setUp(self):
        boss = Employee.objects.create(name="Peter")
        # Peter has no boss (boss is NULL); Joe and Angela report to Peter.
        Employee.objects.create(name="Joe", boss=boss)
        Employee.objects.create(name="Angela", boss=boss)

    def test_traverse_nullable(self):
        # Because we use select_related() for 'boss', it doesn't need to be
        # prefetched, but we can still traverse it although it contains some nulls
        with self.assertNumQueries(2):
            qs = Employee.objects.select_related('boss').prefetch_related('boss__serfs')
            co_serfs = [list(e.boss.serfs.all()) if e.boss is not None else []
                        for e in qs]

        qs2 = Employee.objects.select_related('boss')
        co_serfs2 = [list(e.boss.serfs.all()) if e.boss is not None else [] for e in qs2]

        self.assertEqual(co_serfs, co_serfs2)

    def test_prefetch_nullable(self):
        # One for main employee, one for boss, one for serfs
        with self.assertNumQueries(3):
            qs = Employee.objects.prefetch_related('boss__serfs')
            co_serfs = [list(e.boss.serfs.all()) if e.boss is not None else []
                        for e in qs]

        qs2 = Employee.objects.all()
        co_serfs2 = [list(e.boss.serfs.all()) if e.boss is not None else [] for e in qs2]

        self.assertEqual(co_serfs, co_serfs2)

    def test_in_bulk(self):
        """
        In-bulk does correctly prefetch objects by not using .iterator()
        directly.
        """
        boss1 = Employee.objects.create(name="Peter")
        boss2 = Employee.objects.create(name="Jack")
        with self.assertNumQueries(2):
            # Check that prefetch is done and it does not cause any errors.
            bulk = Employee.objects.prefetch_related('serfs').in_bulk([boss1.pk, boss2.pk])
            for b in bulk.values():
                list(b.serfs.all())
class MultiDbTests(TestCase):
    """prefetch_related must honor the database selected with .using(),
    including when an explicit Prefetch() queryset targets another db."""
    multi_db = True

    def test_using_is_honored_m2m(self):
        B = Book.objects.using('other')
        A = Author.objects.using('other')
        book1 = B.create(title="Poems")
        book2 = B.create(title="Jane Eyre")
        book3 = B.create(title="Wuthering Heights")
        book4 = B.create(title="Sense and Sensibility")
        author1 = A.create(name="Charlotte", first_book=book1)
        author2 = A.create(name="Anne", first_book=book1)
        author3 = A.create(name="Emily", first_book=book1)
        author4 = A.create(name="Jane", first_book=book4)
        book1.authors.add(author1, author2, author3)
        book2.authors.add(author1)
        book3.authors.add(author3)
        book4.authors.add(author4)

        # Forward
        qs1 = B.prefetch_related('authors')
        with self.assertNumQueries(2, using='other'):
            books = "".join("%s (%s)\n" %
                            (book.title, ", ".join(a.name for a in book.authors.all()))
                            for book in qs1)
        self.assertEqual(books,
                         "Poems (Charlotte, Anne, Emily)\n"
                         "Jane Eyre (Charlotte)\n"
                         "Wuthering Heights (Emily)\n"
                         "Sense and Sensibility (Jane)\n")

        # Reverse
        qs2 = A.prefetch_related('books')
        with self.assertNumQueries(2, using='other'):
            authors = "".join("%s: %s\n" %
                              (author.name, ", ".join(b.title for b in author.books.all()))
                              for author in qs2)
        self.assertEqual(authors,
                         "Charlotte: Poems, Jane Eyre\n"
                         "Anne: Poems\n"
                         "Emily: Poems, Wuthering Heights\n"
                         "Jane: Sense and Sensibility\n")

    def test_using_is_honored_fkey(self):
        B = Book.objects.using('other')
        A = Author.objects.using('other')
        book1 = B.create(title="Poems")
        book2 = B.create(title="Sense and Sensibility")
        A.create(name="Charlotte Bronte", first_book=book1)
        A.create(name="Jane Austen", first_book=book2)

        # Forward
        with self.assertNumQueries(2, using='other'):
            books = ", ".join(a.first_book.title for a in A.prefetch_related('first_book'))
        self.assertEqual("Poems, Sense and Sensibility", books)

        # Reverse
        with self.assertNumQueries(2, using='other'):
            books = "".join("%s (%s)\n" %
                            (b.title, ", ".join(a.name for a in b.first_time_authors.all()))
                            for b in B.prefetch_related('first_time_authors'))
        self.assertEqual(books,
                         "Poems (Charlotte Bronte)\n"
                         "Sense and Sensibility (Jane Austen)\n")

    def test_using_is_honored_inheritance(self):
        B = BookWithYear.objects.using('other')
        A = AuthorWithAge.objects.using('other')
        book1 = B.create(title="Poems", published_year=2010)
        B.create(title="More poems", published_year=2011)
        A.create(name='Jane', first_book=book1, age=50)
        A.create(name='Tom', first_book=book1, age=49)

        # parent link
        with self.assertNumQueries(2, using='other'):
            authors = ", ".join(a.author.name for a in A.prefetch_related('author'))
        self.assertEqual(authors, "Jane, Tom")

        # child link
        with self.assertNumQueries(2, using='other'):
            ages = ", ".join(str(a.authorwithage.age) for a in A.prefetch_related('authorwithage'))
        self.assertEqual(ages, "50, 49")

    def test_using_is_honored_custom_qs(self):
        B = Book.objects.using('other')
        A = Author.objects.using('other')
        book1 = B.create(title="Poems")
        book2 = B.create(title="Sense and Sensibility")
        A.create(name="Charlotte Bronte", first_book=book1)
        A.create(name="Jane Austen", first_book=book2)

        # Implicit hinting
        with self.assertNumQueries(2, using='other'):
            prefetch = Prefetch('first_time_authors', queryset=Author.objects.all())
            books = "".join("%s (%s)\n" %
                            (b.title, ", ".join(a.name for a in b.first_time_authors.all()))
                            for b in B.prefetch_related(prefetch))
        self.assertEqual(books,
                         "Poems (Charlotte Bronte)\n"
                         "Sense and Sensibility (Jane Austen)\n")

        # Explicit using on the same db.
        with self.assertNumQueries(2, using='other'):
            prefetch = Prefetch('first_time_authors', queryset=Author.objects.using('other'))
            books = "".join("%s (%s)\n" %
                            (b.title, ", ".join(a.name for a in b.first_time_authors.all()))
                            for b in B.prefetch_related(prefetch))
        self.assertEqual(books,
                         "Poems (Charlotte Bronte)\n"
                         "Sense and Sensibility (Jane Austen)\n")

        # Explicit using on a different db: the prefetch query runs on
        # 'default' (which has no matching authors), so the lists are empty.
        with self.assertNumQueries(1, using='default'), self.assertNumQueries(1, using='other'):
            prefetch = Prefetch('first_time_authors', queryset=Author.objects.using('default'))
            books = "".join("%s (%s)\n" %
                            (b.title, ", ".join(a.name for a in b.first_time_authors.all()))
                            for b in B.prefetch_related(prefetch))
        self.assertEqual(books,
                         "Poems ()\n"
                         "Sense and Sensibility ()\n")
class Ticket19607Tests(TestCase):
    """Regression test for #19607: prefetching a forward descriptor together
    with its reverse set on models using explicit primary keys."""

    def setUp(self):
        lessons = [
            (1, 'einfach', 'simple'),
            (2, 'schwierig', 'difficult'),
        ]
        for pk, name1, name2 in lessons:
            LessonEntry.objects.create(id=pk, name1=name1, name2=name2)

        words = [
            (1, 1, 'einfach'),
            (2, 1, 'simple'),
            (3, 2, 'schwierig'),
            (4, 2, 'difficult'),
        ]
        for pk, lesson_pk, name in words:
            WordEntry.objects.create(id=pk, lesson_entry_id=lesson_pk, name=name)

    def test_bug(self):
        # Must not raise when traversing the FK and its reverse set together.
        list(WordEntry.objects.prefetch_related(
            'lesson_entry', 'lesson_entry__wordentry_set'))
class Ticket21410Tests(TestCase):
    """Regression test for #21410: prefetching an FK and an M2M with a custom
    related manager on the same queryset."""

    def setUp(self):
        titles = ("Poems", "Jane Eyre", "Wuthering Heights",
                  "Sense and Sensibility")
        self.book1, self.book2, self.book3, self.book4 = (
            Book.objects.create(title=title) for title in titles)

        author_specs = (
            ("Charlotte", self.book1),
            ("Anne", self.book1),
            ("Emily", self.book1),
            ("Jane", self.book4),
        )
        self.author1, self.author2, self.author3, self.author4 = (
            Author2.objects.create(name=name, first_book=first)
            for name, first in author_specs)

        self.author1.favorite_books.add(self.book1, self.book2, self.book3)
        self.author2.favorite_books.add(self.book1)
        self.author3.favorite_books.add(self.book2)
        self.author4.favorite_books.add(self.book3)

    def test_bug(self):
        # Must not raise.
        list(Author2.objects.prefetch_related('first_book', 'favorite_books'))
class Ticket21760Tests(TestCase):
    """Regression test for #21760: the queryset built for a forward-descriptor
    prefetch must not contain a JOIN."""

    def setUp(self):
        self.rooms = []
        for _ in range(3):
            house = House.objects.create()
            for _ in range(3):
                self.rooms.append(Room.objects.create(house=house))
            # Set main_room for each house before creating the next one for
            # databases where supports_nullable_unique_constraints is False.
            house.main_room = self.rooms[-3]
            house.save()

    def test_bug(self):
        prefetcher = get_prefetcher(self.rooms[0], 'house')[0]
        queryset = prefetcher.get_prefetch_queryset(list(Room.objects.all()))[0]
        self.assertNotIn(' JOIN ', force_text(queryset.query))
| 52ai/django-ccsds | tests/prefetch_related/tests.py | Python | bsd-3-clause | 51,773 |
import contextlib
from django.core.exceptions import ValidationError as DjangoValidationError
# Remnants from MODM days: legacy aliases kept so old call sites keep working.
# TODO: Remove usages of aliased Exceptions
ValidationError = DjangoValidationError
ValidationValueError = DjangoValidationError
ValidationTypeError = DjangoValidationError
class TokenError(Exception):
    """Base class for token-related errors."""


class TokenHandlerNotFound(TokenError):
    """Raised when no handler is registered for a token's action."""

    def __init__(self, action, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # The action name the token carried but no handler was found for.
        self.action = action


class UnsupportedSanctionHandlerKind(Exception):
    """Raised for sanction-handler kinds that are not recognized."""
class OSFError(Exception):
    """Base class for exceptions raised by the Osf application"""
    pass


class NodeError(OSFError):
    """Raised when an action cannot be performed on a Node model"""
    pass


class NodeStateError(NodeError):
    """Raised when the Node's state is not suitable for the requested action

    Example: Node.remove_node() is called, but the node has non-deleted children
    """
    pass


class UserStateError(OSFError):
    """Raised when the user's state is not suitable for the requested action

    Example: user.gdpr_delete() is called, but the user has resources that cannot be deleted.
    """
    pass


class SanctionTokenError(TokenError):
    """Base class for errors arising from the use of a sanction token."""
    pass


class MaxRetriesError(OSFError):
    """Raised when an operation has been attempted a pre-determined number of times"""
    pass
class InvalidSanctionRejectionToken(TokenError):
    """Raised if a Sanction subclass disapproval token submitted is invalid
    or associated with another admin authorizer
    """
    # User-facing summary / detail shown when the token is rejected.
    message_short = 'Invalid Token'
    message_long = 'This disapproval link is invalid. Are you logged into the correct account?'


class InvalidSanctionApprovalToken(TokenError):
    """Raised if a Sanction subclass approval token submitted is invalid
    or associated with another admin authorizer
    """
    # User-facing summary / detail shown when the token is rejected.
    message_short = 'Invalid Token'
    message_long = 'This approval link is invalid. Are you logged into the correct account?'


class InvalidTagError(OSFError):
    """Raised when attempting to perform an invalid operation on a tag"""
    pass


class TagNotFoundError(OSFError):
    """Raised when attempting to perform an operation on an absent tag"""
    pass


class UserNotAffiliatedError(OSFError):
    """Raised if a user attempts to add an institution that is not currently
    one of its affiliations.
    """
    # User-facing summary / detail for API error responses.
    message_short = 'User not affiliated'
    message_long = 'This user is not affiliated with this institution.'
@contextlib.contextmanager
def reraise_django_validation_errors():
    """Context manager to reraise DjangoValidationErrors as `osf.exceptions.ValidationErrors` (for
    MODM compat).
    """
    try:
        yield
    except DjangoValidationError as err:
        # Re-raise with the original constructor args so messages survive.
        # (ValidationError is currently an alias of DjangoValidationError.)
        raise ValidationError(*err.args)
class NaiveDatetimeException(Exception):
    """Raised when a naive (timezone-unaware) datetime is encountered where
    an aware one is required."""
    pass
class InvalidTriggerError(Exception):
    """Raised when a state-machine trigger is fired from a state that does
    not permit it."""

    def __init__(self, trigger, state, valid_triggers):
        self.trigger = trigger
        self.state = state
        self.valid_triggers = valid_triggers
        self.message = (
            f'Cannot trigger "{trigger}" from state "{state}". '
            f'Valid triggers: {valid_triggers}'
        )
        super().__init__(self.message)
class InvalidTransitionError(Exception):
    """Raised when a state machine receives a transition it does not define.

    Bug fix: the original stored ``self.message`` but never passed it to
    ``Exception.__init__``, so ``str(exc)`` was empty and tracebacks/logs
    carried no detail.
    """

    def __init__(self, machine, transition):
        self.message = 'Machine "{}" received invalid transitions: "{}" expected but not defined'.format(machine, transition)
        # Forward the message to the base class so str()/args expose it.
        super().__init__(self.message)
class PreprintError(OSFError):
    """Raised when an action cannot be performed on a Preprint model"""
    pass


class PreprintStateError(PreprintError):
    """Raised when the Preprint's state is not suitable for the requested action"""
    pass


class DraftRegistrationStateError(OSFError):
    """Raised when an action cannot be performed on a Draft Registration model"""
    pass


class PreprintProviderError(PreprintError):
    """Raised when there is an error with the preprint provider"""
    pass


class BlockedEmailError(OSFError):
    """Raised if a user tries to register an email that is included
    in the blocked domains list
    """
    pass


class SchemaBlockConversionError(OSFError):
    """Raised if unexpected data breaks the conversion between the legacy
    nested registration schema/metadata format and the new, flattened,
    'schema block' format.
    """
    pass
class SchemaResponseError(OSFError):
    """Superclass for errors arising from unexpected SchemaResponse behavior."""
    pass


class SchemaResponseStateError(SchemaResponseError):
    """Raised when attempting to perform an operation against a
    SchemaResponse with an invalid state.
    """
    pass


class PreviousSchemaResponseError(SchemaResponseError):
    """Raised when attempting to create a new SchemaResponse for a parent that
    already has a SchemaResponse in an unsupported state
    """
    pass
class RegistrationBulkCreationContributorError(OSFError):
    """Raised if contributor preparation has failed."""

    def __init__(self, error=None):
        # Fall back to a generic description when no detail is supplied.
        self.error = error or 'Contributor preparation error'
class RegistrationBulkCreationRowError(OSFError):
    """Raised if a draft registration failed creation during bulk upload."""

    def __init__(self, upload_id, row_id, title, external_id,
                 draft_id=None, error=None, approval_failure=False):
        # `draft_id` is provided when the draft is created but not related
        # to the row object.
        self.draft_id = draft_id
        # Whether the failure happened during the approval process.
        self.approval_failure = approval_failure
        # Error detail for logging, sentry and email.
        self.error = error or 'Draft registration creation error'
        # Short form appended to the error list emailed to the initiator.
        self.short_message = f'Title: {title}, External ID: {external_id}, Error: {self.error}'
        # Long form for logging and sentry.
        self.long_message = (
            f'Draft registration creation failed: [upload_id="{upload_id}", '
            f'row_id="{row_id}", title="{title}", external_id="{external_id}", '
            f'error="{self.error}"]'
        )
class SchemaResponseUpdateError(SchemaResponseError):
    """Raised when assigning an invalid value (or key) to a SchemaResponseBlock."""

    def __init__(self, response, invalid_responses=None, unsupported_keys=None):
        self.invalid_responses = invalid_responses
        self.unsupported_keys = unsupported_keys
        invalid_response_message = ''
        unsupported_keys_message = ''
        if invalid_responses:
            invalid_response_message = (
                f'\nThe following responses had invalid values: {invalid_responses}'
            )
        if unsupported_keys:
            # Fixed the original message, which read
            # 'Received the following resposnes had invalid keys'.
            unsupported_keys_message = (
                f'\nReceived the following responses with invalid keys: {unsupported_keys}'
            )
        # 'Error updating' fixes the original 'Error update' wording.
        error_message = (
            f'Error updating SchemaResponse with id [{response._id}]:'
            f'{invalid_response_message}{unsupported_keys_message}'
        )
        super().__init__(error_message)
| Johnetordoff/osf.io | osf/exceptions.py | Python | apache-2.0 | 7,309 |
"""
Wrapper functions for printing to a log. The log may be the console, or a
text file, or the screen.
NOTE: Logger does not yet write to a file.
"""
# TODO: Investigate how we can put loggers into 'AssetInstance' objects, to
# collect information per-instance - rather than some global log that all
# methods write into. This technique would allow better capturing of diagnostic
# information and sorting of information.
import logging as std_logging
import logging.config as std_logging_config
import assetQC.api.config as config
# Root logger name for the whole package.
BASE_LOG_NAME = 'assetqc'

# Log levels. The standard levels mirror the ``logging`` module; PROGRESS and
# FAILURE are custom levels slotted between INFO/WARNING and WARNING/ERROR.
LEVEL_NOTSET = std_logging.NOTSET  # 0
LEVEL_DEBUG = std_logging.DEBUG  # 10
LEVEL_INFO = std_logging.INFO  # 20
LEVEL_PROGRESS = 25  # custom: progress updates
LEVEL_WARNING = std_logging.WARNING  # 30
LEVEL_FAILURE = 35  # custom: check failures
LEVEL_ERROR = std_logging.ERROR  # 40
LEVEL_CRITICAL = std_logging.CRITICAL  # 50
class AssetQCLogger(std_logging.Logger):
    """
    Logger class for the AssetQC project.

    Extends the standard ``logging.Logger`` with two convenience methods,
    ``progress`` and ``failure``, which log at the custom LEVEL_PROGRESS and
    LEVEL_FAILURE levels. All standard logging methods (``info``,
    ``warning``, ``error``, ``debug``, ...) come from the base class.
    """

    def __init__(self, name):
        std_logging.Logger.__init__(self, name)

    def progress(self, msg, num, **kwargs):
        # Prefix the message with the percentage complete.
        text = f'{num}% {msg}'
        self.log(LEVEL_PROGRESS, text, **kwargs)

    def failure(self, msg, **kwargs):
        self.log(LEVEL_FAILURE, msg, **kwargs)
def getLogger(name=BASE_LOG_NAME):
    """
    Create and configure an AssetQC logger.

    :param name: The logger dot-separated name.
    :type name: str
    :rtype: AssetQCLogger
    :return: Logging object. (The original docstring said
        ``std_logging.Manager``, but the isinstance assertion below
        requires an ``AssetQCLogger``.)
    """
    assert isinstance(name, str)
    logConfigPath = config.getLoggingConfigPath()

    # Set the logger class.
    std_logging.setLoggerClass(AssetQCLogger)

    # Configure Logger
    # NOTE(review): these module-level side effects (custom level names and
    # fileConfig) are re-applied on every call. fileConfig() disables
    # existing loggers by default — confirm that is intended.
    std_logging.addLevelName(LEVEL_PROGRESS, 'PROGRESS')
    std_logging.addLevelName(LEVEL_FAILURE, 'FAILURE')
    std_logging_config.fileConfig(logConfigPath)

    logger = std_logging.getLogger(name)
    assert isinstance(logger, AssetQCLogger)
    return logger
| david-cattermole/assetQC | python/assetQC/api/logger.py | Python | lgpl-3.0 | 2,078 |
#!/usr/local/bin/python
# encoding: utf-8
"""
*Convert a python list of dictionaries to pretty csv output*
:Author:
David Young
"""
import sys
import os
import io
import csv
from decimal import Decimal
from datetime import datetime
os.environ['TERM'] = 'vt100'
from fundamentals import tools
from fundamentals.mysql import convert_dictionary_to_mysql_table
def list_of_dictionaries_to_mysql_inserts(
        log,
        datalist,
        tableName):
    """Convert a python list of dictionaries to MySQL INSERT statements

    **Key Arguments**

    - ``log`` -- logger
    - ``datalist`` -- a list of dictionaries
    - ``tableName`` -- the name of the table to create the insert statements for

    **Return**

    - ``output`` -- the mysql insert statements (as a string), or the string
      ``"NO MATCH"`` when ``datalist`` is empty

    **Usage**

    ```python
    from fundamentals.files import list_of_dictionaries_to_mysql_inserts
    mysqlInserts = list_of_dictionaries_to_mysql_inserts(
        log=log,
        datalist=dataList,
        tableName="my_new_table"
    )
    print(mysqlInserts)
    ```
    """
    log.debug('starting the ``list_of_dictionaries_to_mysql_inserts`` function')

    # Preserve the historical sentinel return for empty input.
    if not len(datalist):
        return "NO MATCH"

    inserts = []
    for d in datalist:
        insertCommand = convert_dictionary_to_mysql_table(
            log=log,
            dictionary=d,
            # BUG FIX: the table name was hard-coded to "testing_table",
            # silently ignoring the ``tableName`` argument.
            dbTableName=tableName,
            uniqueKeyList=[],
            dateModified=False,
            returnInsertOnly=True,
            replace=True,
            batchInserts=False
        )
        inserts.append(insertCommand)

    # One statement per line, each terminated with a semicolon.
    output = ";\n".join(inserts) + ";"

    log.debug('completed the ``list_of_dictionaries_to_mysql_inserts`` function')
    return output
| thespacedoctor/fundamentals | fundamentals/files/list_of_dictionaries_to_mysql_inserts.py | Python | gpl-3.0 | 2,121 |
import os
from distutils import log
from distutils.core import setup, Command, Extension
from distutils.dist import Distribution
from distutils.command.build import build
from distutils.command.build_ext import build_ext
from distutils.command.build_py import build_py
from distutils.command.install_lib import install_lib
from distutils.command.install_scripts import install_scripts
from distutils.spawn import spawn, find_executable
from distutils import cygwinccompiler, file_util
from distutils.errors import CCompilerError, DistutilsExecError
from distutils.sysconfig import get_python_inc, get_config_var
from distutils.version import StrictVersion
class xxldist(Distribution):
    """Distribution subclass adding gitxxl-specific global options
    (``--pure`` and ``--c2to3``)."""
    # When truthy (--pure), skip C extensions in favour of pure Python code.
    pure = 0
    global_options = Distribution.global_options + \
        [('pure', None, "use pure (slow) Python "
                        "code instead of C extensions"),
         ('c2to3', None, "(experimental!) convert "
                         "code with 2to3"),
         ]

    def has_ext_modules(self):
        # self.ext_modules is emptied in hgbuildpy.finalize_options which is
        # too late for some cases
        return not self.pure and Distribution.has_ext_modules(self)
scripts = ['gitxxl']

version = ''

# NOTE(review): this check is a no-op, so ``version`` stays empty and the
# __version__.py file below is never written. Presumably version detection
# from git was meant to happen here — confirm intent.
if os.path.isdir('.git'):
    pass

if version:
    f = open("extralarge/__version__.py", "w")
    f.write('# this file is autogenerated by setup.py\n')
    f.write('version = "%s"\n' % version)
    f.close()

try:
    # NOTE(review): the version is written to ``extralarge`` above but read
    # back from ``src`` here — one of the two paths looks stale; verify
    # against the repository layout.
    from src import __version__
    version = __version__.version
except ImportError:
    version = 'unknown'

# Metadata containers passed to setup() below.
cmdclass = {}
packages = ['extralarge']
pymodules = []
common_depends = []
extmodules = []
packagedata = {'gitxxl': []}
datafiles = [('dist/hooks', ['dist/hooks/post-commit', 'dist/hooks/pre-commit'])]

setupversion = version
extra = {}
# Package definition; metadata containers are assembled above.
setup(name='gitxxl',
      version=setupversion,
      author='Paolo Gavazzi and many others',
      author_email='pgavazzi@softcactus.com',
      url='http://gitxxl.com/',
      download_url='http://gitxxl.com/release/',
      description=('git extension for XXL binary files'),
      long_description=('GitXXL is a git extension written in Python.'
                        ' It is used by a number of large projects that require'
                        ' fast, reliable large file handling as part of'
                        ' their distributed revision control.'),
      license='GNU GPLv2 or any later version',
      classifiers=[
          'Development Status :: 1 - Planning',
          'Environment :: Console',
          'Intended Audience :: Developers',
          'Intended Audience :: System Administrators',
          'License :: OSI Approved :: GNU General Public License (GPL)',
          'Natural Language :: English',
          'Operating System :: Microsoft :: Windows',
          'Operating System :: OS Independent',
          'Operating System :: POSIX',
          'Programming Language :: Python',
          'Topic :: Software Development :: Version Control',
      ],
      scripts=scripts,
      packages=packages,
      py_modules=pymodules,
      ext_modules=extmodules,
      data_files=datafiles,
      package_data=packagedata,
      cmdclass=cmdclass,
      distclass=xxldist,
      # Build options for py2exe (Windows) and bdist_mpkg (macOS) targets.
      options={'py2exe': {'packages': []},
               'bdist_mpkg': {'zipdist': False,
                              'license': 'COPYING',
                              'readme': 'contrib/macosx/Readme.html',
                              'welcome': 'contrib/macosx/Welcome.html',
                              },
               },
      **extra)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.