text stringlengths 4 1.02M | meta dict |
|---|---|
import sys
import pytest
import json
import xml.etree.ElementTree as ET
from azure.core.pipeline.transport import HttpRequest as PipelineTransportHttpRequest
from azure.core.rest import HttpRequest as RestHttpRequest
try:
import collections.abc as collections
except ImportError:
import collections
@pytest.fixture
def old_request():
    """Pipeline-transport request used as the backcompat baseline."""
    return PipelineTransportHttpRequest(method="GET", url="/")


@pytest.fixture
def new_request():
    """azure.core.rest request whose behavior is compared to the old one."""
    return RestHttpRequest(method="GET", url="/")
def test_request_attr_parity(old_request, new_request):
    """Every public attribute of the old request must exist on the new one."""
    public_attrs = [a for a in dir(old_request) if not a.startswith("_")]
    for attr in public_attrs:
        # if not a private attr, we want parity
        assert hasattr(new_request, attr)


def test_request_set_attrs(old_request, new_request):
    """Any public attribute settable on the old request is settable on the new."""
    for attr in dir(old_request):
        if attr.startswith("_"):
            continue
        try:
            # if we can set it on the old request, we want to
            # be able to set it on the new
            setattr(old_request, attr, "foo")
        except:
            continue
        setattr(new_request, attr, "foo")
        assert getattr(old_request, attr) == getattr(new_request, attr) == "foo"
def test_request_multipart_mixed_info(old_request, new_request):
    """multipart_mixed_info round-trips identically on both request types."""
    old_request.multipart_mixed_info = "foo"
    new_request.multipart_mixed_info = "foo"
    assert old_request.multipart_mixed_info == "foo"
    assert new_request.multipart_mixed_info == "foo"


def test_request_files_attr(old_request, new_request):
    """files defaults to None and accepts a dict on both request types."""
    assert old_request.files is None
    assert new_request.files is None
    old_request.files = {"hello": "world"}
    new_request.files = {"hello": "world"}
    assert old_request.files == {"hello": "world"}
    assert new_request.files == {"hello": "world"}


def test_request_data_attr(old_request, new_request):
    """data defaults to None and accepts a dict on both request types."""
    assert old_request.data is None
    assert new_request.data is None
    old_request.data = {"hello": "world"}
    new_request.data = {"hello": "world"}
    assert old_request.data == {"hello": "world"}
    assert new_request.data == {"hello": "world"}
def test_request_query(old_request, new_request):
    """query parsing from the URL matches between old and new requests."""
    assert old_request.query == {}
    assert new_request.query == {}
    old_request.url = "http://localhost:5000?a=b&c=d"
    new_request.url = "http://localhost:5000?a=b&c=d"
    expected = {'a': 'b', 'c': 'd'}
    assert old_request.query == expected
    assert new_request.query == expected


def test_request_query_and_params_kwarg(old_request):
    # should be same behavior if we pass in query params through the params kwarg in the new requests
    old_request.url = "http://localhost:5000?a=b&c=d"
    new_request = RestHttpRequest("GET", "http://localhost:5000", params={'a': 'b', 'c': 'd'})
    expected = {'a': 'b', 'c': 'd'}
    assert old_request.query == expected
    assert new_request.query == expected
def test_request_body(old_request, new_request):
    """body mirrors data, is not overridden by files, and can be nullified."""
    assert old_request.body is None
    assert new_request.body is None
    old_request.data = {"hello": "world"}
    new_request.data = {"hello": "world"}
    for value in (old_request.body, new_request.body, new_request.content):
        assert value == {"hello": "world"}
    # files will not override data
    old_request.files = {"foo": "bar"}
    new_request.files = {"foo": "bar"}
    for value in (old_request.body, new_request.body, new_request.content):
        assert value == {"hello": "world"}
    # nullify data
    old_request.data = None
    new_request.data = None
    for value in (old_request.data, new_request.data,
                  old_request.body, new_request.body):
        assert value is None


def test_format_parameters(old_request, new_request):
    """format_parameters appends query params identically on both request types."""
    old_request.url = "a/b/c?t=y"
    new_request.url = "a/b/c?t=y"
    assert old_request.url == "a/b/c?t=y"
    assert new_request.url == "a/b/c?t=y"
    old_request.format_parameters({"g": "h"})
    new_request.format_parameters({"g": "h"})
    # ordering can vary, so not sticking on order
    expected_urls = ("a/b/c?g=h&t=y", "a/b/c?t=y&g=h")
    assert old_request.url in expected_urls
    assert new_request.url in expected_urls
def test_request_format_parameters_and_params_kwarg(old_request):
    """format_parameters on the old request matches the new request's params kwarg."""
    # calling format_parameters on an old request should be the same
    # behavior as passing in params to new request
    old_request.url = "a/b/c?t=y"
    old_request.format_parameters({"g": "h"})
    new_request = RestHttpRequest(
        "GET", "a/b/c?t=y", params={"g": "h"}
    )
    # query-string ordering can vary, so accept either order
    assert old_request.url in ["a/b/c?g=h&t=y", "a/b/c?t=y&g=h"]
    assert new_request.url in ["a/b/c?g=h&t=y", "a/b/c?t=y&g=h"]
    # additionally, calling format_parameters on a new request
    # should be the same as passing the params to a new request
    # (BUG FIX: the original repeated the asserts above verbatim and never
    # actually called format_parameters on a new request)
    formatted_new_request = RestHttpRequest("GET", "a/b/c?t=y")
    formatted_new_request.format_parameters({"g": "h"})
    assert formatted_new_request.url in ["a/b/c?g=h&t=y", "a/b/c?t=y&g=h"]
def test_request_streamed_data_body(old_request, new_request):
    """Setting a streamed body clears files and leaves an iterable body/data."""
    assert old_request.files is None and new_request.files is None
    assert old_request.data is None and new_request.data is None
    old_request.files = new_request.files = "foo"
    # passing in iterable
    def streaming_body(data):
        yield data  # pragma: nocover
    old_request.set_streamed_data_body(streaming_body("i will be streamed"))
    new_request.set_streamed_data_body(streaming_body("i will be streamed"))
    assert old_request.files is None and new_request.files is None
    for value in (old_request.data, new_request.data, old_request.body,
                  new_request.body, new_request.content):
        assert isinstance(value, collections.Iterable)
    assert old_request.headers == {} and new_request.headers == {}
def test_request_streamed_data_body_non_iterable(old_request, new_request):
    """A non-iterable streamed body raises TypeError and leaves state untouched."""
    # should fail before nullifying the files property
    old_request.files = new_request.files = "foo"
    # passing in non iterable
    with pytest.raises(TypeError) as ex:
        old_request.set_streamed_data_body(1)
    assert "A streamable data source must be an open file-like object or iterable" in str(ex.value)
    assert old_request.data is None
    assert old_request.files == "foo"
    with pytest.raises(TypeError) as ex:
        new_request.set_streamed_data_body(1)
    assert "A streamable data source must be an open file-like object or iterable" in str(ex.value)
    # BUG FIX: these two asserts previously re-checked old_request, so the
    # new request's post-failure state was never verified
    assert new_request.data is None
    assert new_request.files == "foo"
    assert old_request.headers == new_request.headers == {}
def test_request_streamed_data_body_and_content_kwarg(old_request):
    """set_streamed_data_body and the content kwarg produce the same state."""
    # passing stream bodies to set_streamed_data_body
    # and passing a stream body to the content kwarg of the new request should be the same
    def streaming_body(data):
        yield data  # pragma: nocover
    old_request.set_streamed_data_body(streaming_body("stream"))
    new_request = RestHttpRequest("GET", "/", content=streaming_body("stream"))
    assert old_request.files is None and new_request.files is None
    for value in (old_request.data, new_request.data, old_request.body,
                  new_request.body, new_request.content):
        assert isinstance(value, collections.Iterable)
    assert old_request.headers == {} and new_request.headers == {}
def test_request_text_body(old_request, new_request):
    """set_text_body clears files, sets body/data, and sets Content-Length."""
    assert old_request.files is None and new_request.files is None
    assert old_request.data is None and new_request.data is None
    old_request.files = new_request.files = "foo"
    old_request.set_text_body("i am text")
    new_request.set_text_body("i am text")
    assert old_request.files is None and new_request.files is None
    for value in (old_request.data, new_request.data, old_request.body,
                  new_request.body, new_request.content):
        assert value == "i am text"
    assert old_request.headers['Content-Length'] == '9'
    assert new_request.headers['Content-Length'] == '9'
    # only the new request adds a Content-Type for text bodies
    assert not old_request.headers.get("Content-Type")
    assert new_request.headers["Content-Type"] == "text/plain"


def test_request_text_body_and_content_kwarg(old_request):
    """set_text_body on the old request matches content= on the new request."""
    old_request.set_text_body("i am text")
    new_request = RestHttpRequest("GET", "/", content="i am text")
    for value in (old_request.data, new_request.data, old_request.body,
                  new_request.body, new_request.content):
        assert value == "i am text"
    assert old_request.headers["Content-Length"] == "9"
    assert new_request.headers["Content-Length"] == "9"
    assert old_request.files is None and new_request.files is None
def test_request_xml_body(old_request, new_request):
    """set_xml_body serializes the element and sets Content-Length on both."""
    assert old_request.files is None and new_request.files is None
    assert old_request.data is None and new_request.data is None
    old_request.files = new_request.files = "foo"
    xml_input = ET.Element("root")
    old_request.set_xml_body(xml_input)
    new_request.set_xml_body(xml_input)
    assert old_request.files is None and new_request.files is None
    expected = b"<?xml version='1.0' encoding='utf-8'?>\n<root />"
    for value in (old_request.data, new_request.data, old_request.body,
                  new_request.body, new_request.content):
        assert value == expected
    assert old_request.headers == new_request.headers == {'Content-Length': '47'}
def test_request_xml_body_and_content_kwarg(old_request):
    # NOTE(review): despite its name, this test is a byte-for-byte copy of
    # test_request_text_body_and_content_kwarg and never exercises XML
    # (no set_xml_body call, no ET content). Confirm intent — if XML
    # coverage was meant, it should mirror test_request_xml_body with an
    # ET.Element passed via the content kwarg.
    old_request.set_text_body("i am text")
    new_request = RestHttpRequest("GET", "/", content="i am text")
    assert (
        old_request.data ==
        new_request.data ==
        old_request.body ==
        new_request.body ==
        new_request.content ==
        "i am text"
    )
    assert old_request.headers["Content-Length"] == new_request.headers["Content-Length"] == "9"
    assert old_request.files == new_request.files == None
def test_request_json_body(old_request, new_request):
    """set_json_body clears files, serializes the payload, sets Content-Length."""
    assert old_request.files is None and new_request.files is None
    assert old_request.data is None and new_request.data is None
    old_request.files = new_request.files = "foo"
    json_input = {"hello": "world"}
    old_request.set_json_body(json_input)
    new_request.set_json_body(json_input)
    assert old_request.files is None and new_request.files is None
    serialized = json.dumps(json_input)
    for value in (old_request.data, new_request.data, old_request.body,
                  new_request.body, new_request.content):
        assert value == serialized
    assert old_request.headers["Content-Length"] == '18'
    assert new_request.headers['Content-Length'] == '18'
    # only the new request adds a Content-Type for json bodies
    assert not old_request.headers.get("Content-Type")
    assert new_request.headers["Content-Type"] == "application/json"


def test_request_json_body_and_json_kwarg(old_request):
    """set_json_body on the old request matches the new request's json kwarg."""
    json_input = {"hello": "world"}
    old_request.set_json_body(json_input)
    new_request = RestHttpRequest("GET", "/", json=json_input)
    serialized = json.dumps(json_input)
    for value in (old_request.data, new_request.data, old_request.body,
                  new_request.body, new_request.content):
        assert value == serialized
    assert old_request.headers["Content-Length"] == '18'
    assert new_request.headers['Content-Length'] == '18'
    assert not old_request.headers.get("Content-Type")
    assert new_request.headers["Content-Type"] == "application/json"
    assert old_request.files is None and new_request.files is None
def test_request_formdata_body_files(old_request, new_request):
    """Without a urlencoded Content-Type, set_formdata_body populates files."""
    assert old_request.files is None and new_request.files is None
    assert old_request.data is None and new_request.data is None
    old_request.data = new_request.data = "foo"
    old_request.files = new_request.files = "bar"
    # without the urlencoded content type, set_formdata_body
    # will set it as files
    old_request.set_formdata_body({"fileName": "hello.jpg"})
    new_request.set_formdata_body({"fileName": "hello.jpg"})
    assert old_request.data is None and new_request.data is None
    for value in (old_request.files, new_request.files, new_request.content):
        assert value == {'fileName': (None, 'hello.jpg')}
    # we don't set any multipart headers with boundaries
    # we rely on the transport to boundary calculating
    assert old_request.headers == {} and new_request.headers == {}


def test_request_formdata_body_data(old_request, new_request):
    """With a urlencoded Content-Type, set_formdata_body populates data instead."""
    assert old_request.files is None and new_request.files is None
    assert old_request.data is None and new_request.data is None
    old_request.data = new_request.data = "foo"
    old_request.files = new_request.files = "bar"
    # with the urlencoded content type, set_formdata_body
    # will set it as data
    old_request.headers["Content-Type"] = "application/x-www-form-urlencoded"
    new_request.headers["Content-Type"] = "application/x-www-form-urlencoded"
    old_request.set_formdata_body({"fileName": "hello.jpg"})
    new_request.set_formdata_body({"fileName": "hello.jpg"})
    assert old_request.files is None and new_request.files is None
    for value in (old_request.data, new_request.data, old_request.body,
                  new_request.body, new_request.content):
        assert value == {"fileName": "hello.jpg"}
    # old behavior would pop out the Content-Type header
    # new behavior doesn't do that
    assert old_request.headers == {}
    assert new_request.headers == {'Content-Type': "application/x-www-form-urlencoded"}
def test_request_formdata_body_and_files_kwarg(old_request):
    """set_formdata_body on the old request matches the new request's files kwarg."""
    files = {"fileName": "hello.jpg"}
    old_request.set_formdata_body(files)
    new_request = RestHttpRequest("GET", "/", files=files)
    assert old_request.data is None and new_request.data is None
    assert old_request.body is None and new_request.body is None
    assert old_request.headers == {} and new_request.headers == {}
    assert old_request.files == {'fileName': (None, 'hello.jpg')}
    assert new_request.files == {'fileName': (None, 'hello.jpg')}


def test_request_formdata_body_and_data_kwarg(old_request):
    """Urlencoded set_formdata_body matches the new request's data kwarg."""
    data = {"fileName": "hello.jpg"}
    # with the urlencoded content type, set_formdata_body
    # will set it as data
    old_request.headers["Content-Type"] = "application/x-www-form-urlencoded"
    old_request.set_formdata_body(data)
    new_request = RestHttpRequest("GET", "/", data=data)
    for value in (old_request.data, new_request.data, old_request.body,
                  new_request.body, new_request.content):
        assert value == {"fileName": "hello.jpg"}
    # the old request pops Content-Type; the new one keeps it
    assert old_request.headers == {}
    assert new_request.headers == {"Content-Type": "application/x-www-form-urlencoded"}
    assert old_request.files is None and new_request.files is None
def test_request_bytes_body(old_request, new_request):
    """set_bytes_body clears files, sets the raw bytes, and sets Content-Length."""
    assert old_request.files is None and new_request.files is None
    assert old_request.data is None and new_request.data is None
    old_request.files = new_request.files = "foo"
    bytes_input = b"hello, world!"
    old_request.set_bytes_body(bytes_input)
    new_request.set_bytes_body(bytes_input)
    assert old_request.files is None and new_request.files is None
    for value in (old_request.data, new_request.data, old_request.body,
                  new_request.body, new_request.content):
        assert value == bytes_input
    assert old_request.headers == new_request.headers == {'Content-Length': '13'}


def test_request_bytes_body_and_content_kwarg(old_request):
    """set_bytes_body on the old request matches bytes passed via content=."""
    bytes_input = b"hello, world!"
    old_request.set_bytes_body(bytes_input)
    new_request = RestHttpRequest("GET", "/", content=bytes_input)
    for value in (old_request.data, new_request.data, old_request.body,
                  new_request.body, new_request.content):
        assert value == bytes_input
    assert old_request.headers == new_request.headers == {'Content-Length': '13'}
    assert old_request.files == new_request.files
| {
"content_hash": "0621bca36e0cc649bc6ea06b0423e78f",
"timestamp": "",
"source": "github",
"line_count": 392,
"max_line_length": 101,
"avg_line_length": 39.5,
"alnum_prop": 0.6500904159132007,
"repo_name": "Azure/azure-sdk-for-python",
"id": "e2d4f866f4703c3cecfbee0d95daa7524f415ce9",
"size": "15817",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/core/azure-core/tests/test_rest_request_backcompat.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
} |
import boto3
class FileUpload:
    """Uploads a fixed set of JSON files from /tmp to an S3 bucket."""

    def __init__(self):
        # BUG FIX: these values were plain locals in __init__ and were then
        # referenced in file_upload as bare names, raising NameError at call
        # time; they are now stored on the instance.
        # SECURITY: hard-coded placeholder credentials — prefer IAM roles or
        # environment configuration over embedding real keys here.
        self.aws_access_key_id = "ExampleAccessString"  # insert your access id
        self.aws_secret_access_key = "ExampleAccessKey"  # insert your access key
        self.s3_bucket_name = "your-s3-bucket-name"  # insert your s3 bucket

    # other_filenames should be a list of filenames. it is assumed they are in the tmp folder.
    def file_upload(self, other_filenames=None):
        """Upload the known /tmp JSON files, plus any extra names given."""
        s3 = boto3.client(
            's3',
            aws_access_key_id=self.aws_access_key_id,
            aws_secret_access_key=self.aws_secret_access_key,
        )
        # Upload the fixed set of price-tracking files to the bucket.
        default_files = (
            "price-comparison-recent.json",
            "hills_urls.json",
            "1E-hills-prices-recent.json",
            "pages_to_check.json",
            "pages_to_check_test.json",
            "prices-recent.json",
        )
        for filename in default_files:
            s3.upload_file("/tmp/%s" % filename, self.s3_bucket_name, filename)
        if other_filenames:
            for each_filename in other_filenames:
                s3.upload_file("/tmp/%s" % each_filename, self.s3_bucket_name, each_filename)
| {
"content_hash": "dbc9d96b2cf7f0a77ae07c72afddf6e1",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 113,
"avg_line_length": 54.833333333333336,
"alnum_prop": 0.6702127659574468,
"repo_name": "corbinq27/priceTweeter",
"id": "6a41a85e63107d1f7bfce65f528d909d0ddb679e",
"size": "1316",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "s3_file_upload.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "12689"
}
],
"symlink_target": ""
} |
import time
import unittest
import yaml
class timewith():
    """Context-manager stopwatch that prints labeled checkpoint timings."""

    def __init__(self, name=''):
        self.name = name
        self.start = time.time()

    @property
    def elapsed(self):
        """Seconds elapsed since this timer was created."""
        return time.time() - self.start

    def checkpoint(self, name=''):
        """Print elapsed milliseconds, tagged with the timer and checkpoint names."""
        message = '{timer} {checkpoint} took {elapsed} ms'.format(
            timer=self.name,
            checkpoint=name,
            elapsed=(self.elapsed * 1000.0),
        )
        # strip() tidies the leading space when either name is empty
        print(message.strip())

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, exc_traceback):
        # a final checkpoint is emitted even when the body raised
        self.checkpoint('finished')
import cProfile
def do_cprofile(func):
    """Decorator that profiles *func* with cProfile, printing stats per call.

    Stats are printed even when *func* raises; the result (or exception)
    propagates unchanged.
    """
    from functools import wraps

    @wraps(func)  # FIX: preserve the wrapped function's name and docstring
    def profiled_func(*args, **kwargs):
        profile = cProfile.Profile()
        try:
            profile.enable()
            result = func(*args, **kwargs)
            profile.disable()
            return result
        finally:
            profile.print_stats(sort=True)
    return profiled_func
class TestTheBusThatCouldntSlowDown(unittest.TestCase):
    """Profiling smoke-test for rule execution.

    NOTE(review): `Emperor` is not defined or imported in the code shown;
    presumably it is provided elsewhere in this module — verify.
    """

    @do_cprofile
    def _apply_rule(self, context, rule, should_pass=True):
        # Execute a single rule against `context`, timing the call and
        # asserting the report's overall 'pass' flag; returns the 'data'
        # entry of the report (None when absent).
        with timewith("execute_rules"):
            report = Emperor.execute_rules(context, [rule])
        self.assertEqual(report['pass'], should_pass)
        return report.get('data')

    def test_simple_when(self):
        # Rule hides two fields when `doc.amount == 1`; the apply is repeated
        # three times to gather profiling samples via @do_cprofile.
        rule = {
            'name': 'testrule',
            'when': 'doc.amount == 1',
            'hide': [
                'doc.currency.dollar',
                'doc.amount',
            ]
        }
        context = {
            'currency': {
                'dollar': 100,
            },
            'amount': 1,
        }
        result = self._apply_rule(context, rule, should_pass=True)
        result = self._apply_rule(context, rule, should_pass=True)
        result = self._apply_rule(context, rule, should_pass=True)
| {
"content_hash": "fa910dd01d10224d99fbb37b22b95c29",
"timestamp": "",
"source": "github",
"line_count": 73,
"max_line_length": 66,
"avg_line_length": 25.10958904109589,
"alnum_prop": 0.5330060010911075,
"repo_name": "optimuspaul/themis",
"id": "ef9198cb2cdaa496e6c00b3d641d59f8f14b88d0",
"size": "1833",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/themis/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "10558"
}
],
"symlink_target": ""
} |
__doc__ = \
"""
Example of a script that perfoms histogram analysis of an activation
image, to estimate activation Z-score with various heuristics:
* Gamma-Gaussian model
* Gaussian mixture model
* Empirical normal null
This example is based on a (simplistic) simulated image.
Note : We do not want a 'zscore', which does mean anything
(except with the fdr) but probability
that each voxel is in the active class
"""
# Author : Bertrand Thirion, Gael Varoquaux 2008-2009

# NOTE(review): Python 2 print statement — this example predates Python 3.
print __doc__

import numpy as np
import nipy.neurospin.utils.simul_2d_multisubject_fmri_dataset as simul
import nipy.neurospin.utils.emp_null as en

################################################################################
# simulate the data
dimx = 60
dimy = 60
# activation blob centers (pixel coordinates, doubled) and their amplitudes
pos = 2*np.array([[6,7],[10,10],[15,10]])
ampli = np.array([3,4,4])
# single-subject simulated activation image; squeeze() drops the subject axis
dataset = simul.make_surrogate_array(nbsubj=1, dimx=dimx, dimy=dimy, pos=pos,
                                     ampli=ampli, width=10.0).squeeze()

import pylab as pl
fig = pl.figure(figsize=(12, 10))
pl.subplot(3, 3, 1)
pl.imshow(dataset, cmap=pl.cm.hot)
pl.colorbar()
pl.title('Raw data')

# flatten the 2D image into a 1D sample for the histogram fits below
Beta = dataset.ravel().squeeze()

################################################################################
# fit Beta's histogram with a Gamma-Gaussian mixture
gam_gaus_pp = en.Gamma_Gaussian_fit(Beta, Beta)
# posterior probabilities reshaped back to image space, one map per component
gam_gaus_pp = np.reshape(gam_gaus_pp, (dimx, dimy, 3))

pl.figure(fig.number)
pl.subplot(3, 3, 4)
pl.imshow(gam_gaus_pp[..., 0], cmap=pl.cm.hot)
pl.title('Gamma-Gaussian mixture,\n first component posterior proba.')
pl.colorbar()
pl.subplot(3, 3, 5)
pl.imshow(gam_gaus_pp[..., 1], cmap=pl.cm.hot)
pl.title('Gamma-Gaussian mixture,\n second component posterior proba.')
pl.colorbar()
pl.subplot(3, 3, 6)
pl.imshow(gam_gaus_pp[..., 2], cmap=pl.cm.hot)
pl.title('Gamma-Gaussian mixture,\n third component posterior proba.')
pl.colorbar()

################################################################################
# fit Beta's histogram with a mixture of Gaussians
alpha = 0.01
gaus_mix_pp = en.three_classes_GMM_fit(Beta, None,
                                       alpha, prior_strength=100)
gaus_mix_pp = np.reshape(gaus_mix_pp, (dimx, dimy, 3))

pl.figure(fig.number)
pl.subplot(3, 3, 7)
pl.imshow(gaus_mix_pp[..., 0], cmap=pl.cm.hot)
pl.title('Gaussian mixture,\n first component posterior proba.')
pl.colorbar()
pl.subplot(3, 3, 8)
pl.imshow(gaus_mix_pp[..., 1], cmap=pl.cm.hot)
pl.title('Gaussian mixture,\n second component posterior proba.')
pl.colorbar()
pl.subplot(3, 3, 9)
pl.imshow(gaus_mix_pp[..., 2], cmap=pl.cm.hot)
# NOTE(review): label says 'Gamma-Gaussian' but this panel shows the plain
# Gaussian mixture — likely a copy-paste slip in the title string.
pl.title('Gamma-Gaussian mixture,\n third component posterior proba.')
pl.colorbar()

################################################################################
# Fit the null mode of Beta with an empirical normal null
efdr = en.ENN(Beta)
emp_null_fdr = efdr.fdr(Beta)
emp_null_fdr = emp_null_fdr.reshape((dimx, dimy))

pl.subplot(3, 3, 3)
# plot 1 - FDR so that "hot" colors mark the most significant voxels
pl.imshow(1-emp_null_fdr, cmap=pl.cm.hot)
pl.colorbar()
pl.title('Empirical FDR\n ')
#efdr.plot()
#pl.title('Empirical FDR fit')
pl.show()
| {
"content_hash": "62696b56ea939674d455fed639d4432c",
"timestamp": "",
"source": "github",
"line_count": 101,
"max_line_length": 80,
"avg_line_length": 30.326732673267326,
"alnum_prop": 0.6183480248122756,
"repo_name": "yarikoptic/NiPy-OLD",
"id": "26979856617da8963533bf893c16c38cb439cf50",
"size": "3063",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/neurospin/histogram_fits.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "4411801"
},
{
"name": "Objective-C",
"bytes": "4262"
},
{
"name": "Python",
"bytes": "2617786"
}
],
"symlink_target": ""
} |
# Base exception type for this example.
class NeptunError(Exception):
    pass

# Subtype of NeptunError; its handler is listed first below but never
# matches, since the code raises the base NeptunError.
class IntergallacticError(NeptunError):
    pass

f = 2
try:
    f = 2
    if f == 2:
        raise NeptunError("why f")
    h = 2  # unreachable while f == 2
except IntergallacticError as e:
    print(e)
except NeptunError as e:
    # NeptunError is raised above, so this branch prints "why f"
    print(e)
| {
"content_hash": "03270ac34bd2b4d09e9f48cd583e38a7",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 39,
"avg_line_length": 15.058823529411764,
"alnum_prop": 0.62890625,
"repo_name": "alehander42/pseudo-python",
"id": "b09500366c379e701d5fe6ed4095c35a087f5b8b",
"size": "256",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/error_handling.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "125300"
}
],
"symlink_target": ""
} |
import hashlib
from tempfile import TemporaryFile
from django.core.files import File
from django.utils.timezone import make_aware, utc
from PIL import Image
import requests
from .models import Like, Photo, Tag
from instagram.client import InstagramAPI as BaseInstagramAPI
from instagram.bind import bind_method
from instagram.models import Media
class InstagramAPI(BaseInstagramAPI):
    """Instagram client extended with a paginated "own recent media" endpoint."""

    self_recent_media = bind_method(
        path="/users/self/media/recent",
        accepts_parameters=["count", "max_id", "min_id"],
        root_class=Media,
        paginates=True,
    )
def photo_tags(obj, tags):
    """Attach lowercase tag names to *obj*, creating Tag rows as needed."""
    for tag in tags:
        lowered = tag.name.lower()
        # No sane limit on tags - so we enforce one here by avoiding them
        if len(lowered) > 200:
            continue
        tag_obj, _created = Tag.objects.get_or_create(name=lowered)
        obj.tags.add(tag_obj)
def update_photos(photos, download=False):
    """Upsert Photo rows for the given Instagram media objects.

    photos: iterable of Instagram API media objects (instagram.models.Media).
    download: when True, fetch and store the image file locally for photos
        that do not already have one.
    Returns the list of Photo model instances, in input order.
    """
    obj_list = []
    for i in photos:
        image = i.images['standard_resolution']
        # caption may be absent on the API object
        if i.caption:
            caption = i.caption.text
        else:
            caption = ''
        obj, created = Photo.objects.update_or_create(photo_id=i.id, defaults={
            'user': i.user.username,
            'image': image.url,
            'image_width': image.width,
            'image_height': image.height,
            # API timestamps are naive; store them as UTC-aware
            'created': make_aware(i.created_time, utc),
            'caption': caption,
            'link': i.link,
            'like_count': i.like_count,
            'comment_count': i.comment_count,
        })
        if download and not obj.image_file:
            with TemporaryFile() as temp_file:
                image_file = File(temp_file)
                # Download the file
                r = requests.get(image.url, stream=True)
                r.raise_for_status()
                for chunk in r.iter_content(4096):
                    image_file.write(chunk)
                # Get Pillow to look at it
                image_file.seek(0)
                pil_image = Image.open(image_file)
                # stored filename = URL hash + format extension Pillow detected
                image_name = '%s.%s' % (
                    hashlib.md5(image.url.encode()).hexdigest(), pil_image.format.lower())
                # Save the file
                image_file.seek(0)
                obj.image_file.save(image_name, image_file, save=True)
        # Add tags
        photo_tags(obj=obj, tags=i.tags)
        obj_list.append(obj)
    return obj_list
def update_likes(user, photos, download=False):
    """Sync *photos*, then ensure a Like row exists for each on behalf of *user*."""
    synced = update_photos(photos=photos, download=download)
    for photo in synced:
        Like.objects.get_or_create(user=user, photo=photo)
    return synced
| {
"content_hash": "52aea34d47620ae5faf921b3dc637ce9",
"timestamp": "",
"source": "github",
"line_count": 92,
"max_line_length": 90,
"avg_line_length": 29.130434782608695,
"alnum_prop": 0.5738805970149253,
"repo_name": "kmlebedev/mezzanine-instagram-quickphotos",
"id": "0f881ab57a0c1a1f3061822300f53885666e9df8",
"size": "2680",
"binary": false,
"copies": "1",
"ref": "refs/heads/mezzanine",
"path": "mezzanine_quickphotos/utils.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "14169"
}
],
"symlink_target": ""
} |
from columns.lib.authorization.exceptions import *
from decorator import decorator
__all__ = ['retrieve_user','user_permission','user_id','SitePermissions'] #,'AuthorizeAction'
def retrieve_user(session):
    """Return the User row for the session's user_id, or None when absent."""
    session_user_id = session.get('user_id')
    if session_user_id is None:
        return None
    # imported lazily so merely importing this module needs no DB setup
    from columns.model import User, meta
    return meta.Session.query(User).get(int(session_user_id))
def user_permission(session):
    """Return the session's cached user_type, or None when not set."""
    return session.get('user_type')
def user_id(session):
    """Return the session's cached user_id, or None when not set."""
    return session.get('user_id')
#def AuthorizeAction(permissions, fallback=None):
# def wrapper(func, self, *args, **kwargs):
# environ = self._py_object.request.environ
# try:
# permissions.to_python(kwargs, environ)
# except Invalid:
# from pylons.controllers.util import abort, redirect
# if fallback is not None:
# url_gen = environ['routes.url']
# redirect(url_gen(fallback))
# else:
# abort(403)
# return func(self,*args,**kwargs)
#
# return decorator(wrapper)
class SitePermissions(object):
    """Registry mapping controller/resource names to per-action permission validators."""

    def __init__(self, permissions=None):
        # BUG FIX: permission_sets was a mutable *class* attribute mutated by
        # add_permissions, so every instance (and the class itself) shared a
        # single registry; it is now per-instance state.
        self.permission_sets = {}
        if isinstance(permissions, dict):
            for key, value in permissions.items():
                self.add_permissions(key, value)

    def add_permissions(self, controller_or_resource, permissions):
        """Register the action->validator mapping for one controller/resource."""
        self.permission_sets[controller_or_resource] = permissions

    def authorize(self, environ, controller_or_resource=None, action=None, **kwargs):
        """Run the registered validator for (controller_or_resource, action).

        Unregistered pairs are silently allowed (no-op). A registered
        validator's to_python(kwargs, environ) may raise to deny access.
        """
        try:
            validator = self.permission_sets[controller_or_resource][action]
        except KeyError:
            return
        else:
            validator.to_python(kwargs, environ)
| {
"content_hash": "06788cdbd75afe064e6ad3221c776b36",
"timestamp": "",
"source": "github",
"line_count": 66,
"max_line_length": 93,
"avg_line_length": 28.666666666666668,
"alnum_prop": 0.7140591966173362,
"repo_name": "yoshrote/Columns",
"id": "4d678bc2623720f646c6872501bf21ce066387f7",
"size": "1892",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "columns/lib/authorization/util.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "2558885"
},
{
"name": "PHP",
"bytes": "48646"
},
{
"name": "Python",
"bytes": "320764"
}
],
"symlink_target": ""
} |
"""Utility for archiving files and directories.
An archive is simply a zip file containing the archived
targets plus an optional log file. The standard used for
the log file is a text file named `__archive_info__.txt`.
The log is always in the root directory of the archive.
By default, the archive filename will be prefixed with
a timestamp of the time created. The default format is the
following: `YYMMDDhhmm-<ARCHIVE_NAME>.zip`. Options are
provided to extend, shorten or remove the timestamp. If a
name for the archive is not explicitly given, the archive
name will be based on the name of the first target.
Usage:
archiver [options] TARGET...
archiver -h | --help
archiver --version
Arguments:
TARGET Path to a file or folder to archive.
Options:
-m LOGMSG Archive log message.
--outdir=OUTDIR Directory to place generated files [default: .].
--name=NAME Archive name.
--no_ts Do not include timestamp in archive name.
--short_ts Only timestamp to the day (hour:min otherwise).
--long_ts Timestamp to the second (hour:min otherwise).
--delete Delete original targets from file system after archiving.
--flatten Flatten directory structure in the zip archive.
--flatten_ld Flatten leading directory; only if single directory target.
-h --help Show this help message and exit.
--version Show version and exit.
"""
##==============================================================#
## DEVELOPED 2012, REVISED 2014, Jeff Rimko. #
##==============================================================#
##==============================================================#
## SECTION: Imports #
##==============================================================#
import os
import sys
from docopt import docopt
import arcmgr
from appinfo import ARCHIVER_NAME, ARCHIVER_VER
##==============================================================#
## SECTION: Global Definitions #
##==============================================================#
#: Combined application name and version string.
NAMEVER = "%s %s" % (ARCHIVER_NAME, ARCHIVER_VER)
##==============================================================#
## SECTION: Function Definitions #
##==============================================================#
def print_error(s):
    """Write an ERROR-prefixed message to stderr; returns characters written."""
    # was a lambda assignment (PEP 8 E731); kept the write() return value
    # so any caller inspecting it sees identical behavior
    return sys.stderr.write("ERROR: " + s)

def print_warning(s):
    """Write a WARNING-prefixed message to stderr; returns characters written."""
    return sys.stderr.write("WARNING: " + s)
def parse_args(args):
    """Parses command line arguments into a UtilData object."""
    arcctr = arcmgr.ArcCreator()
    arcctr.systargets = [os.path.abspath(target) for target in args['TARGET']]
    arcctr.name = args['--name']
    arcctr.logtxt = args['-m']
    # first matching timestamp flag wins (long > short > none);
    # the ArcCreator default style is kept when none is given
    for flag, style in (('--long_ts', 'long'),
                        ('--short_ts', 'short'),
                        ('--no_ts', 'none')):
        if args[flag]:
            arcctr.ts_style = style
            break
    arcctr.delete = args['--delete']
    arcctr.flatten = args['--flatten']
    arcctr.flatten_ld = args['--flatten_ld']
    arcctr.outdir = args['--outdir']
    return arcctr
def main():
    """The application main logic.

    Parses CLI arguments via docopt (using the module docstring as the
    grammar), builds the archive, and reports the failure reason and any
    warnings collected by the ArcCreator to stderr.
    """
    args = docopt(__doc__, version=NAMEVER)
    arcctr = parse_args(args)
    if not arcctr.create_archive():
        # create_archive() returns falsy on failure and stores the reason
        print_error("Archive could not be created! %s" % arcctr.errmsg)
    if arcctr.warnmsgs:
        for w in arcctr.warnmsgs:
            print_warning(w)
##==============================================================#
## SECTION: Main Body #
##==============================================================#
if __name__ == "__main__":
main()
| {
"content_hash": "f855b7dc589101ab5298c24623b959e4",
"timestamp": "",
"source": "github",
"line_count": 103,
"max_line_length": 79,
"avg_line_length": 37.262135922330096,
"alnum_prop": 0.5284002084418968,
"repo_name": "jeffrimko/Archiver",
"id": "e29e178e35726c5d6596ecd63a882a89f883722e",
"size": "3838",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/archiver.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "8198"
},
{
"name": "Python",
"bytes": "48274"
}
],
"symlink_target": ""
} |
import idaapi
import idautils
import idc
from ..core import fix_addresses
from .xref import Xref
from .instruction import Instruction
from ..ui import updates_ui
from .base import get_selection, get_offset_name, demangle
from .. import data
class Comments(object):
    """IDA Line Comments

    Provides easy access to all types of comments for an IDA line:
    regular, repeatable, anterior (extra lines above) and posterior
    (extra lines below).
    """

    def __init__(self, ea):
        # ea: effective address of the line these comments belong to
        self._ea = ea

    def __bool__(self):
        # truthy when the line carries any comment of any kind
        return any((self.regular, self.repeat, self.anterior, self.posterior,))

    @property
    def regular(self):
        """Regular Comment"""
        return idaapi.get_cmt(self._ea, 0)

    @regular.setter
    def regular(self, comment):
        idaapi.set_cmt(self._ea, comment, 0)

    @property
    def repeat(self):
        """Repeatable Comment"""
        return idaapi.get_cmt(self._ea, 1)

    @repeat.setter
    def repeat(self, comment):
        idaapi.set_cmt(self._ea, comment, 1)

    def _iter_extra_comments(self, start):
        # Yield every extra-comment line from index `start` up to the first
        # free slot (idaapi.E_PREV for anterior, idaapi.E_NEXT for posterior).
        end = idaapi.get_first_free_extra_cmtidx(self._ea, start)
        for idx in range(start, end):
            line = idaapi.get_extra_cmt(self._ea, idx)
            yield line or ''

    def _iter_anterior(self):
        return self._iter_extra_comments(idaapi.E_PREV)

    @property
    def anterior(self):
        """Anterior Comment"""
        return "\n".join(self._iter_anterior())

    @anterior.setter
    @updates_ui
    def anterior(self, comment):
        # An empty/None comment removes the anterior comment.
        if not comment:
            idaapi.del_extra_cmt(self._ea, idaapi.E_PREV)
            return
        index = 0
        # write one extra-comment slot per line of the new comment
        for index, line in enumerate(comment.splitlines()):
            idaapi.update_extra_cmt(self._ea, idaapi.E_PREV + index, line)
        # drop the slot after the last written line, presumably to clear a
        # stale trailing line from a longer previous comment — only one slot
        # is cleared, not all remaining ones; verify against idaapi semantics
        idaapi.del_extra_cmt(self._ea, idaapi.E_PREV + (index + 1))

    def _iter_posterior(self):
        return self._iter_extra_comments(idaapi.E_NEXT)

    @property
    def posterior(self):
        """Posterior Comment"""
        return "\n".join(self._iter_posterior())

    @posterior.setter
    @updates_ui
    def posterior(self, comment):
        # Mirrors the anterior setter, using E_NEXT (lines below the item).
        if not comment:
            idaapi.del_extra_cmt(self._ea, idaapi.E_NEXT)
            return
        index = 0
        for index, line in enumerate(comment.splitlines()):
            idaapi.update_extra_cmt(self._ea, idaapi.E_NEXT + index, line)
        idaapi.del_extra_cmt(self._ea, idaapi.E_NEXT + (index + 1))

    def __repr__(self):
        # NOTE(review): "reqular" is a typo in the repr format string; it is
        # a runtime string and therefore left unchanged here.
        return ("Comments("
                "ea=0x{ea:08X},"
                " reqular={regular},"
                " repeat={repeat},"
                " anterior={anterior},"
                " posterior={posterior})").format(
            ea=self._ea,
            regular=repr(self.regular),
            repeat=repr(self.repeat),
            anterior=repr(self.anterior),
            posterior=repr(self.posterior))
class Line(object):
    """
    An IDA Line.

    This object encapsulates many of IDA's line-handling APIs in an easy to use
    and object oriented way.
    """

    class UseCurrentAddress(object):
        """
        This is a filler object to replace `None` for the EA.

        In many cases, a programmer can accidentally initialize the
        `Line` object with `ea=None`, resulting in the current address.
        Usually, this is not the desired outcome. This object resolves this issue.
        """
        pass

    def __init__(self, ea=UseCurrentAddress, name=None):
        # `ea` and `name` are mutually exclusive ways to select the line.
        if name is not None and ea != self.UseCurrentAddress:
            raise ValueError(("Either supply a name or an address (ea). "
                              "Not both. (ea={!r}, name={!r})").format(ea, name))
        elif name is not None:
            ea = idc.get_name_ea_simple(name)
        elif ea == self.UseCurrentAddress:
            ea = idc.here()
        elif ea is None:
            raise ValueError("`None` is not a valid address. To use the current screen ea, "
                             "use `Line(ea=Line.UseCurrentAddress)` or supply no `ea`.")
        # Snap to the head of the item so the object always covers a whole line.
        self._ea = idaapi.get_item_head(ea)
        self._comments = Comments(self._ea)

    @property
    def flags(self):
        """`FF_*` Flags. See `bytes.hpp`."""
        return idaapi.get_full_flags(self.ea)

    @property
    def is_code(self):
        """Is the line code."""
        return idaapi.is_code(self.flags)

    @property
    def is_data(self):
        """Is the line data."""
        return idaapi.is_data(self.flags)

    @property
    def is_unknown(self):
        """Is the line unknown."""
        return idaapi.is_unknown(self.flags)

    @property
    def is_tail(self):
        """Is the line a tail."""
        return idaapi.is_tail(self.flags)

    @property
    def is_string(self):
        """Is the line a string."""
        return data.is_string(self.ea)

    @property
    def comments(self):
        """Comments"""
        return self._comments

    @property
    def ea(self):
        """Line EA"""
        return self._ea

    # Alias: a line's start address is simply its EA.
    start_ea = ea

    @property
    def end_ea(self):
        """End address of line (first byte after the line)"""
        return self.ea + self.size

    @property
    def disasm(self):
        """Line Disassembly"""
        return idc.GetDisasm(self.ea)

    @property
    def type(self):
        """return the type of the Line """
        # NOTE(review): boolean dict keys collapse to at most a True and a
        # False entry, so when several predicates are true the one listed
        # last wins -- confirm this precedence is intended.
        properties = {self.is_code: "code",
                      self.is_data: "data",
                      self.is_string: "string",
                      self.is_tail: "tail",
                      self.is_unknown: "unknown"}
        for k, v in properties.items():
            if k: return v

    def __repr__(self):
        return "[{:08X}] {}".format(self.ea, self.disasm)

    @property
    def xrefs_from(self):
        """Xrefs from this line.

        :return: Xrefs as `sark.code.xref.Xref` objects.
        """
        return list(map(Xref, idautils.XrefsFrom(self.ea)))

    @property
    def calls_from(self):
        # Only the xrefs whose type marks a call instruction.
        return (xref for xref in self.xrefs_from if xref.type.is_call)

    @property
    def drefs_from(self):
        """Destination addresses of data references from this line."""
        return idautils.DataRefsFrom(self.ea)

    @property
    def crefs_from(self):
        """Destination addresses of code references from this line."""
        return idautils.CodeRefsFrom(self.ea, 1)

    @property
    def xrefs_to(self):
        """Xrefs to this line.

        Returns:
            Xrefs as `sark.code.xref.Xref` objects.
        """
        return list(map(Xref, idautils.XrefsTo(self.ea)))

    @property
    def drefs_to(self):
        """Source addresses of data references to this line."""
        return idautils.DataRefsTo(self.ea)

    @property
    def crefs_to(self):
        """Source addresses of code references to this line."""
        return idautils.CodeRefsTo(self.ea, 1)

    @property
    def size(self):
        """Size (in bytes) of the line."""
        return idaapi.get_item_size(self.ea)

    @property
    def name(self):
        """Name of the line (the label shown in IDA)."""
        return idaapi.get_ea_name(self.ea)

    @name.setter
    def name(self, value):
        idc.set_name(self.ea, value)

    @property
    def demangled(self):
        """Return the demangled name of the line. If none exists, return `.name`"""
        return demangle(self.name)

    @property
    def insn(self):
        """Instruction"""
        return Instruction(self.ea)

    @property
    def color(self):
        """Line color in IDA View"""
        color = idc.get_color(self.ea, idc.CIC_ITEM)
        # 0xFFFFFFFF is IDA's sentinel for "no color set".
        if color == 0xFFFFFFFF:
            return None

        return color

    @color.setter
    @updates_ui
    def color(self, color):
        """Line Color in IDA View.

        Set color to `None` to clear the color.
        """
        if color is None:
            color = 0xFFFFFFFF

        idc.set_color(self.ea, idc.CIC_ITEM, color)

    @property
    def next(self):
        """The next line."""
        return Line(self.end_ea)

    @property
    def prev(self):
        """The previous line."""
        # The constructor snaps ea-1 back to the head of the previous item.
        return Line(self.ea - 1)

    @property
    def has_name(self):
        """Does the current line have a non-trivial (non-dummy) name?"""
        return idaapi.has_name(self.flags)

    @property
    def offset_name(self):
        # Name relative to the enclosing named item (e.g. "func+0x10").
        return get_offset_name(self.ea)

    @property
    def bytes(self):
        # Raw bytes covered by this line.
        return idaapi.get_bytes(self.ea, self.size)

    def __eq__(self, other):
        # Lines are equal when they denote the same (head) address.
        if not isinstance(other, Line):
            return False
        return self.ea == other.ea

    def __ne__(self, other):
        return not self.__eq__(other)
def lines(start=None, end=None, reverse=False, selection=False):
    """Iterate the lines in a range.

    Args:
        start: Starting address; start of the IDB when `None`.
        end: End address; end of the IDB when `None`.
        reverse: Iterate from `end` down to `start` when true.
        selection: When true, take the range from the current selection
            instead of `start`/`end`.

    Returns:
        iterator of `Line` objects.
    """
    if selection:
        start, end = get_selection()
    else:
        start, end = fix_addresses(start, end)

    if reverse:
        # Walk item heads backwards, starting from the last byte in range.
        ea = idaapi.get_item_head(end - 1)
        while ea >= start:
            yield Line(ea)
            ea = idaapi.get_item_head(ea - 1)
    else:
        # Walk forwards, advancing by each item's size.
        ea = idaapi.get_item_head(start)
        while ea < end:
            yield Line(ea)
            ea += idaapi.get_item_size(ea)
| {
"content_hash": "aa8df64f865d9524fc1183259a3b5950",
"timestamp": "",
"source": "github",
"line_count": 354,
"max_line_length": 92,
"avg_line_length": 26.74576271186441,
"alnum_prop": 0.5700253485424588,
"repo_name": "tmr232/Sark",
"id": "159c4d548598806cd2b8981eba2e8a32b280ec77",
"size": "9468",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sark/code/line.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "75"
},
{
"name": "Python",
"bytes": "164329"
}
],
"symlink_target": ""
} |
import sys
import os
import os.path
import tempfile
import subprocess
import py_compile
import contextlib
import shutil
import zipfile
# Executing the interpreter in a subprocess
def _assert_python(expected_success, *args, **env_vars):
cmd_line = [sys.executable]
if not env_vars:
cmd_line.append('-E')
cmd_line.extend(args)
# Need to preserve the original environment, for in-place testing of
# shared library builds.
env = os.environ.copy()
env.update(env_vars)
p = subprocess.Popen(cmd_line, stdin=subprocess.PIPE,
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
env=env)
try:
out, err = p.communicate()
finally:
subprocess._cleanup()
p.stdout.close()
p.stderr.close()
rc = p.returncode
if (rc and expected_success) or (not rc and not expected_success):
raise AssertionError(
"Process return code is %d, "
"stderr follows:\n%s" % (rc, err.decode('ascii', 'ignore')))
return rc, out, err
def assert_python_ok(*args, **env_vars):
    """
    Run the interpreter with `args` (plus optional environment variables
    `env_vars`) expecting success, and return a (return code, stdout,
    stderr) tuple.
    """
    return _assert_python(True, *args, **env_vars)
def assert_python_failure(*args, **env_vars):
    """
    Run the interpreter with `args` (plus optional environment variables
    `env_vars`) expecting failure, and return a (return code, stdout,
    stderr) tuple.
    """
    return _assert_python(False, *args, **env_vars)
def python_exit_code(*args):
    """Spawn the interpreter with `args`, discard its output and return
    its exit status."""
    cmd_line = [sys.executable, '-E'] + list(args)
    with open(os.devnull, 'w') as devnull:
        return subprocess.call(cmd_line, stdout=devnull,
                               stderr=subprocess.STDOUT)
def spawn_python(*args, **kwargs):
    """Start a fresh interpreter process wired up with pipes.

    stdin is a pipe; stdout and stderr are merged into a single pipe.
    Extra keyword arguments are forwarded to `subprocess.Popen`.
    """
    cmd_line = [sys.executable, '-E'] + list(args)
    return subprocess.Popen(cmd_line, stdin=subprocess.PIPE,
                            stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
                            **kwargs)
def kill_python(p):
    """Close stdin, drain stdout, reap the child and return its output."""
    p.stdin.close()
    output = p.stdout.read()
    p.stdout.close()
    # Reap the child so it does not appear as a leak when running
    # under regrtest -R.
    p.wait()
    subprocess._cleanup()
    return output
def run_python(*args, **kwargs):
    """Run the interpreter to completion and return (exit status, stdout).

    Propagates -O to the child when the current interpreter is running
    in optimized mode.
    """
    if not __debug__:
        args = ('-O',) + args
    proc = spawn_python(*args, **kwargs)
    stdout_data = kill_python(proc)
    return proc.wait(), stdout_data
# Script creation utilities
@contextlib.contextmanager
def temp_dir():
    """Context manager yielding a fresh temporary directory, removed on exit.

    The path is resolved with realpath so symlinked temp locations
    compare equal to what scripts later report.
    """
    path = os.path.realpath(tempfile.mkdtemp())
    try:
        yield path
    finally:
        shutil.rmtree(path)
def make_script(script_dir, script_basename, source):
    """Write `source` to `<script_dir>/<script_basename>.py` and return
    the script's full path."""
    script_filename = script_basename + os.extsep + 'py'
    script_name = os.path.join(script_dir, script_filename)
    # A context manager guarantees the file is flushed and closed even if
    # the write fails part way through (the original leaked on error).
    with open(script_name, 'w') as script_file:
        script_file.write(source)
    return script_name
def compile_script(script_name):
    """Byte-compile `script_name` and return the expected compiled path.

    Normal runs name the result `.pyc`, optimized runs `.pyo`
    (CPython 2 layout).
    """
    py_compile.compile(script_name, doraise=True)
    suffix = 'c' if __debug__ else 'o'
    return script_name + suffix
def make_zip_script(zip_dir, zip_basename, script_name, name_in_zip=None):
    """Pack `script_name` into `<zip_dir>/<zip_basename>.zip`.

    `name_in_zip` overrides the archive member name; it defaults to the
    script's basename. Returns (zip path, pseudo-path of the member
    inside the zip).
    """
    zip_filename = zip_basename + os.extsep + 'zip'
    zip_name = os.path.join(zip_dir, zip_filename)
    if name_in_zip is None:
        name_in_zip = os.path.basename(script_name)
    # The context manager finalizes the archive even on error; the
    # original left the handle open if write() raised.
    with zipfile.ZipFile(zip_name, 'w') as zip_file:
        zip_file.write(script_name, name_in_zip)
    return zip_name, os.path.join(zip_name, name_in_zip)
def make_pkg(pkg_dir):
    """Create directory `pkg_dir` with an empty __init__.py inside,
    turning it into an importable package."""
    os.mkdir(pkg_dir)
    make_script(pkg_dir, '__init__', '')
def make_zip_pkg(zip_dir, zip_basename, pkg_name, script_basename,
                 source, depth=1, compiled=False):
    """Create `<zip_dir>/<zip_basename>.zip` containing a package tree.

    The package `pkg_name` is nested `depth` levels deep, each level an
    importable package, with the script placed at the innermost level.
    When `compiled` is true, byte-compiled files are shipped instead of
    the sources. The temporary on-disk files are removed afterwards.
    Returns (zip path, pseudo-path of the packaged script).
    """
    unlink = []
    init_name = make_script(zip_dir, '__init__', '')
    unlink.append(init_name)
    init_basename = os.path.basename(init_name)
    script_name = make_script(zip_dir, script_basename, source)
    unlink.append(script_name)
    if compiled:
        # Swap in the .pyc/.pyo paths and remember them for cleanup too.
        init_name = compile_script(init_name)
        script_name = compile_script(script_name)
        unlink.extend((init_name, script_name))
    pkg_names = [os.sep.join([pkg_name] * i) for i in range(1, depth + 1)]
    script_name_in_zip = os.path.join(pkg_names[-1],
                                      os.path.basename(script_name))
    zip_filename = zip_basename + os.extsep + 'zip'
    zip_name = os.path.join(zip_dir, zip_filename)
    # Context manager finalizes the archive on all paths (the original
    # leaked the handle on error).
    with zipfile.ZipFile(zip_name, 'w') as zip_file:
        # One __init__ per nesting level makes every directory a package.
        for name in pkg_names:
            init_name_in_zip = os.path.join(name, init_basename)
            zip_file.write(init_name, init_name_in_zip)
        zip_file.write(script_name, script_name_in_zip)
    for name in unlink:
        os.unlink(name)
    return zip_name, os.path.join(zip_name, script_name_in_zip)
| {
"content_hash": "1bb7d55d8d4f9a21eacda4780c6499e1",
"timestamp": "",
"source": "github",
"line_count": 157,
"max_line_length": 83,
"avg_line_length": 34.044585987261144,
"alnum_prop": 0.6314312441534145,
"repo_name": "xxd3vin/spp-sdk",
"id": "1b5b0bf315fb8123d914a557e10c2585a524edb9",
"size": "5471",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "opt/Python27/Lib/test/script_helper.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "759663"
},
{
"name": "C#",
"bytes": "9892"
},
{
"name": "C++",
"bytes": "56155"
},
{
"name": "CSS",
"bytes": "6226"
},
{
"name": "F#",
"bytes": "3065"
},
{
"name": "FORTRAN",
"bytes": "7795"
},
{
"name": "Forth",
"bytes": "506"
},
{
"name": "JavaScript",
"bytes": "163687"
},
{
"name": "Makefile",
"bytes": "895"
},
{
"name": "Pascal",
"bytes": "8738"
},
{
"name": "Python",
"bytes": "22177886"
},
{
"name": "Shell",
"bytes": "15704"
},
{
"name": "Tcl",
"bytes": "2065501"
},
{
"name": "Visual Basic",
"bytes": "481"
}
],
"symlink_target": ""
} |
# Exploratory Python 2 script: resolves an LTS environment spec against a
# local conda channel using legacy conda internals (conda.fetch/conda.resolve).
import conda
import yaml
import os
from subprocess import check_call
import shutil
# Load the channel source definitions and the LTS environment spec.
with open('sources.yaml', 'r') as fh:
    sources = yaml.safe_load(fh)
with open('env.specs/lts.yaml', 'r') as fh:
    env_lts = yaml.safe_load(fh)
print sources
print env_lts
import conda.plan
import conda_build.config
import conda
from conda.api import get_index
from conda.fetch import fetch_repodata
# Local file:// channel holding the pre-built packages to resolve against.
url = 'file:///data/local/itpe/miniconda/conda-builds-scientific_software_stack_since_05_15/linux-64/'
repo = fetch_repodata(url)
from conda.resolve import Resolve, MatchSpec
print repo
# Resolve the requested package specs against the fetched repodata.
r = Resolve(repo['packages'])
r.solve(env_lts['packages'], features=set())
r.solve2(env_lts['packages'], features=set())
# conda.api.fetch_repodata is the underlying index loader.
#index = get_index(channel_urls=channel_urls,
#                  prepend=not args.override_channels,
#                  use_cache=args.use_index_cache,
#                  unknown=args.unknown,
#                  json=args.json,
#                  offline=args.offline)
# Quick MatchSpec sanity check against a known filename.
from conda.resolve import MatchSpec
fn = 'numpy-1.8.3-py27_0.tar.bz2'
ms = MatchSpec('numpy >=1.7,<1.9')
print ms.match(fn)
#for name in orig_packages:
#    pkgs = sorted(r.get_pkgs(MatchSpec(name)))
| {
"content_hash": "63df0f6e1d7ea3b82502e876a688578f",
"timestamp": "",
"source": "github",
"line_count": 63,
"max_line_length": 102,
"avg_line_length": 20.77777777777778,
"alnum_prop": 0.6455309396485867,
"repo_name": "marqh/conda-manifest",
"id": "ba55d3960c41aa8a17a42f212ec9fe69558921a6",
"size": "1309",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "resolve.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "42643"
}
],
"symlink_target": ""
} |
from google.cloud import pubsublite_v1
def sample_create_topic():
    """Create a Pub/Sub Lite topic via the AdminService (synchronous sample)."""
    # Create a client
    admin_client = pubsublite_v1.AdminServiceClient()

    # Initialize request argument(s) with placeholder values
    topic_request = pubsublite_v1.CreateTopicRequest(
        parent="parent_value",
        topic_id="topic_id_value",
    )

    # Make the request and handle the response
    response = admin_client.create_topic(request=topic_request)
    print(response)

# [END pubsublite_generated_pubsublite_v1_AdminService_CreateTopic_sync]
| {
"content_hash": "9b2469292ffc54a83be3b7d789246c6d",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 72,
"avg_line_length": 24.65,
"alnum_prop": 0.6997971602434077,
"repo_name": "googleapis/python-pubsublite",
"id": "ea6881c1057ae5a580941baba3bd152f4db621b8",
"size": "1504",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "samples/generated_samples/pubsublite_generated_pubsublite_v1_admin_service_create_topic_sync.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2050"
},
{
"name": "Python",
"bytes": "1689513"
},
{
"name": "Shell",
"bytes": "30672"
}
],
"symlink_target": ""
} |
import requests
from requests import exceptions
import cStringIO
from oslo.config import cfg
from heat.common import urlfetch
from heat.tests.common import HeatTestCase
from heat.openstack.common.py3kcompat import urlutils
class Response:
    """Minimal stand-in for a `requests` response object used by the tests."""

    def __init__(self, buf=''):
        # Remaining body content served out by iter_content().
        self.buf = buf

    def iter_content(self, chunk_size=1):
        """Yield the buffered content `chunk_size` characters at a time,
        consuming the buffer as the iterator advances."""
        while self.buf:
            # Truncation happens after the consumer resumes, mirroring
            # streaming reads.
            yield self.buf[:chunk_size]
            self.buf = self.buf[chunk_size:]

    def raise_for_status(self):
        """Pretend the HTTP status was OK."""
        pass
class UrlFetchTest(HeatTestCase):
    """Tests for heat.common.urlfetch using mox-style stubs (self.m)."""

    def setUp(self):
        super(UrlFetchTest, self).setUp()
        # Every test stubs requests.get; expectations are set per-test.
        self.m.StubOutWithMock(requests, 'get')

    def test_file_scheme_default_behaviour(self):
        # file:// is rejected unless explicitly allowed.
        self.m.ReplayAll()
        self.assertRaises(IOError, urlfetch.get, 'file:///etc/profile')
        self.m.VerifyAll()

    def test_file_scheme_supported(self):
        data = '{ "foo": "bar" }'
        url = 'file:///etc/profile'
        self.m.StubOutWithMock(urlutils, 'urlopen')
        urlutils.urlopen(url).AndReturn(cStringIO.StringIO(data))
        self.m.ReplayAll()
        self.assertEqual(data, urlfetch.get(url, allowed_schemes=['file']))
        self.m.VerifyAll()

    def test_file_scheme_failure(self):
        # URLError from urlopen is surfaced as IOError.
        url = 'file:///etc/profile'
        self.m.StubOutWithMock(urlutils, 'urlopen')
        urlutils.urlopen(url).AndRaise(urlutils.URLError('oops'))
        self.m.ReplayAll()
        self.assertRaises(IOError, urlfetch.get, url, allowed_schemes=['file'])
        self.m.VerifyAll()

    def test_http_scheme(self):
        url = 'http://example.com/template'
        data = '{ "foo": "bar" }'
        response = Response(data)
        requests.get(url, stream=True).AndReturn(response)
        self.m.ReplayAll()
        self.assertEqual(data, urlfetch.get(url))
        self.m.VerifyAll()

    def test_https_scheme(self):
        url = 'https://example.com/template'
        data = '{ "foo": "bar" }'
        response = Response(data)
        requests.get(url, stream=True).AndReturn(response)
        self.m.ReplayAll()
        self.assertEqual(data, urlfetch.get(url))
        self.m.VerifyAll()

    def test_http_error(self):
        # HTTP-level errors are converted to IOError.
        url = 'http://example.com/template'
        requests.get(url, stream=True).AndRaise(exceptions.HTTPError())
        self.m.ReplayAll()
        self.assertRaises(IOError, urlfetch.get, url)
        self.m.VerifyAll()

    def test_non_exist_url(self):
        # Timeouts are converted to IOError as well.
        url = 'http://non-exist.com/template'
        requests.get(url, stream=True).AndRaise(exceptions.Timeout())
        self.m.ReplayAll()
        self.assertRaises(IOError, urlfetch.get, url)
        self.m.VerifyAll()

    def test_garbage(self):
        # A string that is not a URL at all.
        self.m.ReplayAll()
        self.assertRaises(IOError, urlfetch.get, 'wibble')
        self.m.VerifyAll()

    def test_max_fetch_size_okay(self):
        # Template below the configured size limit fetches cleanly.
        url = 'http://example.com/template'
        data = '{ "foo": "bar" }'
        response = Response(data)
        cfg.CONF.set_override('max_template_size', 500)
        requests.get(url, stream=True).AndReturn(response)
        self.m.ReplayAll()
        urlfetch.get(url)
        self.m.VerifyAll()

    def test_max_fetch_size_error(self):
        # Template above the limit raises IOError mentioning the excess.
        url = 'http://example.com/template'
        data = '{ "foo": "bar" }'
        response = Response(data)
        cfg.CONF.set_override('max_template_size', 5)
        requests.get(url, stream=True).AndReturn(response)
        self.m.ReplayAll()
        exception = self.assertRaises(IOError, urlfetch.get, url)
        self.assertIn("Template exceeds", str(exception))
        self.m.VerifyAll()
| {
"content_hash": "e7fa67dc08fcbc2a51996973feb624d0",
"timestamp": "",
"source": "github",
"line_count": 117,
"max_line_length": 79,
"avg_line_length": 30.65811965811966,
"alnum_prop": 0.6177864510733203,
"repo_name": "ntt-sic/heat",
"id": "20557fa45fba2547a6b85138cac99b25042e130d",
"size": "4207",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "heat/tests/test_urlfetch.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "3336181"
},
{
"name": "Shell",
"bytes": "22168"
}
],
"symlink_target": ""
} |
"""Python library module for LIS3MDL magnetometer.
This module for the Raspberry Pi computer helps interface the LIS3MDL
magnetometer. The library makes it easy to read the raw magnetometer
values through the I²C interface.
The datasheet for the LSM6DS33 is available at
[https://www.pololu.com/file/download/LIS3MDL.pdf?file_id=0J1089]
"""
from i2c import I2C
from constants import *
class LIS3MDL(I2C):
    """ Set up and access LIS3MDL magnetometer.
    """

    # Output registers used by the magnetometer (low/high byte per axis).
    magnetometer_registers = [
        LIS3MDL_OUT_X_L,    # low byte of X value
        LIS3MDL_OUT_X_H,    # high byte of X value
        LIS3MDL_OUT_Y_L,    # low byte of Y value
        LIS3MDL_OUT_Y_H,    # high byte of Y value
        LIS3MDL_OUT_Z_L,    # low byte of Z value
        LIS3MDL_OUT_Z_H,    # high byte of Z value
    ]

    def __init__(self, bus_id=1):
        """ Set up I2C connection and initialize some flags and values.
        """
        super(LIS3MDL, self).__init__(bus_id)
        # Guards get_magnetometer_raw(); set by enable().
        self.is_magnetometer_enabled = False

    def __del__(self):
        """ Clean up. """
        try:
            # Power down magnetometer
            self.write_register(LIS3MDL_ADDR, LIS3MDL_CTRL_REG3, 0x03)
            super(LIS3MDL, self).__del__()
        except:
            # Best-effort cleanup; errors during interpreter shutdown are
            # deliberately ignored.
            pass

    def enable(self):
        """ Enable and set up the magnetometer and determine
        whether to auto increment registers during I2C read operations.
        """
        # Disable magnetometer and temperature sensor first
        self.write_register(LIS3MDL_ADDR, LIS3MDL_CTRL_REG1, 0x00)
        self.write_register(LIS3MDL_ADDR, LIS3MDL_CTRL_REG3, 0x03)

        # Enable device in continuous conversion mode
        self.write_register(LIS3MDL_ADDR, LIS3MDL_CTRL_REG3, 0x00)

        # Initial value for CTRL_REG1
        ctrl_reg1 = 0x00

        # Ultra-high-performance mode for X and Y
        # Output data rate 10Hz
        # binary value -> 01110000b, hex value -> 0x70
        ctrl_reg1 += 0x70

        # +/- 4 gauss full scale
        self.write_register(LIS3MDL_ADDR, LIS3MDL_CTRL_REG2, 0x00)

        # Ultra-high-performance mode for Z
        # binary value -> 00001100b, hex value -> 0x0c
        self.write_register(LIS3MDL_ADDR, LIS3MDL_CTRL_REG4, 0x0c)

        self.is_magnetometer_enabled = True

        # Write calculated value to the CTRL_REG1 register
        self.write_register(LIS3MDL_ADDR, LIS3MDL_CTRL_REG1, ctrl_reg1)

    def get_magnetometer_raw(self):
        """ Return 3D vector of raw magnetometer data.
        """
        # Check if magnetometer has been enabled
        if not self.is_magnetometer_enabled:
            raise(Exception('Magnetometer is not enabled'))

        return self.read_3d_sensor(LIS3MDL_ADDR, self.magnetometer_registers)
| {
"content_hash": "a2853cd151b9d6e155bdbfed495ca652",
"timestamp": "",
"source": "github",
"line_count": 83,
"max_line_length": 77,
"avg_line_length": 33.63855421686747,
"alnum_prop": 0.6378939828080229,
"repo_name": "SvetoslavKuzmanov/altimu10v5",
"id": "f359aeb80388c40a4268afd8e21d138dedc28c84",
"size": "2818",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "altimu10v5/lis3mdl.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "16643"
}
],
"symlink_target": ""
} |
from trakt.interfaces import auth
from trakt.interfaces import movies
from trakt.interfaces import oauth
from trakt.interfaces import scrobble
from trakt.interfaces import search
from trakt.interfaces import shows
from trakt.interfaces import sync
from trakt.interfaces import users
# Registry of every interface class exposed through the client, grouped by
# the API route each one serves; consumed by get_interfaces()/construct_map().
INTERFACES = [
    # /
    auth.AuthInterface,
    oauth.OAuthInterface,
    scrobble.ScrobbleInterface,
    search.SearchInterface,

    # /sync/
    sync.SyncInterface,
    sync.SyncCollectionInterface,
    sync.SyncHistoryInterface,
    sync.SyncPlaybackInterface,
    sync.SyncRatingsInterface,
    sync.SyncWatchedInterface,
    sync.SyncWatchlistInterface,

    # /shows/
    shows.ShowsInterface,

    # /movies/
    movies.MoviesInterface,

    # /users/
    users.UsersInterface,
    users.UsersSettingsInterface,

    # /users/lists/
    users.UsersListsInterface,
    users.UsersListInterface
]
def get_interfaces():
    """Yield (path_segments, interface_class) pairs for each routable interface."""
    for interface in INTERFACES:
        # Interfaces without a path are not routable.
        if not interface.path:
            continue

        trimmed = interface.path.strip('/')
        segments = trimmed.split('/') if trimmed else []

        yield segments, interface
def construct_map(client, d=None, interfaces=None):
    """Build a nested dict mapping path segments to interface instances.

    Each (path, interface) pair from `interfaces` (default: the module's
    registered interfaces) is placed at its path in the tree; when an
    interface sits at a node that also has children, it is stored under
    the `None` key of that node's dict.

    :param client: client instance each interface class is constructed with.
    :param d: existing dict to populate (a fresh one when `None`).
    :param interfaces: iterable of (path_segments, interface_class) pairs.
    :return: the populated dict.
    """
    if d is None:
        d = {}

    if interfaces is None:
        interfaces = get_interfaces()

    for path, interface in interfaces:
        if not path:
            continue

        key = path.pop(0)

        # Leaf: instantiate the interface directly at this key.
        if not path:
            d[key] = interface(client)
            continue

        value = d.get(key, {})

        # A previously-placed leaf becomes this node's `None` entry.
        if not isinstance(value, dict):
            value = {None: value}

        construct_map(client, value, [(path, interface)])

        d[key] = value

    return d
| {
"content_hash": "5a0f53043171fbdd4cca39a3824272b8",
"timestamp": "",
"source": "github",
"line_count": 85,
"max_line_length": 57,
"avg_line_length": 21.223529411764705,
"alnum_prop": 0.5992239467849224,
"repo_name": "timbooo/traktforalfred",
"id": "3469e0ab99e063ead846170a906e32c07b7ff09d",
"size": "1804",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "trakt/interfaces/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1313047"
}
],
"symlink_target": ""
} |
import unittest
import mock
def _make_credentials():
    """Return a Mock constrained to the google-auth Credentials interface."""
    import google.auth.credentials

    return mock.Mock(spec=google.auth.credentials.Credentials)
class TestClient(unittest.TestCase):
    """Unit tests for google.cloud.runtimeconfig.client.Client construction."""

    @staticmethod
    def _get_target_class():
        # Imported lazily so import errors surface inside the test.
        from google.cloud.runtimeconfig.client import Client

        return Client

    def _make_one(self, *args, **kw):
        # Helper: instantiate the class under test.
        return self._get_target_class()(*args, **kw)

    def test_ctor_wo_client_info(self):
        # Without client_info, a default ClientInfo is attached.
        from google.cloud._http import ClientInfo
        from google.cloud.runtimeconfig._http import Connection

        project = "PROJECT"
        http = object()
        creds = _make_credentials()
        client = self._make_one(project=project, credentials=creds, _http=http)
        self.assertIsInstance(client._connection, Connection)
        self.assertIs(client._credentials, creds)
        self.assertIs(client._http_internal, http)
        self.assertIsInstance(client._connection._client_info, ClientInfo)

    def test_ctor_w_client_info(self):
        # An explicit client_info instance is passed through untouched.
        from google.cloud._http import ClientInfo
        from google.cloud.runtimeconfig._http import Connection

        project = "PROJECT"
        http = object()
        creds = _make_credentials()
        client_info = ClientInfo()
        client = self._make_one(
            project=project, credentials=creds, _http=http, client_info=client_info
        )
        self.assertIsInstance(client._connection, Connection)
        self.assertIs(client._credentials, creds)
        self.assertIs(client._http_internal, http)
        self.assertIs(client._connection._client_info, client_info)

    def test_ctor_w_empty_client_options(self):
        # Empty ClientOptions falls back to the default API endpoint.
        from google.api_core.client_options import ClientOptions

        project = "PROJECT"
        http = object()
        creds = _make_credentials()
        client_options = ClientOptions()
        client = self._make_one(
            project=project,
            credentials=creds,
            _http=http,
            client_options=client_options,
        )
        self.assertEqual(
            client._connection.API_BASE_URL, client._connection.DEFAULT_API_ENDPOINT
        )

    def test_ctor_w_client_options_object(self):
        # A ClientOptions object can override the API endpoint.
        from google.api_core.client_options import ClientOptions

        project = "PROJECT"
        http = object()
        creds = _make_credentials()
        client_options = ClientOptions(
            api_endpoint="https://foo-runtimeconfig.googleapis.com"
        )
        client = self._make_one(
            project=project,
            credentials=creds,
            _http=http,
            client_options=client_options,
        )
        self.assertEqual(
            client._connection.API_BASE_URL, "https://foo-runtimeconfig.googleapis.com"
        )

    def test_ctor_w_client_options_dict(self):
        # A plain dict works as client_options too.
        project = "PROJECT"
        http = object()
        creds = _make_credentials()
        client_options = {"api_endpoint": "https://foo-runtimeconfig.googleapis.com"}
        client = self._make_one(
            project=project,
            credentials=creds,
            _http=http,
            client_options=client_options,
        )
        self.assertEqual(
            client._connection.API_BASE_URL, "https://foo-runtimeconfig.googleapis.com"
        )

    def test_config(self):
        # config() builds a Config bound to this client and project.
        project = "PROJECT"
        CONFIG_NAME = "config_name"
        creds = _make_credentials()
        client = self._make_one(project=project, credentials=creds)
        new_config = client.config(CONFIG_NAME)
        self.assertEqual(new_config.name, CONFIG_NAME)
        self.assertIs(new_config._client, client)
        self.assertEqual(new_config.project, project)
        self.assertEqual(
            new_config.full_name, "projects/%s/configs/%s" % (project, CONFIG_NAME)
        )
        self.assertFalse(new_config.description)
| {
"content_hash": "ceb7e79f92fbc2c009b8937a30123f4a",
"timestamp": "",
"source": "github",
"line_count": 117,
"max_line_length": 87,
"avg_line_length": 32.965811965811966,
"alnum_prop": 0.619652579725175,
"repo_name": "googleapis/python-runtimeconfig",
"id": "a0fef69c4010e303c91d14026581da519f8e53e9",
"size": "4432",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "tests/unit/test_client.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2050"
},
{
"name": "Python",
"bytes": "103102"
},
{
"name": "Shell",
"bytes": "30681"
}
],
"symlink_target": ""
} |
import re
import sys
def parseData(main_file):
    """Parse an Amazon-style review feed into four CSV-ish index files.

    Reads `field: value` lines from the iterable `main_file` and appends to
    reviews.txt (one row per review), pterms.txt (product-title terms),
    rterms.txt (summary/text terms) and scores.txt (score per review) in
    the current working directory. Files are opened in append mode, so
    repeated runs accumulate.
    """
    count = 1
    reviews = []
    pterms = []
    rterms = []
    scores = []
    quote = '&' + 'quot' + ';'
    replace_punctuation = re.compile('[%s]' % re.escape('!"#$%&\'()*+,-./:;<=>?@[\\]^`{|}~'))

    def _clean(line, prefix):
        # Strip the field prefix, escape backslashes, replace double quotes
        # with &quot; and drop the trailing newline.
        return line.replace(prefix, "").replace("\\", "\\\\").replace('"', quote).strip('\n')

    def _index_terms(text, out):
        # Tokenize on punctuation, keep lowercase terms longer than 2 chars,
        # then record the current review number.
        for term in replace_punctuation.sub(" ", text).split(" "):
            if len(term) > 2:
                out.append(term.lower() + ",")
        out.append(str(count) + "\n")

    for x in main_file:
        if "product/productId: " in x:
            # A product id starts a new review row.
            reviews.append(str(count) + ",")
            reviews.append(_clean(x, "product/productId: ") + ",")
        elif "product/title: " in x:
            str_entry = _clean(x, "product/title: ")
            _index_terms(str_entry, pterms)
            reviews.append('"' + str_entry + '",')
        elif "product/price: " in x:
            reviews.append(_clean(x, "product/price: ") + ",")
        elif "review/userId: " in x:
            reviews.append(_clean(x, "review/userId: ") + ",")
        elif "review/profileName: " in x:
            reviews.append('"' + _clean(x, "review/profileName: ") + '",')
        elif "review/helpfulness: " in x:
            reviews.append(_clean(x, "review/helpfulness: ") + ",")
        elif "review/score: " in x:
            str_entry = _clean(x, "review/score: ")
            reviews.append(str_entry + ",")
            scores.append(str_entry + ",")
            scores.append(str(count) + "\n")
        elif "review/time: " in x:
            reviews.append(_clean(x, "review/time: ") + ",")
        elif "review/summary: " in x:
            str_entry = _clean(x, "review/summary: ")
            _index_terms(str_entry, rterms)
            reviews.append('"' + str_entry + '",')
        elif "review/text: " in x:
            # The review text is the last field of a record: close the row
            # and advance the review counter.
            str_entry = _clean(x, "review/text: ")
            reviews.append('"' + str_entry + '"\n')
            _index_terms(str_entry, rterms)
            count += 1

    with open("pterms.txt", "a") as pterms_file:
        pterms_file.writelines(pterms)
    with open("scores.txt", "a") as scores_file:
        scores_file.writelines(scores)
    with open("rterms.txt", "a") as rterms_file:
        rterms_file.writelines(rterms)
    with open("reviews.txt", "a") as reviews_file:
        reviews_file.writelines(reviews)
# Stream the raw review feed from stdin when run as a script.
if __name__ == "__main__":
    parseData(sys.stdin)
"content_hash": "549d75cfae4e425ee22b13b4dcd29286",
"timestamp": "",
"source": "github",
"line_count": 95,
"max_line_length": 112,
"avg_line_length": 38.747368421052634,
"alnum_prop": 0.49144254278728605,
"repo_name": "k----n/InformationRetrievalSystem",
"id": "a76b889a3d16c6f3b753f065588f9763de5e44b3",
"size": "4279",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "data_parser.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Perl",
"bytes": "274"
},
{
"name": "Python",
"bytes": "17555"
},
{
"name": "Shell",
"bytes": "313"
}
],
"symlink_target": ""
} |
"""
This module provides high level utility functions to encode and decode a terrain tile.
Reference
---------
"""
# Enable Shapely "speedups" if available
# http://toblerity.org/shapely/manual.html#performance
from shapely import speedups
from .terrain import TerrainTile
from .topology import TerrainTopology
# Swap in Shapely's C speedups when the platform supports them.
if speedups.available:
    speedups.enable()
def encode(geometries, bounds=None, autocorrectGeometries=False, hasLighting=False,
           watermask=None):
    """
    Function to convert geometries into a
    :class:`quantized_mesh_tile.terrain.TerrainTile` instance.
    Arguments:
    ``geometries``
        A list of shapely polygon geometries representing 3 dimensional triangles.
        or
        A list of WKT or WKB Polygons representing 3 dimensional triangles.
        or
        A list of triplet of vertices using the following structure:
        ``(((lon0/lat0/height0),(...),(lon2,lat2,height2)),(...))``
    ``bounds``
        The bounds of the terrain tile. (west, south, east, north)
        If not defined, the bounds will be computed from the provided geometries.
        Default is `None` (computed bounds).
    ``autocorrectGeometries``
        When set to `True`, it will attempt to fix geometries that are not
        triangles. This often happens when geometries are clipped from an existing mesh.
        Default is `False`.
    ``hasLighting`` (Experimental)
        Indicate whether unit vectors should be computed for the lighting extension.
        Default is `False`.
    ``watermask``
        A water mask list (Optional). Adds rendering water effect.
        The water mask list is either one byte, `[0]` for land and `[255]` for
        water, either a list of 256*256 values ranging from 0 to 255.
        Values in the mask are defined from north-to-south and west-to-east.
        Per default no watermask is applied. Note that the water mask effect depends on
        the texture of the raster layer draped over your terrain.
        Default is `None` (no watermask).
    """
    # Avoid mutable default arguments ([]); normalize None to fresh lists so the
    # previous call signatures keep working unchanged.
    if bounds is None:
        bounds = []
    if watermask is None:
        watermask = []
    topology = TerrainTopology(geometries=geometries,
                               autocorrectGeometries=autocorrectGeometries,
                               hasLighting=hasLighting)
    if len(bounds) == 4:
        # Bounds were provided explicitly by the caller.
        west, south, east, north = bounds
        tile = TerrainTile(topology=topology,
                           watermask=watermask,
                           west=west, south=south, east=east, north=north)
    else:
        # No (complete) bounds: let TerrainTile derive them from the topology.
        tile = TerrainTile(topology=topology, watermask=watermask)
    return tile
def decode(filePath, bounds, hasLighting=False, hasWatermask=False, gzipped=False):
    """
    Read a quantized-mesh terrain tile file into a
    :class:`quantized_mesh_tile.terrain.TerrainTile` instance.
    Arguments:
    ``filePath``
        An absolute or relative path to the terrain tile to read. (Required)
    ``bounds``
        The bounds of the terrain tile. (west, south, east, north) (Required).
    ``hasLighting`` (Experimental)
        Indicate whether the tile has the lighting extension.
        Default is `False`.
    ``hasWatermask``
        Indicate whether the tile has the water-mask extension.
        Default is `False`.
    """
    west, south, east, north = bounds
    terrain_tile = TerrainTile(west=west, south=south, east=east, north=north)
    terrain_tile.fromFile(
        filePath,
        hasLighting=hasLighting,
        hasWatermask=hasWatermask,
        gzipped=gzipped)
    return terrain_tile
| {
"content_hash": "02832d2b3419c80b5bd309a889e8a720",
"timestamp": "",
"source": "github",
"line_count": 113,
"max_line_length": 88,
"avg_line_length": 30.41592920353982,
"alnum_prop": 0.6569682862961885,
"repo_name": "loicgasser/quantized-mesh-tile",
"id": "d3ba8f306789c5b60e96c6ee163548fa4bd0ebcb",
"size": "3437",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "quantized_mesh_tile/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "99523"
}
],
"symlink_target": ""
} |
from p2ner.abstract.interface import Interface
# Copyright 2012 Loris Corazza, Sakis Christakidis
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class NullControl(Interface):
    """No-op control interface: satisfies the Interface contract but does nothing."""

    def initInterface(self):
        """Nothing to initialise for the null implementation."""
        pass
| {
"content_hash": "d0223c74769cd76fcf2682df8ec42e78",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 76,
"avg_line_length": 37.15,
"alnum_prop": 0.7375504710632571,
"repo_name": "schristakidis/p2ner",
"id": "f3b2c17b1f30358115075e8a565ab9506a4a755a",
"size": "743",
"binary": false,
"copies": "1",
"ref": "refs/heads/development",
"path": "p2ner/components/interface/nullcontrol/nullcontrol/nullcontrol.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "303"
},
{
"name": "Python",
"bytes": "1319300"
}
],
"symlink_target": ""
} |
import logging
import os
import quickremovedups.filesize
from tempfile import TemporaryDirectory
from unittest import main, TestCase
class TestFilesize(TestCase):
    """Tests for quickremovedups.filesize duplicate detection and counting."""

    # Sizes used to populate the temp dir: 1024 and 2048 each occur twice,
    # so files 2/3 and 5/4 are the expected duplicate groups.
    _SIZES = [100, 1024, 1024, 2048, 2048, 0, 0]

    def create_file_of_size(self, filename, size):
        """Create *filename* containing exactly *size* space characters."""
        logging.debug('Creating file: %s of size %d' % (filename, size))
        with open(filename, 'w') as filehandle:
            filehandle.write(' ' * size)

    def _populate(self, tempdir):
        """Fill *tempdir* with files named '1'..'7' of self._SIZES sizes."""
        for count, filesize in enumerate(self._SIZES, start=1):
            self.create_file_of_size(os.path.join(tempdir, str(count)), filesize)

    def _expected_duplicates(self, tempdir):
        """Expected duplicates mapping for a directory filled by _populate()."""
        return {
            1024: [os.path.join(tempdir, '2'), os.path.join(tempdir, '3')],
            2048: [os.path.join(tempdir, '5'), os.path.join(tempdir, '4')]
        }

    def test_duplicates(self):
        """duplicates() groups same-sized files by size, skipping unique/empty."""
        with TemporaryDirectory() as tempdir:
            self._populate(tempdir)
            result = dict(quickremovedups.filesize.duplicates(tempdir))
            self.assertDictEqual(result, self._expected_duplicates(tempdir))

    def test_duplicates_status(self):
        """Same result when the status flag is enabled."""
        with TemporaryDirectory() as tempdir:
            self._populate(tempdir)
            result = dict(quickremovedups.filesize.duplicates(tempdir, status=True))
            self.assertDictEqual(result, self._expected_duplicates(tempdir))

    def test_dupfiles_count(self):
        """dupfiles_count() counts all files across all size buckets."""
        input_dict = {
            1024: ['/tmp/tmpux_s30x_/2', '/tmp/tmpux_s30x_/3'],
            2048: ['/tmp/tmpux_s30x_/5', '/tmp/tmpux_s30x_/4']
        }
        result = quickremovedups.filesize.dupfiles_count(input_dict)
        self.assertEqual(result, 4)
if __name__ == '__main__':
    # Run this test module directly via unittest's CLI entry point.
    main()
| {
"content_hash": "f2764fa63badef4e46bb9dc3b2674650",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 85,
"avg_line_length": 35.018181818181816,
"alnum_prop": 0.5586708203530634,
"repo_name": "dwighthubbard/quickremovedups",
"id": "9c09fedc64e0ccaa0fc8b5c33090da845ba2d9a4",
"size": "1949",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_filesize.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "25921"
}
],
"symlink_target": ""
} |
"""Python API for the Microsoft TerraServer.
Copyright (c) 2012 Howard Butler hobu@hobu.net
License:
See the Python 2.6 License (http://www.python.org/2.6/license.html)
"""
import datetime
# WSDL describing the TerraService SOAP endpoint.
wsdl = 'http://msrmaps.com/TerraService2.asmx?WSDL'
from suds.client import Client
# NOTE(review): constructing the suds Client presumably fetches and parses the
# WSDL over the network at import time — importing this module requires
# connectivity. Confirm before using in offline contexts.
client = Client(wsdl)
import logging
logging.basicConfig(level=logging.INFO)
# logging.getLogger('suds.client').setLevel(logging.DEBUG)
logging.getLogger('suds.client').setLevel(logging.ERROR)
# Map human-readable theme names to TerraService numeric theme ids
# (DOQ/ORTHO share id 1, DRG/TOPO share id 2).
themes = {'DOQ':1, 'DRG':2, "ORTHO":1, "TOPO":2}
__author__ = "Howard Butler hobu@hobu.net"
__copyright__ ='(c) 2012 Howard Butler'
# Service endpoint URL and XML namespace (informational constants).
url = "http://msrmaps.com/TerraService2.asmx"
ns = "http://msrmaps.com/"
class pyTerraError(Exception):
    """Custom exception for PyTerra; wraps SOAP/transport failures and validation errors."""
def GetPlaceList(placeName, MaxItems=10, imagePresence=True):
    """Return a list of PlaceItems that share *placeName*.

    imagePresence is serialized as the lowercase string 'true'/'false'
    before being sent to the service.
    """
    try:
        resp = client.service.GetPlaceList(placeName, int(MaxItems),
                                           str(bool(imagePresence)).lower())
    except Exception as e:
        # 'as' form (PEP 3110) is valid on Python 2.6+ and required on Python 3.
        raise pyTerraError(e)
    return resp
def GetPlaceTypes():
    """Return the valid place type names defined by the service WSDL."""
    names = []
    for entry in client.factory.create("PlaceType"):
        names.append(entry[0])
    return names
def GetScales():
    """Return the valid scale names defined by the service WSDL."""
    names = []
    for entry in client.factory.create("Scale"):
        names.append(entry[0])
    return names
def GetPlaceListInRect(upperLeft, lowerRight, ptype, MaxItems):
    """Return a list of places inside the bounding box.

    NOTE: this endpoint is not known to return good results.

    :param upperLeft: instance with .Lat/.Lon attributes (floats)
    :param lowerRight: instance with .Lat/.Lon attributes (floats)
    :param ptype: a place type name from :func:`GetPlaceTypes`
    :param MaxItems: maximum number of results to return
    """
    ul = client.factory.create("LonLatPt")
    ul.Lat = float(upperLeft.Lat)
    ul.Lon = float(upperLeft.Lon)
    lr = client.factory.create("LonLatPt")
    lr.Lat = float(lowerRight.Lat)
    lr.Lon = float(lowerRight.Lon)
    if ptype not in GetPlaceTypes():
        raise pyTerraError("type %s not available" % ptype)
    try:
        resp = client.service.GetPlaceListInRect(ul, lr, ptype, MaxItems)
    except Exception as e:  # PEP 3110 syntax; Python 2.6+ and 3 compatible
        raise pyTerraError(e)
    return resp
def GetPlaceFacts(place):
    """Get facts about a place (park, CityTown, etc.).

    :param place: an instance with .City, .State and .Country attributes
    """
    p = client.factory.create("Place")
    p.City = place.City
    p.State = place.State
    p.Country = place.Country
    try:
        resp = client.service.GetPlaceFacts(p)
    except Exception as e:  # PEP 3110 syntax; Python 2.6+ and 3 compatible
        raise pyTerraError(e)
    return resp
def GetAreaFromPt(center, theme, scale, displayPixWidth, displayPixHeight):
    """Return an area (set of tiles) centered on a point.

    :param center: instance with .Lat/.Lon attributes (floats)
    :param theme: theme id or name (see :data:`themes`)
    :param scale: a valid scale string from :func:`GetScales`
    :param displayPixWidth: display width in pixels
    :param displayPixHeight: display height in pixels
    """
    p = client.factory.create("LonLatPt")
    p.Lat = float(center.Lat)
    p.Lon = float(center.Lon)
    displayPixHeight = int(displayPixHeight)
    # BUG FIX: was int(displayPixHeight) — the width was silently set from the
    # height, ignoring the caller's displayPixWidth.
    displayPixWidth = int(displayPixWidth)
    if scale not in GetScales():
        raise pyTerraError("Scale '%s' is not a valid scale" % scale)
    try:
        int(theme)
    except ValueError:
        # Not numeric: translate a theme name ("DOQ", "TOPO", ...) to its id.
        try:
            theme = themes[theme.upper()]
        except KeyError:
            raise pyTerraError("Theme %s not found" % theme)
    try:
        resp = client.service.GetAreaFromPt(p, theme, scale, displayPixWidth,
                                            displayPixHeight)
    except Exception as e:  # PEP 3110 syntax; Python 2.6+ and 3 compatible
        raise pyTerraError(e)
    return resp
def GetAreaFromTileId(id, displayPixWidth=200, displayPixHeight=200):
    """Return the bounding box for a TileMeta.Id.

    :param id: instance with .X, .Y, .Scene, .Theme and .Scale attributes
    :param displayPixWidth: display width in pixels (default 200)
    :param displayPixHeight: display height in pixels (default 200)
    """
    t = client.factory.create("TileId")
    t.X = int(id.X)
    t.Y = int(id.Y)
    t.Scene = int(id.Scene)
    t.Theme = id.Theme
    t.Scale = id.Scale
    displayPixHeight = int(displayPixHeight)
    # BUG FIX: was int(displayPixHeight) — the width was silently set from the
    # height, ignoring the caller's displayPixWidth.
    displayPixWidth = int(displayPixWidth)
    if t.Scale not in GetScales():
        raise pyTerraError("Scale '%s' is not a valid scale" % t.Scale)
    try:
        int(id.Theme)
    except ValueError:
        # Not numeric: translate a theme name ("DOQ", "TOPO", ...) to its id.
        try:
            t.Theme = themes[id.Theme.upper()]
        except KeyError:
            raise pyTerraError("Theme %s not found" % id.Theme)
    try:
        resp = client.service.GetAreaFromTileId(t, displayPixWidth, displayPixHeight)
    except Exception as e:  # PEP 3110 syntax; Python 2.6+ and 3 compatible
        raise pyTerraError(e)
    return resp
def GetAreaFromRect(upperLeft, lowerRight, theme, scale):
    """Return the tiles for the bounding box defined by two points.

    :param upperLeft: an instance with .Lat and .Lon data members
        The .Lat and .Lon data members represent the WGS84 latitude and
        longitude, provided as floating point numbers.
    :param lowerRight: an instance with .Lat and .Lon data members
        The .Lat and .Lon data members represent the WGS84 latitude and
        longitude, provided as floating point numbers.
    :param theme: integer (or name)
        An integer from one of the valid themes in :data:`themes`.
    :param scale: string
        A valid scale string from :func:`GetScales`.
    """
    ul = client.factory.create("LonLatPt")
    ul.Lat = float(upperLeft.Lat)
    ul.Lon = float(upperLeft.Lon)
    lr = client.factory.create("LonLatPt")
    lr.Lat = float(lowerRight.Lat)
    lr.Lon = float(lowerRight.Lon)
    try:
        int(theme)
    except ValueError:
        # Not numeric: translate a theme name ("DOQ", "TOPO", ...) to its id.
        try:
            theme = themes[theme.upper()]
        except KeyError:
            raise pyTerraError("Theme %s not found" % theme)
    if scale not in GetScales():
        raise pyTerraError("scale %s not available" % scale)
    try:
        resp = client.service.GetAreaFromRect(ul, lr, theme, scale)
    except Exception as e:  # PEP 3110 syntax; Python 2.6+ and 3 compatible
        raise pyTerraError(e)
    return resp
def GetTileMetaFromTileId(id):
    """Get the metadata for a TileMeta.Id.

    :param id: instance with .X, .Y, .Scene, .Theme and .Scale attributes
    """
    t = client.factory.create("TileId")
    t.X = int(id.X)
    t.Y = int(id.Y)
    t.Scene = int(id.Scene)
    t.Theme = id.Theme
    t.Scale = id.Scale
    if t.Scale not in GetScales():
        raise pyTerraError("Scale '%s' is not a valid scale" % t.Scale)
    try:
        int(id.Theme)
    except ValueError:
        # Not numeric: translate a theme name ("DOQ", "TOPO", ...) to its id.
        try:
            t.Theme = themes[id.Theme.upper()]
        except KeyError:
            raise pyTerraError("Theme %s not found" % id.Theme)
    try:
        resp = client.service.GetTileMetaFromTileId(t)
    except Exception as e:  # PEP 3110 syntax; Python 2.6+ and 3 compatible
        raise pyTerraError(e)
    return resp
def GetTileMetaFromLonLatPt(point, theme, scale):
    """Get the TileMeta for a point (lat/lon).

    :param point: instance with .Lat/.Lon attributes (floats)
    :param theme: theme id or name (see :data:`themes`)
    :param scale: a valid scale string from :func:`GetScales`
    """
    p = client.factory.create("LonLatPt")
    p.Lat = float(point.Lat)
    p.Lon = float(point.Lon)
    # Validate scale up front, consistent with the other tile functions
    # (previously invalid scales were passed straight through to the service).
    if scale not in GetScales():
        raise pyTerraError("Scale '%s' is not a valid scale" % scale)
    try:
        int(theme)
    except ValueError:
        # Not numeric: translate a theme name ("DOQ", "TOPO", ...) to its id.
        try:
            theme = themes[theme.upper()]
        except KeyError:
            raise pyTerraError("Theme %s not found" % theme)
    try:
        resp = client.service.GetTileMetaFromLonLatPt(p, theme, scale)
    except Exception as e:  # PEP 3110 syntax; Python 2.6+ and 3 compatible
        raise pyTerraError(e)
    return resp
def GetTile(id):
    """Return the tile image data.

    :param id: instance with .X, .Y, .Scene, .Theme and .Scale attributes
    """
    t = client.factory.create("TileId")
    t.X = int(id.X)
    t.Y = int(id.Y)
    t.Scene = int(id.Scene)
    t.Theme = id.Theme
    t.Scale = id.Scale
    if t.Scale not in GetScales():
        raise pyTerraError("Scale '%s' is not a valid scale" % t.Scale)
    try:
        int(id.Theme)
    except ValueError:
        # Not numeric: translate a theme name ("DOQ", "TOPO", ...) to its id.
        try:
            t.Theme = themes[id.Theme.upper()]
        except KeyError:
            raise pyTerraError("Theme %s not found" % id.Theme)
    try:
        resp = client.service.GetTile(t)
    except Exception as e:  # PEP 3110 syntax; Python 2.6+ and 3 compatible
        raise pyTerraError(e)
    return resp
def ConvertLonLatPtToNearestPlace(point):
    """Convert a lat/lon point into the nearest place.

    :param point: instance with .Lat/.Lon attributes (floats)
    """
    p = client.factory.create("LonLatPt")
    p.Lat = float(point.Lat)
    p.Lon = float(point.Lon)
    try:
        resp = client.service.ConvertLonLatPtToNearestPlace(p)
    except Exception as e:  # PEP 3110 syntax; Python 2.6+ and 3 compatible
        raise pyTerraError(e)
    return resp
def ConvertUtmPtToLonLatPt(utm):
    """Convert a UTM point into lat/lon.

    :param utm: instance with .X, .Y and .Zone attributes
    """
    p = client.factory.create("UtmPt")
    p.X = float(utm.X)
    p.Y = float(utm.Y)
    p.Zone = int(utm.Zone)
    try:
        resp = client.service.ConvertUtmPtToLonLatPt(p)
    except Exception as e:  # PEP 3110 syntax; Python 2.6+ and 3 compatible
        raise pyTerraError(e)
    return resp
def ConvertLonLatPtToUtmPt(point):
    """Convert a lat/lon point into UTM.

    :param point: instance with .Lat/.Lon attributes (floats)
    """
    p = client.factory.create("LonLatPt")
    p.Lat = float(point.Lat)
    p.Lon = float(point.Lon)
    try:
        resp = client.service.ConvertLonLatPtToUtmPt(p)
    except Exception as e:  # PEP 3110 syntax; Python 2.6+ and 3 compatible
        raise pyTerraError(e)
    return resp
def ConvertPlaceToLonLatPt(place):
    """Convert a place struct into a lat/lon point.

    :param place: an instance with .City, .State and .Country attributes
    """
    p = client.factory.create("Place")
    p.City = place.City
    p.State = place.State
    p.Country = place.Country
    try:
        resp = client.service.ConvertPlaceToLonLatPt(p)
    except Exception as e:  # PEP 3110 syntax; Python 2.6+ and 3 compatible
        raise pyTerraError(e)
    return resp
def GetTheme(theme):
    """Return theme information about a theme (Photo, Topo, or Relief).

    :param theme: theme id or name (see :data:`themes`)
    """
    try:
        int(theme)
    except ValueError:
        # Not numeric: translate a theme name ("DOQ", "TOPO", ...) to its id.
        try:
            theme = themes[theme.upper()]
        except KeyError:
            raise pyTerraError("Theme %s not found" % theme)
    try:
        resp = client.service.GetTheme(theme=theme)
    except Exception as e:  # PEP 3110 syntax; Python 2.6+ and 3 compatible
        raise pyTerraError(e)
    return resp
def CountPlacesInRect(upperLeft, lowerRight, ptype):
    """Count the places inside the bounding box with the specified ptype.

    :param upperLeft: instance with .Lat/.Lon attributes (floats)
    :param lowerRight: instance with .Lat/.Lon attributes (floats)
    :param ptype: a place type name from :func:`GetPlaceTypes`
    """
    ul = client.factory.create("LonLatPt")
    ul.Lat = float(upperLeft.Lat)
    ul.Lon = float(upperLeft.Lon)
    lr = client.factory.create("LonLatPt")
    lr.Lat = float(lowerRight.Lat)
    lr.Lon = float(lowerRight.Lon)
    if ptype not in GetPlaceTypes():
        raise pyTerraError("type %s not available" % ptype)
    try:
        resp = client.service.CountPlacesInRect(ul, lr, ptype)
    except Exception as e:  # PEP 3110 syntax; Python 2.6+ and 3 compatible
        raise pyTerraError(e)
    return resp
def GetLatLonMetrics(point):
    """Return lat/lon metric information for a point.

    NOTE(review): upstream purpose of this endpoint is undocumented.

    :param point: instance with .Lat/.Lon attributes (floats)
    """
    p = client.factory.create("LonLatPt")
    p.Lat = float(point.Lat)
    p.Lon = float(point.Lon)
    try:
        resp = client.service.GetLatLonMetrics(p)
    except Exception as e:  # PEP 3110 syntax; Python 2.6+ and 3 compatible
        raise pyTerraError(e)
    return resp
| {
"content_hash": "ff7fe2fda35881dfa6ec10a3d912a9e7",
"timestamp": "",
"source": "github",
"line_count": 387,
"max_line_length": 102,
"avg_line_length": 26.935400516795866,
"alnum_prop": 0.611857252494244,
"repo_name": "hobu/pyTerra",
"id": "dd7cb94b600d82293a169c8e875c22c4b04f320f",
"size": "10424",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyTerra/api.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "36807"
}
],
"symlink_target": ""
} |
from django.test import Client, TestCase
from django.urls import reverse
# Shared Django test client used by all test cases below.
c = Client()
class WebsiteStabilityTestCase(TestCase):
    """Smoke tests: key pages must respond with HTTP 200."""

    def test_frontpage_availability(self):
        response = c.get('/')
        self.assertEqual(response.status_code, 200)

    def test_about_us_availability(self):
        response = c.get(reverse('about'))
        self.assertEqual(response.status_code, 200)
| {
"content_hash": "621c9077e63fb2d5d00af94d5e8a8133",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 66,
"avg_line_length": 28.166666666666668,
"alnum_prop": 0.7189349112426036,
"repo_name": "martinlunde/RealBack",
"id": "8f66f13df691e12dc353b7406504d487b5744466",
"size": "339",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "attendee/tests.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "16631"
},
{
"name": "HTML",
"bytes": "35985"
},
{
"name": "JavaScript",
"bytes": "55462"
},
{
"name": "Python",
"bytes": "82432"
}
],
"symlink_target": ""
} |
'''
This script stores the bitcoin value in bitcoin.txt
This script uses coindesk api.
CoinDesk provides a simple API to make its Bitcoin Price Index (BPI) data programmatically available to others.
You are free to use this API to include our data in any application or website as you see fit,
as long as each page or app that uses it includes the text Powered by CoinDesk, linking to our price page
'''
import urllib,json
from datetime import datetime
from dateutil import tz
#If you don't have these dependencies you will have to get them
#sudo pip2 install python-dateutil
#sudo pip2 install datetime
# CoinDesk current-price endpoint; returns JSON with BPI rates and a timestamp.
url = "https://api.coindesk.com/v1/bpi/currentprice.json"
response = urllib.urlopen(url)
data = json.loads(response.read())

# The 'updated' field ends with a 4-character zone suffix that strptime can't
# parse (presumably " UTC" — the string is then treated as UTC below); strip it.
utc_time_str = str(data['time']['updated'][:-4]).strip()
from_zone = tz.gettz('UTC')
to_zone = tz.tzlocal()
utc = datetime.strptime(utc_time_str, '%b %d, %Y %H:%M:%S')
utc = utc.replace(tzinfo=from_zone)
localtime = str(utc.astimezone(to_zone))
# Keep only the HH:MM:SS portion (drop the date and the numeric UTC offset).
timestring = localtime.split(' ')[1].split('+')[0]

# Drop the final two characters of the USD rate string
# (presumably trimming fractional precision — TODO confirm).
usdval = data['bpi']['USD']['rate'][:-2]

# Use a context manager so the file is always closed, and avoid shadowing
# the builtin name 'file' (fixes the original's manual open/close).
with open('bitcoin.txt', 'w') as outfile:
    outfile.write("Time:" + timestring + '\n')
    outfile.write('$' + usdval)
| {
"content_hash": "0560cddbba900f52115ac5e35cb56d0d",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 112,
"avg_line_length": 35.484848484848484,
"alnum_prop": 0.7284372331340735,
"repo_name": "munagekar/MJRDarkConky",
"id": "2c6657b391feedd743dea2ccc62610bc33865ce3",
"size": "1171",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "AbhishekManjaro/bitcoin.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "3107"
}
],
"symlink_target": ""
} |
import torch
import argparse
import torch.optim as optim
import numpy as np
from os import makedirs
from os.path import join, exists
from torch.utils.data import DataLoader
from torch.autograd import Variable
from utils import *
# Run on GPU when one is available.
use_cuda = torch.cuda.is_available()
# Seed NumPy and PyTorch (CPU and, if present, GPU) RNGs for replicability.
SEED = 123
np.random.seed(SEED)
torch.manual_seed(SEED)
if use_cuda:
    torch.cuda.manual_seed(SEED)
def train_model(data_loc, model_name, noise_dim=128, dim_factor=64,
                K=5, lmbda=10., batch_size=64, n_epochs=140,
                learning_rate=1e-4):
    """Train a WGAN-GP generator/discriminator pair on SVHN.

    Saves network state dicts and per-iteration loss arrays under
    ``models/<model_name>/`` after every epoch.

    Parameters:
        data_loc: location of the SVHN data (passed to SVHNDataset).
        model_name: subdirectory of 'models' for outputs.
        noise_dim: dimensionality of the generator's noise input.
        dim_factor: width multiplier for the networks' hidden layers.
        K: discriminator updates per generator update.
        lmbda: gradient-penalty coefficient.
        batch_size / n_epochs / learning_rate: optimisation hyperparameters.

    NOTE(review): written against the pre-0.4 PyTorch API (Variable,
    loss.data[0]); on torch >= 0.4 use loss.item() instead.
    """
    # create folder to store model results
    model_folder = join('models', model_name)
    if not exists(model_folder):
        makedirs(model_folder)
    # load dataset
    dataset = SVHNDataset(data_loc)
    data_iter = DataLoader(dataset, batch_size=batch_size, shuffle=True,
                           drop_last=True, num_workers=3,
                           pin_memory=True)
    # create networks
    gen_net = Generator(image_shape=dataset.image_shape, noise_dim=noise_dim,
                        dim_factor=dim_factor)
    disc_net = Discriminator(image_shape=dataset.image_shape,
                             dim_factor=dim_factor)
    # initialize optimizers (Adam betas per the WGAN-GP setup)
    gen_optimizer = optim.Adam(gen_net.parameters(), lr=learning_rate,
                               betas=(0.5, 0.9))
    disc_optimizer = optim.Adam(disc_net.parameters(), lr=learning_rate,
                                betas=(0.5, 0.9))
    # create tensors for input to algorithm (reused across iterations)
    gen_noise_tensor = torch.FloatTensor(batch_size, noise_dim)
    gp_alpha_tensor = torch.FloatTensor(batch_size, 1, 1, 1)
    # convert tensors and parameters to cuda
    if use_cuda:
        gen_net = gen_net.cuda()
        disc_net = disc_net.cuda()
        gen_noise_tensor = gen_noise_tensor.cuda()
        gp_alpha_tensor = gp_alpha_tensor.cuda()
    # wrap noise as variable so we can backprop through the graph
    gen_noise_var = Variable(gen_noise_tensor, requires_grad=False)
    # calculate batches per epoch
    bpe = len(dataset) // batch_size
    # create lists to store training loss
    gen_loss = []
    disc_loss = []
    # iterate over epochs
    for ie in range(n_epochs):
        print("-> Entering epoch %i out of %i" % (ie + 1, n_epochs))
        # iterate over data
        for ib, X_data in enumerate(data_iter):
            # wrap data in torch Tensor
            X_tensor = torch.Tensor(X_data)
            if use_cuda:
                X_tensor = X_tensor.cuda()
            X_var = Variable(X_tensor, requires_grad=False)
            # calculate total iterations
            i = bpe * ie + ib
            # train the generator only every K-th iteration (WGAN schedule)
            if (i % K) == (K - 1):
                # train generator
                enable_gradients(gen_net)  # enable gradients for gen net
                disable_gradients(disc_net)  # saves computation on backprop
                gen_net.zero_grad()
                loss = wgan_generator_loss(gen_noise_var, gen_net, disc_net)
                loss.backward()
                gen_optimizer.step()
                # append loss to list (loss.data[0]: pre-0.4 scalar accessor)
                gen_loss.append(loss.data[0])
            # train discriminator (every iteration)
            enable_gradients(disc_net)  # enable gradients for disc net
            disable_gradients(gen_net)  # saves computation on backprop
            disc_net.zero_grad()
            loss = wgan_gp_discriminator_loss(gen_noise_var, X_var, gen_net,
                                              disc_net, lmbda, gp_alpha_tensor)
            loss.backward()
            disc_optimizer.step()
            # append loss to list
            disc_loss.append(loss.data[0])
        # calculate and print mean discriminator loss for past epoch
        mean_disc_loss = np.array(disc_loss[-bpe:]).mean()
        print("Mean discriminator loss over epoch: %.2f" % mean_disc_loss)
        # save model and training loss (checkpointed once per epoch)
        torch.save(gen_net.state_dict(), join(model_folder, 'gen_net.pt'))
        torch.save(disc_net.state_dict(), join(model_folder, 'disc_net.pt'))
        np.save(join(model_folder, 'gen_loss'), gen_loss)
        np.save(join(model_folder, 'disc_loss'), disc_loss)
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    # Positional arguments (order matters for the CLI).
    parser.add_argument("data_loc", help="Location of SVHN data")
    parser.add_argument("model_name",
                        help="Name of directory to store " +
                             "model/training contents")
    # Optional hyperparameters; defaults mirror train_model's signature.
    hyperparams = [
        ("--noise_dim", 128, int, "Noise dim for generator"),
        ("--dim_factor", 64, int, "Dimension factor to use for hidden layers"),
        ("--K", 5, int, "Iterations of discriminator per generator"),
        ("--lmbda", 10., float, "Gradient penalty hyperparameter"),
        ("--batch_size", 64, int, "Batch size for model training"),
        ("--n_epochs", 140, int, "Number of epochs to train for"),
        ("--learning_rate", 1e-4, float, "Learning rate of the model"),
    ]
    for flag, default, flag_type, help_text in hyperparams:
        parser.add_argument(flag, default=default, type=flag_type,
                            help=help_text)
    args = parser.parse_args()
    train_model(**vars(args))
| {
"content_hash": "f7c06ef3ca1d3719db1620dbd96115e5",
"timestamp": "",
"source": "github",
"line_count": 130,
"max_line_length": 79,
"avg_line_length": 42.684615384615384,
"alnum_prop": 0.5709136781402054,
"repo_name": "shariqiqbal2810/WGAN-GP-PyTorch",
"id": "b5d6ba62e14ac7b32a18be95ef1f459c4eacad35",
"size": "5549",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "code/train_SVHN.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "195481"
},
{
"name": "Python",
"bytes": "13047"
}
],
"symlink_target": ""
} |
import tweepy
import time
from credentials import *
import urllib
from pyshorteners import Shortener
# OAuth setup; the key/token names come from the credentials module (star import).
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
api = tweepy.API(auth)
print("Bot started.")
shortener = Shortener("Tinyurl")
# Highest tweet id replied to so far; only tweets newer than this get a reply.
recent_id = 0
# NOTE(review): this boolean is immediately overwritten with the media entity
# below and never read as a flag — looks like dead state.
image = False
# Backend endpoint template: filled with either ('image', media_url) or
# ('recipe', query_text).
url = 'http://34.250.158.151:3000/meals?{}={}'
reply = '@{} here is the environmental impact of your meal: '
while True:
    # Poll all mentions of @ratemyplate_ and answer the unseen ones.
    for tweet in tweepy.Cursor(api.search, q='@ratemyplate_', include_entities=True).items():
        if(tweet.id>recent_id):
            try:
                username = tweet.user.screen_name
                formatted_reply = reply.format(username)
                if 'media' in tweet.entities:
                    image = True
                    image = tweet.entities['media'][0]
                    media = image['media_url']
                    # Shorten a backend link that analyses the attached photo.
                    formatted_reply += shortener.short(url.format('image', media))
                else:
                    # No photo: use the tweet text (minus the leading mention)
                    # as a recipe query.
                    text = '+'.join(tweet.text.split()[1:])
                    location = tweet.user.location  # NOTE(review): assigned but unused
                    formatted_reply += shortener.short(url.format('recipe', text))
                print(formatted_reply)
                # urllib.request.urlopen('http://34.250.158.151:3000/meals?recipe=' + text).read()
                # api.update_status('@' + username + ' here is your meal: ' + urllib.parse.quote_plus(str(tweet.id)), in_reply_to_status_id=tweet.id)
                api.update_status(formatted_reply, in_reply_to_status_id=tweet.id)
                print("Tweet received from ", username)
            except tweepy.TweepError as e:
                print(e.reason)
            except StopIteration:
                break
        # NOTE(review): updated on every iteration; correctness depends on the
        # Cursor's result ordering (presumably newest first) — verify.
        recent_id = tweet.id
    print("Bot finished.")
    time.sleep(30)
| {
"content_hash": "2a1a3794f689ef6bed4b59171a2232d8",
"timestamp": "",
"source": "github",
"line_count": 56,
"max_line_length": 138,
"avg_line_length": 27.678571428571427,
"alnum_prop": 0.6793548387096774,
"repo_name": "harrymt/boeing-hackathon",
"id": "95c0732494f876fd0a112bc3086ae2f9a6428fa2",
"size": "1550",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "twitterbot.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "113"
},
{
"name": "HTML",
"bytes": "2345"
},
{
"name": "JavaScript",
"bytes": "4593"
},
{
"name": "Python",
"bytes": "18719"
}
],
"symlink_target": ""
} |
import sys
import re
from orderedattrdict import AttrDict
from pysnmp.hlapi import *
from lib.utilities import *
SNMP_PORT = 161
PUBLIC = 'public'
BRIDGE_MIB = 'BRIDGE-MIB'
DOT_1D_TP_FDB_PORT = 'dot1dTpFdbPort'
class GetMgmtSwitchConfig(object):
    """Read a management switch's MAC forwarding table via SNMP (BRIDGE-MIB)."""

    def __init__(self, log):
        # Logger used for progress reporting and fatal SNMP errors.
        self.log = log

    def get_port_mac(self, rack, switch_mgmt_ipv4):
        """Walk dot1dTpFdbPort on the switch and collect port->MAC pairs.

        Returns a list of single-entry AttrDicts mapping port number (str) to
        MAC address (one dict per SNMP response row). *rack* is only used for
        log messages. Exits the process (sys.exit(1)) on any SNMP error.
        """
        self.mac_port = []
        # Walk the BRIDGE-MIB dot1dTpFdbPort subtree using SNMPv2 'public'.
        for (
                error_indication,
                error_status,
                error_index,
                var_binds) in nextCmd(
                SnmpEngine(),
                CommunityData(PUBLIC),
                UdpTransportTarget((switch_mgmt_ipv4, SNMP_PORT)),
                ContextData(),
                ObjectType(ObjectIdentity(BRIDGE_MIB, DOT_1D_TP_FDB_PORT)),
                lexicographicMode=False):
            if error_indication:
                # Transport-level failure (e.g. timeout) — fatal.
                self.log.error(error_indication)
                sys.exit(1)
            elif error_status:
                # Agent-reported error; point at the offending var-bind if known.
                self.log.error('%s at %s' % (
                    error_status.prettyPrint(),
                    error_index and var_binds[int(error_index) - 1][0] or '?'))
                sys.exit(1)
            else:
                _dict = AttrDict()
                for var_bind in var_binds:
                    # Parse 'BRIDGE-MIB::dot1dTpFdbPort.<mac> = <port>'.
                    # NOTE(review): the '(' opened after '\.' is balanced only if
                    # PATTERN_MAC (from lib.utilities) supplies the closing
                    # paren — verify group numbering against that pattern.
                    match = re.search(
                        (r'^%s::%s\.(' +
                         '(%s)' +
                         ' = ' +
                         r'(\d+)$') % (
                            BRIDGE_MIB, DOT_1D_TP_FDB_PORT, PATTERN_MAC),
                        str(var_bind))
                    mac = match.group(1)
                    port = str(match.group(3))
                    _dict[port] = mac
                    self.log.info(
                        'Rack: %s - MAC: %s - port: %s' % (rack, mac, port))
                self.mac_port.append(_dict)
        return self.mac_port
| {
"content_hash": "1c351367719698c2e0734e74f4778c77",
"timestamp": "",
"source": "github",
"line_count": 56,
"max_line_length": 79,
"avg_line_length": 33.80357142857143,
"alnum_prop": 0.4400422609614369,
"repo_name": "open-power-ref-design-toolkit/cluster-genesis",
"id": "1dfae6742ad1448728c4f55107647e89dbadcde7",
"size": "2515",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripts/python/get_mgmt_switch_config.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "609082"
},
{
"name": "Shell",
"bytes": "27326"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
import logging
logger = logging.getLogger(__name__)
from IoticAgent.Core.Validation import Validation
from .ResourceBase import ResourceBase
from .const import VALUE, VALUESHARE, VTYPE, LANG, DESCRIPTION, UNIT, SHARETIME, SHAREDATA, RECENT
class Point(ResourceBase):
    """In-memory representation of a point resource.

    Tracks pending changes (value metadata, shared data/time, recent-sample
    config) in the inherited ``self._changes`` list under ``self.lock`` so a
    synchroniser can later flush them.
    """

    # Timestamp format for share times.
    # NOTE(review): not referenced within this class body — possibly used by
    # collaborators via name mangling; confirm before removing.
    __share_time_fmt = '%Y-%m-%dT%H:%M:%S.%fZ'

    def __init__(self, foc, pid, new=False, labels=None, descriptions=None, tags=None, values=None, max_samples=0):
        super(Point, self).__init__(pid, new=new, labels=labels, descriptions=descriptions, tags=tags)
        self.__foc = foc
        self.__pid = pid
        self.__values = {} if values is None else values
        self.__sharetime = None
        self.__sharedata = None
        self.__max_samples = max_samples

    def clear_changes(self):
        """Discard all pending changes and mark the resource as no longer new."""
        with self.lock:
            self._changes = []
            self._set_not_new()

    @property
    def foc(self):
        # Feed-or-control discriminator supplied at construction.
        with self.lock:
            return self.__foc

    def create_value(self, label, vtype=None, lang=None, description=None, unit=None, data=None):
        """Create or update the value entry named *label*.

        If vtype is not specified then the value metadata (lang, description &
        unit) is ignored; data can be specified on its own. Raises
        AttributeError when neither vtype nor data is given.
        """
        label = Validation.label_check_convert(label)
        if vtype is not None:
            vtype = Validation.value_type_check_convert(vtype)
            lang = Validation.lang_check_convert(lang, allow_none=True)
            description = Validation.comment_check_convert(description, allow_none=True)
            # (removed dead no-op 'unit = unit' from the original)
        if vtype is None and data is None:
            raise AttributeError("create_value with no vtype and no data!")
        with self.lock:
            try:
                value = self.__values[label]
            except KeyError:
                value = self.__values[label] = {}
            if vtype is not None:
                new_value = {VTYPE: vtype,
                             LANG: lang,
                             DESCRIPTION: description,
                             UNIT: unit}
                # Only record a metadata change when something actually differs.
                if new_value != value:
                    value.update(new_value)
                    if VALUE + label not in self._changes:
                        logger.debug('Value %s has changed', label)
                        self._changes.append(VALUE + label)
            if data is not None:
                value[SHAREDATA] = data
                if VALUESHARE + label not in self._changes:
                    logger.debug('Sharing value data for %s', label)
                    self._changes.append(VALUESHARE + label)

    @property
    def values(self):
        with self.lock:
            return self.__values

    def share(self, data=None, time=None):
        """Queue *data* and/or *time* to be shared; at least one is required."""
        if data is None and time is None:
            raise ValueError("kwarg data or time required.")
        with self.lock:
            if time is not None:
                self.__sharetime = Validation.datetime_check_convert(time, allow_none=True)
                if SHARETIME not in self._changes:
                    self._changes.append(SHARETIME)
            if data is not None:
                self.__sharedata = data
                if SHAREDATA not in self._changes:
                    self._changes.append(SHAREDATA)

    @property
    def sharetime(self):
        with self.lock:
            return self.__sharetime

    @property
    def sharedata(self):
        with self.lock:
            return self.__sharedata

    def set_recent_config(self, max_samples=0):
        """Set the recent-storage sample limit, recording a change if it differs."""
        with self.lock:
            if max_samples != self.__max_samples:
                self.__max_samples = max_samples
                if RECENT not in self._changes:
                    self._changes.append(RECENT)

    @property
    def recent_config(self):
        with self.lock:
            return self.__max_samples
| {
"content_hash": "f495a1dfb3f25e12b2bf6eb79cfcedbb",
"timestamp": "",
"source": "github",
"line_count": 109,
"max_line_length": 115,
"avg_line_length": 35.403669724770644,
"alnum_prop": 0.55713915522156,
"repo_name": "Iotic-Labs/py-IoticBulkData",
"id": "3f4eefb8f298ad5c809d8a0ef97815187cd051c3",
"size": "4488",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/Ioticiser/Stash/Point.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "199"
},
{
"name": "Python",
"bytes": "56906"
},
{
"name": "Shell",
"bytes": "8280"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Alter Station.system to be optional (blank in forms, NULL in the DB)."""

    dependencies = [
        ('thing', '0005_station_is_unknown'),
    ]

    operations = [
        migrations.AlterField(
            model_name='station',
            name='system',
            # Allow stations with no associated System.
            field=models.ForeignKey(blank=True, to='thing.System', null=True),
            preserve_default=True,
        ),
    ]
| {
"content_hash": "cd77f3b846a533392c5d1f74071d6c6f",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 78,
"avg_line_length": 23.157894736842106,
"alnum_prop": 0.5931818181818181,
"repo_name": "cmptrgeekken/evething",
"id": "cc5c901f134fcac0e19e7df8a43d98fbde75ee7f",
"size": "464",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "thing/migrations/0006_auto_20170407_1919.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "503888"
},
{
"name": "CoffeeScript",
"bytes": "15698"
},
{
"name": "HTML",
"bytes": "464845"
},
{
"name": "JavaScript",
"bytes": "702015"
},
{
"name": "Python",
"bytes": "1195503"
},
{
"name": "Ruby",
"bytes": "583"
},
{
"name": "Shell",
"bytes": "1294"
},
{
"name": "TSQL",
"bytes": "4398"
}
],
"symlink_target": ""
} |
import sqlite3
from pprint import pprint
def columnar(cursor):
    """Transpose a DB-API cursor into ``{column_name: [values, ...]}``.

    Consumes the cursor; each key maps to that column's values in row
    order.
    """
    names = [desc[0] for desc in cursor.description]
    columns = [[] for _ in names]
    for row in cursor:
        for idx, value in enumerate(row):
            columns[idx].append(value)
    return dict(zip(names, columns))
db = sqlite3.connect('data.sqlite')
if 1:  # DROP: start from a clean slate on every run
    db.execute(''' drop table if exists link_em ''')
    db.execute(''' drop table if exists sat_user ''')
    db.execute(''' drop table if exists sat_format ''')
    db.execute(''' drop table if exists sat_user_agg ''')
    db.execute(''' drop table if exists sat_format_agg ''')
if 1:  # INIT: create the link table and satellite tables
    db.execute(''' create table if not exists link_em (day,user,platform,format,em_cnt,em_len) ''')
    db.execute(''' create table if not exists sat_user (user,grp,gender,offer,segment) ''')
    db.execute(''' create table if not exists sat_format (format,kind,category) ''')
    db.execute(''' create table if not exists sat_user_agg (user,em_cnt,em_len) ''')
    db.execute(''' create table if not exists sat_format_agg (format,users_cnt,em_cnt,em_len) ''')
if 1:  # LOAD: small fixture data set
    db.execute(''' insert into link_em values ('2021-06-12','u1','p1','f1',1,2) ''')
    db.execute(''' insert into link_em values ('2021-06-12','u1','p1','f2',2,3) ''')
    db.execute(''' insert into link_em values ('2021-06-12','u2','p1','f1',2,4) ''')
    db.execute(''' insert into link_em values ('2021-06-12','u2','p2','f3',1,2) ''')
    db.execute(''' insert into sat_user values ('u1','a','m','o1','s1') ''')
    db.execute(''' insert into sat_user values ('u2','b','m','o1','s1') ''')
    db.execute(''' insert into sat_user values ('u4','a','f','o1','s1') ''')
# Per-day em_len totals split by user group (A/B).
rows = db.execute('''
    select
        day,
        sum(iif(grp='a',em_len,0)) as a,
        sum(iif(grp='b',em_len,0)) as b
    from link_em em
    left join sat_user u
        on em.user = u.user  -- FIX: without an ON clause the left join was a
                             -- cartesian product, inflating the group sums
    group by day
''')
pprint(columnar(rows))
| {
"content_hash": "d08bdfbbf6d31408658d2a62c3dc859d",
"timestamp": "",
"source": "github",
"line_count": 46,
"max_line_length": 96,
"avg_line_length": 37.95652173913044,
"alnum_prop": 0.6305841924398625,
"repo_name": "mobarski/sandbox",
"id": "11d8d2ed76837a4de2c519b5f8a92aea1b63a794",
"size": "1746",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ab/db.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "862"
},
{
"name": "CSS",
"bytes": "6757"
},
{
"name": "Go",
"bytes": "2645"
},
{
"name": "HTML",
"bytes": "637936"
},
{
"name": "JavaScript",
"bytes": "23025"
},
{
"name": "Jupyter Notebook",
"bytes": "57502"
},
{
"name": "Lua",
"bytes": "549110"
},
{
"name": "Makefile",
"bytes": "580"
},
{
"name": "Python",
"bytes": "1329224"
},
{
"name": "Roff",
"bytes": "561"
}
],
"symlink_target": ""
} |
import sys
import os
sys.path.insert(0, os.path.abspath(os.path.join(os.getcwd(), "..")))
from db.MysqlUtil import initMysql, execute, select, batchInsert, disconnect
from stock.StockTechStatus import StockTechStatus
from stock.GainIndexForecast import getGainForecast
from task.StockTechTask import saveStockTechForecastToDb
from common.StringHelper import toString
def updateForecast():
    """Recompute gain forecasts for every row of s_stock_forecast.

    For each row, rebuilds the per-indicator status map, runs the gain
    forecast, and persists the result via saveStockTechForecastToDb.
    """
    # Indicator status columns occupy item[2]..item[18], in this order.
    indicator_names = ('MACD', 'ADX', 'DMA', 'EXPMA1', 'EMV', 'TRIX', 'WVAD',
                       'VR', 'CR', 'AR', 'PSY', 'K', 'RSI1', 'MTM', 'W&R',
                       'CCI', 'OBV')
    forecastData = select(unicode("select * from s_stock_forecast"))
    for item in forecastData:
        code = item[0]
        date = item[1]
        # Replaces 17 copy-pasted assignments with a data-driven loop.
        dictStatus = {}
        for offset, name in enumerate(indicator_names):
            dictStatus[name] = toString(item[offset + 2])
        bulls = item[19]
        bears = item[20]
        notsure = item[21]
        stockTechStatus = StockTechStatus(code, dictStatus, bulls, bears,
                                          notsure)
        forecastInfo = getGainForecast(stockTechStatus)
        saveStockTechForecastToDb(stockTechStatus, forecastInfo, date)
def main(argv):
    """Entry point: force UTF-8 default encoding, open the MySQL
    connection, then recompute the forecasts from s_stock_forecast."""
    # Python 2 only: reload(sys) restores sys.setdefaultencoding(),
    # which site.py deletes at startup.
    reload(sys)
    sys.setdefaultencoding('utf-8')
    initMysql()
    updateForecast()
if __name__ == '__main__':
    main(sys.argv)
| {
"content_hash": "dea6896f1bda9521920c4746b2b26cf3",
"timestamp": "",
"source": "github",
"line_count": 54,
"max_line_length": 82,
"avg_line_length": 33.31481481481482,
"alnum_prop": 0.6425792106725959,
"repo_name": "zwffff2015/stock",
"id": "209cf00502d2f8d636f13528fb78f61c9b5cc828",
"size": "1815",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "task/UpdateDbFromDbTask.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "208046"
},
{
"name": "Shell",
"bytes": "831"
}
],
"symlink_target": ""
} |
from oslo_policy import policy
from manila.policies import base
# Template for the registered policy names; '%s' is filled with the API
# action ("index" or "show").
BASE_POLICY_NAME = 'share_snapshot_export_location:%s'
# Default oslo.policy rules for the share-snapshot export-location API.
share_snapshot_export_location_policies = [
    policy.DocumentedRuleDefault(
        name=BASE_POLICY_NAME % 'index',
        check_str=base.RULE_DEFAULT,
        description="List export locations of a share snapshot.",
        operations=[
            {
                'method': 'GET',
                'path': '/snapshots/{snapshot_id}/export-locations/',
            }
        ]),
    policy.DocumentedRuleDefault(
        name=BASE_POLICY_NAME % 'show',
        check_str=base.RULE_DEFAULT,
        description="Get details of a specified export location of a "
                    "share snapshot.",
        operations=[
            {
                'method': 'GET',
                'path': ('/snapshots/{snapshot_id}/'
                         'export-locations/{export_location_id}'),
            }
        ]),
]
def list_rules():
    """Return the policy rules defined by this module."""
    return share_snapshot_export_location_policies
| {
"content_hash": "ba1dc85c298f13df64105cae6ab3f82f",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 70,
"avg_line_length": 28.305555555555557,
"alnum_prop": 0.5574092247301276,
"repo_name": "bswartz/manila",
"id": "dea0610e9d9de8f27abe31aee309a688d76a9ec9",
"size": "1592",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "manila/policies/share_snapshot_export_location.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "953"
},
{
"name": "Python",
"bytes": "9952105"
},
{
"name": "Shell",
"bytes": "106606"
}
],
"symlink_target": ""
} |
# NOTE(review): appears to be a negative sandbox test — per its path
# (ut_repytests_encodeisnotallowed) executing str.encode is presumably
# expected to be rejected by the Repy sandbox; confirm against the test
# harness conventions before editing.
"a".encode('ascii')
| {
"content_hash": "a5822efc8db0e8f8edcd538355ebd409",
"timestamp": "",
"source": "github",
"line_count": 2,
"max_line_length": 19,
"avg_line_length": 10.5,
"alnum_prop": 0.5714285714285714,
"repo_name": "sburnett/seattle",
"id": "802e70692d1651385541568a9abb98383e3b2883",
"size": "50",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "repy/tests/ut_repytests_encodeisnotallowed.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "85039"
},
{
"name": "CSS",
"bytes": "44140"
},
{
"name": "Java",
"bytes": "178864"
},
{
"name": "JavaScript",
"bytes": "791008"
},
{
"name": "Perl",
"bytes": "36791"
},
{
"name": "Python",
"bytes": "4683648"
},
{
"name": "Scala",
"bytes": "2587"
},
{
"name": "Shell",
"bytes": "87609"
}
],
"symlink_target": ""
} |
from typing import (
Any,
AsyncIterator,
Awaitable,
Callable,
Iterator,
Optional,
Sequence,
Tuple,
)
from google.cloud.contentwarehouse_v1.types import rule_engine, ruleset_service_request
class ListRuleSetsPager:
    """Iterator over ``list_rule_sets`` results with transparent paging.

    Wraps an initial
    :class:`google.cloud.contentwarehouse_v1.types.ListRuleSetsResponse`
    and exposes ``__iter__`` over its ``rule_sets`` field.  Whenever the
    current response carries a ``next_page_token``, a further
    ``ListRuleSets`` call is issued automatically.

    Attribute access falls through to the most recently fetched response,
    so only that response is retained.
    """

    def __init__(
        self,
        method: Callable[..., ruleset_service_request.ListRuleSetsResponse],
        request: ruleset_service_request.ListRuleSetsRequest,
        response: ruleset_service_request.ListRuleSetsResponse,
        *,
        metadata: Sequence[Tuple[str, str]] = ()
    ):
        """Instantiate the pager.

        Args:
            method (Callable): The API call that produced ``response``;
                reused to fetch subsequent pages.
            request (google.cloud.contentwarehouse_v1.types.ListRuleSetsRequest):
                The initial request object.
            response (google.cloud.contentwarehouse_v1.types.ListRuleSetsResponse):
                The initial response object.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.
        """
        self._method = method
        self._request = ruleset_service_request.ListRuleSetsRequest(request)
        self._response = response
        self._metadata = metadata

    def __getattr__(self, name: str) -> Any:
        # Delegate unknown attributes to the latest response.
        return getattr(self._response, name)

    @property
    def pages(self) -> Iterator[ruleset_service_request.ListRuleSetsResponse]:
        current = self._response
        yield current
        while current.next_page_token:
            self._request.page_token = current.next_page_token
            self._response = self._method(self._request, metadata=self._metadata)
            current = self._response
            yield current

    def __iter__(self) -> Iterator[rule_engine.RuleSet]:
        for page in self.pages:
            for rule_set in page.rule_sets:
                yield rule_set

    def __repr__(self) -> str:
        return "{0}<{1!r}>".format(type(self).__name__, self._response)
class ListRuleSetsAsyncPager:
    """Async iterator over ``list_rule_sets`` results with transparent
    paging.

    Wraps an initial
    :class:`google.cloud.contentwarehouse_v1.types.ListRuleSetsResponse`
    and exposes ``__aiter__`` over its ``rule_sets`` field, issuing
    further ``ListRuleSets`` calls whenever a ``next_page_token`` is
    present.

    Attribute access falls through to the most recently fetched response,
    so only that response is retained.
    """

    def __init__(
        self,
        method: Callable[..., Awaitable[ruleset_service_request.ListRuleSetsResponse]],
        request: ruleset_service_request.ListRuleSetsRequest,
        response: ruleset_service_request.ListRuleSetsResponse,
        *,
        metadata: Sequence[Tuple[str, str]] = ()
    ):
        """Instantiate the pager.

        Args:
            method (Callable): The API call that produced ``response``;
                reused to fetch subsequent pages.
            request (google.cloud.contentwarehouse_v1.types.ListRuleSetsRequest):
                The initial request object.
            response (google.cloud.contentwarehouse_v1.types.ListRuleSetsResponse):
                The initial response object.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.
        """
        self._method = method
        self._request = ruleset_service_request.ListRuleSetsRequest(request)
        self._response = response
        self._metadata = metadata

    def __getattr__(self, name: str) -> Any:
        # Delegate unknown attributes to the latest response.
        return getattr(self._response, name)

    @property
    async def pages(
        self,
    ) -> AsyncIterator[ruleset_service_request.ListRuleSetsResponse]:
        current = self._response
        yield current
        while current.next_page_token:
            self._request.page_token = current.next_page_token
            self._response = await self._method(self._request, metadata=self._metadata)
            current = self._response
            yield current

    def __aiter__(self) -> AsyncIterator[rule_engine.RuleSet]:
        async def _flatten():
            async for page in self.pages:
                for rule_set in page.rule_sets:
                    yield rule_set

        return _flatten()

    def __repr__(self) -> str:
        return "{0}<{1!r}>".format(type(self).__name__, self._response)
| {
"content_hash": "d96e6264bc556eceb1b8a7c593b2dd24",
"timestamp": "",
"source": "github",
"line_count": 142,
"max_line_length": 87,
"avg_line_length": 37.82394366197183,
"alnum_prop": 0.6469931111524856,
"repo_name": "googleapis/google-cloud-python",
"id": "74be3e28b5b156e07ec845fc98f112c437d949d8",
"size": "5971",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "packages/google-cloud-contentwarehouse/google/cloud/contentwarehouse_v1/services/rule_set_service/pagers.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2895"
},
{
"name": "Python",
"bytes": "5620713"
},
{
"name": "Shell",
"bytes": "51704"
}
],
"symlink_target": ""
} |
'''
Created on July 30, 2014
@author: cmills
This contains client-side support for TASR, focusing on the Subject+Version
(S+V) API used by the Avro project's schema repository code. There is also
support for some TASR-exclusive methods (retrieving by digest-based ID, for
example). The older TASR API remains available through the client_legacy
module.
'''
import requests
import tasr.app
import webtest
import StringIO
from tasr.registered_schema import RegisteredAvroSchema
from tasr.headers import SubjectHeaderBot, SchemaHeaderBot
# Module-level TASR application, used only to pull default connection
# settings for the client helpers below.
APP = tasr.app.TASR_APP
APP.set_config_mode('local')
TASR_HOST = APP.config.host  # default host for all module-level calls
TASR_PORT = APP.config.port  # default port for all module-level calls
TIMEOUT = 2  # seconds
class TASRError(Exception):
    '''Something went wrong with a TASR interaction'''
def reg_schema_from_url(url, method='GET', data=None, headers=None,
                        timeout=TIMEOUT, err_404='No such object.'):
    '''A generic method to call a URL and transform the reply into a
    RegisteredSchema object.  Most of the API calls can use this skeleton.

    Raises TASRError for timeouts, 404s, 409s and any other non-200/201
    status, and when the body's recomputed SHA256 ID does not match the ID
    the server sent (i.e. the schema was modified in transit).
    '''
    if headers is None:
        headers = {'Accept': 'application/json', }
    elif isinstance(headers, dict):
        headers['Accept'] = 'application/json'
    resp = None
    try:
        verb = method.upper()
        # NOTE(review): GET omits the headers dict, matching the original
        # behavior -- confirm whether Accept should be sent on GET too.
        if verb == 'GET':
            resp = requests.get(url, timeout=timeout)
        elif verb == 'POST':
            resp = requests.post(url, data=data, headers=headers,
                                 timeout=timeout)
        elif verb == 'PUT':
            resp = requests.put(url, data=data, headers=headers,
                                timeout=timeout)
        # check for error cases (resp also stays None for unknown verbs)
        if resp is None:
            raise TASRError('Timeout for request to %s' % url)
        if 404 == resp.status_code:
            raise TASRError(err_404)
        if 409 == resp.status_code:
            raise TASRError(resp.content)
        if resp.status_code not in (200, 201):
            raise TASRError('Failed request to %s (status code: %s)' %
                            (url, resp.status_code))
        # OK - so construct the RS and return it
        ras = RegisteredAvroSchema()
        ras.schema_str = resp.content
        ras.created = (resp.status_code == 201)
        schema_meta = SchemaHeaderBot.extract_metadata(resp)
        if resp.content and not schema_meta.sha256_id == ras.sha256_id:
            raise TASRError('Schema was modified in transit.')
        ras.update_from_schema_metadata(schema_meta)
        return ras
    except TASRError:
        # fixed: previously the broad handler re-wrapped our own,
        # already-descriptive TASRErrors
        raise
    except Exception as exc:
        raise TASRError(exc)
def register_subject(subject_name, config_dict=None, host=TASR_HOST,
                     port=TASR_PORT, timeout=TIMEOUT):
    ''' PUT /tasr/subject/<subject name>
    Registers a _subject_ (not a schema), ensuring that the group can be
    established before associating schemas with it.  Note that if a form is
    sent as the PUT body, it should be used to set the subject config map.
    Returns a GroupMetadata object on success (None when no subject
    metadata headers come back).
    '''
    url = 'http://%s:%s/tasr/subject/%s' % (host, port, subject_name)
    resp = requests.put(url, data=config_dict, timeout=timeout)
    if resp is None:
        raise TASRError('Timeout for register subject request.')
    if resp.status_code not in (200, 201):
        raise TASRError('Failed to register subject. (status code: %s)' %
                        resp.status_code)
    subject_metas = SubjectHeaderBot.extract_metadata(resp)
    # truthiness already covers the redundant len() > 0 check
    if subject_metas:
        return subject_metas[subject_name]
def lookup_subject(subject_name, host=TASR_HOST, port=TASR_PORT,
                   timeout=TIMEOUT):
    ''' GET /tasr/subject/<subject name>
    Checks whether a subject has been registered.  Returns a boolean value.
    '''
    try:
        url = 'http://%s:%s/tasr/subject/%s' % (host, port, subject_name)
        return requests.get(url, timeout=timeout).status_code == 200
    except webtest.AppError:
        return False
def get_subject_config(subject_name, host=TASR_HOST, port=TASR_PORT,
                       timeout=TIMEOUT):
    ''' GET /tasr/subject/<subject name>/config
    Retrieves the config map for the subject.  Each key:value pair is returned
    as a line in the format "<key>=<value>\n" in the response body.
    '''
    url = 'http://%s:%s/tasr/subject/%s/config' % (host, port, subject_name)
    resp = requests.get(url, timeout=timeout)
    if resp is None:
        # fixed copy-pasted message (previously said "register subject")
        raise TASRError('Timeout for get subject config request.')
    if resp.status_code != 200:
        # fixed copy-pasted message (previously said "register subject")
        raise TASRError('Failed to get subject config. (status code: %s)' %
                        resp.status_code)
    # construct the dict from the "<key>=<value>" lines of the body
    buff = StringIO.StringIO(resp.content)
    config_dict = dict()
    for line in buff:
        if line and line.strip():
            # this ensures we don't try and split empty lines
            key, value = line.strip().split('=', 1)
            config_dict[key] = value
    buff.close()
    return config_dict
def update_subject_config(subject_name, config_dict,
                          host=TASR_HOST, port=TASR_PORT, timeout=TIMEOUT):
    ''' POST /tasr/subject/<subject name>/config
    Replaces the config map for the subject.  Each key:value pair of the
    updated map is returned as a line in the format "<key>=<value>\n" in the
    response body.  The method returns the updated config dict.
    '''
    url = 'http://%s:%s/tasr/subject/%s/config' % (host, port, subject_name)
    resp = requests.post(url, data=config_dict, timeout=timeout)
    if resp is None:
        # fixed copy-pasted message (previously said "register subject")
        raise TASRError('Timeout for update subject config request.')
    if resp.status_code != 200:
        raise TASRError('Failed to update config for %s' % subject_name)
    # construct the dict from the "<key>=<value>" lines of the body
    buff = StringIO.StringIO(resp.content)
    config_dict = dict()
    for line in buff:
        if line and line.strip():
            # this ensures we don't try and split empty lines
            key, value = line.strip().split('=', 1)
            config_dict[key] = value
    buff.close()
    return config_dict
def is_subject_integral(subject_name, host=TASR_HOST,
                        port=TASR_PORT, timeout=TIMEOUT):
    ''' GET /tasr/subject/<subject name>/integral
    Returns True or False, indicating whether the IDs used by the repository
    are guaranteed to be integers.  Note that TASR will always return False
    in its current state as we accept both version numbers (which are
    integers) and multi-type IDs (which are base64-encoded byte arrays, not
    integers).
    '''
    url = 'http://%s:%s/tasr/subject/%s/integral' % (host, port, subject_name)
    resp = requests.get(url, timeout=timeout)
    if resp is None:
        # fixed copy-pasted message (previously said "get all subjects")
        raise TASRError('Timeout for subject integral request.')
    if resp.status_code != 200:
        raise TASRError('No valid integral response for %s' % subject_name)
    return resp.content.strip().upper() == 'TRUE'
def get_active_subject_names(host=TASR_HOST, port=TASR_PORT, timeout=TIMEOUT):
    ''' GET /tasr/collection/subjects/active
    Retrieves all the active subject names (ones with schemas), both as X-TASR
    header fields and as plain text, one per line, in the response body.  This
    method returns a list of subject name strings.
    '''
    url = 'http://%s:%s/tasr/collection/subjects/active' % (host, port)
    resp = requests.get(url, timeout=timeout)
    if resp is None:
        raise TASRError('Timeout for get active subjects request.')
    if resp.status_code != 200:
        raise TASRError('Failed to get active subjects (status code: %s)' %
                        resp.status_code)
    subject_metas = SubjectHeaderBot.extract_metadata(resp)
    # the body lists the names one per line; it must agree with the headers
    buff = StringIO.StringIO(resp.content)
    name_list = [line.strip() for line in buff]
    buff.close()
    # sorted comparison subsumes the redundant length check
    if sorted(subject_metas.keys()) != sorted(name_list):
        raise TASRError('Header-body mismatch for subject name lists.')
    return subject_metas.keys()
def get_all_subject_names(host=TASR_HOST, port=TASR_PORT, timeout=TIMEOUT):
    ''' GET /tasr/collection/subjects/all
    Retrieves all the registered subject names, both as X-TASR header fields
    and as plain text, one per line, in the response body.  This method
    returns a list of subject name strings.
    '''
    url = 'http://%s:%s/tasr/collection/subjects/all' % (host, port)
    resp = requests.get(url, timeout=timeout)
    if resp is None:
        raise TASRError('Timeout for get all subjects request.')
    if resp.status_code != 200:
        raise TASRError('Failed to get all subjects (status code: %s)' %
                        resp.status_code)
    subject_metas = SubjectHeaderBot.extract_metadata(resp)
    # the body lists the names one per line; it must agree with the headers
    buff = StringIO.StringIO(resp.content)
    name_list = [line.strip() for line in buff]
    buff.close()
    # sorted comparison subsumes the redundant length check
    if sorted(subject_metas.keys()) != sorted(name_list):
        raise TASRError('Header-body mismatch for subject name lists.')
    return subject_metas.keys()
def get_all_subject_schema_ids(subject_name, host=TASR_HOST,
                               port=TASR_PORT, timeout=TIMEOUT):
    ''' GET /tasr/subject/<subject name>/all_ids
    Retrieves a list of the SHA256 multi-type IDs for all the schema versions
    registered for a subject, in version order.
    '''
    url = 'http://%s:%s/tasr/subject/%s/all_ids' % (host, port, subject_name)
    resp = requests.get(url, timeout=timeout)
    if resp is None:
        raise TASRError('Timeout for get all subject IDs request.')
    if resp.status_code != 200:
        raise TASRError('Failed to get all subject IDs (status code: %s)' %
                        resp.status_code)
    meta = SubjectHeaderBot.extract_metadata(resp)[subject_name]
    # the body lists the IDs one per line; it must agree with the headers
    buff = StringIO.StringIO(resp.content)
    sha256_ids = [line.strip() for line in buff]
    buff.close()
    # exact list equality subsumes the redundant length check
    if meta.sha256_id_list != sha256_ids:
        raise TASRError('Header-body mismatch for sha256_id lists.')
    return meta.sha256_id_list
def get_all_subject_schemas(subject_name,
                            host=TASR_HOST, port=TASR_PORT, timeout=TIMEOUT):
    ''' GET /tasr/subject/<subject name>/all_schemas
    Retrieves all the (canonical) schema versions registered for a subject,
    in version order, one per line in the response body.  The multi-type IDs
    are included in the headers for confirmation.
    '''
    url = ('http://%s:%s/tasr/subject/%s/all_schemas' %
           (host, port, subject_name))
    resp = requests.get(url, timeout=timeout)
    if resp is None:
        raise TASRError('Timeout for get all subject schemas request.')
    if resp.status_code != 200:
        raise TASRError('Failed to get all subject schemas (status code: %s)' %
                        resp.status_code)
    meta = SubjectHeaderBot.extract_metadata(resp)[subject_name]
    buff = StringIO.StringIO(resp.content)
    schemas = []
    # versions are 1-based and line order matches the header ID list
    for version, schema_str in enumerate(buff, 1):
        ras = RegisteredAvroSchema()
        ras.schema_str = schema_str.strip()
        ras.gv_dict[subject_name] = version
        if ras.sha256_id != meta.sha256_id_list[version - 1]:
            raise TASRError('Generated SHA256 ID did not match passed ID.')
        schemas.append(ras)
    buff.close()
    return schemas
def register_schema(subject_name, schema_str,
                    host=TASR_HOST, port=TASR_PORT, timeout=TIMEOUT):
    ''' PUT /tasr/subject/<subject name>/register
    Register a schema string for a subject.  Returns a RegisteredSchema
    object.
    '''
    url = 'http://%s:%s/tasr/subject/%s/register' % (host, port, subject_name)
    json_headers = {'content-type': 'application/json; charset=utf8', }
    return reg_schema_from_url(url, method='PUT', data=schema_str,
                               headers=json_headers, timeout=timeout)
def register_schema_if_latest(subject_name, version, schema_str,
                              host=TASR_HOST, port=TASR_PORT, timeout=TIMEOUT):
    ''' PUT /tasr/subject/<subject name>/register_if_latest/<version>
    Register a schema string for a subject, but only if the specified version
    is still the latest version number when the server handles the request.
    If successful, it returns a RegisteredSchema object.
    '''
    url = ('http://%s:%s/tasr/subject/%s/register_if_latest/%s' %
           (host, port, subject_name, version))
    json_headers = {'content-type': 'application/json; charset=utf8', }
    return reg_schema_from_url(url, method='PUT', data=schema_str,
                               headers=json_headers, timeout=timeout)
def lookup_by_schema_str(subject_name, schema_str,
                         host=TASR_HOST, port=TASR_PORT, timeout=TIMEOUT):
    ''' POST /tasr/subject/<subject name>/schema
    Look up the RegisteredAvroSchema for a subject that matches the given
    schema string.
    '''
    url = 'http://%s:%s/tasr/subject/%s/schema' % (host, port, subject_name)
    json_headers = {'content-type': 'application/json; charset=utf8', }
    return reg_schema_from_url(url, method='POST', data=schema_str,
                               headers=json_headers, timeout=timeout,
                               err_404='Schema not registered.')
def lookup_by_version(subject_name, version,
                      host=TASR_HOST, port=TASR_PORT, timeout=TIMEOUT):
    ''' GET /tasr/subject/<subject name>/version/<version>
    Get a RegisteredAvroSchema back for a given subject name and version
    number.  Note version numbers are integers greater than 0.
    '''
    try:
        iver = int(version)
    except (TypeError, ValueError):
        # narrowed from a bare except: that also caught (and re-raised) the
        # TASRError thrown for sub-1 versions inside the same try block
        raise TASRError('Bad version %s' % version)
    if iver < 1:
        raise TASRError('Bad version %s' % version)
    url = ('http://%s:%s/tasr/subject/%s/version/%s' %
           (host, port, subject_name, iver))
    return reg_schema_from_url(url, timeout=timeout,
                               err_404='No such version.')
def lookup_by_id_str(subject_name, id_str,
                     host=TASR_HOST, port=TASR_PORT, timeout=TIMEOUT):
    ''' GET /tasr/subject/<subject name>/id/<version>
    Look up the RegisteredAvroSchema for a subject by its multi-type ID
    string.
    '''
    url = ('http://%s:%s/tasr/subject/%s/id/%s' %
           (host, port, subject_name, id_str))
    return reg_schema_from_url(url, timeout=timeout,
                               err_404='No schema registered with this ID.')
def lookup_latest(subject_name,
                  host=TASR_HOST, port=TASR_PORT, timeout=TIMEOUT):
    ''' GET /tasr/subject/<subject name>/latest
    Look up the most recently registered schema for the given subject name.
    '''
    url = 'http://%s:%s/tasr/subject/%s/latest' % (host, port, subject_name)
    return reg_schema_from_url(url, timeout=timeout,
                               err_404='No such version.')
#############################################################################
# Wrapped in a class
#############################################################################
class TASRClientSV(object):
    '''An object means you only need to specify the host settings once.
    Every method delegates to the matching module-level function, always
    forwarding the instance's host/port/timeout.
    '''
    def __init__(self, host=TASR_HOST, port=TASR_PORT, timeout=TIMEOUT):
        self.host = host
        self.port = port
        self.timeout = timeout
    # subject calls
    def register_subject(self, subject_name, config_dict=None):
        '''Registers a subject name.  Returns a GroupMetadata object.'''
        return register_subject(subject_name, config_dict,
                                self.host, self.port, self.timeout)
    def lookup_subject(self, subject_name):
        '''Checks whether a subject has been registered.'''
        return lookup_subject(subject_name,
                              self.host, self.port, self.timeout)
    def subject_config(self, subject_name):
        '''Gets the config map for the subject.'''
        return get_subject_config(subject_name,
                                  self.host, self.port, self.timeout)
    def update_subject_config(self, subject_name, config_dict):
        '''Updates the config map for the subject.'''
        return update_subject_config(subject_name, config_dict,
                                     self.host, self.port, self.timeout)
    def is_subject_integral(self, subject_name):
        '''Indicates whether schema IDs are guaranteed to be integers.'''
        return is_subject_integral(subject_name,
                                   self.host, self.port, self.timeout)
    def active_subject_names(self):
        '''Returns a list of active subject names.'''
        return get_active_subject_names(self.host, self.port, self.timeout)
    def all_subject_names(self):
        '''Returns a list of registered subject names.'''
        return get_all_subject_names(self.host, self.port, self.timeout)
    def all_subject_schema_ids(self, subject_name):
        '''Returns a list of SHA256-based IDs for schema versions registered
        for the specified subject.'''
        return get_all_subject_schema_ids(subject_name,
                                          self.host, self.port, self.timeout)
    def all_subject_schemas(self, subject_name):
        '''Returns a version-ordered list of registered schemas for the
        specified subject.'''
        return get_all_subject_schemas(subject_name,
                                       self.host, self.port, self.timeout)
    # schema calls
    def register_schema(self, subject_name, schema_str):
        '''Register a schema for a subject.'''
        # BUG FIX: previously called the module function without the
        # instance's host/port/timeout, so a client configured for a
        # non-default endpoint silently hit the default one.
        return register_schema(subject_name, schema_str,
                               self.host, self.port, self.timeout)
    def register_schema_if_latest_version(self, subject_name, ver, schema_str):
        '''Register a schema for a subject if the version number is currently
        the latest for the subject.'''
        # BUG FIX: same endpoint-forwarding fix as register_schema above.
        return register_schema_if_latest(subject_name, ver, schema_str,
                                         self.host, self.port, self.timeout)
    def lookup_by_schema_str(self, subject, schema_str):
        '''Get a registered schema for a specified schema str.'''
        return lookup_by_schema_str(subject, schema_str,
                                    self.host, self.port, self.timeout)
    def lookup_by_version(self, subject_name, version):
        '''Get a registered schema for the subject and version.'''
        return lookup_by_version(subject_name, version,
                                 self.host, self.port, self.timeout)
    def lookup_by_id_str(self, subject_name, id_str):
        '''Get a registered schema for the subject and multi-type ID string.'''
        return lookup_by_id_str(subject_name, id_str,
                                self.host, self.port, self.timeout)
    def lookup_latest(self, subject_name):
        '''Get the latest registered schema for the subject.'''
        return lookup_latest(subject_name, self.host, self.port, self.timeout)
| {
"content_hash": "e999991184be8caede9cc84f751e79cd",
"timestamp": "",
"source": "github",
"line_count": 458,
"max_line_length": 79,
"avg_line_length": 42.956331877729255,
"alnum_prop": 0.6258513774524753,
"repo_name": "ifwe/tasr",
"id": "c416008bc1ca320411c99a0b5dcec02d189bb901",
"size": "19674",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/py/tasr/client.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "337136"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
import datetime
import re
import django
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.core.serializers import json # FIXME: disambiguate name from JSON module
from django.utils import six
from django.utils.encoding import force_text, smart_bytes
from tastypie.bundle import Bundle
from tastypie.exceptions import BadRequest, UnsupportedFormat
from tastypie.utils import format_datetime, format_date, format_time, make_naive
try:
import defusedxml.lxml as lxml
from defusedxml.common import DefusedXmlException
from defusedxml.lxml import parse as parse_xml
from lxml.etree import Element, tostring, LxmlError, XMLParser
except ImportError:
lxml = None
try:
import yaml
from django.core.serializers import pyyaml
except ImportError:
yaml = None
try:
import biplist
except ImportError:
biplist = None
try:
import simplejson
except ImportError:
import json as simplejson
# Matches an XML declaration such as '<?xml version="1.0"?>'.  Raw string
# avoids the invalid '\?' escape in a plain string literal, which raises
# a (Syntax)Warning on modern Python.
XML_ENCODING = re.compile(r'<\?xml.*?\?>', re.IGNORECASE)
# Ugh & blah.
# So doing a regular dump is generally fine, since Tastypie doesn't usually
# serialize advanced types. *HOWEVER*, it will dump out Python Unicode strings
# as a custom YAML tag, which of course ``yaml.safe_load`` can't handle.
if yaml is not None:
    from yaml.constructor import SafeConstructor
    from yaml.loader import Reader, Scanner, Parser, Composer, Resolver
    class TastypieConstructor(SafeConstructor):
        # Safe-load normally rejects the python/unicode tag that a regular
        # dump emits for Python unicode strings (see module comment above);
        # accept it, returning a bytestring when the value is pure ASCII.
        def construct_yaml_unicode_dammit(self, node):
            value = self.construct_scalar(node)
            try:
                return value.encode('ascii')
            except UnicodeEncodeError:
                # non-ASCII content: keep the unicode value as-is
                return value
    TastypieConstructor.add_constructor(u'tag:yaml.org,2002:python/unicode', TastypieConstructor.construct_yaml_unicode_dammit)
    # Standard PyYAML loader component stack, but with the tolerant
    # constructor above in place of SafeConstructor.
    class TastypieLoader(Reader, Scanner, Parser, Composer, TastypieConstructor, Resolver):
        def __init__(self, stream):
            Reader.__init__(self, stream)
            Scanner.__init__(self)
            Parser.__init__(self)
            Composer.__init__(self)
            TastypieConstructor.__init__(self)
            Resolver.__init__(self)
class Serializer(object):
    """
    A swappable class for serialization.

    This handles most types of data as well as the following output formats::

        * json
        * jsonp (Disabled by default)
        * xml
        * yaml
        * html
        * plist (see http://explorapp.com/biplist/)

    It was designed to make changing behavior easy, either by overriding the
    various format methods (i.e. ``to_json``), by changing the
    ``formats/content_types`` options or by altering the other hook methods.
    """
    # Enabled short format names and their MIME types; both may be overridden
    # per-instance through __init__.
    formats = ['json', 'xml', 'yaml', 'html', 'plist']
    content_types = {'json': 'application/json',
                     'jsonp': 'text/javascript',
                     'xml': 'application/xml',
                     'yaml': 'text/yaml',
                     'html': 'text/html',
                     'plist': 'application/x-plist'}

    def __init__(self, formats=None, content_types=None, datetime_formatting=None):
        # Datetime style: 'iso-8601' (default), 'iso-8601-strict' or
        # 'rfc-2822'; falls back to the TASTYPIE_DATETIME_FORMATTING setting.
        if datetime_formatting is not None:
            self.datetime_formatting = datetime_formatting
        else:
            self.datetime_formatting = getattr(settings, 'TASTYPIE_DATETIME_FORMATTING', 'iso-8601')

        self.supported_formats = []

        if content_types is not None:
            self.content_types = content_types

        if formats is not None:
            self.formats = formats

        # Identity check against the class attribute: only apply the global
        # setting when neither a subclass nor the caller customized formats.
        if self.formats is Serializer.formats and hasattr(settings, 'TASTYPIE_DEFAULT_FORMATS'):
            # We want TASTYPIE_DEFAULT_FORMATS to override unmodified defaults but not intentional changes
            # on Serializer subclasses:
            self.formats = settings.TASTYPIE_DEFAULT_FORMATS

        if not isinstance(self.formats, (list, tuple)):
            raise ImproperlyConfigured('Formats should be a list or tuple, not %r' % self.formats)

        # Every enabled format must have a known MIME type.
        for format in self.formats:
            try:
                self.supported_formats.append(self.content_types[format])
            except KeyError:
                raise ImproperlyConfigured("Content type for specified type '%s' not found. Please provide it at either the class level or via the arguments." % format)

    def get_mime_for_format(self, format):
        """
        Given a format, attempts to determine the correct MIME type.

        If not available on the current ``Serializer``, returns
        ``application/json`` by default.
        """
        try:
            return self.content_types[format]
        except KeyError:
            return 'application/json'

    def format_datetime(self, data):
        """
        A hook to control how datetimes are formatted.

        Can be overridden at the ``Serializer`` level (``datetime_formatting``)
        or globally (via ``settings.TASTYPIE_DATETIME_FORMATTING``).

        Default is ``iso-8601``, which looks like "2010-12-16T03:02:14".
        """
        # Strip timezone info first; serialized output is always naive.
        data = make_naive(data)
        if self.datetime_formatting == 'rfc-2822':
            # Calls the module-level format_datetime helper (imported above,
            # outside this excerpt), not this method -- TODO confirm import.
            return format_datetime(data)
        if self.datetime_formatting == 'iso-8601-strict':
            # Remove microseconds to strictly adhere to iso-8601
            data = data - datetime.timedelta(microseconds = data.microsecond)

        return data.isoformat()

    def format_date(self, data):
        """
        A hook to control how dates are formatted.

        Can be overridden at the ``Serializer`` level (``datetime_formatting``)
        or globally (via ``settings.TASTYPIE_DATETIME_FORMATTING``).

        Default is ``iso-8601``, which looks like "2010-12-16".
        """
        if self.datetime_formatting == 'rfc-2822':
            return format_date(data)

        return data.isoformat()

    def format_time(self, data):
        """
        A hook to control how times are formatted.

        Can be overridden at the ``Serializer`` level (``datetime_formatting``)
        or globally (via ``settings.TASTYPIE_DATETIME_FORMATTING``).

        Default is ``iso-8601``, which looks like "03:02:14".
        """
        if self.datetime_formatting == 'rfc-2822':
            return format_time(data)
        if self.datetime_formatting == 'iso-8601-strict':
            # Remove microseconds to strictly adhere to iso-8601.
            # datetime.time supports no arithmetic, so round-trip through a
            # throwaway datetime on an arbitrary date.
            data = (datetime.datetime.combine(datetime.date(1,1,1),data) - datetime.timedelta(microseconds = data.microsecond)).time()

        return data.isoformat()

    def serialize(self, bundle, format='application/json', options={}):
        """
        Given some data and a format, calls the correct method to serialize
        the data and returns the result.
        """
        # NOTE(review): mutable default ``options={}`` -- safe only because
        # nothing in this method mutates it; confirm the to_* hooks don't.
        desired_format = None

        # Map the requested MIME type back to a short name that has a
        # matching to_<name> method.
        for short_format, long_format in self.content_types.items():
            if format == long_format:
                if hasattr(self, "to_%s" % short_format):
                    desired_format = short_format
                    break

        if desired_format is None:
            raise UnsupportedFormat("The format indicated '%s' had no available serialization method. Please check your ``formats`` and ``content_types`` on your Serializer." % format)

        serialized = getattr(self, "to_%s" % desired_format)(bundle, options)
        return serialized

    def deserialize(self, content, format='application/json'):
        """
        Given some data and a format, calls the correct method to deserialize
        the data and returns the result.
        """
        desired_format = None

        # Drop content-type parameters, e.g. '; charset=utf-8'.
        format = format.split(';')[0]

        for short_format, long_format in self.content_types.items():
            if format == long_format:
                if hasattr(self, "from_%s" % short_format):
                    desired_format = short_format
                    break

        if desired_format is None:
            raise UnsupportedFormat("The format indicated '%s' had no available deserialization method. Please check your ``formats`` and ``content_types`` on your Serializer." % format)

        if isinstance(content, six.binary_type):
            # Raw request bodies arrive as bytes; decode before handing off.
            content = force_text(content)

        deserialized = getattr(self, "from_%s" % desired_format)(content)
        return deserialized

    def to_simple(self, data, options):
        """
        For a piece of data, attempts to recognize it and provide a simplified
        form of something complex.

        This brings complex Python data structures down to native types of the
        serialization format(s).
        """
        if isinstance(data, (list, tuple)):
            return [self.to_simple(item, options) for item in data]
        if isinstance(data, dict):
            return dict((key, self.to_simple(val, options)) for (key, val) in data.items())
        elif isinstance(data, Bundle):
            # Bundles flatten to their ``data`` dict.
            return dict((key, self.to_simple(val, options)) for (key, val) in data.data.items())
        elif hasattr(data, 'dehydrated_type'):
            # Tastypie field objects. Related fields either embed the full
            # resource (``full``) or just the dehydrated value.
            if getattr(data, 'dehydrated_type', None) == 'related' and data.is_m2m == False:
                if data.full:
                    return self.to_simple(data.fk_resource, options)
                else:
                    return self.to_simple(data.value, options)
            elif getattr(data, 'dehydrated_type', None) == 'related' and data.is_m2m == True:
                if data.full:
                    return [self.to_simple(bundle, options) for bundle in data.m2m_bundles]
                else:
                    return [self.to_simple(val, options) for val in data.value]
            else:
                # Non-related field: simplify its value.
                return self.to_simple(data.value, options)
        # datetime must be tested before date: datetime is a date subclass.
        elif isinstance(data, datetime.datetime):
            return self.format_datetime(data)
        elif isinstance(data, datetime.date):
            return self.format_date(data)
        elif isinstance(data, datetime.time):
            return self.format_time(data)
        elif isinstance(data, bool):
            return data
        elif isinstance(data, (six.integer_types, float)):
            return data
        elif data is None:
            return None
        else:
            # Everything else degrades to text.
            return force_text(data)

    def to_etree(self, data, options=None, name=None, depth=0):
        """
        Given some data, converts that data to an ``etree.Element`` suitable
        for use in the XML output.
        """
        if isinstance(data, (list, tuple)):
            element = Element(name or 'objects')  # NOTE(review): dead assignment; overwritten just below.
            if name:
                element = Element(name)
                element.set('type', 'list')
            else:
                element = Element('objects')
            for item in data:
                element.append(self.to_etree(item, options, depth=depth+1))
            # Keep child order deterministic.
            element[:] = sorted(element, key=lambda x: x.tag)
        elif isinstance(data, dict):
            # Top-level dicts become the <response> envelope; nested ones are
            # typed hashes.
            if depth == 0:
                element = Element(name or 'response')
            else:
                element = Element(name or 'object')
                element.set('type', 'hash')
            for (key, value) in data.items():
                element.append(self.to_etree(value, options, name=key, depth=depth+1))
            element[:] = sorted(element, key=lambda x: x.tag)
        elif isinstance(data, Bundle):
            element = Element(name or 'object')
            for field_name, field_object in data.data.items():
                element.append(self.to_etree(field_object, options, name=field_name, depth=depth+1))
            element[:] = sorted(element, key=lambda x: x.tag)
        elif hasattr(data, 'dehydrated_type'):
            # Tastypie field objects; mirrors the branching in to_simple.
            if getattr(data, 'dehydrated_type', None) == 'related' and data.is_m2m == False:
                if data.full:
                    return self.to_etree(data.fk_resource, options, name, depth+1)
                else:
                    return self.to_etree(data.value, options, name, depth+1)
            elif getattr(data, 'dehydrated_type', None) == 'related' and data.is_m2m == True:
                if data.full:
                    element = Element(name or 'objects')
                    for bundle in data.m2m_bundles:
                        element.append(self.to_etree(bundle, options, bundle.resource_name, depth+1))
                else:
                    element = Element(name or 'objects')
                    for value in data.value:
                        element.append(self.to_etree(value, options, name, depth=depth+1))
            else:
                return self.to_etree(data.value, options, name)
        else:
            # Scalar leaf: emit a node with a 'type' hint and text content.
            element = Element(name or 'value')
            simple_data = self.to_simple(data, options)
            data_type = get_type_string(simple_data)

            if data_type != 'string':
                element.set('type', get_type_string(simple_data))

            if data_type != 'null':
                if isinstance(simple_data, six.text_type):
                    element.text = simple_data
                else:
                    element.text = force_text(simple_data)

        return element

    def from_etree(self, data):
        """
        Not the smartest deserializer on the planet. At the request level,
        it first tries to output the deserialized subelement called "object"
        or "objects" and falls back to deserializing based on hinted types in
        the XML element attribute "type".
        """
        if data.tag == 'request':
            # if "object" or "objects" exists, return deserialized forms.
            elements = data.getchildren()
            for element in elements:
                if element.tag in ('object', 'objects'):
                    return self.from_etree(element)
            return dict((element.tag, self.from_etree(element)) for element in elements)
        elif data.tag == 'object' or data.get('type') == 'hash':
            return dict((element.tag, self.from_etree(element)) for element in data.getchildren())
        elif data.tag == 'objects' or data.get('type') == 'list':
            return [self.from_etree(element) for element in data.getchildren()]
        else:
            # Leaf node: coerce text according to the 'type' attribute hint;
            # unknown hints deserialize to None.
            type_string = data.get('type')

            if type_string in ('string', None):
                return data.text
            elif type_string == 'integer':
                return int(data.text)
            elif type_string == 'float':
                return float(data.text)
            elif type_string == 'boolean':
                if data.text == 'True':
                    return True
                else:
                    return False
            else:
                return None

    def to_json(self, data, options=None):
        """
        Given some Python data, produces JSON output.
        """
        options = options or {}
        data = self.to_simple(data, options)

        # ``json`` here is ``django.core.serializers.json`` (imported further
        # up this module, outside this excerpt) -- hence ``json.json.dumps``.
        # NOTE(review): lexicographic string compare of versions; '1.10' <
        # '1.5' as strings -- confirm behavior on newer Django.
        if django.get_version() >= '1.5':
            return json.json.dumps(data, cls=json.DjangoJSONEncoder, sort_keys=True, ensure_ascii=False)
        else:
            return simplejson.dumps(data, cls=json.DjangoJSONEncoder, sort_keys=True, ensure_ascii=False)

    def from_json(self, content):
        """
        Given some JSON data, returns a Python dictionary of the decoded data.
        """
        try:
            return simplejson.loads(content)
        except ValueError:
            # Malformed client JSON is a 400, not a 500.
            raise BadRequest

    def to_jsonp(self, data, options=None):
        """
        Given some Python data, produces JSON output wrapped in the provided
        callback.

        Due to a difference between JSON and Javascript, two
        newline characters, \u2028 and \u2029, need to be escaped.
        See http://timelessrepo.com/json-isnt-a-javascript-subset for
        details.
        """
        options = options or {}
        json = self.to_json(data, options)
        # Escape the JS-invalid line separators. (Local ``json`` shadows the
        # module-level name within this method only.)
        json = json.replace(u'\u2028', u'\\u2028').replace(u'\u2029', u'\\u2029')
        # NOTE(review): raises KeyError when options lacks 'callback';
        # presumably validated by the caller -- confirm.
        return u'%s(%s)' % (options['callback'], json)

    def to_xml(self, data, options=None):
        """
        Given some Python data, produces XML output.
        """
        options = options or {}

        if lxml is None:
            raise ImproperlyConfigured("Usage of the XML aspects requires lxml and defusedxml.")

        return tostring(self.to_etree(data, options), xml_declaration=True, encoding='utf-8')

    def from_xml(self, content, forbid_dtd=True, forbid_entities=True):
        """
        Given some XML data, returns a Python dictionary of the decoded data.

        By default XML entity declarations and DTDs will raise a BadRequest
        exception content but subclasses may choose to override this if
        necessary.
        """
        if lxml is None:
            raise ImproperlyConfigured("Usage of the XML aspects requires lxml and defusedxml.")

        try:
            # Stripping the encoding declaration. Because lxml.
            # See http://lxml.de/parsing.html, "Python unicode strings".
            content = XML_ENCODING.sub('', content)
            parsed = parse_xml(
                six.StringIO(content),
                forbid_dtd=forbid_dtd,
                forbid_entities=forbid_entities
            )
        except (LxmlError, DefusedXmlException):
            # Parse errors and defused-xml rejections both surface as 400s.
            raise BadRequest()

        return self.from_etree(parsed.getroot())

    def to_yaml(self, data, options=None):
        """
        Given some Python data, produces YAML output.
        """
        options = options or {}

        if yaml is None:
            raise ImproperlyConfigured("Usage of the YAML aspects requires yaml.")

        return yaml.dump(self.to_simple(data, options))

    def from_yaml(self, content):
        """
        Given some YAML data, returns a Python dictionary of the decoded data.
        """
        if yaml is None:
            raise ImproperlyConfigured("Usage of the YAML aspects requires yaml.")

        # TastypieLoader (defined above) is safe loading plus handling for
        # the python/unicode tag that yaml.dump can emit.
        return yaml.load(content, Loader=TastypieLoader)

    def to_plist(self, data, options=None):
        """
        Given some Python data, produces binary plist output.
        """
        options = options or {}

        if biplist is None:
            raise ImproperlyConfigured("Usage of the plist aspects requires biplist.")

        return biplist.writePlistToString(self.to_simple(data, options))

    def from_plist(self, content):
        """
        Given some binary plist data, returns a Python dictionary of the decoded data.
        """
        if biplist is None:
            raise ImproperlyConfigured("Usage of the plist aspects requires biplist.")

        if isinstance(content, six.text_type):
            # biplist operates on bytes.
            content = smart_bytes(content)

        return biplist.readPlistFromString(content)

    def to_html(self, data, options=None):
        """
        Reserved for future usage.

        The desire is to provide HTML output of a resource, making an API
        available to a browser. This is on the TODO list but not currently
        implemented.
        """
        options = options or {}
        return 'Sorry, not implemented yet. Please append "?format=json" to your URL.'

    def from_html(self, content):
        """
        Reserved for future usage.

        The desire is to handle form-based (maybe Javascript?) input, making an
        API available to a browser. This is on the TODO list but not currently
        implemented.
        """
        pass
def get_type_string(data):
    """
    Translates a Python data type into a string format.

    Returns the type-name string used in the XML 'type' attribute, or falls
    through (implicitly returning ``None``) for unrecognized types.
    """
    if data is None:
        return 'null'

    kind = type(data)

    # Exact-type checks, so bool is never mistaken for an integer.
    if kind == bool:
        return 'boolean'
    if kind in six.integer_types:
        return 'integer'
    if kind == float:
        return 'float'
    if kind in (list, tuple):
        return 'list'
    if kind == dict:
        return 'hash'
    if isinstance(data, six.string_types):
        return 'string'
| {
"content_hash": "cfd88f0221c9a2c679934cfcc6b2301c",
"timestamp": "",
"source": "github",
"line_count": 523,
"max_line_length": 186,
"avg_line_length": 38.07839388145315,
"alnum_prop": 0.5968365553602812,
"repo_name": "ericholscher/django-tastypie",
"id": "059f0916a9649bf366723d8965514826ae7558f5",
"size": "19915",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tastypie/serializers.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
from osgeo import gdal
import sys
import numpy

# Command line: <script> <input raster> <output raw float32 file>
in_path = sys.argv[1]
out_path = sys.argv[2]
out_bands = 1

# Open the source dataset and collect its basic geometry/driver info.
dataset = gdal.Open(in_path)
width = dataset.RasterXSize
height = dataset.RasterYSize
band_total = dataset.RasterCount
driver = dataset.GetDriver()
driver_short = dataset.GetDriver().ShortName
driver_long = dataset.GetDriver().LongName

# Only the first band is processed.
band = dataset.GetRasterBand(1)
# extent for other formats
#bandtype = gdal.GetDataTypeName(band.DataType)

# Byte-swap the pixel data in place, then dump it as raw 32-bit floats.
pixels = band.ReadAsArray(0, 0, width, height)
swapped = pixels.byteswap(True)
swapped.astype('float32').tofile(out_path)
| {
"content_hash": "3c2cf9fa4e5b76ef3c37bd47f8916abd",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 51,
"avg_line_length": 20.866666666666667,
"alnum_prop": 0.7348242811501597,
"repo_name": "BuddyVolly/OpenSARKit",
"id": "b85da6d92119f057490a260d00d9b4e42e1c0bd4",
"size": "646",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "lib/python/ost_byteswap32.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "9601"
},
{
"name": "Shell",
"bytes": "246841"
}
],
"symlink_target": ""
} |
import os
import sys

# If extensions (or modules to document with autodoc) are in another
# directory, add these directories to sys.path here. If the directory is
# relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))

# The project root is the parent of this docs directory. Prepending it and
# its src/ layout directory to sys.path ensures autodoc imports the in-tree
# sources, so the documented version matches the checkout.
cwd = os.getcwd()
project_root = os.path.dirname(cwd)

for extra_path in (project_root, os.path.join(project_root, 'src')):
    sys.path.insert(0, extra_path)
def setup_django():
    """
    Configure a minimal Django environment for the docs build.

    Must run before ``import django_powerbank`` below, since Sphinx autodoc
    imports the documented modules and Django refuses to work without
    configured settings.
    """
    import django
    from django.conf import settings
    from tests.settings import INSTALLED_APPS

    # configure() must precede django.setup(); setup() populates the app
    # registry from the configured INSTALLED_APPS.
    settings.configure(INSTALLED_APPS=INSTALLED_APPS)
    django.setup()


# Executed at import time: conf.py is loaded before autodoc imports anything.
setup_django()
import django_powerbank

# autodoc/autosummary behaviour.
# NOTE(review): ``autodoc_default_flags`` was replaced by
# ``autodoc_default_options`` in Sphinx 1.8 -- confirm the pinned Sphinx version.
autodoc_default_flags = ['members',]
autosummary_generate = True
modindex_common_prefix = ['django_powerbank.']
# html_domain_indices = ['py-modindex'] # ignore np-modindex

# -- General configuration ---------------------------------------------

# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.viewcode',
    'sphinx.ext.autosummary',
    'sphinxcontrib.spelling',
]

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# The suffix of source filenames.
source_suffix = '.rst'

# The encoding of source files.
#source_encoding = 'utf-8-sig'

# The master toctree document.
master_doc = 'index'

# General information about the project.
project = u'Django Powerbank'
copyright = u"2017, Janusz Skonieczny"

# The version info for the project you're documenting, acts as replacement
# for |version| and |release|, also used in various other places throughout
# the built documents.
#
# The short X.Y version.
version = django_powerbank.__version__
# The full version, including alpha/beta/rc tags.
release = django_powerbank.__version__

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None

# There are two options for replacing |today|: either, you set today to
# some non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']

# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None

# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True

# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True

# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'

# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []

# If true, keep warnings as "system message" paragraphs in the built
# documents.
#keep_warnings = False

# -- Options for HTML output -------------------------------------------

# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = "sphinx_rtd_theme"

# Theme options are theme-specific and customize the look and feel of a
# theme further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}

# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []

# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None

# A shorter title for the navigation bar. Default is the same as
# html_title.
#html_short_title = None

# The name of an image file (relative to this directory) to place at the
# top of the sidebar.
#html_logo = None

# The name of an image file (within the static path) to use as favicon
# of the docs. This file should be a Windows icon file (.ico) being
# 16x16 or 32x32 pixels large.
#html_favicon = None

# Add any paths that contain custom static files (such as style sheets)
# here, relative to this directory. They are copied after the builtin
# static files, so a file named "default.css" will overwrite the builtin
# "default.css".
html_static_path = ['_static']

# If not '', a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'

# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True

# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}

# Additional templates that should be rendered to pages, maps page names
# to template names.
#html_additional_pages = {}

# If false, no module index is generated.
#html_domain_indices = True

# If false, no index is generated.
#html_use_index = True

# If true, the index is split into individual pages for each letter.
#html_split_index = False

# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True

# If true, "Created using Sphinx" is shown in the HTML footer.
# Default is True.
#html_show_sphinx = True

# If true, "(C) Copyright ..." is shown in the HTML footer.
# Default is True.
#html_show_copyright = True

# If true, an OpenSearch description file will be output, and all pages
# will contain a <link> tag referring to it. The value of this option
# must be the base URL from which the finished HTML is served.
#html_use_opensearch = ''

# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None

# Output file base name for HTML help builder.
htmlhelp_basename = 'django_powerbank_doc'

# -- Options for LaTeX output ------------------------------------------

latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #'papersize': 'letterpaper',

    # The font size ('10pt', '11pt' or '12pt').
    #'pointsize': '10pt',

    # Additional stuff for the LaTeX preamble.
    #'preamble': '',
}

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [
    ('index', 'django-powerbank.tex',
     u'Django Powerbank Documentation',
     u'Janusz Skonieczny', 'manual'),
]

# The name of an image file (relative to this directory) to place at
# the top of the title page.
#latex_logo = None

# For "manual" documents, if this is true, then toplevel headings
# are parts, not chapters.
#latex_use_parts = False

# If true, show page references after internal links.
#latex_show_pagerefs = False

# If true, show URL addresses after external links.
#latex_show_urls = False

# Documents to append as an appendix to all manuals.
#latex_appendices = []

# If false, no module index is generated.
#latex_domain_indices = True

# -- Options for manual page output ------------------------------------

# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', 'django-powerbank',
     u'Django Powerbank Documentation',
     [u'Janusz Skonieczny'], 1)
]

# If true, show URL addresses after external links.
#man_show_urls = False

# -- Options for Texinfo output ----------------------------------------

# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
    ('index', 'django-powerbank',
     u'Django Powerbank Documentation',
     u'Janusz Skonieczny',
     'django-powerbank',
     'One line description of project.',
     'Miscellaneous'),
]

# Documents to append as an appendix to all manuals.
#texinfo_appendices = []

# If false, no module index is generated.
#texinfo_domain_indices = True

# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'

# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| {
"content_hash": "9cd8b4375da6481a1817fcc06a30a7fd",
"timestamp": "",
"source": "github",
"line_count": 280,
"max_line_length": 76,
"avg_line_length": 30.696428571428573,
"alnum_prop": 0.7052937754508435,
"repo_name": "wooyek/django-powerbank",
"id": "dcbbff0bdb92bb25656bf4c99c40a6a605bf0720",
"size": "9046",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "docs/conf.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "5257"
},
{
"name": "Makefile",
"bytes": "3746"
},
{
"name": "Python",
"bytes": "65553"
},
{
"name": "Shell",
"bytes": "1936"
}
],
"symlink_target": ""
} |
import matplotlib.pyplot as plt
class FigureStyle:
    """
    Bundle of figure-level layout settings for matplotlib plots.

    Holds overall and per-subplot dimensions, output resolution, and the
    axis-sharing flags handed to matplotlib; ``apply`` is a placeholder hook
    for pushing the settings onto a figure.
    """

    def __init__(self, dpi=300, width=None,
                 height=None, width_per_subplot=None,
                 height_per_subplot=None,
                 share_x_axis=False, share_y_axis=False):
        """
        :param dpi: output resolution in dots per inch
        :param width: total figure width; ``None`` leaves it unset
        :param height: total figure height; ``None`` leaves it unset
        :param width_per_subplot: width allotted to each subplot
        :param height_per_subplot: height allotted to each subplot
        :param share_x_axis: sharing of x-axis properties among subplots:
            True or 'all' shares among all subplots; False or 'none' keeps
            each independent; 'row' / 'col' share per row or column.
        :param share_y_axis: same accepted values, for the y axis
        """
        self.dpi = dpi
        self.width = width
        self.height = height
        self.width_per_subplot = width_per_subplot
        self.height_per_subplot = height_per_subplot
        self.share_x_axis = share_x_axis
        self.share_y_axis = share_y_axis

    def apply(self):
        """Hook for applying the style; intentionally a no-op for now."""
        pass
# Preset styles shared by the visualization code. Several presets pass
# arguments that equal the constructor defaults explicitly (presumably for
# readability at the call sites).
default_figure_style = FigureStyle(dpi=300, width_per_subplot=None,
                                   height_per_subplot=None,
                                   share_x_axis=False, share_y_axis=False)

# Wide fixed-width layout (15 units across), height left to the caller.
chromosome_figure_style = FigureStyle(dpi=300, width=15, height_per_subplot=None)

# Stacked panels of fixed height sharing both axes.
rainfall_figure_style = FigureStyle(dpi=300, width_per_subplot=None,
                                    height_per_subplot=2,
                                    share_x_axis=True, share_y_axis=True)

# 8x8 grid figure with small (2x2) shared-axis subplots.
plot_figure_style = FigureStyle(dpi=300, width_per_subplot=2,
                                height_per_subplot=2,
                                share_x_axis=True, share_y_axis=True,
                                width=8, height=8)

# Single 5x5 panel, independent axes.
one_plot_figure_style = FigureStyle(dpi=300, width_per_subplot=5,
                                    height_per_subplot=5, width=5,
                                    height=5,
                                    share_x_axis=False, share_y_axis=False)
| {
"content_hash": "0f0b580b9cbe46fe6519bca16e20e9f9",
"timestamp": "",
"source": "github",
"line_count": 61,
"max_line_length": 81,
"avg_line_length": 36.885245901639344,
"alnum_prop": 0.5413333333333333,
"repo_name": "mahajrod/MACE",
"id": "cb7bfeb4595e436f580f6b4c657b4e1c8fa8190c",
"size": "2250",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "MACE/Visualization/Styles/Figure.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Perl",
"bytes": "924706"
},
{
"name": "Python",
"bytes": "492761"
},
{
"name": "Shell",
"bytes": "1699"
},
{
"name": "Terra",
"bytes": "4344300"
}
],
"symlink_target": ""
} |
"""
Copyright 2013 The Trustees of Princeton University
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import sys
import os

# Name of the storage backend this replication-gateway driver targets.
RG_STORE = "Box"

#XXX -- Work in Progress
# NOTE(review): this module appears to be copied from the Dropbox/S3 drivers --
# the functions below still reference ``dropbox``, ``boto``, ``ACCESS_TOKEN``,
# ``DEBUG``, ``conn``, ``client`` and ``FILE_ROOT``, none of which are defined
# or imported here. Confirm before use.
APP_KEY = None
APP_SECRET = None
def get_access_token(app_key=APP_KEY, app_secret=APP_SECRET):
'''
Get a new access token for a client
'''
flow = dropbox.client.DropboxOAuth2FlowNoRedirect(app_key, app_secret)
# Have the user sign in and authorize this token
authorize_url = flow.start()
print '1. Go to: ' + authorize_url
print '2. Click "Allow" (you might have to log in first)'
print '3. Copy the authorization code.'
code = raw_input("Enter the authorization code here: ").strip()
# This will fail if the user enters an invalid authorization code
access_token, user_id = flow.finish(code)
return access_token
#----------------------------
def get_bucket(bucket_name):
    """
    Fetch (or create) the named storage bucket.

    NOTE(review): mixes Dropbox and S3/boto code paths -- ``ACCESS_TOKEN``,
    ``DEBUG`` and ``conn`` are all undefined here, and ``bucket`` is unbound
    if create_bucket raises. Work-in-progress copy-paste; confirm.
    """
    from etc.config import AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY
    aws_id = AWS_ACCESS_KEY_ID
    aws_key = AWS_SECRET_ACCESS_KEY

    #from boto.s3.connection import Location

    try:
        client = dropbox.client.DropboxClient(ACCESS_TOKEN)
        if(DEBUG):
            print 'Using account: ', client.account_info()
    except:
        if(DEBUG):
            print "ERROR: Connection to Dropbox failed"
    else:
        if(DEBUG):
            print "Connected to S3"

    try:
        bucket = conn.create_bucket(bucket_name)
    except:
        if(DEBUG):
            print "ERROR: Could not create/fetch bucket " + bucket_name
    else:
        if(DEBUG):
            print "Fetched/created bucket: " + bucket_name

    return bucket
#-------------------------
def write_file(file_name, bucket_name):
    """
    Upload a local file to remote storage; returns True on success.

    NOTE(review): ``client`` is not defined at module scope -- presumably
    expected from a prior Dropbox connection; confirm.
    """
    if(DEBUG):
        print "Writing File: " + file_name

    try:
        f = open(bucket_name + file_name)
        response = client.put_file(bucket_name + file_name, f)
    except Exception as e:
        print e
        return False

    if(DEBUG):
        print "Written file to dropbox: " + file_name

    return True
#-------------------------
def read_file(file_name, bucket_name):
    """
    Download ``file_name`` from the bucket into the local data/in directory;
    returns True on success.

    NOTE(review): pure boto/S3 code in a Box driver, and ``FILE_ROOT`` is
    undefined here -- confirm.
    """
    if(DEBUG):
        print "Reading File: " + file_name

    bucket = get_bucket(bucket_name)

    from boto.s3.key import Key
    k = Key(bucket)
    k.key = file_name
    file_name_with_path = FILE_ROOT + '/data/in/' + file_name

    try:
        k.get_contents_to_filename(file_name_with_path)
    except:
        if(DEBUG):
            print "ERROR: reading"
        return False

    if(DEBUG):
        print "Read data from s3 to file: " + file_name_with_path

    return True
#-------------------------
def delete_file(file_name, bucket_name):
    """
    Delete ``file_name`` from the bucket; returns True on success.

    NOTE(review): boto/S3 code path in the Box driver -- confirm.
    """
    if(DEBUG):
        print "Deleting File: " + file_name

    bucket = get_bucket(bucket_name)

    from boto.s3.key import Key
    k = Key(bucket)
    k.key = file_name

    try:
        k.delete()
    except:
        return False

    if(DEBUG):
        print "Deleted s3 file: " + bucket_name + '/' + file_name

    return True
#-------------------------
def usage():
    # Print CLI usage and return a non-zero status code for the caller.
    print 'Usage: {prog} [OPTIONS -w -r -d] <file_name> <bucket_name>'.format(prog=sys.argv[0])
    return -1
#-------------------------
if __name__ == "__main__":
    # CLI entry point: <option> <file_name> <bucket_name>, where option is
    # -w (write), -r (read) or -d (delete).
    if len(sys.argv) != 4:
        usage()
    else:
        option = sys.argv[1]
        file_name = sys.argv[2]
        bucket_name = sys.argv[3]

        if(option == '-w'):
            write_file(file_name,bucket_name)
        elif(option == '-r'):
            read_file(file_name,bucket_name)
        elif(option == '-d'):
            delete_file(file_name,bucket_name)
        else:
            usage()
| {
"content_hash": "8f2dedc3cf103590ffbc47c848225825",
"timestamp": "",
"source": "github",
"line_count": 171,
"max_line_length": 95,
"avg_line_length": 24.350877192982455,
"alnum_prop": 0.5840537944284342,
"repo_name": "iychoi/syndicate-core",
"id": "29db1bec48c43d6ed6fee4625410f5df375008ff",
"size": "4187",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "python/syndicate/rg/drivers/box/driver.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "119973"
},
{
"name": "C++",
"bytes": "1774442"
},
{
"name": "Makefile",
"bytes": "20511"
},
{
"name": "Python",
"bytes": "2257256"
},
{
"name": "Shell",
"bytes": "42197"
}
],
"symlink_target": ""
} |
import numpy as np
import matplotlib.pyplot as plt
from scipy.io import wavfile

# Load the two detector strain recordings.
# FIX: the original passed 'rb' as a second positional argument, but
# scipy.io.wavfile.read's second parameter is ``mmap`` -- the file-mode
# string was silently treated as mmap=True. Read normally instead.
rate_h, hstrain = wavfile.read(r'H1_Strain.wav')
rate_l, lstrain = wavfile.read(r'L1_Strain.wav')

# Reference template waveform: two columns (time, strain), transposed into
# separate arrays.
reftime, ref_H1 = np.genfromtxt('wf_template.txt').transpose()

# Build time axes centered on the middle of each recording (t = 0 mid-file).
htime_interval = 1/rate_h
ltime_interval = 1/rate_l
htime_len = hstrain.shape[0]/rate_h
htime = np.arange(-htime_len/2, htime_len/2, htime_interval)
ltime_len = lstrain.shape[0]/rate_l
ltime = np.arange(-ltime_len/2, ltime_len/2, ltime_interval)

fig = plt.figure(figsize=(12, 6))

# H1 (Hanford) strain, top-left panel.
plth = fig.add_subplot(221)
plth.plot(htime, hstrain, 'r')
plth.set_xlabel('Time(seconds)')
plth.set_ylabel('H1 Strain')
plth.set_title('H1 Strain')

# L1 (Livingston) strain, top-right panel.
pltl = fig.add_subplot(222)
pltl.plot(ltime, lstrain, 'g')
pltl.set_xlabel('Time(seconds)')
pltl.set_ylabel('L1 Strain')
pltl.set_title('L1 Strain')

# Template waveform across the full bottom row.
pltref = fig.add_subplot(212)
pltref.plot(reftime, ref_H1)
pltref.set_xlabel('Time(Seconds)')
pltref.set_ylabel('Template Strain')
pltref.set_title('Template')

fig.tight_layout()
fig.savefig('Gravitational_Waves_Original.png')
plt.show()
plt.close('all')
"content_hash": "13cac1bdac1abe990fdbb4f82a2e531e",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 59,
"avg_line_length": 25.095238095238095,
"alnum_prop": 0.7495256166982922,
"repo_name": "ds17/reptiles_gh",
"id": "5c53bd09f099478702fcda68a95f1dba367bae86",
"size": "1054",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mooc_ex/DataVisual/引力波/stress wave.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "532973"
},
{
"name": "Jupyter Notebook",
"bytes": "326056"
},
{
"name": "Python",
"bytes": "54636"
}
],
"symlink_target": ""
} |
"""Top-level display functions for displaying object in different formats."""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
from __future__ import print_function
try:
from base64 import encodebytes as base64_encode
except ImportError:
from base64 import encodestring as base64_encode
import json
import mimetypes
import os
import struct
import sys
import warnings
from IPython.utils.py3compat import (string_types, cast_bytes_py2, cast_unicode,
unicode_type)
from IPython.testing.skipdoctest import skip_doctest
__all__ = ['display', 'display_pretty', 'display_html', 'display_markdown',
'display_svg', 'display_png', 'display_jpeg', 'display_latex', 'display_json',
'display_javascript', 'display_pdf', 'DisplayObject', 'TextDisplayObject',
'Pretty', 'HTML', 'Markdown', 'Math', 'Latex', 'SVG', 'JSON', 'Javascript',
'Image', 'clear_output', 'set_matplotlib_formats', 'set_matplotlib_close',
'publish_display_data']
#-----------------------------------------------------------------------------
# utility functions
#-----------------------------------------------------------------------------
def _safe_exists(path):
"""Check path, but don't let exceptions raise"""
try:
return os.path.exists(path)
except Exception:
return False
def _merge(d1, d2):
"""Like update, but merges sub-dicts instead of clobbering at the top level.
Updates d1 in-place
"""
if not isinstance(d2, dict) or not isinstance(d1, dict):
return d2
for key, value in d2.items():
d1[key] = _merge(d1.get(key), value)
return d1
def _display_mimetype(mimetype, objs, raw=False, metadata=None):
    """Shared implementation behind all the ``display_foo`` helpers.

    Parameters
    ----------
    mimetype : str
        The mimetype to be published (e.g. 'image/png')
    objs : tuple of objects
        The Python objects to display, or if raw=True raw text data to
        display.
    raw : bool
        Are the data objects raw data or Python objects that need to be
        formatted before display? [default: False]
    metadata : dict (optional)
        Metadata to be associated with the specific mimetype output.
    """
    md = metadata
    if md:
        # scope caller-supplied metadata to this one mimetype
        md = {mimetype: md}
    if raw:
        # each raw payload becomes a ready-made {mimetype: payload} bundle
        objs = [{mimetype: payload} for payload in objs]
    display(*objs, raw=raw, metadata=md, include=[mimetype])
#-----------------------------------------------------------------------------
# Main functions
#-----------------------------------------------------------------------------
def publish_display_data(data, metadata=None, source=None):
    """Publish data and metadata to all frontends.
    See the ``display_data`` message in the messaging documentation for
    more details about this message type.
    The following MIME types are currently implemented:
    * text/plain
    * text/html
    * text/markdown
    * text/latex
    * application/json
    * application/javascript
    * image/png
    * image/jpeg
    * image/svg+xml
    Parameters
    ----------
    data : dict
        A dictionary having keys that are valid MIME types (like
        'text/plain' or 'image/svg+xml') and values that are the data for
        that MIME type. The data itself must be a JSON'able data
        structure. Minimally all data should have the 'text/plain' data,
        which can be displayed by all frontends. If more than the plain
        text is given, it is up to the frontend to decide which
        representation to use.
    metadata : dict
        A dictionary for metadata related to the data. This can contain
        arbitrary key, value pairs that frontends can use to interpret
        the data. mime-type keys matching those in data can be used
        to specify metadata about particular representations.
    source : str, deprecated
        Unused.
    """
    # deferred import avoids a circular import at module load time
    from IPython.core.interactiveshell import InteractiveShell
    # note: `source` is accepted for backwards compatibility but ignored
    InteractiveShell.instance().display_pub.publish(
        data=data,
        metadata=metadata,
    )
def display(*objs, **kwargs):
    """Display a Python object in all frontends.
    By default all representations will be computed and sent to the frontends.
    Frontends can decide which representation is used and how.
    Parameters
    ----------
    objs : tuple of objects
        The Python objects to display.
    raw : bool, optional
        Are the objects to be displayed already mimetype-keyed dicts of raw display data,
        or Python objects that need to be formatted before display? [default: False]
    include : list or tuple, optional
        A list of format type strings (MIME types) to include in the
        format data dict. If this is set *only* the format types included
        in this list will be computed.
    exclude : list or tuple, optional
        A list of format type strings (MIME types) to exclude in the format
        data dict. If this is set all format types will be computed,
        except for those included in this argument.
    metadata : dict, optional
        A dictionary of metadata to associate with the output.
        mime-type keys in this dictionary will be associated with the individual
        representation formats, if they exist.
    """
    raw = kwargs.get('raw', False)
    include = kwargs.get('include')
    exclude = kwargs.get('exclude')
    metadata = kwargs.get('metadata')
    # deferred import avoids a circular import at module load time
    from IPython.core.interactiveshell import InteractiveShell
    if not raw:
        # only pay for the formatter lookup when formatting is needed
        format = InteractiveShell.instance().display_formatter.format
    for obj in objs:
        if raw:
            # obj is already a {mimetype: data} dict; publish it verbatim
            publish_display_data(data=obj, metadata=metadata)
        else:
            format_dict, md_dict = format(obj, include=include, exclude=exclude)
            if not format_dict:
                # nothing to display (e.g. _ipython_display_ took over)
                continue
            if metadata:
                # kwarg-specified metadata gets precedence
                _merge(md_dict, metadata)
            publish_display_data(data=format_dict, metadata=md_dict)
def display_pretty(*objs, **kwargs):
    """Display the pretty (default) representation of an object.
    Parameters
    ----------
    objs : tuple of objects
        The Python objects to display, or if raw=True raw text data to
        display.
    raw : bool
        Are the data objects raw data or Python objects that need to be
        formatted before display? [default: False]
    metadata : dict (optional)
        Metadata to be associated with the specific mimetype output.
    """
    # thin wrapper: all the work happens in _display_mimetype
    _display_mimetype('text/plain', objs, **kwargs)
def display_html(*objs, **kwargs):
    """Display the HTML representation of an object.
    Note: If raw=False and the object does not have a HTML
    representation, no HTML will be shown.
    Parameters
    ----------
    objs : tuple of objects
        The Python objects to display, or if raw=True raw HTML data to
        display.
    raw : bool
        Are the data objects raw data or Python objects that need to be
        formatted before display? [default: False]
    metadata : dict (optional)
        Metadata to be associated with the specific mimetype output.
    """
    # thin wrapper: all the work happens in _display_mimetype
    _display_mimetype('text/html', objs, **kwargs)
def display_markdown(*objs, **kwargs):
    """Displays the Markdown representation of an object.
    Parameters
    ----------
    objs : tuple of objects
        The Python objects to display, or if raw=True raw markdown data to
        display.
    raw : bool
        Are the data objects raw data or Python objects that need to be
        formatted before display? [default: False]
    metadata : dict (optional)
        Metadata to be associated with the specific mimetype output.
    """
    # thin wrapper: all the work happens in _display_mimetype
    _display_mimetype('text/markdown', objs, **kwargs)
def display_svg(*objs, **kwargs):
    """Display the SVG representation of an object.
    Parameters
    ----------
    objs : tuple of objects
        The Python objects to display, or if raw=True raw svg data to
        display.
    raw : bool
        Are the data objects raw data or Python objects that need to be
        formatted before display? [default: False]
    metadata : dict (optional)
        Metadata to be associated with the specific mimetype output.
    """
    # thin wrapper: all the work happens in _display_mimetype
    _display_mimetype('image/svg+xml', objs, **kwargs)
def display_png(*objs, **kwargs):
    """Display the PNG representation of an object.
    Parameters
    ----------
    objs : tuple of objects
        The Python objects to display, or if raw=True raw png data to
        display.
    raw : bool
        Are the data objects raw data or Python objects that need to be
        formatted before display? [default: False]
    metadata : dict (optional)
        Metadata to be associated with the specific mimetype output.
    """
    # thin wrapper: all the work happens in _display_mimetype
    _display_mimetype('image/png', objs, **kwargs)
def display_jpeg(*objs, **kwargs):
    """Display the JPEG representation of an object.
    Parameters
    ----------
    objs : tuple of objects
        The Python objects to display, or if raw=True raw JPEG data to
        display.
    raw : bool
        Are the data objects raw data or Python objects that need to be
        formatted before display? [default: False]
    metadata : dict (optional)
        Metadata to be associated with the specific mimetype output.
    """
    # thin wrapper: all the work happens in _display_mimetype
    _display_mimetype('image/jpeg', objs, **kwargs)
def display_latex(*objs, **kwargs):
    """Display the LaTeX representation of an object.
    Parameters
    ----------
    objs : tuple of objects
        The Python objects to display, or if raw=True raw latex data to
        display.
    raw : bool
        Are the data objects raw data or Python objects that need to be
        formatted before display? [default: False]
    metadata : dict (optional)
        Metadata to be associated with the specific mimetype output.
    """
    # thin wrapper: all the work happens in _display_mimetype
    _display_mimetype('text/latex', objs, **kwargs)
def display_json(*objs, **kwargs):
    """Display the JSON representation of an object.
    Note that not many frontends support displaying JSON.
    Parameters
    ----------
    objs : tuple of objects
        The Python objects to display, or if raw=True raw json data to
        display.
    raw : bool
        Are the data objects raw data or Python objects that need to be
        formatted before display? [default: False]
    metadata : dict (optional)
        Metadata to be associated with the specific mimetype output.
    """
    # thin wrapper: all the work happens in _display_mimetype
    _display_mimetype('application/json', objs, **kwargs)
def display_javascript(*objs, **kwargs):
    """Display the Javascript representation of an object.
    Parameters
    ----------
    objs : tuple of objects
        The Python objects to display, or if raw=True raw javascript data to
        display.
    raw : bool
        Are the data objects raw data or Python objects that need to be
        formatted before display? [default: False]
    metadata : dict (optional)
        Metadata to be associated with the specific mimetype output.
    """
    # thin wrapper: all the work happens in _display_mimetype
    _display_mimetype('application/javascript', objs, **kwargs)
def display_pdf(*objs, **kwargs):
    """Display the PDF representation of an object.
    Parameters
    ----------
    objs : tuple of objects
        The Python objects to display, or if raw=True raw PDF data to
        display.
    raw : bool
        Are the data objects raw data or Python objects that need to be
        formatted before display? [default: False]
    metadata : dict (optional)
        Metadata to be associated with the specific mimetype output.
    """
    # thin wrapper: all the work happens in _display_mimetype
    _display_mimetype('application/pdf', objs, **kwargs)
#-----------------------------------------------------------------------------
# Smart classes
#-----------------------------------------------------------------------------
class DisplayObject(object):
    """An object that wraps data to be displayed."""
    # file mode used by reload(); binary subclasses (e.g. Image) override with 'rb'
    _read_flags = 'r'
    # when True, __repr__ falls back to the default repr (includes memory address)
    _show_mem_addr = False
    def __init__(self, data=None, url=None, filename=None):
        """Create a display object given raw data.
        When this object is returned by an expression or passed to the
        display function, it will result in the data being displayed
        in the frontend. The MIME type of the data should match the
        subclasses used, so the Png subclass should be used for 'image/png'
        data. If the data is a URL, the data will first be downloaded
        and then displayed.
        Parameters
        ----------
        data : unicode, str or bytes
            The raw data or a URL or file to load the data from
        url : unicode
            A URL to download the data from.
        filename : unicode
            Path to a local file to load the data from.
        """
        if data is not None and isinstance(data, string_types):
            # a plain string positional arg may really be a URL or a local
            # path; reinterpret it instead of treating it as raw data
            if data.startswith('http') and url is None:
                url = data
                filename = None
                data = None
            elif _safe_exists(data) and filename is None:
                url = None
                filename = data
                data = None
        self.data = data
        self.url = url
        self.filename = None if filename is None else unicode_type(filename)
        # fetch from file/URL (if any), then let the subclass validate
        self.reload()
        self._check_data()
    def __repr__(self):
        # compact repr by default; opt into the address-bearing default repr
        # via _show_mem_addr
        if not self._show_mem_addr:
            cls = self.__class__
            r = "<%s.%s object>" % (cls.__module__, cls.__name__)
        else:
            r = super(DisplayObject, self).__repr__()
        return r
    def _check_data(self):
        """Override in subclasses if there's something to check."""
        pass
    def reload(self):
        """Reload the raw data from file or URL."""
        if self.filename is not None:
            with open(self.filename, self._read_flags) as f:
                self.data = f.read()
        elif self.url is not None:
            try:
                try:
                    from urllib.request import urlopen  # Py3
                except ImportError:
                    from urllib2 import urlopen  # Py2 fallback
                response = urlopen(self.url)
                self.data = response.read()
                # extract encoding from header, if there is one:
                encoding = None
                for sub in response.headers['content-type'].split(';'):
                    sub = sub.strip()
                    if sub.startswith('charset'):
                        encoding = sub.split('=')[-1].strip()
                        break
                # decode data, if an encoding was specified
                if encoding:
                    self.data = self.data.decode(encoding, 'replace')
            except:
                # NOTE(review): bare except swallows *all* errors (including
                # KeyboardInterrupt) and silently leaves data as None —
                # confirm this best-effort behavior is intended
                self.data = None
class TextDisplayObject(DisplayObject):
    """Validate that display data is text"""
    def _check_data(self):
        # reject bytes/other types early so frontends never receive non-text
        if self.data is not None and not isinstance(self.data, string_types):
            raise TypeError("%s expects text, not %r" % (self.__class__.__name__, self.data))
class Pretty(TextDisplayObject):
    """Display raw text as the 'pretty' (text/plain) representation."""
    def _repr_pretty_(self):
        # NOTE(review): IPython's pretty protocol normally passes (p, cycle)
        # arguments; this zero-arg signature looks historical — confirm
        return self.data
class HTML(TextDisplayObject):
    """Display raw text as HTML (text/html)."""
    def _repr_html_(self):
        return self.data
    def __html__(self):
        """
        This method exists to inform other HTML-using modules (e.g. Markupsafe,
        htmltag, etc) that this object is HTML and does not need things like
        special characters (<>&) escaped.
        """
        return self._repr_html_()
class Markdown(TextDisplayObject):
    """Display raw text as Markdown (text/markdown)."""
    def _repr_markdown_(self):
        return self.data
class Math(TextDisplayObject):
    """Display raw text as display-style LaTeX math."""
    def _repr_latex_(self):
        # strip any user-supplied '$' delimiters, then wrap in '$$...$$'
        # so the content always renders in display mode
        s = self.data.strip('$')
        return "$$%s$$" % s
class Latex(TextDisplayObject):
    """Display raw text as LaTeX (text/latex), delimiters left to the caller."""
    def _repr_latex_(self):
        return self.data
class SVG(DisplayObject):
    # wrap data in a property, which extracts the <svg> tag, discarding
    # document headers
    _data = None
    @property
    def data(self):
        return self._data
    @data.setter
    def data(self, svg):
        if svg is None:
            self._data = None
            return
        # parse into dom object
        from xml.dom import minidom
        svg = cast_bytes_py2(svg)
        x = minidom.parseString(svg)
        # get svg tag (should be 1)
        found_svg = x.getElementsByTagName('svg')
        if found_svg:
            # keep only the <svg> element; XML prolog/doctype are dropped
            svg = found_svg[0].toxml()
        else:
            # fallback on the input, trust the user
            # but this is probably an error.
            pass
        # stored as unicode text regardless of the input type
        svg = cast_unicode(svg)
        self._data = svg
    def _repr_svg_(self):
        return self.data
class JSON(DisplayObject):
    """JSON expects a JSON-able dict or list
    not an already-serialized JSON string.
    Scalar types (None, number, string) are not allowed, only dict or list containers.
    """
    # wrap data in a property, which warns about passing already-serialized JSON
    _data = None
    def _check_data(self):
        # containers only: frontends receive the structure, not a string
        if self.data is not None and not isinstance(self.data, (dict, list)):
            raise TypeError("%s expects JSONable dict or list, not %r" % (self.__class__.__name__, self.data))
    @property
    def data(self):
        return self._data
    @data.setter
    def data(self, data):
        if isinstance(data, string_types):
            # tolerate pre-serialized input, but warn and parse it back
            warnings.warn("JSON expects JSONable dict or list, not JSON strings")
            data = json.loads(data)
        self._data = data
    def _repr_json_(self):
        # the repr protocol for JSON returns the structure itself
        return self.data
# Javascript template fragments used by the Javascript display class:
# css_t injects a stylesheet <link> via jQuery; lib_t1/lib_t2 open and
# close a $.getScript callback so libraries load before the user's code.
css_t = """$("head").append($("<link/>").attr({
rel: "stylesheet",
type: "text/css",
href: "%s"
}));
"""
lib_t1 = """$.getScript("%s", function () {
"""
lib_t2 = """});
"""
class Javascript(TextDisplayObject):
    def __init__(self, data=None, url=None, filename=None, lib=None, css=None):
        """Create a Javascript display object given raw data.
        When this object is returned by an expression or passed to the
        display function, it will result in the data being displayed
        in the frontend. If the data is a URL, the data will first be
        downloaded and then displayed.
        In the Notebook, the containing element will be available as `element`,
        and jQuery will be available. Content appended to `element` will be
        visible in the output area.
        Parameters
        ----------
        data : unicode, str or bytes
            The Javascript source code or a URL to download it from.
        url : unicode
            A URL to download the data from.
        filename : unicode
            Path to a local file to load the data from.
        lib : list or str
            A sequence of Javascript library URLs to load asynchronously before
            running the source code. The full URLs of the libraries should
            be given. A single Javascript library URL can also be given as a
            string.
        css: : list or str
            A sequence of css files to load before running the source code.
            The full URLs of the css files should be given. A single css URL
            can also be given as a string.
        """
        # normalize single-string arguments to one-element lists
        if isinstance(lib, string_types):
            lib = [lib]
        elif lib is None:
            lib = []
        if isinstance(css, string_types):
            css = [css]
        elif css is None:
            css = []
        if not isinstance(lib, (list,tuple)):
            raise TypeError('expected sequence, got: %r' % lib)
        if not isinstance(css, (list,tuple)):
            raise TypeError('expected sequence, got: %r' % css)
        self.lib = lib
        self.css = css
        super(Javascript, self).__init__(data=data, url=url, filename=filename)
    def _repr_javascript_(self):
        # emit stylesheet injections first, then open one nested
        # $.getScript callback per library around the user's source
        r = ''
        for c in self.css:
            r += css_t % c
        for l in self.lib:
            r += lib_t1 % l
        r += self.data
        # close one callback per library opened above
        r += lib_t2*len(self.lib)
        return r
# constants for identifying png/jpeg data
# PNG files start with this fixed 8-byte signature
_PNG = b'\x89PNG\r\n\x1a\n'
# JPEG files start with the SOI (start-of-image) marker
_JPEG = b'\xff\xd8'
def _pngxy(data):
"""read the (width, height) from a PNG header"""
ihdr = data.index(b'IHDR')
# next 8 bytes are width/height
w4h4 = data[ihdr+4:ihdr+12]
return struct.unpack('>ii', w4h4)
def _jpegxy(data):
"""read the (width, height) from a JPEG header"""
# adapted from http://www.64lines.com/jpeg-width-height
idx = 4
while True:
block_size = struct.unpack('>H', data[idx:idx+2])[0]
idx = idx + block_size
if data[idx:idx+2] == b'\xFF\xC0':
# found Start of Frame
iSOF = idx
break
else:
# read another block
idx += 2
h, w = struct.unpack('>HH', data[iSOF+5:iSOF+9])
return w, h
class Image(DisplayObject):
    # images are binary files
    _read_flags = 'rb'
    _FMT_JPEG = u'jpeg'
    _FMT_PNG = u'png'
    # only these formats may be embedded as inline data
    _ACCEPTABLE_EMBEDDINGS = [_FMT_JPEG, _FMT_PNG]
    def __init__(self, data=None, url=None, filename=None, format=None,
                 embed=None, width=None, height=None, retina=False,
                 unconfined=False, metadata=None):
        """Create a PNG/JPEG image object given raw data.
        When this object is returned by an input cell or passed to the
        display function, it will result in the image being displayed
        in the frontend.
        Parameters
        ----------
        data : unicode, str or bytes
            The raw image data or a URL or filename to load the data from.
            This always results in embedded image data.
        url : unicode
            A URL to download the data from. If you specify `url=`,
            the image data will not be embedded unless you also specify `embed=True`.
        filename : unicode
            Path to a local file to load the data from.
            Images from a file are always embedded.
        format : unicode
            The format of the image data (png/jpeg/jpg). If a filename or URL is given
            for format will be inferred from the filename extension.
        embed : bool
            Should the image data be embedded using a data URI (True) or be
            loaded using an <img> tag. Set this to True if you want the image
            to be viewable later with no internet connection in the notebook.
            Default is `True`, unless the keyword argument `url` is set, then
            default value is `False`.
            Note that QtConsole is not able to display images if `embed` is set to `False`
        width : int
            Width in pixels to which to constrain the image in html
        height : int
            Height in pixels to which to constrain the image in html
        retina : bool
            Automatically set the width and height to half of the measured
            width and height.
            This only works for embedded images because it reads the width/height
            from image data.
            For non-embedded images, you can just set the desired display width
            and height directly.
        unconfined: bool
            Set unconfined=True to disable max-width confinement of the image.
        metadata: dict
            Specify extra metadata to attach to the image.
        Examples
        --------
        # embedded image data, works in qtconsole and notebook
        # when passed positionally, the first arg can be any of raw image data,
        # a URL, or a filename from which to load image data.
        # The result is always embedding image data for inline images.
        Image('http://www.google.fr/images/srpr/logo3w.png')
        Image('/path/to/image.jpg')
        Image(b'RAW_PNG_DATA...')
        # Specifying Image(url=...) does not embed the image data,
        # it only generates `<img>` tag with a link to the source.
        # This will not work in the qtconsole or offline.
        Image(url='http://www.google.fr/images/srpr/logo3w.png')
        """
        # work out a file extension to infer the format from, if possible
        if filename is not None:
            ext = self._find_ext(filename)
        elif url is not None:
            ext = self._find_ext(url)
        elif data is None:
            raise ValueError("No image data found. Expecting filename, url, or data.")
        elif isinstance(data, string_types) and (
            data.startswith('http') or _safe_exists(data)
        ):
            ext = self._find_ext(data)
        else:
            ext = None
        if format is None:
            if ext is not None:
                # map common extensions onto canonical format names.
                # (the previous version used a dangling `else` attached to
                # the png test, setting `format` twice for jpg/jpeg; this
                # explicit elif ladder is equivalent but unambiguous)
                if ext == u'jpg' or ext == u'jpeg':
                    format = self._FMT_JPEG
                elif ext == u'png':
                    format = self._FMT_PNG
                else:
                    format = ext.lower()
            elif isinstance(data, bytes):
                # infer image type from image data header,
                # only if format has not been specified.
                if data[:2] == _JPEG:
                    format = self._FMT_JPEG
        # failed to detect format, default png
        if format is None:
            format = 'png'
        if format.lower() == 'jpg':
            # jpg->jpeg
            format = self._FMT_JPEG
        self.format = unicode_type(format).lower()
        # default: embed unless the image came from a URL
        self.embed = embed if embed is not None else (url is None)
        if self.embed and self.format not in self._ACCEPTABLE_EMBEDDINGS:
            raise ValueError("Cannot embed the '%s' image format" % (self.format))
        self.width = width
        self.height = height
        self.retina = retina
        self.unconfined = unconfined
        self.metadata = metadata
        super(Image, self).__init__(data=data, url=url, filename=filename)
        if retina:
            self._retina_shape()
    def _retina_shape(self):
        """load pixel-doubled width and height from image data"""
        if not self.embed:
            return
        if self.format == 'png':
            w, h = _pngxy(self.data)
        elif self.format == 'jpeg':
            w, h = _jpegxy(self.data)
        else:
            # retina only supports png/jpeg
            return
        # display at half the stored resolution
        self.width = w // 2
        self.height = h // 2
    def reload(self):
        """Reload the raw data from file or URL."""
        if self.embed:
            super(Image,self).reload()
            if self.retina:
                self._retina_shape()
    def _repr_html_(self):
        # only non-embedded images get an <img> tag; embedded images are
        # published through _repr_png_/_repr_jpeg_ instead (returns None here)
        if not self.embed:
            width = height = klass = ''
            if self.width:
                width = ' width="%d"' % self.width
            if self.height:
                height = ' height="%d"' % self.height
            if self.unconfined:
                klass = ' class="unconfined"'
            return u'<img src="{url}"{width}{height}{klass}/>'.format(
                url=self.url,
                width=width,
                height=height,
                klass=klass,
            )
    def _data_and_metadata(self):
        """shortcut for returning metadata with shape information, if defined"""
        md = {}
        if self.width:
            md['width'] = self.width
        if self.height:
            md['height'] = self.height
        if self.unconfined:
            md['unconfined'] = self.unconfined
        if self.metadata:
            md.update(self.metadata)
        if md:
            return self.data, md
        else:
            return self.data
    def _repr_png_(self):
        if self.embed and self.format == u'png':
            return self._data_and_metadata()
    def _repr_jpeg_(self):
        if self.embed and (self.format == u'jpeg' or self.format == u'jpg'):
            return self._data_and_metadata()
    def _find_ext(self, s):
        # last dot-separated component, lowercased
        return unicode_type(s.split('.')[-1].lower())
class Video(DisplayObject):
    def __init__(self, data=None, url=None, filename=None, embed=False, mimetype=None):
        """Create a video object given raw data or an URL.
        When this object is returned by an input cell or passed to the
        display function, it will result in the video being displayed
        in the frontend.
        Parameters
        ----------
        data : unicode, str or bytes
            The raw video data or a URL or filename to load the data from.
            Raw data will require passing `embed=True`.
        url : unicode
            A URL for the video. If you specify `url=`,
            the image data will not be embedded.
        filename : unicode
            Path to a local file containing the video.
            Will be interpreted as a local URL unless `embed=True`.
        embed : bool
            Should the video be embedded using a data URI (True) or be
            loaded using a <video> tag (False).
            Since videos are large, embedding them should be avoided, if possible.
            You must confirm embedding as your intention by passing `embed=True`.
            Local files can be displayed with URLs without embedding the content, via::
                Video('./video.mp4')
        mimetype: unicode
            Specify the mimetype for embedded videos.
            Default will be guessed from file extension, if available.
        Examples
        --------
        Video('https://archive.org/download/Sita_Sings_the_Blues/Sita_Sings_the_Blues_small.mp4')
        Video('path/to/video.mp4')
        Video('path/to/video.mp4', embed=True)
        Video(b'raw-videodata', embed=True)
        """
        # a positional string may really be a URL or a local path
        if url is None and isinstance(data, string_types) and data.startswith(('http:', 'https:')):
            url = data
            data = None
        elif data is not None and os.path.exists(data):
            # guard `data is not None`: previously os.path.exists(None)
            # raised TypeError whenever only url= or filename= was passed
            filename = data
            data = None
        if data and not embed:
            # embedding must be an explicit choice because it bloats notebooks
            msg = ''.join([
                "To embed videos, you must pass embed=True ",
                "(this may make your notebook files huge)\n",
                "Consider passing Video(url='...')",
            ])
            raise ValueError(msg)
        self.mimetype = mimetype
        self.embed = embed
        super(Video, self).__init__(data=data, url=url, filename=filename)
    def _repr_html_(self):
        # External URLs and potentially local files are not embedded into the
        # notebook output.
        if not self.embed:
            url = self.url if self.url is not None else self.filename
            output = """<video src="{0}" controls>
      Your browser does not support the <code>video</code> element.
    </video>""".format(url)
            return output
        # Embedded videos are base64-encoded.
        mimetype = self.mimetype
        if self.filename is not None:
            if not mimetype:
                # best-effort guess from the file extension
                mimetype, _ = mimetypes.guess_type(self.filename)
            with open(self.filename, 'rb') as f:
                video = f.read()
        else:
            video = self.data
        if isinstance(video, unicode_type):
            # unicode input is already b64-encoded
            b64_video = video
        else:
            b64_video = base64_encode(video).decode('ascii').rstrip()
        # NOTE(review): if mimetype is still None here the data URI reads
        # "data:None;..." — callers embedding raw data should pass mimetype=
        output = """<video controls>
 <source src="data:{0};base64,{1}" type="{0}">
 Your browser does not support the video tag.
 </video>""".format(mimetype, b64_video)
        return output
    def reload(self):
        # TODO: video data is never (re)loaded from file/URL
        pass
    def _repr_png_(self):
        # TODO
        pass
    def _repr_jpeg_(self):
        # TODO
        pass
def clear_output(wait=False):
    """Clear the output of the current cell receiving output.
    Parameters
    ----------
    wait : bool [default: false]
        Wait to clear the output until new output is available to replace it."""
    from IPython.core.interactiveshell import InteractiveShell
    if InteractiveShell.initialized():
        InteractiveShell.instance().display_pub.clear_output(wait)
    else:
        # no shell: fall back to clearing the current terminal line on both
        # streams ('\033[2K' = ANSI erase-line, '\r' = carriage return)
        print('\033[2K\r', end='')
        sys.stdout.flush()
        print('\033[2K\r', end='')
        sys.stderr.flush()
@skip_doctest
def set_matplotlib_formats(*formats, **kwargs):
    """Select figure formats for the inline backend. Optionally pass quality for JPEG.
    For example, this enables PNG and JPEG output with a JPEG quality of 90%::
        In [1]: set_matplotlib_formats('png', 'jpeg', quality=90)
    To set this in your config files use the following::
        c.InlineBackend.figure_formats = {'png', 'jpeg'}
        c.InlineBackend.print_figure_kwargs.update({'quality' : 90})
    Parameters
    ----------
    *formats : strs
        One or more figure formats to enable: 'png', 'retina', 'jpeg', 'svg', 'pdf'.
    **kwargs :
        Keyword args will be relayed to ``figure.canvas.print_figure``.
    """
    from IPython.core.interactiveshell import InteractiveShell
    from IPython.core.pylabtools import select_figure_formats
    # build kwargs, starting with InlineBackend config
    kw = {}
    from ipykernel.pylab.config import InlineBackend
    cfg = InlineBackend.instance()
    kw.update(cfg.print_figure_kwargs)
    # explicit keyword args override the configured defaults
    kw.update(**kwargs)
    shell = InteractiveShell.instance()
    select_figure_formats(shell, formats, **kw)
@skip_doctest
def set_matplotlib_close(close=True):
    """Set whether the inline backend closes all figures automatically or not.
    By default, the inline backend used in the IPython Notebook will close all
    matplotlib figures automatically after each cell is run. This means that
    plots in different cells won't interfere. Sometimes, you may want to make
    a plot in one cell and then refine it in later cells. This can be accomplished
    by::
        In [1]: set_matplotlib_close(False)
    To set this in your config files use the following::
        c.InlineBackend.close_figures = False
    Parameters
    ----------
    close : bool
        Should all matplotlib figures be automatically closed after each cell is
        run?
    """
    # flip the flag on the singleton InlineBackend configurable
    from ipykernel.pylab.config import InlineBackend
    cfg = InlineBackend.instance()
    cfg.close_figures = close
| {
"content_hash": "08931db8977875118b7ee9307335e3c0",
"timestamp": "",
"source": "github",
"line_count": 1003,
"max_line_length": 110,
"avg_line_length": 33.93818544366899,
"alnum_prop": 0.5918331374853114,
"repo_name": "lancezlin/ml_template_py",
"id": "4a943e4dbb4eca8d0724e54589e9c0b4751b8bdc",
"size": "34064",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/python2.7/site-packages/IPython/core/display.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "326933"
},
{
"name": "C++",
"bytes": "14430"
},
{
"name": "CSS",
"bytes": "7806"
},
{
"name": "FORTRAN",
"bytes": "3200"
},
{
"name": "HTML",
"bytes": "596861"
},
{
"name": "JavaScript",
"bytes": "4020233"
},
{
"name": "Jupyter Notebook",
"bytes": "517957"
},
{
"name": "Objective-C",
"bytes": "567"
},
{
"name": "Python",
"bytes": "41191064"
},
{
"name": "Shell",
"bytes": "3373"
},
{
"name": "Smarty",
"bytes": "26298"
}
],
"symlink_target": ""
} |
"""Service calling related helpers."""
import asyncio
from functools import partial, wraps
import logging
from typing import (
TYPE_CHECKING,
Any,
Callable,
Dict,
Iterable,
List,
Optional,
Set,
Tuple,
)
import voluptuous as vol
from homeassistant.auth.permissions.const import CAT_ENTITIES, POLICY_CONTROL
from homeassistant.const import (
ATTR_AREA_ID,
ATTR_ENTITY_ID,
CONF_SERVICE,
CONF_SERVICE_TEMPLATE,
ENTITY_MATCH_ALL,
ENTITY_MATCH_NONE,
)
import homeassistant.core as ha
from homeassistant.exceptions import (
HomeAssistantError,
TemplateError,
Unauthorized,
UnknownUser,
)
from homeassistant.helpers import template
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.typing import ConfigType, HomeAssistantType, TemplateVarsType
from homeassistant.loader import async_get_integration, bind_hass
from homeassistant.util.yaml import load_yaml
from homeassistant.util.yaml.loader import JSON_TYPE
if TYPE_CHECKING:
from homeassistant.helpers.entity import Entity # noqa
# mypy: allow-untyped-defs, no-check-untyped-defs
# keys read from service-call config dictionaries
CONF_SERVICE_ENTITY_ID = "entity_id"
CONF_SERVICE_DATA = "data"
CONF_SERVICE_DATA_TEMPLATE = "data_template"
_LOGGER = logging.getLogger(__name__)
# hass.data key under which loaded service descriptions are cached
SERVICE_DESCRIPTION_CACHE = "service_description_cache"
@bind_hass
def call_from_config(
    hass: HomeAssistantType,
    config: ConfigType,
    blocking: bool = False,
    variables: TemplateVarsType = None,
    validate_config: bool = True,
) -> None:
    """Call a service based on a config hash.

    Synchronous wrapper: schedules the async variant on the event loop and
    blocks the calling thread until it completes.
    """
    asyncio.run_coroutine_threadsafe(
        async_call_from_config(hass, config, blocking, variables, validate_config),
        hass.loop,
    ).result()
@bind_hass
async def async_call_from_config(
    hass: HomeAssistantType,
    config: ConfigType,
    blocking: bool = False,
    variables: TemplateVarsType = None,
    validate_config: bool = True,
    context: Optional[ha.Context] = None,
) -> None:
    """Call a service based on a config hash."""
    try:
        parms = async_prepare_call_from_config(hass, config, variables, validate_config)
    except HomeAssistantError as ex:
        # non-blocking callers get a logged error instead of an exception,
        # so e.g. automations keep running
        if blocking:
            raise
        _LOGGER.error(ex)
    else:
        # parms is (domain, service, service_data); blocking/context are
        # passed positionally after the unpack
        await hass.services.async_call(*parms, blocking, context)
@ha.callback
@bind_hass
def async_prepare_call_from_config(
    hass: HomeAssistantType,
    config: ConfigType,
    variables: TemplateVarsType = None,
    validate_config: bool = False,
) -> Tuple[str, str, Dict[str, Any]]:
    """Prepare to call a service based on a config hash.

    Returns the (domain, service, service_data) triple; raises
    HomeAssistantError on invalid config or template failures.
    """
    if validate_config:
        try:
            config = cv.SERVICE_SCHEMA(config)
        except vol.Invalid as ex:
            raise HomeAssistantError(
                f"Invalid config for calling service: {ex}"
            ) from ex
    if CONF_SERVICE in config:
        # static "service: domain.name" entry
        domain_service = config[CONF_SERVICE]
    else:
        # "service_template": render, then validate the rendered name
        try:
            config[CONF_SERVICE_TEMPLATE].hass = hass
            domain_service = config[CONF_SERVICE_TEMPLATE].async_render(variables)
            domain_service = cv.service(domain_service)
        except TemplateError as ex:
            raise HomeAssistantError(
                f"Error rendering service name template: {ex}"
            ) from ex
        except vol.Invalid as ex:
            # domain_service holds the (invalid) rendered value here
            raise HomeAssistantError(
                f"Template rendered invalid service: {domain_service}"
            ) from ex
    domain, service = domain_service.split(".", 1)
    # copy so template-rendered/entity_id additions don't mutate the config
    service_data = dict(config.get(CONF_SERVICE_DATA, {}))
    if CONF_SERVICE_DATA_TEMPLATE in config:
        try:
            template.attach(hass, config[CONF_SERVICE_DATA_TEMPLATE])
            service_data.update(
                template.render_complex(config[CONF_SERVICE_DATA_TEMPLATE], variables)
            )
        except TemplateError as ex:
            raise HomeAssistantError(f"Error rendering data template: {ex}") from ex
    if CONF_SERVICE_ENTITY_ID in config:
        # top-level entity_id shorthand wins over any data-provided value
        service_data[ATTR_ENTITY_ID] = config[CONF_SERVICE_ENTITY_ID]
    return domain, service, service_data
@bind_hass
def extract_entity_ids(
    hass: HomeAssistantType, service_call: ha.ServiceCall, expand_group: bool = True
) -> Set[str]:
    """Extract a list of entity ids from a service call.
    Will convert group entity ids to the entity ids it represents.

    Synchronous wrapper around async_extract_entity_ids; blocks the calling
    thread until the coroutine finishes on the event loop.
    """
    return asyncio.run_coroutine_threadsafe(
        async_extract_entity_ids(hass, service_call, expand_group), hass.loop
    ).result()
@bind_hass
async def async_extract_entities(
    hass: HomeAssistantType,
    entities: Iterable["Entity"],
    service_call: ha.ServiceCall,
    expand_group: bool = True,
) -> List["Entity"]:
    """Extract a list of entity objects from a service call.

    Will convert group entity ids to the entity ids it represents.
    """
    if service_call.data.get(ATTR_ENTITY_ID) == ENTITY_MATCH_ALL:
        # Wildcard target: every available entity qualifies.
        return [entity for entity in entities if entity.available]
    entity_ids = await async_extract_entity_ids(hass, service_call, expand_group)
    selected: List["Entity"] = []
    for candidate in entities:
        if candidate.entity_id not in entity_ids:
            continue
        # Mark as matched so unmatched leftovers can be reported below.
        entity_ids.remove(candidate.entity_id)
        if candidate.available:
            selected.append(candidate)
    if entity_ids:
        _LOGGER.warning(
            "Unable to find referenced entities %s", ", ".join(sorted(entity_ids))
        )
    return selected
@bind_hass
async def async_extract_entity_ids(
    hass: HomeAssistantType, service_call: ha.ServiceCall, expand_group: bool = True
) -> Set[str]:
    """Extract a list of entity ids from a service call.

    Will convert group entity ids to the entity ids it represents.
    Considers both the entity_id and area_id attributes of the call; area
    ids are resolved to entities via the device and entity registries.
    """
    entity_ids = service_call.data.get(ATTR_ENTITY_ID)
    area_ids = service_call.data.get(ATTR_AREA_ID)
    extracted: Set[str] = set()
    # Nothing targeted at all -> empty selection.
    if entity_ids in (None, ENTITY_MATCH_NONE) and area_ids in (
        None,
        ENTITY_MATCH_NONE,
    ):
        return extracted
    if entity_ids and entity_ids != ENTITY_MATCH_NONE:
        # Entity ID attr can be a list or a string
        if isinstance(entity_ids, str):
            entity_ids = [entity_ids]
        if expand_group:
            # Replace group entities with the ids of their members.
            entity_ids = hass.components.group.expand_entity_ids(entity_ids)
        extracted.update(entity_ids)
    if area_ids and area_ids != ENTITY_MATCH_NONE:
        if isinstance(area_ids, str):
            area_ids = [area_ids]
        # Load the device and entity registries concurrently.
        dev_reg, ent_reg = await asyncio.gather(
            hass.helpers.device_registry.async_get_registry(),
            hass.helpers.entity_registry.async_get_registry(),
        )
        devices = [
            device
            for area_id in area_ids
            for device in hass.helpers.device_registry.async_entries_for_area(
                dev_reg, area_id
            )
        ]
        # Include every entity of every device in the targeted areas.
        extracted.update(
            entry.entity_id
            for device in devices
            for entry in hass.helpers.entity_registry.async_entries_for_device(
                ent_reg, device.id
            )
        )
    return extracted
async def _load_services_file(hass: HomeAssistantType, domain: str) -> JSON_TYPE:
    """Load services file for an integration.

    Returns an empty dict when the file is missing or fails to parse.
    """
    integration = await async_get_integration(hass, domain)
    services_path = str(integration.file_path / "services.yaml")
    try:
        # YAML parsing is blocking work, push it to the executor.
        return await hass.async_add_executor_job(load_yaml, services_path)
    except FileNotFoundError:
        _LOGGER.warning("Unable to find services.yaml for the %s integration", domain)
    except HomeAssistantError:
        _LOGGER.warning("Unable to parse services.yaml for the %s integration", domain)
    return {}
@bind_hass
async def async_get_all_descriptions(
    hass: HomeAssistantType,
) -> Dict[str, Dict[str, Any]]:
    """Return descriptions (i.e. user documentation) for all service calls.

    Results are cached in hass.data; only domains with at least one uncached
    service trigger a services.yaml load.
    """
    descriptions_cache = hass.data.setdefault(SERVICE_DESCRIPTION_CACHE, {})
    format_cache_key = "{}.{}".format
    services = hass.services.async_services()
    # See if there are new services not seen before.
    # Any service that we saw before already has an entry in description_cache.
    missing = set()
    for domain in services:
        for service in services[domain]:
            if format_cache_key(domain, service) not in descriptions_cache:
                missing.add(domain)
                break
    # Files we loaded for missing descriptions
    loaded = {}
    if missing:
        # Load all missing services.yaml files concurrently, one per domain.
        contents = await asyncio.gather(
            *(_load_services_file(hass, domain) for domain in missing)
        )
        for domain, content in zip(missing, contents):
            loaded[domain] = content
    # Build response
    descriptions: Dict[str, Dict[str, Any]] = {}
    for domain in services:
        descriptions[domain] = {}
        for service in services[domain]:
            cache_key = format_cache_key(domain, service)
            description = descriptions_cache.get(cache_key)
            # Cache missing descriptions
            if description is None:
                domain_yaml = loaded[domain]
                yaml_description = domain_yaml.get(service, {})
                # Don't warn for missing services, because it triggers false
                # positives for things like scripts, that register as a service
                description = descriptions_cache[cache_key] = {
                    "description": yaml_description.get("description", ""),
                    "fields": yaml_description.get("fields", {}),
                }
            descriptions[domain][service] = description
    return descriptions
@ha.callback
@bind_hass
def async_set_service_schema(
    hass: HomeAssistantType, domain: str, service: str, schema: Dict[str, Any]
) -> None:
    """Register a description for a service.

    Stores the description in the same cache used by
    async_get_all_descriptions.
    """
    descriptions_cache = hass.data.setdefault(SERVICE_DESCRIPTION_CACHE, {})
    descriptions_cache[f"{domain}.{service}"] = {
        "description": schema.get("description") or "",
        "fields": schema.get("fields") or {},
    }
@bind_hass
async def entity_service_call(hass, platforms, func, call, required_features=None):
    """Handle an entity service call.

    Calls all platforms simultaneously.
    ``func`` is either the name of an entity method (str) or a callable;
    ``call`` is the incoming ServiceCall.  ``required_features`` is an
    optional iterable of feature bitmasks; an entity is only targeted when
    it fully supports at least one of them.
    """
    # Resolve the calling user's entity-permission checker; None means the
    # call has no user context and no permission checks apply.
    if call.context.user_id:
        user = await hass.auth.async_get_user(call.context.user_id)
        if user is None:
            raise UnknownUser(context=call.context)
        entity_perms = user.permissions.check_entity
    else:
        entity_perms = None
    target_all_entities = call.data.get(ATTR_ENTITY_ID) == ENTITY_MATCH_ALL
    if not target_all_entities:
        # A set of entities we're trying to target.
        entity_ids = await async_extract_entity_ids(hass, call, True)
    # If the service function is a string, we'll pass it the service call data
    if isinstance(func, str):
        data = {
            key: val
            for key, val in call.data.items()
            if key not in cv.ENTITY_SERVICE_FIELDS
        }
    # If the service function is not a string, we pass the service call
    else:
        data = call
    # Check the permissions
    # A list with entities to call the service on.
    entity_candidates = []
    if entity_perms is None:
        # Unrestricted: take every matching entity from every platform.
        for platform in platforms:
            if target_all_entities:
                entity_candidates.extend(platform.entities.values())
            else:
                entity_candidates.extend(
                    [
                        entity
                        for entity in platform.entities.values()
                        if entity.entity_id in entity_ids
                    ]
                )
    elif target_all_entities:
        # If we target all entities, we will select all entities the user
        # is allowed to control.
        for platform in platforms:
            entity_candidates.extend(
                [
                    entity
                    for entity in platform.entities.values()
                    if entity_perms(entity.entity_id, POLICY_CONTROL)
                ]
            )
    else:
        # Explicit targets with a restricted user: any unauthorized target
        # aborts the entire call.
        for platform in platforms:
            platform_entities = []
            for entity in platform.entities.values():
                if entity.entity_id not in entity_ids:
                    continue
                if not entity_perms(entity.entity_id, POLICY_CONTROL):
                    raise Unauthorized(
                        context=call.context,
                        entity_id=entity.entity_id,
                        permission=POLICY_CONTROL,
                    )
                platform_entities.append(entity)
            entity_candidates.extend(platform_entities)
    if not target_all_entities:
        # Whatever ids remain were requested but matched no platform entity.
        for entity in entity_candidates:
            entity_ids.remove(entity.entity_id)
        if entity_ids:
            _LOGGER.warning(
                "Unable to find referenced entities %s", ", ".join(sorted(entity_ids))
            )
    entities = []
    for entity in entity_candidates:
        if not entity.available:
            continue
        # Skip entities that don't have the required feature.
        if required_features is not None and not any(
            entity.supported_features & feature_set == feature_set
            for feature_set in required_features
        ):
            continue
        entities.append(entity)
    if not entities:
        return
    # Fan out to all target entities at once and surface the first exception.
    done, pending = await asyncio.wait(
        [
            entity.async_request_call(
                _handle_entity_call(hass, entity, func, data, call.context)
            )
            for entity in entities
        ]
    )
    assert not pending
    for future in done:
        future.result()  # pop exception if have
    tasks = []
    for entity in entities:
        if not entity.should_poll:
            continue
        # Context expires if the turn on commands took a long time.
        # Set context again so it's there when we update
        entity.async_set_context(call.context)
        tasks.append(entity.async_update_ha_state(True))
    if tasks:
        done, pending = await asyncio.wait(tasks)
        assert not pending
        for future in done:
            future.result()  # pop exception if have
async def _handle_entity_call(hass, entity, func, data, context):
"""Handle calling service method."""
entity.async_set_context(context)
if isinstance(func, str):
result = hass.async_add_job(partial(getattr(entity, func), **data))
else:
result = hass.async_add_job(func, entity, data)
# Guard because callback functions do not return a task when passed to async_add_job.
if result is not None:
await result
if asyncio.iscoroutine(result):
_LOGGER.error(
"Service %s for %s incorrectly returns a coroutine object. Await result instead in service handler. Report bug to integration author.",
func,
entity.entity_id,
)
await result
@bind_hass
@ha.callback
def async_register_admin_service(
    hass: HomeAssistantType,
    domain: str,
    service: str,
    service_func: Callable,
    schema: vol.Schema = vol.Schema({}, extra=vol.PREVENT_EXTRA),
) -> None:
    """Register a service that requires admin access.

    The wrapped handler rejects calls from non-admin users before invoking
    the original service function.
    """
    @wraps(service_func)
    async def admin_handler(call: ha.ServiceCall) -> None:
        user_id = call.context.user_id
        if user_id:
            user = await hass.auth.async_get_user(user_id)
            if user is None:
                raise UnknownUser(context=call.context)
            if not user.is_admin:
                raise Unauthorized(context=call.context)
        # Callback handlers return None from async_add_job; only await tasks.
        result = hass.async_add_job(service_func, call)
        if result is not None:
            await result
    hass.services.async_register(domain, service, admin_handler, schema)
@bind_hass
@ha.callback
def verify_domain_control(hass: HomeAssistantType, domain: str) -> Callable:
    """Ensure permission to access any entity under domain in service call.

    Returns a decorator for async service handlers; the wrapped handler
    raises before running when the calling user may not control any entity
    registered by ``domain``.
    """
    def decorator(service_handler: Callable) -> Callable:
        """Decorate."""
        if not asyncio.iscoroutinefunction(service_handler):
            raise HomeAssistantError("Can only decorate async functions.")
        async def check_permissions(call):
            """Check user permission and raise before call if unauthorized."""
            # Calls without a user context (e.g. automations) pass through.
            if not call.context.user_id:
                return await service_handler(call)
            user = await hass.auth.async_get_user(call.context.user_id)
            if user is None:
                raise UnknownUser(
                    context=call.context,
                    permission=POLICY_CONTROL,
                    user_id=call.context.user_id,
                )
            reg = await hass.helpers.entity_registry.async_get_registry()
            # Authorized if the user may control at least one of the
            # domain's registered entities.
            authorized = False
            for entity in reg.entities.values():
                if entity.platform != domain:
                    continue
                if user.permissions.check_entity(entity.entity_id, POLICY_CONTROL):
                    authorized = True
                    break
            if not authorized:
                raise Unauthorized(
                    context=call.context,
                    permission=POLICY_CONTROL,
                    user_id=call.context.user_id,
                    perm_category=CAT_ENTITIES,
                )
            return await service_handler(call)
        return check_permissions
    return decorator
| {
"content_hash": "988143349e4a22670ac420f77a599322",
"timestamp": "",
"source": "github",
"line_count": 571,
"max_line_length": 147,
"avg_line_length": 30.833625218914186,
"alnum_prop": 0.6135976371691468,
"repo_name": "robbiet480/home-assistant",
"id": "2c4f02990bf6b7952c7423c0e27d1a4d2c6d620b",
"size": "17606",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "homeassistant/helpers/service.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "18837456"
},
{
"name": "Shell",
"bytes": "6846"
}
],
"symlink_target": ""
} |
""" Git Feed Views reports tests """
from django.test import TestCase
from django.test import Client
from django.conf import settings
from django.utils import timezone
from app.logic.gitrepo.models.GitProjectModel import GitProjectEntry
from app.logic.gitrepo.models.GitUserModel import GitUserEntry
from app.logic.gitrepo.models.GitCommitModel import GitCommitEntry
from app.logic.gitrepo.models.GitParentModel import GitParentEntry
from app.logic.gitrepo.models.GitDiffModel import GitDiffEntry
from app.logic.gitrepo.models.GitBranchModel import GitBranchEntry
from app.logic.gitrepo.models.GitBranchTrailModel import GitBranchTrailEntry
from app.logic.gitfeeder.helper import FeederTestHelper
from app.logic.commandrepo.models.CommandGroupModel import CommandGroupEntry
from app.logic.commandrepo.models.CommandSetModel import CommandSetEntry
from app.logic.commandrepo.models.CommandModel import CommandEntry
from app.logic.commandrepo.models.CommandResultModel import CommandResultEntry
from app.logic.httpcommon import res
from datetime import timedelta
import json
import os
import hashlib
import shutil
import datetime
class GitFeedViewsReportsTestCase(TestCase):
    """Exercise the /main/feed/report/ endpoint that ingests command reports."""
    def setUp(self):
        # Minimal git project fixture: one project, one user, two commits and
        # a master branch, as the feed endpoints expect them to exist.
        self.client = Client()
        self.git_project1 = GitProjectEntry.objects.create(url='http://test/')
        self.git_user1 = GitUserEntry.objects.create(
            project=self.git_project1,
            name='user1',
            email='user1@test.com'
        )
        self.commit_time = str(timezone.now().isoformat())
        self.commit1 = FeederTestHelper.create_commit(1, [], 'user1', 'user1@test.com', self.commit_time, self.commit_time)
        self.commit2 = FeederTestHelper.create_commit(2, [], 'user1', 'user1@test.com', self.commit_time, self.commit_time)
        self.branch1 = FeederTestHelper.create_branch('master', 1, 'master', 1, 1, [1], 'merge-target-content')
    def tearDown(self):
        # Database rollback is handled by TestCase; nothing extra to clean.
        pass
    def test_report_ok(self):
        # Posting a single report with one command should create exactly one
        # CommandGroup, one CommandSet and one Command with its result.
        reports = []
        report = {}
        report['commands'] = []
        command = {}
        command['command'] = 'command1 arg1 arg2'
        command['result'] = {}
        command['result']['error'] = 'error-text-1'
        command['result']['out'] = 'out-text-1'
        command['result']['status'] = 0
        command['result']['start_time'] = datetime.datetime.utcnow().isoformat()
        command['result']['finish_time'] = datetime.datetime.utcnow().isoformat()
        report['commands'].append(command)
        reports.append(report)
        obj = {}
        obj['reports'] = reports
        resp = self.client.post(
            '/main/feed/report/project/{0}/'.format(self.git_project1.id),
            data = json.dumps(obj),
            content_type='application/json')
        res.check_cross_origin_headers(self, resp)
        resp_obj = json.loads(resp.content)
        self.assertEqual(200, resp_obj['status'])
        self.assertEqual('Reports added correctly', resp_obj['message'])
        # One group containing one set was persisted.
        reports_entry = CommandGroupEntry.objects.all()
        self.assertEqual(1, len(reports_entry))
        sets_entry = CommandSetEntry.objects.all()
        self.assertEqual(1, len(sets_entry))
        self.assertEqual(reports_entry[0], sets_entry[0].group)
        comm_entry = CommandEntry.objects.all().first()
        self.assertEqual(u'command1 arg1 arg2', comm_entry.command)
        # The command result round-trips its error/out/status fields.
        comm_result_entry = CommandResultEntry.objects.all().first()
        self.assertEqual('error-text-1', comm_result_entry.error)
        self.assertEqual('out-text-1', comm_result_entry.out)
        self.assertEqual(0, comm_result_entry.status)
        self.assertEqual(sets_entry[0], comm_entry.command_set)
    def test_many_reports(self):
        # Posting two report batches should accumulate two groups, two sets
        # and two commands rather than overwrite the first batch.
        report_1 = FeederTestHelper.get_default_report(1)
        report_2 = FeederTestHelper.get_default_report(1)
        obj = {}
        obj['reports'] = report_1
        resp = self.client.post(
            '/main/feed/report/project/{0}/'.format(self.git_project1.id),
            data = json.dumps(obj),
            content_type='application/json')
        res.check_cross_origin_headers(self, resp)
        resp_obj = json.loads(resp.content)
        self.assertEqual(200, resp_obj['status'])
        self.assertEqual('Reports added correctly', resp_obj['message'])
        obj['reports'] = report_2
        resp = self.client.post(
            '/main/feed/report/project/{0}/'.format(self.git_project1.id),
            data = json.dumps(obj),
            content_type='application/json')
        res.check_cross_origin_headers(self, resp)
        resp_obj = json.loads(resp.content)
        self.assertEqual(200, resp_obj['status'])
        self.assertEqual('Reports added correctly', resp_obj['message'])
        reports_entry = CommandGroupEntry.objects.all()
        self.assertEqual(2, len(reports_entry))
        sets_entry = CommandSetEntry.objects.all()
        self.assertEqual(2, len(sets_entry))
        commands_entry = CommandEntry.objects.all()
        self.assertEqual(2, len(commands_entry))
| {
"content_hash": "d4518c57762757f2c2100b9f1e5527a4",
"timestamp": "",
"source": "github",
"line_count": 137,
"max_line_length": 123,
"avg_line_length": 36.85401459854015,
"alnum_prop": 0.6694394929689047,
"repo_name": "imvu/bluesteel",
"id": "82c2df4d6fd2c5d8d117c3e6884b613fc90a26fa",
"size": "5049",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/presenter/tests/tests_views_json_ViewGitDiff_reports.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "16828"
},
{
"name": "HTML",
"bytes": "119014"
},
{
"name": "JavaScript",
"bytes": "36015"
},
{
"name": "Python",
"bytes": "1220104"
}
],
"symlink_target": ""
} |
import errno
import json
import os
import random
import shlex
import shutil
import stat
import subprocess
import sys
import tempfile
import threading
import unittest
import uuid
from collections import Counter, OrderedDict
from contextlib import contextmanager
import bottle
import requests
import six
import time
from mock import Mock
from six import StringIO
from six.moves.urllib.parse import quote, urlsplit, urlunsplit
from webtest.app import TestApp
from requests.exceptions import HTTPError
from conans import tools, load, __version__
from conans.client.cache.cache import ClientCache
from conans.client.cache.remote_registry import Remotes
from conans.client.command import Command
from conans.client.conan_api import Conan
from conans.client.hook_manager import HookManager
from conans.client.loader import ProcessedProfile
from conans.client.output import ConanOutput
from conans.client.rest.conan_requester import ConanRequester
from conans.client.rest.uploader_downloader import IterableToFileAdapter
from conans.client.runner import ConanRunner
from conans.client.tools import environment_append
from conans.client.tools.files import chdir
from conans.client.tools.files import replace_in_file
from conans.client.tools.oss import check_output
from conans.client.tools.scm import Git, SVN
from conans.client.tools.win import get_cased_path
from conans.client.userio import UserIO
from conans.errors import NotFoundException, RecipeNotFoundException, PackageNotFoundException
from conans.model.manifest import FileTreeManifest
from conans.model.profile import Profile
from conans.model.ref import ConanFileReference, PackageReference
from conans.model.settings import Settings
from conans.server.revision_list import _RevisionEntry
from conans.test.utils.server_launcher import (TESTING_REMOTE_PRIVATE_PASS,
TESTING_REMOTE_PRIVATE_USER,
TestServerLauncher)
from conans.test.utils.test_files import temp_folder
from conans.tools import set_global_instances
from conans.util.env_reader import get_env
from conans.util.files import mkdir, save_files
from conans.client.rest.rest_client import RestApiClient
from conans.client.store.localdb import LocalDB
from conans.client.rest.auth_manager import ConanApiAuthManager
from conans.client.remote_manager import RemoteManager
from conans.client.migrations import ClientMigrator
from conans.model.version import Version
# Package ID Conan computes for a recipe with no settings/options.
NO_SETTINGS_PACKAGE_ID = "5ab84d6acfe1f23c4fae0ab88f26e3a396351ac9"
# Connection details for the optional real-Artifactory test backend;
# all three can be overridden through environment variables.
ARTIFACTORY_DEFAULT_USER = os.getenv("ARTIFACTORY_DEFAULT_USER", "admin")
ARTIFACTORY_DEFAULT_PASSWORD = os.getenv("ARTIFACTORY_DEFAULT_PASSWORD", "password")
ARTIFACTORY_DEFAULT_URL = os.getenv("ARTIFACTORY_DEFAULT_URL", "http://localhost:8090/artifactory")
def inc_recipe_manifest_timestamp(cache, reference, inc_time):
    """Advance the export manifest time of *reference* by *inc_time* seconds."""
    export_path = cache.package_layout(ConanFileReference.loads(reference)).export()
    manifest = FileTreeManifest.load(export_path)
    manifest.time += inc_time
    manifest.save(export_path)
def inc_package_manifest_timestamp(cache, package_reference, inc_time):
    """Advance the package manifest time of *package_reference* by *inc_time* seconds."""
    pref = PackageReference.loads(package_reference)
    package_path = cache.package_layout(pref.ref).package(pref)
    manifest = FileTreeManifest.load(package_path)
    manifest.time += inc_time
    manifest.save(package_path)
def test_processed_profile(profile=None, settings=None):
    """Build a ProcessedProfile for tests, filling in defaults as needed."""
    the_profile = profile if profile is not None else Profile()
    if the_profile.processed_settings is None:
        the_profile.processed_settings = settings or Settings()
    return ProcessedProfile(profile=the_profile)
class TestingResponse(object):
    """Adapter making a webtest TestApp response look like a requests one.

    Guarantees the presence of response.ok, response.content and
    response.status_code. Is instanced by TestRequester on each request.
    """
    def __init__(self, test_response):
        self.test_response = test_response
    def close(self):
        # Compatibility with close() method of a requests when stream=True
        pass
    @property
    def headers(self):
        return self.test_response.headers
    @property
    def ok(self):
        return self.status_code == 200
    def raise_for_status(self):
        """Raises stored :class:`HTTPError`, if one occurred."""
        if 400 <= self.status_code < 500:
            kind = 'Client'
        elif 500 <= self.status_code < 600:
            kind = 'Server'
        else:
            return
        raise HTTPError(u'%s %s Error: %s' % (self.status_code, kind, self.content),
                        response=self)
    @property
    def content(self):
        return self.test_response.body
    @property
    def charset(self):
        return self.test_response.charset
    @charset.setter
    def charset(self, newcharset):
        self.test_response.charset = newcharset
    @property
    def text(self):
        return self.test_response.text
    def iter_content(self, chunk_size=1):  # @UnusedVariable
        return [self.content]
    @property
    def status_code(self):
        return self.test_response.status_code
class TestRequester(object):
    """Fake requests module calling server applications with TestApp.

    URLs starting with ``http://fake`` are dispatched to the in-process WSGI
    apps of ``test_servers``; any other URL is forwarded to the real
    ``requests`` library.
    """
    def __init__(self, test_servers):
        self.test_servers = test_servers
    @staticmethod
    def _get_url_path(url):
        # Remove schema and host from url, keeping only path + query
        _, _, path, query, _ = urlsplit(url)
        url = urlunsplit(("", "", path, query, ""))
        return url
    def _get_wsgi_app(self, url):
        # Map a fake url back to the TestApp instance that owns it.
        for test_server in self.test_servers.values():
            if url.startswith(test_server.fake_url):
                return test_server.app
        raise Exception("Testing error: Not remote found")
    def get(self, url, **kwargs):
        app, url = self._prepare_call(url, kwargs)
        if app:
            response = app.get(url, **kwargs)
            return TestingResponse(response)
        else:
            return requests.get(url, **kwargs)
    def put(self, url, **kwargs):
        app, url = self._prepare_call(url, kwargs)
        if app:
            response = app.put(url, **kwargs)
            return TestingResponse(response)
        else:
            return requests.put(url, **kwargs)
    def delete(self, url, **kwargs):
        app, url = self._prepare_call(url, kwargs)
        if app:
            response = app.delete(url, **kwargs)
            return TestingResponse(response)
        else:
            return requests.delete(url, **kwargs)
    def post(self, url, **kwargs):
        app, url = self._prepare_call(url, kwargs)
        if app:
            response = app.post(url, **kwargs)
            return TestingResponse(response)
        else:
            # BUGFIX: the return was missing here (unlike get/put/delete),
            # so real POST requests silently returned None.
            return requests.post(url, **kwargs)
    def _prepare_call(self, url, kwargs):
        """Return (app, url); app is None for real external requests.

        Translates requests-style kwargs into TestApp-style ones in place.
        """
        if not url.startswith("http://fake"):  # Call to S3 (or external), perform a real request
            return None, url
        app = self._get_wsgi_app(url)
        url = self._get_url_path(url)  # Remove http://server.com
        self._set_auth_headers(kwargs)
        if app:
            kwargs["expect_errors"] = True
            # Drop requests-only kwargs TestApp does not understand.
            kwargs.pop("stream", None)
            kwargs.pop("verify", None)
            kwargs.pop("auth", None)
            kwargs.pop("cert", None)
            kwargs.pop("timeout", None)
            if "data" in kwargs:
                if isinstance(kwargs["data"], IterableToFileAdapter):
                    # Materialize streamed uploads into a single bytes blob.
                    data_accum = b""
                    for tmp in kwargs["data"]:
                        data_accum += tmp
                    kwargs["data"] = data_accum
                kwargs["params"] = kwargs["data"]
                del kwargs["data"]  # Parameter in test app is called "params"
            if kwargs.get("json"):
                # json is a high level parameter of requests, not a generic one
                # translate it to data and content_type
                kwargs["params"] = json.dumps(kwargs["json"])
                kwargs["content_type"] = "application/json"
            kwargs.pop("json", None)
        return app, url
    @staticmethod
    def _set_auth_headers(kwargs):
        # Run the requests auth callable against a mock request and copy the
        # headers it sets into the TestApp call.
        if kwargs.get("auth"):
            mock_request = Mock()
            mock_request.headers = {}
            kwargs["auth"](mock_request)
            if "headers" not in kwargs:
                kwargs["headers"] = {}
            kwargs["headers"].update(mock_request.headers)
class ArtifactoryServerStore(object):
    """Read-only view over the revision indexes of an Artifactory conan
    repository, used by tests to inspect server-side state via its REST API.
    """
    def __init__(self, repo_url, user, password):
        self._user = user or ARTIFACTORY_DEFAULT_USER
        self._password = password or ARTIFACTORY_DEFAULT_PASSWORD
        self._repo_url = repo_url
    @property
    def _auth(self):
        # (user, password) pair as expected by requests' ``auth=`` argument.
        return self._user, self._password
    @staticmethod
    def _root_recipe(ref):
        # Repository path of a recipe: user/name/version/channel
        return "{}/{}/{}/{}".format(ref.user, ref.name, ref.version, ref.channel)
    @staticmethod
    def _ref_index(ref):
        return "{}/index.json".format(ArtifactoryServerStore._root_recipe(ref))
    @staticmethod
    def _pref_index(pref):
        tmp = ArtifactoryServerStore._root_recipe(pref.ref)
        return "{}/{}/package/{}/index.json".format(tmp, pref.ref.revision, pref.id)
    def get_recipe_revisions(self, ref):
        """Return the recipe revision entries, newest first."""
        time.sleep(0.1)  # Index appears to not being updated immediately after a remove
        url = "{}/{}".format(self._repo_url, self._ref_index(ref))
        response = requests.get(url, auth=self._auth)
        response.raise_for_status()
        the_json = response.json()
        if not the_json["revisions"]:
            raise RecipeNotFoundException(ref)
        tmp = [_RevisionEntry(i["revision"], i["time"]) for i in the_json["revisions"]]
        return tmp
    def get_package_revisions(self, pref):
        """Return the package revision entries, newest first."""
        time.sleep(0.1)  # Index appears to not being updated immediately
        url = "{}/{}".format(self._repo_url, self._pref_index(pref))
        response = requests.get(url, auth=self._auth)
        response.raise_for_status()
        the_json = response.json()
        if not the_json["revisions"]:
            raise PackageNotFoundException(pref)
        tmp = [_RevisionEntry(i["revision"], i["time"]) for i in the_json["revisions"]]
        return tmp
    def get_last_revision(self, ref):
        revisions = self.get_recipe_revisions(ref)
        return revisions[0]
    def get_last_package_revision(self, ref):
        revisions = self.get_package_revisions(ref)
        return revisions[0]
    def package_exists(self, pref):
        # BUGFIX: this method previously referenced self.server_store and
        # self.test_server, attributes this class never defines (copy-paste
        # from TestServer), so any call raised AttributeError.  Implemented
        # here consistently with ArtifactoryServer.package_exists.
        try:
            revisions = self.get_package_revisions(pref)
            if pref.revision:
                for r in revisions:
                    if pref.revision == r.revision:
                        return True
                return False
            return True
        except Exception:  # When resolves the latest and there is no package
            return False
class ArtifactoryServer(object):
    """Drop-in replacement for TestServer backed by a real Artifactory
    instance (see the ARTIFACTORY_DEFAULT_* constants).  A uniquely named
    conan repository is created on construction.
    """
    def __init__(self, *args, **kwargs):
        self._user = ARTIFACTORY_DEFAULT_USER
        self._password = ARTIFACTORY_DEFAULT_PASSWORD
        self._url = ARTIFACTORY_DEFAULT_URL
        # Unique repo per server instance so parallel tests don't interfere.
        self._repo_name = "conan_{}".format(str(uuid.uuid4()).replace("-", ""))
        self.create_repository()
        self.server_store = ArtifactoryServerStore(self.repo_url, self._user, self._password)
    @property
    def _auth(self):
        # (user, password) pair for requests' ``auth=`` parameter.
        return self._user, self._password
    @property
    def repo_url(self):
        return "{}/{}".format(self._url, self._repo_name)
    @property
    def repo_api_url(self):
        # Conan-protocol endpoint of the repository.
        return "{}/api/conan/{}".format(self._url, self._repo_name)
    def recipe_revision_time(self, ref):
        # Return the recorded time for ref's revision, or None if not listed.
        revs = self.server_store.get_recipe_revisions(ref)
        for r in revs:
            if r.revision == ref.revision:
                return r.time
        return None
    def package_revision_time(self, pref):
        revs = self.server_store.get_package_revisions(pref)
        for r in revs:
            if r.revision == pref.revision:
                return r.time
        return None
    def create_repository(self):
        # Create the conan-type repository through Artifactory's REST API.
        url = "{}/api/repositories/{}".format(self._url, self._repo_name)
        config = {"key": self._repo_name, "rclass": "local", "packageType": "conan"}
        ret = requests.put(url, auth=self._auth, json=config)
        ret.raise_for_status()
    def package_exists(self, pref):
        try:
            revisions = self.server_store.get_package_revisions(pref)
            if pref.revision:
                # A specific revision was requested: check it is listed.
                for r in revisions:
                    if pref.revision == r.revision:
                        return True
                return False
            return True
        except Exception:  # When resolves the latest and there is no package
            return False
    def recipe_exists(self, ref):
        try:
            revisions = self.server_store.get_recipe_revisions(ref)
            if ref.revision:
                for r in revisions:
                    if ref.revision == r.revision:
                        return True
                return False
            return True
        except Exception:  # When resolves the latest and there is no package
            return False
class TestServer(object):
    """In-process conan server for tests, reachable through TestRequester
    via a unique fake URL."""
    def __init__(self, read_permissions=None,
                 write_permissions=None, users=None, plugins=None, base_path=None,
                 server_capabilities=None, complete_urls=False):
        """
        'read_permissions' and 'write_permissions' is a list of:
        [("opencv/2.3.4@lasote/testing", "user1, user2")]
        'users': {username: plain-text-passwd}
        """
        # Unique identifier for this server, will be used by TestRequester
        # to determine where to call. Why? remote_manager just assing an url
        # to the rest_client, so rest_client doesn't know about object instances,
        # just urls, so testing framework performs a map between fake urls and instances
        if read_permissions is None:
            read_permissions = [("*/*@*/*", "*")]
        if write_permissions is None:
            write_permissions = []
        if users is None:
            users = {"lasote": "mypass", "conan": "password"}
        self.fake_url = "http://fake%s.com" % str(uuid.uuid4()).replace("-", "")
        base_url = "%s/v1" % self.fake_url if complete_urls else "v1"
        self.test_server = TestServerLauncher(base_path, read_permissions,
                                              write_permissions, users,
                                              base_url=base_url,
                                              plugins=plugins,
                                              server_capabilities=server_capabilities)
        self.app = TestApp(self.test_server.ra.root_app)
    @property
    def server_store(self):
        return self.test_server.server_store
    def __repr__(self):
        return "TestServer @ " + self.fake_url
    def __str__(self):
        return self.fake_url
    def recipe_exists(self, ref):
        # With a revision, check that exact folder; otherwise check whether
        # any revision exists for the recipe.
        try:
            if not ref.revision:
                path = self.test_server.server_store.conan_revisions_root(ref)
            else:
                path = self.test_server.server_store.base_folder(ref)
            return self.test_server.server_store.path_exists(path)
        except NotFoundException:  # When resolves the latest and there is no package
            return False
    def package_exists(self, pref):
        try:
            if pref.revision:
                path = self.test_server.server_store.package(pref)
            else:
                path = self.test_server.server_store.package_revisions_root(pref)
            return self.test_server.server_store.path_exists(path)
        except NotFoundException:  # When resolves the latest and there is no package
            return False
    def latest_recipe(self, ref):
        rev, _ = self.test_server.server_store.get_last_revision(ref)
        return ref.copy_with_rev(rev)
    def recipe_revision_time(self, ref):
        if not ref.revision:
            raise Exception("Pass a ref with revision (Testing framework)")
        return self.test_server.server_store.get_revision_time(ref)
    def latest_package(self, pref):
        if not pref.ref.revision:
            raise Exception("Pass a pref with .rev.revision (Testing framework)")
        prev = self.test_server.server_store.get_last_package_revision(pref)
        return pref.copy_with_revs(pref.ref.revision, prev)
    def package_revision_time(self, pref):
        if not pref:
            raise Exception("Pass a pref with revision (Testing framework)")
        tmp = self.test_server.server_store.get_package_revision_time(pref)
        return tmp
# When CONAN_TEST_WITH_ARTIFACTORY is set, transparently swap the in-process
# test server for a real Artifactory-backed one (same duck-typed interface).
if get_env("CONAN_TEST_WITH_ARTIFACTORY", False):
    TestServer = ArtifactoryServer
class TestBufferConanOutput(ConanOutput):
    """ConanOutput that captures everything written into an in-memory stream
    and compares like a string, making assertions in tests trivial.
    """
    def __init__(self):
        self._buffer = StringIO()
        ConanOutput.__init__(self, self._buffer, color=False)
    def __repr__(self):
        # FIXME: I'm sure there is a better approach. Look at six docs.
        captured = self._buffer.getvalue()
        if six.PY2:
            return str(captured.encode("ascii", "ignore"))
        return captured
    def __str__(self, *args, **kwargs):
        return repr(self)
    def __eq__(self, value):
        return repr(self) == value
    def __ne__(self, value):
        return not self.__eq__(value)
    def __contains__(self, value):
        return value in repr(self)
def create_local_git_repo(files=None, branch=None, submodules=None, folder=None):
    """Initialise a throw-away git repository with *files* committed.

    Returns (path-with-forward-slashes, revision).
    """
    repo_dir = get_cased_path(folder or temp_folder())
    if files:
        save_files(repo_dir, files)
    git = Git(repo_dir)
    git.run("init .")
    git.run('config user.email "you@example.com"')
    git.run('config user.name "Your Name"')
    if branch:
        git.run("checkout -b %s" % branch)
    git.run("add .")
    git.run('commit -m "commiting"')
    if submodules:
        for submodule in submodules:
            git.run('submodule add "%s"' % submodule)
        git.run('commit -m "add submodules"')
    return repo_dir.replace("\\", "/"), git.get_revision()
def create_local_svn_checkout(files, repo_url, rel_project_path=None,
                              commit_msg='default commit message', delete_checkout=True,
                              folder=None):
    """Check out *repo_url*, add *files* under *rel_project_path* and commit.

    Returns (project_url, revision).  The working copy is deleted afterwards
    unless *delete_checkout* is False.
    """
    tmp_dir = folder or temp_folder()
    try:
        rel_project_path = rel_project_path or str(uuid.uuid4())
        # Do not use SVN class as it is what we will be testing
        subprocess.check_output('svn co "{url}" "{path}"'.format(url=repo_url,
                                                                 path=tmp_dir),
                                shell=True)
        tmp_project_dir = os.path.join(tmp_dir, rel_project_path)
        mkdir(tmp_project_dir)
        save_files(tmp_project_dir, files)
        with chdir(tmp_project_dir):
            subprocess.check_output("svn add .", shell=True)
            subprocess.check_output('svn commit -m "{}"'.format(commit_msg), shell=True)
            # Newer svn clients print the bare revision; older ones need the
            # XML output parsed instead.
            if SVN.get_version() >= SVN.API_CHANGE_VERSION:
                rev = check_output("svn info --show-item revision").strip()
            else:
                import xml.etree.ElementTree as ET
                output = check_output("svn info --xml").strip()
                root = ET.fromstring(output)
                rev = root.findall("./entry")[0].get("revision")
        project_url = repo_url + "/" + quote(rel_project_path.replace("\\", "/"))
        return project_url, rev
    finally:
        if delete_checkout:
            shutil.rmtree(tmp_dir, ignore_errors=False, onerror=try_remove_readonly)
def create_remote_svn_repo(folder=None):
    """Create an empty svn repository with ``svnadmin`` and return its
    file-protocol URL (forward slashes, properly quoted)."""
    repo_dir = folder or temp_folder()
    subprocess.check_output('svnadmin create "{}"'.format(repo_dir), shell=True)
    return SVN.file_protocol + quote(repo_dir.replace("\\", "/"), safe='/:')
def try_remove_readonly(func, path, exc):  # TODO: May promote to conan tools?
    """shutil.rmtree ``onerror`` callback: clear read-only bits and retry.

    Only retries removal functions that failed with EACCES; anything else is
    reported as an OSError.
    """
    # src: https://stackoverflow.com/questions/1213706/what-user-do-python-scripts-run-as-in-windows
    error = exc[1]
    removal_failed = func in (os.rmdir, os.remove, os.unlink)
    if not (removal_failed and error.errno == errno.EACCES):
        raise OSError("Cannot make read-only %s" % path)
    os.chmod(path, stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO)  # 0777
    func(path)
class SVNLocalRepoTestCase(unittest.TestCase):
    """Base test case that provisions a fresh local SVN server repo per run.

    ``path_with_spaces`` controls whether the working area contains spaces,
    to exercise path-quoting in the code under test.
    """
    path_with_spaces = True

    def _create_local_svn_repo(self):
        # The server-side repository lives inside the per-run temp folder.
        return create_remote_svn_repo(os.path.join(self._tmp_folder, 'repo_server'))

    def gimme_tmp(self, create=True):
        """Return a unique path under the per-run temp folder."""
        unique = os.path.join(self._tmp_folder, str(uuid.uuid4()))
        if create:
            os.makedirs(unique)
        return unique

    def create_project(self, files, rel_project_path=None, commit_msg='default commit message',
                       delete_checkout=True):
        """Commit ``files`` into this run's repository; returns (url, rev)."""
        return create_local_svn_checkout(files, self.repo_url,
                                         rel_project_path=rel_project_path,
                                         commit_msg=commit_msg,
                                         delete_checkout=delete_checkout,
                                         folder=self.gimme_tmp())

    def run(self, *args, **kwargs):
        # Wrap the normal unittest run so setup/teardown of the temp area and
        # repository happens even if the test itself errors out.
        base = tempfile.mkdtemp(suffix='_conans')
        try:
            subfolder = 'path with spaces' if self.path_with_spaces else 'pathwithoutspaces'
            self._tmp_folder = os.path.join(base, subfolder)
            os.makedirs(self._tmp_folder)
            self.repo_url = self._create_local_svn_repo()
            super(SVNLocalRepoTestCase, self).run(*args, **kwargs)
        finally:
            shutil.rmtree(base, ignore_errors=False, onerror=try_remove_readonly)
class MockedUserIO(UserIO):
    """
    Mock for testing. If get_username or get_password is requested will raise
    an exception except we have a value to return.
    """
    def __init__(self, logins, ins=sys.stdin, out=None):
        """
        logins is a dict of {remote: list(user, password)}
        will return sequentially
        """
        assert isinstance(logins, dict)
        self.logins = logins
        # Per-remote index of the next (user, password) tuple to hand out.
        self.login_index = Counter()
        UserIO.__init__(self, ins, out)
    def get_username(self, remote_name):
        # Environment-provided credentials always win over the mocked ones.
        # NOTE: this does NOT advance login_index — only get_password() does,
        # so the expected call order is get_username() then get_password().
        username_env = self._get_env_username(remote_name)
        if username_env:
            return username_env
        self._raise_if_non_interactive()
        sub_dict = self.logins[remote_name]
        index = self.login_index[remote_name]
        if len(sub_dict) - 1 < index:
            raise Exception("Bad user/password in testing framework, "
                            "provide more tuples or input the right ones")
        return sub_dict[index][0]
    def get_password(self, remote_name):
        """Overridable for testing purpose"""
        password_env = self._get_env_password(remote_name)
        if password_env:
            return password_env
        self._raise_if_non_interactive()
        sub_dict = self.logins[remote_name]
        index = self.login_index[remote_name]
        tmp = sub_dict[index][1]
        # Advance to the next credentials tuple for this remote.
        self.login_index.update([remote_name])
        return tmp
class TestClient(object):
    """ Test wrap of the conans application to launch tests in the same way as
    in command line
    """
    def __init__(self, base_folder=None, current_folder=None, servers=None, users=None,
                 requester_class=None, runner=None, path_with_spaces=True,
                 revisions_enabled=None, cpu_count=1):
        """
        current_folder: Current execution folder
        servers: dict of {remote_name: TestServer}
        logins is a list of (user, password) for auto input in order
        if required==> [("lasote", "mypass"), ("other", "otherpass")]
        """
        self.all_output = ""  # For debugging purpose, append all the run outputs
        self.users = users
        if self.users is None:
            self.users = {"default": [(TESTING_REMOTE_PRIVATE_USER, TESTING_REMOTE_PRIVATE_PASS)]}
        self.base_folder = base_folder or temp_folder(path_with_spaces)
        # Client cache backed by a buffered output so nothing hits stdout.
        self.cache = ClientCache(self.base_folder, TestBufferConanOutput())
        self.storage_folder = self.cache.store
        self.requester_class = requester_class
        self.conan_runner = runner
        if revisions_enabled is None:
            revisions_enabled = get_env("TESTING_REVISIONS_ENABLED", False)
        self.tune_conan_conf(base_folder, cpu_count, revisions_enabled)
        # Order of remotes matters for resolution, so plain dicts with more
        # than one entry are rejected (insertion order was not guaranteed).
        if servers and len(servers) > 1 and not isinstance(servers, OrderedDict):
            raise Exception("""Testing framework error: Servers should be an OrderedDict. e.g:
servers = OrderedDict()
servers["r1"] = server
servers["r2"] = TestServer()
""")
        self.servers = servers or {}
        if servers is not False:  # Do not mess with registry remotes
            self.update_servers()
        self.init_dynamic_vars()
        self.current_folder = current_folder or temp_folder(path_with_spaces)
    def _set_revisions(self, value):
        # Rewrite conan.conf so [general] carries revisions_enabled = value.
        current_conf = load(self.cache.conan_conf_path)
        if "revisions_enabled" in current_conf:  # Invalidate any previous value to be sure
            replace_in_file(self.cache.conan_conf_path, "revisions_enabled", "#revisions_enabled",
                            output=TestBufferConanOutput())
        replace_in_file(self.cache.conan_conf_path,
                        "[general]", "[general]\nrevisions_enabled = %s" % value,
                        output=TestBufferConanOutput())
        # Invalidate the cached config
        self.cache.invalidate()
    def enable_revisions(self):
        self._set_revisions("1")
        assert self.cache.config.revisions_enabled
    def disable_revisions(self):
        self._set_revisions("0")
        assert not self.cache.config.revisions_enabled
    def tune_conan_conf(self, base_folder, cpu_count, revisions_enabled):
        # Create the default (accessing .config materializes conan.conf)
        self.cache.config
        if cpu_count:
            replace_in_file(self.cache.conan_conf_path,
                            "# cpu_count = 1", "cpu_count = %s" % cpu_count,
                            output=TestBufferConanOutput(), strict=not bool(base_folder))
        current_conf = load(self.cache.conan_conf_path)
        if "revisions_enabled" in current_conf:  # Invalidate any previous value to be sure
            replace_in_file(self.cache.conan_conf_path, "revisions_enabled", "#revisions_enabled",
                            output=TestBufferConanOutput())
        if revisions_enabled:
            replace_in_file(self.cache.conan_conf_path,
                            "[general]", "[general]\nrevisions_enabled = 1",
                            output=TestBufferConanOutput())
        # Invalidate the cached config
        self.cache.invalidate()
    def update_servers(self):
        # Reset the registry file, then re-register every configured server.
        Remotes().save(self.cache.registry_path)
        registry = self.cache.registry
        def add_server_to_registry(name, server):
            if isinstance(server, ArtifactoryServer):
                registry.add(name, server.repo_api_url)
                self.users.update({name: [(ARTIFACTORY_DEFAULT_USER,
                                           ARTIFACTORY_DEFAULT_PASSWORD)]})
            elif isinstance(server, TestServer):
                registry.add(name, server.fake_url)
            else:
                registry.add(name, server)
        # Register "default" first so it stays first in the registry order.
        for name, server in self.servers.items():
            if name == "default":
                add_server_to_registry(name, server)
        for name, server in self.servers.items():
            if name != "default":
                add_server_to_registry(name, server)
    @property
    def default_compiler_visual_studio(self):
        settings = self.cache.default_profile.settings
        return settings.get("compiler", None) == "Visual Studio"
    @property
    def out(self):
        # Output captured from the last command run.
        return self.user_io.out
    @contextmanager
    def chdir(self, newdir):
        # Temporarily switch the client's working folder (creates it).
        old_dir = self.current_folder
        if not os.path.isabs(newdir):
            newdir = os.path.join(old_dir, newdir)
        mkdir(newdir)
        self.current_folder = newdir
        try:
            yield
        finally:
            self.current_folder = old_dir
    def _get_http_requester(self):
        # Check if servers are real; only fake (in-process) servers get the
        # TestRequester that short-circuits HTTP.
        real_servers = False
        for server in self.servers.values():
            if isinstance(server, str) or isinstance(server, ArtifactoryServer):  # Just URI
                real_servers = True
                break
        http_requester = None
        if not real_servers:
            if self.requester_class:
                http_requester = self.requester_class(self.servers)
            else:
                http_requester = TestRequester(self.servers)
        return http_requester
    def init_dynamic_vars(self, user_io=None):
        """Rebuild the per-run collaborators (output, cache, requester,
        runner, remote manager).  Returns (output, requester)."""
        # Migration system
        output = TestBufferConanOutput()
        self.user_io = user_io or MockedUserIO(self.users, out=output)
        self.cache = ClientCache(self.base_folder, output)
        # Migration system
        migrator = ClientMigrator(self.cache, Version(__version__), output)
        migrator.migrate()
        http_requester = self._get_http_requester()
        config = self.cache.config
        if self.conan_runner:
            self.runner = self.conan_runner
        else:
            self.runner = ConanRunner(config.print_commands_to_output, config.generate_run_log_file,
                                      config.log_run_to_output, output=output)
        self.requester = ConanRequester(config, http_requester)
        self.hook_manager = HookManager(self.cache.hooks_path, config.hooks, self.user_io.out)
        put_headers = self.cache.read_put_headers()
        self.rest_api_client = RestApiClient(self.user_io.out, self.requester,
                                             revisions_enabled=config.revisions_enabled,
                                             put_headers=put_headers)
        # To store user and token
        self.localdb = LocalDB.create(self.cache.localdb)
        # Wraps RestApiClient to add authentication support (same interface)
        auth_manager = ConanApiAuthManager(self.rest_api_client, self.user_io, self.localdb)
        # Handle remote connections
        self.remote_manager = RemoteManager(self.cache, auth_manager, self.user_io.out,
                                            self.hook_manager)
        return output, self.requester
    def run(self, command_line, user_io=None, assert_error=False):
        """ run a single command as in the command line.
            If user or password is filled, user_io will be mocked to return this
            tuple if required
        """
        output, requester = self.init_dynamic_vars(user_io)
        with tools.environment_append(self.cache.config.env_vars):
            # Settings preprocessor
            interactive = not get_env("CONAN_NON_INTERACTIVE", False)
            conan = Conan(self.cache, self.user_io, self.runner, self.remote_manager,
                          self.hook_manager, requester, interactive=interactive)
        command = Command(conan)
        args = shlex.split(command_line)
        current_dir = os.getcwd()
        os.chdir(self.current_folder)
        # Let commands import python code stored in the cache.
        old_path = sys.path[:]
        sys.path.append(os.path.join(self.cache.cache_folder, "python"))
        old_modules = list(sys.modules.keys())
        old_output, old_requester = set_global_instances(output, requester)
        try:
            error = command.run(args)
        finally:
            # Restore process-wide state touched above, even on failure.
            set_global_instances(old_output, old_requester)
            sys.path = old_path
            os.chdir(current_dir)
            # Reset sys.modules to its prev state. A .copy() DOES NOT WORK
            added_modules = set(sys.modules).difference(old_modules)
            for added in added_modules:
                sys.modules.pop(added, None)
        # Fail loudly (with the captured output) when the command result
        # doesn't match the caller's expectation.
        if (assert_error and not error) or (not assert_error and error):
            if assert_error:
                msg = " Command succeeded (failure expected): "
            else:
                msg = " Command failed (unexpectedly): "
            exc_message = "\n{header}\n{cmd}\n{output_header}\n{output}\n{output_footer}\n".format(
                header='{:-^80}'.format(msg),
                output_header='{:-^80}'.format(" Output: "),
                output_footer='-'*80,
                cmd=command_line,
                output=self.user_io.out
            )
            raise Exception(exc_message)
        self.all_output += str(self.user_io.out)
        return error
    def run_command(self, command):
        """Run an arbitrary shell command (not a conan command) in the
        client's current folder."""
        self.all_output += str(self.out)
        self.init_dynamic_vars()  # Resets the output
        return self.runner(command, cwd=self.current_folder)
    def save(self, files, path=None, clean_first=False):
        """ helper method, will store files in the current folder
        param files: dict{filename: filecontents}
        """
        path = path or self.current_folder
        if clean_first:
            shutil.rmtree(self.current_folder, ignore_errors=True)
        files = {f: str(content) for f, content in files.items()}
        save_files(path, files)
        if not files:
            # Even with nothing to save, make sure the folder exists.
            mkdir(self.current_folder)
    def copy_from_assets(self, origin_folder, assets):
        # Copy the named files/dirs from a fixtures folder into current_folder.
        for asset in assets:
            s = os.path.join(origin_folder, asset)
            d = os.path.join(self.current_folder, asset)
            if os.path.isdir(s):
                shutil.copytree(s, d)
            else:
                shutil.copy2(s, d)
class TurboTestClient(TestClient):
    """TestClient with higher-level helpers (export/create/upload) that
    return revision-qualified references instead of raw command output."""
    # Scratch file used to capture --json output of conan commands.
    tmp_json_name = ".tmp_json"
    def __init__(self, *args, **kwargs):
        if "users" not in kwargs:
            from collections import defaultdict
            # Any remote name authenticates as conan/password by default.
            kwargs["users"] = defaultdict(lambda: [("conan", "password")])
        super(TurboTestClient, self).__init__(*args, **kwargs)
    def export(self, ref, conanfile=None, args=None, assert_error=False):
        """Export ``conanfile`` (or a generated default) as ``ref``; returns
        the reference with its recipe revision attached."""
        conanfile = str(conanfile) if conanfile else str(GenConanfile())
        self.save({"conanfile.py": conanfile})
        self.run("export . {} {}".format(ref.full_repr(), args or ""),
                 assert_error=assert_error)
        rrev = self.cache.package_layout(ref).recipe_revision()
        return ref.copy_with_rev(rrev)
    def create(self, ref, conanfile=None, args=None, assert_error=False):
        """Create the package for ``ref``; returns a PackageReference with
        recipe and package revisions, or None when a failure was expected."""
        conanfile = str(conanfile) if conanfile else str(GenConanfile())
        self.save({"conanfile.py": conanfile})
        self.run("create . {} {} --json {}".format(ref.full_repr(),
                                                   args or "", self.tmp_json_name),
                 assert_error=assert_error)
        rrev = self.cache.package_layout(ref).recipe_revision()
        json_path = os.path.join(self.current_folder, self.tmp_json_name)
        # NOTE(review): the json is loaded even when assert_error is True and
        # then discarded — presumably the file exists even on failure; confirm.
        data = json.loads(load(json_path))
        if assert_error:
            return None
        package_id = data["installed"][0]["packages"][0]["id"]
        package_ref = PackageReference(ref, package_id)
        prev = self.cache.package_layout(ref.copy_clear_rev()).package_revision(package_ref)
        return package_ref.copy_with_revs(rrev, prev)
    def upload_all(self, ref, remote=None, args=None, assert_error=False):
        """Upload recipe and all packages of ``ref``; returns the reference
        with the revision reported by the server (None if failure expected)."""
        remote = remote or list(self.servers.keys())[0]
        self.run("upload {} -c --all -r {} {}".format(ref.full_repr(), remote, args or ""),
                 assert_error=assert_error)
        if not assert_error:
            remote_rrev, _ = self.servers[remote].server_store.get_last_revision(ref)
            return ref.copy_with_rev(remote_rrev)
        return
    def remove_all(self):
        # Wipe every recipe/package from the local cache.
        self.run("remove '*' -f")
    def recipe_exists(self, ref):
        return self.cache.package_layout(ref).recipe_exists()
    def package_exists(self, pref):
        return self.cache.package_layout(pref.ref).package_exists(pref)
    def recipe_revision(self, ref):
        return self.cache.package_layout(ref).recipe_revision()
    def package_revision(self, pref):
        return self.cache.package_layout(pref.ref).package_revision(pref)
    def search(self, pattern, remote=None, assert_error=False, args=None):
        """Run ``conan search`` and return its parsed --json output."""
        remote = " -r={}".format(remote) if remote else ""
        self.run("search {} --json {} {} {}".format(pattern, self.tmp_json_name, remote,
                                                    args or ""),
                 assert_error=assert_error)
        json_path = os.path.join(self.current_folder, self.tmp_json_name)
        data = json.loads(load(json_path))
        return data
    def massive_uploader(self, ref, revisions, num_prev, remote=None):
        """Uploads N revisions with M package revisions. The revisions can be specified like:
            revisions = [{"os": "Windows"}, {"os": "Linux"}], \
                        [{"os": "Macos"}], \
                        [{"os": "Solaris"}, {"os": "FreeBSD"}]
        IMPORTANT: Different settings keys will cause different recipe revisions
        """
        remote = remote or "default"
        ret = []
        for i, settings_groups in enumerate(revisions):
            tmp = []
            for settings in settings_groups:
                # Unique build message per group => new recipe revision.
                conanfile_gen = GenConanfile(). \
                    with_build_msg("REV{}".format(i)). \
                    with_package_file("file", env_var="MY_VAR")
                for s in settings.keys():
                    conanfile_gen = conanfile_gen.with_setting(s)
                for k in range(num_prev):
                    args = " ".join(["-s {}={}".format(key, value)
                                     for key, value in settings.items()])
                    # Changing MY_VAR changes the packaged file => new
                    # package revision on every iteration.
                    with environment_append({"MY_VAR": str(k)}):
                        pref = self.create(ref, conanfile=conanfile_gen, args=args)
                        self.upload_all(ref, remote=remote)
                        tmp.append(pref)
            ret.append(tmp)
        return ret
    def init_git_repo(self, files=None, branch=None, submodules=None, origin_url=None):
        """Turn current_folder into a git repo; returns the commit revision."""
        _, commit = create_local_git_repo(files, branch, submodules, self.current_folder)
        if origin_url:
            self.runner('git remote add origin {}'.format(origin_url), cwd=self.current_folder)
        return commit
    def init_svn_repo(self, subpath, files=None, repo_url=None):
        """Commit ``files`` under ``subpath`` of an svn repo (created on
        demand) checked out into current_folder; returns the revision."""
        if not repo_url:
            repo_url = create_remote_svn_repo(temp_folder())
        _, rev = create_local_svn_checkout(files, repo_url, folder=self.current_folder,
                                           rel_project_path=subpath, delete_checkout=False)
        return rev
class GenConanfile(object):
    """Fluent builder that renders a conanfile.py as text via repr()/str().

    USAGE:
        x = GenConanfile().with_import("import os").\
            with_setting("os").\
            with_option("shared", [True, False]).\
            with_default_option("shared", True).\
            with_build_msg("holaaa").\
            with_build_msg("adiooos").\
            with_package_file("file.txt", "hola").\
            with_package_file("file2.txt", "hola")
    """
    def __init__(self):
        self._imports = ["from conans import ConanFile"]
        self._settings = []
        self._options = {}
        self._default_options = {}
        self._package_files = {}
        self._package_files_env = {}
        self._build_messages = []
        self._scm = {}
        self._requirements = []
        self._revision_mode = None

    def with_revision_mode(self, revision_mode):
        self._revision_mode = revision_mode
        return self

    def with_scm(self, scm):
        self._scm = scm
        return self

    def with_requirement(self, ref):
        self._requirements.append(ref)
        return self

    def with_import(self, i):
        # Keep imports unique while preserving insertion order.
        if i not in self._imports:
            self._imports.append(i)
        return self

    def with_setting(self, setting):
        self._settings.append(setting)
        return self

    def with_option(self, option_name, values):
        self._options[option_name] = values
        return self

    def with_default_option(self, option_name, value):
        self._default_options[option_name] = value
        return self

    def with_package_file(self, file_name, contents=None, env_var=None):
        """Package a file with fixed ``contents`` or the value of ``env_var``."""
        if not contents and not env_var:
            raise Exception("Specify contents or env_var")
        # The generated package() method needs these imports.
        self.with_import("import os")
        self.with_import("from conans import tools")
        if contents:
            self._package_files[file_name] = contents
        if env_var:
            self._package_files_env[file_name] = env_var
        return self

    def with_build_msg(self, msg):
        self._build_messages.append(msg)
        return self

    @property
    def _scm_line(self):
        if not self._scm:
            return ""
        entries = ['"%s": "%s"' % item for item in self._scm.items()]
        return "scm = {%s}" % ", ".join(entries)

    @property
    def _revision_mode_line(self):
        if not self._revision_mode:
            return ""
        return 'revision_mode="{}"'.format(self._revision_mode)

    @property
    def _settings_line(self):
        if not self._settings:
            return ""
        return "settings = %s" % ", ".join('"%s"' % s for s in self._settings)

    @property
    def _options_line(self):
        if not self._options:
            return ""
        rendered = ['"%s": %s' % (k, v) for k, v in self._options.items()]
        result = "options = {%s}" % ", ".join(rendered)
        if self._default_options:
            defaults = ['"%s": %s' % (k, v) for k, v in self._default_options.items()]
            result += "\n    default_options = {%s}" % ", ".join(defaults)
        return result

    @property
    def _requirements_line(self):
        if not self._requirements:
            return ""
        refs = ['"{}"'.format(r.full_repr()) for r in self._requirements]
        return "requires = %s" % ", ".join(refs)

    @property
    def _package_method(self):
        body = []
        for name, contents in self._package_files.items():
            body.append('        tools.save(os.path.join(self.package_folder, "{}"), "{}")'
                        .format(name, contents))
        for name, env_var in self._package_files_env.items():
            body.append('        tools.save(os.path.join(self.package_folder, "{}"), '
                        'os.getenv("{}"))'.format(name, env_var))
        if not body:
            return ""
        return """
    def package(self):
{}
    """.format("\n".join(body))

    @property
    def _build_method(self):
        if not self._build_messages:
            return ""
        body = "\n".join('        self.output.warn("{}")'.format(m)
                         for m in self._build_messages)
        return """
    def build(self):
{}
    """.format(body)

    def __repr__(self):
        parts = list(self._imports)
        parts.append("class HelloConan(ConanFile):")
        # Class members in a fixed, readable order; empty sections skipped.
        for member in (self._requirements_line,
                       self._scm_line,
                       self._revision_mode_line,
                       self._settings_line,
                       self._options_line,
                       self._build_method,
                       self._package_method):
            if member:
                parts.append("    {}".format(member))
        if len(parts) == 2:
            parts.append("    pass")
        return "\n".join(parts)
class StoppableThreadBottle(threading.Thread):
    """
    Real server to test download endpoints
    """
    def __init__(self, host=None, port=None):
        self.host = host or "127.0.0.1"
        # Random high port to minimize collisions between concurrent tests.
        self.port = port or random.randrange(48000, 49151)
        self.server = bottle.Bottle()
        super(StoppableThreadBottle, self).__init__(target=self.server.run,
                                                    kwargs={"host": self.host, "port": self.port})
        self.daemon = True
        self._stop = threading.Event()
    def stop(self):
        # NOTE(review): this only sets the event; nothing in this class checks
        # it, so the server thread presumably just dies with the process
        # because it is a daemon — confirm before relying on stop().
        self._stop.set()
    def run_server(self):
        # Start the bottle server thread and give it a moment to bind.
        self.start()
        time.sleep(1)
| {
"content_hash": "7d8ce392a82c94f16bc06d3a5b257331",
"timestamp": "",
"source": "github",
"line_count": 1193,
"max_line_length": 100,
"avg_line_length": 37.49790444258173,
"alnum_prop": 0.5932714876494914,
"repo_name": "memsharded/conan",
"id": "47064174af533f0d6c5904ec8b61834a982e9f9a",
"size": "44735",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "conans/test/utils/tools.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1100"
},
{
"name": "C",
"bytes": "264"
},
{
"name": "C++",
"bytes": "425"
},
{
"name": "CMake",
"bytes": "447"
},
{
"name": "Groovy",
"bytes": "12586"
},
{
"name": "Python",
"bytes": "4334185"
},
{
"name": "Shell",
"bytes": "1864"
}
],
"symlink_target": ""
} |
import unittest
import numpy as np
from quant_dequant_test import QuantDequantTest
import paddle
import paddle.fluid as fluid
import paddle.fluid.core as core
from paddle.fluid.core import AnalysisConfig, PassVersionChecker
class TensorRTMatMulQuantDequantDims3Test(QuantDequantTest):
    """Int8 quant/dequant TensorRT inference test for matmul on 3-D inputs."""
    def setUp(self):
        self.set_params()
        def network():
            # Graph under test: matmul(data, data) -> fc -> relu, plus a
            # cross-entropy loss so the quantization pass can train.
            self.data = fluid.data(
                name='data', shape=[1, 28, 28], dtype='float32'
            )
            self.label = fluid.data(name='label', shape=[1, 1], dtype='int64')
            matmul_out = fluid.layers.matmul(
                x=self.data,
                y=self.data,
                transpose_x=self.transpose_x,
                transpose_y=self.transpose_y,
                alpha=self.alpha,
            )
            fc_out = fluid.layers.fc(
                input=matmul_out,
                size=10,
                num_flatten_dims=1,
                bias_attr=False,
                act=None,
            )
            result = fluid.layers.relu(fc_out)
            loss = fluid.layers.cross_entropy(input=result, label=self.label)
            avg_loss = paddle.mean(loss)
            return avg_loss, result
        # Fixed seeds keep train/test programs deterministic across runs.
        self.main_program.random_seed = 2
        self.startup_program.random_seed = 2
        self.test_main_program.random_seed = 2
        # self.test_startup_program.random_seed = 2
        with fluid.unique_name.guard():
            with fluid.program_guard(self.main_program, self.startup_program):
                self.loss, result = network()
                opt = fluid.optimizer.Adam(learning_rate=0.0001)
                opt.minimize(self.loss)
        # Build the same network again into the inference (test) program.
        with fluid.unique_name.guard():
            with fluid.program_guard(
                self.test_main_program, self.startup_program
            ):
                network()
        self.feeds = {"data": np.random.random([1, 28, 28]).astype("float32")}
        self.fetch_list = [result]
        self.enable_trt = True
        # workspace=1GB, max_batch=32, min_subgraph_size=0, int8 precision.
        self.trt_parameters = TensorRTMatMulQuantDequantDims3Test.TensorRTParam(
            1 << 30, 32, 0, AnalysisConfig.Precision.Int8, False, False
        )
        self.activation_quantize_type = 'moving_average_abs_max'
        self.weight_quantize_type = 'channel_wise_abs_max'
    def set_params(self):
        # Overridden by subclasses to vary transpose flags and alpha scaling.
        self.transpose_x = False
        self.transpose_y = False
        self.alpha = 1.0
    def test_check_output(self):
        # self.quant_dequant()
        if core.is_compiled_with_cuda():
            use_gpu = True
            # Loose tolerances: int8 quantization is inherently lossy.
            self.check_output_with_option(
                use_gpu, atol=1, flatten=False, rtol=1e-1
            )
            self.assertTrue(
                PassVersionChecker.IsCompatible('tensorrt_subgraph_pass')
            )
class TensorRTMatMulQuantDequantDims3TransposeXTest(
    TensorRTMatMulQuantDequantDims3Test
):
    """Variant: transpose the left matmul operand, alpha=2.1."""

    def set_params(self):
        self.transpose_x, self.transpose_y = True, False
        self.alpha = 2.1
class TensorRTMatMulQuantDequantDims3TransposeYTest(
    TensorRTMatMulQuantDequantDims3Test
):
    """Variant: transpose the right matmul operand, alpha=3.9."""

    def set_params(self):
        self.transpose_x, self.transpose_y = False, True
        self.alpha = 3.9
class TensorRTMatMulQuantDequantDims3TransposeXYTest(
    TensorRTMatMulQuantDequantDims3Test
):
    """Variant: transpose both matmul operands, alpha=8.4."""

    def set_params(self):
        self.transpose_x, self.transpose_y = True, True
        self.alpha = 8.4
class TensorRTMatMulQuantDequantDims4Test(QuantDequantTest):
    """Int8 quant/dequant TensorRT test for matmul on 4-D (reshaped) inputs."""
    def setUp(self):
        self.set_params()
        def network():
            self.data = fluid.data(
                name='data', shape=[1, 28, 28], dtype='float32'
            )
            self.label = fluid.data(name='label', shape=[1, 1], dtype='int64')
            # Reshape to rank 4 so the matmul runs on 4-D tensors.
            reshape_out = paddle.reshape(self.data, shape=[1, 4, 14, 14])
            matmul_out = fluid.layers.matmul(
                x=reshape_out,
                y=reshape_out,
                transpose_x=self.transpose_x,
                transpose_y=self.transpose_y,
                alpha=self.alpha,
            )
            # NOTE(review): ``out`` is never consumed — fc below takes
            # matmul_out, not out.  Confirm whether batch_norm was meant to
            # feed the fc layer.
            out = fluid.layers.batch_norm(matmul_out, is_test=True)
            fc_out = fluid.layers.fc(
                input=matmul_out,
                size=10,
                num_flatten_dims=1,
                bias_attr=False,
                act=None,
            )
            result = fluid.layers.relu(fc_out)
            loss = fluid.layers.cross_entropy(input=result, label=self.label)
            avg_loss = paddle.mean(loss)
            return avg_loss, result
        # Fixed seeds keep train/test programs deterministic across runs.
        self.main_program.random_seed = 2
        self.startup_program.random_seed = 2
        self.test_main_program.random_seed = 2
        # self.test_startup_program.random_seed = 2
        with fluid.unique_name.guard():
            with fluid.program_guard(self.main_program, self.startup_program):
                self.loss, result = network()
                opt = fluid.optimizer.Adam(learning_rate=0.0001)
                opt.minimize(self.loss)
        # Build the same network again into the inference (test) program.
        with fluid.unique_name.guard():
            with fluid.program_guard(
                self.test_main_program, self.startup_program
            ):
                network()
        self.feeds = {"data": np.random.random([1, 28, 28]).astype("float32")}
        self.fetch_list = [result]
        self.enable_trt = True
        # workspace=1GB, max_batch=32, min_subgraph_size=0, int8 precision.
        self.trt_parameters = TensorRTMatMulQuantDequantDims4Test.TensorRTParam(
            1 << 30, 32, 0, AnalysisConfig.Precision.Int8, False, False
        )
        self.activation_quantize_type = 'moving_average_abs_max'
        self.weight_quantize_type = 'channel_wise_abs_max'
    def set_params(self):
        # Overridden by subclasses to vary transpose flags and alpha scaling.
        self.transpose_x = False
        self.transpose_y = False
        self.alpha = 1.0
    def test_check_output(self):
        # self.quant_dequant()
        if core.is_compiled_with_cuda():
            use_gpu = True
            # Loose tolerances: int8 quantization is inherently lossy.
            self.check_output_with_option(
                use_gpu, atol=1, flatten=False, rtol=1e-1
            )
            self.assertTrue(
                PassVersionChecker.IsCompatible('tensorrt_subgraph_pass')
            )
class TensorRTMatMulQuantDequantDims4TransposeXTest(
    TensorRTMatMulQuantDequantDims4Test
):
    """Variant: transpose the left matmul operand, alpha=3.2."""

    def set_params(self):
        self.transpose_x, self.transpose_y = True, False
        self.alpha = 3.2
class TensorRTMatMulQuantDequantDims4TransposeYTest(
    TensorRTMatMulQuantDequantDims4Test
):
    """Variant: transpose the right matmul operand, alpha=7.5."""

    def set_params(self):
        self.transpose_x, self.transpose_y = False, True
        self.alpha = 7.5
class TensorRTMatMulQuantDequantDims4TransposeXYTest(
    TensorRTMatMulQuantDequantDims4Test
):
    """Variant: transpose both matmul operands, alpha=11.2."""

    def set_params(self):
        self.transpose_x, self.transpose_y = True, True
        self.alpha = 11.2
class TensorRTMatMulQuantDequantDims3DynamicTest(QuantDequantTest):
    """Int8 quant/dequant TensorRT test with a dynamic batch dimension."""
    def setUp(self):
        self.set_params()
        def network():
            # -1 batch dimension: exercised via DynamicShapeParam below.
            self.data = fluid.data(
                name='data', shape=[-1, 28, 28], dtype='float32'
            )
            self.label = fluid.data(name='label', shape=[1, 1], dtype='int64')
            matmul_out = fluid.layers.matmul(
                x=self.data,
                y=self.data,
                transpose_x=self.transpose_x,
                transpose_y=self.transpose_y,
                alpha=self.alpha,
            )
            # NOTE(review): ``out`` is never consumed — fc below takes
            # matmul_out, not out.  Confirm whether batch_norm was meant to
            # feed the fc layer.
            out = fluid.layers.batch_norm(matmul_out, is_test=True)
            fc_out = fluid.layers.fc(
                input=matmul_out,
                size=10,
                num_flatten_dims=1,
                bias_attr=False,
                act=None,
            )
            result = fluid.layers.relu(fc_out)
            loss = fluid.layers.cross_entropy(input=result, label=self.label)
            avg_loss = paddle.mean(loss)
            return avg_loss, result
        # Fixed seeds keep train/test programs deterministic across runs.
        self.main_program.random_seed = 2
        self.startup_program.random_seed = 2
        self.test_main_program.random_seed = 2
        # self.test_startup_program.random_seed = 2
        with fluid.unique_name.guard():
            with fluid.program_guard(self.main_program, self.startup_program):
                self.loss, result = network()
                opt = fluid.optimizer.Adam(learning_rate=0.0001)
                opt.minimize(self.loss)
        # Build the same network again into the inference (test) program.
        with fluid.unique_name.guard():
            with fluid.program_guard(
                self.test_main_program, self.startup_program
            ):
                network()
        self.feeds = {"data": np.random.random([3, 28, 28]).astype("float32")}
        self.fetch_list = [result]
        self.enable_trt = True
        # workspace=1GB, max_batch=32, min_subgraph_size=0, int8 precision.
        self.trt_parameters = (
            TensorRTMatMulQuantDequantDims3DynamicTest.TensorRTParam(
                1 << 30, 32, 0, AnalysisConfig.Precision.Int8, False, False
            )
        )
        # min / max / optimal input shapes for the dynamic batch dimension.
        self.dynamic_shape_params = (
            TensorRTMatMulQuantDequantDims3DynamicTest.DynamicShapeParam(
                {'data': [1, 28, 28]},
                {'data': [4, 28, 28]},
                {'data': [3, 28, 28]},
                False,
            )
        )
        self.activation_quantize_type = 'moving_average_abs_max'
        self.weight_quantize_type = 'channel_wise_abs_max'
    def set_params(self):
        # Overridden by subclasses to vary transpose flags and alpha scaling.
        self.transpose_x = False
        self.transpose_y = False
        self.alpha = 1.0
    def test_check_output(self):
        # self.quant_dequant()
        if core.is_compiled_with_cuda():
            use_gpu = True
            # Loose tolerances: int8 quantization is inherently lossy.
            self.check_output_with_option(
                use_gpu, atol=1, flatten=False, rtol=1e-1
            )
            self.assertTrue(
                PassVersionChecker.IsCompatible('tensorrt_subgraph_pass')
            )
class TensorRTMatMulQuantDequantDims4TransposeXDynamicTest(
    TensorRTMatMulQuantDequantDims3DynamicTest
):
    """Dynamic-shape variant: transpose the left operand, alpha=2.0."""

    def set_params(self):
        self.transpose_x, self.transpose_y = True, False
        self.alpha = 2.0
class TensorRTMatMulQuantDequantDims4TransposeYDynamicTest(
    TensorRTMatMulQuantDequantDims3DynamicTest
):
    """Dynamic-shape variant: transpose the right operand, alpha=2.2."""

    def set_params(self):
        self.transpose_x, self.transpose_y = False, True
        self.alpha = 2.2
class TensorRTMatMulQuantDequantDims4TransposeXYDynamicTest(
    TensorRTMatMulQuantDequantDims3DynamicTest
):
    """Dynamic-shape variant: transpose both operands, alpha=7.8."""

    def set_params(self):
        self.transpose_x, self.transpose_y = True, True
        self.alpha = 7.8
# Run all TensorRT matmul quant/dequant test cases when invoked directly.
if __name__ == "__main__":
    unittest.main()
| {
"content_hash": "c9093aaf29a8afffb61de02fc89b8fe8",
"timestamp": "",
"source": "github",
"line_count": 311,
"max_line_length": 80,
"avg_line_length": 33.356913183279744,
"alnum_prop": 0.575187969924812,
"repo_name": "PaddlePaddle/Paddle",
"id": "b85f530cb06af7e68c646ea9c29cd04a6200efef",
"size": "10985",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "python/paddle/fluid/tests/unittests/ir/inference/test_trt_matmul_quant_dequant.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "58544"
},
{
"name": "C",
"bytes": "210300"
},
{
"name": "C++",
"bytes": "36848680"
},
{
"name": "CMake",
"bytes": "902619"
},
{
"name": "Cuda",
"bytes": "5227207"
},
{
"name": "Dockerfile",
"bytes": "4361"
},
{
"name": "Go",
"bytes": "49796"
},
{
"name": "Java",
"bytes": "16630"
},
{
"name": "Jinja",
"bytes": "23852"
},
{
"name": "MLIR",
"bytes": "39982"
},
{
"name": "Python",
"bytes": "36203874"
},
{
"name": "R",
"bytes": "1332"
},
{
"name": "Shell",
"bytes": "553177"
}
],
"symlink_target": ""
} |
import os
import environ
# Project base directory: two levels up from this settings module.
p = environ.Path(__file__) - 2
# Shared environment reader used by the settings modules.
env = environ.Env()
def root(*paths, **kwargs):
    """Return an absolute path built from the project base directory.

    Positional components are joined onto the base dir.  Pass ``ensure=True``
    to create the directory if it does not exist yet.
    """
    ensure = kwargs.pop('ensure', False)
    path = p(*paths, **kwargs)
    if ensure:
        # try/except instead of exists()+makedirs(): avoids the
        # check-then-create race and errors out if ``path`` exists but is
        # not a directory (the old code silently returned it).
        try:
            os.makedirs(path)
        except OSError:
            if not os.path.isdir(path):
                raise
    return path
def read_env():
    """Load variables from the repository-level ``.env`` file, if present."""
    dotenv_path = root('../.env')
    if not os.path.exists(dotenv_path):
        return
    environ.Env.read_env(dotenv_path)  # reading env file
| {
"content_hash": "72afc0a240858149c95b104f3711534f",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 58,
"avg_line_length": 20.35,
"alnum_prop": 0.6093366093366094,
"repo_name": "xuchao666/msz",
"id": "695403a3e5599c92510e2ed990c4b69f58659715",
"size": "431",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "msz/settings/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "65421"
},
{
"name": "HTML",
"bytes": "35328"
},
{
"name": "JavaScript",
"bytes": "6929"
},
{
"name": "Python",
"bytes": "42485"
}
],
"symlink_target": ""
} |
import argparse
import os
import platform
import re
import sys
import xml.etree.ElementTree as ET
from collections import OrderedDict
from typing import List, Dict, TextIO, Tuple, Optional, Any, Union
# Import hardcoded version information from version.py
root_directory = os.path.join(os.path.dirname(os.path.abspath(__file__)), "../../")
sys.path.append(root_directory) # Include the root directory
import version
# $DOCS_URL/path/to/page.html(#fragment-tag)
GODOT_DOCS_PATTERN = re.compile(r"^\$DOCS_URL/(.*)\.html(#.*)?$")
# Based on reStructedText inline markup recognition rules
# https://docutils.sourceforge.io/docs/ref/rst/restructuredtext.html#inline-markup-recognition-rules
MARKUP_ALLOWED_PRECEDENT = " -:/'\"<([{"
MARKUP_ALLOWED_SUBSEQUENT = " -.,:;!?\\/'\")]}>"
# Used to translate section headings and other hardcoded strings when required with
# the --lang argument. The BASE_STRINGS list should be synced with what we actually
# write in this script (check `translate()` uses), and also hardcoded in
# `doc/translations/extract.py` to include them in the source POT file.
BASE_STRINGS = [
    "Description",
    "Tutorials",
    "Properties",
    "Constructors",
    "Methods",
    "Operators",
    "Theme Properties",
    "Signals",
    "Enumerations",
    "Constants",
    "Annotations",
    "Property Descriptions",
    "Constructor Descriptions",
    "Method Descriptions",
    "Operator Descriptions",
    "Theme Property Descriptions",
    "Inherits:",
    "Inherited By:",
    "(overrides %s)",
    "Default",
    "Setter",
    "value",
    "Getter",
    "This method should typically be overridden by the user to have any effect.",
    "This method has no side effects. It doesn't modify any of the instance's member variables.",
    "This method accepts any number of arguments after the ones described here.",
    "This method is used to construct a type.",
    "This method doesn't need an instance to be called, so it can be called directly using the class name.",
    "This method describes a valid operator to use with this type as left-hand operand.",
]
# English -> translated string mapping, populated when --lang is given.
strings_l10n: Dict[str, str] = {}
# Formatting/style codes for terminal output, filled in elsewhere.
STYLES: Dict[str, str] = {}
class State:
    """Accumulates every parsed class definition plus error/warning bookkeeping for one run."""

    def __init__(self) -> None:
        # Counters bumped by print_error()/print_warning(); checked in main() for the exit status.
        self.num_errors = 0
        self.num_warnings = 0
        # All parsed classes keyed by class name; sorted alphabetically by sort_classes().
        self.classes: OrderedDict[str, ClassDef] = OrderedDict()
        # Name of the class currently being parsed/generated, used in error messages.
        self.current_class: str = ""

    def parse_class(self, class_root: ET.Element, filepath: str) -> None:
        """Parse one <class> XML root element into a ClassDef stored in self.classes.

        Duplicate properties/signals/constants/theme items are reported via
        print_error() and skipped; overloadable members (constructors, methods,
        operators, annotations) are stored as lists keyed by name.
        """
        class_name = class_root.attrib["name"]
        self.current_class = class_name

        class_def = ClassDef(class_name)
        self.classes[class_name] = class_def
        class_def.filepath = filepath

        inherits = class_root.get("inherits")
        if inherits is not None:
            class_def.inherits = inherits

        brief_desc = class_root.find("brief_description")
        if brief_desc is not None and brief_desc.text:
            class_def.brief_description = brief_desc.text

        desc = class_root.find("description")
        if desc is not None and desc.text:
            class_def.description = desc.text

        # Properties are stored in <members>/<member> elements.
        properties = class_root.find("members")
        if properties is not None:
            for property in properties:
                assert property.tag == "member"

                property_name = property.attrib["name"]
                if property_name in class_def.properties:
                    print_error('{}.xml: Duplicate property "{}".'.format(class_name, property_name), self)
                    continue

                type_name = TypeName.from_element(property)
                setter = property.get("setter") or None  # Use or None so '' gets turned into None.
                getter = property.get("getter") or None
                default_value = property.get("default") or None
                if default_value is not None:
                    # Wrap the default in RST inline-code markup.
                    default_value = "``{}``".format(default_value)
                overrides = property.get("overrides") or None

                property_def = PropertyDef(
                    property_name, type_name, setter, getter, property.text, default_value, overrides
                )
                class_def.properties[property_name] = property_def

        constructors = class_root.find("constructors")
        if constructors is not None:
            for constructor in constructors:
                assert constructor.tag == "constructor"

                method_name = constructor.attrib["name"]
                qualifiers = constructor.get("qualifiers")

                return_element = constructor.find("return")
                if return_element is not None:
                    return_type = TypeName.from_element(return_element)
                else:
                    return_type = TypeName("void")

                params = self.parse_params(constructor, "constructor")

                desc_element = constructor.find("description")
                method_desc = None
                if desc_element is not None:
                    method_desc = desc_element.text

                method_def = MethodDef(method_name, return_type, params, method_desc, qualifiers)
                method_def.definition_name = "constructor"
                # Constructors can be overloaded, hence the list per name.
                if method_name not in class_def.constructors:
                    class_def.constructors[method_name] = []

                class_def.constructors[method_name].append(method_def)

        methods = class_root.find("methods")
        if methods is not None:
            for method in methods:
                assert method.tag == "method"

                method_name = method.attrib["name"]
                qualifiers = method.get("qualifiers")

                return_element = method.find("return")
                if return_element is not None:
                    return_type = TypeName.from_element(return_element)
                else:
                    return_type = TypeName("void")

                params = self.parse_params(method, "method")

                desc_element = method.find("description")
                method_desc = None
                if desc_element is not None:
                    method_desc = desc_element.text

                method_def = MethodDef(method_name, return_type, params, method_desc, qualifiers)
                # Methods can be overloaded, hence the list per name.
                if method_name not in class_def.methods:
                    class_def.methods[method_name] = []

                class_def.methods[method_name].append(method_def)

        operators = class_root.find("operators")
        if operators is not None:
            for operator in operators:
                assert operator.tag == "operator"

                method_name = operator.attrib["name"]
                qualifiers = operator.get("qualifiers")

                return_element = operator.find("return")
                if return_element is not None:
                    return_type = TypeName.from_element(return_element)
                else:
                    return_type = TypeName("void")

                params = self.parse_params(operator, "operator")

                desc_element = operator.find("description")
                method_desc = None
                if desc_element is not None:
                    method_desc = desc_element.text

                method_def = MethodDef(method_name, return_type, params, method_desc, qualifiers)
                method_def.definition_name = "operator"
                # Operators can be overloaded (by operand type), hence the list per name.
                if method_name not in class_def.operators:
                    class_def.operators[method_name] = []

                class_def.operators[method_name].append(method_def)

        constants = class_root.find("constants")
        if constants is not None:
            for constant in constants:
                assert constant.tag == "constant"

                constant_name = constant.attrib["name"]
                value = constant.attrib["value"]
                enum = constant.get("enum")
                is_bitfield = constant.get("is_bitfield") == "true"
                constant_def = ConstantDef(constant_name, value, constant.text, is_bitfield)
                # A constant with an `enum` attribute is grouped under that enum
                # instead of being stored as a loose class constant.
                if enum is None:
                    if constant_name in class_def.constants:
                        print_error('{}.xml: Duplicate constant "{}".'.format(class_name, constant_name), self)
                        continue

                    class_def.constants[constant_name] = constant_def
                else:
                    if enum in class_def.enums:
                        enum_def = class_def.enums[enum]
                    else:
                        enum_def = EnumDef(enum, is_bitfield)
                        class_def.enums[enum] = enum_def

                    enum_def.values[constant_name] = constant_def

        annotations = class_root.find("annotations")
        if annotations is not None:
            for annotation in annotations:
                assert annotation.tag == "annotation"

                annotation_name = annotation.attrib["name"]
                qualifiers = annotation.get("qualifiers")

                params = self.parse_params(annotation, "annotation")

                desc_element = annotation.find("description")
                annotation_desc = None
                if desc_element is not None:
                    annotation_desc = desc_element.text

                annotation_def = AnnotationDef(annotation_name, params, annotation_desc, qualifiers)
                # Annotations can be overloaded, hence the list per name.
                if annotation_name not in class_def.annotations:
                    class_def.annotations[annotation_name] = []

                class_def.annotations[annotation_name].append(annotation_def)

        signals = class_root.find("signals")
        if signals is not None:
            for signal in signals:
                assert signal.tag == "signal"

                signal_name = signal.attrib["name"]
                if signal_name in class_def.signals:
                    print_error('{}.xml: Duplicate signal "{}".'.format(class_name, signal_name), self)
                    continue

                params = self.parse_params(signal, "signal")

                desc_element = signal.find("description")
                signal_desc = None
                if desc_element is not None:
                    signal_desc = desc_element.text

                signal_def = SignalDef(signal_name, params, signal_desc)
                class_def.signals[signal_name] = signal_def

        theme_items = class_root.find("theme_items")
        if theme_items is not None:
            for theme_item in theme_items:
                assert theme_item.tag == "theme_item"

                theme_item_name = theme_item.attrib["name"]
                theme_item_data_name = theme_item.attrib["data_type"]
                # Theme items are unique per (data type, name) pair, not per name alone.
                theme_item_id = "{}_{}".format(theme_item_data_name, theme_item_name)
                if theme_item_id in class_def.theme_items:
                    print_error(
                        '{}.xml: Duplicate theme item "{}" of type "{}".'.format(
                            class_name, theme_item_name, theme_item_data_name
                        ),
                        self,
                    )
                    continue

                default_value = theme_item.get("default") or None
                if default_value is not None:
                    default_value = "``{}``".format(default_value)

                theme_item_def = ThemeItemDef(
                    theme_item_name,
                    TypeName.from_element(theme_item),
                    theme_item_data_name,
                    theme_item.text,
                    default_value,
                )
                class_def.theme_items[theme_item_name] = theme_item_def

        tutorials = class_root.find("tutorials")
        if tutorials is not None:
            for link in tutorials:
                assert link.tag == "link"

                if link.text is not None:
                    class_def.tutorials.append((link.text.strip(), link.get("title", "")))

        self.current_class = ""

    def parse_params(self, root: ET.Element, context: str) -> List["ParameterDef"]:
        """Parse the <param> children of `root`, ordered by their `index` attribute.

        `context` names the kind of definition ("method", "signal", ...) for error
        messages. Empty/placeholder parameter names are reported but still stored.
        """
        param_elements = root.findall("param")
        # Slots are pre-allocated and filled via each element's `index` attribute,
        # so the document order of <param> elements does not matter.
        params: Any = [None] * len(param_elements)

        for param_index, param_element in enumerate(param_elements):
            param_name = param_element.attrib["name"]
            index = int(param_element.attrib["index"])
            type_name = TypeName.from_element(param_element)
            default = param_element.get("default")

            if param_name.strip() == "" or param_name.startswith("_unnamed_arg"):
                print_error(
                    '{}.xml: Empty argument name in {} "{}" at position {}.'.format(
                        self.current_class, context, root.attrib["name"], param_index
                    ),
                    self,
                )

            params[index] = ParameterDef(param_name, type_name, default)

        cast: List[ParameterDef] = params

        return cast

    def sort_classes(self) -> None:
        """Sort self.classes alphabetically by class name."""
        self.classes = OrderedDict(sorted(self.classes.items(), key=lambda t: t[0]))
class TypeName:
    """A type reference from the XML (`type` attribute), optionally pointing at an enum."""

    def __init__(self, type_name: str, enum: Optional[str] = None) -> None:
        self.enum = enum
        self.type_name = type_name

    def to_rst(self, state: State) -> str:
        """Render this type as an RST cross-reference (enums win over plain types)."""
        if self.enum is not None:
            return make_enum(self.enum, state)
        if self.type_name == "void":
            return "void"
        return make_type(self.type_name, state)

    @classmethod
    def from_element(cls, element: ET.Element) -> "TypeName":
        """Build a TypeName from an element's `type` attribute and optional `enum` attribute."""
        return cls(element.attrib["type"], element.get("enum"))
class DefinitionBase:
    """Common base for every documented entity: stores the entity kind and its name."""

    def __init__(self, definition_name: str, name: str) -> None:
        # e.g. "method", "signal", "class" — used when formatting error context.
        self.definition_name = definition_name
        self.name = name
class PropertyDef(DefinitionBase):
    """Documentation data for a single class property (a <member> XML element)."""

    def __init__(
        self,
        name: str,
        type_name: TypeName,
        setter: Optional[str],
        getter: Optional[str],
        text: Optional[str],
        default_value: Optional[str],
        overrides: Optional[str],
    ) -> None:
        super().__init__("property", name)
        self.type_name = type_name
        # Accessor method names; None when the property declares no setter/getter.
        self.setter = setter
        self.getter = getter
        # Name of the ancestor class whose property this one overrides, if any.
        self.overrides = overrides
        self.default_value = default_value
        # Description body text from the XML.
        self.text = text
class ParameterDef(DefinitionBase):
    """Documentation data for one parameter of a method, signal, or annotation."""

    def __init__(self, name: str, type_name: TypeName, default_value: Optional[str]) -> None:
        super().__init__("parameter", name)
        self.default_value = default_value
        self.type_name = type_name
class SignalDef(DefinitionBase):
    """Documentation data for a signal and its parameter list."""

    def __init__(self, name: str, parameters: List[ParameterDef], description: Optional[str]) -> None:
        super().__init__("signal", name)
        self.description = description
        self.parameters = parameters
class AnnotationDef(DefinitionBase):
    """Documentation data for a GDScript annotation (e.g. @export)."""

    def __init__(
        self,
        name: str,
        parameters: List[ParameterDef],
        description: Optional[str],
        qualifiers: Optional[str],
    ) -> None:
        super().__init__("annotation", name)
        # Space-separated qualifier keywords (e.g. "vararg"), or None.
        self.qualifiers = qualifiers
        self.parameters = parameters
        self.description = description
class MethodDef(DefinitionBase):
    """Documentation data for a method; also reused for constructors and operators."""

    def __init__(
        self,
        name: str,
        return_type: TypeName,
        parameters: List[ParameterDef],
        description: Optional[str],
        qualifiers: Optional[str],
    ) -> None:
        super().__init__("method", name)
        # Space-separated qualifier keywords (e.g. "const vararg"), or None.
        self.qualifiers = qualifiers
        self.return_type = return_type
        self.parameters = parameters
        self.description = description
class ConstantDef(DefinitionBase):
    """Documentation data for a single constant or enum value."""

    def __init__(self, name: str, value: str, text: Optional[str], bitfield: bool) -> None:
        super().__init__("constant", name)
        self.is_bitfield = bitfield
        self.value = value
        self.text = text
class EnumDef(DefinitionBase):
    """Documentation data for an enum; its values are filled in while parsing constants."""

    def __init__(self, name: str, bitfield: bool) -> None:
        super().__init__("enum", name)
        self.is_bitfield = bitfield
        # Enum values keyed by constant name, in declaration order.
        self.values: OrderedDict[str, ConstantDef] = OrderedDict()
class ThemeItemDef(DefinitionBase):
    """Documentation data for a theme item (color, constant, font, icon, or stylebox)."""

    def __init__(
        self, name: str, type_name: TypeName, data_name: str, text: Optional[str], default_value: Optional[str]
    ) -> None:
        super().__init__("theme item", name)
        # Theme data category (e.g. "color", "font"); part of the item's unique id.
        self.data_name = data_name
        self.type_name = type_name
        self.default_value = default_value
        self.text = text
class ClassDef(DefinitionBase):
    """Aggregated documentation for a single class, filled in by State.parse_class()."""

    def __init__(self, name: str) -> None:
        super().__init__("class", name)
        # Simple text fields.
        self.inherits: Optional[str] = None
        self.brief_description: Optional[str] = None
        self.description: Optional[str] = None
        # (url, title) pairs from the <tutorials> section.
        self.tutorials: List[Tuple[str, str]] = []
        # Members keyed by name. Constructors, methods, operators, and annotations
        # may be overloaded, hence the list values.
        self.constants: OrderedDict[str, ConstantDef] = OrderedDict()
        self.enums: OrderedDict[str, EnumDef] = OrderedDict()
        self.properties: OrderedDict[str, PropertyDef] = OrderedDict()
        self.constructors: OrderedDict[str, List[MethodDef]] = OrderedDict()
        self.methods: OrderedDict[str, List[MethodDef]] = OrderedDict()
        self.operators: OrderedDict[str, List[MethodDef]] = OrderedDict()
        self.signals: OrderedDict[str, SignalDef] = OrderedDict()
        self.annotations: OrderedDict[str, List[AnnotationDef]] = OrderedDict()
        self.theme_items: OrderedDict[str, ThemeItemDef] = OrderedDict()
        # Used to match the class with XML source for output filtering purposes.
        self.filepath: str = ""
# Entry point for the RST generator.
def main() -> None:
    """Command-line entry point: validate the XML class reference and emit .rst files.

    Exits with status 1 when any error was reported (or polib is missing for --lang).
    """
    # Enable ANSI escape code support on Windows 10 and later (for colored console output).
    # <https://bugs.python.org/issue29059>
    if platform.system().lower() == "windows":
        from ctypes import windll, c_int, byref  # type: ignore

        stdout_handle = windll.kernel32.GetStdHandle(c_int(-11))
        mode = c_int(0)
        windll.kernel32.GetConsoleMode(c_int(stdout_handle), byref(mode))
        # 4 is ENABLE_VIRTUAL_TERMINAL_PROCESSING.
        mode = c_int(mode.value | 4)
        windll.kernel32.SetConsoleMode(c_int(stdout_handle), mode)

    parser = argparse.ArgumentParser()
    parser.add_argument("path", nargs="+", help="A path to an XML file or a directory containing XML files to parse.")
    parser.add_argument("--filter", default="", help="The filepath pattern for XML files to filter.")
    parser.add_argument("--lang", "-l", default="en", help="Language to use for section headings.")
    parser.add_argument(
        "--color",
        action="store_true",
        help="If passed, force colored output even if stdout is not a TTY (useful for continuous integration).",
    )
    # --output and --dry-run are mutually exclusive: a dry run writes nothing.
    group = parser.add_mutually_exclusive_group()
    group.add_argument("--output", "-o", default=".", help="The directory to save output .rst files in.")
    group.add_argument(
        "--dry-run",
        action="store_true",
        help="If passed, no output will be generated and XML files are only checked for errors.",
    )
    args = parser.parse_args()

    should_color = args.color or (hasattr(sys.stdout, "isatty") and sys.stdout.isatty())
    # When color is disabled every style becomes an empty string, so format calls are no-ops.
    STYLES["red"] = "\x1b[91m" if should_color else ""
    STYLES["green"] = "\x1b[92m" if should_color else ""
    STYLES["yellow"] = "\x1b[93m" if should_color else ""
    STYLES["bold"] = "\x1b[1m" if should_color else ""
    STYLES["regular"] = "\x1b[22m" if should_color else ""
    STYLES["reset"] = "\x1b[0m" if should_color else ""

    # Retrieve heading translations for the given language.
    if not args.dry_run and args.lang != "en":
        lang_file = os.path.join(
            os.path.dirname(os.path.realpath(__file__)), "..", "translations", "{}.po".format(args.lang)
        )
        if os.path.exists(lang_file):
            try:
                import polib
            except ImportError:
                print("Base template strings localization requires `polib`.")
                exit(1)

            pofile = polib.pofile(lang_file)
            for entry in pofile.translated_entries():
                # Only strings we actually emit (BASE_STRINGS) are kept.
                if entry.msgid in BASE_STRINGS:
                    strings_l10n[entry.msgid] = entry.msgstr
        else:
            print('No PO file at "{}" for language "{}".'.format(lang_file, args.lang))

    print("Checking for errors in the XML class reference...")

    # Collect the list of XML files to parse from the given paths.
    file_list: List[str] = []

    for path in args.path:
        # Cut off trailing slashes so os.path.basename doesn't choke.
        if path.endswith("/") or path.endswith("\\"):
            path = path[:-1]

        if os.path.basename(path) == "modules":
            # A "modules" directory gets a recursive scan for doc_classes subfolders.
            for subdir, dirs, _ in os.walk(path):
                if "doc_classes" in dirs:
                    doc_dir = os.path.join(subdir, "doc_classes")
                    class_file_names = (f for f in os.listdir(doc_dir) if f.endswith(".xml"))
                    file_list += (os.path.join(doc_dir, f) for f in class_file_names)
        elif os.path.isdir(path):
            file_list += (os.path.join(path, f) for f in os.listdir(path) if f.endswith(".xml"))
        elif os.path.isfile(path):
            if not path.endswith(".xml"):
                print('Got non-.xml file "{}" in input, skipping.'.format(path))
                continue

            file_list.append(path)

    # First pass: load all XML roots (and detect duplicates) before parsing,
    # so that cross-class type/enum references can resolve.
    classes: Dict[str, Tuple[ET.Element, str]] = {}
    state = State()

    for cur_file in file_list:
        try:
            tree = ET.parse(cur_file)
        except ET.ParseError as e:
            print_error("{}: Parse error while reading the file: {}".format(cur_file, e), state)
            continue
        doc = tree.getroot()

        if "version" not in doc.attrib:
            print_error('{}: "version" attribute missing from "doc".'.format(cur_file), state)
            continue

        name = doc.attrib["name"]
        if name in classes:
            print_error('{}: Duplicate class "{}".'.format(cur_file, name), state)
            continue
        classes[name] = (doc, cur_file)

    # Second pass: parse each class; one bad class doesn't abort the whole run.
    for name, data in classes.items():
        try:
            state.parse_class(data[0], data[1])
        except Exception as e:
            print_error("{}.xml: Exception while parsing class: {}".format(name, e), state)

    state.sort_classes()

    pattern = re.compile(args.filter)

    # Create the output folder recursively if it doesn't already exist.
    os.makedirs(args.output, exist_ok=True)

    print("Generating the RST class reference...")

    for class_name, class_def in state.classes.items():
        if args.filter and not pattern.search(class_def.filepath):
            continue
        state.current_class = class_name
        make_rst_class(class_def, state, args.dry_run, args.output)

    print("")

    # Summary: warnings are informational, errors set a non-zero exit status.
    if state.num_warnings >= 2:
        print(
            "{}{} warnings were found in the class reference XML. Please check the messages above.{}".format(
                STYLES["yellow"], state.num_warnings, STYLES["reset"]
            )
        )
    elif state.num_warnings == 1:
        print(
            "{}1 warning was found in the class reference XML. Please check the messages above.{}".format(
                STYLES["yellow"], STYLES["reset"]
            )
        )

    if state.num_errors == 0:
        print("{}No errors found in the class reference XML.{}".format(STYLES["green"], STYLES["reset"]))
        if not args.dry_run:
            print("Wrote reStructuredText files for each class to: %s" % args.output)
    else:
        if state.num_errors >= 2:
            print(
                "{}{} errors were found in the class reference XML. Please check the messages above.{}".format(
                    STYLES["red"], state.num_errors, STYLES["reset"]
                )
            )
        else:
            print(
                "{}1 error was found in the class reference XML. Please check the messages above.{}".format(
                    STYLES["red"], STYLES["reset"]
                )
            )
        exit(1)
# Common helpers.
def print_error(error: str, state: State) -> None:
    """Print `error` with a red ERROR: prefix and bump the error counter on `state`."""
    message = "{red}{bold}ERROR:{regular} {msg}{reset}".format(
        red=STYLES["red"],
        bold=STYLES["bold"],
        regular=STYLES["regular"],
        msg=error,
        reset=STYLES["reset"],
    )
    print(message)
    state.num_errors += 1
def print_warning(error: str, state: State) -> None:
    """Print `error` with a yellow WARNING: prefix and bump the warning counter on `state`."""
    message = "{yellow}{bold}WARNING:{regular} {msg}{reset}".format(
        yellow=STYLES["yellow"],
        bold=STYLES["bold"],
        regular=STYLES["regular"],
        msg=error,
        reset=STYLES["reset"],
    )
    print(message)
    state.num_warnings += 1
def translate(string: str) -> str:
    """Translate a string based on translations sourced from `doc/translations/*.po`
    for a language if defined via the --lang command line argument.
    Returns the original string if no translation exists.
    """
    if string in strings_l10n:
        return strings_l10n[string]
    return string
# Generator methods.
def make_rst_class(class_def: ClassDef, state: State, dry_run: bool, output_dir: str) -> None:
    """Write the complete .rst page for one class.

    When `dry_run` is True all output goes to os.devnull so formatting errors are
    still reported without writing files.
    """
    class_name = class_def.name

    # NOTE(review): `f` is never explicitly closed; the function relies on interpreter cleanup.
    if dry_run:
        f = open(os.devnull, "w", encoding="utf-8")
    else:
        f = open(os.path.join(output_dir, "class_" + class_name.lower() + ".rst"), "w", encoding="utf-8")

    # Remove the "Edit on Github" button from the online docs page.
    f.write(":github_url: hide\n\n")

    # Warn contributors not to edit this file directly.
    # Also provide links to the source files for reference.
    git_branch = "master"
    if hasattr(version, "docs") and version.docs != "latest":
        git_branch = version.docs

    source_xml_path = os.path.relpath(class_def.filepath, root_directory).replace("\\", "/")
    source_github_url = "https://github.com/godotengine/godot/tree/{}/{}".format(git_branch, source_xml_path)
    generator_github_url = "https://github.com/godotengine/godot/tree/{}/doc/tools/make_rst.py".format(git_branch)

    f.write(".. DO NOT EDIT THIS FILE!!!\n")
    f.write(".. Generated automatically from Godot engine sources.\n")
    f.write(".. Generator: " + generator_github_url + ".\n")
    f.write(".. XML source: " + source_github_url + ".\n\n")

    # Document reference id and header.
    f.write(".. _class_" + class_name + ":\n\n")
    f.write(make_heading(class_name, "=", False))

    # Inheritance tree
    # Ascendants
    if class_def.inherits:
        inherits = class_def.inherits.strip()
        f.write("**" + translate("Inherits:") + "** ")
        first = True
        # Walk up the chain until an ancestor outside the parsed class set is hit.
        while inherits in state.classes:
            if not first:
                f.write(" **<** ")
            else:
                first = False

            f.write(make_type(inherits, state))
            inode = state.classes[inherits].inherits
            if inode:
                inherits = inode.strip()
            else:
                break

        f.write("\n\n")

    # Descendants
    inherited: List[str] = []
    for c in state.classes.values():
        if c.inherits and c.inherits.strip() == class_name:
            inherited.append(c.name)

    if len(inherited):
        f.write("**" + translate("Inherited By:") + "** ")
        for i, child in enumerate(inherited):
            if i > 0:
                f.write(", ")
            f.write(make_type(child, state))
        f.write("\n\n")

    # Brief description
    if class_def.brief_description is not None:
        f.write(format_text_block(class_def.brief_description.strip(), class_def, state) + "\n\n")

    # Class description
    if class_def.description is not None and class_def.description.strip() != "":
        f.write(make_heading("Description", "-"))
        f.write(format_text_block(class_def.description.strip(), class_def, state) + "\n\n")

    # Online tutorials
    if len(class_def.tutorials) > 0:
        f.write(make_heading("Tutorials", "-"))
        for url, title in class_def.tutorials:
            f.write("- " + make_link(url, title) + "\n\n")

    # Properties overview
    if len(class_def.properties) > 0:
        f.write(make_heading("Properties", "-"))
        ml: List[Tuple[Optional[str], ...]] = []
        for property_def in class_def.properties.values():
            type_rst = property_def.type_name.to_rst(state)
            default = property_def.default_value
            if default is not None and property_def.overrides:
                # Overriding properties link back to the ancestor's property anchor.
                ref = ":ref:`{1}<class_{1}_property_{0}>`".format(property_def.name, property_def.overrides)
                # Not using translate() for now as it breaks table formatting.
                ml.append((type_rst, property_def.name, default + " " + "(overrides %s)" % ref))
            else:
                ref = ":ref:`{0}<class_{1}_property_{0}>`".format(property_def.name, class_name)
                ml.append((type_rst, ref, default))
        format_table(f, ml, True)

    # Constructors, Methods, Operators overview
    if len(class_def.constructors) > 0:
        f.write(make_heading("Constructors", "-"))
        ml: List[Tuple[Optional[str], ...]] = []
        for method_list in class_def.constructors.values():
            for m in method_list:
                ml.append(make_method_signature(class_def, m, "constructor", state))
        format_table(f, ml)

    if len(class_def.methods) > 0:
        f.write(make_heading("Methods", "-"))
        ml: List[Tuple[Optional[str], ...]] = []
        for method_list in class_def.methods.values():
            for m in method_list:
                ml.append(make_method_signature(class_def, m, "method", state))
        format_table(f, ml)

    if len(class_def.operators) > 0:
        f.write(make_heading("Operators", "-"))
        ml: List[Tuple[Optional[str], ...]] = []
        for method_list in class_def.operators.values():
            for m in method_list:
                ml.append(make_method_signature(class_def, m, "operator", state))
        format_table(f, ml)

    # Theme properties
    if len(class_def.theme_items) > 0:
        f.write(make_heading("Theme Properties", "-"))
        pl: List[Tuple[Optional[str], ...]] = []
        for theme_item_def in class_def.theme_items.values():
            ref = ":ref:`{0}<class_{2}_theme_{1}_{0}>`".format(
                theme_item_def.name, theme_item_def.data_name, class_name
            )
            pl.append((theme_item_def.type_name.to_rst(state), ref, theme_item_def.default_value))
        format_table(f, pl, True)

    # Signals
    if len(class_def.signals) > 0:
        f.write(make_heading("Signals", "-"))
        index = 0

        for signal in class_def.signals.values():
            # "----" separates consecutive entries.
            if index != 0:
                f.write("----\n\n")

            f.write(".. _class_{}_signal_{}:\n\n".format(class_name, signal.name))
            _, signature = make_method_signature(class_def, signal, "", state)
            f.write("- {}\n\n".format(signature))

            if signal.description is not None and signal.description.strip() != "":
                f.write(format_text_block(signal.description.strip(), signal, state) + "\n\n")

            index += 1

    # Enums
    if len(class_def.enums) > 0:
        f.write(make_heading("Enumerations", "-"))
        index = 0

        for e in class_def.enums.values():
            if index != 0:
                f.write("----\n\n")

            f.write(".. _enum_{}_{}:\n\n".format(class_name, e.name))
            # Sphinx seems to divide the bullet list into individual <ul> tags if we weave the labels into it.
            # As such I'll put them all above the list. Won't be perfect but better than making the list visually broken.
            # As to why I'm not modifying the reference parser to directly link to the _enum label:
            # If somebody gets annoyed enough to fix it, all existing references will magically improve.
            for value in e.values.values():
                f.write(".. _class_{}_constant_{}:\n\n".format(class_name, value.name))

            if e.is_bitfield:
                f.write("flags **{}**:\n\n".format(e.name))
            else:
                f.write("enum **{}**:\n\n".format(e.name))

            for value in e.values.values():
                f.write("- **{}** = **{}**".format(value.name, value.value))
                if value.text is not None and value.text.strip() != "":
                    # If value.text contains a bullet point list, each entry needs additional indentation
                    f.write(" --- " + indent_bullets(format_text_block(value.text.strip(), value, state)))

                f.write("\n\n")

            index += 1

    # Constants
    if len(class_def.constants) > 0:
        f.write(make_heading("Constants", "-"))
        # Sphinx seems to divide the bullet list into individual <ul> tags if we weave the labels into it.
        # As such I'll put them all above the list. Won't be perfect but better than making the list visually broken.
        for constant in class_def.constants.values():
            f.write(".. _class_{}_constant_{}:\n\n".format(class_name, constant.name))

        for constant in class_def.constants.values():
            f.write("- **{}** = **{}**".format(constant.name, constant.value))
            if constant.text is not None and constant.text.strip() != "":
                f.write(" --- " + format_text_block(constant.text.strip(), constant, state))

            f.write("\n\n")

    # Annotations
    if len(class_def.annotations) > 0:
        f.write(make_heading("Annotations", "-"))
        index = 0

        for method_list in class_def.annotations.values():
            for i, m in enumerate(method_list):
                if index != 0:
                    f.write("----\n\n")

                # Only the first overload gets the anchor label.
                if i == 0:
                    f.write(".. _class_{}_annotation_{}:\n\n".format(class_name, m.name.strip("@")))

                _, signature = make_method_signature(class_def, m, "", state)
                f.write("- {}\n\n".format(signature))

                if m.description is not None and m.description.strip() != "":
                    f.write(format_text_block(m.description.strip(), m, state) + "\n\n")

                index += 1

    # Property descriptions
    # NOTE(review): the `> 0` is redundant — any() already returns a bool — but harmless.
    if any(not p.overrides for p in class_def.properties.values()) > 0:
        f.write(make_heading("Property Descriptions", "-"))
        index = 0

        for property_def in class_def.properties.values():
            # Overriding properties only appear in the overview table, not here.
            if property_def.overrides:
                continue

            if index != 0:
                f.write("----\n\n")

            f.write(".. _class_{}_property_{}:\n\n".format(class_name, property_def.name))
            f.write("- {} **{}**\n\n".format(property_def.type_name.to_rst(state), property_def.name))

            info: List[Tuple[Optional[str], ...]] = []
            # Not using translate() for now as it breaks table formatting.
            if property_def.default_value is not None:
                info.append(("*" + "Default" + "*", property_def.default_value))
            # Private (underscore-prefixed) accessors are not documented.
            if property_def.setter is not None and not property_def.setter.startswith("_"):
                info.append(("*" + "Setter" + "*", property_def.setter + "(" + "value" + ")"))
            if property_def.getter is not None and not property_def.getter.startswith("_"):
                info.append(("*" + "Getter" + "*", property_def.getter + "()"))

            if len(info) > 0:
                format_table(f, info)

            if property_def.text is not None and property_def.text.strip() != "":
                f.write(format_text_block(property_def.text.strip(), property_def, state) + "\n\n")

            index += 1

    # Constructor, Method, Operator descriptions
    if len(class_def.constructors) > 0:
        f.write(make_heading("Constructor Descriptions", "-"))
        index = 0

        for method_list in class_def.constructors.values():
            for i, m in enumerate(method_list):
                if index != 0:
                    f.write("----\n\n")

                if i == 0:
                    f.write(".. _class_{}_constructor_{}:\n\n".format(class_name, m.name))

                ret_type, signature = make_method_signature(class_def, m, "", state)
                f.write("- {} {}\n\n".format(ret_type, signature))

                if m.description is not None and m.description.strip() != "":
                    f.write(format_text_block(m.description.strip(), m, state) + "\n\n")

                index += 1

    if len(class_def.methods) > 0:
        f.write(make_heading("Method Descriptions", "-"))
        index = 0

        for method_list in class_def.methods.values():
            for i, m in enumerate(method_list):
                if index != 0:
                    f.write("----\n\n")

                if i == 0:
                    f.write(".. _class_{}_method_{}:\n\n".format(class_name, m.name))

                ret_type, signature = make_method_signature(class_def, m, "", state)
                f.write("- {} {}\n\n".format(ret_type, signature))

                if m.description is not None and m.description.strip() != "":
                    f.write(format_text_block(m.description.strip(), m, state) + "\n\n")

                index += 1

    if len(class_def.operators) > 0:
        f.write(make_heading("Operator Descriptions", "-"))
        index = 0

        for method_list in class_def.operators.values():
            for i, m in enumerate(method_list):
                if index != 0:
                    f.write("----\n\n")

                # Operator anchors include the return type to disambiguate overloads.
                if i == 0:
                    f.write(
                        ".. _class_{}_operator_{}_{}:\n\n".format(
                            class_name, sanitize_operator_name(m.name, state), m.return_type.type_name
                        )
                    )

                ret_type, signature = make_method_signature(class_def, m, "", state)
                f.write("- {} {}\n\n".format(ret_type, signature))

                if m.description is not None and m.description.strip() != "":
                    f.write(format_text_block(m.description.strip(), m, state) + "\n\n")

                index += 1

    # Theme property descriptions
    if len(class_def.theme_items) > 0:
        f.write(make_heading("Theme Property Descriptions", "-"))
        index = 0

        for theme_item_def in class_def.theme_items.values():
            if index != 0:
                f.write("----\n\n")

            f.write(".. _class_{}_theme_{}_{}:\n\n".format(class_name, theme_item_def.data_name, theme_item_def.name))
            f.write("- {} **{}**\n\n".format(theme_item_def.type_name.to_rst(state), theme_item_def.name))

            info = []
            if theme_item_def.default_value is not None:
                # Not using translate() for now as it breaks table formatting.
                info.append(("*" + "Default" + "*", theme_item_def.default_value))

            if len(info) > 0:
                format_table(f, info)

            if theme_item_def.text is not None and theme_item_def.text.strip() != "":
                f.write(format_text_block(theme_item_def.text.strip(), theme_item_def, state) + "\n\n")

            index += 1

    f.write(make_footer())
def make_type(klass: str, state: State) -> str:
    """Return an RST cross-reference for the given type name.

    Pointer types are returned verbatim; typed arrays (`Foo[]`) link to the
    element type while displaying the full name. Unresolved types are reported
    and returned unchanged.
    """
    if "*" in klass:  # Pointer, ignore
        return klass

    # Typed array, strip [] to link to contained type.
    link_target = klass[:-2] if klass.endswith("[]") else klass

    if link_target in state.classes:
        return ":ref:`{}<class_{}>`".format(klass, link_target)

    print_error('{}.xml: Unresolved type "{}".'.format(state.current_class, klass), state)
    return klass
def make_enum(t: str, state: State) -> str:
    """Return an RST cross-reference for an enum name, qualified or not.

    Unqualified names resolve against the current class, then @GlobalScope.
    Unresolved enums are reported (except the known-special `Vector3.Axis`)
    and returned unchanged.
    """
    if "." in t:
        c, e = t.split(".", 1)
        # Variant enums live in GlobalScope but still use periods.
        if c == "Variant":
            c = "@GlobalScope"
            e = "Variant." + e
    else:
        c = state.current_class
        e = t

    # Fall back to the global scope when the candidate class doesn't define the enum.
    if c in state.classes and e not in state.classes[c].enums:
        c = "@GlobalScope"

    if c in state.classes and e in state.classes[c].enums:
        return ":ref:`{0}<enum_{1}_{0}>`".format(e, c)

    # Don't fail for `Vector3.Axis`, as this enum is a special case which is expected not to be resolved.
    if "{}.{}".format(c, e) != "Vector3.Axis":
        print_error('{}.xml: Unresolved enum "{}".'.format(state.current_class, t), state)

    return t
def make_method_signature(
    class_def: ClassDef, definition: Union[AnnotationDef, MethodDef, SignalDef], ref_type: str, state: State
) -> Tuple[str, str]:
    """Return a (return type, signature) RST pair for a method, signal, or annotation.

    `ref_type` is the anchor category ("method", "constructor", "operator") used to
    cross-link the name; an empty string emits the name in bold with no link.
    The returned return-type string is "" for signals and annotations.
    """
    ret_type = ""

    is_method_def = isinstance(definition, MethodDef)
    if is_method_def:
        ret_type = definition.return_type.to_rst(state)

    # Only methods and annotations carry qualifiers (const, vararg, static, ...).
    qualifiers = None
    if is_method_def or isinstance(definition, AnnotationDef):
        qualifiers = definition.qualifiers

    out = ""

    if is_method_def and ref_type != "":
        if ref_type == "operator":
            # Operator anchors also encode the return type to disambiguate overloads.
            out += ":ref:`{0}<class_{1}_{2}_{3}_{4}>` ".format(
                definition.name.replace("<", "\\<"),  # So operator "<" gets correctly displayed.
                class_def.name,
                ref_type,
                sanitize_operator_name(definition.name, state),
                definition.return_type.type_name,
            )
        else:
            out += ":ref:`{0}<class_{1}_{2}_{0}>` ".format(definition.name, class_def.name, ref_type)
    else:
        out += "**{}** ".format(definition.name)

    out += "**(**"
    for i, arg in enumerate(definition.parameters):
        if i > 0:
            out += ", "
        else:
            out += " "

        out += "{} {}".format(arg.type_name.to_rst(state), arg.name)

        if arg.default_value is not None:
            out += "=" + arg.default_value

    # Variadic definitions get a trailing ellipsis inside the parentheses.
    if qualifiers is not None and "vararg" in qualifiers:
        if len(definition.parameters) > 0:
            out += ", ..."
        else:
            out += " ..."

    out += " **)**"

    if qualifiers is not None:
        # Use substitutions for abbreviations. This is used to display tooltips on hover.
        # See `make_footer()` for descriptions.
        for qualifier in qualifiers.split():
            out += " |" + qualifier + "|"

    return ret_type, out
def make_heading(title: str, underline: str, l10n: bool = True) -> str:
    """Return an RST section heading: the title followed by a full-width underline.

    When `l10n` is True and a translation changes the title, the underline
    character is doubled to account for wide (CJK) characters.
    """
    if l10n:
        translated = translate(title)
        if translated != title:
            title = translated
            underline *= 2  # Double length to handle wide chars.
    return "{}\n{}\n\n".format(title, underline * len(title))
def make_footer() -> str:
    """Return the reusable abbreviation substitutions appended to every page.

    Generating them once per page avoids bloating the rST with duplicate
    abbreviation definitions; the tooltips explain each method qualifier.
    """
    substitutions = (
        ("virtual", "This method should typically be overridden by the user to have any effect."),
        ("const", "This method has no side effects. It doesn't modify any of the instance's member variables."),
        ("vararg", "This method accepts any number of arguments after the ones described here."),
        ("constructor", "This method is used to construct a type."),
        ("static", "This method doesn't need an instance to be called, so it can be called directly using the class name."),
        ("operator", "This method describes a valid operator to use with this type as left-hand operand."),
    )
    return "".join(
        ".. |{0}| replace:: :abbr:`{0} ({1})`\n".format(keyword, translate(description))
        for keyword, description in substitutions
    )
def make_link(url: str, title: str) -> str:
    """Format a URL as an rST link, special-casing Godot documentation URLs.

    Godot docs URLs become ``:doc:`` page references (optionally with a
    section fragment); anything else becomes a plain external link. An empty
    ``title`` falls back to the URL or fragment as the link text.
    """
    match = GODOT_DOCS_PATTERN.search(url)
    if match:
        groups = match.groups()
        if match.lastindex == 2:
            # Doc reference with fragment identifier: emit direct link to section with reference to page, for example:
            # `#calling-javascript-from-script in Exporting For Web`
            # Or use the title if provided.
            if title != "":
                return "`{} <../{}.html{}>`__".format(title, groups[0], groups[1])
            return "`{1} <../{0}.html{1}>`__ in :doc:`../{0}`".format(groups[0], groups[1])
        if match.lastindex == 1:
            # Doc reference, for example:
            # `Math`
            if title != "":
                return ":doc:`{} <../{}>`".format(title, groups[0])
            return ":doc:`../{}`".format(groups[0])

    # External link, for example:
    # `http://enet.bespin.org/usergroup0.html`
    if title != "":
        return "`{} <{}>`__".format(title, url)
    return "`{0} <{0}>`__".format(url)
# Formatting helpers.

# BBCode-style tags that only affect presentation (no cross-linking).
RESERVED_FORMATTING_TAGS = ["i", "b", "u", "code", "kbd", "center", "url", "br"]
# Tags that open multi-language code listings and their per-language bodies.
RESERVED_CODEBLOCK_TAGS = ["codeblocks", "codeblock", "gdscript", "csharp"]
# Tags that cross-reference other documented items (resolved in format_text_block).
RESERVED_CROSSLINK_TAGS = ["method", "member", "signal", "constant", "enum", "annotation", "theme_item", "param"]
def is_in_tagset(tag_text: str, tagset: List[str]) -> bool:
    """Return True if ``tag_text`` matches any tag in ``tagset``.

    A match is either the bare tag name, or the tag followed by arguments —
    space-separated ("method foo") or equals-separated ("url=http://...").
    """
    for candidate in tagset:
        # Complete match, or tag with space-/equals-delimited arguments.
        if tag_text == candidate or tag_text.startswith((candidate + " ", candidate + "=")):
            return True
    return False
def format_text_block(
    text: str,
    context: Union[DefinitionBase, None],
    state: State,
) -> str:
    """Convert a class-reference description from BBCode-like markup to rST.

    Handles paragraph splitting, code blocks, inline code, cross-reference
    tags ([method], [member], [signal], [constant], [enum], [annotation],
    [theme_item], [param]), [url] links, and basic formatting tags.

    Args:
        text: raw description text from the XML.
        context: the definition whose description is being formatted; used
            for error messages and to resolve [param] references. May be None.
        state: global documentation state (class index, error reporting).

    Returns:
        The formatted rST text, or "" if a code block was malformed.
    """
    # Linebreak + tabs in the XML should become two line breaks unless in a "codeblock"
    pos = 0
    while True:
        pos = text.find("\n", pos)
        if pos == -1:
            break

        pre_text = text[:pos]
        indent_level = 0
        # Count the tabs after the newline to know the XML indentation depth.
        while pos + 1 < len(text) and text[pos + 1] == "\t":
            pos += 1
            indent_level += 1
        post_text = text[pos + 1 :]

        # Handle codeblocks
        if (
            post_text.startswith("[codeblock]")
            or post_text.startswith("[gdscript]")
            or post_text.startswith("[csharp]")
        ):
            block_type = post_text[1:].split("]")[0]
            result = format_codeblock(block_type, post_text, indent_level, state)
            if result is None:
                return ""
            text = pre_text + result[0]
            pos += result[1] - indent_level

        # Handle normal text
        else:
            text = pre_text + "\n\n" + post_text
            pos += 2 - indent_level

    next_brac_pos = text.find("[")
    text = escape_rst(text, next_brac_pos)

    context_name = format_context_name(context)

    # Handle [tags]
    inside_code = False
    inside_code_tag = ""
    inside_code_tabs = False
    pos = 0
    tag_depth = 0
    while True:
        pos = text.find("[", pos)
        if pos == -1:
            break

        endq_pos = text.find("]", pos + 1)
        if endq_pos == -1:
            break

        pre_text = text[:pos]
        post_text = text[endq_pos + 1 :]
        tag_text = text[pos + 1 : endq_pos]

        escape_pre = False
        escape_post = False

        # Tag is a reference to a class.
        if tag_text in state.classes:
            if tag_text == state.current_class:
                # Don't create a link to the same class, format it as inline code.
                tag_text = "``{}``".format(tag_text)
            else:
                tag_text = make_type(tag_text, state)
            escape_pre = True
            escape_post = True

        # Tag is a cross-reference or a formatting directive.
        else:
            cmd = tag_text
            space_pos = tag_text.find(" ")

            # Anything identified as a tag inside of a code block is valid,
            # unless it's a matching closing tag.
            if inside_code:
                # Exiting codeblocks and inline code tags.
                if inside_code_tag == cmd[1:]:
                    if cmd == "/codeblock" or cmd == "/gdscript" or cmd == "/csharp":
                        tag_text = ""
                        tag_depth -= 1
                        inside_code = False
                        # Strip newline if the tag was alone on one line.
                        if pre_text[-1] == "\n":
                            pre_text = pre_text[:-1]
                    elif cmd == "/code":
                        tag_text = "``"
                        tag_depth -= 1
                        inside_code = False
                        escape_post = True
                else:
                    if cmd.startswith("/"):
                        print_warning(
                            '{}.xml: Potential error inside of a code tag, found a string that looks like a closing tag "[{}]" in {}.'.format(
                                state.current_class, cmd, context_name
                            ),
                            state,
                        )
                    # Inside a code block every bracketed string is kept verbatim.
                    tag_text = "[" + tag_text + "]"

            # Entering codeblocks and inline code tags.
            elif cmd == "codeblocks":
                tag_depth += 1
                tag_text = "\n.. tabs::"
                inside_code_tabs = True
            elif cmd == "/codeblocks":
                tag_depth -= 1
                tag_text = ""
                inside_code_tabs = False
            elif cmd == "codeblock" or cmd == "gdscript" or cmd == "csharp":
                tag_depth += 1
                if cmd == "gdscript":
                    if not inside_code_tabs:
                        # NOTE(review): the format string has two placeholders but
                        # three arguments; `cmd` is substituted where the context
                        # name was likely intended. Left untouched here.
                        print_error(
                            "{}.xml: GDScript code block is used outside of [codeblocks] in {}.".format(
                                state.current_class, cmd, context_name
                            ),
                            state,
                        )
                    tag_text = "\n .. code-tab:: gdscript\n"
                elif cmd == "csharp":
                    if not inside_code_tabs:
                        # NOTE(review): same placeholder/argument mismatch as above.
                        print_error(
                            "{}.xml: C# code block is used outside of [codeblocks] in {}.".format(
                                state.current_class, cmd, context_name
                            ),
                            state,
                        )
                    tag_text = "\n .. code-tab:: csharp\n"
                else:
                    tag_text = "\n::\n"
                inside_code = True
                inside_code_tag = cmd
            elif cmd == "code":
                tag_text = "``"
                tag_depth += 1
                inside_code = True
                inside_code_tag = cmd
                escape_pre = True

            # Cross-references to items in this or other class documentation pages.
            elif is_in_tagset(cmd, RESERVED_CROSSLINK_TAGS):
                link_target: str = ""
                if space_pos >= 0:
                    link_target = tag_text[space_pos + 1 :].strip()

                if link_target == "":
                    print_error(
                        '{}.xml: Empty cross-reference link "{}" in {}.'.format(state.current_class, cmd, context_name),
                        state,
                    )
                    tag_text = ""
                else:
                    if (
                        cmd.startswith("method")
                        or cmd.startswith("member")
                        or cmd.startswith("signal")
                        or cmd.startswith("constant")
                        or cmd.startswith("annotation")
                        or cmd.startswith("theme_item")
                    ):
                        # Either "Class.member" or a bare member of the current class.
                        if link_target.find(".") != -1:
                            ss = link_target.split(".")
                            # NOTE(review): with more than one ".", the unpack
                            # below raises ValueError after reporting the error.
                            if len(ss) > 2:
                                print_error(
                                    '{}.xml: Bad reference "{}" in {}.'.format(
                                        state.current_class, link_target, context_name
                                    ),
                                    state,
                                )
                            class_param, method_param = ss
                        else:
                            class_param = state.current_class
                            method_param = link_target

                        ref_type = ""
                        if class_param in state.classes:
                            class_def = state.classes[class_param]
                            # NOTE(review): the "constructor" and "operator"
                            # branches below look unreachable — neither prefix
                            # appears in RESERVED_CROSSLINK_TAGS nor in the
                            # startswith() chain above. Confirm before removal.
                            if cmd.startswith("constructor"):
                                if method_param not in class_def.constructors:
                                    print_error(
                                        '{}.xml: Unresolved constructor reference "{}" in {}.'.format(
                                            state.current_class, link_target, context_name
                                        ),
                                        state,
                                    )
                                ref_type = "_constructor"
                            elif cmd.startswith("method"):
                                if method_param not in class_def.methods:
                                    print_error(
                                        '{}.xml: Unresolved method reference "{}" in {}.'.format(
                                            state.current_class, link_target, context_name
                                        ),
                                        state,
                                    )
                                ref_type = "_method"
                            elif cmd.startswith("operator"):
                                if method_param not in class_def.operators:
                                    print_error(
                                        '{}.xml: Unresolved operator reference "{}" in {}.'.format(
                                            state.current_class, link_target, context_name
                                        ),
                                        state,
                                    )
                                ref_type = "_operator"
                            elif cmd.startswith("member"):
                                if method_param not in class_def.properties:
                                    print_error(
                                        '{}.xml: Unresolved member reference "{}" in {}.'.format(
                                            state.current_class, link_target, context_name
                                        ),
                                        state,
                                    )
                                ref_type = "_property"
                            elif cmd.startswith("theme_item"):
                                if method_param not in class_def.theme_items:
                                    print_error(
                                        '{}.xml: Unresolved theme item reference "{}" in {}.'.format(
                                            state.current_class, link_target, context_name
                                        ),
                                        state,
                                    )
                                # NOTE(review): this raises KeyError when the
                                # theme item is missing (lookup right after the
                                # unresolved-reference report).
                                ref_type = "_theme_{}".format(class_def.theme_items[method_param].data_name)
                            elif cmd.startswith("signal"):
                                if method_param not in class_def.signals:
                                    print_error(
                                        '{}.xml: Unresolved signal reference "{}" in {}.'.format(
                                            state.current_class, link_target, context_name
                                        ),
                                        state,
                                    )
                                ref_type = "_signal"
                            elif cmd.startswith("annotation"):
                                if method_param not in class_def.annotations:
                                    print_error(
                                        '{}.xml: Unresolved annotation reference "{}" in {}.'.format(
                                            state.current_class, link_target, context_name
                                        ),
                                        state,
                                    )
                                ref_type = "_annotation"
                            elif cmd.startswith("constant"):
                                found = False

                                # Search in the current class
                                search_class_defs = [class_def]

                                if link_target.find(".") == -1:
                                    # Also search in @GlobalScope as a last resort if no class was specified
                                    search_class_defs.append(state.classes["@GlobalScope"])

                                for search_class_def in search_class_defs:
                                    if method_param in search_class_def.constants:
                                        class_param = search_class_def.name
                                        found = True
                                    else:
                                        # Constants may also live inside an enum.
                                        for enum in search_class_def.enums.values():
                                            if method_param in enum.values:
                                                class_param = search_class_def.name
                                                found = True
                                                break

                                if not found:
                                    print_error(
                                        '{}.xml: Unresolved constant reference "{}" in {}.'.format(
                                            state.current_class, link_target, context_name
                                        ),
                                        state,
                                    )
                                ref_type = "_constant"
                        else:
                            print_error(
                                '{}.xml: Unresolved type reference "{}" in method reference "{}" in {}.'.format(
                                    state.current_class, class_param, link_target, context_name
                                ),
                                state,
                            )

                        # Cross-class references show the class as a prefix.
                        repl_text = method_param
                        if class_param != state.current_class:
                            repl_text = "{}.{}".format(class_param, method_param)
                        tag_text = ":ref:`{}<class_{}{}_{}>`".format(repl_text, class_param, ref_type, method_param)
                        escape_pre = True
                        escape_post = True
                    elif cmd.startswith("enum"):
                        tag_text = make_enum(link_target, state)
                        escape_pre = True
                        escape_post = True
                    elif cmd.startswith("param"):
                        # [param name] is only meaningful inside a description
                        # that actually has parameters.
                        valid_context = (
                            isinstance(context, MethodDef)
                            or isinstance(context, SignalDef)
                            or isinstance(context, AnnotationDef)
                        )
                        if not valid_context:
                            print_error(
                                '{}.xml: Argument reference "{}" used outside of method, signal, or annotation context in {}.'.format(
                                    state.current_class, link_target, context_name
                                ),
                                state,
                            )
                        else:
                            context_params: List[ParameterDef] = context.parameters
                            found = False
                            for param_def in context_params:
                                if param_def.name == link_target:
                                    found = True
                                    break
                            if not found:
                                print_error(
                                    '{}.xml: Unresolved argument reference "{}" in {}.'.format(
                                        state.current_class, link_target, context_name
                                    ),
                                    state,
                                )
                        tag_text = "``{}``".format(link_target)

            # Formatting directives.
            elif is_in_tagset(cmd, ["url"]):
                if cmd.startswith("url="):
                    # URLs are handled in full here as we need to extract the optional link
                    # title to use `make_link`.
                    link_url = cmd[4:]
                    endurl_pos = text.find("[/url]", endq_pos + 1)
                    if endurl_pos == -1:
                        print_error(
                            "{}.xml: Tag depth mismatch for [url]: no closing [/url] in {}.".format(
                                state.current_class, context_name
                            ),
                            state,
                        )
                        break
                    link_title = text[endq_pos + 1 : endurl_pos]
                    tag_text = make_link(link_url, link_title)

                    pre_text = text[:pos]
                    post_text = text[endurl_pos + 6 :]

                    # Escape adjacent characters so the inline markup renders.
                    if pre_text and pre_text[-1] not in MARKUP_ALLOWED_PRECEDENT:
                        pre_text += "\ "
                    if post_text and post_text[0] not in MARKUP_ALLOWED_SUBSEQUENT:
                        post_text = "\ " + post_text

                    text = pre_text + tag_text + post_text
                    pos = len(pre_text) + len(tag_text)
                    continue
                else:
                    print_error(
                        '{}.xml: Misformatted [url] tag "{}" in {}.'.format(state.current_class, cmd, context_name),
                        state,
                    )
            elif cmd == "br":
                # Make a new paragraph instead of a linebreak, rst is not so linebreak friendly
                tag_text = "\n\n"
                # Strip potential leading spaces
                while post_text[0] == " ":
                    post_text = post_text[1:]
            elif cmd == "center" or cmd == "/center":
                if cmd == "/center":
                    tag_depth -= 1
                else:
                    tag_depth += 1
                tag_text = ""
            elif cmd == "i" or cmd == "/i":
                if cmd == "/i":
                    tag_depth -= 1
                    escape_post = True
                else:
                    tag_depth += 1
                    escape_pre = True
                tag_text = "*"
            elif cmd == "b" or cmd == "/b":
                if cmd == "/b":
                    tag_depth -= 1
                    escape_post = True
                else:
                    tag_depth += 1
                    escape_pre = True
                tag_text = "**"
            elif cmd == "u" or cmd == "/u":
                # rST has no underline; the tag is dropped.
                if cmd == "/u":
                    tag_depth -= 1
                    escape_post = True
                else:
                    tag_depth += 1
                    escape_pre = True
                tag_text = ""
            elif cmd == "kbd" or cmd == "/kbd":
                tag_text = "`"
                if cmd == "/kbd":
                    tag_depth -= 1
                    escape_post = True
                else:
                    tag_text = ":kbd:" + tag_text
                    tag_depth += 1
                    escape_pre = True
            # Invalid syntax checks.
            elif cmd.startswith("/"):
                print_error(
                    '{}.xml: Unrecognized closing tag "{}" in {}.'.format(state.current_class, cmd, context_name), state
                )
                tag_text = "[" + tag_text + "]"
            else:
                print_error(
                    '{}.xml: Unrecognized opening tag "{}" in {}.'.format(state.current_class, cmd, context_name), state
                )
                tag_text = "``{}``".format(tag_text)
                escape_pre = True
                escape_post = True

        # Properly escape things like `[Node]s`
        if escape_pre and pre_text and pre_text[-1] not in MARKUP_ALLOWED_PRECEDENT:
            pre_text += "\ "
        if escape_post and post_text and post_text[0] not in MARKUP_ALLOWED_SUBSEQUENT:
            post_text = "\ " + post_text

        # Escape "*" and word-final "_" in the remaining text up to the next
        # tag, so rST doesn't interpret them as emphasis/hyperlink markup.
        next_brac_pos = post_text.find("[", 0)
        iter_pos = 0
        while not inside_code:
            iter_pos = post_text.find("*", iter_pos, next_brac_pos)
            if iter_pos == -1:
                break
            post_text = post_text[:iter_pos] + "\*" + post_text[iter_pos + 1 :]
            iter_pos += 2

        iter_pos = 0
        while not inside_code:
            iter_pos = post_text.find("_", iter_pos, next_brac_pos)
            if iter_pos == -1:
                break
            if not post_text[iter_pos + 1].isalnum():  # don't escape within a snake_case word
                post_text = post_text[:iter_pos] + "\_" + post_text[iter_pos + 1 :]
                iter_pos += 2
            else:
                iter_pos += 1

        text = pre_text + tag_text + post_text
        pos = len(pre_text) + len(tag_text)

    if tag_depth > 0:
        print_error(
            "{}.xml: Tag depth mismatch: too many (or too little) open/close tags in {}.".format(
                state.current_class, context_name
            ),
            state,
        )

    return text
def format_context_name(context: Union[DefinitionBase, None]) -> str:
    """Describe a definition for use in error messages.

    Returns e.g. 'method "free" description', or "unknown context" when no
    definition is available.
    """
    if context is None:
        return "unknown context"
    return '{} "{}" description'.format(context.definition_name, context.name)
def escape_rst(text: str, until_pos: int = -1) -> str:
    """Escape rST markup characters in ``text[:until_pos]``.

    Escapes backslashes, "*" (emphasis), and "_" at the end of a word (which
    rST would read as an inline-hyperlink reference). A negative ``until_pos``
    means "escape the whole string".

    Bug fixes vs. the previous version:
    - ``until_pos=-1`` was passed straight to ``str.find()``, where Python
      treats it as an end-relative index, so the final character was never
      escaped;
    - the escape window is now extended as characters are inserted, so
      characters just before ``until_pos`` are no longer skipped;
    - a trailing "_" no longer raises IndexError now that the window can
      include the final character.
    """
    if until_pos < 0:
        until_pos = len(text)

    # Escape \ character, otherwise it ends up as an escape character in rst
    pos = 0
    while True:
        pos = text.find("\\", pos, until_pos)
        if pos == -1:
            break
        text = text[:pos] + "\\\\" + text[pos + 1 :]
        pos += 2
        until_pos += 1  # Keep the window aligned after the insertion.

    # Escape * character to avoid interpreting it as emphasis
    pos = 0
    while True:
        pos = text.find("*", pos, until_pos)
        if pos == -1:
            break
        text = text[:pos] + "\*" + text[pos + 1 :]
        pos += 2
        until_pos += 1

    # Escape _ character at the end of a word to avoid interpreting it as an inline hyperlink
    pos = 0
    while True:
        pos = text.find("_", pos, until_pos)
        if pos == -1:
            break
        if pos + 1 == len(text) or not text[pos + 1].isalnum():  # don't escape within a snake_case word
            text = text[:pos] + "\_" + text[pos + 1 :]
            pos += 2
            until_pos += 1
        else:
            pos += 1

    return text
def format_codeblock(code_type: str, post_text: str, indent_level: int, state: State) -> Union[Tuple[str, int], None]:
    """Reformat a [codeblock]-style tag's contents for rST.

    Strips the XML indentation (``indent_level`` tabs) from each line of the
    block and re-indents the code by four spaces, as rST literal blocks
    require.

    Args:
        code_type: tag name ("codeblock", "gdscript" or "csharp").
        post_text: text starting at the opening "[<code_type>]" tag.
        indent_level: number of tabs the surrounding XML block is indented by.
        state: global documentation state, used for error reporting.

    Returns:
        A ``(formatted_text, consumed_length)`` tuple, where the length covers
        everything up to (but excluding) the closing tag, or None when the
        closing tag is missing.
    """
    end_pos = post_text.find("[/" + code_type + "]")
    if end_pos == -1:
        # Bug fix: ".format()" used to be applied only to the final string
        # fragment ("] without a closing tag."), so the "{}.xml" placeholder
        # was printed literally instead of the class name.
        print_error("{}.xml: [{}] without a closing tag.".format(state.current_class, code_type), state)
        return None

    code_text = post_text[len("[" + code_type + "]") : end_pos]
    post_text = post_text[end_pos:]

    # Remove extraneous tabs
    code_pos = 0
    while True:
        code_pos = code_text.find("\n", code_pos)
        if code_pos == -1:
            break

        to_skip = 0
        while code_pos + to_skip + 1 < len(code_text) and code_text[code_pos + to_skip + 1] == "\t":
            to_skip += 1

        if to_skip > indent_level:
            print_error(
                "{}.xml: Four spaces should be used for indentation within [{}].".format(
                    state.current_class, code_type
                ),
                state,
            )

        if len(code_text[code_pos + to_skip + 1 :]) == 0:
            # Line is empty after the tabs: keep a bare newline.
            code_text = code_text[:code_pos] + "\n"
            code_pos += 1
        else:
            # Replace the XML tabs with the four-space rST indent.
            code_text = code_text[:code_pos] + "\n    " + code_text[code_pos + to_skip + 1 :]
            code_pos += 5 - to_skip

    return ("\n[" + code_type + "]" + code_text + post_text, len("\n[" + code_type + "]" + code_text))
def format_table(f: TextIO, data: List[Tuple[Optional[str], ...]], remove_empty_columns: bool = False) -> None:
    """Write ``data`` to ``f`` as an rST grid table.

    Column widths come from the widest cell in each column; None cells are
    treated as empty strings. With ``remove_empty_columns`` set, columns whose
    every cell is empty are dropped from the output.
    """
    if not data:
        return

    # Widest cell in each column determines that column's width.
    widths = [0] * len(data[0])
    for row in data:
        for col, cell in enumerate(row):
            widths[col] = max(widths[col], len(cell or ""))

    # Horizontal separator line, shared between header and rows.
    separator = "".join(
        "+" + "-" * (width + 2)
        for width in widths
        if not (width == 0 and remove_empty_columns)
    ) + "+\n"

    f.write(separator)
    for row in data:
        pieces = ["|"]
        for col, cell in enumerate(row):
            if widths[col] == 0 and remove_empty_columns:
                continue
            pieces.append(" " + (cell or "").ljust(widths[col]) + " |")
        f.write("".join(pieces) + "\n")
        f.write(separator)
    f.write("\n")
def sanitize_operator_name(dirty_name: str, state: State) -> str:
    """Map an operator symbol to an identifier-safe slug for anchor names.

    The leading "operator " prefix is stripped first. Unknown operators are
    reported via ``print_error`` and mapped to the "xxx" placeholder.
    """
    clear_name = dirty_name.replace("operator ", "")

    # Symbol -> slug dispatch table; covers every operator the class
    # reference currently documents.
    slugs = {
        "!=": "neq",
        "==": "eq",
        "<": "lt",
        "<=": "lte",
        ">": "gt",
        ">=": "gte",
        "+": "sum",
        "-": "dif",
        "*": "mul",
        "/": "div",
        "%": "mod",
        "**": "pow",
        "unary+": "unplus",
        "unary-": "unminus",
        "<<": "bwsl",
        ">>": "bwsr",
        "&": "bwand",
        "|": "bwor",
        "^": "bwxor",
        "~": "bwnot",
        "[]": "idx",
    }

    if clear_name in slugs:
        return slugs[clear_name]

    # Unknown operator: report it and fall back to the placeholder slug.
    print_error('Unsupported operator type "{}", please add the missing rule.'.format(dirty_name), state)
    return "xxx"
def indent_bullets(text: str) -> str:
    """Indent bullet-point lines by one extra tab.

    Used to properly indent bullet points contained in enum value
    descriptions. The first line is skipped: text gets prepended to it, so a
    bullet there would not render anyway.
    """
    lines = text.splitlines(keepends=True)
    for index in range(1, len(lines)):
        line = lines[index]
        # Position of the first non-tab character.
        tab_count = len(line) - len(line.lstrip("\t"))
        if tab_count < len(line) and line[tab_count] == "-":
            lines[index] = line[:tab_count] + "\t" + line[tab_count:]
    return "".join(lines)
# Allow running the rST generator directly as a script.
if __name__ == "__main__":
    main()
| {
"content_hash": "6fee65e1439e662bf41a510e22ed7d50",
"timestamp": "",
"source": "github",
"line_count": 1856,
"max_line_length": 173,
"avg_line_length": 38.524245689655174,
"alnum_prop": 0.5028600998587432,
"repo_name": "akien-mga/godot",
"id": "519554e02606befff3129d82a6d7d00542279488",
"size": "71615",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "doc/tools/make_rst.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "AIDL",
"bytes": "1633"
},
{
"name": "C",
"bytes": "1045182"
},
{
"name": "C#",
"bytes": "1578818"
},
{
"name": "C++",
"bytes": "38595824"
},
{
"name": "CMake",
"bytes": "606"
},
{
"name": "GAP",
"bytes": "62"
},
{
"name": "GDScript",
"bytes": "66177"
},
{
"name": "GLSL",
"bytes": "836566"
},
{
"name": "Java",
"bytes": "596743"
},
{
"name": "JavaScript",
"bytes": "188454"
},
{
"name": "Kotlin",
"bytes": "84152"
},
{
"name": "Makefile",
"bytes": "1421"
},
{
"name": "Objective-C",
"bytes": "20550"
},
{
"name": "Objective-C++",
"bytes": "371842"
},
{
"name": "PowerShell",
"bytes": "2713"
},
{
"name": "Python",
"bytes": "464605"
},
{
"name": "Shell",
"bytes": "31057"
}
],
"symlink_target": ""
} |
from django.conf import settings
# Kinds of template tags this package can register.
TAG_TYPES = ('function', 'comparison', 'filter', 'block')

# Extra tag library mapping supplied by the project, if any.
LIBRARY = getattr(settings, 'NATIVE_LIBRARY', {})

# Modules searched for native tags; projects override via NATIVE_TAGS.
TAGS = getattr(settings, 'NATIVE_TAGS', (
    'native_tags.contrib.comparison',
    'native_tags.contrib.generic_content',
    'native_tags.contrib.generic_markup',
    'native_tags.contrib.feeds',  # Feedparser
))

# Django builtin tag libraries to load, from DJANGO_BUILTIN_TAGS.
# NOTE(review): exact consumer of these settings is elsewhere in the package.
BUILTIN_TAGS = getattr(settings, 'DJANGO_BUILTIN_TAGS', ())
DEFAULT_CACHE_TIMEOUT = getattr(settings, 'NATIVE_DEFAULT_CACHE_TIMEOUT', None) | {
"content_hash": "736a48e974805f5dd5dd9c4c1befa1ef",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 79,
"avg_line_length": 31.1875,
"alnum_prop": 0.7114228456913828,
"repo_name": "justquick/django-native-tags",
"id": "01d931e44c3b2e3991d44af89b1e792f0936b7a7",
"size": "499",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "native_tags/settings.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "90620"
},
{
"name": "Shell",
"bytes": "3090"
}
],
"symlink_target": ""
} |
"""
A genetic algorithm to find optimal seating arrangements.
usage: seatingchart.py [-h] [--data-file DATA_FILE]
[--population-size POPULATION_SIZE]
[--iterations MAX_ITERATIONS]
[--mutation MUTATION_PROB] [--crossover CROSSOVER_PROB]
[--seed SEED] [--crossover-operator CROSSOVER_OPERATOR]
[--stats-file STATS_FILE]
[--population-file POPULATION_FILE] [--verbose]
[--elitism ELITISM] [--generate] [--roles ROLES]
[--people PEOPLE] [--width WIDTH] [--height HEIGHT]
[--attraction ATTRACTION] [--repulsion REPULSION]
"""
from __future__ import print_function
from __future__ import absolute_import
#from builtins import str
#from builtins import range
import math
import random
import os
import sys
sys.path.insert(0, os.path.abspath('..'))
import levis
from levis import (configuration, crossover, mutation)
from levis.util import spatial
# pylint: disable=too-many-instance-attributes, abstract-method
class SeatingChartGA(
    levis.ElitistGA,
    levis.ScalingProportionateGA,
    levis.FitnessLoggingGA,
    levis.ConfigurableCrossoverGA
):
    """Genetic solution to simple seating chart assignments."""

    EMPTY_ROLE = "-"
    """The role ID for an empty seat"""

    def __init__(self, config=None):
        """Initialize a genetic algorithm for the Seating Chart Problem.

        Args:
            config: optional configuration dict; defaults to an empty dict.
        """
        # Bug fix: the previous signature used a mutable default (config={}),
        # which Python shares between calls — two GA instances created without
        # an explicit config would mutate the same dict via setdefault() below.
        if config is None:
            config = {}
        super(self.__class__, self).__init__(config)

        self.people = self.config.setdefault("people", {})
        self.roles = self.config.setdefault("roles", [])
        self.width = self.config.setdefault("width", 7)
        self.height = self.config.setdefault("height", 7)
        self.repulsion = self.config.setdefault("repulsion", {})

        # SVG rendering is enabled only when an output prefix is configured.
        self.prev_best = None
        self.render_best = False
        if "svg_prefix" in self.config:
            self.render_best = True
            self.svg_prefix = self.config["svg_prefix"]

        # Strategy mapping 2D seat coordinates onto chromosome indices.
        self.map_type = self.config.setdefault("map_type", "naive")
        if self.map_type == "hilbert":
            self.map = spatial.HilbertMap(self.width, self.height)
        else:
            self.map = spatial.NaiveMap(self.width, self.height)

        for x in range(0, self.width):
            for y in range(0, self.height):
                self.map.add(x, y)

        # Upper bound used by score() to turn the edge length to minimize
        # into a fitness to maximize: assume every same-role pair could sit
        # at opposite corners of the room.
        self.max_distance = 0.0
        worst_len = spatial.euclidian(0, 0, self.width, self.height)

        # Count the people holding each role; empty seats are excluded.
        assignments = {}
        for _, person in self.people.items():
            if person[1] != SeatingChartGA.EMPTY_ROLE:
                if person[1] in assignments:
                    assignments[person[1]] += 1
                else:
                    assignments[person[1]] = 1

        for role, count in assignments.items():
            # Number of directed edges in a clique of `count` members.
            edges = (count * (count - 1))
            self.max_distance += edges * worst_len

    @classmethod
    def arg_parser(cls):
        """Extend the inherited argument parser with seating chart options."""
        parser = super(SeatingChartGA, cls).arg_parser()
        parser.add_argument("--map-type", choices=["naive", "hilbert"],
                            default="naive",
                            help="Map vectorization strategy")
        parser.add_argument("--svg-prefix",
                            help="Prefix for SVG renderings of the fittest "
                            "chromosomes from each generation. Requires that "
                            "the svgwrite package be installed.")
        return parser

    def seats_by_role(self, chromosome):
        """Return a dict of ``role -> [seat id]``."""
        seats_role = {}
        for locus, allele in enumerate(chromosome):
            person = self.people[allele]
            if person[1] != SeatingChartGA.EMPTY_ROLE:
                if person[1] not in seats_role:
                    seats_role[person[1]] = []
                seats_role[person[1]].append(self.map.point_at(locus))
        return seats_role

    def score(self, chromosome):
        """Score a chromosome.

        Higher is better: short edges between same-role seats (attraction)
        and long distances between mutually repulsed roles both raise the
        score relative to the precomputed ``max_distance`` baseline.
        """
        seats_role = self.seats_by_role(chromosome)

        # Tally attractive score
        attraction = 0.0
        for role, coords in seats_role.items():
            attraction += spatial.total_edge_length(coords)

        # Tally repulsive score
        repulsion = 0.0
        for role, repulsors in self.repulsion.items():
            repulsed = seats_role[role]
            for repulsor in repulsors:
                repulsees = seats_role[repulsor]
                repulsion += spatial.length_to_nearest(repulsed, repulsees)

        return self.max_distance - attraction + repulsion

    def create(self):
        """Create a random chromosome: a shuffled list of person IDs."""
        people = list(self.people.keys())
        random.shuffle(people)
        return people

    def mutate(self, chromosome):
        """Mutate by swapping two seat assignments."""
        return levis.mutation.swap(chromosome, self.mutation_prob)

    def chromosome_str(self, chromosome):
        """Render a chromosome as its score plus a grid of role IDs."""
        # NOTE(review): math.log is the natural log; log10 looks like the
        # intent for computing the printed column width — confirm upstream.
        w = int(math.floor(math.log(len(self.roles)))) + 1
        txt = str(self.score(chromosome)) + "\n"
        for y in range(0, self.height):
            for x in range(0, self.width):
                locus = self.map.index(x, y)
                person = chromosome[locus]
                role = self.people[person][1]
                role_id = self.roles.index(role)
                txt += str(role_id).rjust(w, " ")
            txt += "\n"
        return txt

    def chromosome_repr(self, chromosome):
        """Compact representation: the raw seat list plus its score."""
        return "%s:%f" % (str(chromosome), self.score(chromosome))

    def post_generate(self):
        """Overloaded ``post_generate()`` hook to enable rendering."""
        super(self.__class__, self).post_generate()

        if self.render_best:
            best = self.scored[0][0]
            # Only render when the best chromosome actually changed.
            if self.prev_best != best:
                self.prev_best = best
                filepath = "%s%i.svg" % (self.svg_prefix, self.iteration)
                self.render(best, filepath)

    def render(self, chromosome, filepath):
        """Render a chromosome to an SVG file."""
        # Imported lazily so the GA runs without svgwrite installed.
        import svgwrite

        margin = 100
        unit = 200
        radius = 50

        width = (self.width + 1) * unit + margin * 2
        height = (self.height + 1) * unit + margin * 2
        doc = svgwrite.Drawing(filename=filepath, size=(width, height))

        # Color theme to match the talk...
        colors = ["#ff9999", "#9999ff", "#99ff99", "#ffffff"]

        # Fill colors at random
        def channel():
            return int(random.triangular(0, 255, 175))

        while len(colors) < len(self.roles):
            colors.append("#%02x%02x%02x" % (channel(), channel(), channel()))

        # Map row, col to pixels
        def origin(row, col):
            x = row * unit + margin
            y = col * unit + margin
            return (x, y)

        def color_of_group(group):
            idx = self.roles.index(group)
            return colors[idx]

        def color_of_person(person_id):
            group = self.people[person_id][1]
            return color_of_group(group)

        # Render seating assignments
        for seat, person in enumerate(chromosome):
            row, col = self.map.point_at(seat)
            x, y = origin(row, col)
            x, y = (x + radius, y + radius)
            doc.add(doc.circle(
                center=(x, y),
                r=radius,
                stroke_width=8,
                stroke="#000",
                fill=color_of_person(person)
            ))

        doc.save()
# pylint: disable=too-many-locals
def create_data(args):
    """Create problem data and write it to a file as JSON.

    Generates fake role names and people, a random repulsion relationship
    between some roles, and pads the map with empty seats before handing the
    merged args dict to ``configuration.write_file``.
    """
    # Imported here to avoid external dependency just to run the GA
    from faker import Factory

    num_roles = args.setdefault("roles", 5)
    num_people = args.setdefault("people", 40)
    map_width = args.setdefault("width", 7)
    map_height = args.setdefault("height", 7)

    fake = Factory.create()
    roles = [fake.job() for _ in range(0, num_roles)]
    repulsed = {}
    people = {}

    # Roughly 30% of roles repulse a random subset of the other roles.
    for role in roles:
        if random.random() <= 0.3:
            repulsed[role] = []
            others = list(roles)
            others.remove(role)
            for _ in range(0, random.randint(1, num_roles - 1)):
                other = random.choice(others)
                repulsed[role].append(other)
                others.remove(other)

    # Each person is a (name, role) tuple keyed by seat/person ID.
    for i in range(0, num_people):
        people[i] = (fake.name(), random.choice(roles))

    # Remaining seats are filled with the "-" empty-seat placeholder.
    roles.append("-")
    for i in range(num_people, map_width * map_height):
        people[i] = ("-", "-")

    # Note: overwrites the scalar "roles"/"people" counts set above with the
    # generated collections before serializing.
    args.update({
        "roles": roles,
        "people": people,
        "repulsion": repulsed
    })

    configuration.write_file(args)
def main():
    """Main entry point.

    Parses the command line; with --generate, writes fresh problem data to
    the config file, otherwise loads the config, runs the GA and prints the
    best seating chart found.
    """
    defaults = {
        "population_size": 100,
        "max_iterations": 100,
        "elitism_pct": 0.01,
        "crossover_operator": "partially_matched",
        "population_file": "population.log",
        "stats_file": "stats.csv"
    }

    # Typo fix: user-visible description previously read "simle".
    description = "Genetic solution for simple seating charts"
    parent = [SeatingChartGA.arg_parser()]
    parser = configuration.get_parser(description, "seatingchart.json", parent)

    parser.add_argument("--generate", action="store_true",
                        help="Generate and store problem data")

    group = parser.add_argument_group("data generation options")
    group.add_argument("--roles", type=int, help="Number of roles")
    group.add_argument("--people", type=int, help="Number of people in seats")
    group.add_argument("--width", type=int, help="Width of map")
    group.add_argument("--height", type=int, help="Height of map")

    args = configuration.read_args(parser)

    if args["generate"]:
        # Data-generation mode: the flag itself must not end up in the file.
        del args["generate"]
        create_data(args)
    else:
        config_file = configuration.read_file(args)
        config = configuration.merge(defaults, config_file, args)
        solver = SeatingChartGA(config)
        solver.solve()
        print(solver.chromosome_str(solver.best()))
# Allow running the seating chart solver directly as a script.
if __name__ == "__main__":
    main()
| {
"content_hash": "114a1cebcc9cf08090f768d64278f5ee",
"timestamp": "",
"source": "github",
"line_count": 308,
"max_line_length": 82,
"avg_line_length": 32.66883116883117,
"alnum_prop": 0.5589346054462333,
"repo_name": "rawg/levis",
"id": "816451cf4213474e875532a9087eab9bbb10533b",
"size": "10062",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/seatingchart.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "7626"
},
{
"name": "Python",
"bytes": "73802"
}
],
"symlink_target": ""
} |
from typing import Callable, List, Optional, Tuple
import numpy as np
import gdsfactory as gf
from gdsfactory.component import Component, ComponentReference
from gdsfactory.components.bend_euler import bend_euler
from gdsfactory.components.straight import straight as straight_function
from gdsfactory.components.taper import taper as taper_function
from gdsfactory.cross_section import strip
from gdsfactory.port import Port, select_ports_optical
from gdsfactory.routing.get_route import get_route
from gdsfactory.routing.utils import direction_ports_from_list_ports, flip
from gdsfactory.types import ComponentSpec, CrossSectionSpec, Number, Routes
def route_south(
component: Component,
optical_routing_type: int = 1,
excluded_ports: Optional[Tuple[str, ...]] = None,
straight_separation: Number = 4.0,
io_gratings_lines: Optional[List[List[ComponentReference]]] = None,
gc_port_name: str = "o1",
bend: ComponentSpec = bend_euler,
straight: ComponentSpec = straight_function,
taper: Optional[ComponentSpec] = taper_function,
select_ports: Callable = select_ports_optical,
cross_section: CrossSectionSpec = strip,
**kwargs,
) -> Routes:
"""Returns Routes to route a component ports to the south.
Args:
component: component to route.
optical_routing_type: routing heuristic `1` or `2`
`1` uses the component size info to estimate the box size.
`2` only looks at the optical port positions to estimate the size.
excluded_ports: list of port names to NOT route.
straight_separation: in um.
io_gratings_lines: list of ports to which the ports produced by this
function will be connected. Supplying this information helps
avoiding straight collisions.
gc_port_name: grating coupler port name.
Works well if the component looks roughly like a rectangular box with:
north ports on the north of the box.
south ports on the south of the box.
east ports on the east of the box.
west ports on the west of the box.
.. plot::
:include-source:
import gdsfactory as gf
c = gf.components.ring_double()
c = gf.Component()
ref = c << gf.components.ring_double()
r = gf.routing.route_south(ref)
for e in r.references:
c.add(e)
c.plot()
"""
xs = gf.get_cross_section(cross_section)
excluded_ports = excluded_ports or []
assert optical_routing_type in {
1,
2,
}, f"optical_routing_type = {optical_routing_type}, not supported "
optical_ports = list(select_ports(component.ports).values())
optical_ports = [p for p in optical_ports if p.name not in excluded_ports]
csi = component.size_info
references = []
lengths = []
bend90 = bend(cross_section=cross_section, **kwargs) if callable(bend) else bend
bend90 = gf.get_component(bend90)
dy = abs(bend90.info["dy"])
# Handle empty list gracefully
if not optical_ports:
return [], []
conn_params = dict(
bend=bend,
straight=straight,
taper=taper,
cross_section=cross_section,
**kwargs,
)
# Used to avoid crossing between straights in special cases
# This could happen when abs(x_port - x_grating) <= 2 * dy
delta_gr_min = 2 * dy + 1
sep = straight_separation
# Get lists of optical ports by orientation
direction_ports = direction_ports_from_list_ports(optical_ports)
north_ports = direction_ports["N"]
north_start = north_ports[: len(north_ports) // 2]
north_finish = north_ports[len(north_ports) // 2 :]
west_ports = direction_ports["W"]
west_ports.reverse()
east_ports = direction_ports["E"]
south_ports = direction_ports["S"]
north_finish.reverse() # Sort right to left
north_start.reverse() # Sort right to left
ordered_ports = north_start + west_ports + south_ports + east_ports + north_finish
def get_index_port_closest_to_x(x, list_ports):
return np.array([abs(x - p.ports[gc_port_name].x) for p in list_ports]).argmin()
def gen_port_from_port(x, y, p, cross_section):
return Port(
name=p.name,
center=(x, y),
orientation=90.0,
width=p.width,
cross_section=cross_section,
)
west_ports.reverse()
y0 = min(p.y for p in ordered_ports) - dy - 0.5
ports_to_route = []
i = 0
optical_xs_tmp = [p.x for p in ordered_ports]
x_optical_min = min(optical_xs_tmp)
x_optical_max = max(optical_xs_tmp)
# Set starting ``x`` on the west side
# ``x`` is the x-coord of the waypoint where the current component port is connected.
# x starts as close as possible to the component.
# For each new port, the distance is increased by the separation.
# The starting x depends on the heuristic chosen : ``1`` or ``2``
if optical_routing_type == 1:
# use component size to know how far to route
x = csi.west - dy - 1
elif optical_routing_type == 2:
# use optical port to know how far to route
x = x_optical_min - dy - 1
else:
raise ValueError(
f"Invalid optical routing type {optical_routing_type!r} not in [1, 2]"
)
# First route the ports facing west
# In case we have to connect these ports to a line of gratings,
# Ensure that the port is aligned with the grating port or
# has enough space for manhattan routing (at least two bend radius)
for p in west_ports:
if io_gratings_lines:
i_grating = get_index_port_closest_to_x(x, io_gratings_lines[-1])
x_gr = io_gratings_lines[-1][i_grating].ports[gc_port_name].x
if abs(x - x_gr) < delta_gr_min:
if x > x_gr:
x = x_gr
elif x < x_gr:
x = x_gr - delta_gr_min
tmp_port = gen_port_from_port(x, y0, p, cross_section=xs)
ports_to_route.append(tmp_port)
route = get_route(input_port=p, output_port=tmp_port, **conn_params)
references.extend(route.references)
lengths.append(route.length)
x -= sep
i += 1
start_straight_length = 0.5
# First-half of north ports
# This ensures that north ports are routed above the top west one
north_start.reverse() # We need them from left to right
if len(north_start) > 0:
y_max = max(p.y for p in west_ports + north_start)
for p in north_start:
tmp_port = gen_port_from_port(x, y0, p, cross_section=xs)
route = get_route(
input_port=p,
output_port=tmp_port,
start_straight_length=start_straight_length + y_max - p.y,
**conn_params,
)
references.extend(route.references)
lengths.append(route.length)
ports_to_route.append(tmp_port)
x -= sep
start_straight_length += sep
# Set starting ``x`` on the east side
if optical_routing_type == 1:
# use component size to know how far to route
x = csi.east + dy + 1
elif optical_routing_type == 2:
# use optical port to know how far to route
x = x_optical_max + dy + 1
else:
raise ValueError(
f"Invalid optical routing type. Got {optical_routing_type}, only (1, 2 supported) "
)
i = 0
# Route the east ports
# In case we have to connect these ports to a line of gratings,
# Ensure that the port is aligned with the grating port or
# has enough space for manhattan routing (at least two bend radius)
start_straight_length = 0.5
for p in east_ports:
if io_gratings_lines:
i_grating = get_index_port_closest_to_x(x, io_gratings_lines[-1])
x_gr = io_gratings_lines[-1][i_grating].ports[gc_port_name].x
if abs(x - x_gr) < delta_gr_min:
if x < x_gr:
x = x_gr
elif x > x_gr:
x = x_gr + delta_gr_min
tmp_port = gen_port_from_port(x, y0, p, cross_section=xs)
route = get_route(
p, tmp_port, start_straight_length=start_straight_length, **conn_params
)
references.extend(route.references)
lengths.append(route.length)
ports_to_route.append(tmp_port)
x += sep
i += 1
# Route the remaining north ports
start_straight_length = 0.5
if len(north_finish) > 0:
y_max = max(p.y for p in east_ports + north_finish)
for p in north_finish:
tmp_port = gen_port_from_port(x, y0, p, cross_section=xs)
ports_to_route.append(tmp_port)
route = get_route(
input_port=p,
output_port=tmp_port,
start_straight_length=start_straight_length + y_max - p.y,
**conn_params,
)
references.extend(route.references)
lengths.append(route.length)
x += sep
start_straight_length += sep
# Add south ports
ports = [flip(p) for p in ports_to_route] + south_ports
return Routes(references=references, ports=ports, lengths=lengths)
if __name__ == "__main__":
# c = gf.components.mmi2x2()
# c = gf.components.ring_single()
c = gf.components.ring_double()
layer = (2, 0)
c = gf.Component()
ref = c << gf.components.ring_double(layer=layer)
r = route_south(ref, bend=gf.components.bend_euler, layer=layer)
for e in r.references:
c.add(e)
print(r.lengths)
c.show(show_ports=True)
| {
"content_hash": "a4df70c7071c96fe07a2271b89caada9",
"timestamp": "",
"source": "github",
"line_count": 275,
"max_line_length": 95,
"avg_line_length": 35.30909090909091,
"alnum_prop": 0.6101956745623069,
"repo_name": "gdsfactory/gdsfactory",
"id": "3f5cf6a0524d74d6a4080721bd28c6f79f818586",
"size": "9710",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "gdsfactory/routing/route_south.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "605"
},
{
"name": "Dockerfile",
"bytes": "31"
},
{
"name": "Makefile",
"bytes": "4572"
},
{
"name": "Python",
"bytes": "2471982"
},
{
"name": "Shell",
"bytes": "671"
},
{
"name": "XS",
"bytes": "10045"
}
],
"symlink_target": ""
} |
'''
Returns the date, e.g.
2022-05-08z
UTC aka gmt aka zulu (hence the z)
Almost iso 8601 http://www.cl.cam.ac.uk/~mgk25/iso-time.html
deviations:
I use lower case z instead of T between date and time
for readability and to designate UTC (zulu)
'''
#<\mod_doc>
#<copyright> Copyright (c) 2020-2022 Dinkum Software
#<history>
# 2006-12-09 tc Initial
# 2022-05-08 tc t=>z,
# converted to python
# [Ed. note] it's been a while since last edit
#</history>
import sys
import argparse
import textwrap # dedent
import dinkum.time
def main():
    """Print the UTC datestamp (see module doc).

    Normally returns None; on error, returns a printable description of
    the error (this implementation always succeeds and returns None).
    """
    print(dinkum.time.datestamp())
#<lic>
'''Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
''' #</lic>
# main() launcher
if __name__ == '__main__':
    try:
        error = main()  # human-readable str on error, None on success
        if error:
            error = "ERROR:" + error  # label the output
        # sys.exit(None) exits with status 0; a string prints it and exits 1.
        sys.exit(error)
    except SystemExit:
        # Pass sys.exit() through untouched.
        raise
    except KeyboardInterrupt:
        # Ctrl-C
        sys.exit( "KeyboardInterrupt: Probably Control-C typed.")
    # Any other Exception propagates on its own.
    assert False, "Can't get here"
| {
"content_hash": "7f3c9326f4551097dce7d0e44339ff83",
"timestamp": "",
"source": "github",
"line_count": 74,
"max_line_length": 75,
"avg_line_length": 25.554054054054053,
"alnum_prop": 0.6530936012691697,
"repo_name": "dinkumsoftware/dinkum",
"id": "4f265591dbff33e1f9441adc35ac7f5c985f96e0",
"size": "2044",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "time/bin/datestamp.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "DIGITAL Command Language",
"bytes": "11925"
},
{
"name": "Emacs Lisp",
"bytes": "10999"
},
{
"name": "HTML",
"bytes": "59920"
},
{
"name": "Perl",
"bytes": "26283"
},
{
"name": "Python",
"bytes": "513836"
},
{
"name": "Shell",
"bytes": "61732"
}
],
"symlink_target": ""
} |
"""Diffie-Hellman elliptic curve key agreement scheme.
Implementation of the Diffie-Hellman key agreement scheme using
elliptic curve cryptography. A key agreement scheme is used
by two entities to establish shared keying data, which will be
later utilized e.g. in symmetric cryptographic scheme.
The two entities must agree on the elliptic curve and key derivation
function to use.
"""
from hashlib import sha256
from math import ceil
from typing import Optional
from btclib.alias import HashF, Point
from btclib.ecc.curve import Curve, mult, secp256k1
from btclib.exceptions import BTClibRuntimeError, BTClibValueError
def ansi_x9_63_kdf(
    z: bytes, size: int, hf: HashF, shared_info: Optional[bytes]
) -> bytes:
    """Return keying data according to ANSI-X9.63-KDF.

    Derive ``size`` bytes of keying data from the shared secret ``z``
    (optionally mixed with ``shared_info``) using hash function ``hf``,
    per the ANSI-X9.63-KDF key derivation function.

    http://www.secg.org/sec1-v2.pdf, section 3.6.1
    """
    digest_size = hf().digest_size
    # The spec caps the derivable output at hash_len * (2^32 - 1) bytes.
    max_size = digest_size * (2 ** 32 - 1)
    if size > max_size:
        raise BTClibValueError(f"cannot derive a key larger than {max_size} bytes")
    suffix = b"" if shared_info is None else shared_info
    rounds = ceil(size / digest_size)
    blocks = []
    # Each round hashes z || counter (4-byte big-endian) || shared_info.
    for counter in range(1, rounds + 1):
        hasher = hf()
        hasher.update(z + counter.to_bytes(4, byteorder="big", signed=False) + suffix)
        blocks.append(hasher.digest())
    return b"".join(blocks)[:size]
def diffie_hellman(
    dU: int,
    QV: Point,
    size: int,
    shared_info: Optional[bytes] = None,
    ec: Curve = secp256k1,
    hf: HashF = sha256,
) -> bytes:
    """Diffie-Hellman elliptic curve key agreement scheme.

    Compute the shared point dU*QV, serialize its x-coordinate, and derive
    ``size`` bytes of keying data from it with ANSI-X9.63-KDF.

    http://www.secg.org/sec1-v2.pdf, section 6.1
    """
    shared_point = mult(dU, QV, ec)
    # edge case that cannot be reproduced in the test suite
    if shared_point[1] == 0:
        err_msg = "invalid (INF) key"  # pragma: no cover
        raise BTClibRuntimeError(err_msg)  # pragma: no cover
    # Only the x-coordinate of the shared point feeds the KDF.
    z = shared_point[0].to_bytes(ec.p_size, byteorder="big", signed=False)
    return ansi_x9_63_kdf(z, size, hf, shared_info)
| {
"content_hash": "7a376499a1d1a9b57b0cf7ee947b8cfd",
"timestamp": "",
"source": "github",
"line_count": 68,
"max_line_length": 86,
"avg_line_length": 33.23529411764706,
"alnum_prop": 0.6712389380530973,
"repo_name": "fametrano/BitcoinBlockchainTechnology",
"id": "a571f1caa38b5b163e559637310572b7f54b93cf",
"size": "2636",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "btclib/ecc/dh.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1048"
},
{
"name": "Python",
"bytes": "254936"
}
],
"symlink_target": ""
} |
"""Setuptools entry point."""
import codecs
import os
from setuptools import setup
import pytest_splinter
dirname = os.path.dirname(__file__)


def _read_rst(filename):
    """Read a UTF-8 documentation file living next to this script."""
    with codecs.open(os.path.join(dirname, filename), encoding='utf-8') as f:
        return f.read()


# Long description is the three docs joined by newlines, in this order.
long_description = '\n'.join(
    [_read_rst('README.rst'), _read_rst('AUTHORS.rst'), _read_rst('CHANGES.rst')]
)
setup(
    name='pytest-splinter',
    description='Splinter plugin for pytest testing framework',
    long_description=long_description,
    author='Anatoly Bubenkov, Paylogic International and others',
    license='MIT license',
    author_email='bubenkoff@gmail.com',
    version=pytest_splinter.__version__,
    include_package_data=True,
    url='https://github.com/pytest-dev/pytest-splinter',
    install_requires=[
        'setuptools',
        'splinter>=0.7.2',
        'pytest',
    ],
    classifiers=[
        'Development Status :: 6 - Mature',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Operating System :: POSIX',
        'Operating System :: Microsoft :: Windows',
        'Operating System :: MacOS :: MacOS X',
        'Topic :: Software Development :: Testing',
        'Topic :: Software Development :: Libraries',
        'Topic :: Utilities',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 3',
    ] + [
        'Programming Language :: Python :: %s' % version
        for version in '2.7 3.0 3.1 3.2 3.3 3.4'.split()
    ],
    tests_require=['tox'],
    entry_points={'pytest11': [
        'pytest-splinter=pytest_splinter.plugin',
    ]},
    packages=['pytest_splinter'],
)
| {
"content_hash": "5ed91d3dd775922e647cb9f75727219c",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 98,
"avg_line_length": 33.9,
"alnum_prop": 0.6200589970501474,
"repo_name": "pelme/pytest-splinter",
"id": "e22cc6bb122f13405ea989574756235e153ab933",
"size": "1695",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "402"
},
{
"name": "Python",
"bytes": "34283"
}
],
"symlink_target": ""
} |
import unittest
import pinq
class queryable_join_tests(unittest.TestCase):
    """Tests for ``queryable.join(other, key, other_key, result_transform)``.

    The join pairs elements whose selected keys are equal; the last argument
    transforms each matched pair into the emitted result.
    """
    def setUp(self):
        # queryable1 wraps 1..10, queryable2 wraps 11..20.
        self.queryable1 = pinq.as_queryable(range(1, 11))
        self.queryable2 = pinq.as_queryable(range(11, 21))
    def test_join_all(self):
        # Every x in 1..10 matches exactly one y in 11..20 (x == y - 10),
        # so all ten pairs are emitted.
        self.assertEqual(list(self.queryable1.join(
            self.queryable2, lambda x: x, lambda x: x - 10, lambda x: x)), [
            (1, 11), (2, 12), (3, 13), (4, 14), (5, 15), (6, 16), (7, 17),
            (8, 18), (9, 19), (10, 20)])
    def test_join_some(self):
        # Self-join on x == x % 3: only left keys 1 and 2 can match.
        self.assertEqual(list(self.queryable1.join(
            self.queryable1, lambda x: x, lambda x: x % 3, lambda x: x)), [
            (1, 1), (1, 4), (1, 7), (1, 10), (2, 2), (2, 5), (2, 8)])
    def test_join_none(self):
        # The key ranges 1..10 and 11..20 are disjoint, so nothing matches.
        self.assertEqual(list(self.queryable1.join(
            self.queryable2, lambda x: x, lambda x: x, lambda x: x)), [])
    def test_join_some_transform_result(self):
        # A two-argument transform receives (left, right) and here keeps
        # only the right element of each match.
        self.assertEqual(list(self.queryable1.join(
            self.queryable1, lambda x: x, lambda x: x % 3, lambda x, y: y)), [
            1, 4, 7, 10, 2, 5, 8])
    def test_join_other_type_error(self):
        # Non-queryable "other" argument is rejected.
        self.assertRaises(TypeError, self.queryable1.join, 100,
                          lambda x: x, lambda x: x, lambda x: x)
    def test_join_key_selector_type_error(self):
        # Non-callable key selector is rejected.
        self.assertRaises(TypeError, self.queryable1.join, self.queryable1,
                          "ten", lambda x: x, lambda x: x)
    def test_join_other_key_selector_type_error(self):
        # Non-callable other-key selector is rejected.
        self.assertRaises(TypeError, self.queryable1.join, self.queryable1,
                          lambda x: x, "identity", lambda x: x)
    def test_join_result_transform_type_error(self):
        # Non-callable result transform is rejected.
        self.assertRaises(TypeError, self.queryable1.join, self.queryable1,
                          lambda x: x, lambda x: x, "test")
| {
"content_hash": "4a47f3d1aeaf107ce1a5f46f6b3063e4",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 78,
"avg_line_length": 41.111111111111114,
"alnum_prop": 0.5654054054054054,
"repo_name": "dlshriver/pinq",
"id": "48d1434263242abae817385cd100573608c18a68",
"size": "1850",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_queryable_join.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "99468"
}
],
"symlink_target": ""
} |
"""fMRI preprocessing workflow."""
from .. import config
def main():
    """Entry point.

    Orchestrates a full fMRIPrep run: parses CLI arguments, builds the
    nipype workflow inside a sandboxed subprocess, executes it, and always
    finishes (via ``finally``) by generating reports and derivative
    metadata. Terminates through ``sys.exit`` with a non-zero status on any
    failure.
    """
    from os import EX_SOFTWARE
    from pathlib import Path
    import sys
    import gc
    from multiprocessing import Process, Manager
    from .parser import parse_args
    from ..utils.bids import write_derivative_description, write_bidsignore
    parse_args()
    # sentry_sdk stays None when telemetry is disabled; later code checks it.
    sentry_sdk = None
    if not config.execution.notrack:
        import sentry_sdk
        from ..utils.sentry import sentry_setup
        sentry_setup()
    # CRITICAL Save the config to a file. This is necessary because the execution graph
    # is built as a separate process to keep the memory footprint low. The most
    # straightforward way to communicate with the child process is via the filesystem.
    config_file = config.execution.work_dir / config.execution.run_uuid / "config.toml"
    config_file.parent.mkdir(exist_ok=True, parents=True)
    config.to_filename(config_file)
    # CRITICAL Call build_workflow(config_file, retval) in a subprocess.
    # Because Python on Linux does not ever free virtual memory (VM), running the
    # workflow construction jailed within a process preempts excessive VM buildup.
    with Manager() as mgr:
        from .workflow import build_workflow
        retval = mgr.dict()
        p = Process(target=build_workflow, args=(str(config_file), retval))
        p.start()
        p.join()
        # A crashed child (non-zero exitcode) wins over the reported code.
        retcode = p.exitcode or retval.get("return_code", 0)
        fmriprep_wf = retval.get("workflow", None)
    # CRITICAL Load the config from the file. This is necessary because the ``build_workflow``
    # function executed constrained in a process may change the config (and thus the global
    # state of fMRIPrep).
    config.load(config_file)
    if config.execution.reports_only:
        sys.exit(int(retcode > 0))
    if fmriprep_wf and config.execution.write_graph:
        fmriprep_wf.write_graph(graph2use="colored", format="svg", simple_form=True)
    # A missing workflow object is an internal (software) error.
    retcode = retcode or (fmriprep_wf is None) * EX_SOFTWARE
    if retcode != 0:
        sys.exit(retcode)
    # Generate boilerplate (also in a subprocess, same VM rationale as above)
    with Manager() as mgr:
        from .workflow import build_boilerplate
        p = Process(target=build_boilerplate, args=(str(config_file), fmriprep_wf))
        p.start()
        p.join()
    if config.execution.boilerplate_only:
        sys.exit(int(retcode > 0))
    # Clean up master process before running workflow, which may create forks
    gc.collect()
    # Sentry tracking
    if sentry_sdk is not None:
        with sentry_sdk.configure_scope() as scope:
            scope.set_tag("run_uuid", config.execution.run_uuid)
            scope.set_tag("npart", len(config.execution.participant_label))
        sentry_sdk.add_breadcrumb(message="fMRIPrep started", level="info")
        sentry_sdk.capture_message("fMRIPrep started", level="info")
    config.loggers.workflow.log(
        15,
        "\n".join(
            ["fMRIPrep config:"] + ["\t\t%s" % s for s in config.dumps().splitlines()]
        ),
    )
    config.loggers.workflow.log(25, "fMRIPrep started!")
    errno = 1  # Default is error exit unless otherwise set
    try:
        fmriprep_wf.run(**config.nipype.get_plugin())
    except Exception as e:
        # Report crashfiles to telemetry before re-raising.
        if not config.execution.notrack:
            from ..utils.sentry import process_crashfile
            crashfolders = [
                config.execution.fmriprep_dir
                / "sub-{}".format(s)
                / "log"
                / config.execution.run_uuid
                for s in config.execution.participant_label
            ]
            for crashfolder in crashfolders:
                for crashfile in crashfolder.glob("crash*.*"):
                    process_crashfile(crashfile)
            # Node-level crashes already produced crashfiles; only capture
            # exceptions that are not the generic nipype wrapper message.
            if "Workflow did not execute cleanly" not in str(e):
                sentry_sdk.capture_exception(e)
        config.loggers.workflow.critical("fMRIPrep failed: %s", e)
        raise
    else:
        config.loggers.workflow.log(25, "fMRIPrep finished successfully!")
        if not config.execution.notrack:
            success_message = "fMRIPrep finished without errors"
            sentry_sdk.add_breadcrumb(message=success_message, level="info")
            sentry_sdk.capture_message(success_message, level="info")
        # Bother users with the boilerplate only iff the workflow went okay.
        boiler_file = config.execution.fmriprep_dir / "logs" / "CITATION.md"
        if boiler_file.exists():
            if config.environment.exec_env in (
                "singularity",
                "docker",
                "fmriprep-docker",
            ):
                # Inside a container the host path differs; show a placeholder.
                boiler_file = Path("<OUTPUT_PATH>") / boiler_file.relative_to(
                    config.execution.output_dir
                )
            config.loggers.workflow.log(
                25,
                "Works derived from this fMRIPrep execution should include the "
                f"boilerplate text found in {boiler_file}.",
            )
        if config.workflow.run_reconall:
            from templateflow import api
            from niworkflows.utils.misc import _copy_any
            # Ship the FreeSurfer segmentation label tables with the derivatives.
            dseg_tsv = str(api.get("fsaverage", suffix="dseg", extension=[".tsv"]))
            _copy_any(
                dseg_tsv, str(config.execution.fmriprep_dir / "desc-aseg_dseg.tsv")
            )
            _copy_any(
                dseg_tsv, str(config.execution.fmriprep_dir / "desc-aparcaseg_dseg.tsv")
            )
        errno = 0
    finally:
        from fmriprep.reports.core import generate_reports
        from pkg_resources import resource_filename as pkgrf
        # Generate reports phase (runs whether the workflow succeeded or not)
        failed_reports = generate_reports(
            config.execution.participant_label,
            config.execution.fmriprep_dir,
            config.execution.run_uuid,
            config=pkgrf("fmriprep", "data/reports-spec.yml"),
            packagename="fmriprep",
        )
        write_derivative_description(
            config.execution.bids_dir, config.execution.fmriprep_dir
        )
        write_bidsignore(config.execution.fmriprep_dir)
        if failed_reports and not config.execution.notrack:
            sentry_sdk.capture_message(
                "Report generation failed for %d subjects" % failed_reports,
                level="error",
            )
        # Exit 1 if the workflow failed OR any report failed; 0 otherwise.
        sys.exit(int((errno + failed_reports) > 0))
if __name__ == "__main__":
raise RuntimeError(
"fmriprep/cli/run.py should not be run directly;\n"
"Please `pip install` fmriprep and use the `fmriprep` command"
)
| {
"content_hash": "a8cbd2235da9e844a3fcfa0c0f6acc1b",
"timestamp": "",
"source": "github",
"line_count": 176,
"max_line_length": 94,
"avg_line_length": 37.36363636363637,
"alnum_prop": 0.6155717761557178,
"repo_name": "poldracklab/preprocessing-workflow",
"id": "65adb70bd3c57f0bc682efe04e053a4b4704703b",
"size": "6622",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "fmriprep/cli/run.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "146866"
},
{
"name": "Shell",
"bytes": "559"
}
],
"symlink_target": ""
} |
"""
Search postfix logs for emails by making a match on sender and recipient.
"""
__author__ = "Khosrow Ebrahimpour"
__version__ = "0.4.3"
__license__ = "MIT"
| {
"content_hash": "89b027c687407d9132ccf1e109f04219",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 73,
"avg_line_length": 26.5,
"alnum_prop": 0.6540880503144654,
"repo_name": "khosrow/track_msg",
"id": "7c46e165317b29eb0b2a63d2af28c63b9a8fe500",
"size": "159",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "track_msg/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "18585"
}
],
"symlink_target": ""
} |
import os
from django.db import migrations
from django.db.utils import DataError
def check_legacy_data(apps, schema_editor):
    """
    Abort the migration if any legacy site fields still contain data.

    Raises DataError if any Site still carries a legacy ASN or legacy
    contact data, unless the NETBOX_DELETE_LEGACY_DATA environment variable
    is set, in which case both safeguards are bypassed.
    """
    Site = apps.get_model('dcim', 'Site')
    # Bypass both safeguards (and skip the count queries entirely) when the
    # operator has explicitly opted in to deleting any remaining legacy data.
    if 'NETBOX_DELETE_LEGACY_DATA' in os.environ:
        return
    site_count = Site.objects.exclude(asn__isnull=True).count()
    if site_count:
        raise DataError(
            f"Unable to proceed with deleting asn field from Site model: Found {site_count} sites with "
            f"legacy ASN data. Please ensure all legacy site ASN data has been migrated to ASN objects "
            f"before proceeding. Or, set the NETBOX_DELETE_LEGACY_DATA environment variable to bypass "
            f"this safeguard and delete all legacy site ASN data."
        )
    site_count = Site.objects.exclude(contact_name='', contact_phone='', contact_email='').count()
    if site_count:
        raise DataError(
            f"Unable to proceed with deleting contact fields from Site model: Found {site_count} sites "
            f"with legacy contact data. Please ensure all legacy site contact data has been migrated to "
            f"contact objects before proceeding. Or, set the NETBOX_DELETE_LEGACY_DATA environment "
            f"variable to bypass this safeguard and delete all legacy site contact data."
        )
class Migration(migrations.Migration):
    # Removes the deprecated Site fields (asn and the contact_* trio), but
    # only after check_legacy_data confirms they no longer hold data (or the
    # NETBOX_DELETE_LEGACY_DATA bypass is set).
    dependencies = [
        ('dcim', '0144_fix_cable_abs_length'),
    ]
    operations = [
        # Safeguard first: abort before any destructive schema change.
        migrations.RunPython(
            code=check_legacy_data,
            reverse_code=migrations.RunPython.noop
        ),
        migrations.RemoveField(
            model_name='site',
            name='asn',
        ),
        migrations.RemoveField(
            model_name='site',
            name='contact_email',
        ),
        migrations.RemoveField(
            model_name='site',
            name='contact_name',
        ),
        migrations.RemoveField(
            model_name='site',
            name='contact_phone',
        ),
    ]
| {
"content_hash": "d601366b4b377ee5000ba21853df4637",
"timestamp": "",
"source": "github",
"line_count": 59,
"max_line_length": 105,
"avg_line_length": 35.644067796610166,
"alnum_prop": 0.6214931050879696,
"repo_name": "digitalocean/netbox",
"id": "86918447d7af542f5224df099242682c0a0e4787",
"size": "2103",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "netbox/dcim/migrations/0145_site_remove_deprecated_fields.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "189339"
},
{
"name": "HTML",
"bytes": "570800"
},
{
"name": "JavaScript",
"bytes": "326125"
},
{
"name": "Python",
"bytes": "1815170"
},
{
"name": "Shell",
"bytes": "2786"
}
],
"symlink_target": ""
} |
"""Constructs for different kinds of queries and managers."""
import ast
import operator
from datetime import date
from functools import reduce
from itertools import chain
from django.db import models
from django.db.models import F, Q
from lark import Lark, Transformer, v_args
from studies.fields import CONDITIONS, LANGUAGES
# Textual constants mapped to python literals.
# NOTE(review): CONST_MAPPING is not referenced elsewhere in this module —
# confirm it is still needed before relying on it.
CONST_MAPPING = {"true": True, "false": False, "null": None}
# Long-form gender terms -> the single-letter codes stored on Child.gender.
GENDER_MAPPING = {"male": "m", "female": "f", "other": "o"}
# Valid criteria-DSL targets, derived from the model field choice tuples.
CONDITION_FIELDS = {condition_tuple[0] for condition_tuple in CONDITIONS}
LANGUAGE_FIELDS = {f"speaks_{language_tuple[0]}" for language_tuple in LANGUAGES}
# Lark grammar for study criteria expressions. The language/condition
# terminals are interpolated from the field sets above via str.format.
QUERY_GRAMMAR = """
?start: bool_expr
?bool_expr: bool_term ("OR" bool_term)*
?bool_term: bool_factor ("AND" bool_factor)*
?bool_factor: not_bool_factor
| "(" bool_expr ")"
| relation_expr
?relation_expr: gender_comparison
| gestational_age_comparison
| age_in_days_comparison
| language_comparison
| condition_comparison
| language_count_comparison
not_bool_factor: "NOT" bool_factor
gender_comparison: "gender" (EQ | NE) gender_target
// 24 to 40 weeks
gestational_age_comparison: "gestational_age_in_weeks" comparator GESTATIONAL_AGE_AS_WEEKS
age_in_days_comparison: "age_in_days" comparator INT
language_count_comparison: ("n_languages" | "num_languages") comparator INT
comparator: EQ | NE | LT | LTE | GT | GTE
gender_target: MALE | FEMALE | OTHER_GENDER | UNSPECIFIED
language_comparison: LANGUAGE_TARGET
condition_comparison: CONDITION_TARGET
// TERMINALS
LANGUAGE_TARGET: {language_targets}
CONDITION_TARGET: {condition_targets}
GESTATIONAL_AGE_AS_WEEKS: /(2[4-9]|3[0-9]|40)/i | UNSPECIFIED
EQ: "="
NE: "!="
LT: "<"
LTE: "<="
GT: ">"
GTE: ">="
TRUE: "true"i
FALSE: "false"i
NULL: "null"i
MALE: "male"i | "m"i
FEMALE: "female"i | "f"i
OTHER_GENDER: "other"i | "o"i
UNSPECIFIED: "na"i | "n/a"i
%import common.INT
%import common.WS
%ignore WS
""".format(
    language_targets=" | ".join([f'"{target}"' for target in LANGUAGE_FIELDS]),
    condition_targets=" | ".join([f'"{target}"' for target in CONDITION_FIELDS]),
)
# Module-level singleton parser; reused by every compile_expression call.
QUERY_DSL_PARSER = Lark(QUERY_GRAMMAR, parser="earley")
def get_child_eligibility_for_study(child_obj, study_obj):
    """Return True when the child is both age-eligible for the study and
    satisfies the study's criteria expression."""
    in_age_range = _child_in_age_range_for_study(child_obj, study_obj)
    # Short-circuit: the criteria expression is only evaluated for children
    # that are already in the study's age range.
    return in_age_range and get_child_eligibility(
        child_obj, study_obj.criteria_expression
    )
def _child_in_age_range_for_study(child, study):
"""Check if child in age range for study, using same age calculations as in study detail and response data.
"""
if not child.birthday:
return False
# Age ranges are defined in DAYS, using shorthand of year = 365 days, month = 30 days,
# to provide a reliable actual unit of time rather than calendar "months" and "years" which vary in duration.
# See logic used in web/studies/study-detail.html to display eligibility to participant,
# help-text provided to researchers in studies/templates/studies/_study_fields.html,
# and documentation for researchers at
# https://lookit.readthedocs.io/en/develop/researchers-set-study-fields.html#minimum-and-maximum-age-cutoffs
min_age_in_days_estimate = (
(study.min_age_years * 365) + (study.min_age_months * 30) + study.min_age_days
)
max_age_in_days_estimate = (
(study.max_age_years * 365) + (study.max_age_months * 30) + study.max_age_days
)
age_in_days = (date.today() - child.birthday).days
return min_age_in_days_estimate <= age_in_days <= max_age_in_days_estimate
def get_child_eligibility(child_obj, criteria_expr):
    """Evaluate ``criteria_expr`` against the child.

    An empty/falsy expression means the study has no criteria, so every
    child is eligible.
    """
    if not criteria_expr:
        return True
    predicate = compile_expression(criteria_expr)
    return bool(predicate(_get_expanded_child(child_obj)))
def compile_expression(boolean_algebra_expression: str):
    """Compile a boolean algebra expression into a python function.

    Args:
        boolean_algebra_expression: a string boolean algebra expression.

    Returns:
        A one-argument function taking an expanded child dict.

    Raises:
        lark.exceptions.ParseError: in case we cannot parse the boolean algebra.
    """
    if boolean_algebra_expression:
        parse_tree = QUERY_DSL_PARSER.parse(boolean_algebra_expression)
        predicate_source = FunctionTransformer().transform(parse_tree)
    else:
        # No criteria given: the predicate is vacuously true.
        predicate_source = "True"
    # NOTE: the exec'd source is either the literal "True" or output of the
    # grammar-constrained transformer, not raw user input.
    func_text = f"def property_tester(child_obj): return {predicate_source}"
    code_object = compile(ast.parse(func_text, mode="exec"), filename="temp.py", mode="exec")
    namespace = {}
    exec(code_object, namespace)
    return namespace["property_tester"]
def _get_expanded_child(child_object):
    """Expand a child object so the compiled criteria can evaluate it.

    The output is flat: every key is first-level, matching the lookups
    generated by FunctionTransformer.

    Args:
        child_object: an accounts.models.Child instance.

    Returns:
        A dict representing the child.
    """
    expanded = _to_dict(child_object)

    # 1) Replace birthday with age in days.
    expanded["age_in_days"] = (date.today() - expanded.pop("birthday")).days

    # 2) Flatten existing conditions into top-level keys.
    expanded.update(dict(expanded.pop("existing_conditions").items()))

    # 3) Flatten spoken languages into "speaks_<code>" keys.
    expanded.update(
        {
            f"speaks_{langcode}": spoken
            for langcode, spoken in expanded.pop("languages_spoken").items()
        }
    )

    # 4) Convert the gestational-age enum to a scalar number of weeks.
    expanded["gestational_age_in_weeks"] = _gestational_age_enum_value_to_weeks(
        expanded.pop("gestational_age_at_birth")
    )
    return expanded
def _to_dict(model_instance):
"""Better version of django.forms.models.model_to_dict.
Args:
model_instance: A django model instance.
Returns:
A dictionary formed from a model instance.
"""
opts = model_instance._meta
data = {}
for f in chain(opts.concrete_fields, opts.private_fields):
data[f.name] = f.value_from_object(model_instance)
return data
def _gestational_age_enum_value_to_weeks(enum_value: int):
"""Convert enum value on child object to actual # of weeks.
This enables us to directly query the expanded child object with a
scalar value. 0 == "under 24 weeks"; 17 = "Over 40 weeks". To see
enumerated values, please reference studies/fields.py.
"""
return min(max(23, enum_value + 23), 40) if enum_value else None
@v_args(inline=True)
class FunctionTransformer(Transformer):
    """Turn a parsed criteria-DSL tree into python source for a predicate.

    Each handler receives the children of its grammar rule (inlined by
    ``@v_args``) and returns a python expression string; compile_expression
    splices the root result into the body of a one-argument function that is
    evaluated against an "expanded child" dict (see _get_expanded_child).
    """
    def bool_expr(self, bool_term, *others):
        # OR-chain of terms, parenthesized to preserve precedence.
        or_clauses = " ".join(f"or {other}" for other in others)
        return f"({bool_term} {or_clauses})"
    def bool_term(self, bool_factor, *others):
        # AND-chain of factors, parenthesized to preserve precedence.
        and_clauses = " ".join(f"and {other}" for other in others)
        return f"({bool_factor} {and_clauses})"
    def gender_comparison(self, comparator, target_gender):
        # comparator here is the raw "=" / "!=" token (EQ | NE), not the
        # comparator rule, so "=" is normalized inline.
        return f"child_obj.get('gender') {'==' if comparator == '=' else comparator} {target_gender}"
    def gestational_age_comparison(self, comparator, num_weeks):
        """False if no_answer is provided."""
        if num_weeks.lower() in ("na", "n/a"):
            # TODO: enhance validation layer so that a non-equals comparator will provide a sensible
            # error message.
            return f"child_obj.get('gestational_age_in_weeks') {comparator} None"
        else:
            return (
                f"child_obj.get('gestational_age_in_weeks') {comparator} {num_weeks} "
                "if child_obj.get('gestational_age_in_weeks') else False"
            )
    def age_in_days_comparison(self, comparator, num_days):
        return f"child_obj.get('age_in_days') {comparator} {num_days}"
    def language_comparison(self, lang_target):
        # Bare language target, e.g. "speaks_en" -> dict lookup, default False.
        return f"child_obj.get('{lang_target}', False)"
    def condition_comparison(self, condition_target):
        # Bare condition target -> dict lookup, default False.
        return f"child_obj.get('{condition_target}', False)"
    def language_count_comparison(self, comparator, num_langs):
        # Counts the truthy "speaks_*" keys of the expanded child.
        return (
            f"len({{k: v for k, v in child_obj.items() if k.startswith('speaks_') and v}}) "
            f"{comparator} {num_langs}"
        )
    def gender_target(self, gender):
        # Normalize long-form gender names to the stored single-letter code.
        gender = gender.lower()
        return f"'{GENDER_MAPPING.get(gender, gender)}'"
    def comparator(self, relation):
        # Normalize the DSL's single "=" to python's "==".
        return "==" if relation == "=" else relation
    def not_bool_factor(self, bool_factor):
        return f"not {bool_factor}"
class BitfieldQuerySet(models.QuerySet):
    """A QuerySet with bitwise membership queries over integer bitfields.

    Both helpers rely on the same SQL "bit hack": for a positive row value
    ``v`` and a mask ``m``, ``v < v + (v & m)`` holds exactly when ``v``
    shares at least one set bit with ``m``. Expressed through the ORM as
    ``F(field) + F(field).bitand(mask)``, it renders roughly as:

        WHERE ...
        "accounts_child"."existing_conditions" <
            (("accounts_child"."existing_conditions" +
              (1 * ("accounts_child"."existing_conditions" & 12))))

    ``has_one_of`` ORs every requested bit into a single mask and applies
    the hack once; ``has_all_of`` applies the hack once per individual bit
    (each a power of two) and ANDs those clauses together in the WHERE.
    """

    def has_one_of(self, field_name: str, bitmasks: list):
        """Filter to rows whose ``field_name`` has at least one bit of ``bitmasks``.

        Args:
            field_name: The field which we will be querying against - usually a BigInt
            bitmasks: the list of integers which will serve as bitmasks

        Returns:
            A filtered queryset.
        """
        combined_mask = reduce(operator.or_, bitmasks, 0)
        return self.filter(
            **{
                f"{field_name}__gt": 0,
                # Overlap test: value + (value & mask) > value iff any bit matches.
                f"{field_name}__lt": F(field_name) + F(field_name).bitand(combined_mask),
            }
        )

    def has_all_of(self, field_name: str, bitmasks: list):
        """Filter to rows whose ``field_name`` has every bit of ``bitmasks``.

        Args:
            field_name: The field which we will be querying against - usually a BigInt
            bitmasks: the list of integers which will serve as bitmasks

        Returns:
            A filtered queryset.
        """
        filter_query = Q(**{f"{field_name}__gt": 0})
        for mask in bitmasks:
            # One overlap clause per bit; ANDing them requires all bits set.
            filter_query &= Q(
                **{f"{field_name}__lt": F(field_name) + F(field_name).bitand(mask)}
            )
        return self.filter(filter_query)
| {
"content_hash": "096fb8b575b93bc2b1937b786ca52434",
"timestamp": "",
"source": "github",
"line_count": 340,
"max_line_length": 119,
"avg_line_length": 33.20294117647059,
"alnum_prop": 0.657631322526353,
"repo_name": "CenterForOpenScience/lookit-api",
"id": "684659aff0ec690ae963f8e0668489dbb9b51867",
"size": "11289",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "accounts/queries.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "11022"
},
{
"name": "HTML",
"bytes": "185393"
},
{
"name": "Python",
"bytes": "481700"
},
{
"name": "Shell",
"bytes": "1166"
}
],
"symlink_target": ""
} |
import re
def test_phones_on_home_page(app):
    """The merged phone column on the home page must match the edit page's fields."""
    from_home_page = app.contact.get_contact_list()[1]
    from_edit_page = app.contact.get_contact_info_from_edit_page(1)
    assert from_home_page.all_phones_from_home_page == merge_phones_like_on_home_page(from_edit_page)
def test_phones_on_contact_view_page(app):
    """Each phone on the view page must equal the same field on the edit page."""
    from_view_page = app.contact.get_contact_from_view_page(0)
    from_edit_page = app.contact.get_contact_info_from_edit_page(0)
    for field in ("homephone", "mobile", "workphone"):
        assert getattr(from_view_page, field) == getattr(from_edit_page, field)
def clear(s):
    """Strip characters that differ between pages from a phone number.

    The home page keeps only '+' and digits, so parentheses, spaces and
    dashes are removed before comparison.
    """
    return re.sub(r"[() -]", "", s)
def merge_phones_like_on_home_page(contact):
    """Join the contact's phones with newlines, as the home page displays them.

    None values are dropped, each number is normalized with clear(), and
    numbers that end up empty after cleaning are filtered out.
    """
    raw_numbers = [contact.homephone, contact.mobile, contact.workphone]
    cleaned = [clear(number) for number in raw_numbers if number is not None]
    return "\n".join(number for number in cleaned if number != "")
| {
"content_hash": "869fee75f2fd7b2350ee9241c906c393",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 117,
"avg_line_length": 63.925925925925924,
"alnum_prop": 0.6952491309385863,
"repo_name": "elenagradovich/python_training",
"id": "eca0e9187143987aeffd3c45a11cdadeda8a04ab",
"size": "2005",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/test_phones.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "37676"
}
],
"symlink_target": ""
} |
"""
Plot Settings class
"""
## MIT License
##
## Copyright (c) 2017, krishna bhogaonker
## Permission is hereby granted, free of charge, to any person obtaining a ## copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
## The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
## THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
# Standard module metadata consumed by packaging/introspection tools.
__author__ = 'krishna bhogaonker'
__copyright__ = 'copyright 2017'
__credits__ = ['krishna bhogaonker']
__license__ = "MIT"
__version__ = '0.1.0'
__maintainer__ = 'krishna bhogaonker'
__email__ = 'cyclotomiq@gmail.com'
__status__ = 'pre-alpha'
from .abcPlotSettings import abcPlotSettings
class PlotSettingsOverallMF(abcPlotSettings):
    """Default plot settings for the overall male/female comparison plot."""

    @staticmethod
    def get_settings():
        """Return the default settings dict for this plot type.

        NOTE(review): the keys 'target_plot ', 'legend_location ' and
        'color_target ' carry a trailing space inside the quotes — this looks
        like a typo, but consumers may look them up with the space included;
        confirm against callers before renaming.
        """
        defaults_overall = {
            'line_width': 2,
            'width_': 800,
            'height_': 800,
            'transparency': 0.25,
            'target_plot ': False,
            'legend_location ': 'top_right',
            'color_target ': '#ca0020',
            'percent_line_plot': False,
            'percent_line_value': 0.5,
            'color_percent_line': '#ca0020',
            'target_plot_linewidth': 2,
            'percent_linewidth': 2,
            'model_legend_label': ['Model 3'],
            'target_plot_legend_label': 'target',
            'percent_legend_label': 'percent',
            'male_female_numbers_plot': False,
            # Per-series colors/labels; list positions correspond to specs.
            'mf_male_color': ['blue', 'purple'],
            'mf_female_color': ['green', 'brown'],
            'mf_target_color': ['red', 'orange', 'deeppink', 'darkred'],
            'mf_male_label': ['Male Model 3 Spec1', 'Male Model 3 Spec2', 'Male Model 3 Spec3'],
            'mf_female_label': ['Female Model 3 Spec1', 'Female Model 3 Spec2', 'Female Model 3 Spec 3'],
            'mf_target_label': ['Target 1', 'Target 2'],
            'mf_male_linewidth': 2,
            'mf_target_linewidth': 2,
            'mf_data_color': ['blue'],
            'mf_female_data_label': ['Female Data Spec1', 'Female Data Spec2'],
            'mf_male_data_label': ['Male Data Spec1', 'Male Data Spec2'],
            'data_plot': True,
            'data_line_legend_label': 'Management Data',
            'year_offset': 0,
            'data_line_style': 'dashdot'}
        return defaults_overall
| {
"content_hash": "42a898a2bde85200f3ca9b1d14040c03",
"timestamp": "",
"source": "github",
"line_count": 65,
"max_line_length": 463,
"avg_line_length": 45.36923076923077,
"alnum_prop": 0.6490335707019329,
"repo_name": "university-gender-evolution/py-university-gender-dynamics-pkg",
"id": "00e8854854c764a1d5a73db61bba6c4c9222a9a2",
"size": "2970",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyugend/PlotSettingsOverallMF.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "236"
},
{
"name": "Makefile",
"bytes": "2078"
},
{
"name": "Python",
"bytes": "289658"
},
{
"name": "Shell",
"bytes": "1731"
}
],
"symlink_target": ""
} |
from pylearn2.models.mlp import MLP, CompositeLayer
from pylearn2.space import CompositeSpace
from theano.compat.python2x import OrderedDict
class MLPWithSource(MLP):
    """An MLP whose input/target data-source names are configurable kwargs."""

    def __init__(self, *args, **kwargs):
        # Strip our custom kwargs before the base MLP constructor sees them.
        source_names = (kwargs.pop('input_source', 'features'),
                        kwargs.pop('target_source', 'targets'))
        self.input_source, self.target_source = source_names
        super(MLPWithSource, self).__init__(*args, **kwargs)

    def get_input_source(self):
        """Return the name of the data source feeding the inputs."""
        return self.input_source

    def get_target_source(self):
        """Return the name of the data source providing the targets."""
        return self.target_source
class CompositeLayerWithSource(CompositeLayer):
    """A CompositeLayer whose sub-layers each declare their own data sources."""

    def get_input_source(self):
        """Tuple of each sub-layer's input source name, in layer order."""
        return tuple(child.get_input_source() for child in self.layers)

    def get_target_source(self):
        """Tuple of each sub-layer's target source name, in layer order."""
        return tuple(child.get_target_source() for child in self.layers)

    def set_input_space(self, space):
        """Distribute each component of *space* to the matching sub-layer."""
        self.input_space = space
        for child, subspace in zip(self.layers, space.components):
            child.set_input_space(subspace)
        self.output_space = CompositeSpace(
            tuple(child.get_output_space() for child in self.layers))

    def fprop(self, state_below):
        """Forward-propagate each component state through its own sub-layer."""
        outputs = []
        for child, child_state in zip(self.layers, state_below):
            outputs.append(child.fprop(child_state))
        return tuple(outputs)

    def get_monitoring_channels(self):
        """The composite itself exposes no monitoring channels."""
        return OrderedDict()
| {
"content_hash": "1efde4ab2427aef59d63f57115286d04",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 77,
"avg_line_length": 33.63414634146341,
"alnum_prop": 0.6526468455402465,
"repo_name": "vaudrypl/ift6266-speech-synth",
"id": "a19821051f61db942abd4584947b5bdbf04bc374",
"size": "1379",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mlp_with_source.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1379"
}
],
"symlink_target": ""
} |
"""Do not auto-compute diffusion_level
Revision ID: ac08dcf3f27b
Revises: dfec5f64ac73
Create Date: 2022-02-10 12:45:05.472204
"""
from distutils.util import strtobool
from alembic import op, context
import sqlalchemy as sa
from utils_flask_sqla.migrations.utils import logger
# revision identifiers, used by Alembic.
revision = 'ac08dcf3f27b'
down_revision = 'dfec5f64ac73'
branch_labels = None
depends_on = None
# Summary of the (French) note below: on insert into the synthese table only
# sensitivity is now computed (diffusion level is left untouched); the
# sensitivity computation now also uses the OCC_COMPORTEMENT criterion in
# addition to STATUT_BIO; the update trigger became BEFORE instead of AFTER
# (avoiding a second UPDATE); and id_nomenclature_diffusion_level is NULLed
# wherever it merely mirrors the sensitivity level.
"""
- Lors de l’insertion de données dans la synthèse, seule la sensibilité est calculé,
le niveau de diffusion est maintenant intouché.
- Le calcul de la sensibilité prend en compte le critère OCC_COMPORTEMENT en plus du
critère STATUT_BIO existant.
- Le trigger d’update de la synthèse est passé de AFTER à BEFORE, évitant d’effectuer
un deuxième UPDATE pour mettre à jour la sensibilité.
- Met NULL dans synthese.id_nomenclature_diffusion_level quand le niveau de diffusion
actuel correspond au niveau de sensibilité (laissé tel quel s’il ne correspond pas).
"""
def upgrade():
    """Recreate the synthese triggers so only sensitivity is auto-computed.

    Drops the old sensitivity+diffusion-level trigger pair and installs new
    ones that compute sensitivity only (now also feeding OCC_COMPORTEMENT);
    the update trigger runs BEFORE the row is written so no second UPDATE is
    needed.  Optionally clears id_nomenclature_diffusion_level wherever it
    merely mirrors the sensitivity level.
    """
    # Honour ``alembic -x clear-diffusion-level=<bool>``; default to clearing.
    clear_diffusion_level = context.get_x_argument(as_dictionary=True).get('clear-diffusion-level')
    if clear_diffusion_level is not None:
        clear_diffusion_level = bool(strtobool(clear_diffusion_level))
    else:
        clear_diffusion_level = True
    # Drop the old triggers and their sensitivity+diffusion-level functions.
    op.execute("""
        DROP TRIGGER tri_insert_calculate_sensitivity ON gn_synthese.synthese
    """)
    op.execute("""
        DROP TRIGGER tri_update_calculate_sensitivity ON gn_synthese.synthese
    """)
    op.execute("""
        DROP FUNCTION gn_synthese.fct_tri_cal_sensi_diff_level_on_each_statement
    """)
    op.execute("""
        DROP FUNCTION gn_synthese.fct_tri_cal_sensi_diff_level_on_each_row
    """)
    # New statement-level insert function: computes sensitivity only, using
    # both STATUT_BIO and OCC_COMPORTEMENT criteria.
    op.execute("""
        CREATE FUNCTION gn_synthese.fct_tri_calculate_sensitivity_on_each_statement()
            RETURNS trigger
            LANGUAGE plpgsql
        AS $function$
        -- Calculate sensitivity on insert in synthese
        BEGIN
            WITH cte AS (
                SELECT
                    id_synthese,
                    gn_sensitivity.get_id_nomenclature_sensitivity(
                        new_row.date_min::date,
                        taxonomie.find_cdref(new_row.cd_nom),
                        new_row.the_geom_local,
                        jsonb_build_object(
                            'STATUT_BIO', new_row.id_nomenclature_bio_status,
                            'OCC_COMPORTEMENT', new_row.id_nomenclature_behaviour
                        )
                    ) AS id_nomenclature_sensitivity
                FROM
                    NEW AS new_row
            )
            UPDATE
                gn_synthese.synthese AS s
            SET
                id_nomenclature_sensitivity = c.id_nomenclature_sensitivity
            FROM
                cte AS c
            WHERE
                c.id_synthese = s.id_synthese
            ;
            RETURN NULL;
        END;
        $function$
        ;
    """)
    # New row-level update function: BEFORE trigger, so it can set the column
    # directly on NEW without issuing a second UPDATE.
    op.execute("""
        CREATE FUNCTION gn_synthese.fct_tri_update_sensitivity_on_each_row()
            RETURNS trigger
            LANGUAGE plpgsql
        AS $function$
        -- Calculate sensitivity on update in synthese
        BEGIN
            NEW.id_nomenclature_sensitivity = gn_sensitivity.get_id_nomenclature_sensitivity(
                NEW.date_min::date,
                taxonomie.find_cdref(NEW.cd_nom),
                NEW.the_geom_local,
                jsonb_build_object(
                    'STATUT_BIO', NEW.id_nomenclature_bio_status,
                    'OCC_COMPORTEMENT', NEW.id_nomenclature_behaviour
                )
            );
            RETURN NEW;
        END;
        $function$
        ;
    """)
    op.execute("""
        CREATE TRIGGER
            tri_insert_calculate_sensitivity
        AFTER
            INSERT
        ON
            gn_synthese.synthese
        REFERENCING
            NEW TABLE AS NEW
        FOR EACH
            STATEMENT
        EXECUTE PROCEDURE
            gn_synthese.fct_tri_calculate_sensitivity_on_each_statement()
    """)
    op.execute("""
        CREATE TRIGGER
            tri_update_calculate_sensitivity
        BEFORE UPDATE OF
            date_min,
            date_max,
            cd_nom,
            the_geom_local,
            id_nomenclature_bio_status,
            id_nomenclature_behaviour
        ON
            gn_synthese.synthese
        FOR EACH
            ROW
        EXECUTE PROCEDURE
            gn_synthese.fct_tri_update_sensitivity_on_each_row()
    """)
    if clear_diffusion_level:
        logger.info("Clearing diffusion level…")
        # NULL the diffusion level only where it equals what the sensitivity
        # level would imply, i.e. it was auto-computed rather than set by hand.
        count = op.get_bind().execute("""
            WITH cleared_rows AS (
                UPDATE
                    gn_synthese.synthese s
                SET
                    id_nomenclature_diffusion_level = NULL
                FROM
                    ref_nomenclatures.t_nomenclatures nomenc_sensitivity,
                    ref_nomenclatures.t_nomenclatures nomenc_diff_level
                WHERE
                    nomenc_sensitivity.id_nomenclature = s.id_nomenclature_sensitivity
                    AND nomenc_diff_level.id_nomenclature = s.id_nomenclature_diffusion_level
                    AND nomenc_diff_level.cd_nomenclature = gn_sensitivity.calculate_cd_diffusion_level(NULL, nomenc_sensitivity.cd_nomenclature)
                RETURNING s.id_synthese
            )
            SELECT
                count(*)
            FROM
                cleared_rows;
        """).scalar()
        logger.info("Cleared diffusion level on {} rows.".format(count))
def downgrade():
    """Restore the old sensitivity+diffusion-level triggers.

    Optionally re-derives id_nomenclature_diffusion_level from the stored
    sensitivity for rows where it is NULL, then reinstalls the previous
    trigger functions (STATUT_BIO-only sensitivity, AFTER triggers).
    """
    # Honour ``alembic -x restore-diffusion-level=<bool>``; default to restoring.
    restore_diffusion_level = context.get_x_argument(as_dictionary=True).get('restore-diffusion-level')
    if restore_diffusion_level is not None:
        restore_diffusion_level = bool(strtobool(restore_diffusion_level))
    else:
        restore_diffusion_level = True
    if restore_diffusion_level:
        logger.info("Restore diffusion level…")
        # Re-derive the diffusion level from sensitivity for rows left NULL.
        count = op.get_bind().execute("""
            WITH restored_rows AS (
                UPDATE
                    gn_synthese.synthese s
                SET
                    id_nomenclature_diffusion_level = ref_nomenclatures.get_id_nomenclature(
                        'NIV_PRECIS',
                        gn_sensitivity.calculate_cd_diffusion_level(
                            NULL,
                            nomenc_sensitivity.cd_nomenclature
                        )
                    )
                FROM
                    ref_nomenclatures.t_nomenclatures nomenc_sensitivity
                WHERE
                    nomenc_sensitivity.id_nomenclature = s.id_nomenclature_sensitivity
                    AND s.id_nomenclature_diffusion_level IS NULL
                RETURNING s.id_synthese
            )
            SELECT
                count(*)
            FROM
                restored_rows
        """).scalar()
        logger.info("Restored diffusion level on {} rows.".format(count))
    # Drop the sensitivity-only triggers/functions installed by upgrade().
    op.execute("""
        DROP TRIGGER tri_insert_calculate_sensitivity ON gn_synthese.synthese
    """)
    op.execute("""
        DROP TRIGGER tri_update_calculate_sensitivity ON gn_synthese.synthese
    """)
    op.execute("""
        DROP FUNCTION gn_synthese.fct_tri_calculate_sensitivity_on_each_statement
    """)
    op.execute("""
        DROP FUNCTION gn_synthese.fct_tri_update_sensitivity_on_each_row
    """)
    # Recreate the old statement-level insert function (sensitivity AND
    # diffusion level, STATUT_BIO criterion only).
    op.execute("""
        CREATE FUNCTION gn_synthese.fct_tri_cal_sensi_diff_level_on_each_statement()
            RETURNS trigger
            LANGUAGE plpgsql
        AS $function$
        -- Calculate sensitivity and diffusion level on insert in synthese
        BEGIN
            WITH cte AS (
                SELECT
                    gn_sensitivity.get_id_nomenclature_sensitivity(
                        updated_rows.date_min::date,
                        taxonomie.find_cdref(updated_rows.cd_nom),
                        updated_rows.the_geom_local,
                        ('{"STATUT_BIO": ' || updated_rows.id_nomenclature_bio_status::text || '}')::jsonb
                    ) AS id_nomenclature_sensitivity,
                    id_synthese,
                    t_diff.cd_nomenclature as cd_nomenclature_diffusion_level
                FROM NEW AS updated_rows
                LEFT JOIN ref_nomenclatures.t_nomenclatures t_diff ON t_diff.id_nomenclature = updated_rows.id_nomenclature_diffusion_level
                WHERE updated_rows.id_nomenclature_sensitivity IS NULL
            )
            UPDATE gn_synthese.synthese AS s
            SET
                id_nomenclature_sensitivity = c.id_nomenclature_sensitivity,
                id_nomenclature_diffusion_level = ref_nomenclatures.get_id_nomenclature(
                    'NIV_PRECIS',
                    gn_sensitivity.calculate_cd_diffusion_level(
                        c.cd_nomenclature_diffusion_level,
                        t_sensi.cd_nomenclature
                    )
                )
            FROM cte AS c
            LEFT JOIN ref_nomenclatures.t_nomenclatures t_sensi ON t_sensi.id_nomenclature = c.id_nomenclature_sensitivity
            WHERE c.id_synthese = s.id_synthese
            ;
            RETURN NULL;
        END;
        $function$
        ;
    """)
    op.execute("""
        CREATE TRIGGER tri_insert_calculate_sensitivity AFTER
        INSERT
            ON
            gn_synthese.synthese REFERENCING NEW TABLE AS NEW FOR EACH STATEMENT EXECUTE PROCEDURE gn_synthese.fct_tri_cal_sensi_diff_level_on_each_statement()
    """)
    # Recreate the old row-level update function (AFTER trigger issuing a
    # second UPDATE to write sensitivity and diffusion level).
    op.execute("""
        CREATE FUNCTION gn_synthese.fct_tri_cal_sensi_diff_level_on_each_row()
            RETURNS trigger
            LANGUAGE plpgsql
        AS $function$
        -- Calculate sensitivity and diffusion level on update in synthese
        DECLARE calculated_id_sensi integer;
        BEGIN
            SELECT
                gn_sensitivity.get_id_nomenclature_sensitivity(
                    NEW.date_min::date,
                    taxonomie.find_cdref(NEW.cd_nom),
                    NEW.the_geom_local,
                    ('{"STATUT_BIO": ' || NEW.id_nomenclature_bio_status::text || '}')::jsonb
                ) INTO calculated_id_sensi;
            UPDATE gn_synthese.synthese
            SET
                id_nomenclature_sensitivity = calculated_id_sensi,
                -- On ne met pas à jour le niveau de diffusion s'il a déjà une valeur
                id_nomenclature_diffusion_level = CASE WHEN OLD.id_nomenclature_diffusion_level IS NULL THEN (
                        SELECT ref_nomenclatures.get_id_nomenclature(
                                'NIV_PRECIS',
                                gn_sensitivity.calculate_cd_diffusion_level(
                                    ref_nomenclatures.get_cd_nomenclature(OLD.id_nomenclature_diffusion_level),
                                    ref_nomenclatures.get_cd_nomenclature(calculated_id_sensi)
                                )
                            )
                    )
                    ELSE OLD.id_nomenclature_diffusion_level
                END
            WHERE id_synthese = OLD.id_synthese
            ;
            RETURN NULL;
        END;
        $function$
        ;
    """)
    op.execute("""
        CREATE TRIGGER tri_update_calculate_sensitivity AFTER
        UPDATE
            OF date_min,
            date_max,
            cd_nom,
            the_geom_local,
            id_nomenclature_bio_status ON
            gn_synthese.synthese FOR EACH ROW EXECUTE PROCEDURE gn_synthese.fct_tri_cal_sensi_diff_level_on_each_row()
    """)
| {
"content_hash": "3bc2cf4623876339db3db149c7ba574a",
"timestamp": "",
"source": "github",
"line_count": 308,
"max_line_length": 159,
"avg_line_length": 36.922077922077925,
"alnum_prop": 0.5646324305311291,
"repo_name": "PnEcrins/GeoNature",
"id": "e6fbe7b21af083e3dfbffd044a4b520fadfc198c",
"size": "11404",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "backend/geonature/migrations/versions/ac08dcf3f27b_diffusion_level.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "ApacheConf",
"bytes": "1931"
},
{
"name": "Batchfile",
"bytes": "1151"
},
{
"name": "CSS",
"bytes": "763718"
},
{
"name": "HTML",
"bytes": "651"
},
{
"name": "JavaScript",
"bytes": "16182773"
},
{
"name": "PHP",
"bytes": "4058658"
},
{
"name": "PLpgSQL",
"bytes": "893372"
},
{
"name": "Shell",
"bytes": "33147"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from linode.objects import Base, Property
class Image(Base):
    """
    An Image is something a Linode or Disk can be deployed from.
    """
    # API route for a single image; {id} is filled from the identifier below.
    api_endpoint = '/images/{id}'

    # Field map driving the Base machinery: identifier=True marks the lookup
    # key, mutable=True marks fields updatable via the API, and
    # is_datetime=True requests datetime parsing of the raw value.
    properties = {
        "id": Property(identifier=True),
        "label": Property(mutable=True),
        "description": Property(mutable=True),
        "status": Property(),
        "created": Property(is_datetime=True),
        "created_by": Property(),
        "type": Property(),
        "is_public": Property(),
        "vendor": Property(),
        "size": Property(),
        "deprecated": Property()
    }
| {
"content_hash": "daed0cba27eb935e547897a72270ba19",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 64,
"avg_line_length": 25.48,
"alnum_prop": 0.5745682888540031,
"repo_name": "jo-tez/python-linode-api",
"id": "26b1b336a41ad22c66c7b3f4db7aaf7ba21bc003",
"size": "637",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "linode/objects/image.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "126503"
}
],
"symlink_target": ""
} |
import json
from flask import url_for
from re import search
from bluepill.server import get_application
from bluepill.server.resource import HOMESERVER, users, get_error_message
from test_resource import OK, NOT_FOUND, BAD_REQUEST, UNPROCESSABLE_ENTITY, app # noqa
# Create a local copy of the @alice:example.com user entry.
USER = users['@alice:example.com'].copy()
# Get the URL for the register endpoint.
# url_for() needs an active Flask request context to resolve the route.
with get_application().test_request_context():
    URL = url_for('r0.register')
def test_correct_requests(client):
    """Valid registrations succeed for both the 'user' and 'guest' kinds."""
    def register(kind, payload):
        # POST the payload as JSON to the register endpoint for this kind.
        return client.post(URL + '?kind=' + kind, data=json.dumps(payload))

    def check_tokens(response, expected_user):
        assert response.status == OK
        assert response.json['user_id'] == '@%s:%s' % (expected_user, HOMESERVER)
        assert len(str(response.json['access_token'])) == 10
        assert len(str(response.json['refresh_token'])) == 10
        assert response.json['home_server'] == HOMESERVER

    # A correct request with the kind set to user.
    check_tokens(register('user', {
        'username': 'john',
        'bind_email': False,
        'password': 'testpassword',
    }), 'john')

    # A correct request with the kind set to guest.
    check_tokens(register('guest', {
        'username': 'liam',
        'bind_email': False,
        'password': 'testpassword',
    }), 'liam')

    # No username: a random number should be generated as the username.
    response = register('guest', {
        'password': 'testpassword',
    })
    assert response.status == OK
    # Extract the localpart from the user ID and verify it is numeric.
    username = search(r'\A@([^@:]*):', response.json['user_id']).group(1)
    int(username)
def test_malformed_requests(client):
    """Malformed kinds, missing fields and broken JSON are all rejected."""
    valid_payload = json.dumps({
        'username': 'john',
        'bind_email': False,
        'password': 'testpassword',
    })

    # Malformed kind argument.
    response = client.post(URL + '?kind=use', data=valid_payload)
    assert response.status == UNPROCESSABLE_ENTITY

    # JSON missing a required field (password).
    response = client.post(URL + '?kind=user', data=json.dumps({
        'username': 'john',
        'bind_email': False,
    }))
    assert response.status == BAD_REQUEST
    assert response.json == get_error_message(
        'M_BAD_JSON',
        'The supplied JSON was missing the password field.',
    )

    # JSON body missing entirely.
    response = client.post(URL + '?kind=user')
    assert response.status == BAD_REQUEST
    assert response.json == get_error_message(
        'M_NOT_JSON',
        'Content not JSON.',
    )

    # Invalid JSON (stray quote, missing commas).
    response = client.post(URL + '?kind=user', data=""""
    {
        "username": "john"
        "password": "testpassword"
    }
    """)
    assert response.status == BAD_REQUEST
    assert response.json == get_error_message(
        'M_NOT_JSON',
        'Content not JSON.',
    )
def test_registering_user_that_exists(client):
    """Registering an already-taken username yields M_USER_IN_USE."""
    payload = json.dumps({
        'username': 'alice',
        'bind_email': False,
        'password': 'testpassword',
    })
    response = client.post(URL + '?kind=user', data=payload)
    assert response.status == BAD_REQUEST
    assert response.json == get_error_message(
        'M_USER_IN_USE',
        'User ID already taken.',
    )
| {
"content_hash": "f3c4d4cd3faf05c186278aa70a9f29c0",
"timestamp": "",
"source": "github",
"line_count": 116,
"max_line_length": 87,
"avg_line_length": 31.20689655172414,
"alnum_prop": 0.6290055248618784,
"repo_name": "SShrike/bluepill",
"id": "7b6fdd842a5fdd4f1cab328e675d9a06f62fa112",
"size": "4248",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_resource_r0_register.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "26386"
}
],
"symlink_target": ""
} |
import __builtin__
import contextlib
import copy
import datetime
import errno
import glob
import os
import random
import re
import shutil
import threading
import time
import uuid
import eventlet
from eventlet import greenthread
import fixtures
from lxml import etree
import mock
from mox3 import mox
from oslo_concurrency import lockutils
from oslo_concurrency import processutils
from oslo_config import cfg
from oslo_serialization import jsonutils
from oslo_utils import encodeutils
from oslo_utils import importutils
from oslo_utils import timeutils
from oslo_utils import units
from oslo_utils import uuidutils
import six
from six.moves import range
from nova.api.metadata import base as instance_metadata
from nova.compute import arch
from nova.compute import cpumodel
from nova.compute import manager
from nova.compute import power_state
from nova.compute import task_states
from nova.compute import vm_mode
from nova.compute import vm_states
from nova import context
from nova import db
from nova import exception
from nova.network import model as network_model
from nova import objects
from nova.openstack.common import fileutils
from nova.openstack.common import loopingcall
from nova.pci import manager as pci_manager
from nova import test
from nova.tests.unit import fake_block_device
from nova.tests.unit import fake_instance
from nova.tests.unit import fake_network
import nova.tests.unit.image.fake
from nova.tests.unit import matchers
from nova.tests.unit.objects import test_pci_device
from nova.tests.unit.objects import test_vcpu_model
from nova.tests.unit.virt.libvirt import fake_imagebackend
from nova.tests.unit.virt.libvirt import fake_libvirt_utils
from nova.tests.unit.virt.libvirt import fakelibvirt
from nova import utils
from nova import version
from nova.virt import block_device as driver_block_device
from nova.virt import configdrive
from nova.virt.disk import api as disk
from nova.virt import driver
from nova.virt import fake
from nova.virt import firewall as base_firewall
from nova.virt import hardware
from nova.virt.libvirt import blockinfo
from nova.virt.libvirt import config as vconfig
from nova.virt.libvirt import driver as libvirt_driver
from nova.virt.libvirt import firewall
from nova.virt.libvirt import guest as libvirt_guest
from nova.virt.libvirt import host
from nova.virt.libvirt import imagebackend
from nova.virt.libvirt import lvm
from nova.virt.libvirt import rbd_utils
from nova.virt.libvirt import utils as libvirt_utils
from nova.virt.libvirt import volume as volume_drivers
libvirt_driver.libvirt = fakelibvirt
host.libvirt = fakelibvirt
libvirt_guest.libvirt = fakelibvirt
CONF = cfg.CONF
CONF.import_opt('compute_manager', 'nova.service')
CONF.import_opt('host', 'nova.netconf')
CONF.import_opt('my_ip', 'nova.netconf')
CONF.import_opt('image_cache_subdirectory_name', 'nova.virt.imagecache')
CONF.import_opt('instances_path', 'nova.compute.manager')
_fake_network_info = fake_network.fake_get_instance_nw_info
_fake_NodeDevXml = \
{"pci_0000_04_00_3": """
<device>
<name>pci_0000_04_00_3</name>
<parent>pci_0000_00_01_1</parent>
<driver>
<name>igb</name>
</driver>
<capability type='pci'>
<domain>0</domain>
<bus>4</bus>
<slot>0</slot>
<function>3</function>
<product id='0x1521'>I350 Gigabit Network Connection</product>
<vendor id='0x8086'>Intel Corporation</vendor>
<capability type='virt_functions'>
<address domain='0x0000' bus='0x04' slot='0x10' function='0x3'/>
<address domain='0x0000' bus='0x04' slot='0x10' function='0x7'/>
<address domain='0x0000' bus='0x04' slot='0x11' function='0x3'/>
<address domain='0x0000' bus='0x04' slot='0x11' function='0x7'/>
</capability>
</capability>
</device>""",
"pci_0000_04_10_7": """
<device>
<name>pci_0000_04_10_7</name>
<parent>pci_0000_00_01_1</parent>
<driver>
<name>igbvf</name>
</driver>
<capability type='pci'>
<domain>0</domain>
<bus>4</bus>
<slot>16</slot>
<function>7</function>
<product id='0x1520'>I350 Ethernet Controller Virtual Function
</product>
<vendor id='0x8086'>Intel Corporation</vendor>
<capability type='phys_function'>
<address domain='0x0000' bus='0x04' slot='0x00' function='0x3'/>
</capability>
<capability type='virt_functions'>
</capability>
</capability>
</device>""",
"pci_0000_04_11_7": """
<device>
<name>pci_0000_04_11_7</name>
<parent>pci_0000_00_01_1</parent>
<driver>
<name>igbvf</name>
</driver>
<capability type='pci'>
<domain>0</domain>
<bus>4</bus>
<slot>17</slot>
<function>7</function>
<product id='0x1520'>I350 Ethernet Controller Virtual Function
</product>
<vendor id='0x8086'>Intel Corporation</vendor>
<numa node='0'/>
<capability type='phys_function'>
<address domain='0x0000' bus='0x04' slot='0x00' function='0x3'/>
</capability>
<capability type='virt_functions'>
</capability>
</capability>
</device>"""}
_fake_cpu_info = {
"arch": "test_arch",
"model": "test_model",
"vendor": "test_vendor",
"topology": {
"sockets": 1,
"cores": 8,
"threads": 16
},
"features": ["feature1", "feature2"]
}
def _concurrency(signal, wait, done, target, is_block_dev=False):
signal.send()
wait.wait()
done.send()
class FakeVirDomainSnapshot(object):
    """Test double for a libvirt domain snapshot; delete() is a no-op."""

    def __init__(self, dom=None):
        # The (fake) domain this snapshot belongs to, if any.
        self.dom = dom

    def delete(self, flags):
        """No-op stand-in for virDomainSnapshot.delete()."""
        pass
class FakeVirtDomain(object):
    """Minimal stand-in for a libvirt virDomain.

    Provides name/ID/UUID/info/XML accessors plus no-op lifecycle, device,
    snapshot, block-job and filesystem-freeze methods — just enough API
    surface for the libvirt driver tests.
    """

    def __init__(self, fake_xml=None, uuidstr=None, id=None, name=None):
        # Generate a random UUID when the caller does not pin one.
        if uuidstr is None:
            uuidstr = str(uuid.uuid4())
        self.uuidstr = uuidstr
        self.id = id
        self.domname = name
        # Mirrors virDomain.info(): [state, maxMem, memory, nrVirtCpu, cpuTime].
        self._info = [power_state.RUNNING, 2048 * units.Mi, 1234 * units.Mi,
                      None, None]
        if fake_xml:
            self._fake_dom_xml = fake_xml
        else:
            # Default domain XML: a single file-backed disk.
            self._fake_dom_xml = """
                <domain type='kvm'>
                    <devices>
                        <disk type='file'>
                            <source file='filename'/>
                        </disk>
                    </devices>
                </domain>
            """

    def name(self):
        # Fall back to a repr-based name when none was supplied.
        if self.domname is None:
            return "fake-domain %s" % self
        else:
            return self.domname

    def ID(self):
        return self.id

    def info(self):
        return self._info

    def create(self):
        pass

    def managedSave(self, *args):
        pass

    def createWithFlags(self, launch_flags):
        pass

    def XMLDesc(self, *args):
        return self._fake_dom_xml

    def UUIDString(self):
        return self.uuidstr

    def attachDeviceFlags(self, xml, flags):
        pass

    def attachDevice(self, xml):
        pass

    def detachDeviceFlags(self, xml, flags):
        pass

    def snapshotCreateXML(self, xml, flags):
        pass

    def blockCommit(self, disk, base, top, bandwidth=0, flags=0):
        pass

    def blockRebase(self, disk, base, bandwidth=0, flags=0):
        pass

    def blockJobInfo(self, path, flags):
        pass

    def resume(self):
        pass

    def destroy(self):
        pass

    def fsFreeze(self, disks=None, flags=0):
        pass

    def fsThaw(self, disks=None, flags=0):
        pass
class CacheConcurrencyTestCase(test.NoDBTestCase):
    """Verify image-cache locking: operations on the same cache file name
    are serialized, while different file names proceed concurrently."""

    def setUp(self):
        super(CacheConcurrencyTestCase, self).setUp()
        self.flags(instances_path=self.useFixture(fixtures.TempDir()).path)

        # utils.synchronized() will create the lock_path for us if it
        # doesn't already exist. It will also delete it when it's done,
        # which can cause race conditions with the multiple threads we
        # use for tests. So, create the path here so utils.synchronized()
        # won't delete it out from under one of the threads.
        self.lock_path = os.path.join(CONF.instances_path, 'locks')
        fileutils.ensure_tree(self.lock_path)

        def fake_exists(fname):
            # Pretend only the image-cache base dir and the lock dir exist.
            basedir = os.path.join(CONF.instances_path,
                                   CONF.image_cache_subdirectory_name)
            if fname == basedir or fname == self.lock_path:
                return True
            return False

        def fake_execute(*args, **kwargs):
            pass

        def fake_extend(image, size, use_cow=False):
            pass

        self.stubs.Set(os.path, 'exists', fake_exists)
        self.stubs.Set(utils, 'execute', fake_execute)
        self.stubs.Set(imagebackend.disk, 'extend', fake_extend)
        self.useFixture(fixtures.MonkeyPatch(
            'nova.virt.libvirt.imagebackend.libvirt_utils',
            fake_libvirt_utils))

    def _fake_instance(self, uuid):
        # Minimal Instance: the backend only needs id and uuid.
        return objects.Instance(id=1, uuid=uuid)

    def test_same_fname_concurrency(self):
        # Ensures that the same fname cache runs at a sequentially.
        uuid = uuidutils.generate_uuid()

        backend = imagebackend.Backend(False)
        wait1 = eventlet.event.Event()
        done1 = eventlet.event.Event()
        sig1 = eventlet.event.Event()
        thr1 = eventlet.spawn(backend.image(self._fake_instance(uuid),
                                            'name').cache,
                              _concurrency, 'fname', None,
                              signal=sig1, wait=wait1, done=done1)
        eventlet.sleep(0)
        # Thread 1 should run before thread 2.
        sig1.wait()

        wait2 = eventlet.event.Event()
        done2 = eventlet.event.Event()
        sig2 = eventlet.event.Event()
        thr2 = eventlet.spawn(backend.image(self._fake_instance(uuid),
                                            'name').cache,
                              _concurrency, 'fname', None,
                              signal=sig2, wait=wait2, done=done2)

        wait2.send()
        eventlet.sleep(0)
        try:
            # Thread 2 must still be blocked behind thread 1's lock.
            self.assertFalse(done2.ready())
        finally:
            wait1.send()
        done1.wait()
        eventlet.sleep(0)
        self.assertTrue(done2.ready())
        # Wait on greenthreads to assert they didn't raise exceptions
        # during execution
        thr1.wait()
        thr2.wait()

    def test_different_fname_concurrency(self):
        # Ensures that two different fname caches are concurrent.
        uuid = uuidutils.generate_uuid()

        backend = imagebackend.Backend(False)
        wait1 = eventlet.event.Event()
        done1 = eventlet.event.Event()
        sig1 = eventlet.event.Event()
        thr1 = eventlet.spawn(backend.image(self._fake_instance(uuid),
                                            'name').cache,
                              _concurrency, 'fname2', None,
                              signal=sig1, wait=wait1, done=done1)
        eventlet.sleep(0)
        # Thread 1 should run before thread 2.
        sig1.wait()

        wait2 = eventlet.event.Event()
        done2 = eventlet.event.Event()
        sig2 = eventlet.event.Event()
        thr2 = eventlet.spawn(backend.image(self._fake_instance(uuid),
                                            'name').cache,
                              _concurrency, 'fname1', None,
                              signal=sig2, wait=wait2, done=done2)
        eventlet.sleep(0)
        # Wait for thread 2 to start.
        sig2.wait()

        wait2.send()
        tries = 0
        # Different file name: thread 2 should finish while thread 1 holds
        # its own lock; poll briefly to let the hub schedule it.
        while not done2.ready() and tries < 10:
            eventlet.sleep(0)
            tries += 1
        try:
            self.assertTrue(done2.ready())
        finally:
            wait1.send()
            eventlet.sleep(0)
        # Wait on greenthreads to assert they didn't raise exceptions
        # during execution
        thr1.wait()
        thr2.wait()
class FakeVolumeDriver(object):
    """No-op volume driver whose config always describes a fake device."""

    def __init__(self, *args, **kwargs):
        pass

    def attach_volume(self, *args):
        pass

    def detach_volume(self, *args):
        pass

    def get_xml(self, *args):
        # Fake volumes never contribute domain XML.
        return ""

    def get_config(self, *args):
        """Connect the volume to a fake device."""
        disk_conf = vconfig.LibvirtConfigGuestDisk()
        disk_conf.source_type = "network"
        # Every remaining field is just the literal string "fake".
        for attr in ("source_protocol", "source_name",
                     "target_dev", "target_bus"):
            setattr(disk_conf, attr, "fake")
        return disk_conf

    def connect_volume(self, *args):
        """Connect the volume to a fake device."""
        return self.get_config()
class FakeConfigGuestDisk(object):
    """Minimal guest-disk config stub: only the two fields tests inspect."""

    def __init__(self, *args, **kwargs):
        # Arbitrary constructor arguments are accepted and ignored.
        self.source_type = self.driver_cache = None
class FakeConfigGuest(object):
    """Minimal guest config stub: tracks only ``driver_cache``."""

    def __init__(self, *args, **kwargs):
        # Arbitrary constructor arguments are accepted and ignored.
        self.driver_cache = None
class FakeNodeDevice(object):
    """Mimics a libvirt node device whose XMLDesc() echoes canned XML."""

    def __init__(self, fakexml):
        self.xml = fakexml

    def XMLDesc(self, *args):
        # Flags are accepted (and ignored) to match the libvirt signature.
        return self.xml
def _create_test_instance():
    """Return a dict of instance attributes used as a shared test fixture.

    The dict mirrors the fields an Instance object is built from, with an
    m1.small-ish Flavor attached under 'flavor'.
    """
    small_flavor = objects.Flavor(memory_mb=2048,
                                  swap=0,
                                  vcpu_weight=None,
                                  root_gb=1,
                                  id=2,
                                  name=u'm1.small',
                                  ephemeral_gb=0,
                                  rxtx_factor=1.0,
                                  flavorid=u'1',
                                  vcpus=1,
                                  extra_specs={})
    return dict(
        id=1,
        uuid='32dfcb37-5af1-552b-357c-be8c3aa38310',
        memory_kb='1024000',
        basepath='/some/path',
        bridge_name='br100',
        display_name="Acme webserver",
        vcpus=2,
        project_id='fake',
        bridge='br101',
        image_ref='155d900f-4e14-4e4c-a73d-069cbf4541e6',
        root_gb=10,
        ephemeral_gb=20,
        instance_type_id='5',  # m1.small
        extra_specs={},
        system_metadata={},
        flavor=small_flavor,
        new_flavor=None,
        old_flavor=None,
        pci_devices=objects.PciDeviceList(),
        numa_topology=None,
        config_drive=None,
        vm_mode=None,
        kernel_id=None,
        ramdisk_id=None,
        os_type='linux',
        user_id='838a72b0-0d54-4827-8fd6-fb1227633ceb',
        ephemeral_key_uuid=None,
        vcpu_model=None,
        host='fake-host',
    )
class LibvirtConnTestCase(test.NoDBTestCase):
REQUIRES_LOCKING = True
_EPHEMERAL_20_DEFAULT = ('ephemeral_20_%s' %
utils.get_hash_str(disk._DEFAULT_FILE_SYSTEM)[:7])
    def setUp(self):
        """Wire up the fakes, stubs and config flags shared by every test.

        All libvirt interaction goes through fakelibvirt/fake_libvirt_utils;
        instance/snapshot paths point at a per-test temp dir.
        """
        super(LibvirtConnTestCase, self).setUp()
        self.flags(fake_call=True)
        self.user_id = 'fake'
        self.project_id = 'fake'
        self.context = context.get_admin_context()
        # Keep all on-disk artifacts inside a fixture-managed temp dir.
        temp_dir = self.useFixture(fixtures.TempDir()).path
        self.flags(instances_path=temp_dir)
        self.flags(snapshots_directory=temp_dir, group='libvirt')
        self.useFixture(fixtures.MonkeyPatch(
            'nova.virt.libvirt.driver.libvirt_utils',
            fake_libvirt_utils))
        self.flags(sysinfo_serial="hardware", group="libvirt")
        self.useFixture(fixtures.MonkeyPatch(
            'nova.virt.libvirt.imagebackend.libvirt_utils',
            fake_libvirt_utils))
        def fake_extend(image, size, use_cow=False):
            # disk.extend would shell out; tests never need a real resize.
            pass
        self.stubs.Set(libvirt_driver.disk, 'extend', fake_extend)
        self.stubs.Set(imagebackend.Image, 'resolve_driver_format',
                       imagebackend.Image._get_driver_format)
        self.useFixture(fakelibvirt.FakeLibvirtFixture())
        self.test_instance = _create_test_instance()
        self.image_service = nova.tests.unit.image.fake.stub_out_image_service(
            self.stubs)
        # Template for per-test block-device XML; {device_path} is filled in
        # by the individual tests.
        self.device_xml_tmpl = """
      <domain type='kvm'>
        <devices>
          <disk type='block' device='disk'>
            <driver name='qemu' type='raw' cache='none'/>
            <source dev='{device_path}'/>
            <target bus='virtio' dev='vdb'/>
            <serial>58a84f6d-3f0c-4e19-a0af-eb657b790657</serial>
            <address type='pci' domain='0x0' bus='0x0' slot='0x04' \
function='0x0'/>
          </disk>
        </devices>
      </domain>
      """
def relpath(self, path):
return os.path.relpath(path, CONF.instances_path)
    def tearDown(self):
        """Reset the fake image service so state never leaks between tests."""
        nova.tests.unit.image.fake.FakeImageService_reset()
        super(LibvirtConnTestCase, self).tearDown()
def test_driver_capabilities(self):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.assertTrue(drvr.capabilities['has_imagecache'],
'Driver capabilities for \'has_imagecache\''
'is invalid')
self.assertTrue(drvr.capabilities['supports_recreate'],
'Driver capabilities for \'supports_recreate\''
'is invalid')
    def create_fake_libvirt_mock(self, **kwargs):
        """Stub LibvirtDriver's connection plumbing with a bare fake.

        Keyword arguments become attributes of the fake connection, letting
        callers inject per-test behaviour (e.g. lookupByName).
        """
        # A fake libvirt.virConnect
        class FakeLibvirtDriver(object):
            def defineXML(self, xml):
                return FakeVirtDomain()
        # Creating mocks
        volume_driver = ['iscsi=nova.tests.unit.virt.libvirt.test_driver'
                         '.FakeVolumeDriver']
        fake = FakeLibvirtDriver()
        # Customizing above fake if necessary
        for key, val in kwargs.items():
            fake.__setattr__(key, val)
        # Both the cached connection and fresh lookups return the same fake.
        self.stubs.Set(libvirt_driver.LibvirtDriver, '_conn', fake)
        self.stubs.Set(libvirt_driver.LibvirtDriver, '_get_volume_drivers',
                       lambda x: volume_driver)
        self.stubs.Set(host.Host, 'get_connection', lambda x: fake)
    def fake_lookup(self, instance_name):
        # Stand-in for a domain lookup: always returns a fresh FakeVirtDomain
        # regardless of the requested name.
        return FakeVirtDomain()
def fake_execute(self, *args, **kwargs):
open(args[-1], "a").close()
def _create_service(self, **kwargs):
service_ref = {'host': kwargs.get('host', 'dummy'),
'disabled': kwargs.get('disabled', False),
'binary': 'nova-compute',
'topic': 'compute',
'report_count': 0}
return objects.Service(**service_ref)
def _get_pause_flag(self, drvr, network_info, power_on=True,
vifs_already_plugged=False):
timeout = CONF.vif_plugging_timeout
events = []
if (drvr._conn_supports_start_paused and
utils.is_neutron() and
not vifs_already_plugged and
power_on and timeout):
events = drvr._get_neutron_events(network_info)
return bool(events)
def test_public_api_signatures(self):
baseinst = driver.ComputeDriver(None)
inst = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
self.assertPublicAPISignatures(baseinst, inst)
def test_legacy_block_device_info(self):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
self.assertFalse(drvr.need_legacy_block_device_info)
    @mock.patch.object(host.Host, "has_min_version")
    def test_min_version_start_ok(self, mock_version):
        """init_host succeeds when the minimum libvirt version is met."""
        mock_version.return_value = True
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        drvr.init_host("dummyhost")
    @mock.patch.object(host.Host, "has_min_version")
    def test_min_version_start_abort(self, mock_version):
        """init_host raises NovaException when libvirt is too old."""
        mock_version.return_value = False
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        self.assertRaises(exception.NovaException,
                          drvr.init_host,
                          "dummyhost")
@mock.patch.object(fakelibvirt.Connection, 'getLibVersion',
return_value=utils.convert_version_to_int(
libvirt_driver.NEXT_MIN_LIBVIRT_VERSION) - 1)
@mock.patch.object(libvirt_driver.LOG, 'warning')
def test_next_min_version_deprecation_warning(self, mock_warning,
mock_get_libversion):
# Test that a warning is logged if the libvirt version is less than
# the next required minimum version.
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
drvr.init_host("dummyhost")
# assert that the next min version is in a warning message
expected_arg = {'version': '0.10.2'}
version_arg_found = False
for call in mock_warning.call_args_list:
if call[0][1] == expected_arg:
version_arg_found = True
break
self.assertTrue(version_arg_found)
@mock.patch.object(fakelibvirt.Connection, 'getLibVersion',
return_value=utils.convert_version_to_int(
libvirt_driver.NEXT_MIN_LIBVIRT_VERSION))
@mock.patch.object(libvirt_driver.LOG, 'warning')
def test_next_min_version_ok(self, mock_warning, mock_get_libversion):
# Test that a warning is not logged if the libvirt version is greater
# than or equal to NEXT_MIN_LIBVIRT_VERSION.
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
drvr.init_host("dummyhost")
# assert that the next min version is in a warning message
expected_arg = {'version': '0.10.2'}
version_arg_found = False
for call in mock_warning.call_args_list:
if call[0][1] == expected_arg:
version_arg_found = True
break
self.assertFalse(version_arg_found)
@mock.patch.object(objects.Service, 'get_by_compute_host')
def test_set_host_enabled_with_disable(self, mock_svc):
# Tests disabling an enabled host.
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
svc = self._create_service(host='fake-mini')
mock_svc.return_value = svc
drvr._set_host_enabled(False)
self.assertTrue(svc.disabled)
    @mock.patch.object(objects.Service, 'get_by_compute_host')
    def test_set_host_enabled_with_enable(self, mock_svc):
        # Tests enabling a disabled host.
        # NOTE(review): the service stays disabled afterwards -- presumably
        # the driver refuses to re-enable a host that was disabled for some
        # other reason; confirm against _set_host_enabled's implementation.
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        svc = self._create_service(disabled=True, host='fake-mini')
        mock_svc.return_value = svc
        drvr._set_host_enabled(True)
        self.assertTrue(svc.disabled)
@mock.patch.object(objects.Service, 'get_by_compute_host')
def test_set_host_enabled_with_enable_state_enabled(self, mock_svc):
# Tests enabling an enabled host.
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
svc = self._create_service(disabled=False, host='fake-mini')
mock_svc.return_value = svc
drvr._set_host_enabled(True)
self.assertFalse(svc.disabled)
@mock.patch.object(objects.Service, 'get_by_compute_host')
def test_set_host_enabled_with_disable_state_disabled(self, mock_svc):
# Tests disabling a disabled host.
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
svc = self._create_service(disabled=True, host='fake-mini')
mock_svc.return_value = svc
drvr._set_host_enabled(False)
self.assertTrue(svc.disabled)
    def test_set_host_enabled_swallows_exceptions(self):
        """_set_host_enabled must not propagate DB errors to callers."""
        # Tests that set_host_enabled will swallow exceptions coming from the
        # db_api code so they don't break anything calling it, e.g. the
        # _get_new_connection method.
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        with mock.patch.object(db, 'service_get_by_compute_host') as db_mock:
            # Make db.service_get_by_compute_host raise NovaException; this
            # is more robust than just raising ComputeHostNotFound.
            db_mock.side_effect = exception.NovaException
            drvr._set_host_enabled(False)
    @mock.patch.object(fakelibvirt.virConnect, "nodeDeviceLookupByName")
    def test_prepare_pci_device(self, mock_lookup):
        """PCI devices are prepared without error under the xen virt type."""
        pci_devices = [dict(hypervisor_name='xxx')]
        self.flags(virt_type='xen', group='libvirt')
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        conn = drvr._host.get_connection()
        # Every lookup returns a fresh fake node device on this connection.
        mock_lookup.side_effect = lambda x: fakelibvirt.NodeDevice(conn)
        drvr._prepare_pci_devices_for_use(pci_devices)
    @mock.patch.object(fakelibvirt.virConnect, "nodeDeviceLookupByName")
    @mock.patch.object(fakelibvirt.virNodeDevice, "dettach")
    def test_prepare_pci_device_exception(self, mock_detach, mock_lookup):
        """A libvirt error while detaching a node device surfaces as
        PciDevicePrepareFailed.
        """
        pci_devices = [dict(hypervisor_name='xxx',
                            id='id1',
                            instance_uuid='uuid')]
        self.flags(virt_type='xen', group='libvirt')
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        conn = drvr._host.get_connection()
        mock_lookup.side_effect = lambda x: fakelibvirt.NodeDevice(conn)
        mock_detach.side_effect = fakelibvirt.libvirtError("xxxx")
        self.assertRaises(exception.PciDevicePrepareFailed,
                          drvr._prepare_pci_devices_for_use, pci_devices)
    def test_detach_pci_devices_exception(self):
        """Detach raises PciDeviceDetachFailed when libvirt lacks the
        required minimum version.
        """
        pci_devices = [dict(hypervisor_name='xxx',
                            id='id1',
                            instance_uuid='uuid')]
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        self.mox.StubOutWithMock(host.Host,
                                 'has_min_version')
        # Force the version check to fail (assigned directly rather than via
        # mox record/replay).
        host.Host.has_min_version = lambda x, y: False
        self.assertRaises(exception.PciDeviceDetachFailed,
                          drvr._detach_pci_devices, None, pci_devices)
def test_detach_pci_devices(self):
fake_domXML1 =\
"""<domain> <devices>
<disk type='file' device='disk'>
<driver name='qemu' type='qcow2' cache='none'/>
<source file='xxx'/>
<target dev='vda' bus='virtio'/>
<alias name='virtio-disk0'/>
<address type='pci' domain='0x0000' bus='0x00'
slot='0x04' function='0x0'/>
</disk>
<hostdev mode="subsystem" type="pci" managed="yes">
<source>
<address function="0x1" slot="0x10" domain="0x0000"
bus="0x04"/>
</source>
</hostdev></devices></domain>"""
pci_devices = [dict(hypervisor_name='xxx',
id='id1',
instance_uuid='uuid',
address="0001:04:10:1")]
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
self.mox.StubOutWithMock(host.Host,
'has_min_version')
host.Host.has_min_version = lambda x, y: True
self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver,
'_get_guest_pci_device')
class FakeDev(object):
def to_xml(self):
pass
libvirt_driver.LibvirtDriver._get_guest_pci_device =\
lambda x, y: FakeDev()
class FakeDomain(object):
def detachDeviceFlags(self, xml, flag):
pci_devices[0]['hypervisor_name'] = 'marked'
pass
def XMLDesc(self, flag):
return fake_domXML1
drvr._detach_pci_devices(FakeDomain(), pci_devices)
self.assertEqual(pci_devices[0]['hypervisor_name'], 'marked')
    def test_detach_pci_devices_timeout(self):
        """Detach raises PciDeviceDetachFailed when the hostdev never
        disappears from the domain XML.
        """
        fake_domXML1 =\
            """<domain>
                <devices>
                  <hostdev mode="subsystem" type="pci" managed="yes">
                    <source>
                        <address function="0x1" slot="0x10" domain="0x0000" bus="0x04"/>
                    </source>
                  </hostdev>
                </devices>
            </domain>"""
        pci_devices = [dict(hypervisor_name='xxx',
                            id='id1',
                            instance_uuid='uuid',
                            address="0000:04:10:1")]
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        self.mox.StubOutWithMock(host.Host,
                                 'has_min_version')
        host.Host.has_min_version = lambda x, y: True
        self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver,
                                 '_get_guest_pci_device')

        class FakeDev(object):
            def to_xml(self):
                pass

        libvirt_driver.LibvirtDriver._get_guest_pci_device =\
            lambda x, y: FakeDev()

        class FakeDomain(object):
            def detachDeviceFlags(self, xml, flag):
                # Detach is a no-op, so the hostdev stays in the XML and the
                # driver's wait loop must time out.
                pass

            def XMLDesc(self, flag):
                return fake_domXML1
        self.assertRaises(exception.PciDeviceDetachFailed,
                          drvr._detach_pci_devices, FakeDomain(), pci_devices)
def test_get_connector(self):
initiator = 'fake.initiator.iqn'
ip = 'fakeip'
host = 'fakehost'
wwpns = ['100010604b019419']
wwnns = ['200010604b019419']
self.flags(my_ip=ip)
self.flags(host=host)
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
expected = {
'ip': ip,
'initiator': initiator,
'host': host,
'wwpns': wwpns,
'wwnns': wwnns
}
volume = {
'id': 'fake'
}
result = drvr.get_volume_connector(volume)
self.assertThat(expected, matchers.DictMatches(result))
def test_get_connector_storage_ip(self):
ip = '100.100.100.100'
storage_ip = '101.101.101.101'
self.flags(my_block_storage_ip=storage_ip, my_ip=ip)
volume = {
'id': 'fake'
}
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
result = drvr.get_volume_connector(volume)
self.assertEqual(storage_ip, result['ip'])
    def test_lifecycle_event_registration(self):
        """registerErrorHandler must run before host capabilities are
        queried during init_host.
        """
        # Call order is recorded here by both fakes.
        calls = []
        def fake_registerErrorHandler(*args, **kwargs):
            calls.append('fake_registerErrorHandler')
        def fake_get_host_capabilities(**args):
            cpu = vconfig.LibvirtConfigGuestCPU()
            cpu.arch = arch.ARMV7
            caps = vconfig.LibvirtConfigCaps()
            caps.host = vconfig.LibvirtConfigCapsHost()
            caps.host.cpu = cpu
            calls.append('fake_get_host_capabilities')
            return caps
        @mock.patch.object(fakelibvirt, 'registerErrorHandler',
                           side_effect=fake_registerErrorHandler)
        @mock.patch.object(host.Host, "get_capabilities",
                           side_effect=fake_get_host_capabilities)
        def test_init_host(get_host_capabilities, register_error_handler):
            drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
            drvr.init_host("test_host")
        test_init_host()
        # NOTE(dkliban): Will fail if get_host_capabilities is called before
        # registerErrorHandler
        self.assertEqual(['fake_registerErrorHandler',
                          'fake_get_host_capabilities'], calls)
    def test_sanitize_log_to_xml(self):
        """Passwords in connection_info must be scrubbed before guest-XML
        parameters are logged.
        """
        # setup fake data
        data = {'auth_password': 'scrubme'}
        bdm = [{'connection_info': {'data': data}}]
        bdi = {'block_device_mapping': bdm}
        # Tests that the parameters to the _get_guest_xml method
        # are sanitized for passwords when logged.
        def fake_debug(*args, **kwargs):
            if 'auth_password' in args[0]:
                self.assertNotIn('scrubme', args[0])
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        conf = mock.Mock()
        # NOTE(review): contextlib.nested is Python 2 only.
        with contextlib.nested(
            mock.patch.object(libvirt_driver.LOG, 'debug',
                              side_effect=fake_debug),
            mock.patch.object(drvr, '_get_guest_config', return_value=conf)
        ) as (
            debug_mock, conf_mock
        ):
            drvr._get_guest_xml(self.context, self.test_instance,
                                network_info={}, disk_info={},
                                image_meta={}, block_device_info=bdi)
            # we don't care what the log message is, we just want to make sure
            # our stub method is called which asserts the password is scrubbed
            self.assertTrue(debug_mock.called)
    @mock.patch.object(time, "time")
    def test_get_guest_config(self, time_mock):
        """End-to-end check of the generated guest config for a plain guest:
        features, devices, and the Nova metadata (owner/flavor/timestamps).
        """
        time_mock.return_value = 1234567.89
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        test_instance = copy.deepcopy(self.test_instance)
        test_instance["display_name"] = "purple tomatoes"
        ctxt = context.RequestContext(project_id=123,
                                      project_name="aubergine",
                                      user_id=456,
                                      user_name="pie")
        flavor = objects.Flavor(name='m1.small',
                                memory_mb=6,
                                vcpus=28,
                                root_gb=496,
                                ephemeral_gb=8128,
                                swap=33550336,
                                extra_specs={})
        instance_ref = objects.Instance(**test_instance)
        instance_ref.flavor = flavor
        image_meta = {}
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)
        cfg = drvr._get_guest_config(instance_ref,
                                     _fake_network_info(self.stubs, 1),
                                     {}, disk_info,
                                     context=ctxt)
        self.assertEqual(cfg.uuid, instance_ref["uuid"])
        self.assertEqual(2, len(cfg.features))
        self.assertIsInstance(cfg.features[0],
                              vconfig.LibvirtConfigGuestFeatureACPI)
        self.assertIsInstance(cfg.features[1],
                              vconfig.LibvirtConfigGuestFeatureAPIC)
        self.assertEqual(cfg.memory, 6 * units.Ki)
        self.assertEqual(cfg.vcpus, 28)
        self.assertEqual(cfg.os_type, vm_mode.HVM)
        self.assertEqual(cfg.os_boot_dev, ["hd"])
        self.assertIsNone(cfg.os_root)
        # Expected device layout: 3 disks, 1 NIC, 2 serials, input,
        # graphics, video, memory balloon.
        self.assertEqual(len(cfg.devices), 10)
        self.assertIsInstance(cfg.devices[0],
                              vconfig.LibvirtConfigGuestDisk)
        self.assertIsInstance(cfg.devices[1],
                              vconfig.LibvirtConfigGuestDisk)
        self.assertIsInstance(cfg.devices[2],
                              vconfig.LibvirtConfigGuestDisk)
        self.assertIsInstance(cfg.devices[3],
                              vconfig.LibvirtConfigGuestInterface)
        self.assertIsInstance(cfg.devices[4],
                              vconfig.LibvirtConfigGuestSerial)
        self.assertIsInstance(cfg.devices[5],
                              vconfig.LibvirtConfigGuestSerial)
        self.assertIsInstance(cfg.devices[6],
                              vconfig.LibvirtConfigGuestInput)
        self.assertIsInstance(cfg.devices[7],
                              vconfig.LibvirtConfigGuestGraphics)
        self.assertIsInstance(cfg.devices[8],
                              vconfig.LibvirtConfigGuestVideo)
        self.assertIsInstance(cfg.devices[9],
                              vconfig.LibvirtConfigMemoryBalloon)
        # Nova metadata element: package/name/creationTime plus owner and
        # flavor sub-elements mirroring the context and flavor above.
        self.assertEqual(len(cfg.metadata), 1)
        self.assertIsInstance(cfg.metadata[0],
                              vconfig.LibvirtConfigGuestMetaNovaInstance)
        self.assertEqual(version.version_string_with_package(),
                         cfg.metadata[0].package)
        self.assertEqual("purple tomatoes",
                         cfg.metadata[0].name)
        self.assertEqual(1234567.89,
                         cfg.metadata[0].creationTime)
        self.assertEqual("image",
                         cfg.metadata[0].roottype)
        self.assertEqual(str(instance_ref["image_ref"]),
                         cfg.metadata[0].rootid)
        self.assertIsInstance(cfg.metadata[0].owner,
                              vconfig.LibvirtConfigGuestMetaNovaOwner)
        self.assertEqual(456,
                         cfg.metadata[0].owner.userid)
        self.assertEqual("pie",
                         cfg.metadata[0].owner.username)
        self.assertEqual(123,
                         cfg.metadata[0].owner.projectid)
        self.assertEqual("aubergine",
                         cfg.metadata[0].owner.projectname)
        self.assertIsInstance(cfg.metadata[0].flavor,
                              vconfig.LibvirtConfigGuestMetaNovaFlavor)
        self.assertEqual("m1.small",
                         cfg.metadata[0].flavor.name)
        self.assertEqual(6,
                         cfg.metadata[0].flavor.memory)
        self.assertEqual(28,
                         cfg.metadata[0].flavor.vcpus)
        self.assertEqual(496,
                         cfg.metadata[0].flavor.disk)
        self.assertEqual(8128,
                         cfg.metadata[0].flavor.ephemeral)
        self.assertEqual(33550336,
                         cfg.metadata[0].flavor.swap)
    def test_get_guest_config_lxc(self):
        """LXC guests boot via /sbin/init in EXE mode with a filesystem
        root, one NIC and a console.
        """
        self.flags(virt_type='lxc', group='libvirt')
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance_ref = objects.Instance(**self.test_instance)
        cfg = drvr._get_guest_config(instance_ref,
                                     _fake_network_info(self.stubs, 1),
                                     None, {'mapping': {}})
        self.assertEqual(instance_ref["uuid"], cfg.uuid)
        self.assertEqual(2 * units.Mi, cfg.memory)
        self.assertEqual(1, cfg.vcpus)
        self.assertEqual(vm_mode.EXE, cfg.os_type)
        self.assertEqual("/sbin/init", cfg.os_init_path)
        self.assertEqual("console=tty0 console=ttyS0", cfg.os_cmdline)
        self.assertIsNone(cfg.os_root)
        self.assertEqual(3, len(cfg.devices))
        self.assertIsInstance(cfg.devices[0],
                              vconfig.LibvirtConfigGuestFilesys)
        self.assertIsInstance(cfg.devices[1],
                              vconfig.LibvirtConfigGuestInterface)
        self.assertIsInstance(cfg.devices[2],
                              vconfig.LibvirtConfigGuestConsole)
    def test_get_guest_config_lxc_with_id_maps(self):
        """Configured uid/gid maps show up as UID/GID map elements in the
        LXC guest config.
        """
        self.flags(virt_type='lxc', group='libvirt')
        self.flags(uid_maps=['0:1000:100'], group='libvirt')
        self.flags(gid_maps=['0:1000:100'], group='libvirt')
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance_ref = objects.Instance(**self.test_instance)
        cfg = drvr._get_guest_config(instance_ref,
                                     _fake_network_info(self.stubs, 1),
                                     None, {'mapping': {}})
        self.assertEqual(instance_ref["uuid"], cfg.uuid)
        self.assertEqual(2 * units.Mi, cfg.memory)
        self.assertEqual(1, cfg.vcpus)
        self.assertEqual(vm_mode.EXE, cfg.os_type)
        self.assertEqual("/sbin/init", cfg.os_init_path)
        self.assertEqual("console=tty0 console=ttyS0", cfg.os_cmdline)
        self.assertIsNone(cfg.os_root)
        self.assertEqual(3, len(cfg.devices))
        self.assertIsInstance(cfg.devices[0],
                              vconfig.LibvirtConfigGuestFilesys)
        self.assertIsInstance(cfg.devices[1],
                              vconfig.LibvirtConfigGuestInterface)
        self.assertIsInstance(cfg.devices[2],
                              vconfig.LibvirtConfigGuestConsole)
        # One UID map and one GID map, mirroring the flags above.
        self.assertEqual(len(cfg.idmaps), 2)
        self.assertIsInstance(cfg.idmaps[0],
                              vconfig.LibvirtConfigGuestUIDMap)
        self.assertIsInstance(cfg.idmaps[1],
                              vconfig.LibvirtConfigGuestGIDMap)
    def test_get_guest_config_numa_host_instance_fits(self):
        """When the instance fits within the host's NUMA layout no cpuset,
        pinning or guest NUMA topology is generated.
        """
        instance_ref = objects.Instance(**self.test_instance)
        image_meta = {}
        flavor = objects.Flavor(memory_mb=1, vcpus=2, root_gb=496,
                                ephemeral_gb=8128, swap=33550336, name='fake',
                                extra_specs={})
        instance_ref.flavor = flavor
        caps = vconfig.LibvirtConfigCaps()
        caps.host = vconfig.LibvirtConfigCapsHost()
        caps.host.cpu = vconfig.LibvirtConfigCPU()
        caps.host.cpu.arch = "x86_64"
        caps.host.topology = self._fake_caps_numa_topology()
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)
        with contextlib.nested(
                mock.patch.object(host.Host, 'has_min_version',
                                  return_value=True),
                mock.patch.object(host.Host, "get_capabilities",
                                  return_value=caps)):
            cfg = drvr._get_guest_config(instance_ref, [], {}, disk_info)
            self.assertIsNone(cfg.cpuset)
            self.assertEqual(0, len(cfg.cputune.vcpupin))
            self.assertIsNone(cfg.cpu.numa)
    def test_get_guest_config_numa_host_instance_no_fit(self):
        """When the instance cannot fit a NUMA cell, the guest falls back
        to the vcpu pin set as its cpuset with no guest NUMA topology.
        """
        instance_ref = objects.Instance(**self.test_instance)
        image_meta = {}
        flavor = objects.Flavor(memory_mb=4096, vcpus=4, root_gb=496,
                                ephemeral_gb=8128, swap=33550336, name='fake',
                                extra_specs={})
        instance_ref.flavor = flavor
        caps = vconfig.LibvirtConfigCaps()
        caps.host = vconfig.LibvirtConfigCapsHost()
        caps.host.cpu = vconfig.LibvirtConfigCPU()
        caps.host.cpu.arch = "x86_64"
        caps.host.topology = self._fake_caps_numa_topology()
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)
        with contextlib.nested(
                mock.patch.object(host.Host, "get_capabilities",
                                  return_value=caps),
                mock.patch.object(
                    hardware, 'get_vcpu_pin_set', return_value=set([3])),
                mock.patch.object(random, 'choice')
            ) as (get_host_cap_mock,
                  get_vcpu_pin_set_mock, choice_mock):
            cfg = drvr._get_guest_config(instance_ref, [], {}, disk_info)
            # random.choice (cell selection) must not have been needed.
            self.assertFalse(choice_mock.called)
            self.assertEqual(set([3]), cfg.cpuset)
            self.assertEqual(0, len(cfg.cputune.vcpupin))
            self.assertIsNone(cfg.cpu.numa)
def _test_get_guest_memory_backing_config(
self, host_topology, inst_topology, numatune):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
with mock.patch.object(
drvr, "_get_host_numa_topology",
return_value=host_topology):
return drvr._get_guest_memory_backing_config(
inst_topology, numatune)
    @mock.patch.object(host.Host,
                       'has_min_version', return_value=True)
    def test_get_guest_memory_backing_config_large_success(self, mock_version):
        """A 2M pagesize request yields one hugepages element bound to the
        guest cell.
        """
        host_topology = objects.NUMATopology(
            cells=[
                objects.NUMACell(
                    id=3, cpuset=set([1]), memory=1024, mempages=[
                        objects.NUMAPagesTopology(size_kb=4, total=2000,
                                                  used=0),
                        objects.NUMAPagesTopology(size_kb=2048, total=512,
                                                  used=0),
                        objects.NUMAPagesTopology(size_kb=1048576, total=0,
                                                  used=0),
                    ])])
        inst_topology = objects.InstanceNUMATopology(cells=[
            objects.InstanceNUMACell(
                id=3, cpuset=set([0, 1]), memory=1024, pagesize=2048)])
        numa_tune = vconfig.LibvirtConfigGuestNUMATune()
        numa_tune.memnodes = [vconfig.LibvirtConfigGuestNUMATuneMemNode()]
        # Guest cell 0 maps onto host node 3.
        numa_tune.memnodes[0].cellid = 0
        numa_tune.memnodes[0].nodeset = [3]
        result = self._test_get_guest_memory_backing_config(
            host_topology, inst_topology, numa_tune)
        self.assertEqual(1, len(result.hugepages))
        self.assertEqual(2048, result.hugepages[0].size_kb)
        self.assertEqual([0], result.hugepages[0].nodeset)
    @mock.patch.object(host.Host,
                       'has_min_version', return_value=True)
    def test_get_guest_memory_backing_config_smallest(self, mock_version):
        """Requesting the smallest (4K) pagesize produces no memory-backing
        element at all.
        """
        host_topology = objects.NUMATopology(
            cells=[
                objects.NUMACell(
                    id=3, cpuset=set([1]), memory=1024, mempages=[
                        objects.NUMAPagesTopology(size_kb=4, total=2000,
                                                  used=0),
                        objects.NUMAPagesTopology(size_kb=2048, total=512,
                                                  used=0),
                        objects.NUMAPagesTopology(size_kb=1048576, total=0,
                                                  used=0),
                    ])])
        inst_topology = objects.InstanceNUMATopology(cells=[
            objects.InstanceNUMACell(
                id=3, cpuset=set([0, 1]), memory=1024, pagesize=4)])
        numa_tune = vconfig.LibvirtConfigGuestNUMATune()
        numa_tune.memnodes = [vconfig.LibvirtConfigGuestNUMATuneMemNode()]
        numa_tune.memnodes[0].cellid = 0
        numa_tune.memnodes[0].nodeset = [3]
        result = self._test_get_guest_memory_backing_config(
            host_topology, inst_topology, numa_tune)
        self.assertIsNone(result)
def test_get_guest_config_numa_host_instance_pci_no_numa_info(self):
instance_ref = objects.Instance(**self.test_instance)
image_meta = {}
flavor = objects.Flavor(memory_mb=1, vcpus=2, root_gb=496,
ephemeral_gb=8128, swap=33550336, name='fake',
extra_specs={})
instance_ref.flavor = flavor
caps = vconfig.LibvirtConfigCaps()
caps.host = vconfig.LibvirtConfigCapsHost()
caps.host.cpu = vconfig.LibvirtConfigCPU()
caps.host.cpu.arch = "x86_64"
caps.host.topology = self._fake_caps_numa_topology()
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta)
pci_device_info = dict(test_pci_device.fake_db_dev)
pci_device_info.update(compute_node_id=1,
label='fake',
status='available',
address='0000:00:00.1',
instance_uuid=None,
request_id=None,
extra_info={},
numa_node=None)
pci_device = objects.PciDevice(**pci_device_info)
with contextlib.nested(
mock.patch.object(host.Host, 'has_min_version',
return_value=True),
mock.patch.object(
host.Host, "get_capabilities", return_value=caps),
mock.patch.object(
hardware, 'get_vcpu_pin_set', return_value=set([3])),
mock.patch.object(host.Host, 'get_online_cpus',
return_value=set(range(8))),
mock.patch.object(pci_manager, "get_instance_pci_devs",
return_value=[pci_device])):
cfg = conn._get_guest_config(instance_ref, [], {}, disk_info)
self.assertEqual(set([3]), cfg.cpuset)
self.assertEqual(0, len(cfg.cputune.vcpupin))
self.assertIsNone(cfg.cpu.numa)
def test_get_guest_config_numa_host_instance_2pci_no_fit(self):
instance_ref = objects.Instance(**self.test_instance)
image_meta = {}
flavor = objects.Flavor(memory_mb=4096, vcpus=4, root_gb=496,
ephemeral_gb=8128, swap=33550336, name='fake',
extra_specs={})
instance_ref.flavor = flavor
caps = vconfig.LibvirtConfigCaps()
caps.host = vconfig.LibvirtConfigCapsHost()
caps.host.cpu = vconfig.LibvirtConfigCPU()
caps.host.cpu.arch = "x86_64"
caps.host.topology = self._fake_caps_numa_topology()
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta)
pci_device_info = dict(test_pci_device.fake_db_dev)
pci_device_info.update(compute_node_id=1,
label='fake',
status='available',
address='0000:00:00.1',
instance_uuid=None,
request_id=None,
extra_info={},
numa_node=1)
pci_device = objects.PciDevice(**pci_device_info)
pci_device_info.update(numa_node=0, address='0000:00:00.2')
pci_device2 = objects.PciDevice(**pci_device_info)
with contextlib.nested(
mock.patch.object(
host.Host, "get_capabilities", return_value=caps),
mock.patch.object(
hardware, 'get_vcpu_pin_set', return_value=set([3])),
mock.patch.object(random, 'choice'),
mock.patch.object(pci_manager, "get_instance_pci_devs",
return_value=[pci_device, pci_device2])
) as (get_host_cap_mock,
get_vcpu_pin_set_mock, choice_mock, pci_mock):
cfg = conn._get_guest_config(instance_ref, [], {}, disk_info)
self.assertFalse(choice_mock.called)
self.assertEqual(set([3]), cfg.cpuset)
self.assertEqual(0, len(cfg.cputune.vcpupin))
self.assertIsNone(cfg.cpu.numa)
    @mock.patch.object(fakelibvirt.Connection, 'getType')
    @mock.patch.object(fakelibvirt.Connection, 'getVersion')
    @mock.patch.object(fakelibvirt.Connection, 'getLibVersion')
    @mock.patch.object(host.Host, 'get_capabilities')
    @mock.patch.object(libvirt_driver.LibvirtDriver, '_set_host_enabled')
    def _test_get_guest_config_numa_unsupported(self, fake_lib_version,
                                                fake_version, fake_type,
                                                fake_arch, exception_class,
                                                pagesize, mock_host,
                                                mock_caps, mock_lib_version,
                                                mock_version, mock_type):
        """Assert _get_guest_config raises *exception_class* for the given
        hypervisor type/arch/version combination.

        The trailing mock_* parameters are injected by the decorators in
        bottom-up order (_set_host_enabled first, getType last).
        """
        instance_topology = objects.InstanceNUMATopology(
            cells=[objects.InstanceNUMACell(
                id=0, cpuset=set([0]),
                memory=1024, pagesize=pagesize)])
        instance_ref = objects.Instance(**self.test_instance)
        instance_ref.numa_topology = instance_topology
        image_meta = {}
        flavor = objects.Flavor(memory_mb=1, vcpus=2, root_gb=496,
                                ephemeral_gb=8128, swap=33550336, name='fake',
                                extra_specs={})
        instance_ref.flavor = flavor
        caps = vconfig.LibvirtConfigCaps()
        caps.host = vconfig.LibvirtConfigCapsHost()
        caps.host.cpu = vconfig.LibvirtConfigCPU()
        caps.host.cpu.arch = fake_arch
        caps.host.topology = self._fake_caps_numa_topology()
        mock_type.return_value = fake_type
        mock_version.return_value = fake_version
        mock_lib_version.return_value = fake_lib_version
        mock_caps.return_value = caps
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)
        self.assertRaises(exception_class,
                          drvr._get_guest_config,
                          instance_ref, [], {}, disk_info)
def test_get_guest_config_numa_old_version_libvirt(self):
self.flags(virt_type='kvm', group='libvirt')
self._test_get_guest_config_numa_unsupported(
utils.convert_version_to_int(
libvirt_driver.MIN_LIBVIRT_NUMA_VERSION) - 1,
utils.convert_version_to_int(
libvirt_driver.MIN_QEMU_NUMA_HUGEPAGE_VERSION),
host.HV_DRIVER_QEMU,
arch.X86_64,
exception.NUMATopologyUnsupported,
None)
def test_get_guest_config_numa_bad_version_libvirt(self):
self.flags(virt_type='kvm', group='libvirt')
self._test_get_guest_config_numa_unsupported(
utils.convert_version_to_int(
libvirt_driver.BAD_LIBVIRT_NUMA_VERSIONS[0]),
utils.convert_version_to_int(
libvirt_driver.MIN_QEMU_NUMA_HUGEPAGE_VERSION),
host.HV_DRIVER_QEMU,
arch.X86_64,
exception.NUMATopologyUnsupported,
None)
def test_get_guest_config_numa_old_version_qemu(self):
self.flags(virt_type='kvm', group='libvirt')
self._test_get_guest_config_numa_unsupported(
utils.convert_version_to_int(
libvirt_driver.MIN_LIBVIRT_NUMA_VERSION),
utils.convert_version_to_int(
libvirt_driver.MIN_QEMU_NUMA_HUGEPAGE_VERSION) - 1,
host.HV_DRIVER_QEMU,
arch.X86_64,
exception.NUMATopologyUnsupported,
None)
def test_get_guest_config_numa_other_arch_qemu(self):
self.flags(virt_type='kvm', group='libvirt')
self._test_get_guest_config_numa_unsupported(
utils.convert_version_to_int(
libvirt_driver.MIN_LIBVIRT_NUMA_VERSION),
utils.convert_version_to_int(
libvirt_driver.MIN_QEMU_NUMA_HUGEPAGE_VERSION),
host.HV_DRIVER_QEMU,
arch.PPC64,
exception.NUMATopologyUnsupported,
None)
def test_get_guest_config_numa_xen(self):
self.flags(virt_type='xen', group='libvirt')
self._test_get_guest_config_numa_unsupported(
utils.convert_version_to_int(
libvirt_driver.MIN_LIBVIRT_NUMA_VERSION),
utils.convert_version_to_int((4, 5, 0)),
'XEN',
arch.X86_64,
exception.NUMATopologyUnsupported,
None)
def test_get_guest_config_numa_old_pages_libvirt(self):
self.flags(virt_type='kvm', group='libvirt')
self._test_get_guest_config_numa_unsupported(
utils.convert_version_to_int(
libvirt_driver.MIN_LIBVIRT_HUGEPAGE_VERSION) - 1,
utils.convert_version_to_int(
libvirt_driver.MIN_QEMU_NUMA_HUGEPAGE_VERSION),
host.HV_DRIVER_QEMU,
arch.X86_64,
exception.MemoryPagesUnsupported,
2048)
def test_get_guest_config_numa_old_pages_qemu(self):
self.flags(virt_type='kvm', group='libvirt')
self._test_get_guest_config_numa_unsupported(
utils.convert_version_to_int(
libvirt_driver.MIN_LIBVIRT_HUGEPAGE_VERSION),
utils.convert_version_to_int(
libvirt_driver.MIN_QEMU_NUMA_HUGEPAGE_VERSION) - 1,
host.HV_DRIVER_QEMU,
arch.X86_64,
exception.NUMATopologyUnsupported,
2048)
    def test_get_guest_config_numa_host_instance_fit_w_cpu_pinset(self):
        """When the instance fits a single host cell, vcpu_pin_set narrows
        the guest cpuset and no per-vCPU pinning or NUMA config is emitted.
        """
        instance_ref = objects.Instance(**self.test_instance)
        image_meta = {}
        flavor = objects.Flavor(memory_mb=1024, vcpus=2, root_gb=496,
                                ephemeral_gb=8128, swap=33550336, name='fake',
                                extra_specs={})
        instance_ref.flavor = flavor
        # Fake host capabilities: x86_64 with a NUMA topology large enough
        # (4 GiB) that the whole instance fits in one cell.
        caps = vconfig.LibvirtConfigCaps()
        caps.host = vconfig.LibvirtConfigCapsHost()
        caps.host.cpu = vconfig.LibvirtConfigCPU()
        caps.host.cpu.arch = "x86_64"
        caps.host.topology = self._fake_caps_numa_topology(kb_mem=4194304)
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)
        with contextlib.nested(
                mock.patch.object(host.Host, 'has_min_version',
                                  return_value=True),
                mock.patch.object(host.Host, "get_capabilities",
                                  return_value=caps),
                mock.patch.object(
                    hardware, 'get_vcpu_pin_set', return_value=set([2, 3])),
                mock.patch.object(host.Host, 'get_online_cpus',
                                  return_value=set(range(8)))
                ) as (has_min_version_mock, get_host_cap_mock,
                        get_vcpu_pin_set_mock, get_online_cpus_mock):
            cfg = drvr._get_guest_config(instance_ref, [], {}, disk_info)
            # NOTE(ndipanov): we make sure that pin_set was taken into account
            # when choosing viable cells
            self.assertEqual(set([2, 3]), cfg.cpuset)
            self.assertEqual(0, len(cfg.cputune.vcpupin))
            self.assertIsNone(cfg.cpu.numa)
    def test_get_guest_config_non_numa_host_instance_topo(self):
        """An instance NUMA topology on a non-NUMA host still produces a
        guest CPU NUMA element, but no cpuset, pinning, or numatune.
        """
        instance_topology = objects.InstanceNUMATopology(
            cells=[objects.InstanceNUMACell(
                id=0, cpuset=set([0]), memory=1024),
                   objects.InstanceNUMACell(
                       id=1, cpuset=set([2]), memory=1024)])
        instance_ref = objects.Instance(**self.test_instance)
        instance_ref.numa_topology = instance_topology
        image_meta = {}
        flavor = objects.Flavor(memory_mb=2048, vcpus=2, root_gb=496,
                                ephemeral_gb=8128, swap=33550336, name='fake',
                                extra_specs={})
        instance_ref.flavor = flavor
        # Host capabilities deliberately report no NUMA topology.
        caps = vconfig.LibvirtConfigCaps()
        caps.host = vconfig.LibvirtConfigCapsHost()
        caps.host.cpu = vconfig.LibvirtConfigCPU()
        caps.host.cpu.arch = "x86_64"
        caps.host.topology = None
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)
        with contextlib.nested(
                mock.patch.object(
                    objects.InstanceNUMATopology, "get_by_instance_uuid",
                    return_value=instance_topology),
                mock.patch.object(host.Host, 'has_min_version',
                                  return_value=True),
                mock.patch.object(host.Host, "get_capabilities",
                                  return_value=caps)):
            cfg = drvr._get_guest_config(instance_ref, [], {}, disk_info)
            self.assertIsNone(cfg.cpuset)
            self.assertEqual(0, len(cfg.cputune.vcpupin))
            self.assertIsNone(cfg.numatune)
            self.assertIsNotNone(cfg.cpu.numa)
            # Each guest NUMA cell mirrors the corresponding instance cell;
            # memory is converted from MiB to KiB.
            for instance_cell, numa_cfg_cell in zip(
                    instance_topology.cells, cfg.cpu.numa.cells):
                self.assertEqual(instance_cell.id, numa_cfg_cell.id)
                self.assertEqual(instance_cell.cpuset, numa_cfg_cell.cpus)
                self.assertEqual(instance_cell.memory * units.Ki,
                                 numa_cfg_cell.memory)
    def test_get_guest_config_numa_host_instance_topo(self):
        """On a NUMA host, vCPUs are pinned per cell (restricted by
        vcpu_pin_set), the emulator is pinned to the union, and numatune
        memnodes use strict mode.
        """
        instance_topology = objects.InstanceNUMATopology(
            cells=[objects.InstanceNUMACell(
                id=1, cpuset=set([0, 1]), memory=1024, pagesize=None),
                   objects.InstanceNUMACell(
                       id=2, cpuset=set([2, 3]), memory=1024,
                       pagesize=None)])
        instance_ref = objects.Instance(**self.test_instance)
        instance_ref.numa_topology = instance_topology
        image_meta = {}
        flavor = objects.Flavor(memory_mb=2048, vcpus=4, root_gb=496,
                                ephemeral_gb=8128, swap=33550336, name='fake',
                                extra_specs={})
        instance_ref.flavor = flavor
        caps = vconfig.LibvirtConfigCaps()
        caps.host = vconfig.LibvirtConfigCapsHost()
        caps.host.cpu = vconfig.LibvirtConfigCPU()
        caps.host.cpu.arch = "x86_64"
        caps.host.topology = self._fake_caps_numa_topology()
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)
        with contextlib.nested(
                mock.patch.object(
                    objects.InstanceNUMATopology, "get_by_instance_uuid",
                    return_value=instance_topology),
                mock.patch.object(host.Host, 'has_min_version',
                                  return_value=True),
                mock.patch.object(host.Host, "get_capabilities",
                                  return_value=caps),
                mock.patch.object(
                    hardware, 'get_vcpu_pin_set',
                    return_value=set([2, 3, 4, 5])),
                mock.patch.object(host.Host, 'get_online_cpus',
                                  return_value=set(range(8))),
                ):
            cfg = drvr._get_guest_config(instance_ref, [], {}, disk_info)
            self.assertIsNone(cfg.cpuset)
            # Test that the pinning is correct and limited to allowed only
            self.assertEqual(0, cfg.cputune.vcpupin[0].id)
            self.assertEqual(set([2, 3]), cfg.cputune.vcpupin[0].cpuset)
            self.assertEqual(1, cfg.cputune.vcpupin[1].id)
            self.assertEqual(set([2, 3]), cfg.cputune.vcpupin[1].cpuset)
            self.assertEqual(2, cfg.cputune.vcpupin[2].id)
            self.assertEqual(set([4, 5]), cfg.cputune.vcpupin[2].cpuset)
            self.assertEqual(3, cfg.cputune.vcpupin[3].id)
            self.assertEqual(set([4, 5]), cfg.cputune.vcpupin[3].cpuset)
            self.assertIsNotNone(cfg.cpu.numa)
            # Emulator thread is pinned to the union of all vCPU pins.
            self.assertIsInstance(cfg.cputune.emulatorpin,
                                  vconfig.LibvirtConfigGuestCPUTuneEmulatorPin)
            self.assertEqual(set([2, 3, 4, 5]), cfg.cputune.emulatorpin.cpuset)
            # Guest NUMA cell ids are renumbered 0..N-1 regardless of the
            # instance cell ids; memory is converted from MiB to KiB.
            for instance_cell, numa_cfg_cell, index in zip(
                    instance_topology.cells,
                    cfg.cpu.numa.cells,
                    range(len(instance_topology.cells))):
                self.assertEqual(index, numa_cfg_cell.id)
                self.assertEqual(instance_cell.cpuset, numa_cfg_cell.cpus)
                self.assertEqual(instance_cell.memory * units.Ki,
                                 numa_cfg_cell.memory)
            allnodes = [cell.id for cell in instance_topology.cells]
            self.assertEqual(allnodes, cfg.numatune.memory.nodeset)
            self.assertEqual("strict", cfg.numatune.memory.mode)
            for instance_cell, memnode, index in zip(
                    instance_topology.cells,
                    cfg.numatune.memnodes,
                    range(len(instance_topology.cells))):
                self.assertEqual(index, memnode.cellid)
                self.assertEqual([instance_cell.id], memnode.nodeset)
                self.assertEqual("strict", memnode.mode)
    def test_get_guest_config_numa_host_instance_topo_reordered(self):
        """Instance cells with non-monotonic host cell ids (3 then 0) still
        map correctly: pins follow the host cells, guest cell ids are
        renumbered sequentially.
        """
        instance_topology = objects.InstanceNUMATopology(
            cells=[objects.InstanceNUMACell(
                id=3, cpuset=set([0, 1]), memory=1024),
                   objects.InstanceNUMACell(
                       id=0, cpuset=set([2, 3]), memory=1024)])
        instance_ref = objects.Instance(**self.test_instance)
        instance_ref.numa_topology = instance_topology
        image_meta = {}
        flavor = objects.Flavor(memory_mb=2048, vcpus=4, root_gb=496,
                                ephemeral_gb=8128, swap=33550336, name='fake',
                                extra_specs={})
        instance_ref.flavor = flavor
        caps = vconfig.LibvirtConfigCaps()
        caps.host = vconfig.LibvirtConfigCapsHost()
        caps.host.cpu = vconfig.LibvirtConfigCPU()
        caps.host.cpu.arch = "x86_64"
        caps.host.topology = self._fake_caps_numa_topology()
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)
        with contextlib.nested(
                mock.patch.object(
                    objects.InstanceNUMATopology, "get_by_instance_uuid",
                    return_value=instance_topology),
                mock.patch.object(host.Host, 'has_min_version',
                                  return_value=True),
                mock.patch.object(host.Host, "get_capabilities",
                                  return_value=caps),
                mock.patch.object(host.Host, 'get_online_cpus',
                                  return_value=set(range(8))),
                ):
            cfg = drvr._get_guest_config(instance_ref, [], {}, disk_info)
            self.assertIsNone(cfg.cpuset)
            # Test that the pinning is correct and limited to allowed only
            self.assertEqual(0, cfg.cputune.vcpupin[0].id)
            self.assertEqual(set([6, 7]), cfg.cputune.vcpupin[0].cpuset)
            self.assertEqual(1, cfg.cputune.vcpupin[1].id)
            self.assertEqual(set([6, 7]), cfg.cputune.vcpupin[1].cpuset)
            self.assertEqual(2, cfg.cputune.vcpupin[2].id)
            self.assertEqual(set([0, 1]), cfg.cputune.vcpupin[2].cpuset)
            self.assertEqual(3, cfg.cputune.vcpupin[3].id)
            self.assertEqual(set([0, 1]), cfg.cputune.vcpupin[3].cpuset)
            self.assertIsNotNone(cfg.cpu.numa)
            # Emulator thread is pinned to the union of all vCPU pins.
            self.assertIsInstance(cfg.cputune.emulatorpin,
                                  vconfig.LibvirtConfigGuestCPUTuneEmulatorPin)
            self.assertEqual(set([0, 1, 6, 7]), cfg.cputune.emulatorpin.cpuset)
            # Guest cell ids are sequential (0, 1) even though the instance
            # cells reference host cells 3 and 0.
            for index, (instance_cell, numa_cfg_cell) in enumerate(zip(
                    instance_topology.cells,
                    cfg.cpu.numa.cells)):
                self.assertEqual(index, numa_cfg_cell.id)
                self.assertEqual(instance_cell.cpuset, numa_cfg_cell.cpus)
                self.assertEqual(instance_cell.memory * units.Ki,
                                 numa_cfg_cell.memory)
            allnodes = set([cell.id for cell in instance_topology.cells])
            self.assertEqual(allnodes, set(cfg.numatune.memory.nodeset))
            self.assertEqual("strict", cfg.numatune.memory.mode)
            for index, (instance_cell, memnode) in enumerate(zip(
                    instance_topology.cells,
                    cfg.numatune.memnodes)):
                self.assertEqual(index, memnode.cellid)
                self.assertEqual([instance_cell.id], memnode.nodeset)
                self.assertEqual("strict", memnode.mode)
def test_get_guest_config_numa_host_instance_topo_cpu_pinning(self):
instance_topology = objects.InstanceNUMATopology(
cells=[objects.InstanceNUMACell(
id=1, cpuset=set([0, 1]), memory=1024,
cpu_pinning={0: 24, 1: 25}),
objects.InstanceNUMACell(
id=0, cpuset=set([2, 3]), memory=1024,
cpu_pinning={2: 0, 3: 1})])
instance_ref = objects.Instance(**self.test_instance)
instance_ref.numa_topology = instance_topology
image_meta = {}
flavor = objects.Flavor(memory_mb=2048, vcpus=2, root_gb=496,
ephemeral_gb=8128, swap=33550336, name='fake',
extra_specs={})
instance_ref.flavor = flavor
caps = vconfig.LibvirtConfigCaps()
caps.host = vconfig.LibvirtConfigCapsHost()
caps.host.cpu = vconfig.LibvirtConfigCPU()
caps.host.cpu.arch = "x86_64"
caps.host.topology = self._fake_caps_numa_topology(
sockets_per_cell=4, cores_per_socket=3, threads_per_core=2)
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta)
with contextlib.nested(
mock.patch.object(
objects.InstanceNUMATopology, "get_by_instance_uuid",
return_value=instance_topology),
mock.patch.object(host.Host, 'has_min_version',
return_value=True),
mock.patch.object(host.Host, "get_capabilities",
return_value=caps),
mock.patch.object(host.Host, 'get_online_cpus',
return_value=set(range(8))),
):
cfg = conn._get_guest_config(instance_ref, [], {}, disk_info)
self.assertIsNone(cfg.cpuset)
# Test that the pinning is correct and limited to allowed only
self.assertEqual(0, cfg.cputune.vcpupin[0].id)
self.assertEqual(set([24]), cfg.cputune.vcpupin[0].cpuset)
self.assertEqual(1, cfg.cputune.vcpupin[1].id)
self.assertEqual(set([25]), cfg.cputune.vcpupin[1].cpuset)
self.assertEqual(2, cfg.cputune.vcpupin[2].id)
self.assertEqual(set([0]), cfg.cputune.vcpupin[2].cpuset)
self.assertEqual(3, cfg.cputune.vcpupin[3].id)
self.assertEqual(set([1]), cfg.cputune.vcpupin[3].cpuset)
self.assertIsNotNone(cfg.cpu.numa)
# Emulator must be pinned to union of cfg.cputune.vcpupin[*].cpuset
self.assertIsInstance(cfg.cputune.emulatorpin,
vconfig.LibvirtConfigGuestCPUTuneEmulatorPin)
self.assertEqual(set([0, 1, 24, 25]),
cfg.cputune.emulatorpin.cpuset)
for i, (instance_cell, numa_cfg_cell) in enumerate(zip(
instance_topology.cells, cfg.cpu.numa.cells)):
self.assertEqual(i, numa_cfg_cell.id)
self.assertEqual(instance_cell.cpuset, numa_cfg_cell.cpus)
self.assertEqual(instance_cell.memory * units.Ki,
numa_cfg_cell.memory)
allnodes = set([cell.id for cell in instance_topology.cells])
self.assertEqual(allnodes, set(cfg.numatune.memory.nodeset))
self.assertEqual("strict", cfg.numatune.memory.mode)
for i, (instance_cell, memnode) in enumerate(zip(
instance_topology.cells, cfg.numatune.memnodes)):
self.assertEqual(i, memnode.cellid)
self.assertEqual([instance_cell.id], memnode.nodeset)
self.assertEqual("strict", memnode.mode)
def test_get_cpu_numa_config_from_instance(self):
topology = objects.InstanceNUMATopology(cells=[
objects.InstanceNUMACell(id=0, cpuset=set([1, 2]), memory=128),
objects.InstanceNUMACell(id=1, cpuset=set([3, 4]), memory=128),
])
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
conf = drvr._get_cpu_numa_config_from_instance(topology)
self.assertIsInstance(conf, vconfig.LibvirtConfigGuestCPUNUMA)
self.assertEqual(0, conf.cells[0].id)
self.assertEqual(set([1, 2]), conf.cells[0].cpus)
self.assertEqual(131072, conf.cells[0].memory)
self.assertEqual(1, conf.cells[1].id)
self.assertEqual(set([3, 4]), conf.cells[1].cpus)
self.assertEqual(131072, conf.cells[1].memory)
def test_get_cpu_numa_config_from_instance_none(self):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
conf = drvr._get_cpu_numa_config_from_instance(None)
self.assertIsNone(conf)
def test_get_guest_config_clock(self):
self.flags(virt_type='kvm', group='libvirt')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = objects.Instance(**self.test_instance)
image_meta = {}
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta)
image_meta = {}
hpet_map = {
arch.X86_64: True,
arch.I686: True,
arch.PPC: False,
arch.PPC64: False,
arch.ARMV7: False,
arch.AARCH64: False,
}
for guestarch, expect_hpet in hpet_map.items():
with mock.patch.object(libvirt_driver.libvirt_utils,
'get_arch',
return_value=guestarch):
cfg = drvr._get_guest_config(instance_ref, [],
image_meta,
disk_info)
self.assertIsInstance(cfg.clock,
vconfig.LibvirtConfigGuestClock)
self.assertEqual(cfg.clock.offset, "utc")
self.assertIsInstance(cfg.clock.timers[0],
vconfig.LibvirtConfigGuestTimer)
self.assertIsInstance(cfg.clock.timers[1],
vconfig.LibvirtConfigGuestTimer)
self.assertEqual(cfg.clock.timers[0].name, "pit")
self.assertEqual(cfg.clock.timers[0].tickpolicy,
"delay")
self.assertEqual(cfg.clock.timers[1].name, "rtc")
self.assertEqual(cfg.clock.timers[1].tickpolicy,
"catchup")
if expect_hpet:
self.assertEqual(3, len(cfg.clock.timers))
self.assertIsInstance(cfg.clock.timers[2],
vconfig.LibvirtConfigGuestTimer)
self.assertEqual('hpet', cfg.clock.timers[2].name)
self.assertFalse(cfg.clock.timers[2].present)
else:
self.assertEqual(2, len(cfg.clock.timers))
    @mock.patch.object(libvirt_utils, 'get_arch')
    @mock.patch.object(host.Host, 'has_min_version')
    def test_get_guest_config_windows(self, mock_version, mock_get_arch):
        """Windows guests get a localtime clock with pit, rtc, and a
        disabled hpet timer (no hypervclock when libvirt is too old).
        """
        # has_min_version False: the hypervclock timer must not appear.
        mock_version.return_value = False
        mock_get_arch.return_value = arch.I686
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance_ref = objects.Instance(**self.test_instance)
        instance_ref['os_type'] = 'windows'
        image_meta = {}
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)
        cfg = drvr._get_guest_config(instance_ref,
                                     _fake_network_info(self.stubs, 1),
                                     {}, disk_info)
        self.assertIsInstance(cfg.clock,
                              vconfig.LibvirtConfigGuestClock)
        self.assertEqual(cfg.clock.offset, "localtime")
        self.assertEqual(3, len(cfg.clock.timers), cfg.clock.timers)
        self.assertEqual("pit", cfg.clock.timers[0].name)
        self.assertEqual("rtc", cfg.clock.timers[1].name)
        self.assertEqual("hpet", cfg.clock.timers[2].name)
        self.assertFalse(cfg.clock.timers[2].present)
    @mock.patch.object(libvirt_utils, 'get_arch')
    @mock.patch.object(host.Host, 'has_min_version')
    def test_get_guest_config_windows_timer(self, mock_version, mock_get_arch):
        """With a new-enough libvirt, Windows guests also get an enabled
        hypervclock timer and the HyperV enlightenment feature.
        """
        # has_min_version True: hypervclock timer is added as a 4th timer.
        mock_version.return_value = True
        mock_get_arch.return_value = arch.I686
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance_ref = objects.Instance(**self.test_instance)
        instance_ref['os_type'] = 'windows'
        image_meta = {}
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)
        cfg = drvr._get_guest_config(instance_ref,
                                     _fake_network_info(self.stubs, 1),
                                     {}, disk_info)
        self.assertIsInstance(cfg.clock,
                              vconfig.LibvirtConfigGuestClock)
        self.assertEqual(cfg.clock.offset, "localtime")
        self.assertEqual(4, len(cfg.clock.timers), cfg.clock.timers)
        self.assertEqual("pit", cfg.clock.timers[0].name)
        self.assertEqual("rtc", cfg.clock.timers[1].name)
        self.assertEqual("hpet", cfg.clock.timers[2].name)
        self.assertFalse(cfg.clock.timers[2].present)
        self.assertEqual("hypervclock", cfg.clock.timers[3].name)
        self.assertTrue(cfg.clock.timers[3].present)

        self.assertEqual(3, len(cfg.features))
        self.assertIsInstance(cfg.features[0],
                              vconfig.LibvirtConfigGuestFeatureACPI)
        self.assertIsInstance(cfg.features[1],
                              vconfig.LibvirtConfigGuestFeatureAPIC)
        self.assertIsInstance(cfg.features[2],
                              vconfig.LibvirtConfigGuestFeatureHyperV)
    @mock.patch.object(host.Host, 'has_min_version')
    def test_get_guest_config_windows_hyperv_feature1(self, mock_version):
        """With only the base HyperV-capable libvirt (1.0.0/1.1.0), the
        HyperV feature enables relaxed timers but not spinlocks or vAPIC.
        """
        def fake_version(lv_ver=None, hv_ver=None, hv_type=None):
            # Report only the base feature-set version as satisfied.
            if lv_ver == (1, 0, 0) and hv_ver == (1, 1, 0):
                return True
            return False

        mock_version.side_effect = fake_version
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance_ref = objects.Instance(**self.test_instance)
        instance_ref['os_type'] = 'windows'
        image_meta = {}
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)
        cfg = drvr._get_guest_config(instance_ref,
                                     _fake_network_info(self.stubs, 1),
                                     {}, disk_info)
        self.assertIsInstance(cfg.clock,
                              vconfig.LibvirtConfigGuestClock)
        self.assertEqual(cfg.clock.offset, "localtime")

        self.assertEqual(3, len(cfg.features))
        self.assertIsInstance(cfg.features[0],
                              vconfig.LibvirtConfigGuestFeatureACPI)
        self.assertIsInstance(cfg.features[1],
                              vconfig.LibvirtConfigGuestFeatureAPIC)
        self.assertIsInstance(cfg.features[2],
                              vconfig.LibvirtConfigGuestFeatureHyperV)
        self.assertTrue(cfg.features[2].relaxed)
        self.assertFalse(cfg.features[2].spinlocks)
        self.assertFalse(cfg.features[2].vapic)
    @mock.patch.object(host.Host, 'has_min_version')
    def test_get_guest_config_windows_hyperv_feature2(self, mock_version):
        """With a fully capable libvirt, the HyperV feature enables relaxed
        timers, spinlocks (8191 retries), and vAPIC.
        """
        mock_version.return_value = True
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance_ref = objects.Instance(**self.test_instance)
        instance_ref['os_type'] = 'windows'
        image_meta = {}
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)
        cfg = drvr._get_guest_config(instance_ref,
                                     _fake_network_info(self.stubs, 1),
                                     {}, disk_info)
        self.assertIsInstance(cfg.clock,
                              vconfig.LibvirtConfigGuestClock)
        self.assertEqual(cfg.clock.offset, "localtime")

        self.assertEqual(3, len(cfg.features))
        self.assertIsInstance(cfg.features[0],
                              vconfig.LibvirtConfigGuestFeatureACPI)
        self.assertIsInstance(cfg.features[1],
                              vconfig.LibvirtConfigGuestFeatureAPIC)
        self.assertIsInstance(cfg.features[2],
                              vconfig.LibvirtConfigGuestFeatureHyperV)
        self.assertTrue(cfg.features[2].relaxed)
        self.assertTrue(cfg.features[2].spinlocks)
        self.assertEqual(8191, cfg.features[2].spinlock_retries)
        self.assertTrue(cfg.features[2].vapic)
    def test_get_guest_config_with_two_nics(self):
        """Two NICs produce two interface devices in the expected ordered
        device list (disks, interfaces, serials, input, graphics, video,
        balloon).
        """
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)

        instance_ref = objects.Instance(**self.test_instance)
        image_meta = {}
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)
        cfg = drvr._get_guest_config(instance_ref,
                                     _fake_network_info(self.stubs, 2),
                                     {}, disk_info)
        self.assertEqual(2, len(cfg.features))
        self.assertIsInstance(cfg.features[0],
                              vconfig.LibvirtConfigGuestFeatureACPI)
        self.assertIsInstance(cfg.features[1],
                              vconfig.LibvirtConfigGuestFeatureAPIC)
        self.assertEqual(cfg.memory, 2 * units.Mi)
        self.assertEqual(cfg.vcpus, 1)
        self.assertEqual(cfg.os_type, vm_mode.HVM)
        self.assertEqual(cfg.os_boot_dev, ["hd"])
        self.assertIsNone(cfg.os_root)
        self.assertEqual(len(cfg.devices), 10)
        self.assertIsInstance(cfg.devices[0],
                              vconfig.LibvirtConfigGuestDisk)
        self.assertIsInstance(cfg.devices[1],
                              vconfig.LibvirtConfigGuestDisk)
        self.assertIsInstance(cfg.devices[2],
                              vconfig.LibvirtConfigGuestInterface)
        self.assertIsInstance(cfg.devices[3],
                              vconfig.LibvirtConfigGuestInterface)
        self.assertIsInstance(cfg.devices[4],
                              vconfig.LibvirtConfigGuestSerial)
        self.assertIsInstance(cfg.devices[5],
                              vconfig.LibvirtConfigGuestSerial)
        self.assertIsInstance(cfg.devices[6],
                              vconfig.LibvirtConfigGuestInput)
        self.assertIsInstance(cfg.devices[7],
                              vconfig.LibvirtConfigGuestGraphics)
        self.assertIsInstance(cfg.devices[8],
                              vconfig.LibvirtConfigGuestVideo)
        self.assertIsInstance(cfg.devices[9],
                              vconfig.LibvirtConfigMemoryBalloon)
    def test_get_guest_config_bug_1118829(self):
        """Regression test for bug 1118829: building the guest config must
        populate the instance's root_device_name from the disk mapping.
        """
        self.flags(virt_type='uml', group='libvirt')
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance_ref = objects.Instance(**self.test_instance)

        # Hand-built disk_info rather than get_disk_info(): the mapping is
        # all this code path needs.
        disk_info = {'disk_bus': 'virtio',
                     'cdrom_bus': 'ide',
                     'mapping': {u'vda': {'bus': 'virtio',
                                          'type': 'disk',
                                          'dev': u'vda'},
                                 'root': {'bus': 'virtio',
                                          'type': 'disk',
                                          'dev': 'vda'}}}

        # NOTE(jdg): For this specific test leave this blank
        # This will exercise the failed code path still,
        # and won't require fakes and stubs of the iscsi discovery
        block_device_info = {}
        drvr._get_guest_config(instance_ref, [], {}, disk_info,
                               None, block_device_info)
        self.assertEqual(instance_ref['root_device_name'], '/dev/vda')
    def test_get_guest_config_with_root_device_name(self):
        """A root_device_name in block_device_info becomes os_root in the
        UML guest config.
        """
        self.flags(virt_type='uml', group='libvirt')
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance_ref = objects.Instance(**self.test_instance)
        image_meta = {}

        block_device_info = {'root_device_name': '/dev/vdb'}
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta,
                                            block_device_info)
        cfg = drvr._get_guest_config(instance_ref, [], {}, disk_info,
                                     None, block_device_info)
        self.assertEqual(0, len(cfg.features))
        self.assertEqual(cfg.memory, 2 * units.Mi)
        self.assertEqual(cfg.vcpus, 1)
        self.assertEqual(cfg.os_type, "uml")
        self.assertEqual(cfg.os_boot_dev, [])
        self.assertEqual(cfg.os_root, '/dev/vdb')
        self.assertEqual(len(cfg.devices), 3)
        self.assertIsInstance(cfg.devices[0],
                              vconfig.LibvirtConfigGuestDisk)
        self.assertIsInstance(cfg.devices[1],
                              vconfig.LibvirtConfigGuestDisk)
        # UML guests use a console device rather than graphics/serial.
        self.assertIsInstance(cfg.devices[2],
                              vconfig.LibvirtConfigGuestConsole)
    def test_get_guest_config_with_block_device(self):
        """Attached volumes appear as extra guest disks (vdc, vdd) and the
        block device mappings are saved.
        """
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)

        instance_ref = objects.Instance(**self.test_instance)
        image_meta = {}
        conn_info = {'driver_volume_type': 'fake'}
        info = {'block_device_mapping': driver_block_device.convert_volumes([
                    fake_block_device.FakeDbBlockDeviceDict(
                        {'id': 1,
                         'source_type': 'volume', 'destination_type': 'volume',
                         'device_name': '/dev/vdc'}),
                    fake_block_device.FakeDbBlockDeviceDict(
                        {'id': 2,
                         'source_type': 'volume', 'destination_type': 'volume',
                         'device_name': '/dev/vdd'}),
                ])}
        info['block_device_mapping'][0]['connection_info'] = conn_info
        info['block_device_mapping'][1]['connection_info'] = conn_info

        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta,
                                            info)
        # Patch save so no DB access is attempted when connection_info is
        # persisted on the mapping.
        with mock.patch.object(
                driver_block_device.DriverVolumeBlockDevice, 'save'
        ) as mock_save:
            cfg = drvr._get_guest_config(instance_ref, [], {}, disk_info,
                                         None, info)
            self.assertIsInstance(cfg.devices[2],
                                  vconfig.LibvirtConfigGuestDisk)
            self.assertEqual(cfg.devices[2].target_dev, 'vdc')
            self.assertIsInstance(cfg.devices[3],
                                  vconfig.LibvirtConfigGuestDisk)
            self.assertEqual(cfg.devices[3].target_dev, 'vdd')
            mock_save.assert_called_with()
    def test_get_guest_config_lxc_with_attached_volume(self):
        """For LXC guests, non-boot attached volumes (vdc, vdd) appear as
        guest disks after the boot volume.
        """
        self.flags(virt_type='lxc', group='libvirt')
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)

        instance_ref = objects.Instance(**self.test_instance)
        image_meta = {}
        conn_info = {'driver_volume_type': 'fake'}
        info = {'block_device_mapping': driver_block_device.convert_volumes([
                    fake_block_device.FakeDbBlockDeviceDict(
                        {'id': 1,
                         'source_type': 'volume', 'destination_type': 'volume',
                         'boot_index': 0}),
                    fake_block_device.FakeDbBlockDeviceDict(
                        {'id': 2,
                         'source_type': 'volume', 'destination_type': 'volume',
                        }),
                    fake_block_device.FakeDbBlockDeviceDict(
                        {'id': 3,
                         'source_type': 'volume', 'destination_type': 'volume',
                        }),
               ])}

        info['block_device_mapping'][0]['connection_info'] = conn_info
        info['block_device_mapping'][1]['connection_info'] = conn_info
        info['block_device_mapping'][2]['connection_info'] = conn_info
        info['block_device_mapping'][0]['mount_device'] = '/dev/vda'
        info['block_device_mapping'][1]['mount_device'] = '/dev/vdc'
        info['block_device_mapping'][2]['mount_device'] = '/dev/vdd'
        # Patch save so no DB access is attempted when connection_info is
        # persisted on the mapping.
        with mock.patch.object(
                driver_block_device.DriverVolumeBlockDevice, 'save'
        ) as mock_save:
            disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                                instance_ref,
                                                image_meta,
                                                info)
            cfg = drvr._get_guest_config(instance_ref, [], {}, disk_info,
                                         None, info)
            self.assertIsInstance(cfg.devices[1],
                                  vconfig.LibvirtConfigGuestDisk)
            self.assertEqual(cfg.devices[1].target_dev, 'vdc')
            self.assertIsInstance(cfg.devices[2],
                                  vconfig.LibvirtConfigGuestDisk)
            self.assertEqual(cfg.devices[2].target_dev, 'vdd')
            mock_save.assert_called_with()
    def test_get_guest_config_with_configdrive(self):
        """The config drive is attached as the last available device:
        'hdd' (ide) on x86, 'sdz' (scsi) on power.
        """
        # It's necessary to check if the architecture is power, because
        # power doesn't have support to ide, and so libvirt translate
        # all ide calls to scsi

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance_ref = objects.Instance(**self.test_instance)
        image_meta = {}

        # make configdrive.required_by() return True
        instance_ref['config_drive'] = True

        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)
        cfg = drvr._get_guest_config(instance_ref, [], {}, disk_info)

        # The last device is selected for this. on x86 is the last ide
        # device (hdd). Since power only support scsi, the last device
        # is sdz

        expect = {"ppc": "sdz", "ppc64": "sdz"}
        disk = expect.get(blockinfo.libvirt_utils.get_arch({}), "hdd")
        self.assertIsInstance(cfg.devices[2],
                              vconfig.LibvirtConfigGuestDisk)
        self.assertEqual(cfg.devices[2].target_dev, disk)
    def test_get_guest_config_with_virtio_scsi_bus(self):
        """hw_scsi_model=virtio-scsi in image properties adds a virtio-scsi
        controller device to the guest config.
        """
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)

        image_meta = {"properties": {"hw_scsi_model": "virtio-scsi"}}
        instance_ref = objects.Instance(**self.test_instance)

        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta,
                                            [])
        cfg = drvr._get_guest_config(instance_ref, [], image_meta, disk_info)
        self.assertIsInstance(cfg.devices[0],
                              vconfig.LibvirtConfigGuestDisk)
        self.assertIsInstance(cfg.devices[1],
                              vconfig.LibvirtConfigGuestDisk)
        self.assertIsInstance(cfg.devices[2],
                              vconfig.LibvirtConfigGuestController)
        self.assertEqual(cfg.devices[2].model, 'virtio-scsi')
    def test_get_guest_config_with_virtio_scsi_bus_bdm(self):
        """Volumes with disk_bus=scsi attach on the scsi bus (sdc, sdd) and
        a virtio-scsi controller is added after them.
        """
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)

        image_meta = {"properties": {"hw_scsi_model": "virtio-scsi"}}
        instance_ref = objects.Instance(**self.test_instance)
        conn_info = {'driver_volume_type': 'fake'}
        bd_info = {
            'block_device_mapping': driver_block_device.convert_volumes([
                    fake_block_device.FakeDbBlockDeviceDict(
                        {'id': 1,
                         'source_type': 'volume', 'destination_type': 'volume',
                         'device_name': '/dev/sdc', 'disk_bus': 'scsi'}),
                    fake_block_device.FakeDbBlockDeviceDict(
                        {'id': 2,
                         'source_type': 'volume', 'destination_type': 'volume',
                         'device_name': '/dev/sdd', 'disk_bus': 'scsi'}),
                ])}
        bd_info['block_device_mapping'][0]['connection_info'] = conn_info
        bd_info['block_device_mapping'][1]['connection_info'] = conn_info

        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta,
                                            bd_info)
        # Patch save so no DB access is attempted when connection_info is
        # persisted on the mapping.
        with mock.patch.object(
                driver_block_device.DriverVolumeBlockDevice, 'save'
        ) as mock_save:
            cfg = drvr._get_guest_config(instance_ref, [], image_meta,
                                         disk_info, [], bd_info)
            self.assertIsInstance(cfg.devices[2],
                                  vconfig.LibvirtConfigGuestDisk)
            self.assertEqual(cfg.devices[2].target_dev, 'sdc')
            self.assertEqual(cfg.devices[2].target_bus, 'scsi')
            self.assertIsInstance(cfg.devices[3],
                                  vconfig.LibvirtConfigGuestDisk)
            self.assertEqual(cfg.devices[3].target_dev, 'sdd')
            self.assertEqual(cfg.devices[3].target_bus, 'scsi')
            self.assertIsInstance(cfg.devices[4],
                                  vconfig.LibvirtConfigGuestController)
            self.assertEqual(cfg.devices[4].model, 'virtio-scsi')
            mock_save.assert_called_with()
    def test_get_guest_config_with_vnc(self):
        """VNC enabled without a USB tablet yields 7 devices, with a VNC
        graphics device and no input device.
        """
        self.flags(vnc_enabled=True)
        self.flags(virt_type='kvm',
                   use_usb_tablet=False,
                   group='libvirt')
        self.flags(enabled=False, group='spice')

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance_ref = objects.Instance(**self.test_instance)
        image_meta = {}
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)
        cfg = drvr._get_guest_config(instance_ref, [], {}, disk_info)
        self.assertEqual(len(cfg.devices), 7)
        self.assertIsInstance(cfg.devices[0],
                              vconfig.LibvirtConfigGuestDisk)
        self.assertIsInstance(cfg.devices[1],
                              vconfig.LibvirtConfigGuestDisk)
        self.assertIsInstance(cfg.devices[2],
                              vconfig.LibvirtConfigGuestSerial)
        self.assertIsInstance(cfg.devices[3],
                              vconfig.LibvirtConfigGuestSerial)
        self.assertIsInstance(cfg.devices[4],
                              vconfig.LibvirtConfigGuestGraphics)
        self.assertIsInstance(cfg.devices[5],
                              vconfig.LibvirtConfigGuestVideo)
        self.assertIsInstance(cfg.devices[6],
                              vconfig.LibvirtConfigMemoryBalloon)

        self.assertEqual(cfg.devices[4].type, "vnc")
    def test_get_guest_config_with_vnc_and_tablet(self):
        """VNC with a USB tablet adds an input device before the graphics
        device, for 8 devices total.
        """
        self.flags(vnc_enabled=True)
        self.flags(virt_type='kvm',
                   use_usb_tablet=True,
                   group='libvirt')
        self.flags(enabled=False, group='spice')

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance_ref = objects.Instance(**self.test_instance)
        image_meta = {}
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)
        cfg = drvr._get_guest_config(instance_ref, [], {}, disk_info)
        self.assertEqual(len(cfg.devices), 8)
        self.assertIsInstance(cfg.devices[0],
                              vconfig.LibvirtConfigGuestDisk)
        self.assertIsInstance(cfg.devices[1],
                              vconfig.LibvirtConfigGuestDisk)
        self.assertIsInstance(cfg.devices[2],
                              vconfig.LibvirtConfigGuestSerial)
        self.assertIsInstance(cfg.devices[3],
                              vconfig.LibvirtConfigGuestSerial)
        self.assertIsInstance(cfg.devices[4],
                              vconfig.LibvirtConfigGuestInput)
        self.assertIsInstance(cfg.devices[5],
                              vconfig.LibvirtConfigGuestGraphics)
        self.assertIsInstance(cfg.devices[6],
                              vconfig.LibvirtConfigGuestVideo)
        self.assertIsInstance(cfg.devices[7],
                              vconfig.LibvirtConfigMemoryBalloon)

        self.assertEqual(cfg.devices[4].type, "tablet")
        self.assertEqual(cfg.devices[5].type, "vnc")
    def test_get_guest_config_with_spice_and_tablet(self):
        """SPICE without the agent keeps the USB tablet input device and
        uses a spice graphics device.
        """
        self.flags(vnc_enabled=False)
        self.flags(virt_type='kvm',
                   use_usb_tablet=True,
                   group='libvirt')
        self.flags(enabled=True,
                   agent_enabled=False,
                   group='spice')

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance_ref = objects.Instance(**self.test_instance)
        image_meta = {}
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)
        cfg = drvr._get_guest_config(instance_ref, [], {}, disk_info)
        self.assertEqual(len(cfg.devices), 8)
        self.assertIsInstance(cfg.devices[0],
                              vconfig.LibvirtConfigGuestDisk)
        self.assertIsInstance(cfg.devices[1],
                              vconfig.LibvirtConfigGuestDisk)
        self.assertIsInstance(cfg.devices[2],
                              vconfig.LibvirtConfigGuestSerial)
        self.assertIsInstance(cfg.devices[3],
                              vconfig.LibvirtConfigGuestSerial)
        self.assertIsInstance(cfg.devices[4],
                              vconfig.LibvirtConfigGuestInput)
        self.assertIsInstance(cfg.devices[5],
                              vconfig.LibvirtConfigGuestGraphics)
        self.assertIsInstance(cfg.devices[6],
                              vconfig.LibvirtConfigGuestVideo)
        self.assertIsInstance(cfg.devices[7],
                              vconfig.LibvirtConfigMemoryBalloon)

        self.assertEqual(cfg.devices[4].type, "tablet")
        self.assertEqual(cfg.devices[5].type, "spice")
    def test_get_guest_config_with_spice_and_agent(self):
        """SPICE with the agent replaces the tablet with a spice channel
        and selects the qxl video model.
        """
        self.flags(vnc_enabled=False)
        self.flags(virt_type='kvm',
                   use_usb_tablet=True,
                   group='libvirt')
        self.flags(enabled=True,
                   agent_enabled=True,
                   group='spice')

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance_ref = objects.Instance(**self.test_instance)
        image_meta = {}
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)
        cfg = drvr._get_guest_config(instance_ref, [], {}, disk_info)
        self.assertEqual(len(cfg.devices), 8)
        self.assertIsInstance(cfg.devices[0],
                              vconfig.LibvirtConfigGuestDisk)
        self.assertIsInstance(cfg.devices[1],
                              vconfig.LibvirtConfigGuestDisk)
        self.assertIsInstance(cfg.devices[2],
                              vconfig.LibvirtConfigGuestSerial)
        self.assertIsInstance(cfg.devices[3],
                              vconfig.LibvirtConfigGuestSerial)
        self.assertIsInstance(cfg.devices[4],
                              vconfig.LibvirtConfigGuestChannel)
        self.assertIsInstance(cfg.devices[5],
                              vconfig.LibvirtConfigGuestGraphics)
        self.assertIsInstance(cfg.devices[6],
                              vconfig.LibvirtConfigGuestVideo)
        self.assertIsInstance(cfg.devices[7],
                              vconfig.LibvirtConfigMemoryBalloon)

        self.assertEqual(cfg.devices[4].target_name, "com.redhat.spice.0")
        self.assertEqual(cfg.devices[5].type, "spice")
        self.assertEqual(cfg.devices[6].type, "qxl")
    @mock.patch('nova.console.serial.acquire_port')
    @mock.patch('nova.virt.hardware.get_number_of_serial_ports',
                return_value=1)
    @mock.patch.object(libvirt_driver.libvirt_utils, 'get_arch',)
    def test_create_serial_console_devices_based_on_arch(self, mock_get_arch,
                                                         mock_get_port_number,
                                                         mock_acquire_port):
        """The serial-console device class tracks the guest architecture:
        per the `expected` map below, x86_64 gets a <serial> element while
        s390/s390x get a <console> element; all are TCP-backed.
        """
        self.flags(enabled=True, group='serial_console')
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        expected = {arch.X86_64: vconfig.LibvirtConfigGuestSerial,
                    arch.S390: vconfig.LibvirtConfigGuestConsole,
                    arch.S390X: vconfig.LibvirtConfigGuestConsole}
        for guest_arch, device_type in expected.items():
            mock_get_arch.return_value = guest_arch
            # Fresh guest config per arch so each pass sees exactly one device.
            guest = vconfig.LibvirtConfigGuest()
            drvr._create_serial_console_devices(guest, instance=None,
                                                flavor={}, image_meta={})
            self.assertEqual(1, len(guest.devices))
            console_device = guest.devices[0]
            self.assertIsInstance(console_device, device_type)
            self.assertEqual("tcp", console_device.type)
@mock.patch('nova.console.serial.acquire_port')
def test_get_guest_config_serial_console(self, acquire_port):
self.flags(enabled=True, group='serial_console')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = objects.Instance(**self.test_instance)
image_meta = {}
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta)
acquire_port.return_value = 11111
cfg = drvr._get_guest_config(instance_ref, [], {}, disk_info)
self.assertEqual(8, len(cfg.devices))
self.assertIsInstance(cfg.devices[0],
vconfig.LibvirtConfigGuestDisk)
self.assertIsInstance(cfg.devices[1],
vconfig.LibvirtConfigGuestDisk)
self.assertIsInstance(cfg.devices[2],
vconfig.LibvirtConfigGuestSerial)
self.assertIsInstance(cfg.devices[3],
vconfig.LibvirtConfigGuestSerial)
self.assertIsInstance(cfg.devices[4],
vconfig.LibvirtConfigGuestInput)
self.assertIsInstance(cfg.devices[5],
vconfig.LibvirtConfigGuestGraphics)
self.assertIsInstance(cfg.devices[6],
vconfig.LibvirtConfigGuestVideo)
self.assertIsInstance(cfg.devices[7],
vconfig.LibvirtConfigMemoryBalloon)
self.assertEqual("tcp", cfg.devices[2].type)
self.assertEqual(11111, cfg.devices[2].listen_port)
def test_get_guest_config_serial_console_through_flavor(self):
self.flags(enabled=True, group='serial_console')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = objects.Instance(**self.test_instance)
instance_ref.flavor.extra_specs = {'hw:serial_port_count': 3}
image_meta = {}
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta)
cfg = drvr._get_guest_config(instance_ref, [], {}, disk_info)
self.assertEqual(10, len(cfg.devices))
self.assertIsInstance(cfg.devices[0],
vconfig.LibvirtConfigGuestDisk)
self.assertIsInstance(cfg.devices[1],
vconfig.LibvirtConfigGuestDisk)
self.assertIsInstance(cfg.devices[2],
vconfig.LibvirtConfigGuestSerial)
self.assertIsInstance(cfg.devices[3],
vconfig.LibvirtConfigGuestSerial)
self.assertIsInstance(cfg.devices[4],
vconfig.LibvirtConfigGuestSerial)
self.assertIsInstance(cfg.devices[5],
vconfig.LibvirtConfigGuestSerial)
self.assertIsInstance(cfg.devices[6],
vconfig.LibvirtConfigGuestInput)
self.assertIsInstance(cfg.devices[7],
vconfig.LibvirtConfigGuestGraphics)
self.assertIsInstance(cfg.devices[8],
vconfig.LibvirtConfigGuestVideo)
self.assertIsInstance(cfg.devices[9],
vconfig.LibvirtConfigMemoryBalloon)
self.assertEqual("tcp", cfg.devices[2].type)
self.assertEqual("tcp", cfg.devices[3].type)
self.assertEqual("tcp", cfg.devices[4].type)
def test_get_guest_config_serial_console_invalid_flavor(self):
self.flags(enabled=True, group='serial_console')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = objects.Instance(**self.test_instance)
instance_ref.flavor.extra_specs = {'hw:serial_port_count': "a"}
image_meta = {}
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta)
self.assertRaises(
exception.ImageSerialPortNumberInvalid,
drvr._get_guest_config, instance_ref, [], {}, disk_info)
def test_get_guest_config_serial_console_image_and_flavor(self):
self.flags(enabled=True, group='serial_console')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
image_meta = {"properties": {"hw_serial_port_count": "3"}}
instance_ref = objects.Instance(**self.test_instance)
instance_ref.flavor.extra_specs = {'hw:serial_port_count': 4}
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta)
cfg = drvr._get_guest_config(instance_ref, [], image_meta,
disk_info)
self.assertEqual(10, len(cfg.devices), cfg.devices)
self.assertIsInstance(cfg.devices[0],
vconfig.LibvirtConfigGuestDisk)
self.assertIsInstance(cfg.devices[1],
vconfig.LibvirtConfigGuestDisk)
self.assertIsInstance(cfg.devices[2],
vconfig.LibvirtConfigGuestSerial)
self.assertIsInstance(cfg.devices[3],
vconfig.LibvirtConfigGuestSerial)
self.assertIsInstance(cfg.devices[4],
vconfig.LibvirtConfigGuestSerial)
self.assertIsInstance(cfg.devices[5],
vconfig.LibvirtConfigGuestSerial)
self.assertIsInstance(cfg.devices[6],
vconfig.LibvirtConfigGuestInput)
self.assertIsInstance(cfg.devices[7],
vconfig.LibvirtConfigGuestGraphics)
self.assertIsInstance(cfg.devices[8],
vconfig.LibvirtConfigGuestVideo)
self.assertIsInstance(cfg.devices[9],
vconfig.LibvirtConfigMemoryBalloon)
self.assertEqual("tcp", cfg.devices[2].type)
self.assertEqual("tcp", cfg.devices[3].type)
self.assertEqual("tcp", cfg.devices[4].type)
def test_get_guest_config_serial_console_invalid_img_meta(self):
self.flags(enabled=True, group='serial_console')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = objects.Instance(**self.test_instance)
image_meta = {}
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta)
image_meta = {"properties": {"hw_serial_port_count": "fail"}}
self.assertRaises(
exception.ImageSerialPortNumberInvalid,
drvr._get_guest_config, instance_ref, [], image_meta, disk_info)
@mock.patch('nova.console.serial.acquire_port')
def test_get_guest_config_serial_console_through_port_rng_exhausted(
self, acquire_port):
self.flags(enabled=True, group='serial_console')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = objects.Instance(**self.test_instance)
image_meta = {}
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta)
acquire_port.side_effect = exception.SocketPortRangeExhaustedException(
'127.0.0.1')
self.assertRaises(
exception.SocketPortRangeExhaustedException,
drvr._get_guest_config, instance_ref, [], {}, disk_info)
@mock.patch.object(host.Host, "get_domain")
def test_get_serial_ports_from_instance(self, mock_get_domain):
i = self._test_get_serial_ports_from_instance(None,
mock_get_domain)
self.assertEqual([
('127.0.0.1', 100),
('127.0.0.1', 101),
('127.0.0.2', 100),
('127.0.0.2', 101)], list(i))
@mock.patch.object(host.Host, "get_domain")
def test_get_serial_ports_from_instance_bind_only(self, mock_get_domain):
i = self._test_get_serial_ports_from_instance('bind',
mock_get_domain)
self.assertEqual([
('127.0.0.1', 101),
('127.0.0.2', 100)], list(i))
@mock.patch.object(host.Host, "get_domain")
def test_get_serial_ports_from_instance_connect_only(self,
mock_get_domain):
i = self._test_get_serial_ports_from_instance('connect',
mock_get_domain)
self.assertEqual([
('127.0.0.1', 100),
('127.0.0.2', 101)], list(i))
@mock.patch.object(host.Host, "get_domain")
def test_get_serial_ports_from_instance_on_s390(self, mock_get_domain):
i = self._test_get_serial_ports_from_instance(None,
mock_get_domain,
'console')
self.assertEqual([
('127.0.0.1', 100),
('127.0.0.1', 101),
('127.0.0.2', 100),
('127.0.0.2', 101)], list(i))
    def _test_get_serial_ports_from_instance(self, mode, mock_get_domain,
                                             dev_name='serial'):
        """Drive _get_serial_ports_from_instance against a faked domain.

        Builds domain XML with four TCP console sources (two hosts, two
        ports each, alternating bind/connect modes), wires it to a mock
        domain, and returns the iterator produced by the driver.

        :param mode: mode filter forwarded to the driver (None, 'bind'
                     or 'connect')
        :param mock_get_domain: the patched Host.get_domain mock
        :param dev_name: XML element name, 'serial' or 'console' (s390)
        """
        xml = """
        <domain type='kvm'>
          <devices>
            <%(dev_name)s type="tcp">
              <source host="127.0.0.1" service="100" mode="connect"/>
            </%(dev_name)s>
            <%(dev_name)s type="tcp">
              <source host="127.0.0.1" service="101" mode="bind"/>
            </%(dev_name)s>
            <%(dev_name)s type="tcp">
              <source host="127.0.0.2" service="100" mode="bind"/>
            </%(dev_name)s>
            <%(dev_name)s type="tcp">
              <source host="127.0.0.2" service="101" mode="connect"/>
            </%(dev_name)s>
          </devices>
        </domain>""" % {'dev_name': dev_name}
        dom = mock.MagicMock()
        dom.XMLDesc.return_value = xml
        mock_get_domain.return_value = dom
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        instance = objects.Instance(**self.test_instance)
        # Returned lazily; callers materialize it with list().
        return drvr._get_serial_ports_from_instance(
            instance, mode=mode)
def test_get_guest_config_with_type_xen(self):
self.flags(vnc_enabled=True)
self.flags(virt_type='xen',
use_usb_tablet=False,
group='libvirt')
self.flags(enabled=False,
group='spice')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = objects.Instance(**self.test_instance)
image_meta = {}
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta)
cfg = drvr._get_guest_config(instance_ref, [], {}, disk_info)
self.assertEqual(len(cfg.devices), 6)
self.assertIsInstance(cfg.devices[0],
vconfig.LibvirtConfigGuestDisk)
self.assertIsInstance(cfg.devices[1],
vconfig.LibvirtConfigGuestDisk)
self.assertIsInstance(cfg.devices[2],
vconfig.LibvirtConfigGuestConsole)
self.assertIsInstance(cfg.devices[3],
vconfig.LibvirtConfigGuestGraphics)
self.assertIsInstance(cfg.devices[4],
vconfig.LibvirtConfigGuestVideo)
self.assertIsInstance(cfg.devices[5],
vconfig.LibvirtConfigMemoryBalloon)
self.assertEqual(cfg.devices[3].type, "vnc")
self.assertEqual(cfg.devices[4].type, "xen")
    @mock.patch.object(libvirt_driver.libvirt_utils, 'get_arch',
                       return_value=arch.S390X)
    def test_get_guest_config_with_type_kvm_on_s390(self, mock_get_arch):
        """KVM on s390x uses sclp consoles (a file-backed log console plus
        a pty terminal console) and the s390-ccw-virtio machine type.
        """
        self.flags(vnc_enabled=False)
        self.flags(virt_type='kvm',
                   use_usb_tablet=False,
                   group='libvirt')
        # Make the host capabilities report an s390x CPU as well.
        self._stub_host_capabilities_cpu_arch(arch.S390X)
        instance_ref = objects.Instance(**self.test_instance)
        cfg = self._get_guest_config_via_fake_api(instance_ref)
        self.assertIsInstance(cfg.devices[0],
                              vconfig.LibvirtConfigGuestDisk)
        self.assertIsInstance(cfg.devices[1],
                              vconfig.LibvirtConfigGuestDisk)
        # Device 2: the log console writes guest output to a file.
        log_file_device = cfg.devices[2]
        self.assertIsInstance(log_file_device,
                              vconfig.LibvirtConfigGuestConsole)
        self.assertEqual("sclplm", log_file_device.target_type)
        self.assertEqual("file", log_file_device.type)
        # Device 3: the interactive terminal console is pty-backed.
        terminal_device = cfg.devices[3]
        self.assertIsInstance(terminal_device,
                              vconfig.LibvirtConfigGuestConsole)
        self.assertEqual("sclp", terminal_device.target_type)
        self.assertEqual("pty", terminal_device.type)
        self.assertEqual("s390-ccw-virtio", cfg.os_mach_type)
def _stub_host_capabilities_cpu_arch(self, cpu_arch):
def get_host_capabilities_stub(self):
cpu = vconfig.LibvirtConfigGuestCPU()
cpu.arch = cpu_arch
caps = vconfig.LibvirtConfigCaps()
caps.host = vconfig.LibvirtConfigCapsHost()
caps.host.cpu = cpu
return caps
self.stubs.Set(host.Host, "get_capabilities",
get_host_capabilities_stub)
def _get_guest_config_via_fake_api(self, instance):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
image_meta = {}
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance,
image_meta)
return drvr._get_guest_config(instance, [], {}, disk_info)
def test_get_guest_config_with_type_xen_pae_hvm(self):
self.flags(vnc_enabled=True)
self.flags(virt_type='xen',
use_usb_tablet=False,
group='libvirt')
self.flags(enabled=False,
group='spice')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = objects.Instance(**self.test_instance)
instance_ref['vm_mode'] = vm_mode.HVM
image_meta = {}
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta)
cfg = drvr._get_guest_config(instance_ref, [], {}, disk_info)
self.assertEqual(cfg.os_type, vm_mode.HVM)
self.assertEqual(cfg.os_loader, CONF.libvirt.xen_hvmloader_path)
self.assertEqual(3, len(cfg.features))
self.assertIsInstance(cfg.features[0],
vconfig.LibvirtConfigGuestFeaturePAE)
self.assertIsInstance(cfg.features[1],
vconfig.LibvirtConfigGuestFeatureACPI)
self.assertIsInstance(cfg.features[2],
vconfig.LibvirtConfigGuestFeatureAPIC)
def test_get_guest_config_with_type_xen_pae_pvm(self):
self.flags(vnc_enabled=True)
self.flags(virt_type='xen',
use_usb_tablet=False,
group='libvirt')
self.flags(enabled=False,
group='spice')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = objects.Instance(**self.test_instance)
image_meta = {}
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta)
cfg = drvr._get_guest_config(instance_ref, [], {}, disk_info)
self.assertEqual(cfg.os_type, vm_mode.XEN)
self.assertEqual(1, len(cfg.features))
self.assertIsInstance(cfg.features[0],
vconfig.LibvirtConfigGuestFeaturePAE)
def test_get_guest_config_with_vnc_and_spice(self):
self.flags(vnc_enabled=True)
self.flags(virt_type='kvm',
use_usb_tablet=True,
group='libvirt')
self.flags(enabled=True,
agent_enabled=True,
group='spice')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = objects.Instance(**self.test_instance)
image_meta = {}
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta)
cfg = drvr._get_guest_config(instance_ref, [], {}, disk_info)
self.assertEqual(len(cfg.devices), 10)
self.assertIsInstance(cfg.devices[0],
vconfig.LibvirtConfigGuestDisk)
self.assertIsInstance(cfg.devices[1],
vconfig.LibvirtConfigGuestDisk)
self.assertIsInstance(cfg.devices[2],
vconfig.LibvirtConfigGuestSerial)
self.assertIsInstance(cfg.devices[3],
vconfig.LibvirtConfigGuestSerial)
self.assertIsInstance(cfg.devices[4],
vconfig.LibvirtConfigGuestInput)
self.assertIsInstance(cfg.devices[5],
vconfig.LibvirtConfigGuestChannel)
self.assertIsInstance(cfg.devices[6],
vconfig.LibvirtConfigGuestGraphics)
self.assertIsInstance(cfg.devices[7],
vconfig.LibvirtConfigGuestGraphics)
self.assertIsInstance(cfg.devices[8],
vconfig.LibvirtConfigGuestVideo)
self.assertIsInstance(cfg.devices[9],
vconfig.LibvirtConfigMemoryBalloon)
self.assertEqual(cfg.devices[4].type, "tablet")
self.assertEqual(cfg.devices[5].target_name, "com.redhat.spice.0")
self.assertEqual(cfg.devices[6].type, "vnc")
self.assertEqual(cfg.devices[7].type, "spice")
def test_invalid_watchdog_action(self):
self.flags(virt_type='kvm', group='libvirt')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = objects.Instance(**self.test_instance)
image_meta = {}
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta)
image_meta = {"properties": {"hw_watchdog_action": "something"}}
self.assertRaises(exception.InvalidWatchdogAction,
drvr._get_guest_config,
instance_ref,
[],
image_meta,
disk_info)
def test_get_guest_config_with_watchdog_action_image_meta(self):
self.flags(virt_type='kvm', group='libvirt')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = objects.Instance(**self.test_instance)
image_meta = {}
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta)
image_meta = {"properties": {"hw_watchdog_action": "none"}}
cfg = drvr._get_guest_config(instance_ref, [], image_meta, disk_info)
self.assertEqual(len(cfg.devices), 9)
self.assertIsInstance(cfg.devices[0],
vconfig.LibvirtConfigGuestDisk)
self.assertIsInstance(cfg.devices[1],
vconfig.LibvirtConfigGuestDisk)
self.assertIsInstance(cfg.devices[2],
vconfig.LibvirtConfigGuestSerial)
self.assertIsInstance(cfg.devices[3],
vconfig.LibvirtConfigGuestSerial)
self.assertIsInstance(cfg.devices[4],
vconfig.LibvirtConfigGuestInput)
self.assertIsInstance(cfg.devices[5],
vconfig.LibvirtConfigGuestGraphics)
self.assertIsInstance(cfg.devices[6],
vconfig.LibvirtConfigGuestVideo)
self.assertIsInstance(cfg.devices[7],
vconfig.LibvirtConfigGuestWatchdog)
self.assertIsInstance(cfg.devices[8],
vconfig.LibvirtConfigMemoryBalloon)
self.assertEqual("none", cfg.devices[7].action)
def _test_get_guest_usb_tablet(self, vnc_enabled, spice_enabled, os_type,
agent_enabled=False):
self.flags(vnc_enabled=vnc_enabled)
self.flags(enabled=spice_enabled,
agent_enabled=agent_enabled, group='spice')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
return drvr._get_guest_usb_tablet(os_type)
def test_get_guest_usb_tablet_wipe(self):
self.flags(use_usb_tablet=True, group='libvirt')
tablet = self._test_get_guest_usb_tablet(True, True, vm_mode.HVM)
self.assertIsNotNone(tablet)
tablet = self._test_get_guest_usb_tablet(True, False, vm_mode.HVM)
self.assertIsNotNone(tablet)
tablet = self._test_get_guest_usb_tablet(False, True, vm_mode.HVM)
self.assertIsNotNone(tablet)
tablet = self._test_get_guest_usb_tablet(False, False, vm_mode.HVM)
self.assertIsNone(tablet)
tablet = self._test_get_guest_usb_tablet(True, True, "foo")
self.assertIsNone(tablet)
tablet = self._test_get_guest_usb_tablet(
False, True, vm_mode.HVM, True)
self.assertIsNone(tablet)
def _test_get_guest_config_with_watchdog_action_flavor(self,
hw_watchdog_action="hw:watchdog_action"):
self.flags(virt_type='kvm', group='libvirt')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = objects.Instance(**self.test_instance)
instance_ref.flavor.extra_specs = {hw_watchdog_action: 'none'}
image_meta = {}
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta)
cfg = drvr._get_guest_config(instance_ref, [], {}, disk_info)
self.assertEqual(9, len(cfg.devices))
self.assertIsInstance(cfg.devices[0],
vconfig.LibvirtConfigGuestDisk)
self.assertIsInstance(cfg.devices[1],
vconfig.LibvirtConfigGuestDisk)
self.assertIsInstance(cfg.devices[2],
vconfig.LibvirtConfigGuestSerial)
self.assertIsInstance(cfg.devices[3],
vconfig.LibvirtConfigGuestSerial)
self.assertIsInstance(cfg.devices[4],
vconfig.LibvirtConfigGuestInput)
self.assertIsInstance(cfg.devices[5],
vconfig.LibvirtConfigGuestGraphics)
self.assertIsInstance(cfg.devices[6],
vconfig.LibvirtConfigGuestVideo)
self.assertIsInstance(cfg.devices[7],
vconfig.LibvirtConfigGuestWatchdog)
self.assertIsInstance(cfg.devices[8],
vconfig.LibvirtConfigMemoryBalloon)
self.assertEqual("none", cfg.devices[7].action)
    def test_get_guest_config_with_watchdog_action_through_flavor(self):
        """Watchdog action via the scoped 'hw:watchdog_action' extra spec."""
        self._test_get_guest_config_with_watchdog_action_flavor()
    # TODO(pkholkin): the test accepting old property name 'hw_watchdog_action'
    # should be removed in the next release
    def test_get_guest_config_with_watchdog_action_through_flavor_no_scope(
            self):
        """Watchdog action via the legacy unscoped 'hw_watchdog_action'
        extra spec name.
        """
        self._test_get_guest_config_with_watchdog_action_flavor(
            hw_watchdog_action="hw_watchdog_action")
def test_get_guest_config_with_watchdog_overrides_flavor(self):
self.flags(virt_type='kvm', group='libvirt')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = objects.Instance(**self.test_instance)
instance_ref.flavor.extra_specs = {'hw_watchdog_action': 'none'}
image_meta = {}
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta)
image_meta = {"properties": {"hw_watchdog_action": "pause"}}
cfg = drvr._get_guest_config(instance_ref, [],
image_meta, disk_info)
self.assertEqual(9, len(cfg.devices))
self.assertIsInstance(cfg.devices[0],
vconfig.LibvirtConfigGuestDisk)
self.assertIsInstance(cfg.devices[1],
vconfig.LibvirtConfigGuestDisk)
self.assertIsInstance(cfg.devices[2],
vconfig.LibvirtConfigGuestSerial)
self.assertIsInstance(cfg.devices[3],
vconfig.LibvirtConfigGuestSerial)
self.assertIsInstance(cfg.devices[4],
vconfig.LibvirtConfigGuestInput)
self.assertIsInstance(cfg.devices[5],
vconfig.LibvirtConfigGuestGraphics)
self.assertIsInstance(cfg.devices[6],
vconfig.LibvirtConfigGuestVideo)
self.assertIsInstance(cfg.devices[7],
vconfig.LibvirtConfigGuestWatchdog)
self.assertIsInstance(cfg.devices[8],
vconfig.LibvirtConfigMemoryBalloon)
self.assertEqual("pause", cfg.devices[7].action)
def test_unsupported_video_driver_through_image_meta(self):
self.flags(virt_type='kvm', group='libvirt')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = objects.Instance(**self.test_instance)
image_meta = {}
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta)
image_meta = {"properties": {"hw_video_model": "something"}}
self.assertRaises(exception.InvalidVideoMode,
drvr._get_guest_config,
instance_ref,
[],
image_meta,
disk_info)
def test_get_guest_config_with_video_driver_image_meta(self):
self.flags(virt_type='kvm', group='libvirt')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = objects.Instance(**self.test_instance)
image_meta = {}
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta)
image_meta = {"properties": {"hw_video_model": "vmvga"}}
cfg = drvr._get_guest_config(instance_ref, [], image_meta, disk_info)
self.assertEqual(len(cfg.devices), 8)
self.assertIsInstance(cfg.devices[0],
vconfig.LibvirtConfigGuestDisk)
self.assertIsInstance(cfg.devices[1],
vconfig.LibvirtConfigGuestDisk)
self.assertIsInstance(cfg.devices[2],
vconfig.LibvirtConfigGuestSerial)
self.assertIsInstance(cfg.devices[3],
vconfig.LibvirtConfigGuestSerial)
self.assertIsInstance(cfg.devices[4],
vconfig.LibvirtConfigGuestInput)
self.assertIsInstance(cfg.devices[5],
vconfig.LibvirtConfigGuestGraphics)
self.assertIsInstance(cfg.devices[6],
vconfig.LibvirtConfigGuestVideo)
self.assertIsInstance(cfg.devices[7],
vconfig.LibvirtConfigMemoryBalloon)
self.assertEqual(cfg.devices[5].type, "vnc")
self.assertEqual(cfg.devices[6].type, "vmvga")
def test_get_guest_config_with_qga_through_image_meta(self):
self.flags(virt_type='kvm', group='libvirt')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = objects.Instance(**self.test_instance)
image_meta = {}
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta)
image_meta = {"properties": {"hw_qemu_guest_agent": "yes"}}
cfg = drvr._get_guest_config(instance_ref, [], image_meta, disk_info)
self.assertEqual(len(cfg.devices), 9)
self.assertIsInstance(cfg.devices[0],
vconfig.LibvirtConfigGuestDisk)
self.assertIsInstance(cfg.devices[1],
vconfig.LibvirtConfigGuestDisk)
self.assertIsInstance(cfg.devices[2],
vconfig.LibvirtConfigGuestSerial)
self.assertIsInstance(cfg.devices[3],
vconfig.LibvirtConfigGuestSerial)
self.assertIsInstance(cfg.devices[4],
vconfig.LibvirtConfigGuestInput)
self.assertIsInstance(cfg.devices[5],
vconfig.LibvirtConfigGuestGraphics)
self.assertIsInstance(cfg.devices[6],
vconfig.LibvirtConfigGuestVideo)
self.assertIsInstance(cfg.devices[7],
vconfig.LibvirtConfigGuestChannel)
self.assertIsInstance(cfg.devices[8],
vconfig.LibvirtConfigMemoryBalloon)
self.assertEqual(cfg.devices[4].type, "tablet")
self.assertEqual(cfg.devices[5].type, "vnc")
self.assertEqual(cfg.devices[7].type, "unix")
self.assertEqual(cfg.devices[7].target_name, "org.qemu.guest_agent.0")
def test_get_guest_config_with_video_driver_vram(self):
self.flags(vnc_enabled=False)
self.flags(virt_type='kvm', group='libvirt')
self.flags(enabled=True,
agent_enabled=True,
group='spice')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = objects.Instance(**self.test_instance)
instance_ref.flavor.extra_specs = {'hw_video:ram_max_mb': "100"}
image_meta = {}
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta)
image_meta = {"properties": {"hw_video_model": "qxl",
"hw_video_ram": "64"}}
cfg = drvr._get_guest_config(instance_ref, [],
image_meta, disk_info)
self.assertEqual(len(cfg.devices), 8)
self.assertIsInstance(cfg.devices[0],
vconfig.LibvirtConfigGuestDisk)
self.assertIsInstance(cfg.devices[1],
vconfig.LibvirtConfigGuestDisk)
self.assertIsInstance(cfg.devices[2],
vconfig.LibvirtConfigGuestSerial)
self.assertIsInstance(cfg.devices[3],
vconfig.LibvirtConfigGuestSerial)
self.assertIsInstance(cfg.devices[4],
vconfig.LibvirtConfigGuestChannel)
self.assertIsInstance(cfg.devices[5],
vconfig.LibvirtConfigGuestGraphics)
self.assertIsInstance(cfg.devices[6],
vconfig.LibvirtConfigGuestVideo)
self.assertIsInstance(cfg.devices[7],
vconfig.LibvirtConfigMemoryBalloon)
self.assertEqual(cfg.devices[5].type, "spice")
self.assertEqual(cfg.devices[6].type, "qxl")
self.assertEqual(cfg.devices[6].vram, 64 * units.Mi / units.Ki)
    @mock.patch('nova.virt.disk.api.teardown_container')
    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver.get_info')
    @mock.patch('nova.virt.disk.api.setup_container')
    @mock.patch('nova.openstack.common.fileutils.ensure_tree')
    @mock.patch.object(fake_libvirt_utils, 'get_instance_path')
    def test_unmount_fs_if_error_during_lxc_create_domain(self,
            mock_get_inst_path, mock_ensure_tree, mock_setup_container,
            mock_get_info, mock_teardown):
        """If we hit an error during a `_create_domain` call to `libvirt+lxc`
        we need to ensure the guest FS is unmounted from the host so that any
        future `lvremove` calls will work.
        """
        self.flags(virt_type='lxc', group='libvirt')
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        mock_instance = mock.MagicMock()
        mock_get_inst_path.return_value = '/tmp/'
        mock_image_backend = mock.MagicMock()
        drvr.image_backend = mock_image_backend
        mock_image = mock.MagicMock()
        mock_image.path = '/tmp/test.img'
        drvr.image_backend.image.return_value = mock_image
        mock_setup_container.return_value = '/dev/nbd0'
        mock_get_info.side_effect = exception.InstanceNotFound(
            instance_id='foo')
        # Make the actual domain definition blow up mid-create.
        drvr._conn.defineXML = mock.Mock()
        drvr._conn.defineXML.side_effect = ValueError('somethingbad')
        with contextlib.nested(
                mock.patch.object(drvr, '_is_booted_from_volume',
                                  return_value=False),
                mock.patch.object(drvr, 'plug_vifs'),
                mock.patch.object(drvr, 'firewall_driver'),
                mock.patch.object(drvr, 'cleanup')):
            # The error must propagate ...
            self.assertRaises(ValueError,
                              drvr._create_domain_and_network,
                              self.context,
                              'xml',
                              mock_instance, None, None)
            # ... and the container rootfs must still be torn down.
            mock_teardown.assert_called_with(container_dir='/tmp/rootfs')
def test_video_driver_flavor_limit_not_set(self):
self.flags(virt_type='kvm', group='libvirt')
self.flags(enabled=True,
agent_enabled=True,
group='spice')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = objects.Instance(**self.test_instance)
image_meta = {"properties": {"hw_video_model": "qxl",
"hw_video_ram": "64"}}
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta)
with mock.patch.object(objects.Instance, 'save'):
self.assertRaises(exception.RequestedVRamTooHigh,
drvr._get_guest_config,
instance_ref,
[],
image_meta,
disk_info)
def test_video_driver_ram_above_flavor_limit(self):
self.flags(virt_type='kvm', group='libvirt')
self.flags(enabled=True,
agent_enabled=True,
group='spice')
instance_ref = objects.Instance(**self.test_instance)
instance_type = instance_ref.get_flavor()
instance_type.extra_specs = {'hw_video:ram_max_mb': "50"}
image_meta = {"properties": {"hw_video_model": "qxl",
"hw_video_ram": "64"}}
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta)
with mock.patch.object(objects.Instance, 'save'):
self.assertRaises(exception.RequestedVRamTooHigh,
drvr._get_guest_config,
instance_ref,
[],
image_meta,
disk_info)
def test_get_guest_config_without_qga_through_image_meta(self):
self.flags(virt_type='kvm', group='libvirt')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = objects.Instance(**self.test_instance)
image_meta = {"properties": {"hw_qemu_guest_agent": "no"}}
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta)
image_meta = {"properties": {"hw_qemu_guest_agent": "no"}}
cfg = drvr._get_guest_config(instance_ref, [], image_meta, disk_info)
self.assertEqual(len(cfg.devices), 8)
self.assertIsInstance(cfg.devices[0],
vconfig.LibvirtConfigGuestDisk)
self.assertIsInstance(cfg.devices[1],
vconfig.LibvirtConfigGuestDisk)
self.assertIsInstance(cfg.devices[2],
vconfig.LibvirtConfigGuestSerial)
self.assertIsInstance(cfg.devices[3],
vconfig.LibvirtConfigGuestSerial)
self.assertIsInstance(cfg.devices[4],
vconfig.LibvirtConfigGuestInput)
self.assertIsInstance(cfg.devices[5],
vconfig.LibvirtConfigGuestGraphics)
self.assertIsInstance(cfg.devices[6],
vconfig.LibvirtConfigGuestVideo)
self.assertIsInstance(cfg.devices[7],
vconfig.LibvirtConfigMemoryBalloon)
self.assertEqual(cfg.devices[4].type, "tablet")
self.assertEqual(cfg.devices[5].type, "vnc")
def test_get_guest_config_with_rng_device(self):
self.flags(virt_type='kvm',
use_usb_tablet=False,
group='libvirt')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = objects.Instance(**self.test_instance)
instance_ref.flavor.extra_specs = {'hw_rng:allowed': 'True'}
image_meta = {"properties": {"hw_rng_model": "virtio"}}
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta)
cfg = drvr._get_guest_config(instance_ref, [],
image_meta, disk_info)
self.assertEqual(len(cfg.devices), 8)
self.assertIsInstance(cfg.devices[0],
vconfig.LibvirtConfigGuestDisk)
self.assertIsInstance(cfg.devices[1],
vconfig.LibvirtConfigGuestDisk)
self.assertIsInstance(cfg.devices[2],
vconfig.LibvirtConfigGuestSerial)
self.assertIsInstance(cfg.devices[3],
vconfig.LibvirtConfigGuestSerial)
self.assertIsInstance(cfg.devices[4],
vconfig.LibvirtConfigGuestGraphics)
self.assertIsInstance(cfg.devices[5],
vconfig.LibvirtConfigGuestVideo)
self.assertIsInstance(cfg.devices[6],
vconfig.LibvirtConfigGuestRng)
self.assertIsInstance(cfg.devices[7],
vconfig.LibvirtConfigMemoryBalloon)
self.assertEqual(cfg.devices[6].model, 'random')
self.assertIsNone(cfg.devices[6].backend)
self.assertIsNone(cfg.devices[6].rate_bytes)
self.assertIsNone(cfg.devices[6].rate_period)
def test_get_guest_config_with_rng_not_allowed(self):
self.flags(virt_type='kvm',
use_usb_tablet=False,
group='libvirt')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = objects.Instance(**self.test_instance)
image_meta = {"properties": {"hw_rng_model": "virtio"}}
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta)
cfg = drvr._get_guest_config(instance_ref, [],
image_meta, disk_info)
self.assertEqual(len(cfg.devices), 7)
self.assertIsInstance(cfg.devices[0],
vconfig.LibvirtConfigGuestDisk)
self.assertIsInstance(cfg.devices[1],
vconfig.LibvirtConfigGuestDisk)
self.assertIsInstance(cfg.devices[2],
vconfig.LibvirtConfigGuestSerial)
self.assertIsInstance(cfg.devices[3],
vconfig.LibvirtConfigGuestSerial)
self.assertIsInstance(cfg.devices[4],
vconfig.LibvirtConfigGuestGraphics)
self.assertIsInstance(cfg.devices[5],
vconfig.LibvirtConfigGuestVideo)
self.assertIsInstance(cfg.devices[6],
vconfig.LibvirtConfigMemoryBalloon)
def test_get_guest_config_with_rng_limits(self):
self.flags(virt_type='kvm',
use_usb_tablet=False,
group='libvirt')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
instance_ref = objects.Instance(**self.test_instance)
instance_ref.flavor.extra_specs = {'hw_rng:allowed': 'True',
'hw_rng:rate_bytes': '1024',
'hw_rng:rate_period': '2'}
image_meta = {"properties": {"hw_rng_model": "virtio"}}
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta)
cfg = drvr._get_guest_config(instance_ref, [],
image_meta, disk_info)
self.assertEqual(len(cfg.devices), 8)
self.assertIsInstance(cfg.devices[0],
vconfig.LibvirtConfigGuestDisk)
self.assertIsInstance(cfg.devices[1],
vconfig.LibvirtConfigGuestDisk)
self.assertIsInstance(cfg.devices[2],
vconfig.LibvirtConfigGuestSerial)
self.assertIsInstance(cfg.devices[3],
vconfig.LibvirtConfigGuestSerial)
self.assertIsInstance(cfg.devices[4],
vconfig.LibvirtConfigGuestGraphics)
self.assertIsInstance(cfg.devices[5],
vconfig.LibvirtConfigGuestVideo)
self.assertIsInstance(cfg.devices[6],
vconfig.LibvirtConfigGuestRng)
self.assertIsInstance(cfg.devices[7],
vconfig.LibvirtConfigMemoryBalloon)
self.assertEqual(cfg.devices[6].model, 'random')
self.assertIsNone(cfg.devices[6].backend)
self.assertEqual(cfg.devices[6].rate_bytes, 1024)
self.assertEqual(cfg.devices[6].rate_period, 2)
@mock.patch('nova.virt.libvirt.driver.os.path.exists')
def test_get_guest_config_with_rng_backend(self, mock_path):
    """When rng_dev_path is configured and exists on the host, the RNG
    device in the guest config should use it as backend with no rate
    limiting.
    """
    self.flags(virt_type='kvm',
               use_usb_tablet=False,
               rng_dev_path='/dev/hw_rng',
               group='libvirt')
    # Pretend the configured RNG device exists on the host.
    mock_path.return_value = True
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)

    instance_ref = objects.Instance(**self.test_instance)
    instance_ref.flavor.extra_specs = {'hw_rng:allowed': 'True'}
    image_meta = {"properties": {"hw_rng_model": "virtio"}}

    disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                        instance_ref,
                                        image_meta)
    cfg = drvr._get_guest_config(instance_ref, [],
                                 image_meta, disk_info)
    # Expect 8 devices with the RNG at index 6.
    self.assertEqual(len(cfg.devices), 8)
    self.assertIsInstance(cfg.devices[0],
                          vconfig.LibvirtConfigGuestDisk)
    self.assertIsInstance(cfg.devices[1],
                          vconfig.LibvirtConfigGuestDisk)
    self.assertIsInstance(cfg.devices[2],
                          vconfig.LibvirtConfigGuestSerial)
    self.assertIsInstance(cfg.devices[3],
                          vconfig.LibvirtConfigGuestSerial)
    self.assertIsInstance(cfg.devices[4],
                          vconfig.LibvirtConfigGuestGraphics)
    self.assertIsInstance(cfg.devices[5],
                          vconfig.LibvirtConfigGuestVideo)
    self.assertIsInstance(cfg.devices[6],
                          vconfig.LibvirtConfigGuestRng)
    self.assertIsInstance(cfg.devices[7],
                          vconfig.LibvirtConfigMemoryBalloon)

    self.assertEqual(cfg.devices[6].model, 'random')
    self.assertEqual(cfg.devices[6].backend, '/dev/hw_rng')
    # No rate extra specs were set, so limits must stay unset.
    self.assertIsNone(cfg.devices[6].rate_bytes)
    self.assertIsNone(cfg.devices[6].rate_period)
@mock.patch('nova.virt.libvirt.driver.os.path.exists')
def test_get_guest_config_with_rng_dev_not_present(self, mock_path):
    """If the configured RNG backend device is missing on the host,
    building the guest config must raise RngDeviceNotExist.
    """
    self.flags(virt_type='kvm',
               use_usb_tablet=False,
               rng_dev_path='/dev/hw_rng',
               group='libvirt')
    # Pretend the configured RNG device does NOT exist.
    mock_path.return_value = False
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)

    instance_ref = objects.Instance(**self.test_instance)
    instance_ref.flavor.extra_specs = {'hw_rng:allowed': 'True'}
    image_meta = {"properties": {"hw_rng_model": "virtio"}}

    disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                        instance_ref,
                                        image_meta)
    self.assertRaises(exception.RngDeviceNotExist,
                      drvr._get_guest_config,
                      instance_ref,
                      [],
                      image_meta, disk_info)
def test_guest_cpu_shares_with_multi_vcpu(self):
    """CPU shares should scale with vcpu count (1024 per vCPU here)."""
    self.flags(virt_type='kvm', group='libvirt')

    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)

    instance_ref = objects.Instance(**self.test_instance)
    instance_ref.flavor.vcpus = 4

    image_meta = {}
    disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                        instance_ref,
                                        image_meta)

    cfg = drvr._get_guest_config(instance_ref, [], {}, disk_info)

    # 4 vCPUs * 1024 default shares per vCPU.
    self.assertEqual(4096, cfg.cputune.shares)
def test_get_guest_config_with_cpu_quota(self):
    """quota:cpu_shares / quota:cpu_period extra specs should override
    the computed cputune values.
    """
    self.flags(virt_type='kvm', group='libvirt')

    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)

    instance_ref = objects.Instance(**self.test_instance)
    instance_ref.flavor.extra_specs = {'quota:cpu_shares': '10000',
                                       'quota:cpu_period': '20000'}

    image_meta = {}
    disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                        instance_ref,
                                        image_meta)

    cfg = drvr._get_guest_config(instance_ref, [], {}, disk_info)

    self.assertEqual(10000, cfg.cputune.shares)
    self.assertEqual(20000, cfg.cputune.period)
def test_get_guest_config_with_bogus_cpu_quota(self):
    """A non-numeric quota:cpu_shares value must raise ValueError."""
    self.flags(virt_type='kvm', group='libvirt')

    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)

    instance_ref = objects.Instance(**self.test_instance)
    # 'fishfood' is deliberately not parseable as an integer.
    instance_ref.flavor.extra_specs = {'quota:cpu_shares': 'fishfood',
                                       'quota:cpu_period': '20000'}

    image_meta = {}
    disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                        instance_ref,
                                        image_meta)

    self.assertRaises(ValueError,
                      drvr._get_guest_config,
                      instance_ref, [], {}, disk_info)
def _test_get_guest_config_sysinfo_serial(self, expected_serial):
    """Shared helper: build the guest sysinfo config and check the
    vendor/product/version fields plus the expected serial and UUID.

    :param expected_serial: serial value the config should carry,
        or None when the 'none' serial policy is in effect.
    """
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)

    instance_ref = objects.Instance(**self.test_instance)

    cfg = drvr._get_guest_config_sysinfo(instance_ref)

    self.assertIsInstance(cfg, vconfig.LibvirtConfigGuestSysinfo)
    self.assertEqual(version.vendor_string(),
                     cfg.system_manufacturer)
    self.assertEqual(version.product_string(),
                     cfg.system_product)
    self.assertEqual(version.version_string_with_package(),
                     cfg.system_version)
    self.assertEqual(expected_serial,
                     cfg.system_serial)
    self.assertEqual(instance_ref['uuid'],
                     cfg.system_uuid)
def test_get_guest_config_sysinfo_serial_none(self):
    """sysinfo_serial='none' should leave the serial unset."""
    self.flags(sysinfo_serial="none", group="libvirt")
    self._test_get_guest_config_sysinfo_serial(None)
@mock.patch.object(libvirt_driver.LibvirtDriver,
                   "_get_host_sysinfo_serial_hardware")
def test_get_guest_config_sysinfo_serial_hardware(self, mock_uuid):
    """sysinfo_serial='hardware' should use the host hardware UUID."""
    self.flags(sysinfo_serial="hardware", group="libvirt")

    theuuid = "56b40135-a973-4eb3-87bb-a2382a3e6dbc"
    mock_uuid.return_value = theuuid

    self._test_get_guest_config_sysinfo_serial(theuuid)
def test_get_guest_config_sysinfo_serial_os(self):
    """sysinfo_serial='os' should read the serial from
    /etc/machine-id (open() is intercepted to return a fake UUID).
    """
    self.flags(sysinfo_serial="os", group="libvirt")

    real_open = __builtin__.open
    with contextlib.nested(
            mock.patch.object(__builtin__, "open"),
    ) as (mock_open, ):
        theuuid = "56b40135-a973-4eb3-87bb-a2382a3e6dbc"

        def fake_open(filename, *args, **kwargs):
            # Only intercept the machine-id file; pass everything
            # else through to the real open().
            if filename == "/etc/machine-id":
                h = mock.MagicMock()
                h.read.return_value = theuuid
                h.__enter__.return_value = h
                return h
            return real_open(filename, *args, **kwargs)

        mock_open.side_effect = fake_open

        self._test_get_guest_config_sysinfo_serial(theuuid)
def test_get_guest_config_sysinfo_serial_auto_hardware(self):
    """sysinfo_serial='auto' falls back to the hardware UUID when
    /etc/machine-id does not exist.
    """
    self.flags(sysinfo_serial="auto", group="libvirt")

    real_exists = os.path.exists
    with contextlib.nested(
            mock.patch.object(os.path, "exists"),
            mock.patch.object(libvirt_driver.LibvirtDriver,
                              "_get_host_sysinfo_serial_hardware")
    ) as (mock_exists, mock_uuid):
        def fake_exists(filename):
            # Report only the machine-id file as missing.
            if filename == "/etc/machine-id":
                return False
            return real_exists(filename)

        mock_exists.side_effect = fake_exists

        theuuid = "56b40135-a973-4eb3-87bb-a2382a3e6dbc"
        mock_uuid.return_value = theuuid

        self._test_get_guest_config_sysinfo_serial(theuuid)
def test_get_guest_config_sysinfo_serial_auto_os(self):
    """sysinfo_serial='auto' prefers /etc/machine-id when present."""
    self.flags(sysinfo_serial="auto", group="libvirt")

    real_exists = os.path.exists
    real_open = __builtin__.open
    with contextlib.nested(
            mock.patch.object(os.path, "exists"),
            mock.patch.object(__builtin__, "open"),
    ) as (mock_exists, mock_open):
        def fake_exists(filename):
            # Report the machine-id file as present.
            if filename == "/etc/machine-id":
                return True
            return real_exists(filename)

        mock_exists.side_effect = fake_exists

        theuuid = "56b40135-a973-4eb3-87bb-a2382a3e6dbc"

        def fake_open(filename, *args, **kwargs):
            # Serve the fake UUID for machine-id reads only.
            if filename == "/etc/machine-id":
                h = mock.MagicMock()
                h.read.return_value = theuuid
                h.__enter__.return_value = h
                return h
            return real_open(filename, *args, **kwargs)

        mock_open.side_effect = fake_open

        self._test_get_guest_config_sysinfo_serial(theuuid)
def test_get_guest_config_sysinfo_serial_invalid(self):
    """An unrecognised sysinfo_serial value must fail at driver
    construction time with NovaException.
    """
    self.flags(sysinfo_serial="invalid", group="libvirt")

    self.assertRaises(exception.NovaException,
                      libvirt_driver.LibvirtDriver,
                      fake.FakeVirtAPI(),
                      True)
def _create_fake_service_compute(self):
    """Build a matching fake Service / ComputeNode pair for the PCI
    passthrough tests and return them as a tuple.
    """
    service_ref = objects.Service(id=1729,
                                  host='fake',
                                  report_count=0)
    # The compute node mirrors the service's id and host so the PCI
    # device records can reference a consistent compute node.
    compute_ref = objects.ComputeNode(id=1729,
                                      vcpus=2,
                                      memory_mb=1024,
                                      local_gb=2048,
                                      vcpus_used=0,
                                      memory_mb_used=0,
                                      local_gb_used=0,
                                      free_ram_mb=1024,
                                      free_disk_gb=2048,
                                      hypervisor_type='xen',
                                      hypervisor_version=1,
                                      running_vms=0,
                                      cpu_info='',
                                      current_workload=0,
                                      service_id=service_ref['id'],
                                      host=service_ref['host'])
    return (service_ref, compute_ref)
def test_get_guest_config_with_pci_passthrough_kvm(self):
    """On KVM a passed-through PCI device should appear as a 'managed'
    hostdev with the address split into domain/bus/slot/function.
    """
    self.flags(virt_type='kvm', group='libvirt')
    service_ref, compute_ref = self._create_fake_service_compute()

    instance = objects.Instance(**self.test_instance)
    image_meta = {}

    pci_device_info = dict(test_pci_device.fake_db_dev)
    pci_device_info.update(compute_node_id=1,
                           label='fake',
                           status='allocated',
                           address='0000:00:00.1',
                           compute_id=compute_ref['id'],
                           instance_uuid=instance.uuid,
                           request_id=None,
                           extra_info={})
    pci_device = objects.PciDevice(**pci_device_info)
    pci_list = objects.PciDeviceList()
    pci_list.objects.append(pci_device)
    instance.pci_devices = pci_list

    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
    disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                        instance,
                                        image_meta)
    cfg = drvr._get_guest_config(instance, [], {}, disk_info)

    had_pci = 0
    # care only about the PCI devices
    for dev in cfg.devices:
        if type(dev) == vconfig.LibvirtConfigGuestHostdevPCI:
            had_pci += 1
            self.assertEqual(dev.type, 'pci')
            # KVM lets libvirt manage host device detach/attach.
            self.assertEqual(dev.managed, 'yes')
            self.assertEqual(dev.mode, 'subsystem')

            # Address components come from '0000:00:00.1'.
            self.assertEqual(dev.domain, "0000")
            self.assertEqual(dev.bus, "00")
            self.assertEqual(dev.slot, "00")
            self.assertEqual(dev.function, "1")
    self.assertEqual(had_pci, 1)
def test_get_guest_config_with_pci_passthrough_xen(self):
    """On Xen the passed-through PCI hostdev must be 'managed=no'
    (Xen does not support libvirt-managed detach/attach).
    """
    self.flags(virt_type='xen', group='libvirt')
    service_ref, compute_ref = self._create_fake_service_compute()

    instance = objects.Instance(**self.test_instance)
    image_meta = {}

    pci_device_info = dict(test_pci_device.fake_db_dev)
    pci_device_info.update(compute_node_id=1,
                           label='fake',
                           status='allocated',
                           address='0000:00:00.2',
                           compute_id=compute_ref['id'],
                           instance_uuid=instance.uuid,
                           request_id=None,
                           extra_info={})
    pci_device = objects.PciDevice(**pci_device_info)
    pci_list = objects.PciDeviceList()
    pci_list.objects.append(pci_device)
    instance.pci_devices = pci_list

    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
    disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                        instance,
                                        image_meta)
    cfg = drvr._get_guest_config(instance, [], {}, disk_info)

    had_pci = 0
    # care only about the PCI devices
    for dev in cfg.devices:
        if type(dev) == vconfig.LibvirtConfigGuestHostdevPCI:
            had_pci += 1
            self.assertEqual(dev.type, 'pci')
            self.assertEqual(dev.managed, 'no')
            self.assertEqual(dev.mode, 'subsystem')

            # Address components come from '0000:00:00.2'.
            self.assertEqual(dev.domain, "0000")
            self.assertEqual(dev.bus, "00")
            self.assertEqual(dev.slot, "00")
            self.assertEqual(dev.function, "2")
    self.assertEqual(had_pci, 1)
def test_get_guest_config_os_command_line_through_image_meta(self):
    """os_command_line image property should become the kernel
    command line when the instance boots a kernel image.
    """
    self.flags(virt_type="kvm",
               cpu_mode=None,
               group='libvirt')

    # A kernel id is required for os_cmdline to be honoured.
    self.test_instance['kernel_id'] = "fake_kernel_id"

    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
    instance_ref = objects.Instance(**self.test_instance)

    image_meta = {"properties": {"os_command_line":
                                 "fake_os_command_line"}}

    disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                        instance_ref,
                                        image_meta)

    cfg = drvr._get_guest_config(instance_ref,
                                 _fake_network_info(self.stubs, 1),
                                 image_meta, disk_info)
    self.assertEqual(cfg.os_cmdline, "fake_os_command_line")
def test_get_guest_config_os_command_line_without_kernel_id(self):
    """Without a kernel image there is nowhere to pass a command
    line, so os_cmdline must stay None even when the property is set.
    """
    self.flags(virt_type="kvm",
               cpu_mode=None,
               group='libvirt')
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
    instance_ref = objects.Instance(**self.test_instance)

    image_meta = {"properties": {"os_command_line":
                                 "fake_os_command_line"}}

    disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                        instance_ref,
                                        image_meta)

    cfg = drvr._get_guest_config(instance_ref,
                                 _fake_network_info(self.stubs, 1),
                                 image_meta, disk_info)
    self.assertIsNone(cfg.os_cmdline)
def test_get_guest_config_os_command_empty(self):
    """An empty or None os_command_line property must not wipe out
    the instance's default kernel command line.
    """
    self.flags(virt_type="kvm",
               cpu_mode=None,
               group='libvirt')

    self.test_instance['kernel_id'] = "fake_kernel_id"

    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
    instance_ref = objects.Instance(**self.test_instance)

    image_meta = {"properties": {"os_command_line": ""}}

    disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                        instance_ref,
                                        image_meta)

    # the instance has 'root=/dev/vda console=tty0 console=ttyS0' set by
    # default, so testing an empty string and None value in the
    # os_command_line image property must pass
    cfg = drvr._get_guest_config(instance_ref,
                                 _fake_network_info(self.stubs, 1),
                                 image_meta, disk_info)
    self.assertNotEqual(cfg.os_cmdline, "")

    image_meta = {"properties": {"os_command_line": None}}
    cfg = drvr._get_guest_config(instance_ref,
                                 _fake_network_info(self.stubs, 1),
                                 image_meta, disk_info)
    self.assertIsNotNone(cfg.os_cmdline)
def test_get_guest_config_armv7(self):
    """ARMv7 hosts should default to the 'vexpress-a15' machine type."""
    def get_host_capabilities_stub(self):
        # Fake host capabilities advertising an ARMv7 CPU.
        cpu = vconfig.LibvirtConfigGuestCPU()
        cpu.arch = arch.ARMV7

        caps = vconfig.LibvirtConfigCaps()
        caps.host = vconfig.LibvirtConfigCapsHost()
        caps.host.cpu = cpu
        return caps

    self.flags(virt_type="kvm",
               group="libvirt")

    instance_ref = objects.Instance(**self.test_instance)
    image_meta = {}
    disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                        instance_ref,
                                        image_meta)

    self.stubs.Set(host.Host, "get_capabilities",
                   get_host_capabilities_stub)

    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
    cfg = drvr._get_guest_config(instance_ref,
                                 _fake_network_info(self.stubs, 1),
                                 {}, disk_info)
    self.assertEqual(cfg.os_mach_type, "vexpress-a15")
def test_get_guest_config_aarch64(self):
    """AArch64 hosts should default to the 'virt' machine type."""
    def get_host_capabilities_stub(self):
        # Fake host capabilities advertising an AArch64 CPU.
        cpu = vconfig.LibvirtConfigGuestCPU()
        cpu.arch = arch.AARCH64

        caps = vconfig.LibvirtConfigCaps()
        caps.host = vconfig.LibvirtConfigCapsHost()
        caps.host.cpu = cpu
        return caps

    self.flags(virt_type="kvm",
               group="libvirt")

    instance_ref = objects.Instance(**self.test_instance)
    image_meta = {}
    disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                        instance_ref,
                                        image_meta)

    self.stubs.Set(host.Host, "get_capabilities",
                   get_host_capabilities_stub)

    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
    cfg = drvr._get_guest_config(instance_ref,
                                 _fake_network_info(self.stubs, 1),
                                 {}, disk_info)
    self.assertEqual(cfg.os_mach_type, "virt")
def test_get_guest_config_machine_type_s390(self):
    """Both s390 and s390x host CPUs should map to the
    's390-ccw-virtio' machine type.
    """
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)

    caps = vconfig.LibvirtConfigCaps()
    caps.host = vconfig.LibvirtConfigCapsHost()
    caps.host.cpu = vconfig.LibvirtConfigGuestCPU()

    for cpu_arch in (arch.S390, arch.S390X):
        caps.host.cpu.arch = cpu_arch
        self.assertEqual('s390-ccw-virtio',
                         drvr._get_machine_type(None, caps))
def test_get_guest_config_machine_type_through_image_meta(self):
    """The hw_machine_type image property should override the machine
    type in the generated guest config.
    """
    self.flags(virt_type="kvm",
               group='libvirt')

    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)

    instance_ref = objects.Instance(**self.test_instance)

    image_meta = {}
    disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                        instance_ref,
                                        image_meta)

    # Rebind image_meta with the machine-type property for the
    # actual config generation call.
    image_meta = {"properties": {"hw_machine_type":
                                 "fake_machine_type"}}
    cfg = drvr._get_guest_config(instance_ref,
                                 _fake_network_info(self.stubs, 1),
                                 image_meta, disk_info)
    self.assertEqual(cfg.os_mach_type, "fake_machine_type")
def test_get_guest_config_machine_type_from_config(self):
    """The per-arch hw_machine_type config option should select the
    machine type when the image does not specify one.
    """
    self.flags(virt_type='kvm', group='libvirt')
    self.flags(hw_machine_type=['x86_64=fake_machine_type'],
               group='libvirt')

    def fake_getCapabilities():
        # Minimal capabilities XML reporting an x86_64 host CPU.
        return """
        <capabilities>
            <host>
                <uuid>cef19ce0-0ca2-11df-855d-b19fbce37686</uuid>
                <cpu>
                  <arch>x86_64</arch>
                  <model>Penryn</model>
                  <vendor>Intel</vendor>
                  <topology sockets='1' cores='2' threads='1'/>
                  <feature name='xtpr'/>
                </cpu>
            </host>
        </capabilities>
        """

    def fake_baselineCPU(cpu, flag):
        return """<cpu mode='custom' match='exact'>
                    <model fallback='allow'>Penryn</model>
                    <vendor>Intel</vendor>
                    <feature policy='require' name='xtpr'/>
                  </cpu>
               """

    # Make sure the host arch is mocked as x86_64
    self.create_fake_libvirt_mock(getCapabilities=fake_getCapabilities,
                                  baselineCPU=fake_baselineCPU,
                                  getVersion=lambda: 1005001)

    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
    instance_ref = objects.Instance(**self.test_instance)

    image_meta = {}
    disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                        instance_ref,
                                        image_meta)

    cfg = drvr._get_guest_config(instance_ref,
                                 _fake_network_info(self.stubs, 1),
                                 {}, disk_info)
    self.assertEqual(cfg.os_mach_type, "fake_machine_type")
def _test_get_guest_config_ppc64(self, device_index):
    """Test for nova.virt.libvirt.driver.LibvirtDriver._get_guest_config.

    Shared helper: on ppc/ppc64 guests the video device must be of
    type 'vga'.

    :param device_index: index at which the video device is expected
        in cfg.devices (depends on whether VNC or SPICE is enabled).
    """
    self.flags(virt_type='kvm', group='libvirt')
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
    instance_ref = objects.Instance(**self.test_instance)

    image_meta = {}
    disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                        instance_ref,
                                        image_meta)
    image_meta = {}
    expected = (arch.PPC64, arch.PPC)
    for guestarch in expected:
        with mock.patch.object(libvirt_driver.libvirt_utils,
                               'get_arch',
                               return_value=guestarch):
            cfg = drvr._get_guest_config(instance_ref, [],
                                         image_meta,
                                         disk_info)
            self.assertIsInstance(cfg.devices[device_index],
                                  vconfig.LibvirtConfigGuestVideo)
            self.assertEqual(cfg.devices[device_index].type, 'vga')
def test_get_guest_config_ppc64_through_image_meta_vnc_enabled(self):
    """With VNC enabled the video device sits at index 6."""
    self.flags(vnc_enabled=True)
    self._test_get_guest_config_ppc64(6)
def test_get_guest_config_ppc64_through_image_meta_spice_enabled(self):
    """With SPICE (plus agent) enabled extra devices shift the video
    device to index 8.
    """
    self.flags(enabled=True,
               agent_enabled=True,
               group='spice')
    self._test_get_guest_config_ppc64(8)
def _test_get_guest_config_bootmenu(self, image_meta, extra_specs):
    """Shared helper: assert os_bootmenu is enabled for the given
    image properties / flavor extra specs combination.
    """
    self.flags(virt_type='kvm', group='libvirt')
    conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
    instance_ref = objects.Instance(**self.test_instance)
    instance_ref.flavor.extra_specs = extra_specs
    disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                        instance_ref, image_meta)
    conf = conn._get_guest_config(instance_ref, [], image_meta, disk_info)
    self.assertTrue(conf.os_bootmenu)
def test_get_guest_config_bootmenu_via_image_meta(self):
    """hw_boot_menu image property should enable the boot menu."""
    self._test_get_guest_config_bootmenu(
        {"properties": {"hw_boot_menu": "True"}}, {})
def test_get_guest_config_bootmenu_via_extra_specs(self):
    """hw:boot_menu flavor extra spec should enable the boot menu."""
    self._test_get_guest_config_bootmenu({}, {'hw:boot_menu': 'True'})
def test_get_guest_cpu_config_none(self):
    """cpu_mode='none' yields a CPU element with no mode/model but
    with the default 1x1x1 topology.
    """
    self.flags(cpu_mode="none", group='libvirt')
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
    instance_ref = objects.Instance(**self.test_instance)

    image_meta = {}
    disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                        instance_ref,
                                        image_meta)
    conf = drvr._get_guest_config(instance_ref,
                                  _fake_network_info(self.stubs, 1),
                                  {}, disk_info)
    self.assertIsInstance(conf.cpu,
                          vconfig.LibvirtConfigGuestCPU)
    self.assertIsNone(conf.cpu.mode)
    self.assertIsNone(conf.cpu.model)
    self.assertEqual(conf.cpu.sockets, 1)
    self.assertEqual(conf.cpu.cores, 1)
    self.assertEqual(conf.cpu.threads, 1)
def test_get_guest_cpu_config_default_kvm(self):
    """With no explicit cpu_mode, KVM guests default to host-model."""
    self.flags(virt_type="kvm",
               cpu_mode=None,
               group='libvirt')

    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
    instance_ref = objects.Instance(**self.test_instance)

    image_meta = {}
    disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                        instance_ref,
                                        image_meta)
    conf = drvr._get_guest_config(instance_ref,
                                  _fake_network_info(self.stubs, 1),
                                  {}, disk_info)
    self.assertIsInstance(conf.cpu,
                          vconfig.LibvirtConfigGuestCPU)
    self.assertEqual(conf.cpu.mode, "host-model")
    self.assertIsNone(conf.cpu.model)
    self.assertEqual(conf.cpu.sockets, 1)
    self.assertEqual(conf.cpu.cores, 1)
    self.assertEqual(conf.cpu.threads, 1)
def test_get_guest_cpu_config_default_uml(self):
    """UML guests carry no CPU element at all."""
    self.flags(virt_type="uml",
               cpu_mode=None,
               group='libvirt')

    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
    instance_ref = objects.Instance(**self.test_instance)

    image_meta = {}
    disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                        instance_ref,
                                        image_meta)
    conf = drvr._get_guest_config(instance_ref,
                                  _fake_network_info(self.stubs, 1),
                                  {}, disk_info)
    self.assertIsNone(conf.cpu)
def test_get_guest_cpu_config_default_lxc(self):
    """LXC guests carry no CPU element at all."""
    self.flags(virt_type="lxc",
               cpu_mode=None,
               group='libvirt')

    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
    instance_ref = objects.Instance(**self.test_instance)

    image_meta = {}
    disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                        instance_ref,
                                        image_meta)
    conf = drvr._get_guest_config(instance_ref,
                                  _fake_network_info(self.stubs, 1),
                                  {}, disk_info)
    self.assertIsNone(conf.cpu)
def test_get_guest_cpu_config_host_passthrough(self):
    """cpu_mode='host-passthrough' is propagated with no model and
    the default 1x1x1 topology.
    """
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
    instance_ref = objects.Instance(**self.test_instance)
    image_meta = {}

    self.flags(cpu_mode="host-passthrough", group='libvirt')
    disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                        instance_ref,
                                        image_meta)
    conf = drvr._get_guest_config(instance_ref,
                                  _fake_network_info(self.stubs, 1),
                                  {}, disk_info)
    self.assertIsInstance(conf.cpu,
                          vconfig.LibvirtConfigGuestCPU)
    self.assertEqual(conf.cpu.mode, "host-passthrough")
    self.assertIsNone(conf.cpu.model)
    self.assertEqual(conf.cpu.sockets, 1)
    self.assertEqual(conf.cpu.cores, 1)
    self.assertEqual(conf.cpu.threads, 1)
def test_get_guest_cpu_config_host_model(self):
    """cpu_mode='host-model' is propagated with no model and the
    default 1x1x1 topology.
    """
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
    instance_ref = objects.Instance(**self.test_instance)
    image_meta = {}

    self.flags(cpu_mode="host-model", group='libvirt')
    disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                        instance_ref,
                                        image_meta)
    conf = drvr._get_guest_config(instance_ref,
                                  _fake_network_info(self.stubs, 1),
                                  {}, disk_info)
    self.assertIsInstance(conf.cpu,
                          vconfig.LibvirtConfigGuestCPU)
    self.assertEqual(conf.cpu.mode, "host-model")
    self.assertIsNone(conf.cpu.model)
    self.assertEqual(conf.cpu.sockets, 1)
    self.assertEqual(conf.cpu.cores, 1)
    self.assertEqual(conf.cpu.threads, 1)
def test_get_guest_cpu_config_custom(self):
    """cpu_mode='custom' should pass the configured cpu_model through
    to the guest CPU element.
    """
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
    instance_ref = objects.Instance(**self.test_instance)
    image_meta = {}

    self.flags(cpu_mode="custom",
               cpu_model="Penryn",
               group='libvirt')
    disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                        instance_ref,
                                        image_meta)
    conf = drvr._get_guest_config(instance_ref,
                                  _fake_network_info(self.stubs, 1),
                                  {}, disk_info)
    self.assertIsInstance(conf.cpu,
                          vconfig.LibvirtConfigGuestCPU)
    self.assertEqual(conf.cpu.mode, "custom")
    self.assertEqual(conf.cpu.model, "Penryn")
    self.assertEqual(conf.cpu.sockets, 1)
    self.assertEqual(conf.cpu.cores, 1)
    self.assertEqual(conf.cpu.threads, 1)
def test_get_guest_cpu_topology(self):
    """hw:cpu_max_sockets should cap sockets; the remaining vcpus are
    distributed to cores (8 vcpus / 4 sockets -> 2 cores each).
    """
    instance_ref = objects.Instance(**self.test_instance)
    instance_ref.flavor.vcpus = 8
    instance_ref.flavor.extra_specs = {'hw:cpu_max_sockets': '4'}

    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)

    image_meta = {}
    disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                        instance_ref,
                                        image_meta)
    conf = drvr._get_guest_config(instance_ref,
                                  _fake_network_info(self.stubs, 1),
                                  {}, disk_info)
    self.assertIsInstance(conf.cpu,
                          vconfig.LibvirtConfigGuestCPU)
    self.assertEqual(conf.cpu.mode, "host-model")
    self.assertEqual(conf.cpu.sockets, 4)
    self.assertEqual(conf.cpu.cores, 2)
    self.assertEqual(conf.cpu.threads, 1)
def test_get_guest_memory_balloon_config_by_default(self):
    """Default config gets a virtio balloon with a 10s stats period."""
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
    instance_ref = objects.Instance(**self.test_instance)

    image_meta = {}
    disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                        instance_ref,
                                        image_meta)
    cfg = drvr._get_guest_config(instance_ref, [], {}, disk_info)
    for device in cfg.devices:
        if device.root_name == 'memballoon':
            self.assertIsInstance(device,
                                  vconfig.LibvirtConfigMemoryBalloon)
            self.assertEqual('virtio', device.model)
            # 10 is the mem_stats_period_seconds default.
            self.assertEqual(10, device.period)
def test_get_guest_memory_balloon_config_disable(self):
    """Setting mem_stats_period_seconds=0 must suppress the memory
    balloon device entirely.
    """
    self.flags(mem_stats_period_seconds=0, group='libvirt')
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
    instance_ref = objects.Instance(**self.test_instance)

    image_meta = {}
    disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                        instance_ref,
                                        image_meta)
    cfg = drvr._get_guest_config(instance_ref, [], {}, disk_info)

    # No device in the config may be a memballoon.
    balloon_missing = all(device.root_name != 'memballoon'
                          for device in cfg.devices)
    self.assertTrue(balloon_missing)
def test_get_guest_memory_balloon_config_period_value(self):
    """A custom mem_stats_period_seconds should be reflected in the
    balloon device's period.
    """
    self.flags(mem_stats_period_seconds=21, group='libvirt')
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
    instance_ref = objects.Instance(**self.test_instance)

    image_meta = {}
    disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                        instance_ref,
                                        image_meta)
    cfg = drvr._get_guest_config(instance_ref, [], {}, disk_info)
    for device in cfg.devices:
        if device.root_name == 'memballoon':
            self.assertIsInstance(device,
                                  vconfig.LibvirtConfigMemoryBalloon)
            self.assertEqual('virtio', device.model)
            self.assertEqual(21, device.period)
def test_get_guest_memory_balloon_config_qemu(self):
    """QEMU guests get a virtio balloon with the default period."""
    self.flags(virt_type='qemu', group='libvirt')
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
    instance_ref = objects.Instance(**self.test_instance)

    image_meta = {}
    disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                        instance_ref,
                                        image_meta)
    cfg = drvr._get_guest_config(instance_ref, [], {}, disk_info)
    for device in cfg.devices:
        if device.root_name == 'memballoon':
            self.assertIsInstance(device,
                                  vconfig.LibvirtConfigMemoryBalloon)
            self.assertEqual('virtio', device.model)
            self.assertEqual(10, device.period)
def test_get_guest_memory_balloon_config_xen(self):
    """Xen guests get a xen-model balloon with the default period."""
    self.flags(virt_type='xen', group='libvirt')
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
    instance_ref = objects.Instance(**self.test_instance)

    image_meta = {}
    disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                        instance_ref,
                                        image_meta)
    cfg = drvr._get_guest_config(instance_ref, [], {}, disk_info)
    for device in cfg.devices:
        if device.root_name == 'memballoon':
            self.assertIsInstance(device,
                                  vconfig.LibvirtConfigMemoryBalloon)
            self.assertEqual('xen', device.model)
            self.assertEqual(10, device.period)
def test_get_guest_memory_balloon_config_lxc(self):
    """LXC containers must not get a memory balloon device."""
    self.flags(virt_type='lxc', group='libvirt')
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
    instance_ref = objects.Instance(**self.test_instance)

    image_meta = {}
    disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                        instance_ref,
                                        image_meta)
    cfg = drvr._get_guest_config(instance_ref, [], {}, disk_info)

    # No device in the config may be a memballoon.
    balloon_missing = all(device.root_name != 'memballoon'
                          for device in cfg.devices)
    self.assertTrue(balloon_missing)
def test_xml_and_uri_no_ramdisk_no_kernel(self):
    """No kernel/ramdisk images: neither should appear in the XML."""
    instance_data = dict(self.test_instance)
    self._check_xml_and_uri(instance_data,
                            expect_kernel=False, expect_ramdisk=False)
def test_xml_and_uri_no_ramdisk_no_kernel_xen_hvm(self):
    """Xen HVM mode with no kernel/ramdisk images."""
    instance_data = dict(self.test_instance)
    instance_data.update({'vm_mode': vm_mode.HVM})
    self._check_xml_and_uri(instance_data, expect_kernel=False,
                            expect_ramdisk=False, expect_xen_hvm=True)
def test_xml_and_uri_no_ramdisk_no_kernel_xen_pv(self):
    """Xen paravirt mode with no kernel/ramdisk images."""
    instance_data = dict(self.test_instance)
    instance_data.update({'vm_mode': vm_mode.XEN})
    self._check_xml_and_uri(instance_data, expect_kernel=False,
                            expect_ramdisk=False, expect_xen_hvm=False,
                            xen_only=True)
def test_xml_and_uri_no_ramdisk(self):
    """Kernel image only: XML should reference the kernel alone."""
    instance_data = dict(self.test_instance)
    instance_data['kernel_id'] = 'aki-deadbeef'
    self._check_xml_and_uri(instance_data,
                            expect_kernel=True, expect_ramdisk=False)
def test_xml_and_uri_no_kernel(self):
    """Ramdisk image without a kernel image.

    NOTE(review): expect_ramdisk is False here — presumably a ramdisk
    is ignored when no kernel is present; confirm against
    _check_xml_and_uri.
    """
    instance_data = dict(self.test_instance)
    instance_data['ramdisk_id'] = 'ari-deadbeef'
    self._check_xml_and_uri(instance_data,
                            expect_kernel=False, expect_ramdisk=False)
def test_xml_and_uri(self):
    """Both kernel and ramdisk images: both should appear in the XML."""
    instance_data = dict(self.test_instance)
    instance_data['ramdisk_id'] = 'ari-deadbeef'
    instance_data['kernel_id'] = 'aki-deadbeef'
    self._check_xml_and_uri(instance_data,
                            expect_kernel=True, expect_ramdisk=True)
def test_xml_and_uri_rescue(self):
    """Rescue boot with both kernel and ramdisk images."""
    instance_data = dict(self.test_instance)
    instance_data['ramdisk_id'] = 'ari-deadbeef'
    instance_data['kernel_id'] = 'aki-deadbeef'
    self._check_xml_and_uri(instance_data, expect_kernel=True,
                            expect_ramdisk=True, rescue=instance_data)
def test_xml_and_uri_rescue_no_kernel_no_ramdisk(self):
    """Rescue boot with neither kernel nor ramdisk images."""
    instance_data = dict(self.test_instance)
    self._check_xml_and_uri(instance_data, expect_kernel=False,
                            expect_ramdisk=False, rescue=instance_data)
def test_xml_and_uri_rescue_no_kernel(self):
    """Rescue boot with only a ramdisk image: the ramdisk should be
    referenced in the XML, the kernel should not.
    """
    instance_data = dict(self.test_instance)
    # Use the 'ari-' ramdisk prefix for consistency with the sibling
    # tests (previously this used the kernel prefix 'aki-deadbeef');
    # the id is an opaque fake either way.
    instance_data['ramdisk_id'] = 'ari-deadbeef'
    self._check_xml_and_uri(instance_data, expect_kernel=False,
                            expect_ramdisk=True, rescue=instance_data)
def test_xml_and_uri_rescue_no_ramdisk(self):
    """Rescue boot with only a kernel image."""
    instance_data = dict(self.test_instance)
    instance_data['kernel_id'] = 'aki-deadbeef'
    self._check_xml_and_uri(instance_data, expect_kernel=True,
                            expect_ramdisk=False, rescue=instance_data)
def test_xml_uuid(self):
    """The generated XML should carry the instance UUID."""
    self._check_xml_and_uuid({"disk_format": "raw"})
def test_lxc_container_and_uri(self):
    """LXC container XML / connection URI sanity check."""
    instance_data = dict(self.test_instance)
    self._check_xml_and_container(instance_data)
def test_xml_disk_prefix(self):
    """Default disk device prefix (no override configured)."""
    instance_data = dict(self.test_instance)
    self._check_xml_and_disk_prefix(instance_data, None)
def test_xml_user_specified_disk_prefix(self):
    """A user-specified 'sd' disk prefix should be honoured."""
    instance_data = dict(self.test_instance)
    self._check_xml_and_disk_prefix(instance_data, 'sd')
def test_xml_disk_driver(self):
    """Disk driver element sanity check."""
    instance_data = dict(self.test_instance)
    self._check_xml_and_disk_driver(instance_data)
def test_xml_disk_bus_virtio(self):
    """A raw image should attach as a virtio disk at vda."""
    self._check_xml_and_disk_bus({"disk_format": "raw"},
                                 None,
                                 (("disk", "virtio", "vda"),))
def test_xml_disk_bus_ide(self):
    """An ISO image should attach as an IDE cdrom (scsi on Power)."""
    # It's necessary to check if the architecture is power, because
    # power doesn't have support to ide, and so libvirt translate
    # all ide calls to scsi
    expected = {arch.PPC: ("cdrom", "scsi", "sda"),
                arch.PPC64: ("cdrom", "scsi", "sda")}

    expec_val = expected.get(blockinfo.libvirt_utils.get_arch({}),
                             ("cdrom", "ide", "hda"))
    self._check_xml_and_disk_bus({"disk_format": "iso"},
                                 None,
                                 (expec_val,))
def test_xml_disk_bus_ide_and_virtio(self):
    """ISO root plus virtio ephemeral and swap disks should produce
    a cdrom (ide, or scsi on Power) followed by two virtio disks.
    """
    # It's necessary to check if the architecture is power, because
    # power doesn't have support to ide, and so libvirt translate
    # all ide calls to scsi
    expected = {arch.PPC: ("cdrom", "scsi", "sda"),
                arch.PPC64: ("cdrom", "scsi", "sda")}

    swap = {'device_name': '/dev/vdc',
            'swap_size': 1}
    ephemerals = [{'device_type': 'disk',
                   'disk_bus': 'virtio',
                   'device_name': '/dev/vdb',
                   'size': 1}]
    block_device_info = {
            'swap': swap,
            'ephemerals': ephemerals}
    expec_val = expected.get(blockinfo.libvirt_utils.get_arch({}),
                             ("cdrom", "ide", "hda"))
    self._check_xml_and_disk_bus({"disk_format": "iso"},
                                 block_device_info,
                                 (expec_val,
                                  ("disk", "virtio", "vdb"),
                                  ("disk", "virtio", "vdc")))
@mock.patch.object(host.Host, "list_instance_domains")
def test_list_instances(self, mock_list):
    """list_instances should return every domain name, including
    domains that are not running (no id).
    """
    fake_domains = [FakeVirtDomain(id=3, name="instance00000001"),
                    FakeVirtDomain(id=17, name="instance00000002"),
                    FakeVirtDomain(name="instance00000003"),
                    FakeVirtDomain(name="instance00000004")]
    mock_list.return_value = fake_domains

    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    names = drvr.list_instances()

    # Names come back in the same order the domains were listed.
    for pos, dom in enumerate(fake_domains):
        self.assertEqual(names[pos], dom.name())
    mock_list.assert_called_with(only_running=False)
@mock.patch.object(host.Host, "list_instance_domains")
def test_list_instance_uuids(self, mock_list):
    """list_instance_uuids should return every domain UUID, including
    domains that are not running (no id).
    """
    fake_domains = [FakeVirtDomain(id=3, name="instance00000001"),
                    FakeVirtDomain(id=17, name="instance00000002"),
                    FakeVirtDomain(name="instance00000003"),
                    FakeVirtDomain(name="instance00000004")]
    mock_list.return_value = fake_domains

    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    uuids = drvr.list_instance_uuids()

    self.assertEqual(len(uuids), 4)
    # UUIDs come back in the same order the domains were listed.
    for pos, dom in enumerate(fake_domains):
        self.assertEqual(uuids[pos], dom.UUIDString())
    mock_list.assert_called_with(only_running=False)
@mock.patch.object(host.Host, "list_instance_domains")
def test_get_all_block_devices(self, mock_list):
    """_get_all_block_devices should collect only the block-type disk
    source paths from every domain's XML, skipping file-backed disks.
    """
    xml = [
        # Domain with one file disk and one block disk.
        """
            <domain type='kvm'>
                <devices>
                    <disk type='file'>
                        <source file='filename'/>
                    </disk>
                    <disk type='block'>
                        <source dev='/path/to/dev/1'/>
                    </disk>
                </devices>
            </domain>
        """,
        # Domain with only a file disk - contributes nothing.
        """
            <domain type='kvm'>
                <devices>
                    <disk type='file'>
                        <source file='filename'/>
                    </disk>
                </devices>
            </domain>
        """,
        # Domain with one file disk and one block disk.
        """
            <domain type='kvm'>
                <devices>
                    <disk type='file'>
                        <source file='filename'/>
                    </disk>
                    <disk type='block'>
                        <source dev='/path/to/dev/3'/>
                    </disk>
                </devices>
            </domain>
        """,
    ]

    mock_list.return_value = [
        FakeVirtDomain(xml[0], id=3, name="instance00000001"),
        FakeVirtDomain(xml[1], id=1, name="instance00000002"),
        FakeVirtDomain(xml[2], id=5, name="instance00000003")]

    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    devices = drvr._get_all_block_devices()
    self.assertEqual(devices, ['/path/to/dev/1', '/path/to/dev/3'])
    mock_list.assert_called_with()
@mock.patch('nova.virt.libvirt.host.Host.get_online_cpus')
def test_get_host_vcpus(self, get_online_cpus):
    """vcpu_pin_set limits the reported vCPU total to the pinned CPUs."""
    conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    self.flags(vcpu_pin_set="4-5")
    get_online_cpus.return_value = set([4, 5, 6])
    # CPU 6 is online but outside the pin set, so only 2 are usable.
    self.assertEqual(2, conn._get_vcpu_total())
@mock.patch('nova.virt.libvirt.host.Host.get_online_cpus')
def test_get_host_vcpus_out_of_range(self, get_online_cpus):
    """A pin set naming a CPU that is not online raises Invalid."""
    conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    self.flags(vcpu_pin_set="4-6")
    # CPU 6 from the pin set is missing from the online set.
    get_online_cpus.return_value = set([4, 5])
    self.assertRaises(exception.Invalid, conn._get_vcpu_total)
@mock.patch('nova.virt.libvirt.host.Host.get_online_cpus')
def test_get_host_vcpus_libvirt_error(self, get_online_cpus):
    """When libvirt cannot report online CPUs, a multi-CPU pin set
    cannot be validated, so _get_vcpu_total raises Invalid.
    """
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    not_supported_exc = fakelibvirt.make_libvirtError(
        fakelibvirt.libvirtError,
        'this function is not supported by the connection driver:'
        ' virNodeNumOfDevices',
        error_code=fakelibvirt.VIR_ERR_NO_SUPPORT)
    self.flags(vcpu_pin_set="4-6")
    get_online_cpus.side_effect = not_supported_exc
    self.assertRaises(exception.Invalid, drvr._get_vcpu_total)
@mock.patch('nova.virt.libvirt.host.Host.get_online_cpus')
def test_get_host_vcpus_libvirt_error_success(self, get_online_cpus):
    """A single-CPU pin set still succeeds even if libvirt cannot report
    the online CPU set (nothing to cross-check against).
    """
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    not_supported_exc = fakelibvirt.make_libvirtError(
        fakelibvirt.libvirtError,
        'this function is not supported by the connection driver:'
        ' virNodeNumOfDevices',
        error_code=fakelibvirt.VIR_ERR_NO_SUPPORT)
    self.flags(vcpu_pin_set="1")
    get_online_cpus.side_effect = not_supported_exc
    expected_vcpus = 1
    vcpus = drvr._get_vcpu_total()
    self.assertEqual(expected_vcpus, vcpus)
@mock.patch('nova.virt.libvirt.host.Host.get_cpu_count')
def test_get_host_vcpus_after_hotplug(self, get_cpu_count):
    """_get_vcpu_total re-reads the host CPU count on every call, so a
    hotplugged CPU shows up without recreating the driver.
    """
    conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    for hotplugged_count in (2, 3):
        get_cpu_count.return_value = hotplugged_count
        self.assertEqual(hotplugged_count, conn._get_vcpu_total())
@mock.patch.object(host.Host, "has_min_version", return_value=True)
def test_quiesce(self, mock_has_min_version):
    """quiesce() freezes the guest filesystems (fsFreeze) when the image
    advertises the qemu guest agent and requires quiesce.
    """
    self.create_fake_libvirt_mock(lookupByName=self.fake_lookup)
    with mock.patch.object(FakeVirtDomain, "fsFreeze") as mock_fsfreeze:
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI())
        instance = objects.Instance(**self.test_instance)
        img_meta = {"properties": {"hw_qemu_guest_agent": "yes",
                                   "os_require_quiesce": "yes"}}
        self.assertIsNone(drvr.quiesce(self.context, instance, img_meta))
        mock_fsfreeze.assert_called_once_with()
def test_quiesce_not_supported(self):
    """Without guest-agent support, quiesce() raises
    InstanceQuiesceNotSupported.
    """
    self.create_fake_libvirt_mock()
    conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI())
    inst = objects.Instance(**self.test_instance)
    self.assertRaises(exception.InstanceQuiesceNotSupported,
                      conn.quiesce, self.context, inst, None)
@mock.patch.object(host.Host, "has_min_version", return_value=True)
def test_unquiesce(self, mock_has_min_version):
    """unquiesce() thaws the guest filesystems (fsThaw) when the image
    advertises the qemu guest agent and requires quiesce.
    """
    self.create_fake_libvirt_mock(getLibVersion=lambda: 1002005,
                                  lookupByName=self.fake_lookup)
    with mock.patch.object(FakeVirtDomain, "fsThaw") as mock_fsthaw:
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI())
        instance = objects.Instance(**self.test_instance)
        img_meta = {"properties": {"hw_qemu_guest_agent": "yes",
                                   "os_require_quiesce": "yes"}}
        self.assertIsNone(drvr.unquiesce(self.context, instance, img_meta))
        mock_fsthaw.assert_called_once_with()
def test__create_snapshot_metadata(self):
    """_create_snapshot_metadata builds glance image metadata from the
    base image and instance, defaulting container_format to 'bare' and
    only including os_type when the instance defines it.
    """
    base = {}
    instance_data = {'kernel_id': 'kernel',
                     'project_id': 'prj_id',
                     'ramdisk_id': 'ram_id',
                     'os_type': None}
    instance = objects.Instance(**instance_data)
    img_fmt = 'raw'
    snp_name = 'snapshot_name'
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    ret = drvr._create_snapshot_metadata(base, instance, img_fmt, snp_name)
    expected = {'is_public': False,
                'status': 'active',
                'name': snp_name,
                'properties': {
                    'kernel_id': instance['kernel_id'],
                    'image_location': 'snapshot',
                    'image_state': 'available',
                    'owner_id': instance['project_id'],
                    'ramdisk_id': instance['ramdisk_id'],
                },
                'disk_format': img_fmt,
                'container_format': base.get('container_format', 'bare')
                }
    self.assertEqual(ret, expected)

    # simulate an instance with os_type field defined
    # disk format equals to ami
    # container format not equals to bare
    instance['os_type'] = 'linux'
    base['disk_format'] = 'ami'
    base['container_format'] = 'test_container'
    expected['properties']['os_type'] = instance['os_type']
    expected['disk_format'] = base['disk_format']
    expected['container_format'] = base.get('container_format', 'bare')
    ret = drvr._create_snapshot_metadata(base, instance, img_fmt, snp_name)
    self.assertEqual(ret, expected)
def test_get_volume_driver(self):
    """A recognized driver_volume_type resolves to its driver class."""
    conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    connection_info = {'driver_volume_type': 'fake',
                       'data': {'device_path': '/fake',
                                'access_mode': 'rw'}}
    picked = conn._get_volume_driver(connection_info)
    self.assertTrue(
        isinstance(picked, volume_drivers.LibvirtFakeVolumeDriver))
def test_get_volume_driver_unknown(self):
    """An unrecognized driver_volume_type raises VolumeDriverNotFound."""
    conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    connection_info = {'driver_volume_type': 'unknown',
                       'data': {'device_path': '/fake',
                                'access_mode': 'rw'}}
    self.assertRaises(exception.VolumeDriverNotFound,
                      conn._get_volume_driver,
                      connection_info)
@mock.patch('nova.virt.libvirt.volume.LibvirtFakeVolumeDriver.'
            'connect_volume')
@mock.patch('nova.virt.libvirt.volume.LibvirtFakeVolumeDriver.get_config')
def test_get_volume_config(self, get_config, connect_volume):
    """_get_volume_config delegates to the volume driver's get_config()
    and returns its result unchanged.
    """
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    connection_info = {'driver_volume_type': 'fake',
                       'data': {'device_path': '/fake',
                                'access_mode': 'rw'}}
    bdm = {'device_name': 'vdb',
           'disk_bus': 'fake-bus',
           'device_type': 'fake-type'}
    disk_info = {'bus': bdm['disk_bus'], 'type': bdm['device_type'],
                 'dev': 'vdb'}
    mock_config = mock.MagicMock()

    get_config.return_value = mock_config
    config = drvr._get_volume_config(connection_info, disk_info)
    get_config.assert_called_once_with(connection_info, disk_info)
    self.assertEqual(mock_config, config)
def test_attach_invalid_volume_type(self):
    """attach_volume with an unknown driver_volume_type raises
    VolumeDriverNotFound.
    """
    self.create_fake_libvirt_mock()
    libvirt_driver.LibvirtDriver._conn.lookupByName = self.fake_lookup
    instance = objects.Instance(**self.test_instance)
    self.mox.ReplayAll()
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    self.assertRaises(exception.VolumeDriverNotFound,
                      drvr.attach_volume, None,
                      {"driver_volume_type": "badtype"},
                      instance,
                      "/dev/sda")
def test_attach_blockio_invalid_hypervisor(self):
    """Block-size tuning in connection_info is rejected for hypervisors
    that do not support it (anything but qemu/kvm).
    """
    self.flags(virt_type='fake_type', group='libvirt')
    self.create_fake_libvirt_mock()
    libvirt_driver.LibvirtDriver._conn.lookupByName = self.fake_lookup
    instance = objects.Instance(**self.test_instance)
    self.mox.ReplayAll()
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    self.assertRaises(exception.InvalidHypervisorType,
                      drvr.attach_volume, None,
                      {"driver_volume_type": "fake",
                       "data": {"logical_block_size": "4096",
                                "physical_block_size": "4096"}
                       },
                      instance,
                      "/dev/sda")
@mock.patch.object(fakelibvirt.virConnect, "getLibVersion")
def test_attach_blockio_invalid_version(self, mock_version):
    """Block-size tuning requires a minimum libvirt version; an older
    libvirt (0.9.8 here) makes attach_volume raise Invalid.
    """
    # Encode version 0.9.8 the way libvirt does: major*1e6 + minor*1e3 +
    # micro.
    mock_version.return_value = (0 * 1000 * 1000) + (9 * 1000) + 8
    self.flags(virt_type='qemu', group='libvirt')
    self.create_fake_libvirt_mock()
    libvirt_driver.LibvirtDriver._conn.lookupByName = self.fake_lookup
    instance = objects.Instance(**self.test_instance)
    self.mox.ReplayAll()
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    self.assertRaises(exception.Invalid,
                      drvr.attach_volume, None,
                      {"driver_volume_type": "fake",
                       "data": {"logical_block_size": "4096",
                                "physical_block_size": "4096"}
                       },
                      instance,
                      "/dev/sda")
@mock.patch('nova.utils.get_image_from_system_metadata')
@mock.patch('nova.virt.libvirt.blockinfo.get_info_from_bdm')
@mock.patch('nova.virt.libvirt.host.Host.get_domain')
def test_attach_volume_with_vir_domain_affect_live_flag(self,
        mock_get_domain, mock_get_info, get_image):
    """Attaching to a RUNNING or PAUSED guest must pass both
    AFFECT_CONFIG and AFFECT_LIVE to attachDeviceFlags so the change
    applies to the persistent config and the live domain.
    """
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    instance = objects.Instance(**self.test_instance)
    image_meta = {}
    get_image.return_value = image_meta
    mock_dom = mock.MagicMock()
    mock_get_domain.return_value = mock_dom

    connection_info = {"driver_volume_type": "fake",
                       "data": {"device_path": "/fake",
                                "access_mode": "rw"}}
    bdm = {'device_name': 'vdb',
           'disk_bus': 'fake-bus',
           'device_type': 'fake-type'}
    disk_info = {'bus': bdm['disk_bus'], 'type': bdm['device_type'],
                 'dev': 'vdb'}
    mock_get_info.return_value = disk_info
    mock_conf = mock.MagicMock()
    flags = (fakelibvirt.VIR_DOMAIN_AFFECT_CONFIG |
             fakelibvirt.VIR_DOMAIN_AFFECT_LIVE)

    with contextlib.nested(
        mock.patch.object(drvr, '_connect_volume'),
        mock.patch.object(drvr, '_get_volume_config',
                          return_value=mock_conf),
        mock.patch.object(drvr, '_set_cache_mode')
    ) as (mock_connect_volume, mock_get_volume_config,
          mock_set_cache_mode):
        # Both live power states should get the same flag combination.
        for state in (power_state.RUNNING, power_state.PAUSED):
            # domain.info() tuple: state, maxMem, memory, vcpus, cpu
            # time fields.
            mock_dom.info.return_value = [state, 512, 512, 2, 1234, 5678]

            drvr.attach_volume(self.context, connection_info, instance,
                               "/dev/vdb", disk_bus=bdm['disk_bus'],
                               device_type=bdm['device_type'])

            mock_get_domain.assert_called_with(instance)
            mock_get_info.assert_called_with(CONF.libvirt.virt_type,
                                             image_meta, bdm)
            mock_connect_volume.assert_called_with(
                connection_info, disk_info)
            mock_get_volume_config.assert_called_with(
                connection_info, disk_info)
            mock_set_cache_mode.assert_called_with(mock_conf)
            mock_dom.attachDeviceFlags.assert_called_with(
                mock_conf.to_xml(), flags=flags)
@mock.patch('nova.virt.libvirt.host.Host.get_domain')
def test_detach_volume_with_vir_domain_affect_live_flag(self,
        mock_get_domain):
    """Detaching from a RUNNING or PAUSED guest must pass both
    AFFECT_CONFIG and AFFECT_LIVE to detachDeviceFlags, then disconnect
    the volume.
    """
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    instance = objects.Instance(**self.test_instance)
    mock_xml = """<domain>
<devices>
<disk type='file'>
<source file='/path/to/fake-volume'/>
<target dev='vdc' bus='virtio'/>
</disk>
</devices>
</domain>"""
    mock_dom = mock.MagicMock()
    mock_dom.XMLDesc.return_value = mock_xml

    connection_info = {"driver_volume_type": "fake",
                       "data": {"device_path": "/fake",
                                "access_mode": "rw"}}
    flags = (fakelibvirt.VIR_DOMAIN_AFFECT_CONFIG |
             fakelibvirt.VIR_DOMAIN_AFFECT_LIVE)

    with mock.patch.object(drvr, '_disconnect_volume') as \
            mock_disconnect_volume:
        for state in (power_state.RUNNING, power_state.PAUSED):
            mock_dom.info.return_value = [state, 512, 512, 2, 1234, 5678]
            mock_get_domain.return_value = mock_dom
            drvr.detach_volume(connection_info, instance, '/dev/vdc')

            mock_get_domain.assert_called_with(instance)
            # The disk element is re-serialized by the driver before
            # being handed to detachDeviceFlags, hence the normalized
            # attribute style below.
            mock_dom.detachDeviceFlags.assert_called_with("""<disk type="file" device="disk">
<source file="/path/to/fake-volume"/>
<target bus="virtio" dev="vdc"/>
</disk>
""", flags)
            mock_disconnect_volume.assert_called_with(
                connection_info, 'vdc')
def test_multi_nic(self):
    """The guest XML carries one <interface> element per NIC, and the
    first one is bridge-typed.
    """
    network_info = _fake_network_info(self.stubs, 2)
    conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
    instance_ref = objects.Instance(**self.test_instance)
    image_meta = {}
    disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                        instance_ref, image_meta)
    xml = conn._get_guest_xml(self.context, instance_ref, network_info,
                              disk_info, image_meta)
    interfaces = etree.fromstring(xml).findall("./devices/interface")
    self.assertEqual(2, len(interfaces))
    self.assertEqual('bridge', interfaces[0].get('type'))
def _behave_supports_direct_io(self, raise_open=False, raise_write=False,
                               exc=ValueError()):
    """Record one mox expectation sequence for a _supports_direct_io
    probe: open the test file with O_DIRECT, optionally fail at open or
    at write, then unlink.
    """
    open_behavior = os.open(os.path.join('.', '.directio.test'),
                            os.O_CREAT | os.O_WRONLY | os.O_DIRECT)
    if raise_open:
        open_behavior.AndRaise(exc)
    else:
        open_behavior.AndReturn(3)
        # NOTE(review): 'bahavior' is a long-standing typo for
        # 'behavior' (local name only, harmless).
        write_bahavior = os.write(3, mox.IgnoreArg())
        if raise_write:
            write_bahavior.AndRaise(exc)
        else:
            os.close(3)
    # NOTE(review): the recorded unlink argument is the literal 3 while
    # the driver unlinks a path string — presumably tolerated because
    # the driver swallows unlink errors; TODO confirm against
    # _supports_direct_io.
    os.unlink(3)
def test_supports_direct_io(self):
    """_supports_direct_io returns True on success, re-raises unexpected
    errors, and returns False on EINVAL (no O_DIRECT support).
    """
    # O_DIRECT is not supported on all Python runtimes, so on platforms
    # where it's not supported (e.g. Mac), we can still test the code-path
    # by stubbing out the value.
    if not hasattr(os, 'O_DIRECT'):
        # `mock` seems to have trouble stubbing an attr that doesn't
        # originally exist, so falling back to stubbing out the attribute
        # directly.
        os.O_DIRECT = 16384
        self.addCleanup(delattr, os, 'O_DIRECT')

    einval = OSError()
    einval.errno = errno.EINVAL
    self.mox.StubOutWithMock(os, 'open')
    self.mox.StubOutWithMock(os, 'write')
    self.mox.StubOutWithMock(os, 'close')
    self.mox.StubOutWithMock(os, 'unlink')
    _supports_direct_io = libvirt_driver.LibvirtDriver._supports_direct_io

    # Record five probe scenarios in the order they are replayed below.
    self._behave_supports_direct_io()
    self._behave_supports_direct_io(raise_write=True)
    self._behave_supports_direct_io(raise_open=True)
    self._behave_supports_direct_io(raise_write=True, exc=einval)
    self._behave_supports_direct_io(raise_open=True, exc=einval)

    self.mox.ReplayAll()
    # Success -> True.
    self.assertTrue(_supports_direct_io('.'))
    # Non-EINVAL errors propagate.
    self.assertRaises(ValueError, _supports_direct_io, '.')
    self.assertRaises(ValueError, _supports_direct_io, '.')
    # EINVAL means no O_DIRECT support -> False.
    self.assertFalse(_supports_direct_io('.'))
    self.assertFalse(_supports_direct_io('.'))
    self.mox.VerifyAll()
def _check_xml_and_container(self, instance):
instance_ref = objects.Instance(**instance)
image_meta = {}
self.flags(virt_type='lxc', group='libvirt')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
self.assertEqual(drvr._uri(), 'lxc:///')
network_info = _fake_network_info(self.stubs, 1)
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance_ref,
image_meta)
xml = drvr._get_guest_xml(self.context, instance_ref,
network_info, disk_info,
image_meta)
tree = etree.fromstring(xml)
check = [
(lambda t: t.find('.').get('type'), 'lxc'),
(lambda t: t.find('./os/type').text, 'exe'),
(lambda t: t.find('./devices/filesystem/target').get('dir'), '/')]
for i, (check, expected_result) in enumerate(check):
self.assertEqual(check(tree),
expected_result,
'%s failed common check %d' % (xml, i))
target = tree.find('./devices/filesystem/source').get('dir')
self.assertTrue(len(target) > 0)
def _check_xml_and_disk_prefix(self, instance, prefix):
    """For each virt type, verify the first disk target name uses either
    the configured disk_prefix or the hypervisor's default prefix.
    """
    instance_ref = objects.Instance(**instance)
    image_meta = {}

    def _get_prefix(p, default):
        # Configured prefix wins; otherwise fall back to the
        # hypervisor default target name.
        if p:
            return p + 'a'
        return default

    type_disk_map = {
        'qemu': [
            (lambda t: t.find('.').get('type'), 'qemu'),
            (lambda t: t.find('./devices/disk/target').get('dev'),
             _get_prefix(prefix, 'vda'))],
        'xen': [
            (lambda t: t.find('.').get('type'), 'xen'),
            (lambda t: t.find('./devices/disk/target').get('dev'),
             _get_prefix(prefix, 'xvda'))],
        'kvm': [
            (lambda t: t.find('.').get('type'), 'kvm'),
            (lambda t: t.find('./devices/disk/target').get('dev'),
             _get_prefix(prefix, 'vda'))],
        'uml': [
            (lambda t: t.find('.').get('type'), 'uml'),
            (lambda t: t.find('./devices/disk/target').get('dev'),
             _get_prefix(prefix, 'ubda'))]
        }

    for (virt_type, checks) in six.iteritems(type_disk_map):
        self.flags(virt_type=virt_type, group='libvirt')
        if prefix:
            self.flags(disk_prefix=prefix, group='libvirt')
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)

        network_info = _fake_network_info(self.stubs, 1)
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance_ref,
                                            image_meta)
        xml = drvr._get_guest_xml(self.context, instance_ref,
                                  network_info, disk_info,
                                  image_meta)
        tree = etree.fromstring(xml)

        for i, (check, expected_result) in enumerate(checks):
            self.assertEqual(check(tree),
                             expected_result,
                             '%s != %s failed check %d' %
                             (check(tree), expected_result, i))
def _check_xml_and_disk_driver(self, image_meta):
    """Disks get cache='none' when O_DIRECT is available and
    cache='writethrough' otherwise.
    """
    os_open = os.open
    directio_supported = True

    def os_open_stub(path, flags, *args, **kwargs):
        # Simulate a filesystem with/without O_DIRECT support; strip the
        # flag before delegating to the real os.open.
        if flags & os.O_DIRECT:
            if not directio_supported:
                raise OSError(errno.EINVAL,
                              '%s: %s' % (os.strerror(errno.EINVAL), path))
            flags &= ~os.O_DIRECT
        return os_open(path, flags, *args, **kwargs)

    self.stubs.Set(os, 'open', os_open_stub)

    # NOTE(review): decorating a nested function with @staticmethod is
    # unusual but needed because the stub replaces a staticmethod on
    # LibvirtDriver — presumably for Python 2 descriptor semantics;
    # confirm before changing.
    @staticmethod
    def connection_supports_direct_io_stub(dirpath):
        return directio_supported

    self.stubs.Set(libvirt_driver.LibvirtDriver,
        '_supports_direct_io', connection_supports_direct_io_stub)

    instance_ref = objects.Instance(**self.test_instance)
    # NOTE(review): this rebinding discards the image_meta parameter —
    # callers' argument is ignored; looks unintentional, confirm.
    image_meta = {}
    network_info = _fake_network_info(self.stubs, 1)

    drv = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
    disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                        instance_ref,
                                        image_meta)
    xml = drv._get_guest_xml(self.context, instance_ref,
                             network_info, disk_info, image_meta)
    tree = etree.fromstring(xml)
    disks = tree.findall('./devices/disk/driver')
    for guest_disk in disks:
        self.assertEqual(guest_disk.get("cache"), "none")

    directio_supported = False

    # The O_DIRECT availability is cached on first use in
    # LibvirtDriver, hence we re-create it here
    drv = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
    disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                        instance_ref,
                                        image_meta)
    xml = drv._get_guest_xml(self.context, instance_ref,
                             network_info, disk_info, image_meta)
    tree = etree.fromstring(xml)
    disks = tree.findall('./devices/disk/driver')
    for guest_disk in disks:
        self.assertEqual(guest_disk.get("cache"), "writethrough")
def _check_xml_and_disk_bus(self, image_meta,
                            block_device_info, wantConfig):
    """Verify each generated disk's (device type, bus, dev name) against
    wantConfig, a list of (type, bus, dev) triples in disk order.
    """
    instance_ref = objects.Instance(**self.test_instance)
    network_info = _fake_network_info(self.stubs, 1)

    drv = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
    disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                        instance_ref,
                                        image_meta,
                                        block_device_info)
    xml = drv._get_guest_xml(self.context, instance_ref,
                             network_info, disk_info, image_meta,
                             block_device_info=block_device_info)
    tree = etree.fromstring(xml)

    got_disks = tree.findall('./devices/disk')
    got_disk_targets = tree.findall('./devices/disk/target')
    for i in range(len(wantConfig)):
        want_device_type = wantConfig[i][0]
        want_device_bus = wantConfig[i][1]
        want_device_dev = wantConfig[i][2]

        got_device_type = got_disks[i].get('device')
        got_device_bus = got_disk_targets[i].get('bus')
        got_device_dev = got_disk_targets[i].get('dev')

        self.assertEqual(got_device_type, want_device_type)
        self.assertEqual(got_device_bus, want_device_bus)
        self.assertEqual(got_device_dev, want_device_dev)
def _check_xml_and_uuid(self, image_meta):
    """The generated guest XML's <uuid> must match the instance uuid."""
    instance_ref = objects.Instance(**self.test_instance)
    # NOTE(review): this rebinding discards the image_meta parameter —
    # callers' argument is ignored; looks unintentional, confirm.
    image_meta = {}
    network_info = _fake_network_info(self.stubs, 1)

    drv = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
    disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                        instance_ref,
                                        image_meta)
    xml = drv._get_guest_xml(self.context, instance_ref,
                             network_info, disk_info, image_meta)
    tree = etree.fromstring(xml)
    self.assertEqual(tree.find('./uuid').text,
                     instance_ref['uuid'])
@mock.patch.object(libvirt_driver.LibvirtDriver,
                   "_get_host_sysinfo_serial_hardware",)
def _check_xml_and_uri(self, instance, mock_serial,
                       expect_ramdisk=False, expect_kernel=False,
                       rescue=None, expect_xen_hvm=False, xen_only=False):
    """Shared verifier: for each hypervisor type, check the connection
    URI and a battery of XPath assertions on the generated guest XML
    (kernel/ramdisk presence, sysinfo entries, serial/console devices,
    disks, firewall filter), then verify that an explicitly configured
    connection_uri always overrides the per-virt-type default.
    """
    mock_serial.return_value = "cef19ce0-0ca2-11df-855d-b19fbce37686"
    instance_ref = objects.Instance(**instance)
    image_meta = {}

    xen_vm_mode = vm_mode.XEN
    if expect_xen_hvm:
        xen_vm_mode = vm_mode.HVM

    # Maps virt_type -> (expected URI, list of (xpath-check, expected)).
    type_uri_map = {'qemu': ('qemu:///system',
                             [(lambda t: t.find('.').get('type'), 'qemu'),
                              (lambda t: t.find('./os/type').text,
                               vm_mode.HVM),
                              (lambda t: t.find('./devices/emulator'), None)]),
                    'kvm': ('qemu:///system',
                            [(lambda t: t.find('.').get('type'), 'kvm'),
                             (lambda t: t.find('./os/type').text,
                              vm_mode.HVM),
                             (lambda t: t.find('./devices/emulator'), None)]),
                    'uml': ('uml:///system',
                            [(lambda t: t.find('.').get('type'), 'uml'),
                             (lambda t: t.find('./os/type').text,
                              vm_mode.UML)]),
                    'xen': ('xen:///',
                            [(lambda t: t.find('.').get('type'), 'xen'),
                             (lambda t: t.find('./os/type').text,
                              xen_vm_mode)])}

    if expect_xen_hvm or xen_only:
        hypervisors_to_check = ['xen']
    else:
        hypervisors_to_check = ['qemu', 'kvm', 'xen']

    for hypervisor_type in hypervisors_to_check:
        check_list = type_uri_map[hypervisor_type][1]

        if rescue:
            suffix = '.rescue'
        else:
            suffix = ''
        # Kernel element: present with the right filename, or absent.
        if expect_kernel:
            check = (lambda t: self.relpath(t.find('./os/kernel').text).
                     split('/')[1], 'kernel' + suffix)
        else:
            check = (lambda t: t.find('./os/kernel'), None)
        check_list.append(check)

        if expect_kernel:
            # no_timer_check is only appended on qemu.
            check = (lambda t: "no_timer_check" in t.find('./os/cmdline').
                     text, hypervisor_type == "qemu")
            check_list.append(check)
        # Hypervisors that only support vm_mode.HVM and Xen
        # should not produce configuration that results in kernel
        # arguments
        if not expect_kernel and (hypervisor_type in
                                  ['qemu', 'kvm', 'xen']):
            check = (lambda t: t.find('./os/root'), None)
            check_list.append(check)
            check = (lambda t: t.find('./os/cmdline'), None)
            check_list.append(check)

        # Ramdisk element: present with the right filename, or absent.
        if expect_ramdisk:
            check = (lambda t: self.relpath(t.find('./os/initrd').text).
                     split('/')[1], 'ramdisk' + suffix)
        else:
            check = (lambda t: t.find('./os/initrd'), None)
        check_list.append(check)

        if hypervisor_type in ['qemu', 'kvm']:
            # The five sysinfo entries, in document order:
            # manufacturer, product, version, serial, uuid.
            xpath = "./sysinfo/system/entry"
            check = (lambda t: t.findall(xpath)[0].get("name"),
                     "manufacturer")
            check_list.append(check)
            check = (lambda t: t.findall(xpath)[0].text,
                     version.vendor_string())
            check_list.append(check)

            check = (lambda t: t.findall(xpath)[1].get("name"),
                     "product")
            check_list.append(check)
            check = (lambda t: t.findall(xpath)[1].text,
                     version.product_string())
            check_list.append(check)

            check = (lambda t: t.findall(xpath)[2].get("name"),
                     "version")
            check_list.append(check)
            # NOTE(sirp): empty strings don't roundtrip in lxml (they are
            # converted to None), so we need an `or ''` to correct for that
            check = (lambda t: t.findall(xpath)[2].text or '',
                     version.version_string_with_package())
            check_list.append(check)

            check = (lambda t: t.findall(xpath)[3].get("name"),
                     "serial")
            check_list.append(check)
            check = (lambda t: t.findall(xpath)[3].text,
                     "cef19ce0-0ca2-11df-855d-b19fbce37686")
            check_list.append(check)

            check = (lambda t: t.findall(xpath)[4].get("name"),
                     "uuid")
            check_list.append(check)
            check = (lambda t: t.findall(xpath)[4].text,
                     instance['uuid'])
            check_list.append(check)

        if hypervisor_type in ['qemu', 'kvm']:
            # qemu/kvm get a file-backed console log plus a pty serial.
            check = (lambda t: t.findall('./devices/serial')[0].get(
                'type'), 'file')
            check_list.append(check)
            check = (lambda t: t.findall('./devices/serial')[1].get(
                'type'), 'pty')
            check_list.append(check)
            check = (lambda t: self.relpath(t.findall(
                './devices/serial/source')[0].get('path')).
                split('/')[1], 'console.log')
            check_list.append(check)
        else:
            check = (lambda t: t.find('./devices/console').get(
                'type'), 'pty')
            check_list.append(check)

    # Checks applied identically for every hypervisor type.
    common_checks = [
        (lambda t: t.find('.').tag, 'domain'),
        (lambda t: t.find('./memory').text, '2097152')]
    if rescue:
        common_checks += [
            (lambda t: self.relpath(t.findall('./devices/disk/source')[0].
                get('file')).split('/')[1], 'disk.rescue'),
            (lambda t: self.relpath(t.findall('./devices/disk/source')[1].
                get('file')).split('/')[1], 'disk')]
    else:
        common_checks += [(lambda t: self.relpath(t.findall(
            './devices/disk/source')[0].get('file')).split('/')[1],
            'disk')]
        common_checks += [(lambda t: self.relpath(t.findall(
            './devices/disk/source')[1].get('file')).split('/')[1],
            'disk.local')]

    for virt_type in hypervisors_to_check:
        expected_uri = type_uri_map[virt_type][0]
        checks = type_uri_map[virt_type][1]
        self.flags(virt_type=virt_type, group='libvirt')

        with mock.patch('nova.virt.libvirt.driver.libvirt') as old_virt:
            # Force the code path for libvirt builds without the
            # baseline-CPU expand-features capability.
            del old_virt.VIR_CONNECT_BASELINE_CPU_EXPAND_FEATURES

            drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)

            self.assertEqual(drvr._uri(), expected_uri)

            network_info = _fake_network_info(self.stubs, 1)
            disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                                instance_ref,
                                                image_meta,
                                                rescue=rescue)
            xml = drvr._get_guest_xml(self.context, instance_ref,
                                      network_info, disk_info,
                                      image_meta,
                                      rescue=rescue)
            tree = etree.fromstring(xml)
            for i, (check, expected_result) in enumerate(checks):
                self.assertEqual(check(tree),
                                 expected_result,
                                 '%s != %s failed check %d' %
                                 (check(tree), expected_result, i))

            for i, (check, expected_result) in enumerate(common_checks):
                self.assertEqual(check(tree),
                                 expected_result,
                                 '%s != %s failed common check %d' %
                                 (check(tree), expected_result, i))

            filterref = './devices/interface/filterref'
            vif = network_info[0]
            nic_id = vif['address'].replace(':', '')
            fw = firewall.NWFilterFirewall(fake.FakeVirtAPI(), drvr)
            instance_filter_name = fw._instance_filter_name(instance_ref,
                                                            nic_id)
            self.assertEqual(tree.find(filterref).get('filter'),
                             instance_filter_name)

    # This test is supposed to make sure we don't
    # override a specifically set uri
    #
    # Deliberately not just assigning this string to CONF.connection_uri
    # and checking against that later on. This way we make sure the
    # implementation doesn't fiddle around with the CONF.
    testuri = 'something completely different'
    self.flags(connection_uri=testuri, group='libvirt')
    for (virt_type, (expected_uri, checks)) in six.iteritems(type_uri_map):
        self.flags(virt_type=virt_type, group='libvirt')
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        self.assertEqual(drvr._uri(), testuri)
def test_ensure_filtering_rules_for_instance_timeout(self):
    # ensure_filtering_rules_for_instance() finishes with timeout.
    # With the filter-exists check stubbed to always return None, the
    # retry loop must give up after sleeping a cumulative 29 seconds.

    # Preparing mocks
    def fake_none(self, *args):
        return

    class FakeTime(object):
        # Accumulates the requested sleep durations instead of sleeping.
        def __init__(self):
            self.counter = 0

        def sleep(self, t):
            self.counter += t

    fake_timer = FakeTime()

    def fake_sleep(t):
        fake_timer.sleep(t)

    # _fake_network_info must be called before create_fake_libvirt_mock(),
    # as _fake_network_info calls importutils.import_class() and
    # create_fake_libvirt_mock() mocks importutils.import_class().
    network_info = _fake_network_info(self.stubs, 1)
    self.create_fake_libvirt_mock()
    instance_ref = objects.Instance(**self.test_instance)

    # Start test
    self.mox.ReplayAll()
    try:
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        self.stubs.Set(drvr.firewall_driver,
                       'setup_basic_filtering',
                       fake_none)
        self.stubs.Set(drvr.firewall_driver,
                       'prepare_instance_filter',
                       fake_none)
        self.stubs.Set(drvr.firewall_driver,
                       'instance_filter_exists',
                       fake_none)
        self.stubs.Set(greenthread,
                       'sleep',
                       fake_sleep)
        drvr.ensure_filtering_rules_for_instance(instance_ref,
                                                 network_info)
    except exception.NovaException as e:
        # The timeout surfaces as a NovaException naming the missing
        # firewall filter.
        msg = ('The firewall filter for %s does not exist' %
               instance_ref['name'])
        c1 = (0 <= six.text_type(e).find(msg))
        self.assertTrue(c1)

    self.assertEqual(29, fake_timer.counter, "Didn't wait the expected "
                                             "amount of time")
@mock.patch.object(libvirt_driver.LibvirtDriver,
                   '_create_shared_storage_test_file')
@mock.patch.object(fakelibvirt.Connection, 'compareCPU')
def test_check_can_live_migrate_dest_all_pass_with_block_migration(
        self, mock_cpu, mock_test_file):
    """With block migration requested, the destination check returns
    disk_available_mb computed from disk_available_least (GB -> MB).
    """
    instance_ref = objects.Instance(**self.test_instance)
    instance_ref.vcpu_model = test_vcpu_model.fake_vcpumodel
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    compute_info = {'disk_available_least': 400,
                    'cpu_info': 'asdf',
                    }
    filename = "file"

    # _check_cpu_match
    mock_cpu.return_value = 1
    # mounted_on_same_shared_storage
    mock_test_file.return_value = filename
    # No need for the src_compute_info
    return_value = drvr.check_can_live_migrate_destination(self.context,
        instance_ref, None, compute_info, True)
    # 400 GB -> 409600 MB.
    self.assertThat({"filename": "file",
                     'image_type': 'default',
                     'disk_available_mb': 409600,
                     "disk_over_commit": False,
                     "block_migration": True},
                    matchers.DictMatches(return_value))
@mock.patch.object(libvirt_driver.LibvirtDriver,
                   '_create_shared_storage_test_file')
@mock.patch.object(fakelibvirt.Connection, 'compareCPU')
def test_check_can_live_migrate_dest_all_pass_no_block_migration(
        self, mock_cpu, mock_test_file):
    """Without block migration, disk_available_mb stays None in the
    destination check data.
    """
    instance_ref = objects.Instance(**self.test_instance)
    instance_ref.vcpu_model = test_vcpu_model.fake_vcpumodel
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    compute_info = {'disk_available_least': 400,
                    'cpu_info': 'asdf',
                    }
    filename = "file"

    # _check_cpu_match
    mock_cpu.return_value = 1
    # mounted_on_same_shared_storage
    mock_test_file.return_value = filename
    # No need for the src_compute_info
    return_value = drvr.check_can_live_migrate_destination(self.context,
        instance_ref, None, compute_info, False)
    self.assertThat({"filename": "file",
                     "image_type": 'default',
                     "block_migration": False,
                     "disk_over_commit": False,
                     "disk_available_mb": None},
                    matchers.DictMatches(return_value))
@mock.patch.object(libvirt_driver.LibvirtDriver,
                   '_create_shared_storage_test_file',
                   return_value='fake')
@mock.patch.object(libvirt_driver.LibvirtDriver, '_compare_cpu')
def test_check_can_live_migrate_guest_cpu_none_model(
        self, mock_cpu, mock_test_file):
    # Tests that when instance.vcpu_model.model is None, the host cpu
    # model is used for live migration.
    instance_ref = objects.Instance(**self.test_instance)
    instance_ref.vcpu_model = test_vcpu_model.fake_vcpumodel
    instance_ref.vcpu_model.model = None
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    compute_info = {'cpu_info': 'asdf'}
    result = drvr.check_can_live_migrate_destination(
        self.context, instance_ref, compute_info, compute_info)
    # _compare_cpu gets None for the guest model -> host CPU fallback.
    mock_cpu.assert_called_once_with(None, 'asdf')
    expected_result = {"filename": 'fake',
                       "image_type": CONF.libvirt.images_type,
                       "block_migration": False,
                       "disk_over_commit": False,
                       "disk_available_mb": None}
    self.assertDictEqual(expected_result, result)
@mock.patch.object(libvirt_driver.LibvirtDriver,
                   '_create_shared_storage_test_file')
@mock.patch.object(fakelibvirt.Connection, 'compareCPU')
def test_check_can_live_migrate_dest_no_instance_cpu_info(
        self, mock_cpu, mock_test_file):
    """When the instance carries no vcpu_model, the source compute
    node's cpu_info JSON is used for the compatibility check.
    """
    instance_ref = objects.Instance(**self.test_instance)
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    compute_info = {'cpu_info': jsonutils.dumps({
        "vendor": "AMD",
        "arch": arch.I686,
        "features": ["sse3"],
        "model": "Opteron_G3",
        "topology": {"cores": 2, "threads": 1, "sockets": 4}
    })}
    filename = "file"

    # _check_cpu_match
    mock_cpu.return_value = 1
    # mounted_on_same_shared_storage
    mock_test_file.return_value = filename

    return_value = drvr.check_can_live_migrate_destination(self.context,
        instance_ref, compute_info, compute_info, False)
    self.assertThat({"filename": "file",
                     "image_type": 'default',
                     "block_migration": False,
                     "disk_over_commit": False,
                     "disk_available_mb": None},
                    matchers.DictMatches(return_value))
@mock.patch.object(fakelibvirt.Connection, 'compareCPU')
def test_check_can_live_migrate_dest_incompatible_cpu_raises(
        self, mock_cpu):
    """An InvalidCPUInfo raised by the CPU comparison propagates out of
    the destination check.
    """
    inst = objects.Instance(**self.test_instance)
    inst.vcpu_model = test_vcpu_model.fake_vcpumodel
    conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    mock_cpu.side_effect = exception.InvalidCPUInfo(reason='foo')
    compute_info = {'cpu_info': 'asdf'}
    self.assertRaises(exception.InvalidCPUInfo,
                      conn.check_can_live_migrate_destination,
                      self.context, inst,
                      compute_info, compute_info, False)
@mock.patch.object(host.Host, 'compare_cpu')
@mock.patch.object(nova.virt.libvirt, 'config')
def test_compare_cpu_compatible_host_cpu(self, mock_vconfig, mock_compare):
    """A positive libvirt comparison result means compatible: _compare_cpu
    returns None.
    """
    mock_compare.return_value = 5
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    self.assertIsNone(
        drvr._compare_cpu(None, jsonutils.dumps(_fake_cpu_info)))
@mock.patch.object(host.Host, 'compare_cpu')
@mock.patch.object(nova.virt.libvirt, 'config')
def test_compare_cpu_handles_not_supported_error_gracefully(self,
                                                            mock_vconfig,
                                                            mock_compare):
    """If libvirt lacks virCompareCPU (VIR_ERR_NO_SUPPORT), the check is
    skipped rather than failing.
    """
    not_supported_exc = fakelibvirt.make_libvirtError(
        fakelibvirt.libvirtError,
        'this function is not supported by the connection driver:'
        ' virCompareCPU',
        error_code=fakelibvirt.VIR_ERR_NO_SUPPORT)
    mock_compare.side_effect = not_supported_exc
    conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    ret = conn._compare_cpu(None, jsonutils.dumps(_fake_cpu_info))
    self.assertIsNone(ret)
@mock.patch.object(host.Host, 'compare_cpu')
@mock.patch.object(nova.virt.libvirt.LibvirtDriver,
                   '_vcpu_model_to_cpu_config')
def test_compare_cpu_compatible_guest_cpu(self, mock_vcpu_to_cpu,
                                          mock_compare):
    """A guest CPU model that compares as compatible yields None."""
    mock_compare.return_value = 6
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    self.assertIsNone(
        drvr._compare_cpu(jsonutils.dumps(_fake_cpu_info), None))
def test_compare_cpu_virt_type_xen(self):
    """For the xen virt type the CPU comparison is skipped entirely."""
    self.flags(virt_type='xen', group='libvirt')
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    self.assertIsNone(drvr._compare_cpu(None, None))
@mock.patch.object(host.Host, 'compare_cpu')
@mock.patch.object(nova.virt.libvirt, 'config')
def test_compare_cpu_invalid_cpuinfo_raises(self, mock_vconfig,
mock_compare):
mock_compare.return_value = 0
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.assertRaises(exception.InvalidCPUInfo,
conn._compare_cpu, None,
jsonutils.dumps(_fake_cpu_info))
@mock.patch.object(host.Host, 'compare_cpu')
@mock.patch.object(nova.virt.libvirt, 'config')
def test_compare_cpu_incompatible_cpu_raises(self, mock_vconfig,
mock_compare):
mock_compare.side_effect = fakelibvirt.libvirtError('cpu')
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.assertRaises(exception.MigrationPreCheckError,
conn._compare_cpu, None,
jsonutils.dumps(_fake_cpu_info))
    def test_check_can_live_migrate_dest_cleanup_works_correctly(self):
        """Destination cleanup removes the shared-storage test file."""
        # NOTE(review): this Instance is built but never used; presumably
        # left over from an earlier revision of the test.
        objects.Instance(**self.test_instance)
        dest_check_data = {"filename": "file",
                           "block_migration": True,
                           "disk_over_commit": False,
                           "disk_available_mb": 1024}
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        self.mox.StubOutWithMock(drvr, '_cleanup_shared_storage_test_file')
        # Expect exactly one cleanup call with the filename from the data.
        drvr._cleanup_shared_storage_test_file("file")
        self.mox.ReplayAll()
        drvr.check_can_live_migrate_destination_cleanup(self.context,
                                                        dest_check_data)
    def _mock_can_live_migrate_source(self, block_migration=False,
                                      is_shared_block_storage=False,
                                      is_shared_instance_path=False,
                                      disk_available_mb=1024,
                                      block_device_info=None):
        """Record mox expectations for check_can_live_migrate_source.

        Stubs _is_shared_block_storage and _check_shared_storage_test_file
        to return the given flags. Returns (instance, dest_check_data,
        drvr); the caller must call self.mox.ReplayAll() before invoking
        the driver.
        """
        instance = objects.Instance(**self.test_instance)
        dest_check_data = {'filename': 'file',
                           'image_type': 'default',
                           'block_migration': block_migration,
                           'disk_over_commit': False,
                           'disk_available_mb': disk_available_mb}
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        self.mox.StubOutWithMock(drvr, '_is_shared_block_storage')
        drvr._is_shared_block_storage(instance, dest_check_data,
                block_device_info).AndReturn(is_shared_block_storage)
        self.mox.StubOutWithMock(drvr, '_check_shared_storage_test_file')
        drvr._check_shared_storage_test_file('file').AndReturn(
                is_shared_instance_path)
        return (instance, dest_check_data, drvr)
    def test_check_can_live_migrate_source_block_migration(self):
        """Block migration checks dest disk and returns sharing flags."""
        instance, dest_check_data, drvr = self._mock_can_live_migrate_source(
            block_migration=True)
        self.mox.StubOutWithMock(drvr, "_assert_dest_node_has_enough_disk")
        drvr._assert_dest_node_has_enough_disk(
            self.context, instance, dest_check_data['disk_available_mb'],
            False, None)
        self.mox.ReplayAll()
        ret = drvr.check_can_live_migrate_source(self.context, instance,
                                                 dest_check_data)
        self.assertIsInstance(ret, dict)
        self.assertIn('is_shared_block_storage', ret)
        self.assertIn('is_shared_instance_path', ret)
        # Legacy 'is_shared_storage' key must mirror the instance-path flag.
        self.assertEqual(ret['is_shared_instance_path'],
                         ret['is_shared_storage'])
def test_check_can_live_migrate_source_shared_block_storage(self):
instance, dest_check_data, drvr = self._mock_can_live_migrate_source(
is_shared_block_storage=True)
self.mox.ReplayAll()
drvr.check_can_live_migrate_source(self.context, instance,
dest_check_data)
def test_check_can_live_migrate_source_shared_instance_path(self):
instance, dest_check_data, drvr = self._mock_can_live_migrate_source(
is_shared_instance_path=True)
self.mox.ReplayAll()
drvr.check_can_live_migrate_source(self.context, instance,
dest_check_data)
def test_check_can_live_migrate_source_non_shared_fails(self):
instance, dest_check_data, drvr = self._mock_can_live_migrate_source()
self.mox.ReplayAll()
self.assertRaises(exception.InvalidSharedStorage,
drvr.check_can_live_migrate_source, self.context,
instance, dest_check_data)
def test_check_can_live_migrate_source_shared_block_migration_fails(self):
instance, dest_check_data, drvr = self._mock_can_live_migrate_source(
block_migration=True,
is_shared_block_storage=True)
self.mox.ReplayAll()
self.assertRaises(exception.InvalidLocalStorage,
drvr.check_can_live_migrate_source,
self.context, instance, dest_check_data)
def test_check_can_live_migrate_shared_path_block_migration_fails(self):
instance, dest_check_data, drvr = self._mock_can_live_migrate_source(
block_migration=True,
is_shared_instance_path=True)
self.mox.ReplayAll()
self.assertRaises(exception.InvalidLocalStorage,
drvr.check_can_live_migrate_source,
self.context, instance, dest_check_data, None)
def test_check_can_live_migrate_non_shared_non_block_migration_fails(self):
instance, dest_check_data, drvr = self._mock_can_live_migrate_source()
self.mox.ReplayAll()
self.assertRaises(exception.InvalidSharedStorage,
drvr.check_can_live_migrate_source,
self.context, instance, dest_check_data)
    def test_check_can_live_migrate_source_with_dest_not_enough_disk(self):
        """Block migration fails when the destination lacks disk space."""
        instance, dest_check_data, drvr = self._mock_can_live_migrate_source(
            block_migration=True,
            disk_available_mb=0)
        # The disk needs 2 bytes but the destination advertises 0 MB.
        self.mox.StubOutWithMock(drvr, "get_instance_disk_info")
        drvr.get_instance_disk_info(instance,
                                    block_device_info=None).AndReturn(
                                        '[{"virt_disk_size":2}]')
        self.mox.ReplayAll()
        self.assertRaises(exception.MigrationError,
                          drvr.check_can_live_migrate_source,
                          self.context, instance, dest_check_data)
def _is_shared_block_storage_test_create_mocks(self, disks):
# Test data
instance_xml = ("<domain type='kvm'><name>instance-0000000a</name>"
"<devices>{}</devices></domain>")
disks_xml = ''
for dsk in disks:
if dsk['type'] is not 'network':
disks_xml = ''.join([disks_xml,
"<disk type='{type}'>"
"<driver name='qemu' type='{driver}'/>"
"<source {source}='{source_path}'/>"
"<target dev='{target_dev}' bus='virtio'/>"
"</disk>".format(**dsk)])
else:
disks_xml = ''.join([disks_xml,
"<disk type='{type}'>"
"<driver name='qemu' type='{driver}'/>"
"<source protocol='{source_proto}'"
"name='{source_image}' >"
"<host name='hostname' port='7000'/>"
"<config file='/path/to/file'/>"
"</source>"
"<target dev='{target_dev}'"
"bus='ide'/>".format(**dsk)])
# Preparing mocks
mock_virDomain = mock.Mock(fakelibvirt.virDomain)
mock_virDomain.XMLDesc = mock.Mock()
mock_virDomain.XMLDesc.return_value = (instance_xml.format(disks_xml))
mock_lookup = mock.Mock()
def mock_lookup_side_effect(name):
return mock_virDomain
mock_lookup.side_effect = mock_lookup_side_effect
mock_getsize = mock.Mock()
mock_getsize.return_value = "10737418240"
return (mock_getsize, mock_lookup)
def test_is_shared_block_storage_rbd(self):
self.flags(images_type='rbd', group='libvirt')
bdi = {'block_device_mapping': []}
instance = objects.Instance(**self.test_instance)
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
mock_get_instance_disk_info = mock.Mock()
with mock.patch.object(drvr, 'get_instance_disk_info',
mock_get_instance_disk_info):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.assertTrue(drvr._is_shared_block_storage(instance,
{'image_type': 'rbd'},
block_device_info=bdi))
self.assertEqual(0, mock_get_instance_disk_info.call_count)
def test_is_shared_block_storage_lvm(self):
self.flags(images_type='lvm', group='libvirt')
bdi = {'block_device_mapping': []}
instance = objects.Instance(**self.test_instance)
mock_get_instance_disk_info = mock.Mock()
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
with mock.patch.object(drvr, 'get_instance_disk_info',
mock_get_instance_disk_info):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.assertFalse(drvr._is_shared_block_storage(
instance, {'image_type': 'lvm'},
block_device_info=bdi))
self.assertEqual(0, mock_get_instance_disk_info.call_count)
def test_is_shared_block_storage_qcow2(self):
self.flags(images_type='qcow2', group='libvirt')
bdi = {'block_device_mapping': []}
instance = objects.Instance(**self.test_instance)
mock_get_instance_disk_info = mock.Mock()
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
with mock.patch.object(drvr, 'get_instance_disk_info',
mock_get_instance_disk_info):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.assertFalse(drvr._is_shared_block_storage(
instance, {'image_type': 'qcow2'},
block_device_info=bdi))
self.assertEqual(0, mock_get_instance_disk_info.call_count)
def test_is_shared_block_storage_rbd_only_source(self):
self.flags(images_type='rbd', group='libvirt')
bdi = {'block_device_mapping': []}
instance = objects.Instance(**self.test_instance)
mock_get_instance_disk_info = mock.Mock()
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
with mock.patch.object(drvr, 'get_instance_disk_info',
mock_get_instance_disk_info):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.assertFalse(drvr._is_shared_block_storage(
instance, {'is_shared_instance_path': False},
block_device_info=bdi))
self.assertEqual(0, mock_get_instance_disk_info.call_count)
def test_is_shared_block_storage_rbd_only_dest(self):
bdi = {'block_device_mapping': []}
instance = objects.Instance(**self.test_instance)
mock_get_instance_disk_info = mock.Mock()
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
with mock.patch.object(drvr, 'get_instance_disk_info',
mock_get_instance_disk_info):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.assertFalse(drvr._is_shared_block_storage(
instance, {'image_type': 'rbd',
'is_shared_instance_path': False},
block_device_info=bdi))
self.assertEqual(0, mock_get_instance_disk_info.call_count)
    def test_is_shared_block_storage_volume_backed(self):
        """An instance whose only disk is a mapped volume is 'shared'."""
        disks = [{'type': 'block',
                 'driver': 'raw',
                 'source': 'dev',
                 'source_path': '/dev/disk',
                 'target_dev': 'vda'}]
        bdi = {'block_device_mapping': [
                  {'connection_info': 'info', 'mount_device': '/dev/vda'}]}
        instance = objects.Instance(**self.test_instance)
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        (mock_getsize, mock_lookup) =\
            self._is_shared_block_storage_test_create_mocks(disks)
        with mock.patch.object(host.Host, 'get_domain', mock_lookup):
            self.assertTrue(drvr._is_shared_block_storage(instance,
                                  {'is_volume_backed': True,
                                   'is_shared_instance_path': False},
                                  block_device_info = bdi))
        mock_lookup.assert_called_once_with(instance)
    def test_is_shared_block_storage_volume_backed_with_disk(self):
        """A local file disk alongside the mapped volume defeats sharing."""
        disks = [{'type': 'block',
                 'driver': 'raw',
                 'source': 'dev',
                 'source_path': '/dev/disk',
                 'target_dev': 'vda'},
                 {'type': 'file',
                 'driver': 'raw',
                 'source': 'file',
                 'source_path': '/instance/disk.local',
                 'target_dev': 'vdb'}]
        bdi = {'block_device_mapping': [
                  {'connection_info': 'info', 'mount_device': '/dev/vda'}]}
        instance = objects.Instance(**self.test_instance)
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        (mock_getsize, mock_lookup) =\
            self._is_shared_block_storage_test_create_mocks(disks)
        # contextlib.nested is the py2-era multi-context-manager helper.
        with contextlib.nested(
                mock.patch.object(os.path, 'getsize', mock_getsize),
                mock.patch.object(host.Host, 'get_domain', mock_lookup)):
            self.assertFalse(drvr._is_shared_block_storage(
                                    instance,
                                    {'is_volume_backed': True,
                                     'is_shared_instance_path': False},
                                    block_device_info = bdi))
        # Only the local (non-volume) disk gets its size checked.
        mock_getsize.assert_called_once_with('/instance/disk.local')
        mock_lookup.assert_called_once_with(instance)
def test_is_shared_block_storage_nfs(self):
bdi = {'block_device_mapping': []}
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
mock_image_backend = mock.MagicMock()
drvr.image_backend = mock_image_backend
mock_backend = mock.MagicMock()
mock_image_backend.backend.return_value = mock_backend
mock_backend.is_file_in_instance_path.return_value = True
mock_get_instance_disk_info = mock.Mock()
with mock.patch.object(drvr, 'get_instance_disk_info',
mock_get_instance_disk_info):
self.assertTrue(drvr._is_shared_block_storage('instance',
{'is_shared_instance_path': True},
block_device_info=bdi))
self.assertEqual(0, mock_get_instance_disk_info.call_count)
    def test_live_migration_update_graphics_xml(self):
        """_live_migration_operation rewrites the graphics listen
        addresses in the migratable XML before calling migrateToURI2.
        """
        self.compute = importutils.import_object(CONF.compute_manager)
        instance_dict = dict(self.test_instance)
        instance_dict.update({'host': 'fake',
                              'power_state': power_state.RUNNING,
                              'vm_state': vm_states.ACTIVE})
        instance_ref = objects.Instance(**instance_dict)
        xml_tmpl = ("<domain type='kvm'>"
                    "<devices>"
                    "<graphics type='vnc' listen='{vnc}'>"
                    "<listen address='{vnc}'/>"
                    "</graphics>"
                    "<graphics type='spice' listen='{spice}'>"
                    "<listen address='{spice}'/>"
                    "</graphics>"
                    "</devices>"
                    "</domain>")
        initial_xml = xml_tmpl.format(vnc='1.2.3.4',
                                      spice='5.6.7.8')
        target_xml = xml_tmpl.format(vnc='10.0.0.1',
                                     spice='10.0.0.2')
        target_xml = etree.tostring(etree.fromstring(target_xml))
        # Preparing mocks
        vdmock = self.mox.CreateMock(fakelibvirt.virDomain)
        self.mox.StubOutWithMock(vdmock, "migrateToURI2")
        _bandwidth = CONF.libvirt.live_migration_bandwidth
        vdmock.XMLDesc(fakelibvirt.VIR_DOMAIN_XML_MIGRATABLE).AndReturn(
                initial_xml)
        # migrateToURI2 must receive the *rewritten* XML; raising here
        # lets the test assert the error propagates to the caller.
        vdmock.migrateToURI2(CONF.libvirt.live_migration_uri % 'dest',
                             None,
                             target_xml,
                             mox.IgnoreArg(),
                             None,
                             _bandwidth).AndRaise(
                                fakelibvirt.libvirtError("ERR"))
        # start test
        migrate_data = {'pre_live_migration_result':
                {'graphics_listen_addrs':
                    {'vnc': '10.0.0.1', 'spice': '10.0.0.2'}}}
        self.mox.ReplayAll()
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        self.assertRaises(fakelibvirt.libvirtError,
                          drvr._live_migration_operation,
                          self.context, instance_ref, 'dest',
                          False, migrate_data, vdmock)
    def test_live_migration_update_volume_xml(self):
        """_live_migration_operation passes the pre-migration volume map
        to _update_xml so volume device paths are rewritten.
        """
        self.compute = importutils.import_object(CONF.compute_manager)
        instance_dict = dict(self.test_instance)
        instance_dict.update({'host': 'fake',
                              'power_state': power_state.RUNNING,
                              'vm_state': vm_states.ACTIVE})
        instance_ref = objects.Instance(**instance_dict)
        target_xml = self.device_xml_tmpl.format(
            device_path='/dev/disk/by-path/'
                        'ip-1.2.3.4:3260-iqn.'
                        'cde.67890.opst-lun-Z')
        # start test
        migrate_data = {'pre_live_migration_result':
        {'volume': {u'58a84f6d-3f0c-4e19-a0af-eb657b790657':
          {'connection_info': {u'driver_volume_type': u'iscsi',
            'serial': u'58a84f6d-3f0c-4e19-a0af-eb657b790657',
              u'data': {u'access_mode': u'rw', u'target_discovered': False,
              u'target_iqn': u'ip-1.2.3.4:3260-iqn.cde.67890.opst-lun-Z',
              u'volume_id': u'58a84f6d-3f0c-4e19-a0af-eb657b790657',
       'device_path':
          u'/dev/disk/by-path/ip-1.2.3.4:3260-iqn.cde.67890.opst-lun-Z'}},
           'disk_info': {'bus': u'virtio', 'type': u'disk', 'dev': u'vdb'}}}},
        'graphics_listen_addrs': {}}
        # Extract the volume map the same way the driver does, so the
        # mupdate assertion below matches the driver's call.
        pre_live_migrate_data = ((migrate_data or {}).
                                  get('pre_live_migration_result', {}))
        volume = pre_live_migrate_data.get('volume')
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        test_mock = mock.MagicMock()
        with mock.patch.object(libvirt_driver.LibvirtDriver, 'get_info') as \
                mget_info,\
                mock.patch.object(drvr._host, 'get_domain') as mget_domain,\
                mock.patch.object(fakelibvirt.virDomain, 'migrateToURI2'),\
                mock.patch.object(drvr, '_update_xml') as mupdate:
            mget_info.side_effect = exception.InstanceNotFound(
                                     instance_id='foo')
            mget_domain.return_value = test_mock
            test_mock.XMLDesc.return_value = target_xml
            self.assertFalse(drvr._live_migration_operation(
                             self.context, instance_ref, 'dest', False,
                             migrate_data, test_mock))
            mupdate.assert_called_once_with(target_xml, volume, None)
    def test_update_volume_xml(self):
        """_update_volume_xml swaps the disk's device path for the one
        in the volume's connection_info, matched by serial.
        """
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        initial_xml = self.device_xml_tmpl.format(
            device_path='/dev/disk/by-path/'
                        'ip-1.2.3.4:3260-iqn.'
                        'abc.12345.opst-lun-X')
        target_xml = self.device_xml_tmpl.format(
            device_path='/dev/disk/by-path/'
                        'ip-1.2.3.4:3260-iqn.'
                        'cde.67890.opst-lun-Z')
        target_xml = etree.tostring(etree.fromstring(target_xml))
        serial = "58a84f6d-3f0c-4e19-a0af-eb657b790657"
        volume_xml = {'volume': {}}
        volume_xml['volume'][serial] = {'connection_info': {}, 'disk_info': {}}
        volume_xml['volume'][serial]['connection_info'] = \
          {u'driver_volume_type': u'iscsi',
           'serial': u'58a84f6d-3f0c-4e19-a0af-eb657b790657',
            u'data': {u'access_mode': u'rw', u'target_discovered': False,
            u'target_iqn': u'ip-1.2.3.4:3260-iqn.cde.67890.opst-lun-Z',
            u'volume_id': u'58a84f6d-3f0c-4e19-a0af-eb657b790657',
       'device_path':
          u'/dev/disk/by-path/ip-1.2.3.4:3260-iqn.cde.67890.opst-lun-Z'}}
        volume_xml['volume'][serial]['disk_info'] = {'bus': u'virtio',
                                                     'type': u'disk',
                                                     'dev': u'vdb'}
        connection_info = volume_xml['volume'][serial]['connection_info']
        disk_info = volume_xml['volume'][serial]['disk_info']
        # Canned guest-disk config returned by the stubbed
        # _get_volume_config, carrying the *new* device path.
        conf = vconfig.LibvirtConfigGuestDisk()
        conf.source_device = disk_info['type']
        conf.driver_name = "qemu"
        conf.driver_format = "raw"
        conf.driver_cache = "none"
        conf.target_dev = disk_info['dev']
        conf.target_bus = disk_info['bus']
        conf.serial = connection_info.get('serial')
        conf.source_type = "block"
        conf.source_path = connection_info['data'].get('device_path')
        with mock.patch.object(drvr, '_get_volume_config',
                               return_value=conf):
            # remove_blank_text lets the whitespace-insensitive compare work.
            parser = etree.XMLParser(remove_blank_text=True)
            xml_doc = etree.fromstring(initial_xml, parser)
            config = drvr._update_volume_xml(xml_doc,
                                             volume_xml['volume'])
            xml_doc = etree.fromstring(target_xml, parser)
            self.assertEqual(etree.tostring(xml_doc), etree.tostring(config))
    def test_update_volume_xml_no_serial(self):
        """A disk with an empty <serial> is left untouched by
        _update_volume_xml (no match against the volume map).
        """
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        xml_tmpl = """
        <domain type='kvm'>
          <devices>
            <disk type='block' device='disk'>
              <driver name='qemu' type='raw' cache='none'/>
              <source dev='{device_path}'/>
              <target bus='virtio' dev='vdb'/>
              <serial></serial>
              <address type='pci' domain='0x0' bus='0x0' slot='0x04' \
              function='0x0'/>
            </disk>
          </devices>
        </domain>
        """
        initial_xml = xml_tmpl.format(device_path='/dev/disk/by-path/'
                                      'ip-1.2.3.4:3260-iqn.'
                                      'abc.12345.opst-lun-X')
        # target == initial: nothing should change.
        target_xml = xml_tmpl.format(device_path='/dev/disk/by-path/'
                                     'ip-1.2.3.4:3260-iqn.'
                                     'abc.12345.opst-lun-X')
        target_xml = etree.tostring(etree.fromstring(target_xml))
        serial = "58a84f6d-3f0c-4e19-a0af-eb657b790657"
        volume_xml = {'volume': {}}
        volume_xml['volume'][serial] = {'connection_info': {}, 'disk_info': {}}
        volume_xml['volume'][serial]['connection_info'] = \
          {u'driver_volume_type': u'iscsi',
           'serial': u'58a84f6d-3f0c-4e19-a0af-eb657b790657',
            u'data': {u'access_mode': u'rw', u'target_discovered': False,
            u'target_iqn': u'ip-1.2.3.4:3260-iqn.cde.67890.opst-lun-Z',
            u'volume_id': u'58a84f6d-3f0c-4e19-a0af-eb657b790657',
       'device_path':
          u'/dev/disk/by-path/ip-1.2.3.4:3260-iqn.cde.67890.opst-lun-Z'}}
        volume_xml['volume'][serial]['disk_info'] = {'bus': u'virtio',
                                                     'type': u'disk',
                                                     'dev': u'vdb'}
        connection_info = volume_xml['volume'][serial]['connection_info']
        disk_info = volume_xml['volume'][serial]['disk_info']
        conf = vconfig.LibvirtConfigGuestDisk()
        conf.source_device = disk_info['type']
        conf.driver_name = "qemu"
        conf.driver_format = "raw"
        conf.driver_cache = "none"
        conf.target_dev = disk_info['dev']
        conf.target_bus = disk_info['bus']
        conf.serial = connection_info.get('serial')
        conf.source_type = "block"
        conf.source_path = connection_info['data'].get('device_path')
        with mock.patch.object(drvr, '_get_volume_config',
                               return_value=conf):
            xml_doc = etree.fromstring(initial_xml)
            config = drvr._update_volume_xml(xml_doc,
                                             volume_xml['volume'])
            self.assertEqual(target_xml, etree.tostring(config))
    def test_update_volume_xml_no_connection_info(self):
        """Volume entries lacking connection_info leave the XML unchanged."""
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        initial_xml = self.device_xml_tmpl.format(
            device_path='/dev/disk/by-path/'
                        'ip-1.2.3.4:3260-iqn.'
                        'abc.12345.opst-lun-X')
        # target == initial: nothing should change.
        target_xml = self.device_xml_tmpl.format(
            device_path='/dev/disk/by-path/'
                        'ip-1.2.3.4:3260-iqn.'
                        'abc.12345.opst-lun-X')
        target_xml = etree.tostring(etree.fromstring(target_xml))
        serial = "58a84f6d-3f0c-4e19-a0af-eb657b790657"
        volume_xml = {'volume': {}}
        # Deliberately wrong keys: no 'connection_info'/'disk_info'.
        volume_xml['volume'][serial] = {'info1': {}, 'info2': {}}
        conf = vconfig.LibvirtConfigGuestDisk()
        with mock.patch.object(drvr, '_get_volume_config',
                               return_value=conf):
            xml_doc = etree.fromstring(initial_xml)
            config = drvr._update_volume_xml(xml_doc,
                                             volume_xml['volume'])
            self.assertEqual(target_xml, etree.tostring(config))
    @mock.patch.object(fakelibvirt, 'VIR_DOMAIN_XML_MIGRATABLE', None,
                       create=True)
    def test_live_migration_uses_migrateToURI_without_migratable_flag(self):
        """Without VIR_DOMAIN_XML_MIGRATABLE support the driver falls
        back to the plain migrateToURI API.
        """
        self.compute = importutils.import_object(CONF.compute_manager)
        instance_dict = dict(self.test_instance)
        instance_dict.update({'host': 'fake',
                              'power_state': power_state.RUNNING,
                              'vm_state': vm_states.ACTIVE})
        instance_ref = objects.Instance(**instance_dict)
        # Preparing mocks
        vdmock = self.mox.CreateMock(fakelibvirt.virDomain)
        self.mox.StubOutWithMock(vdmock, "migrateToURI")
        _bandwidth = CONF.libvirt.live_migration_bandwidth
        vdmock.migrateToURI(CONF.libvirt.live_migration_uri % 'dest',
                            mox.IgnoreArg(),
                            None,
                            _bandwidth).AndRaise(
                                fakelibvirt.libvirtError("ERR"))
        # start test
        # 0.0.0.0 listen addresses make the fallback safe, so no
        # MigrationError is raised before migrateToURI is attempted.
        migrate_data = {'pre_live_migration_result':
                {'graphics_listen_addrs':
                    {'vnc': '0.0.0.0', 'spice': '0.0.0.0'}}}
        self.mox.ReplayAll()
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        self.assertRaises(fakelibvirt.libvirtError,
                          drvr._live_migration_operation,
                          self.context, instance_ref, 'dest',
                          False, migrate_data, vdmock)
    def test_live_migration_uses_migrateToURI_without_dest_listen_addrs(self):
        """With no destination listen addresses in migrate_data the
        driver uses plain migrateToURI.
        """
        self.compute = importutils.import_object(CONF.compute_manager)
        instance_dict = dict(self.test_instance)
        instance_dict.update({'host': 'fake',
                              'power_state': power_state.RUNNING,
                              'vm_state': vm_states.ACTIVE})
        instance_ref = objects.Instance(**instance_dict)
        # Preparing mocks
        vdmock = self.mox.CreateMock(fakelibvirt.virDomain)
        self.mox.StubOutWithMock(vdmock, "migrateToURI")
        _bandwidth = CONF.libvirt.live_migration_bandwidth
        vdmock.migrateToURI(CONF.libvirt.live_migration_uri % 'dest',
                            mox.IgnoreArg(),
                            None,
                            _bandwidth).AndRaise(
                                fakelibvirt.libvirtError("ERR"))
        # start test
        # Empty migrate_data: no pre_live_migration_result at all.
        migrate_data = {}
        self.mox.ReplayAll()
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        self.assertRaises(fakelibvirt.libvirtError,
                          drvr._live_migration_operation,
                          self.context, instance_ref, 'dest',
                          False, migrate_data, vdmock)
    @mock.patch.object(fakelibvirt, 'VIR_DOMAIN_XML_MIGRATABLE', None,
                       create=True)
    def test_live_migration_fails_without_migratable_flag_or_0_addr(self):
        """No XML-rewrite support plus a specific (non-0.0.0.0) listen
        address means the graphics config cannot be preserved: the
        migration must be refused before any libvirt call is made.
        """
        self.flags(vnc_enabled=True, vncserver_listen='1.2.3.4')
        self.compute = importutils.import_object(CONF.compute_manager)
        instance_dict = dict(self.test_instance)
        instance_dict.update({'host': 'fake',
                              'power_state': power_state.RUNNING,
                              'vm_state': vm_states.ACTIVE})
        instance_ref = objects.Instance(**instance_dict)
        # Preparing mocks
        # No expectations are recorded: migrateToURI must not be called.
        vdmock = self.mox.CreateMock(fakelibvirt.virDomain)
        self.mox.StubOutWithMock(vdmock, "migrateToURI")
        # start test
        migrate_data = {'pre_live_migration_result':
                {'graphics_listen_addrs':
                    {'vnc': '1.2.3.4', 'spice': '1.2.3.4'}}}
        self.mox.ReplayAll()
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        self.assertRaises(exception.MigrationError,
                          drvr._live_migration_operation,
                          self.context, instance_ref, 'dest',
                          False, migrate_data, vdmock)
    def test_live_migration_raises_exception(self):
        # Confirms recover method is called when exceptions are raised.
        """A libvirt failure propagates and leaves the instance state
        (vm_state/power_state) untouched.
        """
        # Preparing data
        self.compute = importutils.import_object(CONF.compute_manager)
        instance_dict = dict(self.test_instance)
        instance_dict.update({'host': 'fake',
                              'power_state': power_state.RUNNING,
                              'vm_state': vm_states.ACTIVE})
        instance_ref = objects.Instance(**instance_dict)
        # Preparing mocks
        vdmock = self.mox.CreateMock(fakelibvirt.virDomain)
        self.mox.StubOutWithMock(vdmock, "migrateToURI2")
        _bandwidth = CONF.libvirt.live_migration_bandwidth
        # Record against whichever API the fakelibvirt build exposes.
        if getattr(fakelibvirt, 'VIR_DOMAIN_XML_MIGRATABLE', None) is None:
            vdmock.migrateToURI(CONF.libvirt.live_migration_uri % 'dest',
                                mox.IgnoreArg(),
                                None,
                                _bandwidth).AndRaise(
                                        fakelibvirt.libvirtError('ERR'))
        else:
            vdmock.XMLDesc(fakelibvirt.VIR_DOMAIN_XML_MIGRATABLE).AndReturn(
                    FakeVirtDomain().XMLDesc(0))
            vdmock.migrateToURI2(CONF.libvirt.live_migration_uri % 'dest',
                                 None,
                                 mox.IgnoreArg(),
                                 mox.IgnoreArg(),
                                 None,
                                 _bandwidth).AndRaise(
                                         fakelibvirt.libvirtError('ERR'))
        # start test
        migrate_data = {'pre_live_migration_result':
                {'graphics_listen_addrs':
                    {'vnc': '127.0.0.1', 'spice': '127.0.0.1'}}}
        self.mox.ReplayAll()
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        self.assertRaises(fakelibvirt.libvirtError,
                      drvr._live_migration_operation,
                      self.context, instance_ref, 'dest',
                      False, migrate_data, vdmock)
        # The failed operation must not have mutated the instance state.
        self.assertEqual(vm_states.ACTIVE, instance_ref.vm_state)
        self.assertEqual(power_state.RUNNING, instance_ref.power_state)
    def test_live_migration_raises_unsupported_config_exception(self):
        # Tests that when migrateToURI2 fails with VIR_ERR_CONFIG_UNSUPPORTED,
        # migrateToURI is used instead.
        # Preparing data
        instance_ref = objects.Instance(**self.test_instance)
        # Preparing mocks
        vdmock = self.mox.CreateMock(fakelibvirt.virDomain)
        self.mox.StubOutWithMock(vdmock, 'migrateToURI2')
        self.mox.StubOutWithMock(vdmock, 'migrateToURI')
        _bandwidth = CONF.libvirt.live_migration_bandwidth
        vdmock.XMLDesc(fakelibvirt.VIR_DOMAIN_XML_MIGRATABLE).AndReturn(
            FakeVirtDomain().XMLDesc(0))
        unsupported_config_error = fakelibvirt.libvirtError('ERR')
        unsupported_config_error.err = (
            fakelibvirt.VIR_ERR_CONFIG_UNSUPPORTED,)
        # This is the first error we hit but since the error code is
        # VIR_ERR_CONFIG_UNSUPPORTED we'll try migrateToURI.
        vdmock.migrateToURI2(CONF.libvirt.live_migration_uri % 'dest', None,
                             mox.IgnoreArg(), mox.IgnoreArg(), None,
                             _bandwidth).AndRaise(unsupported_config_error)
        # This is the second and final error that will actually kill the run,
        # we use TestingException to make sure it's not the same libvirtError
        # above.
        vdmock.migrateToURI(CONF.libvirt.live_migration_uri % 'dest',
                            mox.IgnoreArg(), None,
                            _bandwidth).AndRaise(test.TestingException('oops'))
        graphics_listen_addrs = {'vnc': '0.0.0.0', 'spice': '127.0.0.1'}
        migrate_data = {'pre_live_migration_result':
                {'graphics_listen_addrs': graphics_listen_addrs}}
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        # The graphics-address pre-check is stubbed so the fallback path
        # is reached regardless of the configured listen addresses.
        self.mox.StubOutWithMock(
            drvr, '_check_graphics_addresses_can_live_migrate')
        drvr._check_graphics_addresses_can_live_migrate(graphics_listen_addrs)
        self.mox.ReplayAll()
        # start test
        self.assertRaises(test.TestingException,
                          drvr._live_migration_operation,
                          self.context, instance_ref, 'dest',
                          False, migrate_data, vdmock)
    @mock.patch('shutil.rmtree')
    @mock.patch('os.path.exists', return_value=True)
    @mock.patch('nova.virt.libvirt.utils.get_instance_path_at_destination')
    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver.destroy')
    def test_rollback_live_migration_at_dest_not_shared(self, mock_destroy,
                                                        mock_get_instance_path,
                                                        mock_exist,
                                                        mock_shutil
                                                        ):
        """On non-shared storage, rollback removes the local instance dir
        even when destroy() fails, and the destroy error propagates.
        """
        # destroy method may raise InstanceTerminationFailure or
        # InstancePowerOffFailure, here use their base class Invalid.
        mock_destroy.side_effect = exception.Invalid(reason='just test')
        # NOTE(review): os.path.join discards the first component when the
        # second is absolute, so this is simply '/fake_instance_uuid' —
        # presumably intentional for a fake path, but worth confirming.
        fake_instance_path = os.path.join(cfg.CONF.instances_path,
                                          '/fake_instance_uuid')
        mock_get_instance_path.return_value = fake_instance_path
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        migrate_data = {'is_shared_instance_path': False}
        self.assertRaises(exception.Invalid,
                          drvr.rollback_live_migration_at_destination,
                          "context", "instance", [], None, True, migrate_data)
        mock_exist.assert_called_once_with(fake_instance_path)
        mock_shutil.assert_called_once_with(fake_instance_path)
@mock.patch('shutil.rmtree')
@mock.patch('os.path.exists')
@mock.patch('nova.virt.libvirt.utils.get_instance_path_at_destination')
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver.destroy')
def test_rollback_live_migration_at_dest_shared(self, mock_destroy,
mock_get_instance_path,
mock_exist,
mock_shutil
):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
migrate_data = {'is_shared_instance_path': True}
drvr.rollback_live_migration_at_destination("context", "instance", [],
None, True, migrate_data)
mock_destroy.assert_called_once_with("context", "instance", [],
None, True, migrate_data)
self.assertFalse(mock_get_instance_path.called)
self.assertFalse(mock_exist.called)
self.assertFalse(mock_shutil.called)
    @mock.patch.object(time, "sleep",
                       side_effect=lambda x: eventlet.sleep(0))
    @mock.patch.object(host.DomainJobInfo, "for_domain")
    @mock.patch.object(objects.Instance, "save")
    @mock.patch.object(fakelibvirt.Connection, "_mark_running")
    def _test_live_migration_monitoring(self,
                                        job_info_records,
                                        expect_success,
                                        mock_running,
                                        mock_save,
                                        mock_job_info,
                                        mock_sleep):
        """Drive _live_migration_monitor through *job_info_records*.

        Records may be DomainJobInfo objects (returned to the monitor) or
        the control strings "thread-finish" / "domain-stop". Asserts the
        post method fired iff *expect_success*, else the recover method.
        """
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        instance = objects.Instance(**self.test_instance)
        dom = fakelibvirt.Domain(drvr._get_connection(), "<domain/>", True)
        finish_event = eventlet.event.Event()

        def fake_job_info(hostself):
            while True:
                self.assertTrue(len(job_info_records) > 0)
                # NOTE(review): pop() consumes records from the *end* of the
                # list, and the trailing `return rec` below hands string
                # control records back to the monitor — both look
                # suspicious (pop(0) and a loop-continue would be expected);
                # verify against _live_migration_monitor's expectations.
                rec = job_info_records.pop()
                if type(rec) == str:
                    if rec == "thread-finish":
                        finish_event.send()
                    elif rec == "domain-stop":
                        dom.destroy()
                else:
                    return rec
                return rec
        mock_job_info.side_effect = fake_job_info
        dest = mock.sentinel.migrate_dest
        migrate_data = mock.sentinel.migrate_data
        fake_post_method = mock.MagicMock()
        fake_recover_method = mock.MagicMock()
        drvr._live_migration_monitor(self.context, instance,
                                     dest,
                                     fake_post_method,
                                     fake_recover_method,
                                     False,
                                     migrate_data,
                                     dom,
                                     finish_event)
        if expect_success:
            self.assertFalse(fake_recover_method.called,
                             'Recover method called when success expected')
            fake_post_method.assert_called_once_with(
                self.context, instance, dest, False, migrate_data)
        else:
            self.assertFalse(fake_post_method.called,
                             'Post method called when success not expected')
            fake_recover_method.assert_called_once_with(
                self.context, instance, dest, False, migrate_data)
    def test_live_migration_monitor_success(self):
        """Full happy path: job completes and the post method fires."""
        # A normal sequence where see all the normal job states
        domain_info_records = [
            host.DomainJobInfo(
                type=fakelibvirt.VIR_DOMAIN_JOB_NONE),
            host.DomainJobInfo(
                type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED),
            host.DomainJobInfo(
                type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED),
            host.DomainJobInfo(
                type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED),
            "thread-finish",
            "domain-stop",
            host.DomainJobInfo(
                type=fakelibvirt.VIR_DOMAIN_JOB_COMPLETED),
        ]

        self._test_live_migration_monitoring(domain_info_records, True)
    def test_live_migration_monitor_success_race(self):
        """Success inferred from a stopped domain despite missing the
        COMPLETED job state.
        """
        # A normalish sequence but we're too slow to see the
        # completed job state
        domain_info_records = [
            host.DomainJobInfo(
                type=fakelibvirt.VIR_DOMAIN_JOB_NONE),
            host.DomainJobInfo(
                type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED),
            host.DomainJobInfo(
                type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED),
            host.DomainJobInfo(
                type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED),
            "thread-finish",
            "domain-stop",
            host.DomainJobInfo(
                type=fakelibvirt.VIR_DOMAIN_JOB_NONE),
        ]

        self._test_live_migration_monitoring(domain_info_records, True)
    def test_live_migration_monitor_failed(self):
        """An explicit FAILED job state triggers the recover method."""
        # A failed sequence where we see all the expected events
        domain_info_records = [
            host.DomainJobInfo(
                type=fakelibvirt.VIR_DOMAIN_JOB_NONE),
            host.DomainJobInfo(
                type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED),
            host.DomainJobInfo(
                type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED),
            host.DomainJobInfo(
                type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED),
            "thread-finish",
            host.DomainJobInfo(
                type=fakelibvirt.VIR_DOMAIN_JOB_FAILED),
        ]

        self._test_live_migration_monitoring(domain_info_records, False)
    def test_live_migration_monitor_failed_race(self):
        """Failure inferred from a still-running domain despite missing
        the FAILED job state.
        """
        # A failed sequence where we are too slow to see the
        # failed event
        domain_info_records = [
            host.DomainJobInfo(
                type=fakelibvirt.VIR_DOMAIN_JOB_NONE),
            host.DomainJobInfo(
                type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED),
            host.DomainJobInfo(
                type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED),
            host.DomainJobInfo(
                type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED),
            "thread-finish",
            host.DomainJobInfo(
                type=fakelibvirt.VIR_DOMAIN_JOB_NONE),
        ]

        self._test_live_migration_monitoring(domain_info_records, False)
    def test_live_migration_monitor_cancelled(self):
        """A CANCELLED job state triggers the recover method."""
        # A cancelled sequence where we see all the events
        domain_info_records = [
            host.DomainJobInfo(
                type=fakelibvirt.VIR_DOMAIN_JOB_NONE),
            host.DomainJobInfo(
                type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED),
            host.DomainJobInfo(
                type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED),
            host.DomainJobInfo(
                type=fakelibvirt.VIR_DOMAIN_JOB_UNBOUNDED),
            "thread-finish",
            "domain-stop",
            host.DomainJobInfo(
                type=fakelibvirt.VIR_DOMAIN_JOB_CANCELLED),
        ]

        self._test_live_migration_monitoring(domain_info_records, False)
    @mock.patch.object(greenthread, "spawn")
    @mock.patch.object(libvirt_driver.LibvirtDriver, "_live_migration_monitor")
    @mock.patch.object(host.Host, "get_domain")
    @mock.patch.object(fakelibvirt.Connection, "_mark_running")
    def test_live_migration_main(self, mock_running, mock_dom,
                                 mock_monitor, mock_thread):
        """_live_migration spawns the operation thread and then runs the
        monitor with a freshly created finish event.
        """
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        instance = objects.Instance(**self.test_instance)
        dom = fakelibvirt.Domain(drvr._get_connection(), "<domain/>", True)
        migrate_data = {}
        mock_dom.return_value = dom

        def fake_post():
            pass

        def fake_recover():
            pass

        drvr._live_migration(self.context, instance, "fakehost",
                             fake_post, fake_recover, False,
                             migrate_data)

        # Matches any eventlet Event, since the driver creates its own.
        class AnyEventletEvent(object):
            def __eq__(self, other):
                return type(other) == eventlet.event.Event

        mock_thread.assert_called_once_with(
            drvr._live_migration_operation,
            self.context, instance, "fakehost", False,
            migrate_data, dom)
        mock_monitor.assert_called_once_with(
            self.context, instance, "fakehost",
            fake_post, fake_recover, False,
            migrate_data, dom, AnyEventletEvent())
def _do_test_create_images_and_backing(self, disk_type):
    """Helper: verify _create_images_and_backing creates an image of the
    given disk type and fetches kernel/ramdisk with no fallback host.

    :param disk_type: image type string recorded in disk_info (e.g. 'qcow2').
    """
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    self.mox.StubOutWithMock(drvr, '_fetch_instance_kernel_ramdisk')
    self.mox.StubOutWithMock(libvirt_driver.libvirt_utils, 'create_image')

    disk_info = {'path': 'foo', 'type': disk_type,
                 'disk_size': 1 * 1024 ** 3,
                 'virt_disk_size': 20 * 1024 ** 3,
                 'backing_file': None}

    # mox record phase: these exact calls are expected during replay.
    libvirt_driver.libvirt_utils.create_image(
        disk_info['type'], mox.IgnoreArg(), disk_info['virt_disk_size'])
    drvr._fetch_instance_kernel_ramdisk(self.context, self.test_instance,
                                        fallback_from_host=None)
    self.mox.ReplayAll()

    # Pretend the image does not exist so create_image is actually invoked.
    self.stubs.Set(os.path, 'exists', lambda *args: False)
    drvr._create_images_and_backing(self.context, self.test_instance,
                                    "/fake/instance/dir", [disk_info])
def test_create_images_and_backing_qcow2(self):
    """Image creation path works for qcow2 disks."""
    self._do_test_create_images_and_backing('qcow2')
def test_create_images_and_backing_raw(self):
    """Image creation path works for raw disks."""
    self._do_test_create_images_and_backing('raw')
def test_create_images_and_backing_images_not_exist_no_fallback(self):
    """ImageNotFound propagates when the backing image is missing and no
    fallback host is provided to copy it from."""
    conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    disk_info = [
        {u'backing_file': u'fake_image_backing_file',
         u'disk_size': 10747904,
         u'path': u'disk_path',
         u'type': u'qcow2',
         u'virt_disk_size': 25165824}]

    self.test_instance.update({'user_id': 'fake-user',
                               'os_type': None,
                               'project_id': 'fake-project'})
    instance = objects.Instance(**self.test_instance)
    # fetch_image fails; with no fallback_from_host the error must bubble up.
    with mock.patch.object(libvirt_driver.libvirt_utils, 'fetch_image',
                           side_effect=exception.ImageNotFound(
                               image_id="fake_id")):
        self.assertRaises(exception.ImageNotFound,
                          conn._create_images_and_backing,
                          self.context, instance,
                          "/fake/instance/dir", disk_info)
def test_create_images_and_backing_images_not_exist_fallback(self):
    """When Glance no longer has the image, backing file, kernel and ramdisk
    are copied over from the fallback host instead."""
    conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    disk_info = [
        {u'backing_file': u'fake_image_backing_file',
         u'disk_size': 10747904,
         u'path': u'disk_path',
         u'type': u'qcow2',
         u'virt_disk_size': 25165824}]

    base_dir = os.path.join(CONF.instances_path,
                            CONF.image_cache_subdirectory_name)
    self.test_instance.update({'user_id': 'fake-user',
                               'os_type': None,
                               'kernel_id': 'fake_kernel_id',
                               'ramdisk_id': 'fake_ramdisk_id',
                               'project_id': 'fake-project'})
    instance = objects.Instance(**self.test_instance)
    with contextlib.nested(
        mock.patch.object(libvirt_driver.libvirt_utils, 'copy_image'),
        mock.patch.object(libvirt_driver.libvirt_utils, 'fetch_image',
                          side_effect=exception.ImageNotFound(
                              image_id="fake_id")),
    ) as (copy_image_mock, fetch_image_mock):
        conn._create_images_and_backing(self.context, instance,
                                        "/fake/instance/dir", disk_info,
                                        fallback_from_host="fake_host")
        backfile_path = os.path.join(base_dir, 'fake_image_backing_file')
        kernel_path = os.path.join(CONF.instances_path,
                                   self.test_instance['uuid'],
                                   'kernel')
        ramdisk_path = os.path.join(CONF.instances_path,
                                    self.test_instance['uuid'],
                                    'ramdisk')
        # Each missing file is pulled from the fallback host (receive=True).
        copy_image_mock.assert_has_calls([
            mock.call(dest=backfile_path, src=backfile_path,
                      host='fake_host', receive=True),
            mock.call(dest=kernel_path, src=kernel_path,
                      host='fake_host', receive=True),
            mock.call(dest=ramdisk_path, src=ramdisk_path,
                      host='fake_host', receive=True)
        ])

    # Glance was still tried first for every file before falling back.
    fetch_image_mock.assert_has_calls([
        mock.call(context=self.context,
                  target=backfile_path,
                  image_id=self.test_instance['image_ref'],
                  user_id=self.test_instance['user_id'],
                  project_id=self.test_instance['project_id'],
                  max_size=25165824),
        mock.call(self.context, kernel_path,
                  self.test_instance['kernel_id'],
                  self.test_instance['user_id'],
                  self.test_instance['project_id']),
        mock.call(self.context, ramdisk_path,
                  self.test_instance['ramdisk_id'],
                  self.test_instance['user_id'],
                  self.test_instance['project_id']),
    ])
def test_create_images_and_backing_ephemeral_gets_created(self):
    """Ephemeral backing files are created (not fetched) while the image
    backing file is fetched from Glance, and both sizes are verified."""
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    disk_info = [
        {u'backing_file': u'fake_image_backing_file',
         u'disk_size': 10747904,
         u'path': u'disk_path',
         u'type': u'qcow2',
         u'virt_disk_size': 25165824},
        {u'backing_file': u'ephemeral_1_default',
         u'disk_size': 393216,
         u'over_committed_disk_size': 1073348608,
         u'path': u'disk_eph_path',
         u'type': u'qcow2',
         u'virt_disk_size': 1073741824}]

    base_dir = os.path.join(CONF.instances_path,
                            CONF.image_cache_subdirectory_name)
    instance = objects.Instance(**self.test_instance)
    with contextlib.nested(
        mock.patch.object(drvr, '_fetch_instance_kernel_ramdisk'),
        mock.patch.object(libvirt_driver.libvirt_utils, 'fetch_image'),
        mock.patch.object(drvr, '_create_ephemeral'),
        mock.patch.object(imagebackend.Image, 'verify_base_size')
    ) as (fetch_kernel_ramdisk_mock, fetch_image_mock,
          create_ephemeral_mock, verify_base_size_mock):
        drvr._create_images_and_backing(self.context, instance,
                                        "/fake/instance/dir",
                                        disk_info)
        # The ephemeral disk was created locally, targeting the image cache.
        self.assertEqual(len(create_ephemeral_mock.call_args_list), 1)
        m_args, m_kwargs = create_ephemeral_mock.call_args_list[0]
        self.assertEqual(
            os.path.join(base_dir, 'ephemeral_1_default'),
            m_kwargs['target'])
        # Only the image-backed disk was fetched from Glance.
        self.assertEqual(len(fetch_image_mock.call_args_list), 1)
        m_args, m_kwargs = fetch_image_mock.call_args_list[0]
        self.assertEqual(
            os.path.join(base_dir, 'fake_image_backing_file'),
            m_kwargs['target'])
        verify_base_size_mock.assert_has_calls([
            mock.call(os.path.join(base_dir, 'fake_image_backing_file'),
                      25165824),
            mock.call(os.path.join(base_dir, 'ephemeral_1_default'),
                      1073741824)
        ])
def test_create_images_and_backing_disk_info_none(self):
    """With disk_info=None only kernel/ramdisk fetching happens; no image
    creation is attempted."""
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    self.mox.StubOutWithMock(drvr, '_fetch_instance_kernel_ramdisk')

    # mox record phase: the only expected call.
    drvr._fetch_instance_kernel_ramdisk(self.context, self.test_instance,
                                        fallback_from_host=None)
    self.mox.ReplayAll()

    drvr._create_images_and_backing(self.context, self.test_instance,
                                    "/fake/instance/dir", None)
def test_pre_live_migration_works_correctly_mocked(self):
    """pre_live_migration connects each mapped volume, plugs VIFs, and
    returns the serial->connection/disk info structure."""
    # Creating testdata
    vol = {'block_device_mapping': [
        {'connection_info': {'serial': '12345', u'data':
            {'device_path':
             u'/dev/disk/by-path/ip-1.2.3.4:3260-iqn.abc.12345.opst-lun-X'}},
         'mount_device': '/dev/sda'},
        {'connection_info': {'serial': '67890', u'data':
            {'device_path':
             u'/dev/disk/by-path/ip-1.2.3.4:3260-iqn.cde.67890.opst-lun-Z'}},
         'mount_device': '/dev/sdb'}]}
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)

    class FakeNetworkInfo(object):
        # Minimal stand-in for the network_info model.
        def fixed_ips(self):
            return ["test_ip_addr"]

    def fake_none(*args, **kwargs):
        return

    self.stubs.Set(drvr, '_create_images_and_backing', fake_none)

    instance = objects.Instance(**self.test_instance)
    c = context.get_admin_context()
    nw_info = FakeNetworkInfo()

    # Creating mocks (mox record phase - call order matters at replay)
    self.mox.StubOutWithMock(driver, "block_device_info_get_mapping")
    driver.block_device_info_get_mapping(vol
                                         ).AndReturn(vol['block_device_mapping'])
    self.mox.StubOutWithMock(drvr, "_connect_volume")
    for v in vol['block_device_mapping']:
        disk_info = {
            'bus': "scsi",
            # Device name without the /dev/ prefix, e.g. 'sda'.
            'dev': v['mount_device'].rpartition("/")[2],
            'type': "disk"
        }
        drvr._connect_volume(v['connection_info'],
                             disk_info)
    self.mox.StubOutWithMock(drvr, 'plug_vifs')
    drvr.plug_vifs(mox.IsA(instance), nw_info)
    self.mox.ReplayAll()

    result = drvr.pre_live_migration(
        c, instance, vol, nw_info, None,
        migrate_data={"block_migration": False})

    target_ret = {
        'graphics_listen_addrs': {'spice': '127.0.0.1', 'vnc': '127.0.0.1'},
        'volume': {
            '12345': {'connection_info': {u'data': {'device_path':
                      u'/dev/disk/by-path/ip-1.2.3.4:3260-iqn.abc.12345.opst-lun-X'},
                      'serial': '12345'},
                      'disk_info': {'bus': 'scsi',
                                    'dev': 'sda',
                                    'type': 'disk'}},
            '67890': {'connection_info': {u'data': {'device_path':
                      u'/dev/disk/by-path/ip-1.2.3.4:3260-iqn.cde.67890.opst-lun-Z'},
                      'serial': '67890'},
                      'disk_info': {'bus': 'scsi',
                                    'dev': 'sdb',
                                    'type': 'disk'}}}}
    self.assertEqual(result, target_ret)
def test_pre_live_migration_block_with_config_drive_mocked(self):
    """Block migration with a required config drive is rejected."""
    # Creating testdata
    vol = {'block_device_mapping': [
        {'connection_info': 'dummy', 'mount_device': '/dev/sda'},
        {'connection_info': 'dummy', 'mount_device': '/dev/sdb'}]}
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)

    # Force configdrive.required_by() to claim a config drive is needed.
    def fake_true(*args, **kwargs):
        return True

    self.stubs.Set(configdrive, 'required_by', fake_true)

    instance = objects.Instance(**self.test_instance)
    c = context.get_admin_context()

    self.assertRaises(exception.NoLiveMigrationForConfigDriveInLibVirt,
                      drvr.pre_live_migration, c, instance, vol, None,
                      None, {'is_shared_instance_path': False,
                             'is_shared_block_storage': False})
@mock.patch('nova.virt.driver.block_device_info_get_mapping',
            return_value=())
@mock.patch('nova.virt.configdrive.required_by',
            return_value=True)
def test_pre_live_migration_block_with_config_drive_mocked_with_vfat(
        self, mock_required_by, block_device_info_get_mapping):
    """A vfat-format config drive does NOT block live migration (unlike the
    default iso9660 format covered by the previous test)."""
    self.flags(config_drive_format='vfat')
    # Creating testdata
    vol = {'block_device_mapping': [
        {'connection_info': 'dummy', 'mount_device': '/dev/sda'},
        {'connection_info': 'dummy', 'mount_device': '/dev/sdb'}]}
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)

    instance = objects.Instance(**self.test_instance)
    res_data = drvr.pre_live_migration(
        self.context, instance, vol, [], None,
        {'is_shared_instance_path': False,
         'is_shared_block_storage': False})
    block_device_info_get_mapping.assert_called_once_with(
        {'block_device_mapping': [
            {'connection_info': 'dummy', 'mount_device': '/dev/sda'},
            {'connection_info': 'dummy', 'mount_device': '/dev/sdb'}
        ]}
    )
    self.assertEqual({'graphics_listen_addrs': {'spice': '127.0.0.1',
                                                'vnc': '127.0.0.1'},
                      'volume': {}}, res_data)
def test_pre_live_migration_vol_backed_works_correctly_mocked(self):
    """Volume-backed pre_live_migration returns volume info and creates
    the instance directory when the instance path is not shared."""
    # Creating testdata, using temp dir.
    with utils.tempdir() as tmpdir:
        self.flags(instances_path=tmpdir)
        vol = {'block_device_mapping': [
            {'connection_info': {'serial': '12345', u'data':
                {'device_path':
                 u'/dev/disk/by-path/ip-1.2.3.4:3260-iqn.abc.12345.opst-lun-X'}},
             'mount_device': '/dev/sda'},
            {'connection_info': {'serial': '67890', u'data':
                {'device_path':
                 u'/dev/disk/by-path/ip-1.2.3.4:3260-iqn.cde.67890.opst-lun-Z'}},
             'mount_device': '/dev/sdb'}]}
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)

        def fake_none(*args, **kwargs):
            return

        self.stubs.Set(drvr, '_create_images_and_backing', fake_none)

        class FakeNetworkInfo(object):
            # Minimal stand-in for the network_info model.
            def fixed_ips(self):
                return ["test_ip_addr"]

        inst_ref = objects.Instance(**self.test_instance)
        c = context.get_admin_context()
        nw_info = FakeNetworkInfo()

        # Creating mocks (mox record phase)
        self.mox.StubOutWithMock(drvr, "_connect_volume")
        for v in vol['block_device_mapping']:
            disk_info = {
                'bus': "scsi",
                'dev': v['mount_device'].rpartition("/")[2],
                'type': "disk"
            }
            drvr._connect_volume(v['connection_info'],
                                 disk_info)
        self.mox.StubOutWithMock(drvr, 'plug_vifs')
        drvr.plug_vifs(mox.IsA(inst_ref), nw_info)
        self.mox.ReplayAll()

        migrate_data = {'is_shared_instance_path': False,
                        'is_volume_backed': True,
                        'block_migration': False,
                        'instance_relative_path': inst_ref['name']
                        }
        ret = drvr.pre_live_migration(c, inst_ref, vol, nw_info, None,
                                      migrate_data)
        target_ret = {
            'graphics_listen_addrs': {'spice': '127.0.0.1',
                                      'vnc': '127.0.0.1'},
            'volume': {
                '12345': {'connection_info': {u'data': {'device_path':
                          u'/dev/disk/by-path/ip-1.2.3.4:3260-iqn.abc.12345.opst-lun-X'},
                          'serial': '12345'},
                          'disk_info': {'bus': 'scsi',
                                        'dev': 'sda',
                                        'type': 'disk'}},
                '67890': {'connection_info': {u'data': {'device_path':
                          u'/dev/disk/by-path/ip-1.2.3.4:3260-iqn.cde.67890.opst-lun-Z'},
                          'serial': '67890'},
                          'disk_info': {'bus': 'scsi',
                                        'dev': 'sdb',
                                        'type': 'disk'}}}}
        self.assertEqual(ret, target_ret)
        # The per-instance directory must have been created under tmpdir.
        self.assertTrue(os.path.exists('%s/%s/' % (tmpdir,
                                                   inst_ref['name'])))
def test_pre_live_migration_plug_vifs_retry_fails(self):
    """plug_vifs failures are retried up to live_migration_retry_count and
    the final error is re-raised."""
    self.flags(live_migration_retry_count=3)
    instance = objects.Instance(**self.test_instance)

    # Always fail so every retry attempt is exhausted.
    def fake_plug_vifs(instance, network_info):
        raise processutils.ProcessExecutionError()

    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    self.stubs.Set(drvr, 'plug_vifs', fake_plug_vifs)
    # Yield instead of sleeping so the retry loop runs instantly.
    self.stubs.Set(eventlet.greenthread, 'sleep',
                   lambda x: eventlet.sleep(0))
    disk_info_json = jsonutils.dumps({})
    self.assertRaises(processutils.ProcessExecutionError,
                      drvr.pre_live_migration,
                      self.context, instance, block_device_info=None,
                      network_info=[], disk_info=disk_info_json)
def test_pre_live_migration_plug_vifs_retry_works(self):
    """pre_live_migration succeeds when plug_vifs recovers within the
    configured retry budget."""
    self.flags(live_migration_retry_count=3)
    called = {'count': 0}
    instance = objects.Instance(**self.test_instance)

    # Fail on all attempts but the last allowed one.
    def fake_plug_vifs(instance, network_info):
        called['count'] += 1
        if called['count'] < CONF.live_migration_retry_count:
            raise processutils.ProcessExecutionError()
        else:
            return

    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    self.stubs.Set(drvr, 'plug_vifs', fake_plug_vifs)
    # Yield instead of sleeping so the retry loop runs instantly.
    self.stubs.Set(eventlet.greenthread, 'sleep',
                   lambda x: eventlet.sleep(0))
    disk_info_json = jsonutils.dumps({})
    drvr.pre_live_migration(self.context, instance, block_device_info=None,
                            network_info=[], disk_info=disk_info_json)
def test_pre_live_migration_image_not_created_with_shared_storage(self):
    """No disk images are created when storage is shared or when it is a
    block migration (images come over the wire instead)."""
    migrate_data_set = [{'is_shared_block_storage': False,
                         'block_migration': False},
                        {'is_shared_block_storage': True,
                         'block_migration': False},
                        {'is_shared_block_storage': False,
                         'block_migration': True}]

    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    instance = objects.Instance(**self.test_instance)
    # creating mocks
    with contextlib.nested(
        mock.patch.object(drvr,
                          '_create_images_and_backing'),
        mock.patch.object(drvr,
                          'ensure_filtering_rules_for_instance'),
        mock.patch.object(drvr, 'plug_vifs'),
    ) as (
        create_image_mock,
        rules_mock,
        plug_mock,
    ):
        disk_info_json = jsonutils.dumps({})
        for migrate_data in migrate_data_set:
            res = drvr.pre_live_migration(self.context, instance,
                                          block_device_info=None,
                                          network_info=[],
                                          disk_info=disk_info_json,
                                          migrate_data=migrate_data)
            self.assertFalse(create_image_mock.called)
            self.assertIsInstance(res, dict)
def test_pre_live_migration_with_not_shared_instance_path(self):
    """Without a shared instance path, images are created locally with the
    source host given as the fallback to copy from."""
    migrate_data = {'is_shared_block_storage': False,
                    'is_shared_instance_path': False}

    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    instance = objects.Instance(**self.test_instance)

    # side_effect hook: verify a non-empty instance dir is passed through.
    def check_instance_dir(context, instance,
                           instance_dir, disk_info,
                           fallback_from_host=False):
        self.assertTrue(instance_dir)

    # creating mocks
    with contextlib.nested(
        mock.patch.object(drvr,
                          '_create_images_and_backing',
                          side_effect=check_instance_dir),
        mock.patch.object(drvr,
                          'ensure_filtering_rules_for_instance'),
        mock.patch.object(drvr, 'plug_vifs'),
    ) as (
        create_image_mock,
        rules_mock,
        plug_mock,
    ):
        disk_info_json = jsonutils.dumps({})
        res = drvr.pre_live_migration(self.context, instance,
                                      block_device_info=None,
                                      network_info=[],
                                      disk_info=disk_info_json,
                                      migrate_data=migrate_data)
        create_image_mock.assert_has_calls(
            [mock.call(self.context, instance, mock.ANY, {},
                       fallback_from_host=instance.host)])
        self.assertIsInstance(res, dict)
def test_pre_live_migration_block_migrate_fails(self):
    """Block migration with attached volumes raises MigrationError."""
    bdms = [{
        'connection_info': {
            'serial': '12345',
            u'data': {
                'device_path':
                u'/dev/disk/by-path/ip-1.2.3.4:3260-iqn.abc.12345.t-lun-X'
            }
        },
        'mount_device': '/dev/sda'}]

    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    instance = objects.Instance(**self.test_instance)
    with contextlib.nested(
        mock.patch.object(drvr, '_create_images_and_backing'),
        mock.patch.object(drvr, 'ensure_filtering_rules_for_instance'),
        mock.patch.object(drvr, 'plug_vifs'),
        mock.patch.object(drvr, '_connect_volume'),
        mock.patch.object(driver, 'block_device_info_get_mapping',
                          return_value=bdms)):
        disk_info_json = jsonutils.dumps({})
        # migrate_data={} means block migration; volumes present => error.
        self.assertRaises(exception.MigrationError,
                          drvr.pre_live_migration,
                          self.context, instance, block_device_info=None,
                          network_info=[], disk_info=disk_info_json,
                          migrate_data={})
def test_get_instance_disk_info_works_correctly(self):
    """get_instance_disk_info reports size/backing/overcommit data for raw
    and qcow2 disks parsed from the domain XML."""
    # Test data
    instance = objects.Instance(**self.test_instance)
    dummyxml = ("<domain type='kvm'><name>instance-0000000a</name>"
                "<devices>"
                "<disk type='file'><driver name='qemu' type='raw'/>"
                "<source file='/test/disk'/>"
                "<target dev='vda' bus='virtio'/></disk>"
                "<disk type='file'><driver name='qemu' type='qcow2'/>"
                "<source file='/test/disk.local'/>"
                "<target dev='vdb' bus='virtio'/></disk>"
                "</devices></domain>")

    # Preparing mocks
    vdmock = self.mox.CreateMock(fakelibvirt.virDomain)
    self.mox.StubOutWithMock(vdmock, "XMLDesc")
    vdmock.XMLDesc(0).AndReturn(dummyxml)

    def fake_lookup(instance_name):
        if instance_name == instance.name:
            return vdmock
    self.create_fake_libvirt_mock(lookupByName=fake_lookup)

    fake_libvirt_utils.disk_sizes['/test/disk'] = 10 * units.Gi
    fake_libvirt_utils.disk_sizes['/test/disk.local'] = 20 * units.Gi
    fake_libvirt_utils.disk_backing_files['/test/disk.local'] = 'file'

    self.mox.StubOutWithMock(os.path, "getsize")
    os.path.getsize('/test/disk').AndReturn((10737418240))
    os.path.getsize('/test/disk.local').AndReturn((3328599655))

    # Canned 'qemu-img info' output for the qcow2 disk.
    ret = ("image: /test/disk\n"
           "file format: raw\n"
           "virtual size: 20G (21474836480 bytes)\n"
           "disk size: 3.1G\n"
           "cluster_size: 2097152\n"
           "backing file: /test/dummy (actual path: /backing/file)\n")

    self.mox.StubOutWithMock(os.path, "exists")
    os.path.exists('/test/disk.local').AndReturn(True)

    self.mox.StubOutWithMock(utils, "execute")
    utils.execute('env', 'LC_ALL=C', 'LANG=C', 'qemu-img', 'info',
                  '/test/disk.local').AndReturn((ret, ''))

    self.mox.ReplayAll()
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    info = drvr.get_instance_disk_info(instance)
    info = jsonutils.loads(info)
    self.assertEqual(info[0]['type'], 'raw')
    self.assertEqual(info[0]['path'], '/test/disk')
    self.assertEqual(info[0]['disk_size'], 10737418240)
    self.assertEqual(info[0]['backing_file'], "")
    self.assertEqual(info[0]['over_committed_disk_size'], 0)
    self.assertEqual(info[1]['type'], 'qcow2')
    self.assertEqual(info[1]['path'], '/test/disk.local')
    self.assertEqual(info[1]['virt_disk_size'], 21474836480)
    self.assertEqual(info[1]['backing_file'], "file")
    # over_committed = virtual size - actual on-disk size.
    self.assertEqual(info[1]['over_committed_disk_size'], 18146236825)
def test_post_live_migration(self):
    """post_live_migration disconnects every mapped volume on the source."""
    vol = {'block_device_mapping': [
        {'connection_info': 'dummy1', 'mount_device': '/dev/sda'},
        {'connection_info': 'dummy2', 'mount_device': '/dev/sdb'}]}
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)

    inst_ref = {'id': 'foo'}
    cntx = context.get_admin_context()

    # Set up the mock expectations
    with contextlib.nested(
        mock.patch.object(driver, 'block_device_info_get_mapping',
                          return_value=vol['block_device_mapping']),
        mock.patch.object(drvr, '_disconnect_volume')
    ) as (block_device_info_get_mapping, _disconnect_volume):
        drvr.post_live_migration(cntx, inst_ref, vol)

        block_device_info_get_mapping.assert_has_calls([
            mock.call(vol)])
        # One disconnect per volume, keyed by device name (e.g. 'sda').
        _disconnect_volume.assert_has_calls([
            mock.call(v['connection_info'],
                      v['mount_device'].rpartition("/")[2])
            for v in vol['block_device_mapping']])
def test_get_instance_disk_info_excludes_volumes(self):
    """Disks backed by attached Cinder volumes are excluded from the
    get_instance_disk_info result when block_device_info is supplied."""
    # Test data
    instance = objects.Instance(**self.test_instance)
    dummyxml = ("<domain type='kvm'><name>instance-0000000a</name>"
                "<devices>"
                "<disk type='file'><driver name='qemu' type='raw'/>"
                "<source file='/test/disk'/>"
                "<target dev='vda' bus='virtio'/></disk>"
                "<disk type='file'><driver name='qemu' type='qcow2'/>"
                "<source file='/test/disk.local'/>"
                "<target dev='vdb' bus='virtio'/></disk>"
                "<disk type='file'><driver name='qemu' type='qcow2'/>"
                "<source file='/fake/path/to/volume1'/>"
                "<target dev='vdc' bus='virtio'/></disk>"
                "<disk type='file'><driver name='qemu' type='qcow2'/>"
                "<source file='/fake/path/to/volume2'/>"
                "<target dev='vdd' bus='virtio'/></disk>"
                "</devices></domain>")

    # Preparing mocks
    vdmock = self.mox.CreateMock(fakelibvirt.virDomain)
    self.mox.StubOutWithMock(vdmock, "XMLDesc")
    vdmock.XMLDesc(0).AndReturn(dummyxml)

    def fake_lookup(instance_name):
        if instance_name == instance.name:
            return vdmock
    self.create_fake_libvirt_mock(lookupByName=fake_lookup)

    fake_libvirt_utils.disk_sizes['/test/disk'] = 10 * units.Gi
    fake_libvirt_utils.disk_sizes['/test/disk.local'] = 20 * units.Gi
    fake_libvirt_utils.disk_backing_files['/test/disk.local'] = 'file'

    self.mox.StubOutWithMock(os.path, "getsize")
    os.path.getsize('/test/disk').AndReturn((10737418240))
    os.path.getsize('/test/disk.local').AndReturn((3328599655))

    # Canned 'qemu-img info' output for the qcow2 disk.
    ret = ("image: /test/disk\n"
           "file format: raw\n"
           "virtual size: 20G (21474836480 bytes)\n"
           "disk size: 3.1G\n"
           "cluster_size: 2097152\n"
           "backing file: /test/dummy (actual path: /backing/file)\n")

    self.mox.StubOutWithMock(os.path, "exists")
    os.path.exists('/test/disk.local').AndReturn(True)

    self.mox.StubOutWithMock(utils, "execute")
    utils.execute('env', 'LC_ALL=C', 'LANG=C', 'qemu-img', 'info',
                  '/test/disk.local').AndReturn((ret, ''))

    self.mox.ReplayAll()
    # vdc/vdd are mapped volumes and must not appear in the result.
    conn_info = {'driver_volume_type': 'fake'}
    info = {'block_device_mapping': [
        {'connection_info': conn_info, 'mount_device': '/dev/vdc'},
        {'connection_info': conn_info, 'mount_device': '/dev/vdd'}]}
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    info = drvr.get_instance_disk_info(instance,
                                       block_device_info=info)
    info = jsonutils.loads(info)
    self.assertEqual(info[0]['type'], 'raw')
    self.assertEqual(info[0]['path'], '/test/disk')
    self.assertEqual(info[0]['disk_size'], 10737418240)
    self.assertEqual(info[0]['backing_file'], "")
    self.assertEqual(info[0]['over_committed_disk_size'], 0)
    self.assertEqual(info[1]['type'], 'qcow2')
    self.assertEqual(info[1]['path'], '/test/disk.local')
    self.assertEqual(info[1]['virt_disk_size'], 21474836480)
    self.assertEqual(info[1]['backing_file'], "file")
    self.assertEqual(info[1]['over_committed_disk_size'], 18146236825)
def test_get_instance_disk_info_no_bdinfo_passed(self):
    """Without block_device_info, <disk type='block'> entries are treated
    as volumes and excluded from the result."""
    # NOTE(ndipanov): _get_disk_overcomitted_size_total calls this method
    # without access to Nova's block device information. We want to make
    # sure that we guess volumes mostly correctly in that case as well
    instance = objects.Instance(**self.test_instance)
    dummyxml = ("<domain type='kvm'><name>instance-0000000a</name>"
                "<devices>"
                "<disk type='file'><driver name='qemu' type='raw'/>"
                "<source file='/test/disk'/>"
                "<target dev='vda' bus='virtio'/></disk>"
                "<disk type='block'><driver name='qemu' type='raw'/>"
                "<source file='/fake/path/to/volume1'/>"
                "<target dev='vdb' bus='virtio'/></disk>"
                "</devices></domain>")

    # Preparing mocks
    vdmock = self.mox.CreateMock(fakelibvirt.virDomain)
    self.mox.StubOutWithMock(vdmock, "XMLDesc")
    vdmock.XMLDesc(0).AndReturn(dummyxml)

    def fake_lookup(instance_name):
        if instance_name == instance.name:
            return vdmock
    self.create_fake_libvirt_mock(lookupByName=fake_lookup)

    fake_libvirt_utils.disk_sizes['/test/disk'] = 10 * units.Gi

    self.mox.StubOutWithMock(os.path, "getsize")
    os.path.getsize('/test/disk').AndReturn((10737418240))

    self.mox.ReplayAll()
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    info = drvr.get_instance_disk_info(instance)

    info = jsonutils.loads(info)
    # Only the file-backed root disk remains; the block device is skipped.
    self.assertEqual(1, len(info))
    self.assertEqual(info[0]['type'], 'raw')
    self.assertEqual(info[0]['path'], '/test/disk')
    self.assertEqual(info[0]['disk_size'], 10737418240)
    self.assertEqual(info[0]['backing_file'], "")
    self.assertEqual(info[0]['over_committed_disk_size'], 0)
def test_spawn_with_network_info(self):
    """spawn succeeds end-to-end with network info on an old-style libvirt
    that lacks VIR_CONNECT_BASELINE_CPU_EXPAND_FEATURES."""
    # Preparing mocks
    def fake_none(*args, **kwargs):
        return

    def fake_getLibVersion():
        return 9011

    def fake_getCapabilities():
        # NOTE(review): whitespace inside this XML literal was lost in the
        # source formatting; it is not significant to the XML parser.
        return """
        <capabilities>
            <host>
                <uuid>cef19ce0-0ca2-11df-855d-b19fbce37686</uuid>
                <cpu>
                  <arch>x86_64</arch>
                  <model>Penryn</model>
                  <vendor>Intel</vendor>
                  <topology sockets='1' cores='2' threads='1'/>
                  <feature name='xtpr'/>
                </cpu>
            </host>
        </capabilities>
        """

    def fake_baselineCPU(cpu, flag):
        return """<cpu mode='custom' match='exact'>
                    <model fallback='allow'>Penryn</model>
                    <vendor>Intel</vendor>
                    <feature policy='require' name='xtpr'/>
                  </cpu>
               """

    # _fake_network_info must be called before create_fake_libvirt_mock(),
    # as _fake_network_info calls importutils.import_class() and
    # create_fake_libvirt_mock() mocks importutils.import_class().
    network_info = _fake_network_info(self.stubs, 1)
    self.create_fake_libvirt_mock(getLibVersion=fake_getLibVersion,
                                  getCapabilities=fake_getCapabilities,
                                  getVersion=lambda: 1005001,
                                  baselineCPU=fake_baselineCPU)

    instance_ref = self.test_instance
    instance_ref['image_ref'] = 123456  # we send an int to test sha1 call
    instance = objects.Instance(**instance_ref)
    image_meta = {}

    # Mock out the get_info method of the LibvirtDriver so that the polling
    # in the spawn method of the LibvirtDriver returns immediately
    self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, 'get_info')
    libvirt_driver.LibvirtDriver.get_info(instance
                                          ).AndReturn(hardware.InstanceInfo(state=power_state.RUNNING))

    # Start test
    self.mox.ReplayAll()

    with mock.patch('nova.virt.libvirt.driver.libvirt') as old_virt:
        # Simulate an older libvirt without the baseline-CPU expand flag.
        del old_virt.VIR_CONNECT_BASELINE_CPU_EXPAND_FEATURES

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        self.stubs.Set(drvr.firewall_driver,
                       'setup_basic_filtering',
                       fake_none)
        self.stubs.Set(drvr.firewall_driver,
                       'prepare_instance_filter',
                       fake_none)
        self.stubs.Set(imagebackend.Image,
                       'cache',
                       fake_none)

        drvr.spawn(self.context, instance, image_meta, [], 'herp',
                   network_info=network_info)

    # Clean up any on-disk artifacts spawn left behind.
    path = os.path.join(CONF.instances_path, instance['name'])
    if os.path.isdir(path):
        shutil.rmtree(path)

    path = os.path.join(CONF.instances_path,
                        CONF.image_cache_subdirectory_name)
    if os.path.isdir(path):
        shutil.rmtree(os.path.join(CONF.instances_path,
                                   CONF.image_cache_subdirectory_name))
def test_spawn_without_image_meta(self):
    """_create_image is invoked whether image_meta is empty or populated."""
    self.create_image_called = False

    def fake_none(*args, **kwargs):
        return

    def fake_create_image(*args, **kwargs):
        # Record that spawn reached image creation.
        self.create_image_called = True

    def fake_get_info(instance):
        return hardware.InstanceInfo(state=power_state.RUNNING)

    instance_ref = self.test_instance
    instance_ref['image_ref'] = 1
    instance = objects.Instance(**instance_ref)
    image_meta = {}

    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    self.stubs.Set(drvr, '_get_guest_xml', fake_none)
    self.stubs.Set(drvr, '_create_image', fake_create_image)
    self.stubs.Set(drvr, '_create_domain_and_network', fake_none)
    self.stubs.Set(drvr, 'get_info', fake_get_info)

    drvr.spawn(self.context, instance, image_meta, [], None)
    self.assertTrue(self.create_image_called)

    drvr.spawn(self.context,
               instance,
               {'id': instance['image_ref']},
               [],
               None)
    self.assertTrue(self.create_image_called)
def test_spawn_from_volume_calls_cache(self):
    """Image.cache is only hit for the root disk when booting from an
    image, not when booting from a volume (with or without a placeholder
    image_ref)."""
    self.cache_called_for_disk = False

    def fake_none(*args, **kwargs):
        return

    def fake_cache(*args, **kwargs):
        # Only flag cache calls made for the root disk image.
        if kwargs.get('image_id') == 'my_fake_image':
            self.cache_called_for_disk = True

    def fake_get_info(instance):
        return hardware.InstanceInfo(state=power_state.RUNNING)

    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    self.stubs.Set(drvr, '_get_guest_xml', fake_none)

    self.stubs.Set(imagebackend.Image, 'cache', fake_cache)
    self.stubs.Set(drvr, '_create_domain_and_network', fake_none)
    self.stubs.Set(drvr, 'get_info', fake_get_info)

    block_device_info = {'root_device_name': '/dev/vda',
                         'block_device_mapping': [
                             {'mount_device': 'vda',
                              'boot_index': 0}
                         ]
                         }

    # Volume-backed instance created without image
    instance_ref = self.test_instance
    instance_ref['image_ref'] = ''
    instance_ref['root_device_name'] = '/dev/vda'
    instance_ref['uuid'] = uuidutils.generate_uuid()
    instance = objects.Instance(**instance_ref)
    image_meta = {}

    drvr.spawn(self.context, instance, image_meta, [], None,
               block_device_info=block_device_info)
    self.assertFalse(self.cache_called_for_disk)

    # Booted from volume but with placeholder image
    instance_ref = self.test_instance
    instance_ref['image_ref'] = 'my_fake_image'
    instance_ref['root_device_name'] = '/dev/vda'
    instance_ref['uuid'] = uuidutils.generate_uuid()
    instance = objects.Instance(**instance_ref)
    image_meta = {}

    drvr.spawn(self.context, instance, image_meta, [], None,
               block_device_info=block_device_info)
    self.assertFalse(self.cache_called_for_disk)

    # Booted from an image
    instance_ref['image_ref'] = 'my_fake_image'
    instance_ref['uuid'] = uuidutils.generate_uuid()
    instance = objects.Instance(**instance_ref)
    drvr.spawn(self.context, instance, image_meta, [], None)
    self.assertTrue(self.cache_called_for_disk)
def test_start_lxc_from_volume(self):
    """LXC instance booted from an iSCSI volume sets up the container from
    the volume device and records the rootfs device name."""
    self.flags(virt_type="lxc",
               group='libvirt')

    def check_setup_container(path, container_dir=None, use_cow=False):
        # setup_container must receive the volume's device path and honour
        # the qcow2 image_disk_format set on the instance (use_cow=True).
        self.assertEqual(path, '/dev/path/to/dev')
        self.assertTrue(use_cow)
        return '/dev/nbd1'

    bdm = {
        'guest_format': None,
        'boot_index': 0,
        'mount_device': '/dev/sda',
        'connection_info': {
            'driver_volume_type': 'iscsi',
            'serial': 'afc1',
            'data': {
                'access_mode': 'rw',
                'device_path': '/dev/path/to/dev',
                'target_discovered': False,
                'encrypted': False,
                'qos_specs': None,
                'target_iqn': 'iqn: volume-afc1',
                'target_portal': 'ip: 3260',
                'volume_id': 'afc1',
                'target_lun': 1,
                'auth_password': 'uj',
                'auth_username': '47',
                'auth_method': 'CHAP'
            }
        },
        'disk_bus': 'scsi',
        'device_type': 'disk',
        'delete_on_termination': False
    }

    # Dict-backed MagicMock so the driver can treat the BDM like a mapping.
    def _get(key, opt=None):
        return bdm.get(key, opt)

    def getitem(key):
        return bdm[key]

    def setitem(key, val):
        bdm[key] = val

    bdm_mock = mock.MagicMock()
    bdm_mock.__getitem__.side_effect = getitem
    bdm_mock.__setitem__.side_effect = setitem
    bdm_mock.get = _get

    disk_mock = mock.MagicMock()
    disk_mock.source_path = '/dev/path/to/dev'

    block_device_info = {'block_device_mapping': [bdm_mock],
                         'root_device_name': '/dev/sda'}

    # Volume-backed instance created without image
    instance_ref = self.test_instance
    instance_ref['image_ref'] = ''
    instance_ref['root_device_name'] = '/dev/sda'
    instance_ref['ephemeral_gb'] = 0
    instance_ref['uuid'] = uuidutils.generate_uuid()
    instance_ref['system_metadata']['image_disk_format'] = 'qcow2'
    inst_obj = objects.Instance(**instance_ref)
    image_meta = {}

    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    with contextlib.nested(
        mock.patch.object(drvr, '_create_images_and_backing'),
        mock.patch.object(drvr, 'plug_vifs'),
        mock.patch.object(drvr.firewall_driver, 'setup_basic_filtering'),
        mock.patch.object(drvr.firewall_driver, 'prepare_instance_filter'),
        mock.patch.object(drvr.firewall_driver, 'apply_instance_filter'),
        mock.patch.object(drvr, '_create_domain'),
        mock.patch.object(drvr, '_connect_volume'),
        mock.patch.object(drvr, '_get_volume_config',
                          return_value=disk_mock),
        mock.patch.object(drvr, 'get_info',
                          return_value=hardware.InstanceInfo(
                              state=power_state.RUNNING)),
        mock.patch('nova.virt.disk.api.setup_container',
                   side_effect=check_setup_container),
        mock.patch('nova.virt.disk.api.teardown_container'),
        mock.patch.object(objects.Instance, 'save')):

        drvr.spawn(self.context, inst_obj, image_meta, [], None,
                   network_info=[],
                   block_device_info=block_device_info)
        # The nbd device returned by setup_container is persisted.
        self.assertEqual('/dev/nbd1',
                         inst_obj.system_metadata.get(
                             'rootfs_device_name'))
def test_spawn_with_pci_devices(self):
    """spawn handles an instance with an assigned PCI device; the fake
    node-device lookup validates the PCI address format."""
    def fake_none(*args, **kwargs):
        return None

    def fake_get_info(instance):
        return hardware.InstanceInfo(state=power_state.RUNNING)

    class FakeLibvirtPciDevice(object):
        # Stand-in for a libvirt nodedev object; detach/reset are no-ops.
        def dettach(self):
            return None

        def reset(self):
            return None

    def fake_node_device_lookup_by_name(address):
        # Accept only names shaped like pci_DDDD_BB_SS_F.
        pattern = ("pci_%(hex)s{4}_%(hex)s{2}_%(hex)s{2}_%(oct)s{1}"
                   % dict(hex='[\da-f]', oct='[0-8]'))
        pattern = re.compile(pattern)
        if pattern.match(address) is None:
            raise fakelibvirt.libvirtError()
        return FakeLibvirtPciDevice()

    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    self.stubs.Set(drvr, '_get_guest_xml', fake_none)
    self.stubs.Set(drvr, '_create_image', fake_none)
    self.stubs.Set(drvr, '_create_domain_and_network', fake_none)
    self.stubs.Set(drvr, 'get_info', fake_get_info)

    drvr._conn.nodeDeviceLookupByName = \
        fake_node_device_lookup_by_name

    instance_ref = self.test_instance
    instance_ref['image_ref'] = 'my_fake_image'
    instance = objects.Instance(**instance_ref)
    instance['pci_devices'] = objects.PciDeviceList(
        objects=[objects.PciDevice(address='0000:00:00.0')])

    image_meta = {}

    drvr.spawn(self.context, instance, image_meta, [], None)
def test_chown_disk_config_for_instance(self):
    # Verify _chown_disk_config_for_instance chowns the config disk to
    # the current uid when it exists.  The mox record/replay sequence
    # below is order-sensitive, so the code is left untouched.
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    instance = objects.Instance(**self.test_instance)
    self.mox.StubOutWithMock(fake_libvirt_utils, 'get_instance_path')
    self.mox.StubOutWithMock(os.path, 'exists')
    self.mox.StubOutWithMock(fake_libvirt_utils, 'chown')
    # Recorded expectations: path lookup, existence check, then chown.
    fake_libvirt_utils.get_instance_path(instance).AndReturn('/tmp/uuid')
    os.path.exists('/tmp/uuid/disk.config').AndReturn(True)
    fake_libvirt_utils.chown('/tmp/uuid/disk.config', os.getuid())

    self.mox.ReplayAll()
    drvr._chown_disk_config_for_instance(instance)
def _test_create_image_plain(self, os_type='', filename='', mkfs=False):
    """Helper asserting which backing files _create_image caches for a
    plain (no swap) instance, varying os_type and the mkfs config.

    :param os_type: instance os_type to set (may be '' or None)
    :param filename: expected ephemeral cache filename
    :param mkfs: if True, configure a per-os_type mkfs command so the
                 ephemeral filename embeds a hash of that command
    """
    gotFiles = []

    def fake_image(self, instance, name, image_type=''):
        class FakeImage(imagebackend.Image):
            def __init__(self, instance, name, is_block_dev=False):
                self.path = os.path.join(instance['name'], name)
                self.is_block_dev = is_block_dev

            def create_image(self, prepare_template, base,
                             size, *args, **kwargs):
                pass

            def cache(self, fetch_func, filename, size=None,
                      *args, **kwargs):
                # Record every cache request so the test can compare
                # against wantFiles below.
                gotFiles.append({'filename': filename,
                                 'size': size})

            def snapshot(self, name):
                pass

        return FakeImage(instance, name)

    def fake_none(*args, **kwargs):
        return

    def fake_get_info(instance):
        return hardware.InstanceInfo(state=power_state.RUNNING)

    # Stop 'libvirt_driver._create_image' touching filesystem
    self.stubs.Set(nova.virt.libvirt.imagebackend.Backend, "image",
                   fake_image)

    instance_ref = self.test_instance
    instance_ref['image_ref'] = 1
    instance = objects.Instance(**instance_ref)
    instance['os_type'] = os_type

    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    self.stubs.Set(drvr, '_get_guest_xml', fake_none)
    self.stubs.Set(drvr, '_create_domain_and_network', fake_none)
    self.stubs.Set(drvr, 'get_info', fake_get_info)
    if mkfs:
        self.stubs.Set(nova.virt.disk.api, '_MKFS_COMMAND',
                       {os_type: 'mkfs.ext4 --label %(fs_label)s %(target)s'})

    image_meta = {'id': instance['image_ref']}
    disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                        instance,
                                        image_meta)
    # NOTE(review): this passes the 'context' module, not self.context,
    # unlike the _get_guest_xml call below -- looks pre-existing;
    # confirm intentional.
    drvr._create_image(context, instance, disk_info['mapping'])
    drvr._get_guest_xml(self.context, instance, None,
                        disk_info, image_meta)

    # First entry is the sha1-named root disk cache; second is the
    # 20G ephemeral whose name depends on os_type/mkfs.
    wantFiles = [
        {'filename': '356a192b7913b04c54574d18c28d46e6395428ab',
         'size': 10 * units.Gi},
        {'filename': filename,
         'size': 20 * units.Gi},
        ]
    self.assertEqual(gotFiles, wantFiles)
def test_create_image_plain_os_type_blank(self):
    """A blank os_type uses the default ephemeral filename."""
    self._test_create_image_plain(
        os_type='',
        mkfs=False,
        filename=self._EPHEMERAL_20_DEFAULT)
def test_create_image_plain_os_type_none(self):
    """A None os_type also falls back to the default filename."""
    self._test_create_image_plain(
        os_type=None,
        mkfs=False,
        filename=self._EPHEMERAL_20_DEFAULT)
def test_create_image_plain_os_type_set_no_fs(self):
    """An os_type with no configured mkfs command keeps the default
    ephemeral filename."""
    self._test_create_image_plain(
        os_type='test',
        mkfs=False,
        filename=self._EPHEMERAL_20_DEFAULT)
def test_create_image_plain_os_type_set_with_fs(self):
    """With a per-os mkfs command configured, the ephemeral filename
    embeds a short hash of that command."""
    mkfs_command = 'mkfs.ext4 --label %(fs_label)s %(target)s'
    expected_name = ('ephemeral_20_%s'
                     % utils.get_hash_str(mkfs_command)[:7])
    self._test_create_image_plain(os_type='test',
                                  filename=expected_name,
                                  mkfs=True)
def test_create_image_with_swap(self):
    """_create_image caches root, ephemeral and a swap file when the
    flavor has swap configured."""
    gotFiles = []

    def fake_image(self, instance, name, image_type=''):
        class FakeImage(imagebackend.Image):
            def __init__(self, instance, name, is_block_dev=False):
                self.path = os.path.join(instance['name'], name)
                self.is_block_dev = is_block_dev

            def create_image(self, prepare_template, base,
                             size, *args, **kwargs):
                pass

            def cache(self, fetch_func, filename, size=None,
                      *args, **kwargs):
                # Record each cache request for comparison below.
                gotFiles.append({'filename': filename,
                                 'size': size})

            def snapshot(self, name):
                pass

        return FakeImage(instance, name)

    def fake_none(*args, **kwargs):
        return

    def fake_get_info(instance):
        return hardware.InstanceInfo(state=power_state.RUNNING)

    # Stop 'libvirt_driver._create_image' touching filesystem
    self.stubs.Set(nova.virt.libvirt.imagebackend.Backend, "image",
                   fake_image)

    instance_ref = self.test_instance
    instance_ref['image_ref'] = 1
    instance = objects.Instance(**instance_ref)
    # Turn on some swap to exercise that codepath in _create_image
    instance.flavor.swap = 500

    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    self.stubs.Set(drvr, '_get_guest_xml', fake_none)
    self.stubs.Set(drvr, '_create_domain_and_network', fake_none)
    self.stubs.Set(drvr, 'get_info', fake_get_info)

    image_meta = {'id': instance['image_ref']}
    disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                        instance,
                                        image_meta)
    # NOTE(review): passes the 'context' module, not self.context --
    # looks pre-existing; confirm intentional.
    drvr._create_image(context, instance, disk_info['mapping'])
    drvr._get_guest_xml(self.context, instance, None,
                        disk_info, image_meta)

    # Expect root disk, default ephemeral, and a 500M swap file.
    wantFiles = [
        {'filename': '356a192b7913b04c54574d18c28d46e6395428ab',
         'size': 10 * units.Gi},
        {'filename': self._EPHEMERAL_20_DEFAULT,
         'size': 20 * units.Gi},
        {'filename': 'swap_500',
         'size': 500 * units.Mi},
        ]
    self.assertEqual(gotFiles, wantFiles)
@mock.patch.object(nova.virt.libvirt.imagebackend.Image, 'cache',
                   side_effect=exception.ImageNotFound(image_id='fake-id'))
def test_create_image_not_exist_no_fallback(self, mock_cache):
    # Without a fallback_from_host argument, a missing image propagates
    # ImageNotFound from Image.cache straight to the caller.
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    instance = objects.Instance(**self.test_instance)
    image_meta = {'id': instance.image_ref}
    disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                        instance,
                                        image_meta)
    self.assertRaises(exception.ImageNotFound,
                      drvr._create_image,
                      self.context, instance, disk_info['mapping'])
@mock.patch.object(nova.virt.libvirt.imagebackend.Image, 'cache')
def test_create_image_not_exist_fallback(self, mock_cache):
    """When the image is gone from glance but fallback_from_host is
    given, the disk is copied from that host instead.

    The first cache() call raises ImageNotFound and rewires the mock so
    the retry invokes fetch_func (copy_image); later calls are no-ops.
    """
    def side_effect(fetch_func, filename, size=None, *args, **kwargs):
        def second_call(fetch_func, filename, size=None, *args, **kwargs):
            # call copy_from_host ourselves because we mocked image.cache()
            fetch_func('fake-target', 'fake-max-size')
            # further calls have no side effect
            mock_cache.side_effect = None
        mock_cache.side_effect = second_call
        # raise an error only the first call
        raise exception.ImageNotFound(image_id='fake-id')

    mock_cache.side_effect = side_effect
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    instance = objects.Instance(**self.test_instance)
    image_meta = {'id': instance.image_ref}
    disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                        instance,
                                        image_meta)

    with mock.patch.object(libvirt_driver.libvirt_utils,
                           'copy_image') as mock_copy:
        drvr._create_image(self.context, instance, disk_info['mapping'],
                           fallback_from_host='fake-source-host')
        # The disk must be pulled from the fallback host.
        mock_copy.assert_called_once_with(src='fake-target',
                                          dest='fake-target',
                                          host='fake-source-host',
                                          receive=True)
@mock.patch.object(utils, 'execute')
def test_create_ephemeral_specified_fs(self, mock_exec):
    """An explicitly specified filesystem overrides the configured
    default_ephemeral_format."""
    self.flags(default_ephemeral_format='ext3')
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)

    drvr._create_ephemeral('/dev/something', 20, 'myVol', 'linux',
                           is_block_dev=True, max_size=20,
                           specified_fs='ext4')

    # 'ext4' (the specified fs) must be used, not the ext3 default.
    expected_args = ('mkfs', '-t', 'ext4', '-F', '-L', 'myVol',
                     '/dev/something')
    mock_exec.assert_called_once_with(*expected_args, run_as_root=True)
def test_create_ephemeral_specified_fs_not_valid(self):
    """An unsupported guest_format on an ephemeral BDM raises
    InvalidBDMFormat from _create_image.
    """
    # Use self.flags() (as the rest of this file does) instead of
    # CONF.set_override, so the override is automatically reverted
    # at test cleanup and cannot leak into other tests.
    self.flags(default_ephemeral_format='ext4')
    ephemerals = [{'device_type': 'disk',
                   'disk_bus': 'virtio',
                   'device_name': '/dev/vdb',
                   'guest_format': 'dummy',
                   'size': 1}]
    block_device_info = {
        'ephemerals': ephemerals}
    instance_ref = self.test_instance
    instance_ref['image_ref'] = 1
    instance = objects.Instance(**instance_ref)
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    image_meta = {'id': instance['image_ref']}
    disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                        instance,
                                        image_meta)
    disk_info['mapping'].pop('disk.local')

    with contextlib.nested(
            mock.patch.object(utils, 'execute'),
            mock.patch.object(drvr, 'get_info'),
            mock.patch.object(drvr, '_create_domain_and_network'),
            mock.patch.object(imagebackend.Image, 'verify_base_size')):
        self.assertRaises(exception.InvalidBDMFormat, drvr._create_image,
                          context, instance, disk_info['mapping'],
                          block_device_info=block_device_info)
def test_create_ephemeral_default(self):
    # With no specified fs, mkfs runs with the (default) ext4 format.
    # mox record/replay is order-sensitive; code untouched.
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    self.mox.StubOutWithMock(utils, 'execute')
    utils.execute('mkfs', '-t', 'ext4', '-F', '-L', 'myVol',
                  '/dev/something', run_as_root=True)
    self.mox.ReplayAll()

    drvr._create_ephemeral('/dev/something', 20, 'myVol', 'linux',
                           is_block_dev=True, max_size=20)
def test_create_ephemeral_with_conf(self):
    """The configured default_ephemeral_format is used when no fs is
    specified by the caller."""
    # Use self.flags() (consistent with the rest of the file) rather
    # than CONF.set_override, so the override is reverted on cleanup.
    self.flags(default_ephemeral_format='ext4')
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    self.mox.StubOutWithMock(utils, 'execute')
    utils.execute('mkfs', '-t', 'ext4', '-F', '-L', 'myVol',
                  '/dev/something', run_as_root=True)
    self.mox.ReplayAll()

    drvr._create_ephemeral('/dev/something', 20, 'myVol', 'linux',
                           is_block_dev=True)
def test_create_ephemeral_with_arbitrary(self):
    # A custom _MKFS_COMMAND template for the os_type is expanded and
    # executed verbatim.  mox replay is order-sensitive; code untouched.
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    self.stubs.Set(nova.virt.disk.api, '_MKFS_COMMAND',
                   {'linux': 'mkfs.ext4 --label %(fs_label)s %(target)s'})
    self.mox.StubOutWithMock(utils, 'execute')
    utils.execute('mkfs.ext4', '--label', 'myVol', '/dev/something',
                  run_as_root=True)
    self.mox.ReplayAll()

    drvr._create_ephemeral('/dev/something', 20, 'myVol', 'linux',
                           is_block_dev=True)
def test_create_ephemeral_with_ext3(self):
    # Same as the arbitrary-command test but with an ext3 template.
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    self.stubs.Set(nova.virt.disk.api, '_MKFS_COMMAND',
                   {'linux': 'mkfs.ext3 --label %(fs_label)s %(target)s'})
    self.mox.StubOutWithMock(utils, 'execute')
    utils.execute('mkfs.ext3', '--label', 'myVol', '/dev/something',
                  run_as_root=True)
    self.mox.ReplayAll()

    drvr._create_ephemeral('/dev/something', 20, 'myVol', 'linux',
                           is_block_dev=True)
def test_create_swap_default(self):
    # _create_swap runs mkswap (not as root) on the target device.
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    self.mox.StubOutWithMock(utils, 'execute')
    utils.execute('mkswap', '/dev/something', run_as_root=False)
    self.mox.ReplayAll()

    drvr._create_swap('/dev/something', 1, max_size=20)
def test_get_console_output_file(self):
    """Console output from a file-backed console is truncated to
    MAX_CONSOLE_BYTES (last N bytes are returned)."""
    fake_libvirt_utils.files['console.log'] = '01234567890'

    with utils.tempdir() as tmpdir:
        self.flags(instances_path=tmpdir)

        instance_ref = self.test_instance
        instance_ref['image_ref'] = 123456
        instance = objects.Instance(**instance_ref)

        console_dir = (os.path.join(tmpdir, instance['name']))
        console_log = '%s/console.log' % (console_dir)
        fake_dom_xml = """
            <domain type='kvm'>
                <devices>
                    <disk type='file'>
                        <source file='filename'/>
                    </disk>
                    <console type='file'>
                        <source path='%s'/>
                        <target port='0'/>
                    </console>
                </devices>
            </domain>
        """ % console_log

        def fake_lookup(id):
            return FakeVirtDomain(fake_dom_xml)

        self.create_fake_libvirt_mock()
        libvirt_driver.LibvirtDriver._conn.lookupByName = fake_lookup

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)

        try:
            # Temporarily shrink the cap; restore it in finally so
            # other tests are unaffected.
            prev_max = libvirt_driver.MAX_CONSOLE_BYTES
            libvirt_driver.MAX_CONSOLE_BYTES = 5
            with mock.patch('os.path.exists', return_value=True):
                output = drvr.get_console_output(self.context, instance)
        finally:
            libvirt_driver.MAX_CONSOLE_BYTES = prev_max

        # Last 5 bytes of '01234567890'.
        self.assertEqual('67890', output)
def test_get_console_output_file_missing(self):
    """A missing console log file yields empty console output rather
    than an error."""
    with utils.tempdir() as tmpdir:
        self.flags(instances_path=tmpdir)

        instance_ref = self.test_instance
        instance_ref['image_ref'] = 123456
        instance = objects.Instance(**instance_ref)

        console_log = os.path.join(tmpdir, instance['name'],
                                   'non-existent.log')
        fake_dom_xml = """
            <domain type='kvm'>
                <devices>
                    <disk type='file'>
                        <source file='filename'/>
                    </disk>
                    <console type='file'>
                        <source path='%s'/>
                        <target port='0'/>
                    </console>
                </devices>
            </domain>
        """ % console_log

        def fake_lookup(id):
            return FakeVirtDomain(fake_dom_xml)

        self.create_fake_libvirt_mock()
        libvirt_driver.LibvirtDriver._conn.lookupByName = fake_lookup

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)

        with mock.patch('os.path.exists', return_value=False):
            output = drvr.get_console_output(self.context, instance)

        self.assertEqual('', output)
def test_get_console_output_pty(self):
    """Console output from a pty-backed console is flushed to a file
    and truncated to MAX_CONSOLE_BYTES."""
    fake_libvirt_utils.files['pty'] = '01234567890'

    with utils.tempdir() as tmpdir:
        self.flags(instances_path=tmpdir)

        instance_ref = self.test_instance
        instance_ref['image_ref'] = 123456
        instance = objects.Instance(**instance_ref)

        console_dir = (os.path.join(tmpdir, instance['name']))
        pty_file = '%s/fake_pty' % (console_dir)
        fake_dom_xml = """
            <domain type='kvm'>
                <devices>
                    <disk type='file'>
                        <source file='filename'/>
                    </disk>
                    <console type='pty'>
                        <source path='%s'/>
                        <target port='0'/>
                    </console>
                </devices>
            </domain>
        """ % pty_file

        def fake_lookup(id):
            return FakeVirtDomain(fake_dom_xml)

        def _fake_flush(self, fake_pty):
            return 'foo'

        def _fake_append_to_file(self, data, fpath):
            return 'pty'

        self.create_fake_libvirt_mock()
        libvirt_driver.LibvirtDriver._conn.lookupByName = fake_lookup
        # Use self.stubs.Set (auto-restored on cleanup) instead of the
        # previous bare class-attribute assignments, which were never
        # undone and leaked the fakes into every later test.
        self.stubs.Set(libvirt_driver.LibvirtDriver,
                       '_flush_libvirt_console', _fake_flush)
        self.stubs.Set(libvirt_driver.LibvirtDriver,
                       '_append_to_file', _fake_append_to_file)

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)

        try:
            prev_max = libvirt_driver.MAX_CONSOLE_BYTES
            libvirt_driver.MAX_CONSOLE_BYTES = 5
            output = drvr.get_console_output(self.context, instance)
        finally:
            libvirt_driver.MAX_CONSOLE_BYTES = prev_max

        # Last 5 bytes of '01234567890'.
        self.assertEqual('67890', output)
def test_get_host_ip_addr(self):
    """get_host_ip_addr() returns the configured my_ip."""
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    self.assertEqual(drvr.get_host_ip_addr(), CONF.my_ip)
@mock.patch.object(libvirt_driver.LOG, 'warn')
@mock.patch('nova.compute.utils.get_machine_ips')
def test_get_host_ip_addr_failure(self, mock_ips, mock_log):
    # When my_ip is not among the machine's interface addresses, a
    # warning is logged with the exact message format asserted below.
    mock_ips.return_value = ['8.8.8.8', '75.75.75.75']
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    drvr.get_host_ip_addr()
    mock_log.assert_called_once_with(u'my_ip address (%(my_ip)s) was '
                                     u'not found on any of the '
                                     u'interfaces: %(ifaces)s',
                                     {'ifaces': '8.8.8.8, 75.75.75.75',
                                      'my_ip': mock.ANY})
def test_conn_event_handler(self):
    """init_host() raises HypervisorUnavailable when the libvirt
    connection fails, and the compute service gets disabled."""
    self.mox.UnsetStubs()
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
    service_mock = mock.MagicMock()
    service_mock.disabled.return_value = False
    with contextlib.nested(
        mock.patch.object(drvr._host, "_connect",
                          side_effect=fakelibvirt.make_libvirtError(
                              fakelibvirt.libvirtError,
                              "Failed to connect to host",
                              error_code=
                              fakelibvirt.VIR_ERR_INTERNAL_ERROR)),
        mock.patch.object(drvr._host, "_init_events",
                          return_value=None),
        mock.patch.object(objects.Service, "get_by_compute_host",
                          return_value=service_mock)):

        # verify that the driver registers for the close callback
        # and re-connects after receiving the callback
        self.assertRaises(exception.HypervisorUnavailable,
                          drvr.init_host,
                          "wibble")
        # NOTE(review): assertTrue on a MagicMock attribute is always
        # truthy unless the driver assigned a real False -- this is a
        # weak assertion; consider checking the assigned value.
        self.assertTrue(service_mock.disabled)
def test_command_with_broken_connection(self):
    """After init_host with a broken connection, driver calls raise
    HypervisorUnavailable and the service is disabled."""
    self.mox.UnsetStubs()
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
    service_mock = mock.MagicMock()
    service_mock.disabled.return_value = False
    with contextlib.nested(
        mock.patch.object(drvr._host, "_connect",
                          side_effect=fakelibvirt.make_libvirtError(
                              fakelibvirt.libvirtError,
                              "Failed to connect to host",
                              error_code=
                              fakelibvirt.VIR_ERR_INTERNAL_ERROR)),
        mock.patch.object(drvr._host, "_init_events",
                          return_value=None),
        mock.patch.object(host.Host, "has_min_version",
                          return_value=True),
        mock.patch.object(drvr, "_do_quality_warnings",
                          return_value=None),
        mock.patch.object(objects.Service, "get_by_compute_host",
                          return_value=service_mock)):

        drvr.init_host("wibble")
        self.assertRaises(exception.HypervisorUnavailable,
                          drvr.get_num_instances)
        # NOTE(review): weak assertion -- a MagicMock attribute is
        # always truthy; consider asserting the assigned value.
        self.assertTrue(service_mock.disabled)
def test_service_resume_after_broken_connection(self):
    """A previously-disabled service is re-enabled (disabled=False,
    reason cleared) once the connection succeeds again."""
    self.mox.UnsetStubs()
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    service_mock = mock.MagicMock()
    service_mock.disabled.return_value = True
    with contextlib.nested(
        mock.patch.object(drvr._host, "_connect",
                          return_value=mock.MagicMock()),
        mock.patch.object(drvr._host, "_init_events",
                          return_value=None),
        mock.patch.object(host.Host, "has_min_version",
                          return_value=True),
        mock.patch.object(drvr, "_do_quality_warnings",
                          return_value=None),
        mock.patch.object(objects.Service, "get_by_compute_host",
                          return_value=service_mock)):

        drvr.init_host("wibble")
        drvr.get_num_instances()
        # Both attributes must have been set by the driver for this
        # to pass (a bare MagicMock attribute would fail both checks).
        self.assertTrue(not service_mock.disabled and
                        service_mock.disabled_reason is None)
@mock.patch.object(objects.Instance, 'save')
def test_immediate_delete(self, mock_save):
    """destroy() of an instance unknown to libvirt still completes and
    saves the instance object."""
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)

    def fake_get_domain(instance):
        raise exception.InstanceNotFound(instance_id=instance.name)

    def fake_delete_instance_files(instance):
        pass

    self.stubs.Set(drvr._host, 'get_domain', fake_get_domain)
    self.stubs.Set(drvr, 'delete_instance_files',
                   fake_delete_instance_files)

    instance = objects.Instance(self.context, **self.test_instance)
    drvr.destroy(self.context, instance, {})

    mock_save.assert_called_once_with()
@mock.patch.object(objects.Instance, 'get_by_uuid')
@mock.patch.object(objects.Instance, 'obj_load_attr', autospec=True)
@mock.patch.object(objects.Instance, 'save', autospec=True)
@mock.patch.object(libvirt_driver.LibvirtDriver, '_destroy')
@mock.patch.object(libvirt_driver.LibvirtDriver, 'delete_instance_files')
@mock.patch.object(libvirt_driver.LibvirtDriver, '_disconnect_volume')
@mock.patch.object(driver, 'block_device_info_get_mapping')
@mock.patch.object(libvirt_driver.LibvirtDriver, '_undefine_domain')
def _test_destroy_removes_disk(self, mock_undefine_domain, mock_mapping,
                               mock_disconnect_volume,
                               mock_delete_instance_files, mock_destroy,
                               mock_inst_save, mock_inst_obj_load_attr,
                               mock_get_by_uuid, volume_fail=False):
    """destroy() completes even when volume disconnection fails,
    because destroy_disks=True swallows volume errors during cleanup.

    :param volume_fail: if True, make _disconnect_volume raise
                        VolumeNotFound
    """
    instance = objects.Instance(self.context, **self.test_instance)
    vol = {'block_device_mapping': [
          {'connection_info': 'dummy', 'mount_device': '/dev/sdb'}]}

    mock_mapping.return_value = vol['block_device_mapping']
    mock_delete_instance_files.return_value = True
    mock_get_by_uuid.return_value = instance
    if volume_fail:
        # BUG FIX: the exception must be installed as a side_effect so
        # it is actually raised; previously it was assigned to
        # return_value, which never raises, so the volume_fail path
        # exercised exactly the same code as the success path.
        mock_disconnect_volume.side_effect = (
            exception.VolumeNotFound('vol'))

    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    drvr.destroy(self.context, instance, [], vol)
def test_destroy_removes_disk(self):
    """Normal destroy path: volume disconnects cleanly."""
    self._test_destroy_removes_disk(volume_fail=False)
def test_destroy_removes_disk_volume_fails(self):
    """destroy still succeeds when volume disconnection fails."""
    self._test_destroy_removes_disk(volume_fail=True)
@mock.patch.object(libvirt_driver.LibvirtDriver, 'unplug_vifs')
@mock.patch.object(libvirt_driver.LibvirtDriver, '_destroy')
@mock.patch.object(libvirt_driver.LibvirtDriver, '_undefine_domain')
def test_destroy_not_removes_disk(self, mock_undefine_domain, mock_destroy,
                                  mock_unplug_vifs):
    # destroy() with destroy_disks=False must not touch instance disks.
    instance = fake_instance.fake_instance_obj(
        None, name='instancename', id=1,
        uuid='875a8070-d0b9-4949-8b31-104d125c9a64')

    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    drvr.destroy(self.context, instance, [], None, False)
@mock.patch.object(libvirt_driver.LibvirtDriver, 'cleanup')
@mock.patch.object(libvirt_driver.LibvirtDriver, '_teardown_container')
@mock.patch.object(host.Host, 'get_domain')
def test_destroy_lxc_calls_teardown_container(self, mock_get_domain,
                                              mock_teardown_container,
                                              mock_cleanup):
    """For LXC, destroy() tears down the container after destroying
    the domain."""
    self.flags(virt_type='lxc', group='libvirt')
    fake_domain = FakeVirtDomain()

    def destroy_side_effect(*args, **kwargs):
        # Simulate the guest transitioning to SHUTDOWN on destroy().
        fake_domain._info[0] = power_state.SHUTDOWN

    with mock.patch.object(fake_domain, 'destroy',
                           side_effect=destroy_side_effect) as mock_domain_destroy:
        mock_get_domain.return_value = fake_domain
        instance = objects.Instance(**self.test_instance)

        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        network_info = []
        drvr.destroy(self.context, instance, network_info, None, False)

        mock_get_domain.assert_has_calls([mock.call(instance),
                                          mock.call(instance)])
        mock_domain_destroy.assert_called_once_with()
        mock_teardown_container.assert_called_once_with(instance)
        mock_cleanup.assert_called_once_with(self.context, instance,
                                             network_info, None, False,
                                             None)
@mock.patch.object(libvirt_driver.LibvirtDriver, 'cleanup')
@mock.patch.object(libvirt_driver.LibvirtDriver, '_teardown_container')
@mock.patch.object(host.Host, 'get_domain')
def test_destroy_lxc_calls_teardown_container_when_no_domain(self,
        mock_get_domain, mock_teardown_container, mock_cleanup):
    """For LXC, the container is torn down even when the libvirt
    domain no longer exists."""
    self.flags(virt_type='lxc', group='libvirt')
    instance = objects.Instance(**self.test_instance)
    inf_exception = exception.InstanceNotFound(instance_id=instance.name)
    mock_get_domain.side_effect = inf_exception

    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    network_info = []
    drvr.destroy(self.context, instance, network_info, None, False)

    mock_get_domain.assert_has_calls([mock.call(instance),
                                      mock.call(instance)])
    mock_teardown_container.assert_called_once_with(instance)
    mock_cleanup.assert_called_once_with(self.context, instance,
                                         network_info, None, False,
                                         None)
def test_reboot_different_ids(self):
    """A soft reboot succeeds when the domain comes back with a
    different ID (new domain was created).

    The mox expectations below are order-sensitive (two info()/ID()
    rounds straddling shutdown()), so the code is left untouched.
    """
    class FakeLoopingCall(object):
        def start(self, *a, **k):
            return self

        def wait(self):
            return None

    self.flags(wait_soft_reboot_seconds=1, group='libvirt')
    info_tuple = ('fake', 'fake', 'fake', 'also_fake')
    self.reboot_create_called = False

    # Mock domain
    mock_domain = self.mox.CreateMock(fakelibvirt.virDomain)
    mock_domain.info().AndReturn(
        (libvirt_driver.VIR_DOMAIN_RUNNING,) + info_tuple)
    mock_domain.ID().AndReturn('some_fake_id')
    mock_domain.shutdown()
    mock_domain.info().AndReturn(
        (libvirt_driver.VIR_DOMAIN_CRASHED,) + info_tuple)
    mock_domain.ID().AndReturn('some_other_fake_id')

    self.mox.ReplayAll()

    def fake_get_domain(instance):
        return mock_domain

    def fake_create_domain(**kwargs):
        self.reboot_create_called = True

    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    instance = objects.Instance(**self.test_instance)
    self.stubs.Set(drvr._host, 'get_domain', fake_get_domain)
    self.stubs.Set(drvr, '_create_domain', fake_create_domain)
    self.stubs.Set(loopingcall, 'FixedIntervalLoopingCall',
                   lambda *a, **k: FakeLoopingCall())
    self.stubs.Set(pci_manager, 'get_instance_pci_devs', lambda *a: [])
    drvr.reboot(None, instance, [], 'SOFT')
    self.assertTrue(self.reboot_create_called)
@mock.patch.object(pci_manager, 'get_instance_pci_devs')
@mock.patch.object(loopingcall, 'FixedIntervalLoopingCall')
@mock.patch.object(greenthread, 'sleep')
@mock.patch.object(libvirt_driver.LibvirtDriver, '_hard_reboot')
@mock.patch.object(host.Host, 'get_domain')
def test_reboot_same_ids(self, mock_get_domain, mock_hard_reboot,
                         mock_sleep, mock_loopingcall,
                         mock_get_instance_pci_devs):
    """If the domain ID never changes after shutdown, the soft reboot
    times out and falls back to a hard reboot."""
    class FakeLoopingCall(object):
        def start(self, *a, **k):
            return self

        def wait(self):
            return None

    self.flags(wait_soft_reboot_seconds=1, group='libvirt')
    info_tuple = ('fake', 'fake', 'fake', 'also_fake')
    self.reboot_hard_reboot_called = False

    # Mock domain
    mock_domain = mock.Mock(fakelibvirt.virDomain)
    return_values = [(libvirt_driver.VIR_DOMAIN_RUNNING,) + info_tuple,
                     (libvirt_driver.VIR_DOMAIN_CRASHED,) + info_tuple]
    mock_domain.info.side_effect = return_values
    # ID stays the same across both checks -> soft reboot "fails".
    mock_domain.ID.return_value = 'some_fake_id'
    mock_domain.shutdown.side_effect = mock.Mock()

    def fake_hard_reboot(*args, **kwargs):
        self.reboot_hard_reboot_called = True

    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    instance = objects.Instance(**self.test_instance)
    mock_get_domain.return_value = mock_domain
    mock_hard_reboot.side_effect = fake_hard_reboot
    mock_loopingcall.return_value = FakeLoopingCall()
    mock_get_instance_pci_devs.return_value = []
    drvr.reboot(None, instance, [], 'SOFT')

    self.assertTrue(self.reboot_hard_reboot_called)
@mock.patch.object(libvirt_driver.LibvirtDriver, '_hard_reboot')
@mock.patch.object(host.Host, 'get_domain')
def test_soft_reboot_libvirt_exception(self, mock_get_domain,
                                       mock_hard_reboot):
    """A hard reboot is performed when the soft reboot raises a
    libvirtError from domain.shutdown()."""
    info_tuple = ('fake', 'fake', 'fake', 'also_fake')
    # setup mocks
    mock_virDomain = mock.Mock(fakelibvirt.virDomain)
    mock_virDomain.info.return_value = (
        (libvirt_driver.VIR_DOMAIN_RUNNING,) + info_tuple)
    mock_virDomain.ID.return_value = 'some_fake_id'
    mock_virDomain.shutdown.side_effect = fakelibvirt.libvirtError('Err')

    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    context = None
    instance = objects.Instance(**self.test_instance)
    network_info = []
    mock_get_domain.return_value = mock_virDomain

    drvr.reboot(context, instance, network_info, 'SOFT')
    # BUG FIX: the test previously never verified the fallback; assert
    # the hard reboot actually happened.
    self.assertTrue(mock_hard_reboot.called)
@mock.patch.object(libvirt_driver.LibvirtDriver, '_hard_reboot')
@mock.patch.object(host.Host, 'get_domain')
def _test_resume_state_on_host_boot_with_state(self, state,
                                               mock_get_domain,
                                               mock_hard_reboot):
    """Helper: resume_state_on_host_boot must hard-reboot the guest
    unless its power state is in the ignored set below.

    :param state: power_state value reported by the fake domain
    """
    mock_virDomain = mock.Mock(fakelibvirt.virDomain)
    mock_virDomain.info.return_value = ([state, None, None, None, None])

    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    mock_get_domain.return_value = mock_virDomain
    instance = objects.Instance(**self.test_instance)
    network_info = _fake_network_info(self.stubs, 1)

    drvr.resume_state_on_host_boot(self.context, instance, network_info,
                                   block_device_info=None)

    # These states indicate the guest needs no intervention on boot.
    ignored_states = (power_state.RUNNING,
                      power_state.SUSPENDED,
                      power_state.NOSTATE,
                      power_state.PAUSED)

    self.assertEqual(mock_hard_reboot.called, state not in ignored_states)
def test_resume_state_on_host_boot_with_running_state(self):
    """A RUNNING guest is left alone on host boot."""
    self._test_resume_state_on_host_boot_with_state(power_state.RUNNING)
def test_resume_state_on_host_boot_with_suspended_state(self):
    """A SUSPENDED guest is left alone on host boot."""
    self._test_resume_state_on_host_boot_with_state(power_state.SUSPENDED)
def test_resume_state_on_host_boot_with_paused_state(self):
    """A PAUSED guest is left alone on host boot."""
    self._test_resume_state_on_host_boot_with_state(power_state.PAUSED)
def test_resume_state_on_host_boot_with_nostate(self):
    """A guest in NOSTATE is left alone on host boot."""
    self._test_resume_state_on_host_boot_with_state(power_state.NOSTATE)
def test_resume_state_on_host_boot_with_shutdown_state(self):
    """A SHUTDOWN guest must be hard-rebooted on host boot.

    BUG FIX: this previously passed power_state.RUNNING (copy/paste
    from the running-state test), so the SHUTDOWN path -- the case the
    test is named for -- was never exercised.
    """
    self._test_resume_state_on_host_boot_with_state(power_state.SHUTDOWN)
def test_resume_state_on_host_boot_with_crashed_state(self):
    """A CRASHED guest is hard-rebooted on host boot."""
    self._test_resume_state_on_host_boot_with_state(power_state.CRASHED)
@mock.patch.object(libvirt_driver.LibvirtDriver, '_hard_reboot')
@mock.patch.object(host.Host, 'get_domain')
def test_resume_state_on_host_boot_with_instance_not_found_on_driver(
        self, mock_get_domain, mock_hard_reboot):
    # An instance unknown to libvirt is hard-rebooted (recreated).
    instance = objects.Instance(**self.test_instance)

    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    mock_get_domain.side_effect = exception.InstanceNotFound(
        instance_id='fake')
    drvr.resume_state_on_host_boot(self.context, instance, network_info=[],
                                   block_device_info=None)

    mock_hard_reboot.assert_called_once_with(self.context,
                                             instance, [], None)
@mock.patch('nova.virt.libvirt.LibvirtDriver.get_info')
@mock.patch('nova.virt.libvirt.LibvirtDriver._create_domain_and_network')
@mock.patch('nova.virt.libvirt.LibvirtDriver._create_images_and_backing')
@mock.patch('nova.virt.libvirt.LibvirtDriver._get_guest_xml')
@mock.patch('nova.virt.libvirt.LibvirtDriver._get_instance_disk_info')
@mock.patch('nova.virt.libvirt.LibvirtDriver._destroy')
def test_hard_reboot(self, mock_destroy, mock_get_instance_disk_info,
                     mock_get_guest_xml, mock_create_images_and_backing,
                     mock_create_domain_and_network, mock_get_info):
    """Smoke test of the hard-reboot path: destroy, recreate images,
    recreate domain, wait for RUNNING."""
    self.context.auth_token = True  # any non-None value will suffice
    instance = objects.Instance(**self.test_instance)
    network_info = _fake_network_info(self.stubs, 1)
    block_device_info = None

    dummyxml = ("<domain type='kvm'><name>instance-0000000a</name>"
                "<devices>"
                "<disk type='file'><driver name='qemu' type='raw'/>"
                "<source file='/test/disk'/>"
                "<target dev='vda' bus='virtio'/></disk>"
                "<disk type='file'><driver name='qemu' type='qcow2'/>"
                "<source file='/test/disk.local'/>"
                "<target dev='vdb' bus='virtio'/></disk>"
                "</devices></domain>")

    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)

    # First poll sees the guest down, second sees it RUNNING so the
    # reboot wait loop terminates.
    return_values = [hardware.InstanceInfo(state=power_state.SHUTDOWN),
                     hardware.InstanceInfo(state=power_state.RUNNING)]
    mock_get_info.side_effect = return_values

    disk_info = [{"virt_disk_size": 2}]

    mock_get_guest_xml.return_value = dummyxml
    mock_get_instance_disk_info.return_value = disk_info

    drvr._hard_reboot(self.context, instance, network_info,
                      block_device_info)
@mock.patch('nova.openstack.common.fileutils.ensure_tree')
@mock.patch('nova.openstack.common.loopingcall.FixedIntervalLoopingCall')
@mock.patch('nova.pci.manager.get_instance_pci_devs')
@mock.patch('nova.virt.libvirt.LibvirtDriver._prepare_pci_devices_for_use')
@mock.patch('nova.virt.libvirt.LibvirtDriver._create_domain_and_network')
@mock.patch('nova.virt.libvirt.LibvirtDriver._create_images_and_backing')
@mock.patch('nova.virt.libvirt.LibvirtDriver._get_instance_disk_info')
@mock.patch('nova.virt.libvirt.utils.write_to_file')
@mock.patch('nova.virt.libvirt.utils.get_instance_path')
@mock.patch('nova.virt.libvirt.LibvirtDriver._get_guest_config')
@mock.patch('nova.virt.libvirt.blockinfo.get_disk_info')
@mock.patch('nova.virt.libvirt.LibvirtDriver._destroy')
def test_hard_reboot_does_not_call_glance_show(self,
        mock_destroy, mock_get_disk_info, mock_get_guest_config,
        mock_get_instance_path, mock_write_to_file,
        mock_get_instance_disk_info, mock_create_images_and_backing,
        mock_create_domand_and_network, mock_prepare_pci_devices_for_use,
        mock_get_instance_pci_devs, mock_looping_call, mock_ensure_tree):
    """For a hard reboot, we shouldn't need an additional call to glance
    to get the image metadata.

    This is important for automatically spinning up instances on a
    host-reboot, since we won't have a user request context that'll allow
    the Glance request to go through. We have to rely on the cached image
    metadata, instead.

    https://bugs.launchpad.net/nova/+bug/1339386
    """
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)

    instance = objects.Instance(**self.test_instance)

    network_info = mock.MagicMock()
    block_device_info = mock.MagicMock()
    mock_get_disk_info.return_value = {}
    mock_get_guest_config.return_value = mock.MagicMock()
    mock_get_instance_path.return_value = '/foo'
    mock_looping_call.return_value = mock.MagicMock()
    drvr._image_api = mock.MagicMock()

    drvr._hard_reboot(self.context, instance, network_info,
                      block_device_info)

    # The whole point of the test: no glance 'show' during hard reboot.
    self.assertFalse(drvr._image_api.get.called)
    mock_ensure_tree.assert_called_once_with('/foo')
@mock.patch.object(time, 'sleep')
@mock.patch.object(libvirt_driver.LibvirtDriver, '_create_domain')
@mock.patch.object(host.Host, 'get_domain')
def _test_clean_shutdown(self, mock_get_domain, mock_create_domain,
                         mock_sleep, seconds_to_shutdown,
                         timeout, retry_interval,
                         shutdown_attempts, succeeds):
    """Helper simulating _clean_shutdown with a guest that takes
    ``seconds_to_shutdown`` to stop.

    Builds one info() return per simulated second (RUNNING until the
    guest stops) and one shutdown() side effect per expected retry,
    then checks the outcome and the number of shutdown attempts.
    """
    info_tuple = ('fake', 'fake', 'fake', 'also_fake')
    shutdown_count = []

    # Mock domain
    mock_domain = mock.Mock(fakelibvirt.virDomain)
    return_infos = [(libvirt_driver.VIR_DOMAIN_RUNNING,) + info_tuple]
    return_shutdowns = [shutdown_count.append("shutdown")]
    retry_countdown = retry_interval
    for x in range(min(seconds_to_shutdown, timeout)):
        return_infos.append(
            (libvirt_driver.VIR_DOMAIN_RUNNING,) + info_tuple)
        if retry_countdown == 0:
            # Another shutdown attempt is due on this tick.
            return_shutdowns.append(shutdown_count.append("shutdown"))
            retry_countdown = retry_interval
        else:
            retry_countdown -= 1

    if seconds_to_shutdown < timeout:
        return_infos.append(
            (libvirt_driver.VIR_DOMAIN_SHUTDOWN,) + info_tuple)

    mock_domain.info.side_effect = return_infos
    mock_domain.shutdown.side_effect = return_shutdowns

    def fake_create_domain(**kwargs):
        self.reboot_create_called = True

    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    instance = objects.Instance(**self.test_instance)
    mock_get_domain.return_value = mock_domain
    mock_create_domain.side_effect = fake_create_domain
    result = drvr._clean_shutdown(instance, timeout, retry_interval)

    self.assertEqual(succeeds, result)
    self.assertEqual(shutdown_attempts, len(shutdown_count))
def test_clean_shutdown_first_time(self):
    """Guest stops before the first retry interval elapses."""
    self._test_clean_shutdown(seconds_to_shutdown=2, timeout=5,
                              retry_interval=3, shutdown_attempts=1,
                              succeeds=True)
def test_clean_shutdown_with_retry(self):
    """Guest needs a second shutdown attempt but stops in time."""
    self._test_clean_shutdown(seconds_to_shutdown=4, timeout=5,
                              retry_interval=3, shutdown_attempts=2,
                              succeeds=True)
def test_clean_shutdown_failure(self):
    """Guest never stops within the timeout; clean shutdown fails."""
    self._test_clean_shutdown(seconds_to_shutdown=6, timeout=5,
                              retry_interval=3, shutdown_attempts=2,
                              succeeds=False)
def test_clean_shutdown_no_wait(self):
    """A zero timeout allows one attempt and no waiting."""
    self._test_clean_shutdown(seconds_to_shutdown=6, timeout=0,
                              retry_interval=3, shutdown_attempts=1,
                              succeeds=False)
@mock.patch.object(FakeVirtDomain, 'attachDeviceFlags')
@mock.patch.object(FakeVirtDomain, 'ID', return_value=1)
@mock.patch.object(utils, 'get_image_from_system_metadata',
                   return_value=None)
def test_attach_sriov_ports(self,
                            mock_get_image_metadata,
                            mock_ID,
                            mock_attachDevice):
    # A direct (SR-IOV) vif in the supplied network_info is attached
    # to the guest via attachDeviceFlags.
    instance = objects.Instance(**self.test_instance)

    network_info = _fake_network_info(self.stubs, 1)
    network_info[0]['vnic_type'] = network_model.VNIC_TYPE_DIRECT
    guest = libvirt_guest.Guest(FakeVirtDomain())
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)

    drvr._attach_sriov_ports(self.context, instance, guest, network_info)
    mock_get_image_metadata.assert_called_once_with(
        instance.system_metadata)
    self.assertTrue(mock_attachDevice.called)
@mock.patch.object(FakeVirtDomain, 'attachDeviceFlags')
@mock.patch.object(FakeVirtDomain, 'ID', return_value=1)
@mock.patch.object(utils, 'get_image_from_system_metadata',
                   return_value=None)
def test_attach_sriov_ports_with_info_cache(self,
                                            mock_get_image_metadata,
                                            mock_ID,
                                            mock_attachDevice):
    """When network_info is None, _attach_sriov_ports falls back to
    the instance's info_cache to find direct ports.
    """
    instance = objects.Instance(**self.test_instance)
    network_info = _fake_network_info(self.stubs, 1)
    network_info[0]['vnic_type'] = network_model.VNIC_TYPE_DIRECT
    # Direct port is only reachable through the info cache here.
    instance.info_cache = objects.InstanceInfoCache(
        network_info=network_info)
    guest = libvirt_guest.Guest(FakeVirtDomain())
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    drvr._attach_sriov_ports(self.context, instance, guest, None)
    mock_get_image_metadata.assert_called_once_with(
        instance.system_metadata)
    self.assertTrue(mock_attachDevice.called)
@mock.patch.object(host.Host,
                   'has_min_version', return_value=True)
@mock.patch.object(FakeVirtDomain, 'detachDeviceFlags')
@mock.patch.object(utils, 'get_image_from_system_metadata',
                   return_value=None)
def test_detach_sriov_ports(self,
                            mock_get_image_metadata,
                            mock_detachDeviceFlags,
                            mock_has_min_version):
    """SR-IOV ports found in the instance info_cache are detached
    from the domain via detachDeviceFlags.
    """
    instance = objects.Instance(**self.test_instance)
    network_info = _fake_network_info(self.stubs, 1)
    network_info[0]['vnic_type'] = network_model.VNIC_TYPE_DIRECT
    instance.info_cache = objects.InstanceInfoCache(
        network_info=network_info)
    domain = FakeVirtDomain()
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    drvr._detach_sriov_ports(self.context, instance, domain)
    mock_get_image_metadata.assert_called_once_with(
        instance.system_metadata)
    self.assertTrue(mock_detachDeviceFlags.called)
def test_resume(self):
    """resume() recreates the domain from its existing XML and
    re-attaches the instance's PCI devices to the new guest.
    """
    dummyxml = ("<domain type='kvm'><name>instance-0000000a</name>"
                "<devices>"
                "<disk type='file'><driver name='qemu' type='raw'/>"
                "<source file='/test/disk'/>"
                "<target dev='vda' bus='virtio'/></disk>"
                "<disk type='file'><driver name='qemu' type='qcow2'/>"
                "<source file='/test/disk.local'/>"
                "<target dev='vdb' bus='virtio'/></disk>"
                "</devices></domain>")
    instance = objects.Instance(**self.test_instance)
    network_info = _fake_network_info(self.stubs, 1)
    block_device_info = None
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    guest = libvirt_guest.Guest('fake_dom')
    with contextlib.nested(
        mock.patch.object(drvr, '_get_existing_domain_xml',
                          return_value=dummyxml),
        mock.patch.object(drvr, '_create_domain_and_network',
                          return_value=guest),
        mock.patch.object(drvr, '_attach_pci_devices'),
        mock.patch.object(pci_manager, 'get_instance_pci_devs',
                          return_value='fake_pci_devs'),
        mock.patch.object(utils, 'get_image_from_system_metadata'),
        mock.patch.object(blockinfo, 'get_disk_info'),
    ) as (_get_existing_domain_xml, _create_domain_and_network,
          _attach_pci_devices, get_instance_pci_devs, get_image_metadata,
          get_disk_info):
        get_image_metadata.return_value = {'bar': 234}
        disk_info = {'foo': 123}
        get_disk_info.return_value = disk_info
        drvr.resume(self.context, instance, network_info,
                    block_device_info)
        # The saved XML must be fetched and fed into domain creation
        # with VIF plugging skipped (vifs_already_plugged=True).
        _get_existing_domain_xml.assert_has_calls([mock.call(instance,
                network_info, block_device_info)])
        _create_domain_and_network.assert_has_calls([mock.call(
                self.context, dummyxml,
                instance, network_info, disk_info,
                block_device_info=block_device_info,
                vifs_already_plugged=True)])
        _attach_pci_devices.assert_has_calls([mock.call(guest,
                                              'fake_pci_devs')])
@mock.patch.object(host.Host, 'get_domain')
@mock.patch.object(libvirt_driver.LibvirtDriver, 'get_info')
@mock.patch.object(libvirt_driver.LibvirtDriver, 'delete_instance_files')
@mock.patch.object(objects.Instance, 'save')
def test_destroy_undefines(self, mock_save, mock_delete_instance_files,
                           mock_get_info, mock_get_domain):
    """destroy() undefines the domain and saves the instance."""
    dom_mock = mock.MagicMock()
    dom_mock.undefineFlags.return_value = 1
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    mock_get_domain.return_value = dom_mock
    # Report the guest as already shut down so destroy can proceed
    # straight to the undefine/cleanup path.
    mock_get_info.return_value = hardware.InstanceInfo(
        state=power_state.SHUTDOWN, id=-1)
    mock_delete_instance_files.return_value = None
    instance = objects.Instance(self.context, **self.test_instance)
    drvr.destroy(self.context, instance, [])
    mock_save.assert_called_once_with()
@mock.patch.object(rbd_utils, 'RBDDriver')
def test_cleanup_rbd(self, mock_driver):
    """_cleanup_rbd delegates volume cleanup to the RBD driver."""
    fake_instance = {'uuid': '875a8070-d0b9-4949-8b31-104d125c9a64'}
    rbd = mock_driver.return_value
    rbd.cleanup_volumes = mock.Mock()
    conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
    conn._cleanup_rbd(fake_instance)
    rbd.cleanup_volumes.assert_called_once_with(fake_instance)
@mock.patch.object(objects.Instance, 'save')
def test_destroy_undefines_no_undefine_flags(self, mock_save):
    """destroy() falls back to plain undefine() when undefineFlags()
    raises libvirtError.
    """
    # mox expectations are recorded in order: destroy, failing
    # undefineFlags, then the plain undefine fallback.
    mock = self.mox.CreateMock(fakelibvirt.virDomain)
    mock.ID()
    mock.destroy()
    mock.undefineFlags(1).AndRaise(fakelibvirt.libvirtError('Err'))
    mock.ID().AndReturn(123)
    mock.undefine()
    self.mox.ReplayAll()
    def fake_get_domain(instance):
        return mock
    def fake_get_info(instance_name):
        return hardware.InstanceInfo(state=power_state.SHUTDOWN, id=-1)
    def fake_delete_instance_files(instance):
        return None
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    self.stubs.Set(drvr._host, 'get_domain', fake_get_domain)
    self.stubs.Set(drvr, 'get_info', fake_get_info)
    self.stubs.Set(drvr, 'delete_instance_files',
                   fake_delete_instance_files)
    instance = objects.Instance(self.context, **self.test_instance)
    drvr.destroy(self.context, instance, [])
    mock_save.assert_called_once_with()
@mock.patch.object(objects.Instance, 'save')
def test_destroy_undefines_no_attribute_with_managed_save(self, mock_save):
    """When undefineFlags is unavailable (AttributeError, old libvirt)
    destroy() removes the managed save image before undefining.
    """
    mock = self.mox.CreateMock(fakelibvirt.virDomain)
    mock.ID()
    mock.destroy()
    mock.undefineFlags(1).AndRaise(AttributeError())
    mock.hasManagedSaveImage(0).AndReturn(True)
    mock.managedSaveRemove(0)
    mock.undefine()
    self.mox.ReplayAll()
    def fake_get_domain(instance):
        return mock
    def fake_get_info(instance_name):
        return hardware.InstanceInfo(state=power_state.SHUTDOWN, id=-1)
    def fake_delete_instance_files(instance):
        return None
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    self.stubs.Set(drvr._host, 'get_domain', fake_get_domain)
    self.stubs.Set(drvr, 'get_info', fake_get_info)
    self.stubs.Set(drvr, 'delete_instance_files',
                   fake_delete_instance_files)
    instance = objects.Instance(self.context, **self.test_instance)
    drvr.destroy(self.context, instance, [])
    mock_save.assert_called_once_with()
@mock.patch.object(objects.Instance, 'save')
def test_destroy_undefines_no_attribute_no_managed_save(self, mock_save):
    """destroy() still undefines the domain when both undefineFlags and
    hasManagedSaveImage are unavailable (AttributeError).
    """
    mock = self.mox.CreateMock(fakelibvirt.virDomain)
    mock.ID()
    mock.destroy()
    mock.undefineFlags(1).AndRaise(AttributeError())
    mock.hasManagedSaveImage(0).AndRaise(AttributeError())
    mock.undefine()
    self.mox.ReplayAll()
    # NOTE(review): stubbed on the host.Host class (not the _host
    # instance as in sibling tests), hence the extra ``self`` param.
    def fake_get_domain(self, instance):
        return mock
    def fake_get_info(instance_name):
        # NOTE(review): unlike sibling tests this omits id=-1;
        # presumably the id is irrelevant on this path — confirm.
        return hardware.InstanceInfo(state=power_state.SHUTDOWN)
    def fake_delete_instance_files(instance):
        return None
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    self.stubs.Set(host.Host, 'get_domain', fake_get_domain)
    self.stubs.Set(drvr, 'get_info', fake_get_info)
    self.stubs.Set(drvr, 'delete_instance_files',
                   fake_delete_instance_files)
    instance = objects.Instance(self.context, **self.test_instance)
    drvr.destroy(self.context, instance, [])
    mock_save.assert_called_once_with()
def test_destroy_timed_out(self):
    """destroy() raises InstancePowerOffFailure when libvirt reports
    VIR_ERR_OPERATION_TIMEOUT from domain destroy.
    """
    mock = self.mox.CreateMock(fakelibvirt.virDomain)
    mock.ID()
    mock.destroy().AndRaise(fakelibvirt.libvirtError("timed out"))
    self.mox.ReplayAll()
    def fake_get_domain(self, instance):
        return mock
    def fake_get_error_code(self):
        return fakelibvirt.VIR_ERR_OPERATION_TIMEOUT
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    self.stubs.Set(host.Host, 'get_domain', fake_get_domain)
    self.stubs.Set(fakelibvirt.libvirtError, 'get_error_code',
                   fake_get_error_code)
    instance = objects.Instance(**self.test_instance)
    self.assertRaises(exception.InstancePowerOffFailure,
                      drvr.destroy, self.context, instance, [])
def test_private_destroy_not_found(self):
    """_destroy() swallows VIR_ERR_NO_DOMAIN: a guest that vanished
    during teardown is treated as already destroyed.
    """
    ex = fakelibvirt.make_libvirtError(
        fakelibvirt.libvirtError,
        "No such domain",
        error_code=fakelibvirt.VIR_ERR_NO_DOMAIN)
    mock = self.mox.CreateMock(fakelibvirt.virDomain)
    mock.ID()
    mock.destroy().AndRaise(ex)
    mock.info().AndRaise(ex)
    self.mox.ReplayAll()
    def fake_get_domain(instance):
        return mock
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    self.stubs.Set(drvr._host, 'get_domain', fake_get_domain)
    instance = objects.Instance(**self.test_instance)
    # NOTE(vish): verifies destroy doesn't raise if the instance disappears
    drvr._destroy(instance)
def test_private_destroy_lxc_processes_refused_to_die(self):
    """For LXC, a 'processes refused to die' internal error is
    tolerated when the guest is already reported as shut down.
    """
    self.flags(virt_type='lxc', group='libvirt')
    ex = fakelibvirt.make_libvirtError(
        fakelibvirt.libvirtError, "",
        error_message="internal error: Some processes refused to die",
        error_code=fakelibvirt.VIR_ERR_INTERNAL_ERROR)
    conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    with mock.patch.object(conn._host, 'get_domain') as mock_get_domain, \
        mock.patch.object(conn, 'get_info') as mock_get_info:
        mock_domain = mock.MagicMock()
        mock_domain.ID.return_value = 1
        mock_get_domain.return_value = mock_domain
        mock_domain.destroy.side_effect = ex
        mock_info = mock.MagicMock()
        mock_info.id = 1
        mock_info.state = power_state.SHUTDOWN
        mock_get_info.return_value = mock_info
        instance = objects.Instance(**self.test_instance)
        # Must not raise despite the destroy failure.
        conn._destroy(instance)
def test_private_destroy_processes_refused_to_die_still_raises(self):
    """The 'processes refused to die' error is only forgiven for LXC;
    with the default virt_type _destroy re-raises it.
    """
    ex = fakelibvirt.make_libvirtError(
        fakelibvirt.libvirtError, "",
        error_message="internal error: Some processes refused to die",
        error_code=fakelibvirt.VIR_ERR_INTERNAL_ERROR)
    conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    with mock.patch.object(conn._host, 'get_domain') as mock_get_domain:
        mock_domain = mock.MagicMock()
        mock_domain.ID.return_value = 1
        mock_get_domain.return_value = mock_domain
        mock_domain.destroy.side_effect = ex
        instance = objects.Instance(**self.test_instance)
        self.assertRaises(fakelibvirt.libvirtError, conn._destroy,
                          instance)
def test_private_destroy_ebusy_timeout(self):
    # Tests that _destroy will retry 3 times to destroy the guest when an
    # EBUSY is raised, but eventually times out and raises the libvirtError
    ex = fakelibvirt.make_libvirtError(
        fakelibvirt.libvirtError,
        ("Failed to terminate process 26425 with SIGKILL: "
         "Device or resource busy"),
        error_code=fakelibvirt.VIR_ERR_SYSTEM_ERROR,
        int1=errno.EBUSY)
    # Guest that fails with EBUSY on every poweroff attempt.
    mock_guest = mock.Mock(libvirt_guest.Guest, id=1)
    mock_guest.poweroff = mock.Mock(side_effect=ex)
    instance = objects.Instance(**self.test_instance)
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    with mock.patch.object(drvr._host, 'get_guest',
                           return_value=mock_guest):
        self.assertRaises(fakelibvirt.libvirtError, drvr._destroy,
                          instance)
    self.assertEqual(3, mock_guest.poweroff.call_count)
def test_private_destroy_ebusy_multiple_attempt_ok(self):
    # Tests that the _destroy attempt loop is broken when EBUSY is no
    # longer raised.
    ex = fakelibvirt.make_libvirtError(
        fakelibvirt.libvirtError,
        ("Failed to terminate process 26425 with SIGKILL: "
         "Device or resource busy"),
        error_code=fakelibvirt.VIR_ERR_SYSTEM_ERROR,
        int1=errno.EBUSY)
    # First poweroff raises EBUSY, second succeeds.
    mock_guest = mock.Mock(libvirt_guest.Guest, id=1)
    mock_guest.poweroff = mock.Mock(side_effect=[ex, None])
    inst_info = hardware.InstanceInfo(power_state.SHUTDOWN, id=1)
    instance = objects.Instance(**self.test_instance)
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    with mock.patch.object(drvr._host, 'get_guest',
                           return_value=mock_guest):
        with mock.patch.object(drvr, 'get_info', return_value=inst_info):
            drvr._destroy(instance)
    self.assertEqual(2, mock_guest.poweroff.call_count)
def test_undefine_domain_with_not_found_instance(self):
    """_undefine_domain is a no-op when the domain no longer exists."""
    def fake_get_domain(self, instance):
        raise exception.InstanceNotFound(instance_id=instance.name)
    self.stubs.Set(host.Host, 'get_domain', fake_get_domain)
    # No expectations recorded: get_error_code must never be called.
    self.mox.StubOutWithMock(fakelibvirt.libvirtError, "get_error_code")
    self.mox.ReplayAll()
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    instance = objects.Instance(**self.test_instance)
    # NOTE(wenjianhn): verifies undefine doesn't raise if the
    # instance disappears
    drvr._undefine_domain(instance)
@mock.patch.object(host.Host, "list_instance_domains")
def test_disk_over_committed_size_total(self, mock_list):
    """Over-committed disk sizes are summed across all domains."""
    # Minimal fake virDomain exposing just what the driver reads.
    class DiagFakeDomain(object):
        def __init__(self, name):
            self._name = name
        def ID(self):
            return 1
        def name(self):
            return self._name
        def UUIDString(self):
            return "19479fee-07a5-49bb-9138-d3738280d63c"
        def XMLDesc(self, flags):
            return "<domain/>"
    mock_list.return_value = [
        DiagFakeDomain("instance0000001"),
        DiagFakeDomain("instance0000002")]
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    # Only instance0000001 over-commits; expected total is its value.
    fake_disks = {'instance0000001':
                  [{'type': 'qcow2', 'path': '/somepath/disk1',
                    'virt_disk_size': '10737418240',
                    'backing_file': '/somepath/disk1',
                    'disk_size': '83886080',
                    'over_committed_disk_size': '10653532160'}],
                  'instance0000002':
                  [{'type': 'raw', 'path': '/somepath/disk2',
                    'virt_disk_size': '0',
                    'backing_file': '/somepath/disk2',
                    'disk_size': '10737418240',
                    'over_committed_disk_size': '0'}]}
    def get_info(instance_name, xml, **kwargs):
        return fake_disks.get(instance_name)
    with mock.patch.object(drvr,
                           "_get_instance_disk_info") as mock_info:
        mock_info.side_effect = get_info
        result = drvr._get_disk_over_committed_size_total()
        self.assertEqual(result, 10653532160)
        mock_list.assert_called_with()
        self.assertTrue(mock_info.called)
@mock.patch.object(host.Host, "list_instance_domains")
def test_disk_over_committed_size_total_eperm(self, mock_list):
    """A permission error (EACCES) on one instance's disks is skipped
    and the remaining instances are still summed.
    """
    # Minimal fake virDomain exposing just what the driver reads.
    class DiagFakeDomain(object):
        def __init__(self, name):
            self._name = name
        def ID(self):
            return 1
        def name(self):
            return self._name
        def UUIDString(self):
            return "19479fee-07a5-49bb-9138-d3738280d63c"
        def XMLDesc(self, flags):
            return "<domain/>"
    mock_list.return_value = [
        DiagFakeDomain("instance0000001"),
        DiagFakeDomain("instance0000002")]
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    fake_disks = {'instance0000001':
                  [{'type': 'qcow2', 'path': '/somepath/disk1',
                    'virt_disk_size': '10737418240',
                    'backing_file': '/somepath/disk1',
                    'disk_size': '83886080',
                    'over_committed_disk_size': '10653532160'}],
                  'instance0000002':
                  [{'type': 'raw', 'path': '/somepath/disk2',
                    'virt_disk_size': '0',
                    'backing_file': '/somepath/disk2',
                    'disk_size': '10737418240',
                    'over_committed_disk_size': '21474836480'}]}
    # First instance raises EACCES; only the second contributes.
    def side_effect(name, dom):
        if name == 'instance0000001':
            raise OSError(errno.EACCES, 'Permission denied')
        if name == 'instance0000002':
            return fake_disks.get(name)
    get_disk_info = mock.Mock()
    get_disk_info.side_effect = side_effect
    drvr._get_instance_disk_info = get_disk_info
    result = drvr._get_disk_over_committed_size_total()
    self.assertEqual(21474836480, result)
    mock_list.assert_called_with()
@mock.patch.object(host.Host, "list_instance_domains",
                   return_value=[mock.MagicMock(name='foo')])
@mock.patch.object(libvirt_driver.LibvirtDriver, "_get_instance_disk_info",
                   side_effect=exception.VolumeBDMPathNotFound(path='bar'))
def test_disk_over_committed_size_total_bdm_not_found(self,
                                                      mock_get_disk_info,
                                                      mock_list_domains):
    """VolumeBDMPathNotFound is handled gracefully: the instance is
    skipped and the total stays 0.
    """
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    self.assertEqual(0, drvr._get_disk_over_committed_size_total())
def test_cpu_info(self):
    """_get_cpu_info() flattens the host capabilities CPU description
    into the vendor/model/arch/features/topology dict.
    """
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
    def get_host_capabilities_stub(self):
        # Fake AMD host CPU with two features and a 4x2x1 topology.
        cpu = vconfig.LibvirtConfigCPU()
        cpu.model = "Opteron_G4"
        cpu.vendor = "AMD"
        cpu.arch = arch.X86_64
        cpu.cores = 2
        cpu.threads = 1
        cpu.sockets = 4
        cpu.add_feature(vconfig.LibvirtConfigCPUFeature("extapic"))
        cpu.add_feature(vconfig.LibvirtConfigCPUFeature("3dnow"))
        caps = vconfig.LibvirtConfigCaps()
        caps.host = vconfig.LibvirtConfigCapsHost()
        caps.host.cpu = cpu
        # Two guest arch entries; not reflected in the cpu info result.
        guest = vconfig.LibvirtConfigGuest()
        guest.ostype = vm_mode.HVM
        guest.arch = arch.X86_64
        guest.domtype = ["kvm"]
        caps.guests.append(guest)
        guest = vconfig.LibvirtConfigGuest()
        guest.ostype = vm_mode.HVM
        guest.arch = arch.I686
        guest.domtype = ["kvm"]
        caps.guests.append(guest)
        return caps
    self.stubs.Set(host.Host, "get_capabilities",
                   get_host_capabilities_stub)
    want = {"vendor": "AMD",
            "features": set(["extapic", "3dnow"]),
            "model": "Opteron_G4",
            "arch": arch.X86_64,
            "topology": {"cores": 2, "threads": 1, "sockets": 4}}
    got = drvr._get_cpu_info()
    self.assertEqual(want, got)
def test_get_pcidev_info(self):
    """_get_pcidev_info() parses node device XML into PCI device dicts
    for a PF and two VFs (with and without a NUMA node).
    """
    def fake_nodeDeviceLookupByName(self, name):
        return FakeNodeDevice(_fake_NodeDevXml[name])
    self.mox.StubOutWithMock(host.Host, 'device_lookup_by_name')
    host.Host.device_lookup_by_name = fake_nodeDeviceLookupByName
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    # Physical function: no phys_function key, no NUMA node.
    actualvf = drvr._get_pcidev_info("pci_0000_04_00_3")
    expect_vf = {
        "dev_id": "pci_0000_04_00_3",
        "address": "0000:04:00.3",
        "product_id": '1521',
        "numa_node": None,
        "vendor_id": '8086',
        "label": 'label_8086_1521',
        "dev_type": 'type-PF',
        }
    self.assertEqual(expect_vf, actualvf)
    # Virtual function without NUMA information.
    actualvf = drvr._get_pcidev_info("pci_0000_04_10_7")
    expect_vf = {
        "dev_id": "pci_0000_04_10_7",
        "address": "0000:04:10.7",
        "product_id": '1520',
        "numa_node": None,
        "vendor_id": '8086',
        "label": 'label_8086_1520',
        "dev_type": 'type-VF',
        "phys_function": '0000:04:00.3',
    }
    self.assertEqual(expect_vf, actualvf)
    # Virtual function pinned to NUMA node 0.
    actualvf = drvr._get_pcidev_info("pci_0000_04_11_7")
    expect_vf = {
        "dev_id": "pci_0000_04_11_7",
        "address": "0000:04:11.7",
        "product_id": '1520',
        "vendor_id": '8086',
        "numa_node": 0,
        "label": 'label_8086_1520',
        "dev_type": 'type-VF',
        "phys_function": '0000:04:00.3',
    }
    self.assertEqual(expect_vf, actualvf)
def test_list_devices_not_supported(self):
    """VIR_ERR_NO_SUPPORT from listDevices yields an empty device list
    (and is cached); any other libvirt error propagates.
    """
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    # Handle just the NO_SUPPORT error
    not_supported_exc = fakelibvirt.make_libvirtError(
        fakelibvirt.libvirtError,
        'this function is not supported by the connection driver:'
        ' virNodeNumOfDevices',
        error_code=fakelibvirt.VIR_ERR_NO_SUPPORT)
    with mock.patch.object(drvr._conn, 'listDevices',
                           side_effect=not_supported_exc):
        self.assertEqual('[]', drvr._get_pci_passthrough_devices())
    # We cache not supported status to avoid emitting too many logging
    # messages. Clear this value to test the other exception case.
    del drvr._list_devices_supported
    # Other errors should not be caught
    other_exc = fakelibvirt.make_libvirtError(
        fakelibvirt.libvirtError,
        'other exc',
        error_code=fakelibvirt.VIR_ERR_NO_DOMAIN)
    with mock.patch.object(drvr._conn, 'listDevices',
                           side_effect=other_exc):
        self.assertRaises(fakelibvirt.libvirtError,
                          drvr._get_pci_passthrough_devices)
def test_get_pci_passthrough_devices(self):
    """_get_pci_passthrough_devices() returns JSON describing every
    listed PCI device (one PF and two VFs).
    """
    def fakelistDevices(caps, fakeargs=0):
        return ['pci_0000_04_00_3', 'pci_0000_04_10_7',
                'pci_0000_04_11_7']
    self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, '_conn')
    libvirt_driver.LibvirtDriver._conn.listDevices = fakelistDevices
    def fake_nodeDeviceLookupByName(self, name):
        return FakeNodeDevice(_fake_NodeDevXml[name])
    self.mox.StubOutWithMock(host.Host, 'device_lookup_by_name')
    host.Host.device_lookup_by_name = fake_nodeDeviceLookupByName
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    actjson = drvr._get_pci_passthrough_devices()
    expectvfs = [
        {
            "dev_id": "pci_0000_04_00_3",
            "address": "0000:04:00.3",
            "product_id": '1521',
            "vendor_id": '8086',
            "dev_type": 'type-PF',
            "phys_function": None,
            "numa_node": None},
        {
            "dev_id": "pci_0000_04_10_7",
            "domain": 0,
            "address": "0000:04:10.7",
            "product_id": '1520',
            "vendor_id": '8086',
            "numa_node": None,
            "dev_type": 'type-VF',
            "phys_function": [('0x0000', '0x04', '0x00', '0x3')]},
        {
            "dev_id": "pci_0000_04_11_7",
            "domain": 0,
            "address": "0000:04:11.7",
            "product_id": '1520',
            "vendor_id": '8086',
            "numa_node": 0,
            "dev_type": 'type-VF',
            "phys_function": [('0x0000', '0x04', '0x00', '0x3')],
        }
    ]
    # phys_function/virt_functions/label formats differ between the
    # expectation and the JSON payload, so compare the other keys only.
    actualvfs = jsonutils.loads(actjson)
    for dev in range(len(actualvfs)):
        for key in actualvfs[dev].keys():
            if key not in ['phys_function', 'virt_functions', 'label']:
                self.assertEqual(expectvfs[dev][key], actualvfs[dev][key])
def _fake_caps_numa_topology(self,
                             cells_per_host=4,
                             sockets_per_cell=1,
                             cores_per_socket=1,
                             threads_per_core=2,
                             kb_mem=1048576):
    """Build a fake libvirt capabilities NUMA topology.

    Each cell carries two mempage pools: a 4K pool with
    ``1024 * cellid`` pages and a 2M pool with ``cellid`` pages.
    """
    # Generate mempages list per cell
    cell_mempages = list()
    for cellid in range(cells_per_host):
        mempages_0 = vconfig.LibvirtConfigCapsNUMAPages()
        mempages_0.size = 4
        mempages_0.total = 1024 * cellid
        mempages_1 = vconfig.LibvirtConfigCapsNUMAPages()
        mempages_1.size = 2048
        mempages_1.total = 0 + cellid
        cell_mempages.append([mempages_0, mempages_1])
    topology = fakelibvirt.HostInfo._gen_numa_topology(cells_per_host,
                                                       sockets_per_cell,
                                                       cores_per_socket,
                                                       threads_per_core,
                                                       kb_mem=kb_mem,
                                                       numa_mempages_list=cell_mempages)
    return topology
def _test_get_host_numa_topology(self, mempages):
    """Shared body for host NUMA topology tests.

    Builds fake capabilities with four cells, constrains usable CPUs
    via the vcpu pin set and the online-CPU set, and checks the
    resulting cell dicts, mempages (when ``mempages`` is True),
    pinned_cpus and sibling sets.
    """
    caps = vconfig.LibvirtConfigCaps()
    caps.host = vconfig.LibvirtConfigCapsHost()
    caps.host.cpu = vconfig.LibvirtConfigCPU()
    caps.host.cpu.arch = arch.X86_64
    caps.host.topology = self._fake_caps_numa_topology()
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    # Per-cell cpus = intersection of the cell's CPUs with the pin set
    # and the online set (e.g. cell 1 keeps only cpu 3).
    expected_topo_dict = {'cells': [
                            {'cpus': '0,1', 'cpu_usage': 0,
                              'mem': {'total': 256, 'used': 0},
                              'id': 0},
                             {'cpus': '3', 'cpu_usage': 0,
                              'mem': {'total': 256, 'used': 0},
                              'id': 1},
                             {'cpus': '', 'cpu_usage': 0,
                              'mem': {'total': 256, 'used': 0},
                              'id': 2},
                             {'cpus': '', 'cpu_usage': 0,
                              'mem': {'total': 256, 'used': 0},
                              'id': 3}]}
    with contextlib.nested(
            mock.patch.object(host.Host, "get_capabilities",
                              return_value=caps),
            mock.patch.object(
                hardware, 'get_vcpu_pin_set',
                return_value=set([0, 1, 3, 4, 5])),
            mock.patch.object(host.Host, 'get_online_cpus',
                              return_value=set([0, 1, 2, 3, 6])),
            ):
        got_topo = drvr._get_host_numa_topology()
        got_topo_dict = got_topo._to_dict()
        self.assertThat(
                expected_topo_dict, matchers.DictMatches(got_topo_dict))
        if mempages:
            # cells 0
            self.assertEqual(4, got_topo.cells[0].mempages[0].size_kb)
            self.assertEqual(0, got_topo.cells[0].mempages[0].total)
            self.assertEqual(2048, got_topo.cells[0].mempages[1].size_kb)
            self.assertEqual(0, got_topo.cells[0].mempages[1].total)
            # cells 1
            self.assertEqual(4, got_topo.cells[1].mempages[0].size_kb)
            self.assertEqual(1024, got_topo.cells[1].mempages[0].total)
            self.assertEqual(2048, got_topo.cells[1].mempages[1].size_kb)
            self.assertEqual(1, got_topo.cells[1].mempages[1].total)
        else:
            self.assertEqual([], got_topo.cells[0].mempages)
            self.assertEqual([], got_topo.cells[1].mempages)
        self.assertEqual(expected_topo_dict, got_topo_dict)
        self.assertEqual(set([]), got_topo.cells[0].pinned_cpus)
        self.assertEqual(set([]), got_topo.cells[1].pinned_cpus)
        self.assertEqual(set([]), got_topo.cells[2].pinned_cpus)
        self.assertEqual(set([]), got_topo.cells[3].pinned_cpus)
        self.assertEqual([set([0, 1])], got_topo.cells[0].siblings)
        self.assertEqual([], got_topo.cells[1].siblings)
@mock.patch.object(host.Host, 'has_min_version', return_value=True)
def test_get_host_numa_topology(self, mock_version):
    """Recent libvirt: topology is reported including mempages."""
    self._test_get_host_numa_topology(mempages=True)
@mock.patch.object(fakelibvirt.Connection, 'getType')
@mock.patch.object(fakelibvirt.Connection, 'getVersion')
@mock.patch.object(fakelibvirt.Connection, 'getLibVersion')
def test_get_host_numa_topology_no_mempages(self, mock_lib_version,
                                            mock_version, mock_type):
    """libvirt older than the hugepage minimum: topology is reported
    but with empty mempages lists.
    """
    self.flags(virt_type='kvm', group='libvirt')
    # One less than the minimum hugepage-capable libvirt version.
    mock_lib_version.return_value = utils.convert_version_to_int(
            libvirt_driver.MIN_LIBVIRT_HUGEPAGE_VERSION) - 1
    mock_version.return_value = utils.convert_version_to_int(
            libvirt_driver.MIN_QEMU_NUMA_HUGEPAGE_VERSION)
    mock_type.return_value = host.HV_DRIVER_QEMU
    self._test_get_host_numa_topology(mempages=False)
def test_get_host_numa_topology_empty(self):
    """No host topology in the capabilities: result is None."""
    caps = vconfig.LibvirtConfigCaps()
    caps.host = vconfig.LibvirtConfigCapsHost()
    caps.host.cpu = vconfig.LibvirtConfigCPU()
    caps.host.cpu.arch = arch.X86_64
    caps.host.topology = None
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    with contextlib.nested(
        mock.patch.object(host.Host, 'has_min_version', return_value=True),
        mock.patch.object(host.Host, "get_capabilities",
                          return_value=caps)
    ) as (has_min_version, get_caps):
        self.assertIsNone(drvr._get_host_numa_topology())
    self.assertEqual(2, get_caps.call_count)
@mock.patch.object(fakelibvirt.Connection, 'getType')
@mock.patch.object(fakelibvirt.Connection, 'getVersion')
@mock.patch.object(fakelibvirt.Connection, 'getLibVersion')
def test_get_host_numa_topology_old_version(self, mock_lib_version,
                                            mock_version, mock_type):
    """libvirt older than the NUMA minimum: no topology (None)."""
    self.flags(virt_type='kvm', group='libvirt')
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    # One less than the minimum NUMA-capable libvirt version.
    mock_lib_version.return_value = utils.convert_version_to_int(
            libvirt_driver.MIN_LIBVIRT_NUMA_VERSION) - 1
    mock_version.return_value = utils.convert_version_to_int(
            libvirt_driver.MIN_QEMU_NUMA_HUGEPAGE_VERSION)
    mock_type.return_value = host.HV_DRIVER_QEMU
    self.assertIsNone(drvr._get_host_numa_topology())
@mock.patch.object(fakelibvirt.Connection, 'getType')
@mock.patch.object(fakelibvirt.Connection, 'getVersion')
@mock.patch.object(fakelibvirt.Connection, 'getLibVersion')
def test_get_host_numa_topology_xen(self, mock_lib_version,
                                    mock_version, mock_type):
    """Xen hypervisor: NUMA topology is not supported (None)."""
    self.flags(virt_type='xen', group='libvirt')
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    mock_lib_version.return_value = utils.convert_version_to_int(
            libvirt_driver.MIN_LIBVIRT_NUMA_VERSION)
    mock_version.return_value = utils.convert_version_to_int(
            libvirt_driver.MIN_QEMU_NUMA_HUGEPAGE_VERSION)
    mock_type.return_value = host.HV_DRIVER_XEN
    self.assertIsNone(drvr._get_host_numa_topology())
def test_diagnostic_vcpus_exception(self):
    """Diagnostics degrade gracefully when vcpus() raises: cpu stats
    are omitted while disk, nic and memory stats are still reported.
    """
    xml = """
<domain type='kvm'>
<devices>
<disk type='file'>
<source file='filename'/>
<target dev='vda' bus='virtio'/>
</disk>
<disk type='block'>
<source dev='/path/to/dev/1'/>
<target dev='vdb' bus='virtio'/>
</disk>
<interface type='network'>
<mac address='52:54:00:a4:38:38'/>
<source network='default'/>
<target dev='vnet0'/>
</interface>
</devices>
</domain>
"""
    # Fake domain whose vcpus() call fails but other stats succeed.
    class DiagFakeDomain(FakeVirtDomain):
        def __init__(self):
            super(DiagFakeDomain, self).__init__(fake_xml=xml)
        def vcpus(self):
            raise fakelibvirt.libvirtError('vcpus missing')
        def blockStats(self, path):
            return (169, 688640, 0, 0, -1)
        def interfaceStats(self, path):
            return (4408, 82, 0, 0, 0, 0, 0, 0)
        def memoryStats(self):
            return {'actual': 220160, 'rss': 200164}
        def maxMemory(self):
            return 280160
    def fake_get_domain(self, instance):
        return DiagFakeDomain()
    self.stubs.Set(host.Host, "get_domain", fake_get_domain)
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    instance = objects.Instance(**self.test_instance)
    actual = drvr.get_diagnostics(instance)
    # Legacy diagnostics: no cpuN_time keys because vcpus() failed.
    expect = {'vda_read': 688640,
              'vda_read_req': 169,
              'vda_write': 0,
              'vda_write_req': 0,
              'vda_errors': -1,
              'vdb_read': 688640,
              'vdb_read_req': 169,
              'vdb_write': 0,
              'vdb_write_req': 0,
              'vdb_errors': -1,
              'memory': 280160,
              'memory-actual': 220160,
              'memory-rss': 200164,
              'vnet0_rx': 4408,
              'vnet0_rx_drop': 0,
              'vnet0_rx_errors': 0,
              'vnet0_rx_packets': 82,
              'vnet0_tx': 0,
              'vnet0_tx_drop': 0,
              'vnet0_tx_errors': 0,
              'vnet0_tx_packets': 0,
              }
    self.assertEqual(actual, expect)
    # Versioned diagnostics object: empty cpu_details for same reason.
    lt = datetime.datetime(2012, 11, 22, 12, 00, 00)
    diags_time = datetime.datetime(2012, 11, 22, 12, 00, 10)
    timeutils.set_time_override(diags_time)
    instance.launched_at = lt
    actual = drvr.get_instance_diagnostics(instance)
    expected = {'config_drive': False,
                'cpu_details': [],
                'disk_details': [{'errors_count': 0,
                                  'id': '',
                                  'read_bytes': 688640,
                                  'read_requests': 169,
                                  'write_bytes': 0,
                                  'write_requests': 0},
                                 {'errors_count': 0,
                                  'id': '',
                                  'read_bytes': 688640,
                                  'read_requests': 169,
                                  'write_bytes': 0,
                                  'write_requests': 0}],
                'driver': 'libvirt',
                'hypervisor_os': 'linux',
                'memory_details': {'maximum': 2048, 'used': 1234},
                'nic_details': [{'mac_address': '52:54:00:a4:38:38',
                                 'rx_drop': 0,
                                 'rx_errors': 0,
                                 'rx_octets': 4408,
                                 'rx_packets': 82,
                                 'tx_drop': 0,
                                 'tx_errors': 0,
                                 'tx_octets': 0,
                                 'tx_packets': 0}],
                'state': 'running',
                'uptime': 10,
                'version': '1.0'}
    self.assertEqual(expected, actual.serialize())
def test_diagnostic_blockstats_exception(self):
    """Diagnostics degrade gracefully when blockStats() raises: disk
    stats are omitted while cpu, nic and memory stats are reported.
    """
    xml = """
<domain type='kvm'>
<devices>
<disk type='file'>
<source file='filename'/>
<target dev='vda' bus='virtio'/>
</disk>
<disk type='block'>
<source dev='/path/to/dev/1'/>
<target dev='vdb' bus='virtio'/>
</disk>
<interface type='network'>
<mac address='52:54:00:a4:38:38'/>
<source network='default'/>
<target dev='vnet0'/>
</interface>
</devices>
</domain>
"""
    # Fake domain whose blockStats() call fails but others succeed.
    class DiagFakeDomain(FakeVirtDomain):
        def __init__(self):
            super(DiagFakeDomain, self).__init__(fake_xml=xml)
        def vcpus(self):
            return ([(0, 1, 15340000000, 0),
                     (1, 1, 1640000000, 0),
                     (2, 1, 3040000000, 0),
                     (3, 1, 1420000000, 0)],
                    [(True, False),
                     (True, False),
                     (True, False),
                     (True, False)])
        def blockStats(self, path):
            raise fakelibvirt.libvirtError('blockStats missing')
        def interfaceStats(self, path):
            return (4408, 82, 0, 0, 0, 0, 0, 0)
        def memoryStats(self):
            return {'actual': 220160, 'rss': 200164}
        def maxMemory(self):
            return 280160
    def fake_get_domain(self, instance):
        return DiagFakeDomain()
    self.stubs.Set(host.Host, "get_domain", fake_get_domain)
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    instance = objects.Instance(**self.test_instance)
    actual = drvr.get_diagnostics(instance)
    # Legacy diagnostics: no vdX_* keys because blockStats() failed.
    expect = {'cpu0_time': 15340000000,
              'cpu1_time': 1640000000,
              'cpu2_time': 3040000000,
              'cpu3_time': 1420000000,
              'memory': 280160,
              'memory-actual': 220160,
              'memory-rss': 200164,
              'vnet0_rx': 4408,
              'vnet0_rx_drop': 0,
              'vnet0_rx_errors': 0,
              'vnet0_rx_packets': 82,
              'vnet0_tx': 0,
              'vnet0_tx_drop': 0,
              'vnet0_tx_errors': 0,
              'vnet0_tx_packets': 0,
              }
    self.assertEqual(actual, expect)
    # Versioned diagnostics object: empty disk_details for same reason.
    lt = datetime.datetime(2012, 11, 22, 12, 00, 00)
    diags_time = datetime.datetime(2012, 11, 22, 12, 00, 10)
    timeutils.set_time_override(diags_time)
    instance.launched_at = lt
    actual = drvr.get_instance_diagnostics(instance)
    expected = {'config_drive': False,
                'cpu_details': [{'time': 15340000000},
                                {'time': 1640000000},
                                {'time': 3040000000},
                                {'time': 1420000000}],
                'disk_details': [],
                'driver': 'libvirt',
                'hypervisor_os': 'linux',
                'memory_details': {'maximum': 2048, 'used': 1234},
                'nic_details': [{'mac_address': '52:54:00:a4:38:38',
                                 'rx_drop': 0,
                                 'rx_errors': 0,
                                 'rx_octets': 4408,
                                 'rx_packets': 82,
                                 'tx_drop': 0,
                                 'tx_errors': 0,
                                 'tx_octets': 0,
                                 'tx_packets': 0}],
                'state': 'running',
                'uptime': 10,
                'version': '1.0'}
    self.assertEqual(expected, actual.serialize())
def test_diagnostic_interfacestats_exception(self):
    """Diagnostics degrade gracefully when interfaceStats() raises:
    nic stats are omitted while cpu, disk and memory stats remain.
    """
    xml = """
<domain type='kvm'>
<devices>
<disk type='file'>
<source file='filename'/>
<target dev='vda' bus='virtio'/>
</disk>
<disk type='block'>
<source dev='/path/to/dev/1'/>
<target dev='vdb' bus='virtio'/>
</disk>
<interface type='network'>
<mac address='52:54:00:a4:38:38'/>
<source network='default'/>
<target dev='vnet0'/>
</interface>
</devices>
</domain>
"""
    # Fake domain whose interfaceStats() call fails but others succeed.
    class DiagFakeDomain(FakeVirtDomain):
        def __init__(self):
            super(DiagFakeDomain, self).__init__(fake_xml=xml)
        def vcpus(self):
            return ([(0, 1, 15340000000, 0),
                     (1, 1, 1640000000, 0),
                     (2, 1, 3040000000, 0),
                     (3, 1, 1420000000, 0)],
                    [(True, False),
                     (True, False),
                     (True, False),
                     (True, False)])
        def blockStats(self, path):
            return (169, 688640, 0, 0, -1)
        def interfaceStats(self, path):
            raise fakelibvirt.libvirtError('interfaceStat missing')
        def memoryStats(self):
            return {'actual': 220160, 'rss': 200164}
        def maxMemory(self):
            return 280160
    def fake_get_domain(self, instance):
        return DiagFakeDomain()
    self.stubs.Set(host.Host, "get_domain", fake_get_domain)
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    instance = objects.Instance(**self.test_instance)
    actual = drvr.get_diagnostics(instance)
    # Legacy diagnostics: no vnet0_* keys because interfaceStats failed.
    expect = {'cpu0_time': 15340000000,
              'cpu1_time': 1640000000,
              'cpu2_time': 3040000000,
              'cpu3_time': 1420000000,
              'vda_read': 688640,
              'vda_read_req': 169,
              'vda_write': 0,
              'vda_write_req': 0,
              'vda_errors': -1,
              'vdb_read': 688640,
              'vdb_read_req': 169,
              'vdb_write': 0,
              'vdb_write_req': 0,
              'vdb_errors': -1,
              'memory': 280160,
              'memory-actual': 220160,
              'memory-rss': 200164,
              }
    self.assertEqual(actual, expect)
    # Versioned diagnostics object: empty nic_details for same reason.
    lt = datetime.datetime(2012, 11, 22, 12, 00, 00)
    diags_time = datetime.datetime(2012, 11, 22, 12, 00, 10)
    timeutils.set_time_override(diags_time)
    instance.launched_at = lt
    actual = drvr.get_instance_diagnostics(instance)
    expected = {'config_drive': False,
                'cpu_details': [{'time': 15340000000},
                                {'time': 1640000000},
                                {'time': 3040000000},
                                {'time': 1420000000}],
                'disk_details': [{'errors_count': 0,
                                  'id': '',
                                  'read_bytes': 688640,
                                  'read_requests': 169,
                                  'write_bytes': 0,
                                  'write_requests': 0},
                                 {'errors_count': 0,
                                  'id': '',
                                  'read_bytes': 688640,
                                  'read_requests': 169,
                                  'write_bytes': 0,
                                  'write_requests': 0}],
                'driver': 'libvirt',
                'hypervisor_os': 'linux',
                'memory_details': {'maximum': 2048, 'used': 1234},
                'nic_details': [],
                'state': 'running',
                'uptime': 10,
                'version': '1.0'}
    self.assertEqual(expected, actual.serialize())
def test_diagnostic_memorystats_exception(self):
    """Diagnostics survive a libvirt memoryStats() failure.

    The legacy get_diagnostics() result must keep the maxMemory-based
    'memory' key but omit 'memory-actual'/'memory-rss'; the v2.1
    diagnostics keep the default memory_details. All other stats are
    unaffected.
    """
    # Domain XML with two disks and one interface.
    xml = """
            <domain type='kvm'>
                <devices>
                    <disk type='file'>
                        <source file='filename'/>
                        <target dev='vda' bus='virtio'/>
                    </disk>
                    <disk type='block'>
                        <source dev='/path/to/dev/1'/>
                        <target dev='vdb' bus='virtio'/>
                    </disk>
                    <interface type='network'>
                        <mac address='52:54:00:a4:38:38'/>
                        <source network='default'/>
                        <target dev='vnet0'/>
                    </interface>
                </devices>
            </domain>
        """

    class DiagFakeDomain(FakeVirtDomain):
        # Canned stats for everything except memoryStats(), which
        # raises like a real transient libvirt error would.

        def __init__(self):
            super(DiagFakeDomain, self).__init__(fake_xml=xml)

        def vcpus(self):
            return ([(0, 1, 15340000000, 0),
                     (1, 1, 1640000000, 0),
                     (2, 1, 3040000000, 0),
                     (3, 1, 1420000000, 0)],
                    [(True, False),
                     (True, False),
                     (True, False),
                     (True, False)])

        def blockStats(self, path):
            return (169, 688640, 0, 0, -1)

        def interfaceStats(self, path):
            return (4408, 82, 0, 0, 0, 0, 0, 0)

        def memoryStats(self):
            # The driver must swallow this and skip detailed memory
            # stats.
            raise fakelibvirt.libvirtError('memoryStats missing')

        def maxMemory(self):
            return 280160

    def fake_get_domain(self, instance):
        return DiagFakeDomain()

    self.stubs.Set(host.Host, "get_domain", fake_get_domain)

    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    instance = objects.Instance(**self.test_instance)
    actual = drvr.get_diagnostics(instance)
    # Note: 'memory' (from maxMemory) is present, but the
    # memoryStats-derived 'memory-actual'/'memory-rss' keys are not.
    expect = {'cpu0_time': 15340000000,
              'cpu1_time': 1640000000,
              'cpu2_time': 3040000000,
              'cpu3_time': 1420000000,
              'vda_read': 688640,
              'vda_read_req': 169,
              'vda_write': 0,
              'vda_write_req': 0,
              'vda_errors': -1,
              'vdb_read': 688640,
              'vdb_read_req': 169,
              'vdb_write': 0,
              'vdb_write_req': 0,
              'vdb_errors': -1,
              'memory': 280160,
              'vnet0_rx': 4408,
              'vnet0_rx_drop': 0,
              'vnet0_rx_errors': 0,
              'vnet0_rx_packets': 82,
              'vnet0_tx': 0,
              'vnet0_tx_drop': 0,
              'vnet0_tx_errors': 0,
              'vnet0_tx_packets': 0,
              }
    self.assertEqual(actual, expect)

    # uptime = diags_time - launched_at = 10 seconds.
    lt = datetime.datetime(2012, 11, 22, 12, 00, 00)
    diags_time = datetime.datetime(2012, 11, 22, 12, 00, 10)
    timeutils.set_time_override(diags_time)
    instance.launched_at = lt
    actual = drvr.get_instance_diagnostics(instance)
    expected = {'config_drive': False,
                'cpu_details': [{'time': 15340000000},
                                {'time': 1640000000},
                                {'time': 3040000000},
                                {'time': 1420000000}],
                'disk_details': [{'errors_count': 0,
                                  'id': '',
                                  'read_bytes': 688640,
                                  'read_requests': 169,
                                  'write_bytes': 0,
                                  'write_requests': 0},
                                 {'errors_count': 0,
                                  'id': '',
                                  'read_bytes': 688640,
                                  'read_requests': 169,
                                  'write_bytes': 0,
                                  'write_requests': 0}],
                'driver': 'libvirt',
                'hypervisor_os': 'linux',
                'memory_details': {'maximum': 2048, 'used': 1234},
                'nic_details': [{'mac_address': '52:54:00:a4:38:38',
                                 'rx_drop': 0,
                                 'rx_errors': 0,
                                 'rx_octets': 4408,
                                 'rx_packets': 82,
                                 'tx_drop': 0,
                                 'tx_errors': 0,
                                 'tx_octets': 0,
                                 'tx_packets': 0}],
                'state': 'running',
                'uptime': 10,
                'version': '1.0'}
    self.assertEqual(expected, actual.serialize())
def test_diagnostic_full(self):
    """Happy path: every libvirt stats call succeeds.

    Both the legacy get_diagnostics() dict and the v2.1
    get_instance_diagnostics() serialization must contain the complete
    CPU, disk, NIC and memory data.
    """
    # Domain XML with two disks and one interface.
    xml = """
            <domain type='kvm'>
                <devices>
                    <disk type='file'>
                        <source file='filename'/>
                        <target dev='vda' bus='virtio'/>
                    </disk>
                    <disk type='block'>
                        <source dev='/path/to/dev/1'/>
                        <target dev='vdb' bus='virtio'/>
                    </disk>
                    <interface type='network'>
                        <mac address='52:54:00:a4:38:38'/>
                        <source network='default'/>
                        <target dev='vnet0'/>
                    </interface>
                </devices>
            </domain>
        """

    class DiagFakeDomain(FakeVirtDomain):
        # Canned, fully-successful stats for every libvirt call the
        # diagnostics code makes.

        def __init__(self):
            super(DiagFakeDomain, self).__init__(fake_xml=xml)

        def vcpus(self):
            return ([(0, 1, 15340000000, 0),
                     (1, 1, 1640000000, 0),
                     (2, 1, 3040000000, 0),
                     (3, 1, 1420000000, 0)],
                    [(True, False),
                     (True, False),
                     (True, False),
                     (True, False)])

        def blockStats(self, path):
            return (169, 688640, 0, 0, -1)

        def interfaceStats(self, path):
            return (4408, 82, 0, 0, 0, 0, 0, 0)

        def memoryStats(self):
            return {'actual': 220160, 'rss': 200164}

        def maxMemory(self):
            return 280160

    def fake_get_domain(self, instance):
        return DiagFakeDomain()

    self.stubs.Set(host.Host, "get_domain", fake_get_domain)

    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    instance = objects.Instance(**self.test_instance)
    actual = drvr.get_diagnostics(instance)
    expect = {'cpu0_time': 15340000000,
              'cpu1_time': 1640000000,
              'cpu2_time': 3040000000,
              'cpu3_time': 1420000000,
              'vda_read': 688640,
              'vda_read_req': 169,
              'vda_write': 0,
              'vda_write_req': 0,
              'vda_errors': -1,
              'vdb_read': 688640,
              'vdb_read_req': 169,
              'vdb_write': 0,
              'vdb_write_req': 0,
              'vdb_errors': -1,
              'memory': 280160,
              'memory-actual': 220160,
              'memory-rss': 200164,
              'vnet0_rx': 4408,
              'vnet0_rx_drop': 0,
              'vnet0_rx_errors': 0,
              'vnet0_rx_packets': 82,
              'vnet0_tx': 0,
              'vnet0_tx_drop': 0,
              'vnet0_tx_errors': 0,
              'vnet0_tx_packets': 0,
              }
    self.assertEqual(actual, expect)

    # uptime = diags_time - launched_at = 10 seconds.
    lt = datetime.datetime(2012, 11, 22, 12, 00, 00)
    diags_time = datetime.datetime(2012, 11, 22, 12, 00, 10)
    timeutils.set_time_override(diags_time)
    instance.launched_at = lt
    actual = drvr.get_instance_diagnostics(instance)
    expected = {'config_drive': False,
                'cpu_details': [{'time': 15340000000},
                                {'time': 1640000000},
                                {'time': 3040000000},
                                {'time': 1420000000}],
                'disk_details': [{'errors_count': 0,
                                  'id': '',
                                  'read_bytes': 688640,
                                  'read_requests': 169,
                                  'write_bytes': 0,
                                  'write_requests': 0},
                                 {'errors_count': 0,
                                  'id': '',
                                  'read_bytes': 688640,
                                  'read_requests': 169,
                                  'write_bytes': 0,
                                  'write_requests': 0}],
                'driver': 'libvirt',
                'hypervisor_os': 'linux',
                'memory_details': {'maximum': 2048, 'used': 1234},
                'nic_details': [{'mac_address': '52:54:00:a4:38:38',
                                 'rx_drop': 0,
                                 'rx_errors': 0,
                                 'rx_octets': 4408,
                                 'rx_packets': 82,
                                 'tx_drop': 0,
                                 'tx_errors': 0,
                                 'tx_octets': 0,
                                 'tx_packets': 0}],
                'state': 'running',
                'uptime': 10,
                'version': '1.0'}
    self.assertEqual(expected, actual.serialize())
@mock.patch.object(timeutils, 'utcnow')
@mock.patch.object(host.Host, 'get_domain')
def test_diagnostic_full_with_multiple_interfaces(self, mock_get_domain,
                                                  mock_utcnow):
    """Diagnostics report per-interface stats for every NIC in the XML.

    With two interfaces (vnet0, br0) the legacy dict must carry both
    vnet0_* and br0_* counters, and the v2.1 serialization must list
    both entries in nic_details.
    """
    # Domain XML with two disks and two interfaces (network + bridge).
    xml = """
            <domain type='kvm'>
                <devices>
                    <disk type='file'>
                        <source file='filename'/>
                        <target dev='vda' bus='virtio'/>
                    </disk>
                    <disk type='block'>
                        <source dev='/path/to/dev/1'/>
                        <target dev='vdb' bus='virtio'/>
                    </disk>
                    <interface type='network'>
                        <mac address='52:54:00:a4:38:38'/>
                        <source network='default'/>
                        <target dev='vnet0'/>
                    </interface>
                    <interface type="bridge">
                        <mac address="53:55:00:a5:39:39"/>
                        <model type="virtio"/>
                        <target dev="br0"/>
                    </interface>
                </devices>
            </domain>
        """

    class DiagFakeDomain(FakeVirtDomain):
        # Canned, fully-successful stats; both interfaces share the
        # same interfaceStats() numbers.

        def __init__(self):
            super(DiagFakeDomain, self).__init__(fake_xml=xml)

        def vcpus(self):
            return ([(0, 1, 15340000000, 0),
                     (1, 1, 1640000000, 0),
                     (2, 1, 3040000000, 0),
                     (3, 1, 1420000000, 0)],
                    [(True, False),
                     (True, False),
                     (True, False),
                     (True, False)])

        def blockStats(self, path):
            return (169, 688640, 0, 0, -1)

        def interfaceStats(self, path):
            return (4408, 82, 0, 0, 0, 0, 0, 0)

        def memoryStats(self):
            return {'actual': 220160, 'rss': 200164}

        def maxMemory(self):
            return 280160

    # NOTE(review): used as the mock's side_effect, so the parameter
    # named 'self' actually receives the argument get_domain() is
    # called with (presumably the instance) — it is ignored anyway.
    def fake_get_domain(self):
        return DiagFakeDomain()

    mock_get_domain.side_effect = fake_get_domain
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    instance = objects.Instance(**self.test_instance)
    actual = drvr.get_diagnostics(instance)
    expect = {'cpu0_time': 15340000000,
              'cpu1_time': 1640000000,
              'cpu2_time': 3040000000,
              'cpu3_time': 1420000000,
              'vda_read': 688640,
              'vda_read_req': 169,
              'vda_write': 0,
              'vda_write_req': 0,
              'vda_errors': -1,
              'vdb_read': 688640,
              'vdb_read_req': 169,
              'vdb_write': 0,
              'vdb_write_req': 0,
              'vdb_errors': -1,
              'memory': 280160,
              'memory-actual': 220160,
              'memory-rss': 200164,
              'vnet0_rx': 4408,
              'vnet0_rx_drop': 0,
              'vnet0_rx_errors': 0,
              'vnet0_rx_packets': 82,
              'vnet0_tx': 0,
              'vnet0_tx_drop': 0,
              'vnet0_tx_errors': 0,
              'vnet0_tx_packets': 0,
              'br0_rx': 4408,
              'br0_rx_drop': 0,
              'br0_rx_errors': 0,
              'br0_rx_packets': 82,
              'br0_tx': 0,
              'br0_tx_drop': 0,
              'br0_tx_errors': 0,
              'br0_tx_packets': 0,
              }
    self.assertEqual(actual, expect)

    # uptime = utcnow() - launched_at = 10 seconds.
    lt = datetime.datetime(2012, 11, 22, 12, 00, 00)
    diags_time = datetime.datetime(2012, 11, 22, 12, 00, 10)
    mock_utcnow.return_value = diags_time
    instance.launched_at = lt
    actual = drvr.get_instance_diagnostics(instance)
    expected = {'config_drive': False,
                'cpu_details': [{'time': 15340000000},
                                {'time': 1640000000},
                                {'time': 3040000000},
                                {'time': 1420000000}],
                'disk_details': [{'errors_count': 0,
                                  'id': '',
                                  'read_bytes': 688640,
                                  'read_requests': 169,
                                  'write_bytes': 0,
                                  'write_requests': 0},
                                 {'errors_count': 0,
                                  'id': '',
                                  'read_bytes': 688640,
                                  'read_requests': 169,
                                  'write_bytes': 0,
                                  'write_requests': 0}],
                'driver': 'libvirt',
                'hypervisor_os': 'linux',
                'memory_details': {'maximum': 2048, 'used': 1234},
                'nic_details': [{'mac_address': '52:54:00:a4:38:38',
                                 'rx_drop': 0,
                                 'rx_errors': 0,
                                 'rx_octets': 4408,
                                 'rx_packets': 82,
                                 'tx_drop': 0,
                                 'tx_errors': 0,
                                 'tx_octets': 0,
                                 'tx_packets': 0},
                                {'mac_address': '53:55:00:a5:39:39',
                                 'rx_drop': 0,
                                 'rx_errors': 0,
                                 'rx_octets': 4408,
                                 'rx_packets': 82,
                                 'tx_drop': 0,
                                 'tx_errors': 0,
                                 'tx_octets': 0,
                                 'tx_packets': 0}],
                'state': 'running',
                # 10. (float) — compares equal to 10 anyway.
                'uptime': 10.,
                'version': '1.0'}
    self.assertEqual(expected, actual.serialize())
@mock.patch.object(host.Host, "list_instance_domains")
def test_failing_vcpu_count(self, mock_list):
    """A domain that raises from vcpus() must not break the tally.

    Domains that are just starting up or shutting down can fail to
    report their vcpu layout; such domains are skipped and only the
    healthy ones contribute to the used-vcpu count.
    """
    class FlakyVcpusDomain(object):
        """Fake domain whose vcpus() errors out when built with None."""

        def __init__(self, vcpus):
            self._vcpu_count = vcpus

        def vcpus(self):
            if self._vcpu_count is None:
                raise fakelibvirt.libvirtError("fake-error")
            return ([[1, 2, 3, 4]] * self._vcpu_count,
                    [True] * self._vcpu_count)

        def ID(self):
            return 1

        def name(self):
            return "instance000001"

        def UUIDString(self):
            return "19479fee-07a5-49bb-9138-d3738280d63c"

    # One broken domain plus one reporting five vcpus.
    mock_list.return_value = [FlakyVcpusDomain(None), FlakyVcpusDomain(5)]

    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    self.assertEqual(5, drvr._get_vcpu_used())
    mock_list.assert_called_with()
@mock.patch.object(host.Host, "list_instance_domains")
def test_failing_vcpu_count_none(self, mock_list):
    """A domain whose vcpus() yields None counts as zero vcpus used.

    This covers VMs that are starting up or shutting down and hence
    report no vcpu information at all.
    """
    class NoVcpuInfoDomain(object):
        """Fake domain that reports no vcpu information."""

        def vcpus(self):
            return None

        def ID(self):
            return 1

        def name(self):
            return "instance000001"

    mock_list.return_value = [NoVcpuInfoDomain()]

    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    self.assertEqual(0, drvr._get_vcpu_used())
    mock_list.assert_called_with()
def test_get_instance_capabilities(self):
    """_get_instance_capabilities flattens host caps to (arch, domtype, ostype)."""
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)

    def get_host_capabilities_stub(self):
        # Advertise x86_64 with kvm+qemu and i686 with kvm only.
        caps = vconfig.LibvirtConfigCaps()
        for guest_arch, domains in ((arch.X86_64, ['kvm', 'qemu']),
                                    (arch.I686, ['kvm'])):
            guest = vconfig.LibvirtConfigGuest()
            guest.ostype = 'hvm'
            guest.arch = guest_arch
            guest.domtype = domains
            caps.guests.append(guest)
        return caps

    self.stubs.Set(host.Host, "get_capabilities",
                   get_host_capabilities_stub)

    # One tuple per (arch, domain-type) combination, in caps order.
    want = [(arch.X86_64, 'kvm', 'hvm'),
            (arch.X86_64, 'qemu', 'hvm'),
            (arch.I686, 'kvm', 'hvm')]
    got = drvr._get_instance_capabilities()
    self.assertEqual(want, got)
def test_set_cache_mode(self):
    """_set_cache_mode applies the configured mode for a matching source type."""
    self.flags(disk_cachemodes=['file=directsync'], group='libvirt')
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)

    disk_conf = FakeConfigGuestDisk()
    disk_conf.source_type = 'file'
    drvr._set_cache_mode(disk_conf)

    self.assertEqual('directsync', disk_conf.driver_cache)
def test_set_cache_mode_invalid_mode(self):
    """An unrecognized cache mode in the config is silently ignored."""
    self.flags(disk_cachemodes=['file=FAKE'], group='libvirt')
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)

    disk_conf = FakeConfigGuestDisk()
    disk_conf.source_type = 'file'
    drvr._set_cache_mode(disk_conf)

    # 'FAKE' is not a valid libvirt cache mode, so nothing is set.
    self.assertIsNone(disk_conf.driver_cache)
def test_set_cache_mode_invalid_object(self):
    """_set_cache_mode leaves non-disk config objects untouched."""
    self.flags(disk_cachemodes=['file=directsync'], group='libvirt')
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)

    guest_conf = FakeConfigGuest()
    guest_conf.driver_cache = 'fake'
    drvr._set_cache_mode(guest_conf)

    # Not a disk config: the pre-existing value must survive.
    self.assertEqual('fake', guest_conf.driver_cache)
@mock.patch('os.unlink')
@mock.patch.object(os.path, 'exists')
def _test_shared_storage_detection(self, is_same,
                                   mock_exists, mock_unlink):
    """Drive _is_storage_shared_with() with a faked remote touch.

    The driver ssh-touches a file on the remote host; if the file then
    exists locally the storage is shared (and we clean it up with
    unlink), otherwise the remote copy is removed over ssh.
    """
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
    drvr.get_host_ip_addr = mock.MagicMock(return_value='bar')
    mock_exists.return_value = is_same

    with mock.patch('nova.utils.ssh_execute') as ssh_mock:
        result = drvr._is_storage_shared_with('foo', '/path')
        # The probe file is always created remotely first.
        ssh_mock.assert_any_call('foo', 'touch', mock.ANY)
        if is_same:
            # Shared: local unlink removes the probe file.
            mock_unlink.assert_called_once_with(mock.ANY)
        else:
            # Not shared: a second ssh call removes the remote file.
            self.assertEqual(2, ssh_mock.call_count)
            ssh_mock.assert_called_with('foo', 'rm', mock.ANY)

    return result
def test_shared_storage_detection_same_host(self):
    """Probe file visible locally -> storage is shared."""
    result = self._test_shared_storage_detection(True)
    self.assertTrue(result)
def test_shared_storage_detection_different_host(self):
    """Probe file not visible locally -> storage is not shared."""
    result = self._test_shared_storage_detection(False)
    self.assertFalse(result)
def test_shared_storage_detection_easy(self):
    """Same-host short-circuit: no filesystem or ssh probing needed.

    When get_host_ip_addr() matches the destination host, storage is
    trivially shared. execute/exists/unlink are stubbed out with no
    recorded expectations, so mox will fail the test if any of them
    is touched.
    """
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
    self.mox.StubOutWithMock(drvr, 'get_host_ip_addr')
    self.mox.StubOutWithMock(utils, 'execute')
    self.mox.StubOutWithMock(os.path, 'exists')
    self.mox.StubOutWithMock(os, 'unlink')
    drvr.get_host_ip_addr().AndReturn('foo')
    self.mox.ReplayAll()
    self.assertTrue(drvr._is_storage_shared_with('foo', '/path'))
@mock.patch('nova.virt.libvirt.host.Host.get_domain')
def test_get_domain_info_with_more_return(self, mock_get_domain):
    """get_info() tolerates extra trailing fields from dom.info().

    Newer libvirt may append fields beyond the canonical five; only
    the first five plus the domain ID should be consumed.
    """
    instance = objects.Instance(**self.test_instance)
    domain = mock.MagicMock()
    # Six fields: state, maxMem, mem, nrVirtCpu, cpuTime, plus one
    # extra that must be ignored.
    domain.info.return_value = [1, 2048, 737, 8, 12345, 888888]
    domain.ID.return_value = mock.sentinel.instance_id
    mock_get_domain.return_value = domain

    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    info = drvr.get_info(instance)

    for attr, value in (('state', 1),
                        ('max_mem_kb', 2048),
                        ('mem_kb', 737),
                        ('num_cpu', 8),
                        ('cpu_time_ns', 12345),
                        ('id', mock.sentinel.instance_id)):
        self.assertEqual(value, getattr(info, attr))

    domain.info.assert_called_once_with()
    domain.ID.assert_called_once_with()
    mock_get_domain.assert_called_once_with(instance)
def test_create_domain(self):
    """_create_domain wraps the domain in a Guest and powers it on."""
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
    domain = mock.MagicMock()

    guest = drvr._create_domain(domain=domain)

    self.assertEqual(domain, guest._domain)
    # Powered on with no special launch flags.
    domain.createWithFlags.assert_has_calls([mock.call(0)])
@mock.patch('nova.virt.disk.api.clean_lxc_namespace')
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver.get_info')
@mock.patch('nova.virt.disk.api.setup_container')
@mock.patch('nova.openstack.common.fileutils.ensure_tree')
@mock.patch.object(fake_libvirt_utils, 'get_instance_path')
def test_create_domain_lxc(self, mock_get_inst_path, mock_ensure_tree,
                           mock_setup_container, mock_get_info, mock_clean):
    """LXC spawn: container rootfs is set up and recorded, then cleaned.

    _create_domain_and_network() with virt_type=lxc must mount the
    instance image via setup_container(), record the returned device
    in instance system metadata, and clean the namespace once the
    instance reports RUNNING.
    """
    self.flags(virt_type='lxc', group='libvirt')
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
    mock_instance = mock.MagicMock()
    inst_sys_meta = dict()
    mock_instance.system_metadata = inst_sys_meta
    mock_get_inst_path.return_value = '/tmp/'
    mock_image_backend = mock.MagicMock()
    drvr.image_backend = mock_image_backend
    mock_image = mock.MagicMock()
    mock_image.path = '/tmp/test.img'
    drvr.image_backend.image.return_value = mock_image
    # setup_container() reports the nbd device backing the rootfs.
    mock_setup_container.return_value = '/dev/nbd0'
    mock_get_info.return_value = hardware.InstanceInfo(
        state=power_state.RUNNING)

    # Patch out everything around the container setup itself.
    with contextlib.nested(
        mock.patch.object(drvr, '_create_images_and_backing'),
        mock.patch.object(drvr, '_is_booted_from_volume',
                          return_value=False),
        mock.patch.object(drvr, '_create_domain'),
        mock.patch.object(drvr, 'plug_vifs'),
        mock.patch.object(drvr.firewall_driver, 'setup_basic_filtering'),
        mock.patch.object(drvr.firewall_driver, 'prepare_instance_filter'),
        mock.patch.object(drvr.firewall_driver, 'apply_instance_filter')):
        drvr._create_domain_and_network(self.context, 'xml',
                                        mock_instance, [], None)

    self.assertEqual('/dev/nbd0', inst_sys_meta['rootfs_device_name'])
    self.assertFalse(mock_instance.called)
    mock_get_inst_path.assert_has_calls([mock.call(mock_instance)])
    mock_ensure_tree.assert_has_calls([mock.call('/tmp/rootfs')])
    drvr.image_backend.image.assert_has_calls([mock.call(mock_instance,
                                                         'disk')])
    setup_container_call = mock.call('/tmp/test.img',
                                     container_dir='/tmp/rootfs',
                                     use_cow=CONF.use_cow_images)
    mock_setup_container.assert_has_calls([setup_container_call])
    mock_get_info.assert_has_calls([mock.call(mock_instance)])
    # RUNNING instance: namespace is cleaned (not torn down).
    mock_clean.assert_has_calls([mock.call(container_dir='/tmp/rootfs')])
@mock.patch('nova.virt.disk.api.clean_lxc_namespace')
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver.get_info')
@mock.patch.object(fake_libvirt_utils, 'chown_for_id_maps')
@mock.patch('nova.virt.disk.api.setup_container')
@mock.patch('nova.openstack.common.fileutils.ensure_tree')
@mock.patch.object(fake_libvirt_utils, 'get_instance_path')
def test_create_domain_lxc_id_maps(self, mock_get_inst_path,
                                   mock_ensure_tree, mock_setup_container,
                                   mock_chown, mock_get_info, mock_clean):
    """LXC spawn with uid/gid maps chowns the rootfs accordingly.

    Same flow as test_create_domain_lxc, plus uid_maps/gid_maps config
    which must be translated into LibvirtConfigGuest{U,G}IDMap objects
    and passed to chown_for_id_maps().
    """
    self.flags(virt_type='lxc', uid_maps=["0:1000:100"],
               gid_maps=["0:1000:100"], group='libvirt')

    def chown_side_effect(path, id_maps):
        # Validate the parsed "start:target:count" maps in-line.
        self.assertEqual('/tmp/rootfs', path)
        self.assertIsInstance(id_maps[0], vconfig.LibvirtConfigGuestUIDMap)
        self.assertEqual(0, id_maps[0].start)
        self.assertEqual(1000, id_maps[0].target)
        self.assertEqual(100, id_maps[0].count)
        self.assertIsInstance(id_maps[1], vconfig.LibvirtConfigGuestGIDMap)
        self.assertEqual(0, id_maps[1].start)
        self.assertEqual(1000, id_maps[1].target)
        self.assertEqual(100, id_maps[1].count)

    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
    mock_instance = mock.MagicMock()
    inst_sys_meta = dict()
    mock_instance.system_metadata = inst_sys_meta
    mock_get_inst_path.return_value = '/tmp/'
    mock_image_backend = mock.MagicMock()
    drvr.image_backend = mock_image_backend
    mock_image = mock.MagicMock()
    mock_image.path = '/tmp/test.img'
    drvr.image_backend.image.return_value = mock_image
    mock_setup_container.return_value = '/dev/nbd0'
    mock_chown.side_effect = chown_side_effect
    mock_get_info.return_value = hardware.InstanceInfo(
        state=power_state.RUNNING)

    # Patch out everything around the container setup itself.
    with contextlib.nested(
        mock.patch.object(drvr, '_create_images_and_backing'),
        mock.patch.object(drvr, '_is_booted_from_volume',
                          return_value=False),
        mock.patch.object(drvr, '_create_domain'),
        mock.patch.object(drvr, 'plug_vifs'),
        mock.patch.object(drvr.firewall_driver, 'setup_basic_filtering'),
        mock.patch.object(drvr.firewall_driver, 'prepare_instance_filter'),
        mock.patch.object(drvr.firewall_driver, 'apply_instance_filter')):
        drvr._create_domain_and_network(self.context, 'xml',
                                        mock_instance, [], None)

    self.assertEqual('/dev/nbd0', inst_sys_meta['rootfs_device_name'])
    self.assertFalse(mock_instance.called)
    mock_get_inst_path.assert_has_calls([mock.call(mock_instance)])
    mock_ensure_tree.assert_has_calls([mock.call('/tmp/rootfs')])
    drvr.image_backend.image.assert_has_calls([mock.call(mock_instance,
                                                         'disk')])
    setup_container_call = mock.call('/tmp/test.img',
                                     container_dir='/tmp/rootfs',
                                     use_cow=CONF.use_cow_images)
    mock_setup_container.assert_has_calls([setup_container_call])
    mock_get_info.assert_has_calls([mock.call(mock_instance)])
    mock_clean.assert_has_calls([mock.call(container_dir='/tmp/rootfs')])
@mock.patch('nova.virt.disk.api.teardown_container')
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver.get_info')
@mock.patch('nova.virt.disk.api.setup_container')
@mock.patch('nova.openstack.common.fileutils.ensure_tree')
@mock.patch.object(fake_libvirt_utils, 'get_instance_path')
def test_create_domain_lxc_not_running(self, mock_get_inst_path,
                                       mock_ensure_tree,
                                       mock_setup_container,
                                       mock_get_info, mock_teardown):
    """LXC spawn tears the container down if the instance isn't RUNNING.

    Same flow as test_create_domain_lxc, but get_info() reports
    SHUTDOWN, so teardown_container() must be called instead of
    clean_lxc_namespace().
    """
    self.flags(virt_type='lxc', group='libvirt')
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
    mock_instance = mock.MagicMock()
    inst_sys_meta = dict()
    mock_instance.system_metadata = inst_sys_meta
    mock_get_inst_path.return_value = '/tmp/'
    mock_image_backend = mock.MagicMock()
    drvr.image_backend = mock_image_backend
    mock_image = mock.MagicMock()
    mock_image.path = '/tmp/test.img'
    drvr.image_backend.image.return_value = mock_image
    mock_setup_container.return_value = '/dev/nbd0'
    # SHUTDOWN state triggers the teardown path.
    mock_get_info.return_value = hardware.InstanceInfo(
        state=power_state.SHUTDOWN)

    # Patch out everything around the container setup itself.
    with contextlib.nested(
        mock.patch.object(drvr, '_create_images_and_backing'),
        mock.patch.object(drvr, '_is_booted_from_volume',
                          return_value=False),
        mock.patch.object(drvr, '_create_domain'),
        mock.patch.object(drvr, 'plug_vifs'),
        mock.patch.object(drvr.firewall_driver, 'setup_basic_filtering'),
        mock.patch.object(drvr.firewall_driver, 'prepare_instance_filter'),
        mock.patch.object(drvr.firewall_driver, 'apply_instance_filter')):
        drvr._create_domain_and_network(self.context, 'xml',
                                        mock_instance, [], None)

    self.assertEqual('/dev/nbd0', inst_sys_meta['rootfs_device_name'])
    self.assertFalse(mock_instance.called)
    mock_get_inst_path.assert_has_calls([mock.call(mock_instance)])
    mock_ensure_tree.assert_has_calls([mock.call('/tmp/rootfs')])
    drvr.image_backend.image.assert_has_calls([mock.call(mock_instance,
                                                         'disk')])
    setup_container_call = mock.call('/tmp/test.img',
                                     container_dir='/tmp/rootfs',
                                     use_cow=CONF.use_cow_images)
    mock_setup_container.assert_has_calls([setup_container_call])
    mock_get_info.assert_has_calls([mock.call(mock_instance)])
    teardown_call = mock.call(container_dir='/tmp/rootfs')
    mock_teardown.assert_has_calls([teardown_call])
def test_create_domain_define_xml_fails(self):
    """Tests that the xml is logged when defining the domain fails."""
    fake_xml = "<test>this is a test</test>"

    def fake_defineXML(xml):
        # The driver must hand the exact XML to libvirt.
        self.assertEqual(fake_xml, xml)
        raise fakelibvirt.libvirtError('virDomainDefineXML() failed')

    def fake_safe_decode(text, *args, **kwargs):
        # Marker suffix proves the log path runs the XML through
        # encodeutils.safe_decode before logging it.
        return text + 'safe decoded'

    self.log_error_called = False

    def fake_error(msg, *args, **kwargs):
        self.log_error_called = True
        # The logged message must contain the (safe-decoded) XML.
        self.assertIn(fake_xml, msg % args)
        self.assertIn('safe decoded', msg % args)

    self.stubs.Set(encodeutils, 'safe_decode', fake_safe_decode)
    self.stubs.Set(nova.virt.libvirt.guest.LOG, 'error', fake_error)

    self.create_fake_libvirt_mock(defineXML=fake_defineXML)
    self.mox.ReplayAll()
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
    # The libvirt error must propagate after being logged.
    self.assertRaises(fakelibvirt.libvirtError, drvr._create_domain,
                      fake_xml)
    self.assertTrue(self.log_error_called)
def test_create_domain_with_flags_fails(self):
    """Tests that the xml is logged when creating the domain with flags
    fails
    """
    fake_xml = "<test>this is a test</test>"
    fake_domain = FakeVirtDomain(fake_xml)

    def fake_createWithFlags(launch_flags):
        raise fakelibvirt.libvirtError('virDomainCreateWithFlags() failed')

    self.log_error_called = False

    def fake_error(msg, *args, **kwargs):
        self.log_error_called = True
        # The logged message must include the domain's XML.
        self.assertIn(fake_xml, msg % args)

    self.stubs.Set(fake_domain, 'createWithFlags', fake_createWithFlags)
    self.stubs.Set(nova.virt.libvirt.guest.LOG, 'error', fake_error)

    self.create_fake_libvirt_mock()
    self.mox.ReplayAll()
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
    # The libvirt error must propagate after being logged.
    self.assertRaises(fakelibvirt.libvirtError, drvr._create_domain,
                      domain=fake_domain)
    self.assertTrue(self.log_error_called)
def test_create_domain_enable_hairpin_fails(self):
    """Tests that the xml is logged when enabling hairpin mode for the
    domain fails.
    """
    fake_xml = "<test>this is a test</test>"
    fake_domain = FakeVirtDomain(fake_xml)

    def fake_execute(*args, **kwargs):
        # Writing to hairpin_mode sysfs is done via utils.execute;
        # make it blow up.
        raise processutils.ProcessExecutionError('error')

    def fake_get_interfaces(*args):
        return ["dev"]

    self.log_error_called = False

    def fake_error(msg, *args, **kwargs):
        self.log_error_called = True
        # The logged message must include the domain's XML.
        self.assertIn(fake_xml, msg % args)

    self.stubs.Set(nova.virt.libvirt.guest.LOG, 'error', fake_error)

    self.create_fake_libvirt_mock()
    self.mox.ReplayAll()
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
    self.stubs.Set(nova.utils, 'execute', fake_execute)
    self.stubs.Set(
        nova.virt.libvirt.guest.Guest, 'get_interfaces',
        fake_get_interfaces)

    # power_on=False skips createWithFlags so the hairpin step is
    # the first thing that can fail.
    self.assertRaises(processutils.ProcessExecutionError,
                      drvr._create_domain,
                      domain=fake_domain,
                      power_on=False)
    self.assertTrue(self.log_error_called)
def test_get_vnc_console(self):
    """get_vnc_console() extracts the VNC port from the domain XML."""
    instance = objects.Instance(**self.test_instance)
    dummyxml = ("<domain type='kvm'><name>instance-0000000a</name>"
                "<devices>"
                "<graphics type='vnc' port='5900'/>"
                "</devices></domain>")

    dom_mock = self.mox.CreateMock(fakelibvirt.virDomain)
    self.mox.StubOutWithMock(dom_mock, "XMLDesc")
    dom_mock.XMLDesc(0).AndReturn(dummyxml)

    def fake_lookup(instance_name):
        # Only the instance under test resolves to a domain.
        if instance_name == instance['name']:
            return dom_mock

    self.create_fake_libvirt_mock(lookupByName=fake_lookup)
    self.mox.ReplayAll()

    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    console = drvr.get_vnc_console(self.context, instance)
    self.assertEqual('5900', console.port)
def test_get_vnc_console_unavailable(self):
    """No <graphics> element in the XML -> ConsoleTypeUnavailable."""
    instance = objects.Instance(**self.test_instance)
    dummyxml = ("<domain type='kvm'><name>instance-0000000a</name>"
                "<devices></devices></domain>")

    dom_mock = self.mox.CreateMock(fakelibvirt.virDomain)
    self.mox.StubOutWithMock(dom_mock, "XMLDesc")
    dom_mock.XMLDesc(0).AndReturn(dummyxml)

    def fake_lookup(instance_name):
        # Only the instance under test resolves to a domain.
        if instance_name == instance['name']:
            return dom_mock

    self.create_fake_libvirt_mock(lookupByName=fake_lookup)
    self.mox.ReplayAll()

    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    self.assertRaises(exception.ConsoleTypeUnavailable,
                      drvr.get_vnc_console, self.context, instance)
def test_get_spice_console(self):
    """get_spice_console() extracts the SPICE port from the domain XML."""
    instance = objects.Instance(**self.test_instance)
    dummyxml = ("<domain type='kvm'><name>instance-0000000a</name>"
                "<devices>"
                "<graphics type='spice' port='5950'/>"
                "</devices></domain>")

    dom_mock = self.mox.CreateMock(fakelibvirt.virDomain)
    self.mox.StubOutWithMock(dom_mock, "XMLDesc")
    dom_mock.XMLDesc(0).AndReturn(dummyxml)

    def fake_lookup(instance_name):
        # Only the instance under test resolves to a domain.
        if instance_name == instance['name']:
            return dom_mock

    self.create_fake_libvirt_mock(lookupByName=fake_lookup)
    self.mox.ReplayAll()

    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    console = drvr.get_spice_console(self.context, instance)
    self.assertEqual('5950', console.port)
def test_get_spice_console_unavailable(self):
    """No <graphics> element in the XML -> ConsoleTypeUnavailable."""
    instance = objects.Instance(**self.test_instance)
    dummyxml = ("<domain type='kvm'><name>instance-0000000a</name>"
                "<devices></devices></domain>")

    dom_mock = self.mox.CreateMock(fakelibvirt.virDomain)
    self.mox.StubOutWithMock(dom_mock, "XMLDesc")
    dom_mock.XMLDesc(0).AndReturn(dummyxml)

    def fake_lookup(instance_name):
        # Only the instance under test resolves to a domain.
        if instance_name == instance['name']:
            return dom_mock

    self.create_fake_libvirt_mock(lookupByName=fake_lookup)
    self.mox.ReplayAll()

    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    self.assertRaises(exception.ConsoleTypeUnavailable,
                      drvr.get_spice_console, self.context, instance)
def test_detach_volume_with_instance_not_found(self):
    """detach_volume() must not raise when the instance's domain is gone.

    Even with no domain, the volume still gets disconnected on the
    host side.
    """
    instance = objects.Instance(**self.test_instance)
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)

    with contextlib.nested(
        mock.patch.object(host.Host, 'get_domain',
                          side_effect=exception.InstanceNotFound(
                              instance_id=instance.name)),
        mock.patch.object(drvr, '_disconnect_volume')
    ) as (mock_get_domain, mock_disconnect):
        connection_info = {'driver_volume_type': 'fake'}
        drvr.detach_volume(connection_info, instance, '/dev/sda')
        mock_get_domain.assert_called_once_with(instance)
        # Host-side disconnect still happens, keyed by device name.
        mock_disconnect.assert_called_once_with(connection_info,
                                                'sda')
def _test_attach_detach_interface_get_config(self, method_name):
    """Tests that the get_config() method is properly called in
    attach_interface() and detach_interface().

    method_name: either \"attach_interface\" or \"detach_interface\"
    depending on the method to test.
    """
    self.stubs.Set(host.Host, "get_domain", lambda a, b: FakeVirtDomain())

    instance = objects.Instance(**self.test_instance)
    network_info = _fake_network_info(self.stubs, 1)
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)

    # attach passes the instance's image meta; detach passes None.
    if method_name == "attach_interface":
        fake_image_meta = {'id': instance['image_ref']}
    elif method_name == "detach_interface":
        fake_image_meta = None
    else:
        raise ValueError("Unhandled method %s" % method_name)

    if method_name == "attach_interface":
        self.mox.StubOutWithMock(drvr.firewall_driver,
                                 'setup_basic_filtering')
        drvr.firewall_driver.setup_basic_filtering(instance, network_info)

    # Capture the real config before stubbing get_config, then expect
    # the stubbed call with the same arguments and return it.
    expected = drvr.vif_driver.get_config(instance, network_info[0],
                                          fake_image_meta,
                                          instance.get_flavor(),
                                          CONF.libvirt.virt_type)
    self.mox.StubOutWithMock(drvr.vif_driver, 'get_config')
    drvr.vif_driver.get_config(instance, network_info[0],
                               fake_image_meta,
                               mox.IsA(objects.Flavor),
                               CONF.libvirt.virt_type).\
                               AndReturn(expected)

    self.mox.ReplayAll()

    if method_name == "attach_interface":
        drvr.attach_interface(instance, fake_image_meta,
                              network_info[0])
    elif method_name == "detach_interface":
        drvr.detach_interface(instance, network_info[0])
    else:
        raise ValueError("Unhandled method %s" % method_name)
@mock.patch.object(lockutils, "external_lock")
def test_attach_interface_get_config(self, mock_lock):
    """attach_interface() must route through vif_driver.get_config()."""
    # Replace the external lock with an in-process semaphore so the
    # test never touches lock files on disk.
    mock_lock.return_value = threading.Semaphore()

    self._test_attach_detach_interface_get_config("attach_interface")
def test_detach_interface_get_config(self):
    """detach_interface() must route through vif_driver.get_config()."""
    self._test_attach_detach_interface_get_config("detach_interface")
def test_default_root_device_name(self):
    """default_root_device_name() returns /dev/ + the blockinfo root dev.

    The disk buses for 'disk' and 'cdrom' are resolved first, then
    passed to get_root_info(), whose 'dev' result is prefixed with
    '/dev/'.
    """
    instance = {'uuid': 'fake_instance'}
    image_meta = {'id': 'fake'}
    # NOTE: the key was previously misspelled 'detination_type'; the
    # dict is only passed through to the mocked get_root_info(), so
    # correcting it does not change what the test exercises.
    root_bdm = {'source_type': 'image',
                'destination_type': 'volume',
                'image_id': 'fake_id'}
    self.flags(virt_type='fake_libvirt_type', group='libvirt')

    self.mox.StubOutWithMock(blockinfo, 'get_disk_bus_for_device_type')
    self.mox.StubOutWithMock(blockinfo, 'get_root_info')

    # The two bus lookups may happen in either order.
    blockinfo.get_disk_bus_for_device_type('fake_libvirt_type',
                                           image_meta,
                                           'disk').InAnyOrder().\
                                            AndReturn('virtio')
    blockinfo.get_disk_bus_for_device_type('fake_libvirt_type',
                                           image_meta,
                                           'cdrom').InAnyOrder().\
                                            AndReturn('ide')
    blockinfo.get_root_info('fake_libvirt_type',
                            image_meta, root_bdm,
                            'virtio', 'ide').AndReturn({'dev': 'vda'})
    self.mox.ReplayAll()

    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    self.assertEqual(drvr.default_root_device_name(instance, image_meta,
                                                   root_bdm), '/dev/vda')
@mock.patch.object(driver, "get_block_device_info")
@mock.patch.object(blockinfo, "default_device_names")
@mock.patch.object(utils, "get_image_from_system_metadata")
def test_default_device_names_for_instance(
        self, mock_meta, mock_devnames, mock_blockinfo):
    """default_device_names_for_instance delegates to blockinfo.

    It must pass the virt type, instance, assembled block-device info
    and image meta straight through to
    blockinfo.default_device_names().
    """
    self.flags(virt_type='fake_libvirt_type', group='libvirt')

    instance = objects.Instance(**self.test_instance)
    instance.root_device_name = '/dev/vda'
    image_meta = {}
    mock_meta.return_value = image_meta
    mock_blockinfo.return_value = 'fake-block-device-info'

    ephemerals = [{'device_name': 'vdb'}]
    swap = [{'device_name': 'vdc'}]
    block_device_mapping = [{'device_name': 'vdc'}]

    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    drvr.default_device_names_for_instance(instance,
                                           instance.root_device_name,
                                           ephemerals, swap,
                                           block_device_mapping,
                                           image_meta)

    mock_devnames.assert_called_once_with(
        "fake_libvirt_type", mock.ANY,
        instance, 'fake-block-device-info',
        image_meta)
def test_is_supported_fs_format(self):
    """is_supported_fs_format() accepts ext2/3/4 and xfs and rejects
    anything else, including the empty string.
    """
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    for fmt in (disk.FS_FORMAT_EXT2, disk.FS_FORMAT_EXT3,
                disk.FS_FORMAT_EXT4, disk.FS_FORMAT_XFS):
        self.assertTrue(drvr.is_supported_fs_format(fmt))

    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    for fmt in ('', 'dummy'):
        self.assertFalse(drvr.is_supported_fs_format(fmt))
def test_post_live_migration_at_destination_with_block_device_info(self):
    """Run post_live_migration_at_destination() with a volume-backed
    block_device_info and verify the regenerated guest XML contains the
    (fake) backing reference and the driver BDM gets saved.
    """
    # Preparing mocks
    mock_domain = self.mox.CreateMock(fakelibvirt.virDomain)
    # Filled in by fake_to_xml below so the XML can be inspected.
    self.resultXML = None

    def fake_getLibVersion():
        # Fake libvirt version 0.9.11.
        return 9011

    def fake_getCapabilities():
        return """
        <capabilities>
            <host>
                <uuid>cef19ce0-0ca2-11df-855d-b19fbce37686</uuid>
                <cpu>
                  <arch>x86_64</arch>
                  <model>Penryn</model>
                  <vendor>Intel</vendor>
                  <topology sockets='1' cores='2' threads='1'/>
                  <feature name='xtpr'/>
                </cpu>
            </host>
        </capabilities>
        """

    def fake_to_xml(context, instance, network_info, disk_info,
                    image_meta=None, rescue=None,
                    block_device_info=None, write_to_disk=False):
        # Build the real guest config and capture the generated XML.
        if image_meta is None:
            image_meta = {}
        conf = drvr._get_guest_config(instance, network_info, image_meta,
                                      disk_info, rescue, block_device_info)
        self.resultXML = conf.to_xml()
        return self.resultXML

    def fake_get_domain(instance):
        return mock_domain

    def fake_baselineCPU(cpu, flag):
        return """<cpu mode='custom' match='exact'>
                    <model fallback='allow'>Westmere</model>
                    <vendor>Intel</vendor>
                    <feature policy='require' name='aes'/>
                  </cpu>
               """

    network_info = _fake_network_info(self.stubs, 1)
    self.create_fake_libvirt_mock(getLibVersion=fake_getLibVersion,
                                  getCapabilities=fake_getCapabilities,
                                  getVersion=lambda: 1005001,
                                  listDefinedDomains=lambda: [],
                                  numOfDomains=lambda: 0,
                                  baselineCPU=fake_baselineCPU)
    instance_ref = self.test_instance
    instance_ref['image_ref'] = 123456  # we send an int to test sha1 call
    instance = objects.Instance(**instance_ref)

    self.mox.ReplayAll()

    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    self.stubs.Set(drvr,
                   '_get_guest_xml',
                   fake_to_xml)
    self.stubs.Set(host.Host,
                   'get_domain',
                   fake_get_domain)
    # One bootable volume mapped at /dev/vda.
    block_device_info = {'block_device_mapping':
            driver_block_device.convert_volumes([
                fake_block_device.FakeDbBlockDeviceDict(
                    {'id': 1, 'guest_format': None,
                     'boot_index': 0,
                     'source_type': 'volume',
                     'destination_type': 'volume',
                     'device_name': '/dev/vda',
                     'disk_bus': 'virtio',
                     'device_type': 'disk',
                     'delete_on_termination': False}),
            ])}
    block_device_info['block_device_mapping'][0]['connection_info'] = (
        {'driver_volume_type': 'iscsi'})
    with contextlib.nested(
            mock.patch.object(
                driver_block_device.DriverVolumeBlockDevice, 'save'),
            mock.patch.object(objects.Instance, 'save')
    ) as (mock_volume_save, mock_instance_save):
        drvr.post_live_migration_at_destination(
            self.context, instance, network_info, True,
            block_device_info=block_device_info)
        # The regenerated XML must mention the fake backing.
        self.assertIn('fake', self.resultXML)
        mock_volume_save.assert_called_once_with()
def test_create_propagates_exceptions(self):
    """Errors raised by _create_domain() must bubble out of
    _create_domain_and_network() for LXC guests.
    """
    self.flags(virt_type='lxc', group='libvirt')
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    instance = objects.Instance(id=1, uuid='fake-uuid',
                                image_ref='my_fake_image')
    with contextlib.nested(
            mock.patch.object(drvr, '_create_domain_setup_lxc'),
            mock.patch.object(drvr, '_create_domain_cleanup_lxc'),
            mock.patch.object(drvr, '_is_booted_from_volume',
                              return_value=False),
            mock.patch.object(drvr, 'plug_vifs'),
            mock.patch.object(drvr, 'firewall_driver'),
            mock.patch.object(drvr, '_create_domain',
                              side_effect=exception.NovaException),
            mock.patch.object(drvr, 'cleanup')):
        self.assertRaises(exception.NovaException,
                          drvr._create_domain_and_network,
                          self.context, 'xml', instance, None, None)
def test_create_without_pause(self):
    """_create_domain_and_network() for LXC must create the domain
    unpaused (pause=0) and never resume() it.
    """
    self.flags(virt_type='lxc', group='libvirt')

    @contextlib.contextmanager
    def fake_lxc_disk_handler(*args, **kwargs):
        yield

    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    instance = objects.Instance(**self.test_instance)
    with contextlib.nested(
        mock.patch.object(drvr, '_lxc_disk_handler',
                          side_effect=fake_lxc_disk_handler),
        mock.patch.object(drvr, 'plug_vifs'),
        mock.patch.object(drvr, 'firewall_driver'),
        mock.patch.object(drvr, '_create_domain'),
        mock.patch.object(drvr, 'cleanup')) as (
            # NOTE: unpack order fixed to match the patch order above --
            # the old names bound the plug_vifs mock to 'cleanup' and the
            # cleanup mock to 'plug_vifs'. Only 'create' is asserted on,
            # so behavior is unchanged; the names are now truthful.
            _handler, plug_vifs, firewall_driver, create, cleanup):
        domain = drvr._create_domain_and_network(self.context, 'xml',
                                                 instance, None, None)
        self.assertEqual(0, create.call_args_list[0][1]['pause'])
        self.assertEqual(0, domain.resume.call_count)
def _test_create_with_network_events(self, neutron_failure=None,
                                     power_on=True):
    """Drive _create_domain_and_network() with neutron vif-plugged
    events, optionally simulating a failing or timing-out event wait.

    :param neutron_failure: None, 'timeout' or 'error' -- how the fake
        event wait should misbehave.
    :param power_on: whether the domain is powered on; events are only
        awaited when powering on.
    """
    generated_events = []

    def wait_timeout():
        # Fake the outcome of waiting on a network-vif-plugged event.
        event = mock.MagicMock()
        if neutron_failure == 'timeout':
            raise eventlet.timeout.Timeout()
        elif neutron_failure == 'error':
            event.status = 'failed'
        else:
            event.status = 'completed'
        return event

    def fake_prepare(instance, event_name):
        # Record each prepared event so ordering can be asserted below.
        m = mock.MagicMock()
        m.instance = instance
        m.event_name = event_name
        m.wait.side_effect = wait_timeout
        generated_events.append(m)
        return m

    virtapi = manager.ComputeVirtAPI(mock.MagicMock())
    prepare = virtapi._compute.instance_events.prepare_for_instance_event
    prepare.side_effect = fake_prepare
    drvr = libvirt_driver.LibvirtDriver(virtapi, False)

    instance = objects.Instance(**self.test_instance)
    # Two inactive vifs -> two expected vif-plugged events.
    vifs = [{'id': 'vif1', 'active': False},
            {'id': 'vif2', 'active': False}]

    @mock.patch.object(drvr, 'plug_vifs')
    @mock.patch.object(drvr, 'firewall_driver')
    @mock.patch.object(drvr, '_create_domain')
    @mock.patch.object(drvr, 'cleanup')
    def test_create(cleanup, create, fw_driver, plug_vifs):
        domain = drvr._create_domain_and_network(self.context, 'xml',
                                                 instance, vifs, None,
                                                 power_on=power_on)
        plug_vifs.assert_called_with(instance, vifs)

        pause = self._get_pause_flag(drvr, vifs, power_on=power_on)
        self.assertEqual(pause,
                         create.call_args_list[0][1]['pause'])
        if pause:
            domain.resume.assert_called_once_with()
        if neutron_failure and CONF.vif_plugging_is_fatal:
            # Fatal plug failures must trigger a full cleanup.
            cleanup.assert_called_once_with(self.context,
                                            instance, network_info=vifs,
                                            block_device_info=None)

    test_create()

    if utils.is_neutron() and CONF.vif_plugging_timeout and power_on:
        prepare.assert_has_calls([
            mock.call(instance, 'network-vif-plugged-vif1'),
            mock.call(instance, 'network-vif-plugged-vif2')])
        for event in generated_events:
            if neutron_failure and generated_events.index(event) != 0:
                # A failure on the first event aborts later waits.
                self.assertEqual(0, event.call_count)
            elif (neutron_failure == 'error' and
                    not CONF.vif_plugging_is_fatal):
                event.wait.assert_called_once_with()
    else:
        # No neutron / no timeout / powered off: no events prepared.
        self.assertEqual(0, prepare.call_count)
@mock.patch('nova.utils.is_neutron', return_value=True)
def test_create_with_network_events_neutron(self, is_neutron):
    """Happy path: neutron vif-plugged events are prepared and awaited."""
    self._test_create_with_network_events()
@mock.patch('nova.utils.is_neutron', return_value=True)
def test_create_with_network_events_neutron_power_off(self,
                                                      is_neutron):
    # Tests that we don't wait for events if we don't start the instance.
    self._test_create_with_network_events(power_on=False)
@mock.patch('nova.utils.is_neutron', return_value=True)
def test_create_with_network_events_neutron_nowait(self, is_neutron):
    """A zero vif_plugging_timeout disables event waiting entirely."""
    self.flags(vif_plugging_timeout=0)
    self._test_create_with_network_events()
@mock.patch('nova.utils.is_neutron', return_value=True)
def test_create_with_network_events_neutron_failed_nonfatal_timeout(
        self, is_neutron):
    """An event timeout is tolerated when plugging is not fatal."""
    self.flags(vif_plugging_is_fatal=False)
    self._test_create_with_network_events(neutron_failure='timeout')
@mock.patch('nova.utils.is_neutron', return_value=True)
def test_create_with_network_events_neutron_failed_fatal_timeout(
        self, is_neutron):
    """With fatal plugging (the default), a timeout raises."""
    self.assertRaises(exception.VirtualInterfaceCreateException,
                      self._test_create_with_network_events,
                      neutron_failure='timeout')
@mock.patch('nova.utils.is_neutron', return_value=True)
def test_create_with_network_events_neutron_failed_nonfatal_error(
        self, is_neutron):
    """A 'failed' event status is tolerated when plugging is not fatal."""
    self.flags(vif_plugging_is_fatal=False)
    self._test_create_with_network_events(neutron_failure='error')
@mock.patch('nova.utils.is_neutron', return_value=True)
def test_create_with_network_events_neutron_failed_fatal_error(
        self, is_neutron):
    """With fatal plugging (the default), a 'failed' event raises."""
    self.assertRaises(exception.VirtualInterfaceCreateException,
                      self._test_create_with_network_events,
                      neutron_failure='error')
@mock.patch('nova.utils.is_neutron', return_value=False)
def test_create_with_network_events_non_neutron(self, is_neutron):
    """Without neutron, no events are prepared at all."""
    self._test_create_with_network_events()
@mock.patch('nova.volume.encryptors.get_encryption_metadata')
@mock.patch('nova.virt.libvirt.blockinfo.get_info_from_bdm')
def test_create_with_bdm(self, get_info_from_bdm, get_encryption_metadata):
    """Creating a domain with an encrypted volume BDM must fetch the
    encryption metadata, build a volume encryptor, plug vifs, apply
    the firewall filters and finally create the domain.
    """
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    instance = objects.Instance(**self.test_instance)
    mock_dom = mock.MagicMock()
    mock_encryption_meta = mock.MagicMock()
    get_encryption_metadata.return_value = mock_encryption_meta

    fake_xml = """
        <domain>
            <name>instance-00000001</name>
            <memory>1048576</memory>
            <vcpu>1</vcpu>
            <devices>
                <disk type='file' device='disk'>
                    <driver name='qemu' type='raw' cache='none'/>
                    <source file='/path/fake-volume1'/>
                    <target dev='vda' bus='virtio'/>
                </disk>
            </devices>
        </domain>
    """
    fake_volume_id = "fake-volume-id"
    connection_info = {"driver_volume_type": "fake",
                       "data": {"access_mode": "rw",
                                "volume_id": fake_volume_id}}

    def fake_getitem(*args, **kwargs):
        # Emulate dict-style access on the driver BDM object.
        fake_bdm = {'connection_info': connection_info,
                    'mount_device': '/dev/vda'}
        return fake_bdm.get(args[0])

    mock_volume = mock.MagicMock()
    mock_volume.__getitem__.side_effect = fake_getitem
    block_device_info = {'block_device_mapping': [mock_volume]}
    network_info = [network_model.VIF(id='1'),
                    network_model.VIF(id='2', active=True)]

    with contextlib.nested(
        mock.patch.object(drvr, '_get_volume_encryptor'),
        mock.patch.object(drvr, 'plug_vifs'),
        mock.patch.object(drvr.firewall_driver, 'setup_basic_filtering'),
        mock.patch.object(drvr.firewall_driver,
                          'prepare_instance_filter'),
        mock.patch.object(drvr, '_create_domain'),
        mock.patch.object(drvr.firewall_driver, 'apply_instance_filter'),
    ) as (get_volume_encryptor, plug_vifs, setup_basic_filtering,
          prepare_instance_filter, create_domain, apply_instance_filter):
        create_domain.return_value = libvirt_guest.Guest(mock_dom)

        guest = drvr._create_domain_and_network(
            self.context, fake_xml, instance, network_info, None,
            block_device_info=block_device_info)

        get_encryption_metadata.assert_called_once_with(self.context,
            drvr._volume_api, fake_volume_id, connection_info)
        get_volume_encryptor.assert_called_once_with(connection_info,
                                                     mock_encryption_meta)
        plug_vifs.assert_called_once_with(instance, network_info)
        setup_basic_filtering.assert_called_once_with(instance,
                                                      network_info)
        prepare_instance_filter.assert_called_once_with(instance,
                                                        network_info)
        pause = self._get_pause_flag(drvr, network_info)
        create_domain.assert_called_once_with(
            fake_xml, pause=pause, power_on=True)
        self.assertEqual(mock_dom, guest._domain)
def test_get_guest_storage_config(self):
    """_get_guest_storage_config() must yield configs for the root
    disk, the ephemeral disk and the mapped volume, connect the volume,
    set the cache mode on every device and persist the BDM.
    """
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)

    test_instance = copy.deepcopy(self.test_instance)
    test_instance["default_swap_device"] = None
    instance = objects.Instance(**test_instance)
    image_meta = {}
    flavor = instance.get_flavor()
    conn_info = {'driver_volume_type': 'fake', 'data': {}}
    # One volume mapped at /dev/vdc.
    bdi = {'block_device_mapping':
           driver_block_device.convert_volumes([
               fake_block_device.FakeDbBlockDeviceDict({
                   'id': 1,
                   'source_type': 'volume',
                   'destination_type': 'volume',
                   'device_name': '/dev/vdc'})
           ])}
    bdm = bdi['block_device_mapping'][0]
    bdm['connection_info'] = conn_info
    disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                        instance,
                                        image_meta,
                                        bdi)
    mock_conf = mock.MagicMock(source_path='fake')

    with contextlib.nested(
            mock.patch.object(driver_block_device.DriverVolumeBlockDevice,
                              'save'),
            mock.patch.object(drvr, '_connect_volume'),
            mock.patch.object(drvr, '_get_volume_config',
                              return_value=mock_conf),
            mock.patch.object(drvr, '_set_cache_mode')
    ) as (volume_save, connect_volume, get_volume_config, set_cache_mode):
        devices = drvr._get_guest_storage_config(instance, None,
                                                 disk_info, False, bdi,
                                                 flavor, "hvm")

        # Root, ephemeral and volume => three devices.
        self.assertEqual(3, len(devices))
        self.assertEqual('/dev/vdb', instance.default_ephemeral_device)
        self.assertIsNone(instance.default_swap_device)
        connect_volume.assert_called_with(bdm['connection_info'],
            {'bus': 'virtio', 'type': 'disk', 'dev': 'vdc'})
        get_volume_config.assert_called_with(bdm['connection_info'],
            {'bus': 'virtio', 'type': 'disk', 'dev': 'vdc'})
        volume_save.assert_called_once_with()
        self.assertEqual(3, set_cache_mode.call_count)
def test_get_neutron_events(self):
    """Only inactive vifs generate network-vif-plugged events."""
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    vifs = [network_model.VIF(id='1'),
            network_model.VIF(id='2', active=True)]
    self.assertEqual([('network-vif-plugged', '1')],
                     drvr._get_neutron_events(vifs))
def test_unplug_vifs_ignores_errors(self):
    """_unplug_vifs(ignore_errors=True) swallows vif driver failures."""
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI())
    err = exception.AgentError(
        method='unplug')
    with mock.patch.object(drvr, 'vif_driver') as vif_driver:
        vif_driver.unplug.side_effect = err
        # Must not raise despite the AgentError above.
        drvr._unplug_vifs('inst', [1], ignore_errors=True)
        vif_driver.unplug.assert_called_once_with('inst', 1)
def test_unplug_vifs_reports_errors(self):
    """The public unplug_vifs() propagates vif driver failures."""
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI())
    err = exception.AgentError(
        method='unplug')
    with mock.patch.object(drvr, 'vif_driver') as vif_driver:
        vif_driver.unplug.side_effect = err
        self.assertRaises(exception.AgentError,
                          drvr.unplug_vifs, 'inst', [1])
        vif_driver.unplug.assert_called_once_with('inst', 1)
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver._unplug_vifs')
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver._undefine_domain')
def test_cleanup_pass_with_no_mount_device(self, undefine, unplug):
    """cleanup() still disconnects a volume whose BDM lacks a mount
    device.
    """
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI())
    drvr.firewall_driver = mock.Mock()
    drvr._disconnect_volume = mock.Mock()
    fake_inst = {'name': 'foo'}
    fake_bdms = [{'connection_info': 'foo', 'mount_device': None}]
    target = ('nova.virt.driver'
              '.block_device_info_get_mapping')
    with mock.patch(target, return_value=fake_bdms):
        drvr.cleanup('ctxt', fake_inst, 'netinfo', destroy_disks=False)
    self.assertTrue(drvr._disconnect_volume.called)
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver._unplug_vifs')
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver._undefine_domain')
def test_cleanup_wants_vif_errors_ignored(self, undefine, unplug):
    """cleanup() asks _unplug_vifs to ignore errors (third arg True)."""
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI())
    fake_inst = {'name': 'foo'}
    with mock.patch.object(drvr._conn, 'lookupByName') as lookup:
        lookup.return_value = fake_inst
        # NOTE(danms): Make unplug cause us to bail early, since
        # we only care about how it was called
        unplug.side_effect = test.TestingException
        self.assertRaises(test.TestingException,
                          drvr.cleanup, 'ctxt', fake_inst, 'netinfo')
        unplug.assert_called_once_with(fake_inst, 'netinfo', True)
@mock.patch('nova.virt.driver.block_device_info_get_mapping')
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver.'
            '_get_serial_ports_from_instance')
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver._undefine_domain')
def test_cleanup_serial_console_enabled(
        self, undefine, get_ports,
        block_device_info_get_mapping):
    """With serial consoles enabled, cleanup() must release the serial
    ports before undefining the domain.
    """
    self.flags(enabled="True", group='serial_console')
    instance = 'i1'
    network_info = {}
    bdm_info = {}
    firewall_driver = mock.MagicMock()

    get_ports.return_value = iter([('127.0.0.1', 10000)])
    block_device_info_get_mapping.return_value = ()

    # We want to ensure undefine_domain is called after
    # lookup_domain.
    def undefine_domain(instance):
        # Once undefined, any further serial-port lookup must fail --
        # this detects the wrong call order.
        get_ports.side_effect = Exception("domain undefined")
    undefine.side_effect = undefine_domain

    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI())
    drvr.firewall_driver = firewall_driver
    drvr.cleanup(
        'ctx', instance, network_info,
        block_device_info=bdm_info,
        destroy_disks=False, destroy_vifs=False)

    get_ports.assert_called_once_with(instance)
    undefine.assert_called_once_with(instance)
    firewall_driver.unfilter_instance.assert_called_once_with(
        instance, network_info=network_info)
    block_device_info_get_mapping.assert_called_once_with(bdm_info)
def test_swap_volume(self):
    """_swap_volume() rebases the disk onto the new path, resizes it to
    the requested size and re-defines the persistent domain XML.
    """
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI())
    dom = mock.MagicMock()
    guest_xml = "<domain/>"
    src_path = "/first/path"
    dst_path = "/second/path"
    with mock.patch.object(drvr._conn, 'defineXML',
                           create=True) as mock_define:
        dom.XMLDesc.return_value = guest_xml
        dom.isPersistent.return_value = True
        # Empty job info means the copy job has already completed.
        dom.blockJobInfo.return_value = {}

        drvr._swap_volume(dom, src_path, dst_path, 1)

        dom.XMLDesc.assert_called_once_with(
            fakelibvirt.VIR_DOMAIN_XML_INACTIVE |
            fakelibvirt.VIR_DOMAIN_XML_SECURE)
        dom.blockRebase.assert_called_once_with(
            src_path, dst_path, 0,
            fakelibvirt.VIR_DOMAIN_BLOCK_REBASE_COPY |
            fakelibvirt.VIR_DOMAIN_BLOCK_REBASE_REUSE_EXT)
        dom.blockResize.assert_called_once_with(
            src_path, 1 * units.Gi / units.Ki)
        mock_define.assert_called_once_with(guest_xml)
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver._disconnect_volume')
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver._swap_volume')
@mock.patch('nova.virt.block_device.DriverVolumeBlockDevice.save')
@mock.patch('nova.objects.block_device.BlockDeviceMapping.'
            'get_by_volume_id')
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver._get_volume_config')
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver._connect_volume')
@mock.patch('nova.virt.libvirt.host.Host.get_domain')
def test_swap_volume_driver_bdm_save(self, get_domain,
                                     connect_volume, get_volume_config,
                                     get_by_volume_id, volume_save,
                                     swap_volume, disconnect_volume):
    """swap_volume() must connect the new volume, rebase vdb onto it,
    disconnect the old volume and save the updated driver BDM.
    """
    conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI())
    instance = objects.Instance(**self.test_instance)
    old_connection_info = {'driver_volume_type': 'fake',
                           'serial': 'old-volume-id',
                           'data': {'device_path': '/fake-old-volume',
                                    'access_mode': 'rw'}}
    new_connection_info = {'driver_volume_type': 'fake',
                           'serial': 'new-volume-id',
                           'data': {'device_path': '/fake-new-volume',
                                    'access_mode': 'rw'}}
    mock_dom = mock.MagicMock()
    # Guest XML showing the old volume attached at vdb.
    mock_dom.XMLDesc.return_value = """<domain>
      <devices>
        <disk type='file'>
           <source file='/fake-old-volume'/>
           <target dev='vdb' bus='virtio'/>
        </disk>
      </devices>
    </domain>
    """
    get_domain.return_value = mock_dom
    disk_info = {'bus': 'virtio', 'type': 'disk', 'dev': 'vdb'}
    get_volume_config.return_value = mock.MagicMock(
        source_path='/fake-new-volume')

    bdm = objects.BlockDeviceMapping(self.context,
        **fake_block_device.FakeDbBlockDeviceDict(
            {'id': 2, 'instance_uuid': 'fake-instance',
             'device_name': '/dev/vdb',
             'source_type': 'volume',
             'destination_type': 'volume',
             'volume_id': 'fake-volume-id-2',
             'boot_index': 0}))
    get_by_volume_id.return_value = bdm

    conn.swap_volume(old_connection_info, new_connection_info, instance,
                     '/dev/vdb', 1)

    get_domain.assert_called_once_with(instance)
    connect_volume.assert_called_once_with(new_connection_info, disk_info)
    swap_volume.assert_called_once_with(mock_dom, 'vdb',
                                        '/fake-new-volume', 1)
    disconnect_volume.assert_called_once_with(old_connection_info, 'vdb')
    volume_save.assert_called_once_with()
def test_live_snapshot(self):
    """_live_snapshot() must shallow-rebase onto a delta cow file,
    extract the snapshot from it and restore the original domain XML.
    """
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI())
    mock_dom = mock.MagicMock()

    with contextlib.nested(
            mock.patch.object(drvr._conn, 'defineXML', create=True),
            mock.patch.object(fake_libvirt_utils, 'get_disk_size'),
            mock.patch.object(fake_libvirt_utils, 'get_disk_backing_file'),
            mock.patch.object(fake_libvirt_utils, 'create_cow_image'),
            mock.patch.object(fake_libvirt_utils, 'chown'),
            mock.patch.object(fake_libvirt_utils, 'extract_snapshot'),
    ) as (mock_define, mock_size, mock_backing, mock_create_cow,
          mock_chown, mock_snapshot):

        xmldoc = "<domain/>"
        srcfile = "/first/path"
        dstfile = "/second/path"
        bckfile = "/other/path"
        # The temporary copy target used during the rebase.
        dltfile = dstfile + ".delta"

        mock_dom.XMLDesc.return_value = xmldoc
        mock_dom.isPersistent.return_value = True
        mock_size.return_value = 1004009
        mock_backing.return_value = bckfile

        drvr._live_snapshot(self.context, self.test_instance, mock_dom,
                            srcfile, dstfile, "qcow2", {})

        mock_dom.XMLDesc.assert_called_once_with(
            fakelibvirt.VIR_DOMAIN_XML_INACTIVE |
            fakelibvirt.VIR_DOMAIN_XML_SECURE)
        mock_dom.blockRebase.assert_called_once_with(
            srcfile, dltfile, 0,
            fakelibvirt.VIR_DOMAIN_BLOCK_REBASE_COPY |
            fakelibvirt.VIR_DOMAIN_BLOCK_REBASE_REUSE_EXT |
            fakelibvirt.VIR_DOMAIN_BLOCK_REBASE_SHALLOW)

        mock_size.assert_called_once_with(srcfile)
        mock_backing.assert_called_once_with(srcfile, basename=False)
        mock_create_cow.assert_called_once_with(bckfile, dltfile, 1004009)
        mock_chown.assert_called_once_with(dltfile, os.getuid())
        mock_snapshot.assert_called_once_with(dltfile, "qcow2",
                                              dstfile, "qcow2")
        mock_define.assert_called_once_with(xmldoc)
@mock.patch.object(greenthread, "spawn")
def test_live_migration_hostname_valid(self, mock_spawn):
    """A well-formed destination hostname spawns the migration thread."""
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    identity = lambda x: x
    drvr.live_migration(self.context, self.test_instance,
                        "host1.example.com",
                        identity,
                        identity)
    self.assertEqual(1, mock_spawn.call_count)
@mock.patch.object(greenthread, "spawn")
@mock.patch.object(fake_libvirt_utils, "is_valid_hostname")
def test_live_migration_hostname_invalid(self, mock_hostname, mock_spawn):
    """A malformed destination hostname must raise InvalidHostname."""
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    mock_hostname.return_value = False
    identity = lambda x: x
    self.assertRaises(exception.InvalidHostname,
                      drvr.live_migration,
                      self.context, self.test_instance,
                      "foo/?com=/bin/sh",
                      identity,
                      identity)
@mock.patch('os.path.exists', return_value=True)
@mock.patch('tempfile.mkstemp')
@mock.patch('os.close', return_value=None)
def test_check_instance_shared_storage_local_raw(self,
                                                 mock_close,
                                                 mock_mkstemp,
                                                 mock_exists):
    """With raw images the shared-storage probe creates a temp file
    under instances_path and reports its name.
    """
    self.flags(images_type='raw', group='libvirt')
    self.flags(instances_path='/tmp')
    instance_uuid = str(uuid.uuid4())
    tmp_name = '/tmp/{0}/file'.format(instance_uuid)
    mock_mkstemp.return_value = (-1, tmp_name)
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    instance = objects.Instance(**self.test_instance)
    result = drvr.check_instance_shared_storage_local(self.context,
                                                      instance)
    self.assertEqual(tmp_name, result['filename'])
def test_check_instance_shared_storage_local_rbd(self):
    """RBD-backed instances have no local disk: the probe returns None."""
    self.flags(images_type='rbd', group='libvirt')
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    instance = objects.Instance(**self.test_instance)
    result = drvr.check_instance_shared_storage_local(self.context,
                                                      instance)
    self.assertIsNone(result)
def test_version_to_string(self):
    """_version_to_string() joins (major, minor, micro) with dots."""
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    self.assertEqual("4.33.173",
                     drvr._version_to_string((4, 33, 173)))
def test_parallels_min_version_fail(self):
    """init_host() refuses libvirt older than the parallels minimum."""
    self.flags(virt_type='parallels', group='libvirt')
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    # 1002011 == 1.2.11, one below the required minimum.
    with mock.patch.object(drvr._conn, 'getLibVersion',
                           return_value=1002011):
        self.assertRaises(exception.NovaException,
                          drvr.init_host, 'wibble')
def test_parallels_min_version_ok(self):
    """init_host() accepts the minimum libvirt version for parallels."""
    self.flags(virt_type='parallels', group='libvirt')
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
    # 1002012 == 1.2.12, the accepted minimum.
    with mock.patch.object(drvr._conn, 'getLibVersion',
                           return_value=1002012):
        drvr.init_host('wibble')
def test_get_guest_config_parallels_vm(self):
    """A parallels VM with ploop images must get ploop-format root and
    ephemeral disks plus interface, input, graphics and video devices.
    """
    self.flags(virt_type='parallels', group='libvirt')
    self.flags(images_type='ploop', group='libvirt')
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
    instance_ref = objects.Instance(**self.test_instance)
    image_meta = {}

    disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                        instance_ref,
                                        image_meta)

    cfg = drvr._get_guest_config(instance_ref,
                                 _fake_network_info(self.stubs, 1),
                                 None, disk_info)
    self.assertEqual("parallels", cfg.virt_type)
    self.assertEqual(instance_ref["uuid"], cfg.uuid)
    self.assertEqual(2 * units.Mi, cfg.memory)
    self.assertEqual(1, cfg.vcpus)
    self.assertEqual(vm_mode.HVM, cfg.os_type)
    self.assertIsNone(cfg.os_root)
    self.assertEqual(6, len(cfg.devices))
    self.assertIsInstance(cfg.devices[0],
                          vconfig.LibvirtConfigGuestDisk)
    # Root disk must use the ploop driver format.
    self.assertEqual(cfg.devices[0].driver_format, "ploop")
    self.assertIsInstance(cfg.devices[1],
                          vconfig.LibvirtConfigGuestDisk)
    self.assertIsInstance(cfg.devices[2],
                          vconfig.LibvirtConfigGuestInterface)
    self.assertIsInstance(cfg.devices[3],
                          vconfig.LibvirtConfigGuestInput)
    self.assertIsInstance(cfg.devices[4],
                          vconfig.LibvirtConfigGuestGraphics)
    self.assertIsInstance(cfg.devices[5],
                          vconfig.LibvirtConfigGuestVideo)
def test_get_guest_config_parallels_ct(self):
    """A parallels container (vm_mode EXE) must boot via /sbin/init
    from a ploop root filesystem and get interface, graphics and video
    devices.
    """
    self.flags(virt_type='parallels', group='libvirt')
    drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
    ct_instance = self.test_instance.copy()
    ct_instance["vm_mode"] = vm_mode.EXE
    instance_ref = objects.Instance(**ct_instance)

    cfg = drvr._get_guest_config(instance_ref,
                                 _fake_network_info(self.stubs, 1),
                                 None, {'mapping': {}})
    self.assertEqual("parallels", cfg.virt_type)
    self.assertEqual(instance_ref["uuid"], cfg.uuid)
    self.assertEqual(2 * units.Mi, cfg.memory)
    self.assertEqual(1, cfg.vcpus)
    self.assertEqual(vm_mode.EXE, cfg.os_type)
    self.assertEqual("/sbin/init", cfg.os_init_path)
    self.assertIsNone(cfg.os_root)
    self.assertEqual(4, len(cfg.devices))
    self.assertIsInstance(cfg.devices[0],
                          vconfig.LibvirtConfigGuestFilesys)
    # Container root is a file-backed ploop filesystem mounted at '/'.
    fs = cfg.devices[0]
    self.assertEqual(fs.source_type, "file")
    self.assertEqual(fs.driver_type, "ploop")
    self.assertEqual(fs.target_dir, "/")
    self.assertIsInstance(cfg.devices[1],
                          vconfig.LibvirtConfigGuestInterface)
    self.assertIsInstance(cfg.devices[2],
                          vconfig.LibvirtConfigGuestGraphics)
    self.assertIsInstance(cfg.devices[3],
                          vconfig.LibvirtConfigGuestVideo)
class HostStateTestCase(test.NoDBTestCase):
    """Tests get_available_resource() via a driver subclass whose
    host-probing helpers return canned values.
    """

    # Canned host CPU description returned by FakeConnection._get_cpu_info.
    cpu_info = {"vendor": "Intel", "model": "pentium", "arch": "i686",
                "features": ["ssse3", "monitor", "pni", "sse2", "sse",
                             "fxsr", "clflush", "pse36", "pat", "cmov",
                             "mca", "pge", "mtrr", "sep", "apic"],
                "topology": {"cores": "1", "threads": "1", "sockets": "1"}}

    instance_caps = [(arch.X86_64, "kvm", "hvm"),
                     (arch.I686, "kvm", "hvm")]

    pci_devices = [{
        "dev_id": "pci_0000_04_00_3",
        "address": "0000:04:10.3",
        "product_id": '1521',
        "vendor_id": '8086',
        "dev_type": 'type-PF',
        "phys_function": None}]

    # Two-cell NUMA layout with no usage and nothing pinned.
    numa_topology = objects.NUMATopology(
        cells=[objects.NUMACell(
                   id=1, cpuset=set([1, 2]), memory=1024,
                   cpu_usage=0, memory_usage=0,
                   mempages=[], siblings=[],
                   pinned_cpus=set([])),
               objects.NUMACell(
                   id=2, cpuset=set([3, 4]), memory=1024,
                   cpu_usage=0, memory_usage=0,
                   mempages=[], siblings=[],
                   pinned_cpus=set([]))])

    class FakeConnection(libvirt_driver.LibvirtDriver):
        """Fake connection object."""
        def __init__(self):
            super(HostStateTestCase.FakeConnection,
                  self).__init__(fake.FakeVirtAPI(), True)

            self._host = host.Host("qemu:///system")

            def _get_memory_mb_total():
                return 497

            def _get_memory_mb_used():
                return 88

            # Replace the Host memory probes with the canned values.
            self._host.get_memory_mb_total = _get_memory_mb_total
            self._host.get_memory_mb_used = _get_memory_mb_used

        def _get_vcpu_total(self):
            return 1

        def _get_vcpu_used(self):
            return 0

        def _get_cpu_info(self):
            return HostStateTestCase.cpu_info

        def _get_disk_over_committed_size_total(self):
            return 0

        def _get_local_gb_info(self):
            return {'total': 100, 'used': 20, 'free': 80}

        def get_host_uptime(self):
            return ('10:01:16 up 1:36, 6 users, '
                    'load average: 0.21, 0.16, 0.19')

        def _get_disk_available_least(self):
            return 13091

        def _get_instance_capabilities(self):
            return HostStateTestCase.instance_caps

        def _get_pci_passthrough_devices(self):
            return jsonutils.dumps(HostStateTestCase.pci_devices)

        def _get_host_numa_topology(self):
            return HostStateTestCase.numa_topology

    @mock.patch.object(fakelibvirt, "openAuth")
    def test_update_status(self, mock_open):
        """get_available_resource() must report the canned host stats."""
        mock_open.return_value = fakelibvirt.Connection("qemu:///system")

        drvr = HostStateTestCase.FakeConnection()

        stats = drvr.get_available_resource("compute1")
        self.assertEqual(stats["vcpus"], 1)
        self.assertEqual(stats["memory_mb"], 497)
        self.assertEqual(stats["local_gb"], 100)
        self.assertEqual(stats["vcpus_used"], 0)
        self.assertEqual(stats["memory_mb_used"], 88)
        self.assertEqual(stats["local_gb_used"], 20)
        self.assertEqual(stats["hypervisor_type"], 'QEMU')
        self.assertEqual(stats["hypervisor_version"], 1001000)
        self.assertEqual(stats["hypervisor_hostname"], 'compute1')
        cpu_info = jsonutils.loads(stats["cpu_info"])
        self.assertEqual(cpu_info,
                {"vendor": "Intel", "model": "pentium",
                 "arch": arch.I686,
                 "features": ["ssse3", "monitor", "pni", "sse2", "sse",
                              "fxsr", "clflush", "pse36", "pat", "cmov",
                              "mca", "pge", "mtrr", "sep", "apic"],
                 "topology": {"cores": "1", "threads": "1", "sockets": "1"}
                })
        # NOTE(review): expects 80 (the 'free' value from
        # _get_local_gb_info), not _get_disk_available_least()'s 13091 --
        # presumably recomputed by the driver; confirm against the driver.
        self.assertEqual(stats["disk_available_least"], 80)
        self.assertEqual(jsonutils.loads(stats["pci_passthrough_devices"]),
                         HostStateTestCase.pci_devices)
        self.assertThat(objects.NUMATopology.obj_from_db_obj(
                            stats['numa_topology'])._to_dict(),
                        matchers.DictMatches(
                            HostStateTestCase.numa_topology._to_dict()))
class LibvirtDriverTestCase(test.NoDBTestCase):
"""Test for nova.virt.libvirt.libvirt_driver.LibvirtDriver."""
def setUp(self):
    """Create a read-only libvirt driver and an admin context."""
    super(LibvirtDriverTestCase, self).setUp()
    self.context = context.get_admin_context()
    self.drvr = libvirt_driver.LibvirtDriver(
        fake.FakeVirtAPI(), read_only=True)
def _create_instance(self, params=None):
    """Create a test instance.

    :param params: optional dict of instance field overrides.
    """
    flavor = objects.Flavor(memory_mb=512,
                            swap=0,
                            vcpu_weight=None,
                            root_gb=10,
                            id=2,
                            name=u'm1.tiny',
                            ephemeral_gb=20,
                            rxtx_factor=1.0,
                            flavorid=u'1',
                            vcpus=1)
    inst = {
        'id': 1,
        'uuid': '52d3b512-1152-431f-a8f7-28f0288a622b',
        'os_type': 'linux',
        'image_ref': '1',
        'reservation_id': 'r-fakeres',
        'user_id': 'fake',
        'project_id': 'fake',
        'instance_type_id': 2,
        'ami_launch_index': 0,
        'host': 'host1',
        'root_gb': flavor.root_gb,
        'ephemeral_gb': flavor.ephemeral_gb,
        'config_drive': True,
        'kernel_id': 2,
        'ramdisk_id': 3,
        'key_data': 'ABCDEFG',
        'system_metadata': {},
        'metadata': {},
    }
    inst.update(params or {})

    return objects.Instance(flavor=flavor,
                            old_flavor=None, new_flavor=None,
                            **inst)
@staticmethod
def _disk_info():
    """JSON disk info for a 10G qcow2 root plus a 512M swap disk."""
    root = {'disk_size': 1, 'type': 'qcow2',
            'virt_disk_size': 10737418240, 'path': '/test/disk',
            'backing_file': '/base/disk'}
    swap = {'disk_size': 1, 'type': 'qcow2',
            'virt_disk_size': 536870912, 'path': '/test/disk.swap',
            'backing_file': '/base/swap_512'}
    return jsonutils.dumps([root, swap])
def test_migrate_disk_and_power_off_exception(self):
    """Test for nova.virt.libvirt.libvirt_driver.LivirtConnection
    .migrate_disk_and_power_off.

    The first execute() call fails; the failure must propagate out of
    migrate_disk_and_power_off().
    """
    self.counter = 0
    self.checked_shared_storage = False

    def fake_get_instance_disk_info(instance,
                                    block_device_info=None):
        return '[]'

    def fake_destroy(instance):
        pass

    def fake_get_host_ip_addr():
        return '10.0.0.1'

    def fake_execute(*args, **kwargs):
        # Fail the very first copy command only.
        self.counter += 1
        if self.counter == 1:
            assert False, "intentional failure"

    def fake_os_path_exists(path):
        return True

    def fake_is_storage_shared(dest, inst_base):
        self.checked_shared_storage = True
        return False

    self.stubs.Set(self.drvr, 'get_instance_disk_info',
                   fake_get_instance_disk_info)
    self.stubs.Set(self.drvr, '_destroy', fake_destroy)
    self.stubs.Set(self.drvr, 'get_host_ip_addr',
                   fake_get_host_ip_addr)
    self.stubs.Set(self.drvr, '_is_storage_shared_with',
                   fake_is_storage_shared)
    self.stubs.Set(utils, 'execute', fake_execute)
    self.stubs.Set(os.path, 'exists', fake_os_path_exists)

    ins_ref = self._create_instance()
    flavor = {'root_gb': 10, 'ephemeral_gb': 20}
    flavor_obj = objects.Flavor(**flavor)

    self.assertRaises(AssertionError,
                      self.drvr.migrate_disk_and_power_off,
                      context.get_admin_context(), ins_ref, '10.0.0.2',
                      flavor_obj, None)
    def _test_migrate_disk_and_power_off(self, flavor_obj,
                                         block_device_info=None,
                                         params_for_instance=None):
        """Test for nova.virt.libvirt.libvirt_driver.LivirtConnection
        .migrate_disk_and_power_off.

        Shared happy-path helper: stubs out disk info, destroy, host IP
        and execute, then checks the method returns the disk-info blob
        both when the destination is a different host and when it is the
        local host.

        :param flavor_obj: target objects.Flavor for the resize
        :param block_device_info: optional block device info dict
        :param params_for_instance: extra fields for the fake instance
        """
        disk_info = self._disk_info()
        def fake_get_instance_disk_info(instance,
                                        block_device_info=None):
            return disk_info
        def fake_destroy(instance):
            pass
        def fake_get_host_ip_addr():
            return '10.0.0.1'
        def fake_execute(*args, **kwargs):
            pass
        self.stubs.Set(self.drvr, 'get_instance_disk_info',
                       fake_get_instance_disk_info)
        self.stubs.Set(self.drvr, '_destroy', fake_destroy)
        self.stubs.Set(self.drvr, 'get_host_ip_addr',
                       fake_get_host_ip_addr)
        self.stubs.Set(utils, 'execute', fake_execute)
        ins_ref = self._create_instance(params=params_for_instance)
        # dest is different host case
        out = self.drvr.migrate_disk_and_power_off(
            context.get_admin_context(), ins_ref, '10.0.0.2',
            flavor_obj, None, block_device_info=block_device_info)
        self.assertEqual(out, disk_info)
        # dest is same host case
        out = self.drvr.migrate_disk_and_power_off(
            context.get_admin_context(), ins_ref, '10.0.0.1',
            flavor_obj, None, block_device_info=block_device_info)
        self.assertEqual(out, disk_info)
def test_migrate_disk_and_power_off(self):
flavor = {'root_gb': 10, 'ephemeral_gb': 20}
flavor_obj = objects.Flavor(**flavor)
self._test_migrate_disk_and_power_off(flavor_obj)
    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver._disconnect_volume')
    def test_migrate_disk_and_power_off_boot_from_volume(self,
                                                         disconnect_volume):
        """Boot-from-volume resize: the boot volume (boot_index 0) must be
        disconnected rather than copied."""
        info = {'block_device_mapping': [{'boot_index': None,
                                          'mount_device': '/dev/vdd',
                                          'connection_info': None},
                                         {'boot_index': 0,
                                          'mount_device': '/dev/vda',
                                          'connection_info': None}]}
        flavor = {'root_gb': 1, 'ephemeral_gb': 0}
        flavor_obj = objects.Flavor(**flavor)
        # Note(Mike_D): The size of instance's ephemeral_gb is 0 gb.
        self._test_migrate_disk_and_power_off(
            flavor_obj, block_device_info=info,
            params_for_instance={'image_ref': None, 'ephemeral_gb': 0})
        # The boot volume (/dev/vda) must have been disconnected.
        disconnect_volume.assert_called_with(
            info['block_device_mapping'][1]['connection_info'], 'vda')
    @mock.patch('nova.utils.execute')
    @mock.patch('nova.virt.libvirt.utils.copy_image')
    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver._destroy')
    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver.get_host_ip_addr')
    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver'
                '.get_instance_disk_info')
    def test_migrate_disk_and_power_off_swap(self, mock_get_disk_info,
                                             get_host_ip_addr,
                                             mock_destroy,
                                             mock_copy_image,
                                             mock_execute):
        """Test for nova.virt.libvirt.libvirt_driver.LivirtConnection
        .migrate_disk_and_power_off.

        The swap disk is recreated on resize, so it must never be copied
        or moved along with the other disks.
        """
        self.copy_or_move_swap_called = False
        disk_info = self._disk_info()
        mock_get_disk_info.return_value = disk_info
        get_host_ip_addr.return_value = '10.0.0.1'
        def fake_copy_image(*args, **kwargs):
            # disk.swap should not be touched since it is skipped over
            if '/test/disk.swap' in list(args):
                self.copy_or_move_swap_called = True
        def fake_execute(*args, **kwargs):
            # disk.swap should not be touched since it is skipped over
            if set(['mv', '/test/disk.swap']).issubset(list(args)):
                self.copy_or_move_swap_called = True
        mock_copy_image.side_effect = fake_copy_image
        mock_execute.side_effect = fake_execute
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        # Original instance config
        instance = self._create_instance({'root_gb': 10,
                                          'ephemeral_gb': 0})
        # Re-size fake instance to 20G root and 1024M swap disk
        flavor = {'root_gb': 20, 'ephemeral_gb': 0, 'swap': 1024}
        flavor_obj = objects.Flavor(**flavor)
        # Destination is same host
        out = drvr.migrate_disk_and_power_off(context.get_admin_context(),
                                              instance, '10.0.0.1',
                                              flavor_obj, None)
        mock_get_disk_info.assert_called_once_with(instance,
                                                   block_device_info=None)
        self.assertTrue(get_host_ip_addr.called)
        mock_destroy.assert_called_once_with(instance)
        # The swap disk must have been left alone.
        self.assertFalse(self.copy_or_move_swap_called)
        self.assertEqual(disk_info, out)
    def _test_migrate_disk_and_power_off_resize_check(self, expected_exc):
        """Test for nova.virt.libvirt.libvirt_driver.LibvirtConnection
        .migrate_disk_and_power_off.

        Helper asserting that migrate_disk_and_power_off fails its
        pre-resize checks with *expected_exc*.

        :param expected_exc: exception class expected from the driver
        """
        def fake_get_instance_disk_info(instance, xml=None,
                                        block_device_info=None):
            return self._disk_info()
        def fake_destroy(instance):
            pass
        def fake_get_host_ip_addr():
            return '10.0.0.1'
        self.stubs.Set(self.drvr, 'get_instance_disk_info',
                       fake_get_instance_disk_info)
        self.stubs.Set(self.drvr, '_destroy', fake_destroy)
        self.stubs.Set(self.drvr, 'get_host_ip_addr',
                       fake_get_host_ip_addr)
        ins_ref = self._create_instance()
        flavor = {'root_gb': 10, 'ephemeral_gb': 20}
        flavor_obj = objects.Flavor(**flavor)
        # Migration is not implemented for LVM backed instances
        self.assertRaises(expected_exc,
                          self.drvr.migrate_disk_and_power_off,
                          None, ins_ref, '10.0.0.1', flavor_obj, None)
def test_migrate_disk_and_power_off_lvm(self):
self.flags(images_type='lvm', group='libvirt')
def fake_execute(*args, **kwargs):
pass
self.stubs.Set(utils, 'execute', fake_execute)
expected_exc = exception.InstanceFaultRollback
self._test_migrate_disk_and_power_off_resize_check(expected_exc)
    def test_migrate_disk_and_power_off_resize_cannot_ssh(self):
        """A failing execute/ssh to the destination with no shared storage
        must roll back the resize."""
        def fake_execute(*args, **kwargs):
            raise processutils.ProcessExecutionError()
        def fake_is_storage_shared(dest, inst_base):
            self.checked_shared_storage = True
            return False
        self.stubs.Set(self.drvr, '_is_storage_shared_with',
                       fake_is_storage_shared)
        self.stubs.Set(utils, 'execute', fake_execute)
        expected_exc = exception.InstanceFaultRollback
        self._test_migrate_disk_and_power_off_resize_check(expected_exc)
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver'
'.get_instance_disk_info')
def test_migrate_disk_and_power_off_resize_error(self, mock_get_disk_info):
instance = self._create_instance()
flavor = {'root_gb': 5, 'ephemeral_gb': 10}
flavor_obj = objects.Flavor(**flavor)
mock_get_disk_info.return_value = self._disk_info()
self.assertRaises(
exception.InstanceFaultRollback,
self.drvr.migrate_disk_and_power_off,
'ctx', instance, '10.0.0.1', flavor_obj, None)
@mock.patch('nova.virt.libvirt.driver.LibvirtDriver'
'.get_instance_disk_info')
def test_migrate_disk_and_power_off_resize_error_default_ephemeral(
self, mock_get_disk_info):
# Note(Mike_D): The size of this instance's ephemeral_gb is 20 gb.
instance = self._create_instance()
flavor = {'root_gb': 10, 'ephemeral_gb': 0}
flavor_obj = objects.Flavor(**flavor)
mock_get_disk_info.return_value = self._disk_info()
self.assertRaises(exception.InstanceFaultRollback,
self.drvr.migrate_disk_and_power_off,
'ctx', instance, '10.0.0.1', flavor_obj, None)
    @mock.patch('nova.virt.libvirt.driver.LibvirtDriver'
                '.get_instance_disk_info')
    @mock.patch('nova.virt.driver.block_device_info_get_ephemerals')
    def test_migrate_disk_and_power_off_resize_error_eph(self, mock_get,
                                                         mock_get_disk_info):
        """Ephemeral sizing is checked against the blank/local block
        device mappings (1G swap + 3G blank here); volume and snapshot
        mappings do not count."""
        mappings = [
            # 1G blank local swap disk — counts toward ephemeral usage.
            {
                'device_name': '/dev/sdb4',
                'source_type': 'blank',
                'destination_type': 'local',
                'device_type': 'disk',
                'guest_format': 'swap',
                'boot_index': -1,
                'volume_size': 1
            },
            # Cinder volume — not ephemeral, ignored by the check.
            {
                'device_name': '/dev/sda1',
                'source_type': 'volume',
                'destination_type': 'volume',
                'device_type': 'disk',
                'volume_id': 1,
                'guest_format': None,
                'boot_index': 1,
                'volume_size': 6
            },
            # Snapshot-backed volume — also ignored.
            {
                'device_name': '/dev/sda2',
                'source_type': 'snapshot',
                'destination_type': 'volume',
                'snapshot_id': 1,
                'device_type': 'disk',
                'guest_format': None,
                'boot_index': 0,
                'volume_size': 4
            },
            # 3G blank local disk — counts toward ephemeral usage.
            {
                'device_name': '/dev/sda3',
                'source_type': 'blank',
                'destination_type': 'local',
                'device_type': 'disk',
                'guest_format': None,
                'boot_index': -1,
                'volume_size': 3
            }
        ]
        mock_get.return_value = mappings
        instance = self._create_instance()
        # Old flavor, eph is 20, real disk is 3, target is 2, fail
        flavor = {'root_gb': 10, 'ephemeral_gb': 2}
        flavor_obj = objects.Flavor(**flavor)
        mock_get_disk_info.return_value = self._disk_info()
        self.assertRaises(
            exception.InstanceFaultRollback,
            self.drvr.migrate_disk_and_power_off,
            'ctx', instance, '10.0.0.1', flavor_obj, None)
        # Old flavor, eph is 20, real disk is 3, target is 4
        flavor = {'root_gb': 10, 'ephemeral_gb': 4}
        flavor_obj = objects.Flavor(**flavor)
        self._test_migrate_disk_and_power_off(flavor_obj)
def test_wait_for_running(self):
def fake_get_info(instance):
if instance['name'] == "not_found":
raise exception.InstanceNotFound(instance_id=instance['uuid'])
elif instance['name'] == "running":
return hardware.InstanceInfo(state=power_state.RUNNING)
else:
return hardware.InstanceInfo(state=power_state.SHUTDOWN)
self.stubs.Set(self.drvr, 'get_info',
fake_get_info)
# instance not found case
self.assertRaises(exception.InstanceNotFound,
self.drvr._wait_for_running,
{'name': 'not_found',
'uuid': 'not_found_uuid'})
# instance is running case
self.assertRaises(loopingcall.LoopingCallDone,
self.drvr._wait_for_running,
{'name': 'running',
'uuid': 'running_uuid'})
# else case
self.drvr._wait_for_running({'name': 'else',
'uuid': 'other_uuid'})
def test_disk_size_from_instance_disk_info(self):
instance_data = {'root_gb': 10, 'ephemeral_gb': 20, 'swap_gb': 30}
inst = objects.Instance(**instance_data)
info = {'path': '/path/disk'}
self.assertEqual(10 * units.Gi,
self.drvr._disk_size_from_instance(inst, info))
info = {'path': '/path/disk.local'}
self.assertEqual(20 * units.Gi,
self.drvr._disk_size_from_instance(inst, info))
info = {'path': '/path/disk.swap'}
self.assertEqual(0,
self.drvr._disk_size_from_instance(inst, info))
@mock.patch('nova.utils.execute')
def test_disk_raw_to_qcow2(self, mock_execute):
path = '/test/disk'
_path_qcow = path + '_qcow'
self.drvr._disk_raw_to_qcow2(path)
mock_execute.assert_has_calls([
mock.call('qemu-img', 'convert', '-f', 'raw',
'-O', 'qcow2', path, _path_qcow),
mock.call('mv', _path_qcow, path)])
@mock.patch('nova.utils.execute')
def test_disk_qcow2_to_raw(self, mock_execute):
path = '/test/disk'
_path_raw = path + '_raw'
self.drvr._disk_qcow2_to_raw(path)
mock_execute.assert_has_calls([
mock.call('qemu-img', 'convert', '-f', 'qcow2',
'-O', 'raw', path, _path_raw),
mock.call('mv', _path_raw, path)])
@mock.patch('nova.virt.disk.api.extend')
def test_disk_resize_raw(self, mock_extend):
info = {'type': 'raw', 'path': '/test/disk'}
self.drvr._disk_resize(info, 50)
mock_extend.assert_called_once_with(info['path'], 50, use_cow=False)
    @mock.patch('nova.virt.disk.api.can_resize_image')
    @mock.patch('nova.virt.disk.api.is_image_extendable')
    @mock.patch('nova.virt.disk.api.extend')
    def test_disk_resize_qcow2(
            self, mock_extend, mock_can_resize, mock_is_image_extendable):
        """A qcow2 image is converted to raw, extended, then converted
        back to qcow2."""
        info = {'type': 'qcow2', 'path': '/test/disk'}
        with contextlib.nested(
                mock.patch.object(
                    self.drvr, '_disk_qcow2_to_raw'),
                mock.patch.object(
                    self.drvr, '_disk_raw_to_qcow2'))\
                as (mock_disk_qcow2_to_raw, mock_disk_raw_to_qcow2):
            mock_can_resize.return_value = True
            mock_is_image_extendable.return_value = True
            self.drvr._disk_resize(info, 50)
            # Round-trip: qcow2 -> raw, extend raw, raw -> qcow2.
            mock_disk_qcow2_to_raw.assert_called_once_with(info['path'])
            mock_extend.assert_called_once_with(
                info['path'], 50, use_cow=False)
            mock_disk_raw_to_qcow2.assert_called_once_with(info['path'])
    def _test_finish_migration(self, power_on, resize_instance=False):
        """Test for nova.virt.libvirt.libvirt_driver.LivirtConnection
        .finish_migration.

        :param power_on: expected power_on flag forwarded to
            _create_domain_and_network and reflected by get_info
        :param resize_instance: whether _disk_resize must be invoked
        """
        powered_on = power_on
        self.fake_create_domain_called = False
        self.fake_disk_resize_called = False
        def fake_to_xml(context, instance, network_info, disk_info,
                        image_meta=None, rescue=None,
                        block_device_info=None, write_to_disk=False):
            return ""
        def fake_plug_vifs(instance, network_info):
            pass
        def fake_create_image(context, inst,
                              disk_mapping, suffix='',
                              disk_images=None, network_info=None,
                              block_device_info=None, inject_files=True,
                              fallback_from_host=None):
            # finish_migration must never ask for file injection.
            self.assertFalse(inject_files)
        def fake_create_domain_and_network(
                context, xml, instance, network_info, disk_info,
                block_device_info=None, power_on=True, reboot=False,
                vifs_already_plugged=False):
            self.fake_create_domain_called = True
            self.assertEqual(powered_on, power_on)
            self.assertTrue(vifs_already_plugged)
        def fake_enable_hairpin():
            pass
        def fake_execute(*args, **kwargs):
            pass
        def fake_get_info(instance):
            if powered_on:
                return hardware.InstanceInfo(state=power_state.RUNNING)
            else:
                return hardware.InstanceInfo(state=power_state.SHUTDOWN)
        def fake_disk_resize(info, size):
            self.fake_disk_resize_called = True
        self.flags(use_cow_images=True)
        self.stubs.Set(self.drvr, '_disk_resize',
                       fake_disk_resize)
        self.stubs.Set(self.drvr, '_get_guest_xml', fake_to_xml)
        self.stubs.Set(self.drvr, 'plug_vifs', fake_plug_vifs)
        self.stubs.Set(self.drvr, '_create_image',
                       fake_create_image)
        self.stubs.Set(self.drvr, '_create_domain_and_network',
                       fake_create_domain_and_network)
        self.stubs.Set(nova.virt.libvirt.guest.Guest, 'enable_hairpin',
                       fake_enable_hairpin)
        self.stubs.Set(utils, 'execute', fake_execute)
        fw = base_firewall.NoopFirewallDriver()
        self.stubs.Set(self.drvr, 'firewall_driver', fw)
        self.stubs.Set(self.drvr, 'get_info',
                       fake_get_info)
        ins_ref = self._create_instance()
        image_meta = {}
        migration = objects.Migration()
        migration.source_compute = 'fake-source-compute'
        migration.dest_compute = 'fake-dest-compute'
        migration.source_node = 'fake-source-node'
        migration.dest_node = 'fake-dest-node'
        self.drvr.finish_migration(
            context.get_admin_context(), migration, ins_ref,
            self._disk_info(), [], image_meta,
            resize_instance, None, power_on)
        self.assertTrue(self.fake_create_domain_called)
        # _disk_resize runs exactly when a resize was requested.
        self.assertEqual(
            resize_instance, self.fake_disk_resize_called)
def test_finish_migration_resize(self):
self._test_finish_migration(True, resize_instance=True)
def test_finish_migration_power_on(self):
self._test_finish_migration(True)
def test_finish_migration_power_off(self):
self._test_finish_migration(False)
    def _test_finish_revert_migration(self, power_on):
        """Test for nova.virt.libvirt.libvirt_driver.LivirtConnection
        .finish_revert_migration.

        :param power_on: expected power_on flag forwarded to
            _create_domain_and_network and reflected by get_info
        """
        powered_on = power_on
        self.fake_create_domain_called = False
        def fake_execute(*args, **kwargs):
            pass
        def fake_plug_vifs(instance, network_info):
            pass
        def fake_create_domain(context, xml, instance, network_info,
                               disk_info, block_device_info=None,
                               power_on=None,
                               vifs_already_plugged=None):
            self.fake_create_domain_called = True
            self.assertEqual(powered_on, power_on)
            self.assertTrue(vifs_already_plugged)
            return mock.MagicMock()
        def fake_enable_hairpin():
            pass
        def fake_get_info(instance):
            if powered_on:
                return hardware.InstanceInfo(state=power_state.RUNNING)
            else:
                return hardware.InstanceInfo(state=power_state.SHUTDOWN)
        def fake_to_xml(context, instance, network_info, disk_info,
                        image_meta=None, rescue=None,
                        block_device_info=None):
            return ""
        self.stubs.Set(self.drvr, '_get_guest_xml', fake_to_xml)
        self.stubs.Set(self.drvr, 'plug_vifs', fake_plug_vifs)
        self.stubs.Set(utils, 'execute', fake_execute)
        fw = base_firewall.NoopFirewallDriver()
        self.stubs.Set(self.drvr, 'firewall_driver', fw)
        self.stubs.Set(self.drvr, '_create_domain_and_network',
                       fake_create_domain)
        self.stubs.Set(nova.virt.libvirt.guest.Guest, 'enable_hairpin',
                       fake_enable_hairpin)
        self.stubs.Set(self.drvr, 'get_info',
                       fake_get_info)
        self.stubs.Set(utils, 'get_image_from_system_metadata', lambda *a: {})
        with utils.tempdir() as tmpdir:
            self.flags(instances_path=tmpdir)
            ins_ref = self._create_instance()
            os.mkdir(os.path.join(tmpdir, ins_ref['name']))
            libvirt_xml_path = os.path.join(tmpdir,
                                            ins_ref['name'],
                                            'libvirt.xml')
            # Create an empty libvirt.xml for the driver to pick up.
            f = open(libvirt_xml_path, 'w')
            f.close()
            self.drvr.finish_revert_migration(
                context.get_admin_context(), ins_ref,
                [], None, power_on)
            self.assertTrue(self.fake_create_domain_called)
def test_finish_revert_migration_power_on(self):
self._test_finish_revert_migration(True)
def test_finish_revert_migration_power_off(self):
self._test_finish_revert_migration(False)
    def _test_finish_revert_migration_after_crash(self, backup_made=True,
                                                  del_inst_failed=False):
        """finish_revert_migration after a compute crash: if the *_resize
        backup dir exists it is moved back over the instance dir, and a
        failure deleting the instance dir is tolerated.

        :param backup_made: whether /fake/foo_resize exists
        :param del_inst_failed: make shutil.rmtree fail with ENOENT
        """
        class FakeLoopingCall(object):
            # Minimal stand-in so start().wait() chains without looping.
            def start(self, *a, **k):
                return self
            def wait(self):
                return None
        context = 'fake_context'
        instance = self._create_instance()
        self.mox.StubOutWithMock(libvirt_utils, 'get_instance_path')
        self.mox.StubOutWithMock(os.path, 'exists')
        self.mox.StubOutWithMock(shutil, 'rmtree')
        self.mox.StubOutWithMock(utils, 'execute')
        self.stubs.Set(blockinfo, 'get_disk_info', lambda *a: None)
        self.stubs.Set(self.drvr, '_get_guest_xml',
                       lambda *a, **k: None)
        self.stubs.Set(self.drvr, '_create_domain_and_network',
                       lambda *a, **kw: None)
        self.stubs.Set(loopingcall, 'FixedIntervalLoopingCall',
                       lambda *a, **k: FakeLoopingCall())
        # mox record phase: these calls must occur in exactly this order.
        libvirt_utils.get_instance_path(instance).AndReturn('/fake/foo')
        os.path.exists('/fake/foo_resize').AndReturn(backup_made)
        if backup_made:
            if del_inst_failed:
                os_error = OSError(errno.ENOENT, 'No such file or directory')
                shutil.rmtree('/fake/foo').AndRaise(os_error)
            else:
                shutil.rmtree('/fake/foo')
            utils.execute('mv', '/fake/foo_resize', '/fake/foo')
        self.mox.ReplayAll()
        self.drvr.finish_revert_migration(context, instance, [])
def test_finish_revert_migration_after_crash(self):
self._test_finish_revert_migration_after_crash(backup_made=True)
def test_finish_revert_migration_after_crash_before_new(self):
self._test_finish_revert_migration_after_crash(backup_made=True)
def test_finish_revert_migration_after_crash_before_backup(self):
self._test_finish_revert_migration_after_crash(backup_made=False)
def test_finish_revert_migration_after_crash_delete_failed(self):
self._test_finish_revert_migration_after_crash(backup_made=True,
del_inst_failed=True)
    def test_finish_revert_migration_preserves_disk_bus(self):
        """The hw_disk_bus image property must survive a revert and show
        up in the regenerated guest XML's disk_info."""
        def fake_get_guest_xml(context, instance, network_info, disk_info,
                               image_meta, block_device_info=None):
            self.assertEqual('ide', disk_info['disk_bus'])
        image_meta = {"properties": {"hw_disk_bus": "ide"}}
        instance = self._create_instance()
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        with contextlib.nested(
                mock.patch.object(drvr, '_create_domain_and_network'),
                mock.patch.object(utils, 'get_image_from_system_metadata',
                                  return_value=image_meta),
                mock.patch.object(drvr, '_get_guest_xml',
                                  side_effect=fake_get_guest_xml)):
            drvr.finish_revert_migration('', instance, None, power_on=False)
    def test_cleanup_failed_migration(self):
        """_cleanup_failed_migration simply removes the given directory."""
        self.mox.StubOutWithMock(shutil, 'rmtree')
        # mox record phase.
        shutil.rmtree('/fake/inst')
        self.mox.ReplayAll()
        self.drvr._cleanup_failed_migration('/fake/inst')
    def test_confirm_migration(self):
        """confirm_migration delegates to _cleanup_resize."""
        ins_ref = self._create_instance()
        self.mox.StubOutWithMock(self.drvr, "_cleanup_resize")
        # mox record phase.
        self.drvr._cleanup_resize(ins_ref,
                                  _fake_network_info(self.stubs, 1))
        self.mox.ReplayAll()
        self.drvr.confirm_migration("migration_ref", ins_ref,
                                    _fake_network_info(self.stubs, 1))
    def test_cleanup_resize_same_host(self):
        """When the instance still lives on this host, only the *_resize
        backup directory is removed."""
        CONF.set_override('policy_dirs', [])
        ins_ref = self._create_instance({'host': CONF.host})
        def fake_os_path_exists(path):
            return True
        self.stubs.Set(os.path, 'exists', fake_os_path_exists)
        self.mox.StubOutWithMock(libvirt_utils, 'get_instance_path')
        self.mox.StubOutWithMock(utils, 'execute')
        # mox record phase.
        libvirt_utils.get_instance_path(ins_ref,
                                        forceold=True).AndReturn('/fake/inst')
        utils.execute('rm', '-rf', '/fake/inst_resize', delay_on_retry=True,
                      attempts=5)
        self.mox.ReplayAll()
        self.drvr._cleanup_resize(ins_ref,
                                  _fake_network_info(self.stubs, 1))
    def test_cleanup_resize_not_same_host(self):
        """When the instance moved to another host, the local domain is
        also undefined, vifs unplugged and firewall rules removed (all
        stubbed to no-ops here)."""
        CONF.set_override('policy_dirs', [])
        host = 'not' + CONF.host
        ins_ref = self._create_instance({'host': host})
        def fake_os_path_exists(path):
            return True
        def fake_undefine_domain(instance):
            pass
        def fake_unplug_vifs(instance, network_info, ignore_errors=False):
            pass
        def fake_unfilter_instance(instance, network_info):
            pass
        self.stubs.Set(os.path, 'exists', fake_os_path_exists)
        self.stubs.Set(self.drvr, '_undefine_domain',
                       fake_undefine_domain)
        self.stubs.Set(self.drvr, 'unplug_vifs',
                       fake_unplug_vifs)
        self.stubs.Set(self.drvr.firewall_driver,
                       'unfilter_instance', fake_unfilter_instance)
        self.mox.StubOutWithMock(libvirt_utils, 'get_instance_path')
        self.mox.StubOutWithMock(utils, 'execute')
        # mox record phase.
        libvirt_utils.get_instance_path(ins_ref,
                                        forceold=True).AndReturn('/fake/inst')
        utils.execute('rm', '-rf', '/fake/inst_resize', delay_on_retry=True,
                      attempts=5)
        self.mox.ReplayAll()
        self.drvr._cleanup_resize(ins_ref,
                                  _fake_network_info(self.stubs, 1))
    def test_get_instance_disk_info_exception(self):
        """A libvirtError from XMLDesc surfaces as InstanceNotFound."""
        instance = self._create_instance()
        class FakeExceptionDomain(FakeVirtDomain):
            def __init__(self):
                super(FakeExceptionDomain, self).__init__()
            def XMLDesc(self, *args):
                raise fakelibvirt.libvirtError("Libvirt error")
        # 'self' here receives the Host object once stubbed onto the class.
        def fake_get_domain(self, instance):
            return FakeExceptionDomain()
        self.stubs.Set(host.Host, 'get_domain',
                       fake_get_domain)
        self.assertRaises(exception.InstanceNotFound,
                          self.drvr.get_instance_disk_info,
                          instance)
@mock.patch('os.path.exists')
@mock.patch('nova.virt.libvirt.lvm.list_volumes')
def test_lvm_disks(self, listlvs, exists):
instance = objects.Instance(uuid='fake-uuid', id=1)
self.flags(images_volume_group='vols', group='libvirt')
exists.return_value = True
listlvs.return_value = ['fake-uuid_foo',
'other-uuid_foo']
disks = self.drvr._lvm_disks(instance)
self.assertEqual(['/dev/vols/fake-uuid_foo'], disks)
def test_is_booted_from_volume(self):
func = libvirt_driver.LibvirtDriver._is_booted_from_volume
instance, disk_mapping = {}, {}
self.assertTrue(func(instance, disk_mapping))
disk_mapping['disk'] = 'map'
self.assertTrue(func(instance, disk_mapping))
instance['image_ref'] = 'uuid'
self.assertFalse(func(instance, disk_mapping))
    @mock.patch('nova.virt.netutils.get_injected_network_template')
    @mock.patch('nova.virt.disk.api.inject_data')
    def _test_inject_data(self, driver_params, disk_params,
                          disk_inject_data, inj_network,
                          called=True):
        """Drive LibvirtDriver._inject_data and verify the call (or
        non-call) into nova.virt.disk.api.inject_data.

        :param driver_params: kwargs passed to _inject_data
        :param disk_params: positional args expected on inject_data
        :param disk_inject_data: mock for disk.api.inject_data (decorator)
        :param inj_network: mock for get_injected_network_template
        :param called: whether inject_data is expected to be invoked
        """
        class ImageBackend(object):
            path = '/path'
            def check_image_exists(self):
                # '/fail/path' simulates a missing backing image.
                if self.path == '/fail/path':
                    return False
                return True
        def fake_inj_network(*args, **kwds):
            return args[0] or None
        inj_network.side_effect = fake_inj_network
        image_backend = ImageBackend()
        image_backend.path = disk_params[0]
        with mock.patch.object(
                self.drvr.image_backend,
                'image',
                return_value=image_backend):
            self.flags(inject_partition=0, group='libvirt')
            self.drvr._inject_data(**driver_params)
            if called:
                disk_inject_data.assert_called_once_with(
                    *disk_params,
                    partition=None, mandatory=('files',), use_cow=True)
            self.assertEqual(disk_inject_data.called, called)
def _test_inject_data_default_driver_params(self, **params):
return {
'instance': self._create_instance(params=params),
'network_info': None,
'admin_pass': None,
'files': None,
'suffix': ''
}
def test_inject_data_adminpass(self):
self.flags(inject_password=True, group='libvirt')
driver_params = self._test_inject_data_default_driver_params()
driver_params['admin_pass'] = 'foobar'
disk_params = [
'/path', # injection_path
None, # key
None, # net
{}, # metadata
'foobar', # admin_pass
None, # files
]
self._test_inject_data(driver_params, disk_params)
# Test with the configuration setted to false.
self.flags(inject_password=False, group='libvirt')
self._test_inject_data(driver_params, disk_params, called=False)
def test_inject_data_key(self):
driver_params = self._test_inject_data_default_driver_params()
driver_params['instance']['key_data'] = 'key-content'
self.flags(inject_key=True, group='libvirt')
disk_params = [
'/path', # injection_path
'key-content', # key
None, # net
{}, # metadata
None, # admin_pass
None, # files
]
self._test_inject_data(driver_params, disk_params)
# Test with the configuration setted to false.
self.flags(inject_key=False, group='libvirt')
self._test_inject_data(driver_params, disk_params, called=False)
def test_inject_data_metadata(self):
instance_metadata = {'metadata': {'data': 'foo'}}
driver_params = self._test_inject_data_default_driver_params(
**instance_metadata
)
disk_params = [
'/path', # injection_path
None, # key
None, # net
{'data': 'foo'}, # metadata
None, # admin_pass
None, # files
]
self._test_inject_data(driver_params, disk_params)
def test_inject_data_files(self):
driver_params = self._test_inject_data_default_driver_params()
driver_params['files'] = ['file1', 'file2']
disk_params = [
'/path', # injection_path
None, # key
None, # net
{}, # metadata
None, # admin_pass
['file1', 'file2'], # files
]
self._test_inject_data(driver_params, disk_params)
def test_inject_data_net(self):
driver_params = self._test_inject_data_default_driver_params()
driver_params['network_info'] = {'net': 'eno1'}
disk_params = [
'/path', # injection_path
None, # key
{'net': 'eno1'}, # net
{}, # metadata
None, # admin_pass
None, # files
]
self._test_inject_data(driver_params, disk_params)
def test_inject_not_exist_image(self):
driver_params = self._test_inject_data_default_driver_params()
disk_params = [
'/fail/path', # injection_path
'key-content', # key
None, # net
None, # metadata
None, # admin_pass
None, # files
]
self._test_inject_data(driver_params, disk_params, called=False)
    def _test_attach_detach_interface(self, method, power_state,
                                      expected_flags):
        """Drive attach_interface/detach_interface against a mox'd domain
        and check the libvirt flags used for the device change.

        :param method: 'attach_interface' or 'detach_interface'
        :param power_state: power state reported by domain.info()
        :param expected_flags: VIR_DOMAIN_AFFECT_* flags expected
        """
        instance = self._create_instance()
        network_info = _fake_network_info(self.stubs, 1)
        domain = FakeVirtDomain()
        self.mox.StubOutWithMock(host.Host, 'get_domain')
        self.mox.StubOutWithMock(self.drvr.firewall_driver,
                                 'setup_basic_filtering')
        self.mox.StubOutWithMock(domain, 'attachDeviceFlags')
        self.mox.StubOutWithMock(domain, 'info')
        host.Host.get_domain(instance).AndReturn(domain)
        if method == 'attach_interface':
            # Only attach sets up basic filtering first.
            self.drvr.firewall_driver.setup_basic_filtering(
                instance, [network_info[0]])
        if method == 'attach_interface':
            fake_image_meta = {'id': instance.image_ref}
        elif method == 'detach_interface':
            fake_image_meta = None
        # Capture the real vif config before get_config itself is mocked.
        expected = self.drvr.vif_driver.get_config(
            instance, network_info[0], fake_image_meta, instance.flavor,
            CONF.libvirt.virt_type)
        self.mox.StubOutWithMock(self.drvr.vif_driver,
                                 'get_config')
        self.drvr.vif_driver.get_config(
            instance, network_info[0],
            fake_image_meta,
            mox.IsA(objects.Flavor),
            CONF.libvirt.virt_type).AndReturn(expected)
        domain.info().AndReturn([power_state])
        if method == 'attach_interface':
            domain.attachDeviceFlags(expected.to_xml(), flags=expected_flags)
        elif method == 'detach_interface':
            domain.detachDeviceFlags(expected.to_xml(), expected_flags)
        self.mox.ReplayAll()
        if method == 'attach_interface':
            self.drvr.attach_interface(
                instance, fake_image_meta, network_info[0])
        elif method == 'detach_interface':
            self.drvr.detach_interface(
                instance, network_info[0])
        self.mox.VerifyAll()
def test_attach_interface_with_running_instance(self):
self._test_attach_detach_interface(
'attach_interface', power_state.RUNNING,
expected_flags=(fakelibvirt.VIR_DOMAIN_AFFECT_CONFIG |
fakelibvirt.VIR_DOMAIN_AFFECT_LIVE))
def test_attach_interface_with_pause_instance(self):
self._test_attach_detach_interface(
'attach_interface', power_state.PAUSED,
expected_flags=(fakelibvirt.VIR_DOMAIN_AFFECT_CONFIG |
fakelibvirt.VIR_DOMAIN_AFFECT_LIVE))
def test_attach_interface_with_shutdown_instance(self):
self._test_attach_detach_interface(
'attach_interface', power_state.SHUTDOWN,
expected_flags=(fakelibvirt.VIR_DOMAIN_AFFECT_CONFIG))
def test_detach_interface_with_running_instance(self):
self._test_attach_detach_interface(
'detach_interface', power_state.RUNNING,
expected_flags=(fakelibvirt.VIR_DOMAIN_AFFECT_CONFIG |
fakelibvirt.VIR_DOMAIN_AFFECT_LIVE))
def test_detach_interface_with_pause_instance(self):
self._test_attach_detach_interface(
'detach_interface', power_state.PAUSED,
expected_flags=(fakelibvirt.VIR_DOMAIN_AFFECT_CONFIG |
fakelibvirt.VIR_DOMAIN_AFFECT_LIVE))
def test_detach_interface_with_shutdown_instance(self):
self._test_attach_detach_interface(
'detach_interface', power_state.SHUTDOWN,
expected_flags=(fakelibvirt.VIR_DOMAIN_AFFECT_CONFIG))
    def test_rescue(self):
        """rescue() writes the unrescue XML, caches rescue
        kernel/ramdisk/disk images, regenerates guest XML, destroys the
        original domain and creates the rescue domain."""
        instance = self._create_instance({'config_drive': None})
        dummyxml = ("<domain type='kvm'><name>instance-0000000a</name>"
                    "<devices>"
                    "<disk type='file'><driver name='qemu' type='raw'/>"
                    "<source file='/test/disk'/>"
                    "<target dev='vda' bus='virtio'/></disk>"
                    "<disk type='file'><driver name='qemu' type='qcow2'/>"
                    "<source file='/test/disk.local'/>"
                    "<target dev='vdb' bus='virtio'/></disk>"
                    "</devices></domain>")
        network_info = _fake_network_info(self.stubs, 1)
        self.mox.StubOutWithMock(self.drvr,
                                 '_get_existing_domain_xml')
        self.mox.StubOutWithMock(libvirt_utils, 'write_to_file')
        self.mox.StubOutWithMock(imagebackend.Backend, 'image')
        self.mox.StubOutWithMock(imagebackend.Image, 'cache')
        self.mox.StubOutWithMock(self.drvr, '_get_guest_xml')
        self.mox.StubOutWithMock(self.drvr, '_destroy')
        self.mox.StubOutWithMock(self.drvr, '_create_domain')
        # mox record phase: existing XML is read, unrescue XML written.
        self.drvr._get_existing_domain_xml(mox.IgnoreArg(),
                    mox.IgnoreArg()).MultipleTimes().AndReturn(dummyxml)
        libvirt_utils.write_to_file(mox.IgnoreArg(), mox.IgnoreArg())
        libvirt_utils.write_to_file(mox.IgnoreArg(), mox.IgnoreArg(),
                                    mox.IgnoreArg())
        # Rescue kernel/ramdisk/disk images are created and cached.
        imagebackend.Backend.image(instance, 'kernel.rescue', 'raw'
                                   ).AndReturn(fake_imagebackend.Raw())
        imagebackend.Backend.image(instance, 'ramdisk.rescue', 'raw'
                                   ).AndReturn(fake_imagebackend.Raw())
        imagebackend.Backend.image(instance, 'disk.rescue', 'default'
                                   ).AndReturn(fake_imagebackend.Raw())
        imagebackend.Image.cache(context=mox.IgnoreArg(),
                                 fetch_func=mox.IgnoreArg(),
                                 filename=mox.IgnoreArg(),
                                 image_id=mox.IgnoreArg(),
                                 project_id=mox.IgnoreArg(),
                                 user_id=mox.IgnoreArg()).MultipleTimes()
        imagebackend.Image.cache(context=mox.IgnoreArg(),
                                 fetch_func=mox.IgnoreArg(),
                                 filename=mox.IgnoreArg(),
                                 image_id=mox.IgnoreArg(),
                                 project_id=mox.IgnoreArg(),
                                 size=None, user_id=mox.IgnoreArg())
        image_meta = {'id': 'fake', 'name': 'fake'}
        self.drvr._get_guest_xml(mox.IgnoreArg(), instance,
                                 network_info, mox.IgnoreArg(),
                                 image_meta, rescue=mox.IgnoreArg(),
                                 write_to_disk=mox.IgnoreArg()
                                 ).AndReturn(dummyxml)
        self.drvr._destroy(instance)
        self.drvr._create_domain(mox.IgnoreArg())
        self.mox.ReplayAll()
        rescue_password = 'fake_password'
        self.drvr.rescue(self.context, instance,
                         network_info, image_meta, rescue_password)
        self.mox.VerifyAll()
    @mock.patch.object(libvirt_utils, 'get_instance_path')
    @mock.patch.object(libvirt_utils, 'load_file')
    @mock.patch.object(host.Host, "get_domain")
    def test_unrescue(self, mock_get_domain, mock_load_file,
                      mock_get_instance_path):
        """unrescue() destroys the rescue domain, recreates the original
        from the saved unrescue.xml, and deletes rescue files and LVs."""
        dummyxml = ("<domain type='kvm'><name>instance-0000000a</name>"
                    "<devices>"
                    "<disk type='block' device='disk'>"
                    "<source dev='/dev/some-vg/some-lv'/>"
                    "<target dev='vda' bus='virtio'/></disk>"
                    "</devices></domain>")
        mock_get_instance_path.return_value = '/path'
        instance = objects.Instance(uuid='fake=uuid', id=1)
        fake_dom = FakeVirtDomain(fake_xml=dummyxml)
        mock_get_domain.return_value = fake_dom
        mock_load_file.return_value = "fake_unrescue_xml"
        unrescue_xml_path = os.path.join('/path', 'unrescue.xml')
        rescue_file = os.path.join('/path', 'rescue.file')
        drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        with contextlib.nested(
                mock.patch.object(drvr, '_destroy'),
                mock.patch.object(drvr, '_create_domain'),
                mock.patch.object(libvirt_utils, 'file_delete'),
                mock.patch.object(drvr, '_lvm_disks',
                                  return_value=['lvm.rescue']),
                mock.patch.object(lvm, 'remove_volumes'),
                mock.patch.object(glob, 'iglob', return_value=[rescue_file])
                ) as (mock_destroy, mock_create, mock_del, mock_lvm_disks,
                      mock_remove_volumes, mock_glob):
            drvr.unrescue(instance, None)
            mock_destroy.assert_called_once_with(instance)
            mock_create.assert_called_once_with("fake_unrescue_xml",
                                                fake_dom)
            # Both the unrescue XML and the rescue file are deleted.
            self.assertEqual(2, mock_del.call_count)
            self.assertEqual(unrescue_xml_path,
                             mock_del.call_args_list[0][0][0])
            self.assertEqual(rescue_file, mock_del.call_args_list[1][0][0])
            mock_remove_volumes.assert_called_once_with(['lvm.rescue'])
    @mock.patch(
        'nova.virt.configdrive.ConfigDriveBuilder.add_instance_metadata')
    @mock.patch('nova.virt.configdrive.ConfigDriveBuilder.make_drive')
    def test_rescue_config_drive(self, mock_make, mock_add):
        """Rescuing an instance with a config drive must rebuild the drive
        as <uuid>/disk.config.rescue and boot the rescue domain with it.
        """
        instance = self._create_instance()
        uuid = instance.uuid
        configdrive_path = uuid + '/disk.config.rescue'
        dummyxml = ("<domain type='kvm'><name>instance-0000000a</name>"
                    "<devices>"
                    "<disk type='file'><driver name='qemu' type='raw'/>"
                    "<source file='/test/disk'/>"
                    "<target dev='vda' bus='virtio'/></disk>"
                    "<disk type='file'><driver name='qemu' type='qcow2'/>"
                    "<source file='/test/disk.local'/>"
                    "<target dev='vdb' bus='virtio'/></disk>"
                    "</devices></domain>")
        network_info = _fake_network_info(self.stubs, 1)
        # mox record phase: the expectations below are order- and
        # argument-sensitive; ReplayAll() switches to verification mode.
        self.mox.StubOutWithMock(self.drvr,
                                 '_get_existing_domain_xml')
        self.mox.StubOutWithMock(libvirt_utils, 'write_to_file')
        self.mox.StubOutWithMock(imagebackend.Backend, 'image')
        self.mox.StubOutWithMock(imagebackend.Image, 'cache')
        self.mox.StubOutWithMock(instance_metadata.InstanceMetadata,
                                 '__init__')
        self.mox.StubOutWithMock(self.drvr, '_get_guest_xml')
        self.mox.StubOutWithMock(self.drvr, '_destroy')
        self.mox.StubOutWithMock(self.drvr, '_create_domain')
        self.drvr._get_existing_domain_xml(mox.IgnoreArg(),
                    mox.IgnoreArg()).MultipleTimes().AndReturn(dummyxml)
        libvirt_utils.write_to_file(mox.IgnoreArg(), mox.IgnoreArg())
        libvirt_utils.write_to_file(mox.IgnoreArg(), mox.IgnoreArg(),
                                    mox.IgnoreArg())
        # Rescue kernel/ramdisk/disk images are created through the image
        # backend; the rescue disk uses the configured default backend.
        imagebackend.Backend.image(instance, 'kernel.rescue', 'raw'
                                    ).AndReturn(fake_imagebackend.Raw())
        imagebackend.Backend.image(instance, 'ramdisk.rescue', 'raw'
                                    ).AndReturn(fake_imagebackend.Raw())
        imagebackend.Backend.image(instance, 'disk.rescue', 'default'
                                    ).AndReturn(fake_imagebackend.Raw())
        imagebackend.Image.cache(context=mox.IgnoreArg(),
                                 fetch_func=mox.IgnoreArg(),
                                 filename=mox.IgnoreArg(),
                                 image_id=mox.IgnoreArg(),
                                 project_id=mox.IgnoreArg(),
                                 user_id=mox.IgnoreArg()).MultipleTimes()
        imagebackend.Image.cache(context=mox.IgnoreArg(),
                                 fetch_func=mox.IgnoreArg(),
                                 filename=mox.IgnoreArg(),
                                 image_id=mox.IgnoreArg(),
                                 project_id=mox.IgnoreArg(),
                                 size=None, user_id=mox.IgnoreArg())
        instance_metadata.InstanceMetadata.__init__(mox.IgnoreArg(),
                                            content=mox.IgnoreArg(),
                                            extra_md=mox.IgnoreArg(),
                                            network_info=mox.IgnoreArg())
        image_meta = {'id': 'fake', 'name': 'fake'}
        self.drvr._get_guest_xml(mox.IgnoreArg(), instance,
                                 network_info, mox.IgnoreArg(),
                                 image_meta, rescue=mox.IgnoreArg(),
                                 write_to_disk=mox.IgnoreArg()
                                 ).AndReturn(dummyxml)
        self.drvr._destroy(instance)
        self.drvr._create_domain(mox.IgnoreArg())
        self.mox.ReplayAll()
        rescue_password = 'fake_password'
        self.drvr.rescue(self.context, instance, network_info,
                         image_meta, rescue_password)
        self.mox.VerifyAll()
        # The config drive builder must receive the instance metadata and
        # write the drive to the rescue-specific path.
        mock_add.assert_any_call(mock.ANY)
        expected_call = [mock.call(os.path.join(CONF.instances_path,
                                                configdrive_path))]
        mock_make.assert_has_calls(expected_call)
@mock.patch('shutil.rmtree')
@mock.patch('nova.utils.execute')
@mock.patch('os.path.exists')
@mock.patch('nova.virt.libvirt.utils.get_instance_path')
def test_delete_instance_files(self, get_instance_path, exists, exe,
shutil):
get_instance_path.return_value = '/path'
instance = objects.Instance(uuid='fake-uuid', id=1)
exists.side_effect = [False, False, True, False]
result = self.drvr.delete_instance_files(instance)
get_instance_path.assert_called_with(instance)
exe.assert_called_with('mv', '/path', '/path_del')
shutil.assert_called_with('/path_del')
self.assertTrue(result)
@mock.patch('shutil.rmtree')
@mock.patch('nova.utils.execute')
@mock.patch('os.path.exists')
@mock.patch('nova.virt.libvirt.utils.get_instance_path')
def test_delete_instance_files_resize(self, get_instance_path, exists,
exe, shutil):
get_instance_path.return_value = '/path'
instance = objects.Instance(uuid='fake-uuid', id=1)
nova.utils.execute.side_effect = [Exception(), None]
exists.side_effect = [False, False, True, False]
result = self.drvr.delete_instance_files(instance)
get_instance_path.assert_called_with(instance)
expected = [mock.call('mv', '/path', '/path_del'),
mock.call('mv', '/path_resize', '/path_del')]
self.assertEqual(expected, exe.mock_calls)
shutil.assert_called_with('/path_del')
self.assertTrue(result)
@mock.patch('shutil.rmtree')
@mock.patch('nova.utils.execute')
@mock.patch('os.path.exists')
@mock.patch('nova.virt.libvirt.utils.get_instance_path')
def test_delete_instance_files_failed(self, get_instance_path, exists, exe,
shutil):
get_instance_path.return_value = '/path'
instance = objects.Instance(uuid='fake-uuid', id=1)
exists.side_effect = [False, False, True, True]
result = self.drvr.delete_instance_files(instance)
get_instance_path.assert_called_with(instance)
exe.assert_called_with('mv', '/path', '/path_del')
shutil.assert_called_with('/path_del')
self.assertFalse(result)
@mock.patch('shutil.rmtree')
@mock.patch('nova.utils.execute')
@mock.patch('os.path.exists')
@mock.patch('nova.virt.libvirt.utils.get_instance_path')
def test_delete_instance_files_mv_failed(self, get_instance_path, exists,
exe, shutil):
get_instance_path.return_value = '/path'
instance = objects.Instance(uuid='fake-uuid', id=1)
nova.utils.execute.side_effect = Exception()
exists.side_effect = [True, True]
result = self.drvr.delete_instance_files(instance)
get_instance_path.assert_called_with(instance)
expected = [mock.call('mv', '/path', '/path_del'),
mock.call('mv', '/path_resize', '/path_del')] * 2
self.assertEqual(expected, exe.mock_calls)
self.assertFalse(result)
@mock.patch('shutil.rmtree')
@mock.patch('nova.utils.execute')
@mock.patch('os.path.exists')
@mock.patch('nova.virt.libvirt.utils.get_instance_path')
def test_delete_instance_files_resume(self, get_instance_path, exists,
exe, shutil):
get_instance_path.return_value = '/path'
instance = objects.Instance(uuid='fake-uuid', id=1)
nova.utils.execute.side_effect = Exception()
exists.side_effect = [False, False, True, False]
result = self.drvr.delete_instance_files(instance)
get_instance_path.assert_called_with(instance)
expected = [mock.call('mv', '/path', '/path_del'),
mock.call('mv', '/path_resize', '/path_del')] * 2
self.assertEqual(expected, exe.mock_calls)
self.assertTrue(result)
@mock.patch('shutil.rmtree')
@mock.patch('nova.utils.execute')
@mock.patch('os.path.exists')
@mock.patch('nova.virt.libvirt.utils.get_instance_path')
def test_delete_instance_files_none(self, get_instance_path, exists,
exe, shutil):
get_instance_path.return_value = '/path'
instance = objects.Instance(uuid='fake-uuid', id=1)
nova.utils.execute.side_effect = Exception()
exists.side_effect = [False, False, False, False]
result = self.drvr.delete_instance_files(instance)
get_instance_path.assert_called_with(instance)
expected = [mock.call('mv', '/path', '/path_del'),
mock.call('mv', '/path_resize', '/path_del')] * 2
self.assertEqual(expected, exe.mock_calls)
self.assertEqual(0, len(shutil.mock_calls))
self.assertTrue(result)
@mock.patch('shutil.rmtree')
@mock.patch('nova.utils.execute')
@mock.patch('os.path.exists')
@mock.patch('nova.virt.libvirt.utils.get_instance_path')
def test_delete_instance_files_concurrent(self, get_instance_path, exists,
exe, shutil):
get_instance_path.return_value = '/path'
instance = objects.Instance(uuid='fake-uuid', id=1)
nova.utils.execute.side_effect = [Exception(), Exception(), None]
exists.side_effect = [False, False, True, False]
result = self.drvr.delete_instance_files(instance)
get_instance_path.assert_called_with(instance)
expected = [mock.call('mv', '/path', '/path_del'),
mock.call('mv', '/path_resize', '/path_del')]
expected.append(expected[0])
self.assertEqual(expected, exe.mock_calls)
shutil.assert_called_with('/path_del')
self.assertTrue(result)
def _assert_on_id_map(self, idmap, klass, start, target, count):
self.assertIsInstance(idmap, klass)
self.assertEqual(start, idmap.start)
self.assertEqual(target, idmap.target)
self.assertEqual(count, idmap.count)
def test_get_id_maps(self):
self.flags(virt_type="lxc", group="libvirt")
CONF.libvirt.virt_type = "lxc"
CONF.libvirt.uid_maps = ["0:10000:1", "1:20000:10"]
CONF.libvirt.gid_maps = ["0:10000:1", "1:20000:10"]
idmaps = self.drvr._get_guest_idmaps()
self.assertEqual(len(idmaps), 4)
self._assert_on_id_map(idmaps[0],
vconfig.LibvirtConfigGuestUIDMap,
0, 10000, 1)
self._assert_on_id_map(idmaps[1],
vconfig.LibvirtConfigGuestUIDMap,
1, 20000, 10)
self._assert_on_id_map(idmaps[2],
vconfig.LibvirtConfigGuestGIDMap,
0, 10000, 1)
self._assert_on_id_map(idmaps[3],
vconfig.LibvirtConfigGuestGIDMap,
1, 20000, 10)
def test_get_id_maps_not_lxc(self):
CONF.libvirt.uid_maps = ["0:10000:1", "1:20000:10"]
CONF.libvirt.gid_maps = ["0:10000:1", "1:20000:10"]
idmaps = self.drvr._get_guest_idmaps()
self.assertEqual(0, len(idmaps))
def test_get_id_maps_only_uid(self):
self.flags(virt_type="lxc", group="libvirt")
CONF.libvirt.uid_maps = ["0:10000:1", "1:20000:10"]
CONF.libvirt.gid_maps = []
idmaps = self.drvr._get_guest_idmaps()
self.assertEqual(2, len(idmaps))
self._assert_on_id_map(idmaps[0],
vconfig.LibvirtConfigGuestUIDMap,
0, 10000, 1)
self._assert_on_id_map(idmaps[1],
vconfig.LibvirtConfigGuestUIDMap,
1, 20000, 10)
def test_get_id_maps_only_gid(self):
self.flags(virt_type="lxc", group="libvirt")
CONF.libvirt.uid_maps = []
CONF.libvirt.gid_maps = ["0:10000:1", "1:20000:10"]
idmaps = self.drvr._get_guest_idmaps()
self.assertEqual(2, len(idmaps))
self._assert_on_id_map(idmaps[0],
vconfig.LibvirtConfigGuestGIDMap,
0, 10000, 1)
self._assert_on_id_map(idmaps[1],
vconfig.LibvirtConfigGuestGIDMap,
1, 20000, 10)
def test_instance_on_disk(self):
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
instance = objects.Instance(uuid='fake-uuid', id=1)
self.assertFalse(drvr.instance_on_disk(instance))
def test_instance_on_disk_rbd(self):
self.flags(images_type='rbd', group='libvirt')
drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
instance = objects.Instance(uuid='fake-uuid', id=1)
self.assertTrue(drvr.instance_on_disk(instance))
def test_get_interfaces(self):
dom_xml = """
<domain type="qemu">
<devices>
<interface type="ethernet">
<mac address="fe:eb:da:ed:ef:ac"/>
<model type="virtio"/>
<target dev="eth0"/>
</interface>
<interface type="bridge">
<mac address="ca:fe:de:ad:be:ef"/>
<model type="virtio"/>
<target dev="br0"/>
</interface>
</devices>
</domain>"""
list_interfaces = ['eth0', 'br0']
drv = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
self.assertEqual(list_interfaces, drv._get_interfaces(dom_xml))
def test_get_disk_xml(self):
dom_xml = """
<domain type="kvm">
<devices>
<disk type="file">
<source file="disk1_file"/>
<target dev="vda" bus="virtio"/>
<serial>0e38683e-f0af-418f-a3f1-6b67ea0f919d</serial>
</disk>
<disk type="block">
<source dev="/path/to/dev/1"/>
<target dev="vdb" bus="virtio" serial="1234"/>
</disk>
</devices>
</domain>
"""
diska_xml = """<disk type="file" device="disk">
<source file="disk1_file"/>
<target bus="virtio" dev="vda"/>
<serial>0e38683e-f0af-418f-a3f1-6b67ea0f919d</serial>
</disk>"""
diskb_xml = """<disk type="block" device="disk">
<source dev="/path/to/dev/1"/>
<target bus="virtio" dev="vdb"/>
</disk>"""
dom = mock.MagicMock()
dom.XMLDesc.return_value = dom_xml
guest = libvirt_guest.Guest(dom)
# NOTE(gcb): etree.tostring(node) returns an extra line with
# some white spaces, need to strip it.
actual_diska_xml = guest.get_disk('vda').to_xml()
self.assertEqual(diska_xml.strip(), actual_diska_xml.strip())
actual_diskb_xml = guest.get_disk('vdb').to_xml()
self.assertEqual(diskb_xml.strip(), actual_diskb_xml.strip())
self.assertIsNone(guest.get_disk('vdc'))
    def test_vcpu_model_from_config(self):
        """_cpu_config_to_vcpu_model maps a guest CPU config onto a
        VirtCPUModel object (and updates an existing model in place).
        """
        drv = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        # No config at all -> no model.
        vcpu_model = drv._cpu_config_to_vcpu_model(None, None)
        self.assertIsNone(vcpu_model)
        cpu = vconfig.LibvirtConfigGuestCPU()
        feature1 = vconfig.LibvirtConfigGuestCPUFeature()
        feature2 = vconfig.LibvirtConfigGuestCPUFeature()
        feature1.name = 'sse'
        feature1.policy = cpumodel.POLICY_REQUIRE
        feature2.name = 'aes'
        feature2.policy = cpumodel.POLICY_REQUIRE
        cpu.features = set([feature1, feature2])
        cpu.mode = cpumodel.MODE_CUSTOM
        cpu.sockets = 1
        cpu.cores = 2
        cpu.threads = 4
        vcpu_model = drv._cpu_config_to_vcpu_model(cpu, None)
        self.assertEqual(cpumodel.MATCH_EXACT, vcpu_model.match)
        self.assertEqual(cpumodel.MODE_CUSTOM, vcpu_model.mode)
        self.assertEqual(4, vcpu_model.topology.threads)
        self.assertEqual(set(['sse', 'aes']),
                         set([f.name for f in vcpu_model.features]))
        # Passing an existing model mutates and returns that same object.
        cpu.mode = cpumodel.MODE_HOST_MODEL
        vcpu_model_1 = drv._cpu_config_to_vcpu_model(cpu, vcpu_model)
        self.assertEqual(cpumodel.MODE_HOST_MODEL, vcpu_model.mode)
        self.assertEqual(vcpu_model, vcpu_model_1)
def test_vcpu_model_to_config(self):
drv = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
feature = objects.VirtCPUFeature(policy=cpumodel.POLICY_REQUIRE,
name='sse')
feature_1 = objects.VirtCPUFeature(policy=cpumodel.POLICY_FORBID,
name='aes')
topo = objects.VirtCPUTopology(sockets=1, cores=2, threads=4)
vcpu_model = objects.VirtCPUModel(mode=cpumodel.MODE_HOST_MODEL,
features=[feature, feature_1],
topology=topo)
cpu = drv._vcpu_model_to_cpu_config(vcpu_model)
self.assertEqual(cpumodel.MODE_HOST_MODEL, cpu.mode)
self.assertEqual(1, cpu.sockets)
self.assertEqual(4, cpu.threads)
self.assertEqual(2, len(cpu.features))
self.assertEqual(set(['sse', 'aes']),
set([f.name for f in cpu.features]))
self.assertEqual(set([cpumodel.POLICY_REQUIRE,
cpumodel.POLICY_FORBID]),
set([f.policy for f in cpu.features]))
class LibvirtVolumeUsageTestCase(test.NoDBTestCase):
    """Test for LibvirtDriver.get_all_volume_usage."""

    def setUp(self):
        super(LibvirtVolumeUsageTestCase, self).setUp()
        self.drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        self.c = context.get_admin_context()
        self.ins_ref = objects.Instance(
            id=1729,
            uuid='875a8070-d0b9-4949-8b31-104d125c9a64'
        )
        # One mapping with a full /dev path and one with a bare device
        # name, so bootable-volume device paths are exercised as well.
        self.bdms = [{'volume_id': 1, 'device_name': '/dev/vde'},
                     {'volume_id': 2, 'device_name': 'vda'}]

    def test_get_all_volume_usage(self):
        """Every attached volume reports the stubbed block stats."""
        self.stubs.Set(self.drvr, 'block_stats',
                       lambda instance_name, disk: (169, 688640, 0, 0, -1))
        vol_usage = self.drvr.get_all_volume_usage(
            self.c, [dict(instance=self.ins_ref, instance_bdms=self.bdms)])
        expected_usage = [{'volume': vol_id,
                           'instance': self.ins_ref,
                           'rd_req': 169, 'rd_bytes': 688640,
                           'wr_req': 0, 'wr_bytes': 0}
                          for vol_id in (1, 2)]
        self.assertEqual(vol_usage, expected_usage)

    def test_get_all_volume_usage_device_not_found(self):
        """A vanished domain yields an empty usage list, not an error."""
        def fake_get_domain(self, instance):
            raise exception.InstanceNotFound(instance_id="fakedom")
        self.stubs.Set(host.Host, 'get_domain', fake_get_domain)
        vol_usage = self.drvr.get_all_volume_usage(
            self.c, [dict(instance=self.ins_ref, instance_bdms=self.bdms)])
        self.assertEqual(vol_usage, [])
class LibvirtNonblockingTestCase(test.NoDBTestCase):
    """Test libvirtd calls are nonblocking."""

    def setUp(self):
        super(LibvirtNonblockingTestCase, self).setUp()
        self.flags(connection_uri="test:///default",
                   group='libvirt')

    def test_connection_to_primitive(self):
        # Test bug 962840.
        import nova.virt.libvirt.driver as libvirt_driver
        drvr = libvirt_driver.LibvirtDriver('')
        drvr.set_host_enabled = mock.Mock()
        # Serializing the driver's connection attribute must not raise.
        jsonutils.to_primitive(drvr._conn, convert_instances=True)

    def test_tpool_execute_calls_libvirt(self):
        """Every libvirt call must be dispatched through eventlet's tpool
        so a blocking libvirt operation cannot stall the whole process.
        """
        conn = fakelibvirt.virConnect()
        conn.is_expected = True
        # mox record phase: each expected tpool.execute call below is
        # order-sensitive; ReplayAll() switches to verification.
        self.mox.StubOutWithMock(eventlet.tpool, 'execute')
        eventlet.tpool.execute(
            fakelibvirt.openAuth,
            'test:///default',
            mox.IgnoreArg(),
            mox.IgnoreArg()).AndReturn(conn)
        eventlet.tpool.execute(
            conn.domainEventRegisterAny,
            None,
            fakelibvirt.VIR_DOMAIN_EVENT_ID_LIFECYCLE,
            mox.IgnoreArg(),
            mox.IgnoreArg())
        # Close-callback registration only exists on newer libvirt-python.
        if hasattr(fakelibvirt.virConnect, 'registerCloseCallback'):
            eventlet.tpool.execute(
                conn.registerCloseCallback,
                mox.IgnoreArg(),
                mox.IgnoreArg())
        self.mox.ReplayAll()
        driver = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
        c = driver._get_connection()
        # The connection handed back is the one tpool.execute produced.
        self.assertEqual(True, c.is_expected)
class LibvirtVolumeSnapshotTestCase(test.NoDBTestCase):
"""Tests for libvirtDriver.volume_snapshot_create/delete."""
    def setUp(self):
        """Build a driver, an admin context and canned domain XML / delete
        payloads shared by all the snapshot create/delete tests below.
        """
        super(LibvirtVolumeSnapshotTestCase, self).setUp()
        self.drvr = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        self.c = context.get_admin_context()
        self.flags(instance_name_template='instance-%s')
        self.flags(qemu_allowed_storage_drivers=[], group='libvirt')
        # creating instance
        self.inst = {}
        self.inst['uuid'] = uuidutils.generate_uuid()
        self.inst['id'] = '1'
        # create domain info
        self.dom_xml = """
              <domain type='kvm'>
                <devices>
                  <disk type='file'>
                     <source file='disk1_file'/>
                     <target dev='vda' bus='virtio'/>
                     <serial>0e38683e-f0af-418f-a3f1-6b67ea0f919d</serial>
                  </disk>
                  <disk type='block'>
                    <source dev='/path/to/dev/1'/>
                    <target dev='vdb' bus='virtio' serial='1234'/>
                  </disk>
                </devices>
              </domain>"""
        # alternate domain info with network-backed snapshot chain
        self.dom_netdisk_xml = """
              <domain type='kvm'>
                <devices>
                  <disk type='file'>
                    <source file='disk1_file'/>
                    <target dev='vda' bus='virtio'/>
                    <serial>0e38683e-f0af-418f-a3f1-6b67eaffffff</serial>
                  </disk>
                  <disk type='network' device='disk'>
                    <driver name='qemu' type='qcow2'/>
                    <source protocol='gluster' name='vol1/root.img'>
                      <host name='server1' port='24007'/>
                    </source>
                    <backingStore type='network' index='1'>
                      <driver name='qemu' type='qcow2'/>
                      <source protocol='gluster' name='vol1/snap.img'>
                        <host name='server1' port='24007'/>
                      </source>
                      <backingStore type='network' index='2'>
                        <driver name='qemu' type='qcow2'/>
                        <source protocol='gluster' name='vol1/snap-b.img'>
                          <host name='server1' port='24007'/>
                        </source>
                        <backingStore/>
                      </backingStore>
                    </backingStore>
                    <target dev='vdb' bus='virtio'/>
                    <serial>0e38683e-f0af-418f-a3f1-6b67ea0f919d</serial>
                  </disk>
                </devices>
              </domain>
              """
        # Canned Cinder payloads: create_info for snapshot creation and
        # the delete_info_* variants for the delete code paths
        # (newest snapshot, intermediate snapshot, network disk, bogus type).
        self.create_info = {'type': 'qcow2',
                            'snapshot_id': '1234-5678',
                            'new_file': 'new-file'}
        self.volume_uuid = '0e38683e-f0af-418f-a3f1-6b67ea0f919d'
        self.snapshot_id = '9c3ca9f4-9f4e-4dba-bedd-5c5e4b52b162'
        self.delete_info_1 = {'type': 'qcow2',
                              'file_to_merge': 'snap.img',
                              'merge_target_file': None}
        self.delete_info_2 = {'type': 'qcow2',
                              'file_to_merge': 'snap.img',
                              'merge_target_file': 'other-snap.img'}
        self.delete_info_netdisk = {'type': 'qcow2',
                                    'file_to_merge': 'snap.img',
                                    'merge_target_file': 'root.img'}
        self.delete_info_invalid_type = {'type': 'made_up_type',
                                         'file_to_merge': 'some_file',
                                         'merge_target_file':
                                         'some_other_file'}
    def tearDown(self):
        """No extra teardown needed; defer to the base class."""
        super(LibvirtVolumeSnapshotTestCase, self).tearDown()
    @mock.patch('nova.virt.block_device.DriverVolumeBlockDevice.'
                'refresh_connection_info')
    @mock.patch('nova.objects.block_device.BlockDeviceMapping.'
                'get_by_volume_id')
    def test_volume_refresh_connection_info(self, mock_get_by_volume_id,
                                            mock_refresh_connection_info):
        """The helper looks up the BDM by volume id and asks the driver
        block-device wrapper to refresh its connection_info.
        """
        fake_bdm = fake_block_device.FakeDbBlockDeviceDict({
            'id': 123,
            'instance_uuid': 'fake-instance',
            'device_name': '/dev/sdb',
            'source_type': 'volume',
            'destination_type': 'volume',
            'volume_id': 'fake-volume-id-1',
            'connection_info': '{"fake": "connection_info"}'})
        mock_get_by_volume_id.return_value = fake_bdm
        self.drvr._volume_refresh_connection_info(self.c, self.inst,
                                                  self.volume_uuid)
        mock_get_by_volume_id.assert_called_once_with(self.c, self.volume_uuid)
        mock_refresh_connection_info.assert_called_once_with(self.c, self.inst,
            self.drvr._volume_api, self.drvr)
    def test_volume_snapshot_create(self, quiesce=True):
        """Test snapshot creation with file-based disk.

        With quiesce=False this also verifies the fallback: a failed
        quiesced snapshot is retried without the QUIESCE flag.
        """
        self.flags(instance_name_template='instance-%s')
        self.mox.StubOutWithMock(self.drvr._host, 'get_domain')
        self.mox.StubOutWithMock(self.drvr, '_volume_api')

        instance = objects.Instance(**self.inst)

        new_file = 'new-file'

        domain = FakeVirtDomain(fake_xml=self.dom_xml)
        self.mox.StubOutWithMock(domain, 'XMLDesc')
        self.mox.StubOutWithMock(domain, 'snapshotCreateXML')
        domain.XMLDesc(0).AndReturn(self.dom_xml)

        # Only the target volume (vda) is snapshotted externally; every
        # other disk must be marked snapshot="no".
        snap_xml_src = (
           '<domainsnapshot>\n'
           '  <disks>\n'
           '    <disk name="disk1_file" snapshot="external" type="file">\n'
           '      <source file="new-file"/>\n'
           '    </disk>\n'
           '    <disk name="vdb" snapshot="no"/>\n'
           '  </disks>\n'
           '</domainsnapshot>\n')

        # Older versions of libvirt may be missing these.
        fakelibvirt.VIR_DOMAIN_SNAPSHOT_CREATE_REUSE_EXT = 32
        fakelibvirt.VIR_DOMAIN_SNAPSHOT_CREATE_QUIESCE = 64

        snap_flags = (fakelibvirt.VIR_DOMAIN_SNAPSHOT_CREATE_DISK_ONLY |
                      fakelibvirt.VIR_DOMAIN_SNAPSHOT_CREATE_NO_METADATA |
                      fakelibvirt.VIR_DOMAIN_SNAPSHOT_CREATE_REUSE_EXT)

        snap_flags_q = (snap_flags |
                        fakelibvirt.VIR_DOMAIN_SNAPSHOT_CREATE_QUIESCE)

        if quiesce:
            domain.snapshotCreateXML(snap_xml_src, snap_flags_q)
        else:
            # Quiesce attempt fails (no qemu guest agent); the driver must
            # retry without the QUIESCE flag.
            domain.snapshotCreateXML(snap_xml_src, snap_flags_q).\
                AndRaise(fakelibvirt.libvirtError(
                            'quiescing failed, no qemu-ga'))
            domain.snapshotCreateXML(snap_xml_src, snap_flags).AndReturn(0)

        self.mox.ReplayAll()

        self.drvr._volume_snapshot_create(self.c, instance, domain,
                                          self.volume_uuid, new_file)

        self.mox.VerifyAll()
def test_volume_snapshot_create_libgfapi(self, quiesce=True):
"""Test snapshot creation with libgfapi network disk."""
self.flags(instance_name_template = 'instance-%s')
self.flags(qemu_allowed_storage_drivers = ['gluster'], group='libvirt')
self.mox.StubOutWithMock(self.drvr._host, 'get_domain')
self.mox.StubOutWithMock(self.drvr, '_volume_api')
self.dom_xml = """
<domain type='kvm'>
<devices>
<disk type='file'>
<source file='disk1_file'/>
<target dev='vda' bus='virtio'/>
<serial>0e38683e-f0af-418f-a3f1-6b67ea0f919d</serial>
</disk>
<disk type='block'>
<source protocol='gluster' name='gluster1/volume-1234'>
<host name='127.3.4.5' port='24007'/>
</source>
<target dev='vdb' bus='virtio' serial='1234'/>
</disk>
</devices>
</domain>"""
instance = objects.Instance(**self.inst)
new_file = 'new-file'
domain = FakeVirtDomain(fake_xml=self.dom_xml)
self.mox.StubOutWithMock(domain, 'XMLDesc')
self.mox.StubOutWithMock(domain, 'snapshotCreateXML')
domain.XMLDesc(0).AndReturn(self.dom_xml)
snap_xml_src = (
'<domainsnapshot>\n'
' <disks>\n'
' <disk name="disk1_file" snapshot="external" type="file">\n'
' <source file="new-file"/>\n'
' </disk>\n'
' <disk name="vdb" snapshot="no"/>\n'
' </disks>\n'
'</domainsnapshot>\n')
# Older versions of libvirt may be missing these.
fakelibvirt.VIR_DOMAIN_SNAPSHOT_CREATE_REUSE_EXT = 32
fakelibvirt.VIR_DOMAIN_SNAPSHOT_CREATE_QUIESCE = 64
snap_flags = (fakelibvirt.VIR_DOMAIN_SNAPSHOT_CREATE_DISK_ONLY |
fakelibvirt.VIR_DOMAIN_SNAPSHOT_CREATE_NO_METADATA |
fakelibvirt.VIR_DOMAIN_SNAPSHOT_CREATE_REUSE_EXT)
snap_flags_q = (snap_flags |
fakelibvirt.VIR_DOMAIN_SNAPSHOT_CREATE_QUIESCE)
if quiesce:
domain.snapshotCreateXML(snap_xml_src, snap_flags_q)
else:
domain.snapshotCreateXML(snap_xml_src, snap_flags_q).\
AndRaise(fakelibvirt.libvirtError(
'quiescing failed, no qemu-ga'))
domain.snapshotCreateXML(snap_xml_src, snap_flags).AndReturn(0)
self.mox.ReplayAll()
self.drvr._volume_snapshot_create(self.c, instance, domain,
self.volume_uuid, new_file)
self.mox.VerifyAll()
    def test_volume_snapshot_create_noquiesce(self):
        """Re-run the create test exercising the quiesce-failure fallback."""
        self.test_volume_snapshot_create(quiesce=False)
    def test_volume_snapshot_create_outer_success(self):
        """On success the snapshot status is set to 'creating', polled to
        'available', and the volume connection info refreshed.
        """
        instance = objects.Instance(**self.inst)

        domain = FakeVirtDomain(fake_xml=self.dom_xml)

        self.mox.StubOutWithMock(self.drvr._host, 'get_domain')
        self.mox.StubOutWithMock(self.drvr, '_volume_api')
        self.mox.StubOutWithMock(self.drvr, '_volume_snapshot_create')

        self.drvr._host.get_domain(instance).AndReturn(domain)
        self.drvr._volume_snapshot_create(self.c,
                                          instance,
                                          domain,
                                          self.volume_uuid,
                                          self.create_info['new_file'])

        self.drvr._volume_api.update_snapshot_status(
            self.c, self.create_info['snapshot_id'], 'creating')

        self.mox.StubOutWithMock(self.drvr._volume_api, 'get_snapshot')
        self.drvr._volume_api.get_snapshot(self.c,
            self.create_info['snapshot_id']).AndReturn({'status': 'available'})
        self.mox.StubOutWithMock(self.drvr, '_volume_refresh_connection_info')
        self.drvr._volume_refresh_connection_info(self.c, instance,
                                                  self.volume_uuid)

        self.mox.ReplayAll()

        self.drvr.volume_snapshot_create(self.c, instance, self.volume_uuid,
                                         self.create_info)
    def test_volume_snapshot_create_outer_failure(self):
        """If the inner create raises, the snapshot status must be set to
        'error' and the exception re-raised to the caller.
        """
        instance = objects.Instance(**self.inst)

        domain = FakeVirtDomain(fake_xml=self.dom_xml)

        self.mox.StubOutWithMock(self.drvr._host, 'get_domain')
        self.mox.StubOutWithMock(self.drvr, '_volume_api')
        self.mox.StubOutWithMock(self.drvr, '_volume_snapshot_create')

        self.drvr._host.get_domain(instance).AndReturn(domain)
        self.drvr._volume_snapshot_create(self.c,
                                          instance,
                                          domain,
                                          self.volume_uuid,
                                          self.create_info['new_file']).\
            AndRaise(exception.NovaException('oops'))

        self.drvr._volume_api.update_snapshot_status(
            self.c, self.create_info['snapshot_id'], 'error')

        self.mox.ReplayAll()

        self.assertRaises(exception.NovaException,
                          self.drvr.volume_snapshot_create,
                          self.c,
                          instance,
                          self.volume_uuid,
                          self.create_info)
def test_volume_snapshot_delete_1(self):
"""Deleting newest snapshot -- blockRebase."""
# libvirt lib doesn't have VIR_DOMAIN_BLOCK_REBASE_RELATIVE flag
fakelibvirt.__dict__.pop('VIR_DOMAIN_BLOCK_REBASE_RELATIVE')
self.stubs.Set(libvirt_driver, 'libvirt', fakelibvirt)
instance = objects.Instance(**self.inst)
snapshot_id = 'snapshot-1234'
domain = FakeVirtDomain(fake_xml=self.dom_xml)
self.mox.StubOutWithMock(domain, 'XMLDesc')
domain.XMLDesc(0).AndReturn(self.dom_xml)
self.mox.StubOutWithMock(self.drvr._host, 'get_domain')
self.mox.StubOutWithMock(self.drvr._host, 'has_min_version')
self.mox.StubOutWithMock(domain, 'blockRebase')
self.mox.StubOutWithMock(domain, 'blockCommit')
self.mox.StubOutWithMock(domain, 'blockJobInfo')
self.drvr._host.get_domain(instance).AndReturn(domain)
self.drvr._host.has_min_version(mox.IgnoreArg()).AndReturn(True)
domain.blockRebase('vda', 'snap.img', 0, 0)
domain.blockJobInfo('vda', 0).AndReturn({'cur': 1, 'end': 1000})
domain.blockJobInfo('vda', 0).AndReturn({'cur': 1000, 'end': 1000})
self.mox.ReplayAll()
self.drvr._volume_snapshot_delete(self.c, instance, self.volume_uuid,
snapshot_id, self.delete_info_1)
self.mox.VerifyAll()
fakelibvirt.__dict__.update({'VIR_DOMAIN_BLOCK_REBASE_RELATIVE': 8})
    def test_volume_snapshot_delete_relative_1(self):
        """Deleting newest snapshot -- blockRebase using relative flag"""
        self.stubs.Set(libvirt_driver, 'libvirt', fakelibvirt)

        instance = objects.Instance(**self.inst)
        snapshot_id = 'snapshot-1234'

        domain = FakeVirtDomain(fake_xml=self.dom_xml)
        self.mox.StubOutWithMock(domain, 'XMLDesc')
        domain.XMLDesc(0).AndReturn(self.dom_xml)

        self.mox.StubOutWithMock(self.drvr._host, 'get_domain')
        self.mox.StubOutWithMock(self.drvr._host, 'has_min_version')
        self.mox.StubOutWithMock(domain, 'blockRebase')
        self.mox.StubOutWithMock(domain, 'blockCommit')
        self.mox.StubOutWithMock(domain, 'blockJobInfo')

        self.drvr._host.get_domain(instance).AndReturn(domain)
        self.drvr._host.has_min_version(mox.IgnoreArg()).AndReturn(True)

        # With a new enough libvirt the RELATIVE flag is passed so backing
        # chain paths stay relative; then poll the job to completion.
        domain.blockRebase('vda', 'snap.img', 0,
                           fakelibvirt.VIR_DOMAIN_BLOCK_REBASE_RELATIVE)

        domain.blockJobInfo('vda', 0).AndReturn({'cur': 1, 'end': 1000})
        domain.blockJobInfo('vda', 0).AndReturn({'cur': 1000, 'end': 1000})

        self.mox.ReplayAll()

        self.drvr._volume_snapshot_delete(self.c, instance, self.volume_uuid,
                                          snapshot_id, self.delete_info_1)

        self.mox.VerifyAll()
def test_volume_snapshot_delete_2(self):
"""Deleting older snapshot -- blockCommit."""
# libvirt lib doesn't have VIR_DOMAIN_BLOCK_COMMIT_RELATIVE
fakelibvirt.__dict__.pop('VIR_DOMAIN_BLOCK_COMMIT_RELATIVE')
self.stubs.Set(libvirt_driver, 'libvirt', fakelibvirt)
instance = objects.Instance(**self.inst)
snapshot_id = 'snapshot-1234'
domain = FakeVirtDomain(fake_xml=self.dom_xml)
self.mox.StubOutWithMock(domain, 'XMLDesc')
domain.XMLDesc(0).AndReturn(self.dom_xml)
self.mox.StubOutWithMock(self.drvr._host, 'get_domain')
self.mox.StubOutWithMock(self.drvr._host, 'has_min_version')
self.mox.StubOutWithMock(domain, 'blockRebase')
self.mox.StubOutWithMock(domain, 'blockCommit')
self.mox.StubOutWithMock(domain, 'blockJobInfo')
self.drvr._host.get_domain(instance).AndReturn(domain)
self.drvr._host.has_min_version(mox.IgnoreArg()).AndReturn(True)
self.mox.ReplayAll()
self.assertRaises(exception.Invalid,
self.drvr._volume_snapshot_delete,
self.c,
instance,
self.volume_uuid,
snapshot_id,
self.delete_info_2)
fakelibvirt.__dict__.update({'VIR_DOMAIN_BLOCK_COMMIT_RELATIVE': 4})
    def test_volume_snapshot_delete_relative_2(self):
        """Deleting older snapshot -- blockCommit using relative flag"""
        self.stubs.Set(libvirt_driver, 'libvirt', fakelibvirt)

        instance = objects.Instance(**self.inst)
        snapshot_id = 'snapshot-1234'

        domain = FakeVirtDomain(fake_xml=self.dom_xml)
        self.mox.StubOutWithMock(domain, 'XMLDesc')
        domain.XMLDesc(0).AndReturn(self.dom_xml)

        self.mox.StubOutWithMock(self.drvr._host, 'get_domain')
        self.mox.StubOutWithMock(self.drvr._host, 'has_min_version')
        self.mox.StubOutWithMock(domain, 'blockRebase')
        self.mox.StubOutWithMock(domain, 'blockCommit')
        self.mox.StubOutWithMock(domain, 'blockJobInfo')

        self.drvr._host.get_domain(instance).AndReturn(domain)
        self.drvr._host.has_min_version(mox.IgnoreArg()).AndReturn(True)

        # Intermediate snapshot: commit snap.img down into other-snap.img.
        # The second blockJobInfo returning {} signals the job is gone.
        domain.blockCommit('vda', 'other-snap.img', 'snap.img', 0,
                           fakelibvirt.VIR_DOMAIN_BLOCK_COMMIT_RELATIVE)

        domain.blockJobInfo('vda', 0).AndReturn({'cur': 1, 'end': 1000})
        domain.blockJobInfo('vda', 0).AndReturn({})

        self.mox.ReplayAll()

        self.drvr._volume_snapshot_delete(self.c, instance, self.volume_uuid,
                                          snapshot_id, self.delete_info_2)

        self.mox.VerifyAll()
    def test_volume_snapshot_delete_outer_success(self):
        """On success the snapshot status is set to 'deleting' and the
        volume connection info refreshed.
        """
        instance = objects.Instance(**self.inst)
        snapshot_id = 'snapshot-1234'

        FakeVirtDomain(fake_xml=self.dom_xml)

        self.mox.StubOutWithMock(self.drvr._host, 'get_domain')
        self.mox.StubOutWithMock(self.drvr, '_volume_api')
        self.mox.StubOutWithMock(self.drvr, '_volume_snapshot_delete')

        self.drvr._volume_snapshot_delete(self.c,
                                          instance,
                                          self.volume_uuid,
                                          snapshot_id,
                                          delete_info=self.delete_info_1)

        self.drvr._volume_api.update_snapshot_status(
            self.c, snapshot_id, 'deleting')

        self.mox.StubOutWithMock(self.drvr, '_volume_refresh_connection_info')
        self.drvr._volume_refresh_connection_info(self.c, instance,
                                                  self.volume_uuid)

        self.mox.ReplayAll()

        self.drvr.volume_snapshot_delete(self.c, instance, self.volume_uuid,
                                         snapshot_id,
                                         self.delete_info_1)

        self.mox.VerifyAll()
    def test_volume_snapshot_delete_outer_failure(self):
        """If the inner delete raises, the snapshot status must be set to
        'error_deleting' and the exception re-raised to the caller.
        """
        instance = objects.Instance(**self.inst)
        snapshot_id = '1234-9876'

        FakeVirtDomain(fake_xml=self.dom_xml)

        self.mox.StubOutWithMock(self.drvr._host, 'get_domain')
        self.mox.StubOutWithMock(self.drvr, '_volume_api')
        self.mox.StubOutWithMock(self.drvr, '_volume_snapshot_delete')

        self.drvr._volume_snapshot_delete(self.c,
                                          instance,
                                          self.volume_uuid,
                                          snapshot_id,
                                          delete_info=self.delete_info_1).\
            AndRaise(exception.NovaException('oops'))

        self.drvr._volume_api.update_snapshot_status(
            self.c, snapshot_id, 'error_deleting')

        self.mox.ReplayAll()

        self.assertRaises(exception.NovaException,
                          self.drvr.volume_snapshot_delete,
                          self.c,
                          instance,
                          self.volume_uuid,
                          snapshot_id,
                          self.delete_info_1)

        self.mox.VerifyAll()
    def test_volume_snapshot_delete_invalid_type(self):
        """An unknown delete_info type must fail and mark the snapshot as
        'error_deleting'.
        """
        instance = objects.Instance(**self.inst)

        FakeVirtDomain(fake_xml=self.dom_xml)

        self.mox.StubOutWithMock(self.drvr._host, 'get_domain')
        self.mox.StubOutWithMock(self.drvr, '_volume_api')
        self.mox.StubOutWithMock(self.drvr._host, 'has_min_version')

        self.drvr._host.has_min_version(mox.IgnoreArg()).AndReturn(True)

        self.drvr._volume_api.update_snapshot_status(
            self.c, self.snapshot_id, 'error_deleting')

        self.mox.ReplayAll()

        self.assertRaises(exception.NovaException,
                          self.drvr.volume_snapshot_delete,
                          self.c,
                          instance,
                          self.volume_uuid,
                          self.snapshot_id,
                          self.delete_info_invalid_type)
def test_volume_snapshot_delete_netdisk_1(self):
    """Delete newest snapshot -- blockRebase for libgfapi/network disk."""

    class FakeNetdiskDomain(FakeVirtDomain):
        def __init__(self, *args, **kwargs):
            super(FakeNetdiskDomain, self).__init__(*args, **kwargs)

        def XMLDesc(self, *args):
            return self.dom_netdisk_xml

    # libvirt lib doesn't have VIR_DOMAIN_BLOCK_REBASE_RELATIVE
    fakelibvirt.__dict__.pop('VIR_DOMAIN_BLOCK_REBASE_RELATIVE')
    self.stubs.Set(libvirt_driver, 'libvirt', fakelibvirt)

    instance = objects.Instance(**self.inst)
    snapshot_id = 'snapshot-1234'

    domain = FakeNetdiskDomain(fake_xml=self.dom_netdisk_xml)
    self.mox.StubOutWithMock(domain, 'XMLDesc')
    domain.XMLDesc(0).AndReturn(self.dom_netdisk_xml)

    self.mox.StubOutWithMock(self.drvr._host, 'get_domain')
    self.mox.StubOutWithMock(self.drvr._host, 'has_min_version')
    self.mox.StubOutWithMock(domain, 'blockRebase')
    self.mox.StubOutWithMock(domain, 'blockCommit')
    self.mox.StubOutWithMock(domain, 'blockJobInfo')

    self.drvr._host.get_domain(instance).AndReturn(domain)
    self.drvr._host.has_min_version(mox.IgnoreArg()).AndReturn(True)

    # Without the RELATIVE constant available, the rebase is expected
    # with flags=0.
    domain.blockRebase('vdb', 'vdb[1]', 0, 0)

    # Poll until the block job reports completion.
    domain.blockJobInfo('vdb', 0).AndReturn({'cur': 1, 'end': 1000})
    domain.blockJobInfo('vdb', 0).AndReturn({'cur': 1000, 'end': 1000})

    self.mox.ReplayAll()

    self.drvr._volume_snapshot_delete(self.c, instance, self.volume_uuid,
                                      snapshot_id, self.delete_info_1)

    self.mox.VerifyAll()
    # Restore the constant removed above for subsequent tests.
    fakelibvirt.__dict__.update({'VIR_DOMAIN_BLOCK_REBASE_RELATIVE': 8})
def test_volume_snapshot_delete_netdisk_relative_1(self):
    """Delete newest snapshot -- blockRebase for libgfapi/network disk."""

    class FakeNetdiskDomain(FakeVirtDomain):
        def __init__(self, *args, **kwargs):
            super(FakeNetdiskDomain, self).__init__(*args, **kwargs)

        def XMLDesc(self, *args):
            return self.dom_netdisk_xml

    self.stubs.Set(libvirt_driver, 'libvirt', fakelibvirt)

    instance = objects.Instance(**self.inst)
    snapshot_id = 'snapshot-1234'

    domain = FakeNetdiskDomain(fake_xml=self.dom_netdisk_xml)
    self.mox.StubOutWithMock(domain, 'XMLDesc')
    domain.XMLDesc(0).AndReturn(self.dom_netdisk_xml)

    self.mox.StubOutWithMock(self.drvr._host, 'get_domain')
    self.mox.StubOutWithMock(self.drvr._host, 'has_min_version')
    self.mox.StubOutWithMock(domain, 'blockRebase')
    self.mox.StubOutWithMock(domain, 'blockCommit')
    self.mox.StubOutWithMock(domain, 'blockJobInfo')

    self.drvr._host.get_domain(instance).AndReturn(domain)
    self.drvr._host.has_min_version(mox.IgnoreArg()).AndReturn(True)

    # With the constant present the rebase must pass the RELATIVE flag.
    domain.blockRebase('vdb', 'vdb[1]', 0,
                       fakelibvirt.VIR_DOMAIN_BLOCK_REBASE_RELATIVE)

    # Poll until the block job reports completion.
    domain.blockJobInfo('vdb', 0).AndReturn({'cur': 1, 'end': 1000})
    domain.blockJobInfo('vdb', 0).AndReturn({'cur': 1000, 'end': 1000})

    self.mox.ReplayAll()

    self.drvr._volume_snapshot_delete(self.c, instance, self.volume_uuid,
                                      snapshot_id, self.delete_info_1)

    self.mox.VerifyAll()
def test_volume_snapshot_delete_netdisk_2(self):
    """Delete older snapshot -- blockCommit for libgfapi/network disk."""

    class FakeNetdiskDomain(FakeVirtDomain):
        def __init__(self, *args, **kwargs):
            super(FakeNetdiskDomain, self).__init__(*args, **kwargs)

        def XMLDesc(self, *args):
            return self.dom_netdisk_xml

    # libvirt lib doesn't have VIR_DOMAIN_BLOCK_COMMIT_RELATIVE
    fakelibvirt.__dict__.pop('VIR_DOMAIN_BLOCK_COMMIT_RELATIVE')
    self.stubs.Set(libvirt_driver, 'libvirt', fakelibvirt)

    instance = objects.Instance(**self.inst)
    snapshot_id = 'snapshot-1234'

    domain = FakeNetdiskDomain(fake_xml=self.dom_netdisk_xml)
    self.mox.StubOutWithMock(domain, 'XMLDesc')
    domain.XMLDesc(0).AndReturn(self.dom_netdisk_xml)

    self.mox.StubOutWithMock(self.drvr._host, 'get_domain')
    self.mox.StubOutWithMock(self.drvr._host, 'has_min_version')
    self.mox.StubOutWithMock(domain, 'blockRebase')
    self.mox.StubOutWithMock(domain, 'blockCommit')
    self.mox.StubOutWithMock(domain, 'blockJobInfo')

    self.drvr._host.get_domain(instance).AndReturn(domain)
    self.drvr._host.has_min_version(mox.IgnoreArg()).AndReturn(True)

    self.mox.ReplayAll()

    # With the RELATIVE constant missing, the delete must refuse to run.
    self.assertRaises(exception.Invalid,
                      self.drvr._volume_snapshot_delete,
                      self.c,
                      instance,
                      self.volume_uuid,
                      snapshot_id,
                      self.delete_info_netdisk)

    # Restore the constant removed above for subsequent tests.
    fakelibvirt.__dict__.update({'VIR_DOMAIN_BLOCK_COMMIT_RELATIVE': 4})
def test_volume_snapshot_delete_netdisk_relative_2(self):
    """Delete older snapshot -- blockCommit for libgfapi/network disk."""

    class FakeNetdiskDomain(FakeVirtDomain):
        def __init__(self, *args, **kwargs):
            super(FakeNetdiskDomain, self).__init__(*args, **kwargs)

        def XMLDesc(self, *args):
            return self.dom_netdisk_xml

    self.stubs.Set(libvirt_driver, 'libvirt', fakelibvirt)

    instance = objects.Instance(**self.inst)
    snapshot_id = 'snapshot-1234'

    domain = FakeNetdiskDomain(fake_xml=self.dom_netdisk_xml)
    self.mox.StubOutWithMock(domain, 'XMLDesc')
    domain.XMLDesc(0).AndReturn(self.dom_netdisk_xml)

    self.mox.StubOutWithMock(self.drvr._host, 'get_domain')
    self.mox.StubOutWithMock(self.drvr._host, 'has_min_version')
    self.mox.StubOutWithMock(domain, 'blockRebase')
    self.mox.StubOutWithMock(domain, 'blockCommit')
    self.mox.StubOutWithMock(domain, 'blockJobInfo')

    self.drvr._host.get_domain(instance).AndReturn(domain)
    self.drvr._host.has_min_version(mox.IgnoreArg()).AndReturn(True)

    # Expect blockCommit with the RELATIVE flag.
    domain.blockCommit('vdb', 'vdb[0]', 'vdb[1]', 0,
                       fakelibvirt.VIR_DOMAIN_BLOCK_COMMIT_RELATIVE)

    # Poll until the block job reports completion.
    domain.blockJobInfo('vdb', 0).AndReturn({'cur': 1, 'end': 1000})
    domain.blockJobInfo('vdb', 0).AndReturn({'cur': 1000, 'end': 1000})

    self.mox.ReplayAll()

    self.drvr._volume_snapshot_delete(self.c, instance, self.volume_uuid,
                                      snapshot_id,
                                      self.delete_info_netdisk)

    self.mox.VerifyAll()
def _fake_convert_image(source, dest, out_format,
                        run_as_root=True):
    """Test stand-in for images.convert_image().

    Rather than invoking any conversion tool, it simply records *dest*
    as an empty file in the fake libvirt_utils filesystem.  The other
    arguments are accepted only to satisfy the real signature.
    """
    fake_files = libvirt_driver.libvirt_utils.files
    fake_files[dest] = ''
class _BaseSnapshotTests(test.NoDBTestCase):
    """Shared fixture for snapshot tests: fake image service, fake
    libvirt_utils, a mocked task-state callback and a test instance."""

    def setUp(self):
        super(_BaseSnapshotTests, self).setUp()
        self.flags(snapshots_directory='./', group='libvirt')
        self.context = context.get_admin_context()

        # Replace the real libvirt_utils in both driver and imagebackend.
        self.useFixture(fixtures.MonkeyPatch(
            'nova.virt.libvirt.driver.libvirt_utils',
            fake_libvirt_utils))
        self.useFixture(fixtures.MonkeyPatch(
            'nova.virt.libvirt.imagebackend.libvirt_utils',
            fake_libvirt_utils))

        self.image_service = nova.tests.unit.image.fake.stub_out_image_service(
            self.stubs)

        self.mock_update_task_state = mock.Mock()

        test_instance = _create_test_instance()
        self.instance_ref = objects.Instance(**test_instance)
        self.instance_ref.info_cache = objects.InstanceInfoCache(
            network_info=None)

    def _assert_snapshot(self, snapshot, disk_format,
                         expected_properties=None):
        """Check task-state transitions and the resulting image metadata."""
        # Task state must go PENDING_UPLOAD -> UPLOADING, in that order.
        self.mock_update_task_state.assert_has_calls([
            mock.call(task_state=task_states.IMAGE_PENDING_UPLOAD),
            mock.call(task_state=task_states.IMAGE_UPLOADING,
                      expected_state=task_states.IMAGE_PENDING_UPLOAD)])

        props = snapshot['properties']
        self.assertEqual(props['image_state'], 'available')
        self.assertEqual(snapshot['status'], 'active')
        self.assertEqual(snapshot['disk_format'], disk_format)
        self.assertEqual(snapshot['name'], 'test-snap')

        if expected_properties:
            for expected_key, expected_value in \
                    six.iteritems(expected_properties):
                self.assertEqual(expected_value, props[expected_key])

    def _create_image(self, extra_properties=None):
        """Register a placeholder image; returns the received metadata."""
        properties = {'instance_id': self.instance_ref['id'],
                      'user_id': str(self.context.user_id)}
        if extra_properties:
            properties.update(extra_properties)

        sent_meta = {'name': 'test-snap',
                     'is_public': False,
                     'status': 'creating',
                     'properties': properties}

        # Create new image. It will be updated in snapshot method
        # To work with it from snapshot, the single image_service is needed
        recv_meta = self.image_service.create(self.context, sent_meta)
        return recv_meta

    @mock.patch.object(imagebackend.Image, 'resolve_driver_format')
    @mock.patch.object(host.Host, 'get_domain')
    def _snapshot(self, image_id, mock_get_domain, mock_resolve):
        """Run driver.snapshot() against a fake domain; returns the image."""
        mock_get_domain.return_value = FakeVirtDomain()
        driver = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
        driver.snapshot(self.context, self.instance_ref, image_id,
                        self.mock_update_task_state)
        snapshot = self.image_service.show(self.context, image_id)
        return snapshot

    def _test_snapshot(self, disk_format, extra_properties=None):
        """Full create -> snapshot -> assert cycle used by the subclasses."""
        recv_meta = self._create_image(extra_properties=extra_properties)
        snapshot = self._snapshot(recv_meta['id'])
        self._assert_snapshot(snapshot, disk_format=disk_format,
                              expected_properties=extra_properties)
class LibvirtSnapshotTests(_BaseSnapshotTests):
    """Snapshot tests for the default libvirt configuration."""

    def test_ami(self):
        # Assign different image_ref from nova/images/fakes for testing ami
        self.instance_ref.image_ref = 'c905cedb-7281-47e4-8a62-f26bc5fc4c77'
        self.instance_ref.system_metadata = \
            utils.get_system_metadata_from_image(
                {'disk_format': 'ami'})
        self._test_snapshot(disk_format='ami')

    @mock.patch.object(fake_libvirt_utils, 'disk_type', new='raw')
    @mock.patch.object(libvirt_driver.imagebackend.images,
                       'convert_image',
                       side_effect=_fake_convert_image)
    def test_raw(self, mock_convert_image):
        self._test_snapshot(disk_format='raw')

    def test_qcow2(self):
        self._test_snapshot(disk_format='qcow2')

    def test_no_image_architecture(self):
        self.instance_ref.image_ref = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
        self._test_snapshot(disk_format='qcow2')

    def test_no_original_image(self):
        self.instance_ref.image_ref = '661122aa-1234-dede-fefe-babababababa'
        self._test_snapshot(disk_format='qcow2')

    def test_snapshot_metadata_image(self):
        # Assign an image with an architecture defined (x86_64)
        self.instance_ref.image_ref = 'a440c04b-79fa-479c-bed1-0b816eaec379'

        extra_properties = {'architecture': 'fake_arch',
                            'key_a': 'value_a',
                            'key_b': 'value_b',
                            'os_type': 'linux'}

        self._test_snapshot(disk_format='qcow2',
                            extra_properties=extra_properties)
class LXCSnapshotTests(LibvirtSnapshotTests):
    """Repeat all of the Libvirt snapshot tests, but with LXC enabled"""

    def setUp(self):
        super(LXCSnapshotTests, self).setUp()
        # Switch the virt type so every inherited test runs under LXC.
        self.flags(virt_type='lxc', group='libvirt')
class LVMSnapshotTests(_BaseSnapshotTests):
    """Snapshot tests with the LVM image backend."""

    @mock.patch.object(fake_libvirt_utils, 'disk_type', new='lvm')
    @mock.patch.object(libvirt_driver.imagebackend.images,
                       'convert_image',
                       side_effect=_fake_convert_image)
    @mock.patch.object(libvirt_driver.imagebackend.lvm, 'volume_info')
    def _test_lvm_snapshot(self, disk_format, mock_volume_info,
                           mock_convert_image):
        self.flags(images_type='lvm',
                   images_volume_group='nova-vg', group='libvirt')

        self._test_snapshot(disk_format=disk_format)

        # The LVM volume must be inspected and converted exactly once.
        mock_volume_info.assert_has_calls([mock.call('/dev/nova-vg/lv')])
        mock_convert_image.assert_called_once_with(
            '/dev/nova-vg/lv', mock.ANY, disk_format, run_as_root=True)

    def test_raw(self):
        self._test_lvm_snapshot('raw')

    def test_qcow2(self):
        self.flags(snapshot_image_format='qcow2', group='libvirt')
        self._test_lvm_snapshot('qcow2')
| {
"content_hash": "afb6dad05bad7dffd99d067ec206dd99",
"timestamp": "",
"source": "github",
"line_count": 13712,
"max_line_length": 97,
"avg_line_length": 44.123322637106185,
"alnum_prop": 0.5402243565904542,
"repo_name": "tealover/nova",
"id": "86e5c74c32bdd62ef949f62c71fa0669748b9a31",
"size": "605675",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nova/tests/unit/virt/libvirt/test_driver.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "16143433"
},
{
"name": "Shell",
"bytes": "20716"
},
{
"name": "Smarty",
"bytes": "316543"
}
],
"symlink_target": ""
} |
'''
Created on Mar 10, 2011
@author: tomchristie
'''
# http://ericholscher.com/blog/2009/jun/29/enable-setuppy-test-your-django-apps/
# http://www.travisswicegood.com/2010/01/17/django-virtualenv-pip-and-fabric/
# http://code.djangoproject.com/svn/django/trunk/tests/runtests.py
import os
import sys
os.environ['DJANGO_SETTINGS_MODULE'] = 'djangorestframework.runtests.settings'
from django.conf import settings
from django.test.utils import get_runner
def usage():
    """Return the command-line usage message for runtests.py."""
    return """
Usage: python runtests.py [UnitTestClass].[method]
You can pass the Class name of the `UnitTestClass` you want to test.
Append a method name if you only want to test a specific method of that class.
"""
def main():
    """Run the djangorestframework test suite.

    Accepts at most one command-line argument naming a test case
    (``UnitTestClass`` or ``UnitTestClass.method``) to restrict the run.
    Exits with the number of test failures as the process status code.
    """
    TestRunner = get_runner(settings)
    test_runner = TestRunner()
    if len(sys.argv) == 2:
        test_case = '.' + sys.argv[1]
    elif len(sys.argv) == 1:
        test_case = ''
    else:
        # print(...) is valid on both Python 2 and 3; the original
        # `print usage()` statement is a SyntaxError on Python 3.
        print(usage())
        sys.exit(1)
    failures = test_runner.run_tests(['djangorestframework' + test_case])
    sys.exit(failures)

if __name__ == '__main__':
    main()
| {
"content_hash": "9437377d2e4f9b909e0904352737f3a9",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 82,
"avg_line_length": 26.682926829268293,
"alnum_prop": 0.6709323583180987,
"repo_name": "seanfisk/buzzword-bingo-server",
"id": "9f5cc7aa69e3ab43db2a563c584245d371076da1",
"size": "1094",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "djangorestframework/runtests/runtests.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "92014"
},
{
"name": "Python",
"bytes": "4681017"
}
],
"symlink_target": ""
} |
"""
A module contains an interface which represent a WAMP part of a config
"""
from zope.interface import Interface, Attribute
__author__ = 'dimd'
class IWamp(Interface):
    """
    Interface describing the WAMP section of the configuration.
    """
    # Typos fixed in the attribute descriptions below
    # ("interrval" -> "interval", "Pato" -> "Path").
    user = Attribute('WAMP CRA User')
    password = Attribute('WAMP CRA password')
    realm = Attribute('WAMP Realm')
    retry_interval = Attribute('If connection is lost, we will try to reconnect at this interval')
    path = Attribute('Path from the WAMP URL')
| {
"content_hash": "b5784ec6a68e473afbe083b74e08fc55",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 103,
"avg_line_length": 26.789473684210527,
"alnum_prop": 0.6915520628683693,
"repo_name": "dimddev/NetCatKS",
"id": "2388d24bf5f9284d2a3debd0043c2626f4d130d6",
"size": "509",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "NetCatKS/Config/api/interfaces/confguration/wamp/__init__.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "182697"
}
],
"symlink_target": ""
} |
import os
import unittest
import random
import threading
import subprocess32 as subprocess
import imath
import IECore
import IECoreImage
import Gaffer
import GafferTest
import GafferDispatch
import GafferImage
import GafferImageTest
class DisplayTest( GafferImageTest.ImageTestCase ) :
    """Tests for GafferImage.Display, fed via the IECoreImage display
    driver client/server machinery."""

    # Utility class for sending images to Display nodes.
    # This abstracts away the different image orientations between
    # Gaffer and Cortex. All Driver methods expect data with the
    # usual Gaffer conventions.
    class Driver( object ) :

        def __init__( self, format, dataWindow, channelNames, port, extraParameters = {} ) :
            # NOTE(review): mutable default for extraParameters — safe only
            # because it is never mutated here.

            self.__format = format

            parameters = {
                "displayHost" : "localHost",
                "displayPort" : str( port ),
                "remoteDisplayType" : "GafferImage::GafferDisplayDriver",
            }
            parameters.update( extraParameters )

            with GafferTest.ParallelAlgoTest.UIThreadCallHandler() as h :
                self.__driver = IECoreImage.ClientDisplayDriver(
                    self.__format.toEXRSpace( self.__format.getDisplayWindow() ),
                    self.__format.toEXRSpace( dataWindow ),
                    list( channelNames ),
                    parameters,
                )
                # Expect UI thread call used to emit Display::driverCreatedSignal()
                h.assertCalled()
                h.assertDone()

        # The channelData argument is a list of FloatVectorData
        # per channel.
        def sendBucket( self, bucketWindow, channelData ) :

            bucketSize = bucketWindow.size()
            bucketData = IECore.FloatVectorData()
            # Interleave channels per pixel, iterating rows in reverse to
            # convert between Gaffer and Cortex row order.
            for by in range( bucketSize.y - 1, -1, -1 ) :
                for bx in range( 0, bucketSize.x ) :
                    i = by * bucketSize.x + bx
                    for c in channelData :
                        bucketData.append( c[i] )

            with GafferTest.ParallelAlgoTest.UIThreadCallHandler() as h :
                self.__driver.imageData(
                    self.__format.toEXRSpace( bucketWindow ),
                    bucketData
                )
                # Expect UI thread call used to increment updateCount plug
                h.assertCalled()
                h.assertDone()

        def close( self ) :

            with GafferTest.ParallelAlgoTest.UIThreadCallHandler() as h :
                self.__driver.imageClose()
                # Expect UI thread call used to emit Display::imageReceivedSignal()
                h.assertCalled()
                h.assertDone()

        @classmethod
        def sendImage( cls, image, port, extraParameters = {}, close = True ) :
            """Send an entire image plug tile by tile; returns the Driver."""

            dataWindow = image["dataWindow"].getValue()
            channelNames = image["channelNames"].getValue()

            parameters = IECore.CompoundData()
            parameters.update( { "header:" + k : v for k, v in image["metadata"].getValue().items() } )
            parameters.update( extraParameters )

            driver = DisplayTest.Driver(
                image["format"].getValue(),
                dataWindow,
                channelNames,
                port, parameters
            )

            tileSize = GafferImage.ImagePlug.tileSize()
            minTileOrigin = GafferImage.ImagePlug.tileOrigin( dataWindow.min() )
            maxTileOrigin = GafferImage.ImagePlug.tileOrigin( dataWindow.max() - imath.V2i( 1 ) )
            for y in range( minTileOrigin.y, maxTileOrigin.y + 1, tileSize ) :
                for x in range( minTileOrigin.x, maxTileOrigin.x + 1, tileSize ) :
                    tileOrigin = imath.V2i( x, y )
                    channelData = []
                    for channelName in channelNames :
                        channelData.append( image.channelData( channelName, tileOrigin ) )
                    driver.sendBucket( imath.Box2i( tileOrigin, tileOrigin + imath.V2i( tileSize ) ), channelData )

            if close :
                driver.close()

            return driver

    def testDefaultFormat( self ) :
        # The node's output format must track the context's default format.
        d = GafferImage.Display()

        with Gaffer.Context() as c :
            self.assertEqual( d["out"]["format"].getValue(), GafferImage.FormatPlug.getDefaultFormat( c ) )
            GafferImage.FormatPlug.setDefaultFormat( c, GafferImage.Format( 200, 150, 1. ) )
            self.assertEqual( d["out"]["format"].getValue(), GafferImage.FormatPlug.getDefaultFormat( c ) )

    def testDeep( self ) :
        # Display output is never deep.
        d = GafferImage.Display()
        self.assertEqual( d["out"]["deep"].getValue(), False )

    def testTileHashes( self ) :
        """Sending a bucket must change exactly the tiles it overlaps."""

        node = GafferImage.Display()
        server = IECoreImage.DisplayDriverServer()
        driverCreatedConnection = GafferImage.Display.driverCreatedSignal().connect( lambda driver, parameters : node.setDriver( driver ), scoped = True )

        dataWindow = imath.Box2i( imath.V2i( -100, -200 ), imath.V2i( 303, 557 ) )
        driver = self.Driver(
            GafferImage.Format( dataWindow ),
            dataWindow,
            [ "Y" ],
            port = server.portNumber(),
        )

        for i in range( 0, 100 ) :

            h1 = self.__tileHashes( node, "Y" )
            t1 = self.__tiles( node, "Y" )

            # Build a random non-empty bucket window inside the data window.
            bucketWindow = imath.Box2i()
            while GafferImage.BufferAlgo.empty( bucketWindow ) :
                bucketWindow.extendBy(
                    imath.V2i(
                        int( random.uniform( dataWindow.min().x, dataWindow.max().x ) ),
                        int( random.uniform( dataWindow.min().y, dataWindow.max().y ) ),
                    )
                )

            numPixels = ( bucketWindow.size().x + 1 ) * ( bucketWindow.size().y + 1 )
            bucketData = IECore.FloatVectorData()
            bucketData.resize( numPixels, i + 1 )

            driver.sendBucket( bucketWindow, [ bucketData ] )

            h2 = self.__tileHashes( node, "Y" )
            t2 = self.__tiles( node, "Y" )

            self.__assertTilesChangedInRegion( t1, t2, bucketWindow )
            self.__assertTilesChangedInRegion( h1, h2, bucketWindow )

        driver.close()

    def testTransferChecker( self ) :

        self.__testTransferImage( "$GAFFER_ROOT/python/GafferImageTest/images/checker.exr" )

    def testTransferWithDataWindow( self ) :

        self.__testTransferImage( "$GAFFER_ROOT/python/GafferImageTest/images/checkerWithNegativeDataWindow.200x150.exr" )

    def testAccessOutsideDataWindow( self ) :
        # Tiles outside the data window must read as black.
        node = self.__testTransferImage( "$GAFFER_ROOT/python/GafferImageTest/images/checker.exr" )

        blackTile = IECore.FloatVectorData( [ 0 ] * GafferImage.ImagePlug.tileSize() * GafferImage.ImagePlug.tileSize() )

        self.assertEqual(
            node["out"].channelData( "R", -imath.V2i( GafferImage.ImagePlug.tileSize() ) ),
            blackTile
        )
        self.assertEqual(
            node["out"].channelData( "R", 10 * imath.V2i( GafferImage.ImagePlug.tileSize() ) ),
            blackTile
        )

    def testNoErrorOnBackgroundDispatch( self ) :
        # Executing a script containing a Display node in a separate
        # process must produce no output on stdout/stderr.
        s = Gaffer.ScriptNode()

        s["d"] = GafferImage.Display()

        s["p"] = GafferDispatch.PythonCommand()
        s["p"]["command"].setValue( "pass" )

        # NOTE(review): no separator between temporaryDirectory() and
        # "test.gfr"; harmless if temporaryDirectory() ends with a slash,
        # and the same concatenation is used below — confirm.
        s["fileName"].setValue( self.temporaryDirectory() + "test.gfr" )
        s.save()

        output = subprocess.check_output(
            [ "gaffer", "execute", self.temporaryDirectory() + "test.gfr", "-nodes", "p" ],
            stderr = subprocess.STDOUT, universal_newlines = True
        )
        self.assertEqual( output, "" )

    def testSetDriver( self ) :

        driversCreated = GafferTest.CapturingSlot( GafferImage.Display.driverCreatedSignal() )

        server = IECoreImage.DisplayDriverServer()
        dataWindow = imath.Box2i( imath.V2i( 0 ), imath.V2i( GafferImage.ImagePlug.tileSize() ) )

        driver = self.Driver(
            GafferImage.Format( dataWindow ),
            dataWindow,
            [ "Y" ],
            port = server.portNumber()
        )

        try:

            # NOTE(review): assertTrue( len(...), 1 ) treats 1 as the msg
            # argument, so this never fails — probably meant assertEqual.
            self.assertTrue( len( driversCreated ), 1 )

            display = GafferImage.Display()
            self.assertTrue( display.getDriver() is None )

            dirtiedPlugs = GafferTest.CapturingSlot( display.plugDirtiedSignal() )

            display.setDriver( driversCreated[0][0] )
            self.assertTrue( display.getDriver().isSame( driversCreated[0][0] ) )

            # Ensure all the output plugs have been dirtied
            expectedDirty = { "__driverCount", "__channelDataCount", "out" }.union( { c.getName() for c in display["out"].children() } )
            self.assertEqual( expectedDirty, set( e[0].getName() for e in dirtiedPlugs ) )
            del dirtiedPlugs[:]

            driver.sendBucket( dataWindow, [ IECore.FloatVectorData( [ 0.5 ] * dataWindow.size().x * dataWindow.size().y ) ] )

            self.assertEqual( display["out"]["format"].getValue().getDisplayWindow(), dataWindow )
            self.assertEqual( display["out"]["dataWindow"].getValue(), dataWindow )
            self.assertEqual( display["out"]["channelNames"].getValue(), IECore.StringVectorData( [ "Y" ] ) )
            self.assertEqual(
                display["out"].channelData( "Y", imath.V2i( 0 ) ),
                IECore.FloatVectorData( [ 0.5 ] * GafferImage.ImagePlug.tileSize() * GafferImage.ImagePlug.tileSize() )
            )

            # Ensure only channel data has been dirtied
            expectedDirty = { "channelData", "__channelDataCount", "out" }
            self.assertEqual( set( e[0].getName() for e in dirtiedPlugs ), expectedDirty )

            display2 = GafferImage.Display()
            display2.setDriver( display.getDriver(), copy = True )

            self.assertImagesEqual( display["out"], display2["out"] )

            driver.sendBucket( dataWindow, [ IECore.FloatVectorData( [ 1 ] * dataWindow.size().x * dataWindow.size().y ) ] )

            # The original display sees the new data...
            self.assertEqual(
                display["out"].channelData( "Y", imath.V2i( 0 ) ),
                IECore.FloatVectorData( [ 1 ] * GafferImage.ImagePlug.tileSize() * GafferImage.ImagePlug.tileSize() )
            )
            # ...while the copy retains the old data.
            self.assertEqual(
                display2["out"].channelData( "Y", imath.V2i( 0 ) ),
                IECore.FloatVectorData( [ 0.5 ] * GafferImage.ImagePlug.tileSize() * GafferImage.ImagePlug.tileSize() )
            )

        finally:

            driver.close()

    def __testTransferImage( self, fileName ) :
        """Round-trip *fileName* through a Display node; returns the node."""

        imageReader = GafferImage.ImageReader()
        imageReader["fileName"].setValue( os.path.expandvars( fileName ) )

        imagesReceived = GafferTest.CapturingSlot( GafferImage.Display.imageReceivedSignal() )

        node = GafferImage.Display()
        server = IECoreImage.DisplayDriverServer()
        driverCreatedConnection = GafferImage.Display.driverCreatedSignal().connect( lambda driver, parameters : node.setDriver( driver ), scoped = True )

        self.assertEqual( len( imagesReceived ), 0 )

        self.Driver.sendImage( imageReader["out"], port = server.portNumber() )

        self.assertImagesEqual( imageReader["out"], node["out"] )

        self.assertEqual( len( imagesReceived ), 1 )
        self.assertEqual( imagesReceived[0][0], node["out"] )

        return node

    def __tiles( self, node, channelName ) :
        # NOTE(review): unlike sendImage(), these ranges stop before
        # maxTileOrigin, so an edge tile may be skipped when the data
        # window is not tile-aligned — confirm intended.

        dataWindow = node["out"]["dataWindow"].getValue()

        minTileOrigin = GafferImage.ImagePlug.tileOrigin( dataWindow.min() )
        maxTileOrigin = GafferImage.ImagePlug.tileOrigin( dataWindow.max() )

        tiles = {}
        for y in range( minTileOrigin.y, maxTileOrigin.y, GafferImage.ImagePlug.tileSize() ) :
            for x in range( minTileOrigin.x, maxTileOrigin.x, GafferImage.ImagePlug.tileSize() ) :
                tiles[( x, y )] = node["out"].channelData( channelName, imath.V2i( x, y ) )

        return tiles

    def __tileHashes( self, node, channelName ) :
        # Same traversal as __tiles(), but collects channelDataHash values.

        dataWindow = node["out"]["dataWindow"].getValue()

        minTileOrigin = GafferImage.ImagePlug.tileOrigin( dataWindow.min() )
        maxTileOrigin = GafferImage.ImagePlug.tileOrigin( dataWindow.max() )

        hashes = {}
        for y in range( minTileOrigin.y, maxTileOrigin.y, GafferImage.ImagePlug.tileSize() ) :
            for x in range( minTileOrigin.x, maxTileOrigin.x, GafferImage.ImagePlug.tileSize() ) :
                hashes[( x, y )] = node["out"].channelDataHash( channelName, imath.V2i( x, y ) )

        return hashes

    def __assertTilesChangedInRegion( self, t1, t2, region ) :
        # Tiles intersecting region must differ between t1 and t2; all
        # other tiles must be untouched.
        for tileOriginTuple in t1.keys() :
            tileOrigin = imath.V2i( *tileOriginTuple )
            tileRegion = imath.Box2i( tileOrigin, tileOrigin + imath.V2i( GafferImage.ImagePlug.tileSize() ) )

            if GafferImage.BufferAlgo.intersects( tileRegion, region ) :
                self.assertNotEqual( t1[tileOriginTuple], t2[tileOriginTuple] )
            else :
                self.assertEqual( t1[tileOriginTuple], t2[tileOriginTuple] )
if __name__ == "__main__":
    # Allow the test file to be run directly.
    unittest.main()
| {
"content_hash": "31b804b6a2a5f62d854d36fc6a70654f",
"timestamp": "",
"source": "github",
"line_count": 338,
"max_line_length": 148,
"avg_line_length": 32.89349112426036,
"alnum_prop": 0.6942795466810577,
"repo_name": "hradec/gaffer",
"id": "a735d287dde54e4f28208ce37e73f6be8969a163",
"size": "12926",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/GafferImageTest/DisplayTest.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "54696"
},
{
"name": "C++",
"bytes": "8682649"
},
{
"name": "CMake",
"bytes": "85201"
},
{
"name": "GLSL",
"bytes": "6208"
},
{
"name": "Python",
"bytes": "9458935"
},
{
"name": "Ruby",
"bytes": "419"
},
{
"name": "Shell",
"bytes": "14299"
}
],
"symlink_target": ""
} |
import math

import numpy as np
def hashTable(patch, Qangle, Qstrenth, Qcoherence):
    """Map an image patch to quantized RAISR bucket indices.

    Computes the 2x2 gradient structure tensor of *patch* and derives the
    gradient angle, strength and coherence, each quantized to an integer
    index in [-1, Q-1].

    :param patch: 2-D array-like image patch.
    :param Qangle: number of angle quantization bins.
    :param Qstrenth: number of strength quantization bins.
    :param Qcoherence: number of coherence quantization bins.
    :returns: tuple of ints (angle, strength, coherence).
    """
    [gx, gy] = np.gradient(patch)

    # Stack per-pixel gradients into an (N, 2) array and form the
    # 2x2 structure tensor G^T G.  (np.matrix is discouraged; plain
    # arrays with @ behave identically here.)
    G = np.vstack((gx.ravel(), gy.ravel())).T
    x = G.T @ G
    [eigenvalues, eigenvectors] = np.linalg.eig(x)

    # For angle: fold into [0, pi).  The numpy "np.math" alias was
    # deprecated and removed from modern NumPy; use the stdlib math module.
    angle = math.atan2(eigenvectors[0, 1], eigenvectors[0, 0])
    if angle < 0:
        angle += np.pi

    # For strength: dominance of the largest eigenvalue (epsilon avoids
    # division by zero on constant patches).
    strength = eigenvalues.max() / (eigenvalues.sum() + 0.0001)

    # For coherence: normalised difference of the eigenvalue square roots.
    lamda1 = math.sqrt(eigenvalues.max())
    lamda2 = math.sqrt(eigenvalues.min())
    coherence = np.abs((lamda1 - lamda2) / (lamda1 + lamda2 + 0.0001))

    # Quantization into integer bucket indices.
    angle = np.floor(angle / (np.pi / Qangle) - 1)
    strength = np.floor(strength / (1.0 / Qstrenth) - 1)
    coherence = np.floor(coherence / (1.0 / Qcoherence) - 1)
    return int(angle), int(strength), int(coherence)
"content_hash": "b4a820ac868a7b3098b5627c512fc32e",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 62,
"avg_line_length": 30.444444444444443,
"alnum_prop": 0.6362530413625304,
"repo_name": "MKFMIKU/RAISR",
"id": "071de355d4fb834fc3ae469a0e90d2279ba1c4ae",
"size": "847",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "model/hashTable.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "6036"
},
{
"name": "Shell",
"bytes": "340"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
import datetime
import itertools
from django.test import TestCase
from django.db import IntegrityError
from django.db.models import Prefetch
from modelcluster.models import get_all_child_relations
from modelcluster.queryset import FakeQuerySet
from modelcluster.utils import ManyToManyTraversalError
from tests.models import Band, BandMember, Chef, Feature, Place, Restaurant, SeafoodRestaurant, \
Review, Album, Article, Author, Category, Person, Room, House, Log, Dish, MenuItem, Wine
class ClusterTest(TestCase):
@classmethod
def setUpClass(cls) -> None:
    """Create two chefs and their restaurants, shared by all tests."""
    super().setUpClass()
    cls.gordon_ramsay = Chef.objects.create(name="Gordon Ramsay")
    cls.strawberry_fields = Restaurant.objects.create(name="Strawberry Fields", proprietor=cls.gordon_ramsay)
    cls.marco_pierre_white = Chef.objects.create(name="Marco Pierre White")
    cls.the_yellow_submarine = Restaurant.objects.create(name="The Yellow Submarine", proprietor=cls.marco_pierre_white)
def test_can_create_cluster(self):
    """In-memory child querysets must support (most) queryset methods
    before save, and persist / sync to the database on save."""
    beatles = Band(name='The Beatles')

    self.assertEqual(0, beatles.members.count())

    beatles.members = [
        BandMember(name='John Lennon'),
        BandMember(name='Paul McCartney'),
    ]

    # we should be able to query this relation using (some) queryset methods
    self.assertEqual(2, beatles.members.count())
    self.assertEqual('John Lennon', beatles.members.all()[0].name)

    # exact / case-insensitive lookups
    self.assertEqual('Paul McCartney', beatles.members.filter(name='Paul McCartney')[0].name)
    self.assertEqual('Paul McCartney', beatles.members.filter(name__exact='Paul McCartney')[0].name)
    self.assertEqual('Paul McCartney', beatles.members.filter(name__iexact='paul mccartNEY')[0].name)

    # ordering comparisons: lt / lte / gt / gte
    self.assertEqual(0, beatles.members.filter(name__lt='B').count())
    self.assertEqual(1, beatles.members.filter(name__lt='M').count())
    self.assertEqual('John Lennon', beatles.members.filter(name__lt='M')[0].name)
    self.assertEqual(1, beatles.members.filter(name__lt='Paul McCartney').count())
    self.assertEqual('John Lennon', beatles.members.filter(name__lt='Paul McCartney')[0].name)
    self.assertEqual(2, beatles.members.filter(name__lt='Z').count())

    self.assertEqual(0, beatles.members.filter(name__lte='B').count())
    self.assertEqual(1, beatles.members.filter(name__lte='M').count())
    self.assertEqual('John Lennon', beatles.members.filter(name__lte='M')[0].name)
    self.assertEqual(2, beatles.members.filter(name__lte='Paul McCartney').count())
    self.assertEqual(2, beatles.members.filter(name__lte='Z').count())

    self.assertEqual(2, beatles.members.filter(name__gt='B').count())
    self.assertEqual(1, beatles.members.filter(name__gt='M').count())
    self.assertEqual('Paul McCartney', beatles.members.filter(name__gt='M')[0].name)
    self.assertEqual(0, beatles.members.filter(name__gt='Paul McCartney').count())

    self.assertEqual(2, beatles.members.filter(name__gte='B').count())
    self.assertEqual(1, beatles.members.filter(name__gte='M').count())
    self.assertEqual('Paul McCartney', beatles.members.filter(name__gte='M')[0].name)
    self.assertEqual(1, beatles.members.filter(name__gte='Paul McCartney').count())
    self.assertEqual('Paul McCartney', beatles.members.filter(name__gte='Paul McCartney')[0].name)
    self.assertEqual(0, beatles.members.filter(name__gte='Z').count())

    # substring / membership / prefix / suffix lookups
    self.assertEqual(1, beatles.members.filter(name__contains='Cart').count())
    self.assertEqual('Paul McCartney', beatles.members.filter(name__contains='Cart')[0].name)
    self.assertEqual(1, beatles.members.filter(name__icontains='carT').count())
    self.assertEqual('Paul McCartney', beatles.members.filter(name__icontains='carT')[0].name)
    self.assertEqual(1, beatles.members.filter(name__in=['Paul McCartney', 'Linda McCartney']).count())
    self.assertEqual('Paul McCartney', beatles.members.filter(name__in=['Paul McCartney', 'Linda McCartney'])[0].name)
    self.assertEqual(1, beatles.members.filter(name__startswith='Paul').count())
    self.assertEqual('Paul McCartney', beatles.members.filter(name__startswith='Paul')[0].name)
    self.assertEqual(1, beatles.members.filter(name__istartswith='pauL').count())
    self.assertEqual('Paul McCartney', beatles.members.filter(name__istartswith='pauL')[0].name)
    self.assertEqual(1, beatles.members.filter(name__endswith='ney').count())
    self.assertEqual('Paul McCartney', beatles.members.filter(name__endswith='ney')[0].name)
    self.assertEqual(1, beatles.members.filter(name__iendswith='Ney').count())
    self.assertEqual('Paul McCartney', beatles.members.filter(name__iendswith='Ney')[0].name)

    # get() with the same range of lookups
    self.assertEqual('Paul McCartney', beatles.members.get(name='Paul McCartney').name)
    self.assertEqual('Paul McCartney', beatles.members.get(name__exact='Paul McCartney').name)
    self.assertEqual('Paul McCartney', beatles.members.get(name__iexact='paul mccartNEY').name)
    self.assertEqual('John Lennon', beatles.members.get(name__lt='Paul McCartney').name)
    self.assertEqual('John Lennon', beatles.members.get(name__lte='M').name)
    self.assertEqual('Paul McCartney', beatles.members.get(name__gt='M').name)
    self.assertEqual('Paul McCartney', beatles.members.get(name__gte='Paul McCartney').name)
    self.assertEqual('Paul McCartney', beatles.members.get(name__contains='Cart').name)
    self.assertEqual('Paul McCartney', beatles.members.get(name__icontains='carT').name)
    self.assertEqual('Paul McCartney', beatles.members.get(name__in=['Paul McCartney', 'Linda McCartney']).name)
    self.assertEqual('Paul McCartney', beatles.members.get(name__startswith='Paul').name)
    self.assertEqual('Paul McCartney', beatles.members.get(name__istartswith='pauL').name)
    self.assertEqual('Paul McCartney', beatles.members.get(name__endswith='ney').name)
    self.assertEqual('Paul McCartney', beatles.members.get(name__iendswith='Ney').name)
    self.assertEqual('John Lennon', beatles.members.get(name__regex=r'n{2}').name)
    self.assertEqual('John Lennon', beatles.members.get(name__iregex=r'N{2}').name)

    self.assertRaises(BandMember.DoesNotExist, lambda: beatles.members.get(name='Reginald Dwight'))
    self.assertRaises(BandMember.MultipleObjectsReturned, lambda: beatles.members.get())

    self.assertTrue(beatles.members.filter(name='Paul McCartney').exists())
    self.assertFalse(beatles.members.filter(name='Reginald Dwight').exists())

    self.assertEqual('John Lennon', beatles.members.first().name)
    self.assertEqual('Paul McCartney', beatles.members.last().name)

    # Bug fix: the original used assertTrue('John Lennon', <queryset>.first()),
    # which treats the second argument as the assertion *message* and can
    # never fail. Assert the ordering result properly instead.
    self.assertEqual('John Lennon', beatles.members.order_by('name').first().name)
    self.assertEqual('Paul McCartney', beatles.members.order_by('-name').first().name)

    # these should not exist in the database yet
    self.assertFalse(Band.objects.filter(name='The Beatles').exists())
    self.assertFalse(BandMember.objects.filter(name='John Lennon').exists())

    beatles.save()
    # this should create database entries
    self.assertTrue(Band.objects.filter(name='The Beatles').exists())
    self.assertTrue(BandMember.objects.filter(name='John Lennon').exists())

    john_lennon = BandMember.objects.get(name='John Lennon')
    beatles.members = [john_lennon]

    # reassigning should take effect on the in-memory record
    self.assertEqual(1, beatles.members.count())
    # but not the database
    self.assertEqual(2, Band.objects.get(name='The Beatles').members.count())

    beatles.save()
    # now updated in the database
    self.assertEqual(1, Band.objects.get(name='The Beatles').members.count())
    self.assertEqual(1, BandMember.objects.filter(name='John Lennon').count())
    # removed member should be deleted from the db entirely
    self.assertEqual(0, BandMember.objects.filter(name='Paul McCartney').count())

    # queries on beatles.members should now revert to SQL
    self.assertTrue(beatles.members.extra(where=["tests_bandmember.name='John Lennon'"]).exists())
def test_values_list(self):
    """values_list() on an in-memory child relation should mirror Django's QuerySet API."""
    beatles = Band(
        name="The Beatles",
        members=[
            BandMember(name="John Lennon", favourite_restaurant=self.strawberry_fields),
            BandMember(name="Paul McCartney", favourite_restaurant=self.the_yellow_submarine),
            BandMember(name="George Harrison"),
            BandMember(name="Ringo Starr"),
        ],
    )
    # Not specifying 'fields' should return a tuple of all field values
    self.assertEqual(
        [
            # ID, band_id, name, favourite_restaurant_id
            # (band is unsaved, so id and band_id are still None)
            (None, None, 'Paul McCartney', self.the_yellow_submarine.id)
        ],
        list(beatles.members.filter(name='Paul McCartney').values_list())
    )
    NAME_ONLY_TUPLE = ('Paul McCartney',)
    # Specifying 'fields' should return a tuple of just those field values
    self.assertEqual([NAME_ONLY_TUPLE], list(beatles.members.filter(name='Paul McCartney').values_list('name')))
    # 'fields' can span relationships using '__'
    members = beatles.members.all().values_list('name', 'favourite_restaurant__proprietor__name')
    self.assertEqual(
        list(members),
        [
            ("John Lennon", "Gordon Ramsay"),
            ("Paul McCartney", "Marco Pierre White"),
            ("George Harrison", None),
            ("Ringo Starr", None),
        ]
    )
    # Ordering on the related fields will work too, and items with `None` values will appear first
    self.assertEqual(
        list(members.order_by('favourite_restaurant__proprietor__name')),
        [
            ("George Harrison", None),
            ("Ringo Starr", None),
            ("John Lennon", "Gordon Ramsay"),
            ("Paul McCartney", "Marco Pierre White"),
        ]
    )
    # get() should return a tuple if used after values_list()
    self.assertEqual(NAME_ONLY_TUPLE, beatles.members.filter(name='Paul McCartney').values_list('name').get())
    # first() should return a tuple if used after values_list()
    self.assertEqual(NAME_ONLY_TUPLE, beatles.members.filter(name='Paul McCartney').values_list('name').first())
    # last() should return a tuple if used after values_list()
    self.assertEqual(NAME_ONLY_TUPLE, beatles.members.filter(name='Paul McCartney').values_list('name').last())
    # And the 'flat' argument should work as it does in Django
    self.assertEqual(['Paul McCartney'], list(beatles.members.filter(name='Paul McCartney').values_list('name', flat=True)))
    # Filtering or ordering after using values_list() should not raise an error
    beatles.members.values_list("name").filter(name__contains="n").order_by("name")
def test_values(self):
    """values() on an in-memory child relation should mirror Django's QuerySet API."""
    beatles = Band(
        name="The Beatles",
        members=[
            BandMember(name="John Lennon", favourite_restaurant=self.strawberry_fields),
            BandMember(name="Paul McCartney", favourite_restaurant=self.the_yellow_submarine),
            BandMember(name="George Harrison"),
            BandMember(name="Ringo Starr"),
        ],
    )
    # Not specifying 'fields' should return dictionaries with all field values
    # (band is unsaved, so id and band are still None)
    self.assertEqual(
        [
            {"id": None, "band": None, "name": "Paul McCartney", "favourite_restaurant": self.the_yellow_submarine.id}
        ],
        list(beatles.members.filter(name='Paul McCartney').values())
    )
    NAME_ONLY_DICT = {"name": "Paul McCartney"}
    # Specifying 'fields' should return a dictionary of just those field values
    self.assertEqual([NAME_ONLY_DICT], list(beatles.members.filter(name='Paul McCartney').values('name')))
    # 'fields' can span relationships using '__'
    members = beatles.members.all().values('name', 'favourite_restaurant__proprietor__name')
    self.assertEqual(
        list(members),
        [
            {"name": "John Lennon", "favourite_restaurant__proprietor__name": "Gordon Ramsay"},
            {"name": "Paul McCartney", "favourite_restaurant__proprietor__name": "Marco Pierre White"},
            {"name": "George Harrison", "favourite_restaurant__proprietor__name": None},
            {"name": "Ringo Starr", "favourite_restaurant__proprietor__name": None},
        ]
    )
    # Ordering on the related fields will work too, and items with `None` values will appear first
    self.assertEqual(
        list(members.order_by('favourite_restaurant__proprietor__name')),
        [
            {"name": "George Harrison", "favourite_restaurant__proprietor__name": None},
            {"name": "Ringo Starr", "favourite_restaurant__proprietor__name": None},
            {"name": "John Lennon", "favourite_restaurant__proprietor__name": "Gordon Ramsay"},
            {"name": "Paul McCartney", "favourite_restaurant__proprietor__name": "Marco Pierre White"},
        ]
    )
    # get() should return a dict if used after values()
    self.assertEqual(NAME_ONLY_DICT, beatles.members.filter(name='Paul McCartney').values('name').get())
    # first() should return a dict if used after values()
    self.assertEqual(NAME_ONLY_DICT, beatles.members.filter(name='Paul McCartney').values('name').first())
    # last() should return a dict if used after values()
    self.assertEqual(NAME_ONLY_DICT, beatles.members.filter(name='Paul McCartney').values('name').last())
    # Filtering or ordering after using values() should not raise an error
    beatles.members.values("name").filter(name__contains="n").order_by("name")
def test_related_manager_assignment_ops(self):
    """add()/remove()/create()/set() should all work on an in-memory child relation."""
    band = Band(name='The Beatles')
    lennon = BandMember(name='John Lennon')
    mccartney = BandMember(name='Paul McCartney')

    band.members.add(lennon)
    self.assertEqual(band.members.count(), 1)
    band.members.add(mccartney)
    self.assertEqual(band.members.count(), 2)
    # adding the same member again must not create a duplicate
    band.members.add(mccartney)
    self.assertEqual(band.members.count(), 2)

    band.members.remove(lennon)
    self.assertEqual(band.members.count(), 1)
    self.assertEqual(band.members.all()[0], mccartney)

    # create() should both instantiate and attach the new child
    harrison = band.members.create(name='George Harrison')
    self.assertEqual(band.members.count(), 2)
    self.assertEqual(harrison.name, 'George Harrison')

    # set() replaces the whole collection
    band.members.set([lennon])
    self.assertEqual(band.members.count(), 1)
    self.assertEqual(band.members.all()[0], lennon)
def test_can_pass_child_relations_as_constructor_kwargs(self):
    """Child relations may be populated via keyword arguments to the constructor."""
    members = [
        BandMember(name='John Lennon'),
        BandMember(name='Paul McCartney'),
    ]
    band = Band(name='The Beatles', members=members)
    self.assertEqual(band.members.count(), 2)
    # each child should get its parent link pointed back at the band
    self.assertEqual(band.members.all()[0].band, band)
def test_can_access_child_relations_of_superclass(self):
    """A child relation defined on a parent model should work on a subclass instance."""
    # 'reviews' is defined on Restaurant's superclass but passed to the subclass constructor
    fat_duck = Restaurant(name='The Fat Duck', serves_hot_dogs=False, reviews=[
        Review(author='Michael Winner', body='Rubbish.')
    ])
    self.assertEqual(1, fat_duck.reviews.count())
    self.assertEqual(fat_duck.reviews.first().author, 'Michael Winner')
    self.assertEqual(fat_duck, fat_duck.reviews.all()[0].place)
    fat_duck.save()
    # ensure relations have been saved to the database
    fat_duck = Restaurant.objects.get(id=fat_duck.id)
    self.assertEqual(1, fat_duck.reviews.count())
    self.assertEqual(fat_duck.reviews.first().author, 'Michael Winner')
def test_can_only_commit_on_saved_parent(self):
    """commit() on a child relation must fail until the parent has a database row."""
    beatles = Band(name='The Beatles', members=[
        BandMember(name='John Lennon'),
        BandMember(name='Paul McCartney'),
    ])
    # the parent is unsaved, so the children have no FK target yet
    self.assertRaises(IntegrityError, lambda: beatles.members.commit())
    beatles.save()
    beatles.members.commit()
def test_integrity_error_with_none_pk(self):
    """Resetting the parent's pk to None should make commit() fail, but save() recover."""
    beatles = Band(name='The Beatles', members=[
        BandMember(name='John Lennon'),
        BandMember(name='Paul McCartney'),
    ])
    beatles.save()
    beatles.pk = None
    # with pk back to None there is no FK target, so a direct commit() must fail
    self.assertRaises(IntegrityError, lambda: beatles.members.commit())
    # this should work fine, as Django will end up cloning this entity
    beatles.save()
    self.assertEqual(Band.objects.get(pk=beatles.pk).name, 'The Beatles')
def test_model_with_zero_pk(self):
    """A pk of 0 is falsy but valid - commit() and save() must treat it as a real key."""
    beatles = Band(name='The Beatles', members=[
        BandMember(name='John Lennon'),
        BandMember(name='Paul McCartney'),
    ])
    beatles.save()
    beatles.pk = 0
    # must not be mistaken for "unsaved" just because 0 is falsy
    beatles.members.commit()
    beatles.save()
    self.assertEqual(Band.objects.get(pk=0).name, 'The Beatles')
def test_save_with_update_fields(self):
    """save(update_fields=...) should commit only the named child relations."""
    beatles = Band(name='The Beatles', members=[
        BandMember(name='John Lennon'),
        BandMember(name='Paul McCartney'),
    ], albums=[
        Album(name='Please Please Me', sort_order=1),
        Album(name='With The Beatles', sort_order=2),
        Album(name='Abbey Road', sort_order=3),
    ])
    beatles.save()
    # modify both relations, but only commit the change to members
    beatles.members.clear()
    beatles.albums.clear()
    beatles.name = 'The Rutles'
    beatles.save(update_fields=['name', 'members'])
    updated_beatles = Band.objects.get(pk=beatles.pk)
    self.assertEqual(updated_beatles.name, 'The Rutles')
    # members change was committed; albums change was not
    self.assertEqual(updated_beatles.members.count(), 0)
    self.assertEqual(updated_beatles.albums.count(), 3)
def test_queryset_filtering(self):
    """Filtering an in-memory child relation by pk, string pk, and FK values."""
    beatles = Band(name='The Beatles', members=[
        BandMember(id=1, name='John Lennon'),
        BandMember(id=2, name='Paul McCartney'),
    ])
    self.assertEqual('Paul McCartney', beatles.members.get(id=2).name)
    # string pk values should be coerced, as Django does
    self.assertEqual('Paul McCartney', beatles.members.get(id='2').name)
    self.assertEqual(1, beatles.members.filter(name='Paul McCartney').count())
    # also need to be able to filter on foreign fields that return a model instance
    # rather than a simple python value
    self.assertEqual(2, beatles.members.filter(band=beatles).count())
    # and ensure that the comparison is not treating all unsaved instances as identical
    rutles = Band(name='The Rutles')
    self.assertEqual(0, beatles.members.filter(band=rutles).count())
    # and the comparison must be on the model instance's ID where available,
    # not by reference
    beatles.save()
    beatles.members.add(BandMember(id=3, name='George Harrison'))  # modify the relation so that we're not to a plain database-backed queryset
    also_beatles = Band.objects.get(id=beatles.id)
    self.assertEqual(3, beatles.members.filter(band=also_beatles).count())
def test_queryset_filtering_on_models_with_inheritance(self):
    """FK filters should match across multi-table inheritance (Place vs Restaurant)."""
    john = BandMember(name='John Lennon', favourite_restaurant=self.strawberry_fields)
    ringo = BandMember(name='Ringo Starr', favourite_restaurant=Restaurant.objects.get(name='The Yellow Submarine'))
    beatles = Band(name='The Beatles', members=[john, ringo])
    # queried instance is less specific
    self.assertEqual(
        list(beatles.members.filter(favourite_restaurant=Place.objects.get(name='Strawberry Fields'))),
        [john]
    )
    # queried instance is more specific
    self.assertEqual(
        list(beatles.members.filter(favourite_restaurant=self.the_yellow_submarine)),
        [ringo]
    )
def test_queryset_exclude_filtering(self):
    """exclude() should support the same lookup types as filter() on FakeQuerySets."""
    beatles = Band(name='The Beatles', members=[
        BandMember(id=1, name='John Lennon'),
        BandMember(id=2, name='Paul McCartney'),
    ])
    # (lookup kwargs, name of the single member expected to survive the exclude)
    cases = [
        ({'name': 'Paul McCartney'}, 'John Lennon'),
        ({'name__exact': 'Paul McCartney'}, 'John Lennon'),
        ({'name__iexact': 'paul mccartNEY'}, 'John Lennon'),
        ({'name__lt': 'M'}, 'Paul McCartney'),
        ({'name__lt': 'Paul McCartney'}, 'Paul McCartney'),
        ({'name__lte': 'John Lennon'}, 'Paul McCartney'),
        ({'name__gt': 'M'}, 'John Lennon'),
        ({'name__gte': 'Paul McCartney'}, 'John Lennon'),
        ({'name__contains': 'Cart'}, 'John Lennon'),
        ({'name__icontains': 'carT'}, 'John Lennon'),
        ({'name__in': ['Paul McCartney', 'Linda McCartney']}, 'John Lennon'),
        ({'name__startswith': 'Paul'}, 'John Lennon'),
        ({'name__istartswith': 'pauL'}, 'John Lennon'),
        ({'name__endswith': 'ney'}, 'John Lennon'),
        ({'name__iendswith': 'Ney'}, 'John Lennon'),
    ]
    for lookup, expected_name in cases:
        excluded = beatles.members.exclude(**lookup)
        self.assertEqual(excluded.count(), 1)
        self.assertEqual(excluded.first().name, expected_name)
def test_queryset_filter_with_nulls(self):
    """Lookups must behave sensibly when field values are '' or None."""
    tmbg = Band(name="They Might Be Giants", albums=[
        Album(name="Flood", release_date=datetime.date(1990, 1, 1)),
        Album(name="John Henry", release_date=datetime.date(1994, 7, 21)),
        Album(name="Factory Showroom", release_date=datetime.date(1996, 3, 30)),
        Album(name="", release_date=None),
        Album(name=None, release_date=None),
    ])
    # exact / iexact must distinguish '', None and real values
    self.assertEqual(tmbg.albums.get(name="Flood").name, "Flood")
    self.assertEqual(tmbg.albums.get(name="").name, "")
    self.assertEqual(tmbg.albums.get(name=None).name, None)
    self.assertEqual(tmbg.albums.get(name__exact="Flood").name, "Flood")
    self.assertEqual(tmbg.albums.get(name__exact="").name, "")
    self.assertEqual(tmbg.albums.get(name__exact=None).name, None)
    self.assertEqual(tmbg.albums.get(name__iexact="flood").name, "Flood")
    self.assertEqual(tmbg.albums.get(name__iexact="").name, "")
    self.assertEqual(tmbg.albums.get(name__iexact=None).name, None)
    # substring / prefix / suffix lookups must not crash on None values
    self.assertEqual(tmbg.albums.get(name__contains="loo").name, "Flood")
    self.assertEqual(tmbg.albums.get(name__icontains="LOO").name, "Flood")
    self.assertEqual(tmbg.albums.get(name__startswith="Flo").name, "Flood")
    self.assertEqual(tmbg.albums.get(name__istartswith="flO").name, "Flood")
    self.assertEqual(tmbg.albums.get(name__endswith="ood").name, "Flood")
    self.assertEqual(tmbg.albums.get(name__iendswith="Ood").name, "Flood")
    # comparisons and membership with '' / None in the data
    self.assertEqual(tmbg.albums.get(name__lt="A").name, "")
    self.assertEqual(tmbg.albums.get(name__lte="A").name, "")
    self.assertEqual(tmbg.albums.get(name__gt="J").name, "John Henry")
    self.assertEqual(tmbg.albums.get(name__gte="J").name, "John Henry")
    self.assertEqual(tmbg.albums.get(name__in=["Flood", "Mink Car"]).name, "Flood")
    self.assertEqual(tmbg.albums.get(name__in=["", "Mink Car"]).name, "")
    self.assertEqual(tmbg.albums.get(name__in=[None, "Mink Car"]).name, None)
    # isnull only matches the genuinely-None name, not the empty string
    self.assertEqual(tmbg.albums.filter(name__isnull=True).count(), 1)
    self.assertEqual(tmbg.albums.filter(name__isnull=False).count(), 4)
    self.assertEqual(tmbg.albums.get(name__regex=r'l..d').name, "Flood")
    self.assertEqual(tmbg.albums.get(name__iregex=r'f..o').name, "Flood")
def test_date_filters(self):
    """Date/time lookups (range, year, month, ..., second) on FakeQuerySets.

    Exercises both a DateField (Album.release_date, including a None value)
    and a DateTimeField (Log.time); lookup values are given as both ints and
    strings to check that either form is accepted.
    """
    tmbg = Band(name="They Might Be Giants", albums=[
        Album(name="Flood", release_date=datetime.date(1990, 1, 1)),
        Album(name="John Henry", release_date=datetime.date(1994, 7, 21)),
        Album(name="Factory Showroom", release_date=datetime.date(1996, 3, 30)),
        Album(name="The Complete Dial-A-Song", release_date=None),
    ])
    logs = FakeQuerySet(Log, [
        Log(time=datetime.datetime(1979, 7, 1, 1, 1, 1), data="nobody died"),
        Log(time=datetime.datetime(1980, 2, 2, 2, 2, 2), data="one person died"),
        Log(time=None, data="nothing happened")
    ])
    # range lookup, on dates and datetimes
    self.assertEqual(
        tmbg.albums.get(release_date__range=(datetime.date(1994, 1, 1), datetime.date(1994, 12, 31))).name,
        "John Henry"
    )
    self.assertEqual(
        logs.get(time__range=(datetime.datetime(1980, 1, 1, 1, 1, 1), datetime.datetime(1980, 12, 31, 23, 59, 59))).data,
        "one person died"
    )
    # date lookup (truncates a datetime to its date)
    self.assertEqual(
        tmbg.albums.get(release_date__date=datetime.date(1994, 7, 21)).name,
        "John Henry"
    )
    self.assertEqual(
        logs.get(time__date=datetime.date(1980, 2, 2)).data,
        "one person died"
    )
    # year lookup - string and int values are both accepted
    self.assertEqual(
        tmbg.albums.get(release_date__year='1994').name,
        "John Henry"
    )
    self.assertEqual(
        logs.get(time__year=1980).data,
        "one person died"
    )
    # month lookup
    self.assertEqual(
        tmbg.albums.get(release_date__month=7).name,
        "John Henry"
    )
    self.assertEqual(
        logs.get(time__month='2').data,
        "one person died"
    )
    # day lookup
    self.assertEqual(
        tmbg.albums.get(release_date__day='21').name,
        "John Henry"
    )
    self.assertEqual(
        logs.get(time__day=2).data,
        "one person died"
    )
    # week lookup (ISO week number)
    self.assertEqual(
        tmbg.albums.get(release_date__week=29).name,
        "John Henry"
    )
    self.assertEqual(
        logs.get(time__week='5').data,
        "one person died"
    )
    # week_day lookup
    self.assertEqual(
        tmbg.albums.get(release_date__week_day=5).name,
        "John Henry"
    )
    self.assertEqual(
        logs.get(time__week_day=7).data,
        "one person died"
    )
    # quarter lookup
    self.assertEqual(
        tmbg.albums.get(release_date__quarter=3).name,
        "John Henry"
    )
    self.assertEqual(
        logs.get(time__quarter=1).data,
        "one person died"
    )
    # time-component lookups only make sense on the DateTimeField
    self.assertEqual(
        logs.get(time__time=datetime.time(2, 2, 2)).data,
        "one person died"
    )
    self.assertEqual(
        logs.get(time__hour=2).data,
        "one person died"
    )
    self.assertEqual(
        logs.get(time__minute='2').data,
        "one person died"
    )
    self.assertEqual(
        logs.get(time__second=2).data,
        "one person died"
    )
def test_queryset_filtering_accross_foreignkeys(self):
    """Filter lookups that traverse one or two foreign keys via '__'."""
    band = Band(
        name="The Beatles",
        members=[
            BandMember(name="John Lennon", favourite_restaurant=self.strawberry_fields),
            BandMember(name="Ringo Starr", favourite_restaurant=self.the_yellow_submarine)
        ],
    )
    # Filter over a single relationship
    # ---------------------------------------
    # Using the default/exact lookup type
    self.assertEqual(
        tuple(band.members.filter(favourite_restaurant__name="Strawberry Fields")),
        (band.members.get(name="John Lennon"),)
    )
    self.assertEqual(
        tuple(band.members.filter(favourite_restaurant__name="The Yellow Submarine")),
        (band.members.get(name="Ringo Starr"),)
    )
    # Using an alternative lookup type
    self.assertEqual(
        tuple(band.members.filter(favourite_restaurant__name__icontains="straw")),
        (band.members.get(name="John Lennon"),)
    )
    self.assertEqual(
        tuple(band.members.filter(favourite_restaurant__name__icontains="yello")),
        (band.members.get(name="Ringo Starr"),)
    )
    # Filtering over 2 relationships
    # ---------------------------------------
    # Using a default/exact field lookup
    self.assertEqual(
        tuple(band.members.filter(favourite_restaurant__proprietor__name="Gordon Ramsay")),
        (band.members.get(name="John Lennon"),)
    )
    self.assertEqual(
        tuple(band.members.filter(favourite_restaurant__proprietor__name="Marco Pierre White")),
        (band.members.get(name="Ringo Starr"),)
    )
    # Using an alternative lookup type
    self.assertEqual(
        tuple(band.members.filter(favourite_restaurant__proprietor__name__iexact="gORDON rAMSAY")),
        (band.members.get(name="John Lennon"),)
    )
    self.assertEqual(
        tuple(band.members.filter(favourite_restaurant__proprietor__name__iexact="mARCO pIERRE wHITE")),
        (band.members.get(name="Ringo Starr"),)
    )
    # Using an exact proprietor comparison (model instance as the lookup value)
    self.assertEqual(
        tuple(band.members.filter(favourite_restaurant__proprietor=self.gordon_ramsay)),
        (band.members.get(name="John Lennon"),)
    )
    self.assertEqual(
        tuple(band.members.filter(favourite_restaurant__proprietor=self.marco_pierre_white)),
        (band.members.get(name="Ringo Starr"),)
    )
def test_filtering_via_reverse_foreignkey(self):
    """Filtering on the parent link ('band__...') should traverse back to the parent."""
    band = Band(
        name="The Beatles",
        members=[
            BandMember(name="John Lennon"),
            BandMember(name="Ringo Starr"),
        ],
    )
    everyone = tuple(band.members.all())
    # a matching parent name selects every member
    self.assertEqual(tuple(band.members.filter(band__name="The Beatles")), everyone)
    # a non-matching parent name selects nobody
    self.assertEqual(tuple(band.members.filter(band__name="The Monkeys")), ())
def test_ordering_accross_foreignkeys(self):
    """order_by() keys that traverse one or two foreign keys via '__'."""
    band = Band(
        name="The Beatles",
        members=[
            BandMember(name="John Lennon", favourite_restaurant=self.strawberry_fields),
            BandMember(name="Ringo Starr", favourite_restaurant=self.the_yellow_submarine),
        ],
    )
    # Ordering across a single relationship
    # ---------------------------------------
    self.assertEqual(
        tuple(band.members.order_by("favourite_restaurant__name")),
        (
            band.members.get(name="John Lennon"),
            band.members.get(name="Ringo Starr"),
        )
    )
    # How about ordering in reverse?
    self.assertEqual(
        tuple(band.members.order_by("-favourite_restaurant__name")),
        (
            band.members.get(name="Ringo Starr"),
            band.members.get(name="John Lennon"),
        )
    )
    # Ordering across 2 relationships
    # --------------------------------
    self.assertEqual(
        tuple(band.members.order_by("favourite_restaurant__proprietor__name")),
        (
            band.members.get(name="John Lennon"),
            band.members.get(name="Ringo Starr"),
        )
    )
    # How about ordering in reverse?
    self.assertEqual(
        tuple(band.members.order_by("-favourite_restaurant__proprietor__name")),
        (
            band.members.get(name="Ringo Starr"),
            band.members.get(name="John Lennon"),
        )
    )
def test_filtering_via_manytomany_raises_exception(self):
    """FakeQuerySet filters cannot traverse M2M relations and must say so explicitly."""
    bay_window = Feature.objects.create(name="Bay window", desirability=6)
    # NOTE(review): "Underfloor heading" looks like a typo for "Underfloor heating",
    # but it is runtime test data and harmless, so it is left as-is here.
    underfloor_heating = Feature.objects.create(name="Underfloor heading", desirability=10)
    open_fire = Feature.objects.create(name="Open fire", desirability=3)
    log_burner = Feature.objects.create(name="Log burner", desirability=10)
    modern_living_room = Room.objects.create(name="Modern living room", features=[bay_window, underfloor_heating, log_burner])
    classic_living_room = Room.objects.create(name="Classic living room", features=[bay_window, open_fire])
    modern_house = House.objects.create(name="Modern house", address="1 Yellow Brick Road", main_room=modern_living_room)
    classic_house = House.objects.create(name="Classic house", address="3 Yellow Brick Road", main_room=classic_living_room)
    tenant = Person(
        name="Alex", houses=[modern_house, classic_house]
    )
    # 'main_room__features__name' crosses Room.features (a many-to-many)
    with self.assertRaises(ManyToManyTraversalError):
        tenant.houses.filter(main_room__features__name="Bay window")
def test_prefetch_related(self):
    """prefetch_related() should batch child lookups into a constant query count."""
    Band.objects.create(name='The Beatles', members=[
        BandMember(id=1, name='John Lennon'),
        BandMember(id=2, name='Paul McCartney'),
    ])
    # one query for the bands, one for all their members
    with self.assertNumQueries(2):
        lists = [list(band.members.all()) for band in Band.objects.prefetch_related('members')]
    normal_lists = [list(band.members.all()) for band in Band.objects.all()]
    # prefetching must not change the results, only the query count
    self.assertEqual(lists, normal_lists)
def test_prefetch_related_with_custom_queryset(self):
    """Prefetch objects with a custom queryset should restrict the prefetched children."""
    from django.db.models import Prefetch
    Band.objects.create(name='The Beatles', members=[
        BandMember(id=1, name='John Lennon'),
        BandMember(id=2, name='Paul McCartney'),
    ])
    # one query for the bands, one for the filtered members
    with self.assertNumQueries(2):
        lists = [
            list(band.members.all())
            for band in Band.objects.prefetch_related(
                Prefetch('members', queryset=BandMember.objects.filter(name__startswith='Paul'))
            )
        ]
    normal_lists = [list(band.members.filter(name__startswith='Paul')) for band in Band.objects.all()]
    self.assertEqual(lists, normal_lists)
def test_order_by_with_multiple_fields(self):
    """order_by() should honour multiple keys, including '-' descending ones."""
    beatles = Band(name='The Beatles', albums=[
        Album(name='Please Please Me', sort_order=2),
        Album(name='With The Beatles', sort_order=1),
        Album(name='Abbey Road', sort_order=2),
    ])

    def ordered_names(*ordering):
        # helper: album names under the requested ordering
        return [album.name for album in beatles.albums.order_by(*ordering)]

    self.assertEqual(ordered_names('sort_order', 'name'), ['With The Beatles', 'Abbey Road', 'Please Please Me'])
    self.assertEqual(ordered_names('sort_order', '-name'), ['With The Beatles', 'Please Please Me', 'Abbey Road'])
def test_meta_ordering(self):
    """Without order_by(), a child relation should respect the model's Meta ordering."""
    beatles = Band(name='The Beatles', albums=[
        Album(name='Please Please Me', sort_order=2),
        Album(name='With The Beatles', sort_order=1),
        Album(name='Abbey Road', sort_order=3),
    ])
    # in the absence of an explicit order_by clause, it should use the ordering as defined
    # in Album.Meta, which is 'sort_order'
    albums = [album.name for album in beatles.albums.all()]
    self.assertEqual(['With The Beatles', 'Please Please Me', 'Abbey Road'], albums)
def test_parental_key_checks_clusterable_model(self):
    """ParentalKey's system check should reject a target that is not a ClusterableModel."""
    from django.core import checks
    from django.db import models
    from modelcluster.fields import ParentalKey

    class Instrument(models.Model):
        # Oops, BandMember is not a Clusterable model
        member = ParentalKey(BandMember, on_delete=models.CASCADE)

        class Meta:
            # Prevent Django from thinking this is in the database
            # This shouldn't affect the test
            abstract = True

    # Check for error
    errors = Instrument.check()
    self.assertEqual(1, len(errors))
    # Check the error itself
    error = errors[0]
    self.assertIsInstance(error, checks.Error)
    self.assertEqual(error.id, 'modelcluster.E001')
    self.assertEqual(error.obj, Instrument.member.field)
    self.assertEqual(error.msg, 'ParentalKey must point to a subclass of ClusterableModel.')
    self.assertEqual(error.hint, 'Change tests.BandMember into a ClusterableModel or use a ForeignKey instead.')
def test_parental_key_checks_related_name_is_not_plus(self):
    """ParentalKey's system check should reject related_name='+'."""
    from django.core import checks
    from django.db import models
    from modelcluster.fields import ParentalKey

    class Instrument(models.Model):
        # Oops, related_name='+' is not allowed
        band = ParentalKey(Band, related_name='+', on_delete=models.CASCADE)

        class Meta:
            # Prevent Django from thinking this is in the database
            # This shouldn't affect the test
            abstract = True

    # Check for error
    errors = Instrument.check()
    self.assertEqual(1, len(errors))
    # Check the error itself
    error = errors[0]
    self.assertIsInstance(error, checks.Error)
    self.assertEqual(error.id, 'modelcluster.E002')
    self.assertEqual(error.obj, Instrument.band.field)
    self.assertEqual(error.msg, "related_name='+' is not allowed on ParentalKey fields")
    self.assertEqual(error.hint, "Either change it to a valid name or remove it")
def test_parental_key_checks_target_is_resolved_as_class(self):
    """A string target that never resolves should surface Django's fields.E300 check."""
    from django.core import checks
    from django.db import models
    from modelcluster.fields import ParentalKey

    class Instrument(models.Model):
        # 'Banana' is not an installed model, so the lazy reference cannot resolve
        banana = ParentalKey('Banana', on_delete=models.CASCADE)

        class Meta:
            # Prevent Django from thinking this is in the database
            # This shouldn't affect the test
            abstract = True

    # Check for error
    errors = Instrument.check()
    self.assertEqual(1, len(errors))
    # Check the error itself
    error = errors[0]
    self.assertIsInstance(error, checks.Error)
    self.assertEqual(error.id, 'fields.E300')
    self.assertEqual(error.obj, Instrument.banana.field)
    self.assertEqual(error.msg, "Field defines a relation with model 'Banana', which is either not installed, or is abstract.")
class GetAllChildRelationsTest(TestCase):
    def test_get_all_child_relations(self):
        """get_all_child_relations should include relations inherited from the superclass."""
        relation_names = {rel.name for rel in get_all_child_relations(Restaurant)}
        self.assertEqual(relation_names, {'tagged_items', 'reviews', 'menu_items'})
class ParentalM2MTest(TestCase):
    """Tests for ParentalManyToManyField behaviour on saved and unsaved instances."""

    def setUp(self):
        # an unsaved Article with in-memory M2M relations to saved Authors/Categories
        self.article = Article(title="Test Title")
        self.author_1 = Author.objects.create(name="Author 1")
        self.author_2 = Author.objects.create(name="Author 2")
        self.article.authors = [self.author_1, self.author_2]
        self.category_1 = Category.objects.create(name="Category 1")
        self.category_2 = Category.objects.create(name="Category 2")
        self.article.categories = [self.category_1, self.category_2]

    def test_uninitialised_m2m_relation(self):
        """An untouched M2M relation on a new instance should read as empty."""
        # Reading an m2m relation of a newly created object should return an empty queryset
        new_article = Article(title="Test title")
        self.assertEqual([], list(new_article.authors.all()))
        self.assertEqual(new_article.authors.count(), 0)
        # the manager should have a 'model' property pointing to the target model
        self.assertEqual(Author, new_article.authors.model)

    def test_parentalm2mfield(self):
        """Full lifecycle: query, add, remove, clear, set, then save/restore."""
        # Article should not exist in the database yet
        self.assertFalse(Article.objects.filter(title='Test Title').exists())
        # Test lookup on parental M2M relation
        self.assertEqual(
            ['Author 1', 'Author 2'],
            [author.name for author in self.article.authors.order_by('name')]
        )
        self.assertEqual(self.article.authors.count(), 2)
        # the manager should have a 'model' property pointing to the target model
        self.assertEqual(Author, self.article.authors.model)
        # Test adding to the relation
        author_3 = Author.objects.create(name="Author 3")
        self.article.authors.add(author_3)
        self.assertEqual(
            ['Author 1', 'Author 2', 'Author 3'],
            [author.name for author in self.article.authors.all().order_by('name')]
        )
        self.assertEqual(self.article.authors.count(), 3)
        # Test removing from the relation
        self.article.authors.remove(author_3)
        self.assertEqual(
            ['Author 1', 'Author 2'],
            [author.name for author in self.article.authors.order_by('name')]
        )
        self.assertEqual(self.article.authors.count(), 2)
        # Test clearing the relation
        self.article.authors.clear()
        self.assertEqual(
            [],
            [author.name for author in self.article.authors.order_by('name')]
        )
        self.assertEqual(self.article.authors.count(), 0)
        # Test the 'set' operation
        self.article.authors.set([self.author_2])
        self.assertEqual(self.article.authors.count(), 1)
        self.assertEqual(
            ['Author 2'],
            [author.name for author in self.article.authors.order_by('name')]
        )
        # Test saving to / restoring from DB
        self.article.authors = [self.author_1, self.author_2]
        self.article.save()
        self.article = Article.objects.get(title="Test Title")
        self.assertEqual(
            ['Author 1', 'Author 2'],
            [author.name for author in self.article.authors.order_by('name')]
        )
        self.assertEqual(self.article.authors.count(), 2)

    def test_constructor(self):
        """M2M relations may be populated via constructor kwargs."""
        # Test passing values for M2M relations as kwargs to the constructor
        article2 = Article(
            title="Test article 2",
            authors=[self.author_1],
            categories=[self.category_2],
        )
        self.assertEqual(
            ['Author 1'],
            [author.name for author in article2.authors.order_by('name')]
        )
        self.assertEqual(article2.authors.count(), 1)

    def test_ordering(self):
        """An unsaved M2M relation should respect the target model's Meta ordering."""
        # our fake querysets should respect the ordering defined on the target model
        bela_bartok = Author.objects.create(name='Bela Bartok')
        graham_greene = Author.objects.create(name='Graham Greene')
        janis_joplin = Author.objects.create(name='Janis Joplin')
        simon_sharma = Author.objects.create(name='Simon Sharma')
        william_wordsworth = Author.objects.create(name='William Wordsworth')
        article3 = Article(title="Test article 3")
        # assigned deliberately out of order; reading back should sort by name
        article3.authors = [
            janis_joplin, william_wordsworth, bela_bartok, simon_sharma, graham_greene
        ]
        self.assertEqual(
            list(article3.authors.all()),
            [bela_bartok, graham_greene, janis_joplin, simon_sharma, william_wordsworth]
        )

    def test_save_m2m_with_update_fields(self):
        """save(update_fields=...) should commit only the named M2M relations."""
        self.article.save()
        # modify both relations, but only commit the change to authors
        self.article.authors.clear()
        self.article.categories.clear()
        self.article.title = 'Updated title'
        self.article.save(update_fields=['title', 'authors'])
        self.updated_article = Article.objects.get(pk=self.article.pk)
        self.assertEqual(self.updated_article.title, 'Updated title')
        self.assertEqual(self.updated_article.authors.count(), 0)
        # categories change was not in update_fields, so it must survive
        self.assertEqual(self.updated_article.categories.count(), 2)

    def test_reverse_m2m_field(self):
        """The reverse accessor on Author should only see saved Articles."""
        # article is unsaved, so should not be returned by the reverse relation on author
        self.assertEqual(self.author_1.articles_by_author.count(), 0)
        self.article.save()
        # should now be able to look up on the reverse relation
        self.assertEqual(self.author_1.articles_by_author.count(), 1)
        self.assertEqual(self.author_1.articles_by_author.get(), self.article)
        article_2 = Article(title="Test Title 2")
        article_2.authors = [self.author_1]
        article_2.save()
        self.assertEqual(self.author_1.articles_by_author.all().count(), 2)
        self.assertEqual(
            list(self.author_1.articles_by_author.order_by('title').values_list('title', flat=True)),
            ['Test Title', 'Test Title 2']
        )

    def test_value_from_object(self):
        """Field.value_from_object should work on both unsaved and saved instances."""
        authors_field = Article._meta.get_field('authors')
        self.assertEqual(
            set(authors_field.value_from_object(self.article)),
            set([self.author_1, self.author_2])
        )
        self.article.save()
        self.assertEqual(
            set(authors_field.value_from_object(self.article)),
            set([self.author_1, self.author_2])
        )
class ParentalManyToManyPrefetchTests(TestCase):
    """Query-count tests for prefetch_related over ParentalManyToManyFields."""

    def setUp(self):
        # Create 10 articles with 10 authors each.
        authors = Author.objects.bulk_create(
            Author(id=i, name=str(i)) for i in range(10)
        )
        # re-fetch from the DB rather than using bulk_create's return value
        # (presumably so the instances are genuine DB-backed rows - TODO confirm)
        authors = Author.objects.all()
        for i in range(10):
            article = Article(title=str(i))
            article.authors = authors
            article.save()

    def get_author_names(self, articles):
        """Flatten the author names of every article, in iteration order."""
        return [
            author.name
            for article in articles
            for author in article.authors.all()
        ]

    def test_prefetch_related(self):
        """Prefetching should collapse 1 + 10 queries down to 2, with identical results."""
        # one query for the articles plus one per article for its authors
        with self.assertNumQueries(11):
            names = self.get_author_names(Article.objects.all())
        # one query for the articles plus a single batched query for all authors
        with self.assertNumQueries(2):
            prefetched_names = self.get_author_names(
                Article.objects.prefetch_related('authors')
            )
        self.assertEqual(names, prefetched_names)

    def test_prefetch_related_with_custom_queryset(self):
        """A Prefetch with a custom queryset should restrict the prefetched authors."""
        from django.db.models import Prefetch
        with self.assertNumQueries(2):
            names = self.get_author_names(
                Article.objects.prefetch_related(
                    Prefetch('authors', queryset=Author.objects.filter(name__lt='5'))
                )
            )
        # 10 articles x the 5 authors named '0'..'4'
        self.assertEqual(len(names), 50)

    def test_prefetch_from_fake_queryset(self):
        """prefetch_related should also work when called on an in-memory FakeQuerySet."""
        article = Article(title='Article with related articles')
        article.related_articles = list(Article.objects.all())
        # without prefetching: one authors query per related article
        with self.assertNumQueries(10):
            names = self.get_author_names(article.related_articles.all())
        # with prefetching: a single batched authors query
        with self.assertNumQueries(1):
            prefetched_names = self.get_author_names(
                article.related_articles.prefetch_related('authors')
            )
        self.assertEqual(names, prefetched_names)
class PrefetchRelatedTest(TestCase):
    """prefetch_related behaviour over modelcluster's fake querysets."""
    def test_fakequeryset_prefetch_related(self):
        person1 = Person.objects.create(name='Joe')
        person2 = Person.objects.create(name='Mary')

        # Set main_room for each house before creating the next one for
        # databases where supports_nullable_unique_constraints is False.
        house1 = House.objects.create(name='House 1', address='123 Main St', owner=person1)
        room1_1 = Room.objects.create(name='Dining room')
        room1_2 = Room.objects.create(name='Lounge')
        room1_3 = Room.objects.create(name='Kitchen')
        house1.main_room = room1_1
        house1.save()

        # house2 is deliberately left unsaved so that person1.houses mixes
        # saved and unsaved instances (the fake-queryset code path).
        house2 = House(name='House 2', address='45 Side St', owner=person1)
        room2_1 = Room.objects.create(name='Eating room')
        room2_2 = Room.objects.create(name='TV Room')
        room2_3 = Room.objects.create(name='Bathroom')
        house2.main_room = room2_1

        # NOTE(review): relies on itertools being imported at module level,
        # outside this excerpt — confirm.
        person1.houses = itertools.chain(House.objects.all(), [house2])
        houses = person1.houses.all()
        with self.assertNumQueries(1):
            qs = person1.houses.prefetch_related('main_room')
        # main_room lookups must now be served from the prefetch cache.
        with self.assertNumQueries(0):
            main_rooms = [ house.main_room for house in person1.houses.all() ]
        self.assertEqual(len(main_rooms), 2)

    def test_prefetch_related_with_lookup(self):
        restaurant1 = Restaurant.objects.create(name='The Jolly Beaver')
        restaurant2 = Restaurant.objects.create(name='The Prancing Rhino')
        dish1 = Dish.objects.create(name='Goodies')
        dish2 = Dish.objects.create(name='Baddies')
        wine1 = Wine.objects.create(name='Chateau1')
        wine2 = Wine.objects.create(name='Chateau2')
        menu_item1 = MenuItem.objects.create(restaurant=restaurant1, dish=dish1, recommended_wine=wine1, price=1)
        menu_item2 = MenuItem.objects.create(restaurant=restaurant2, dish=dish2, recommended_wine=wine2, price=10)
        # NOTE(review): Prefetch is presumably imported at module level,
        # outside this excerpt — confirm.
        query = Restaurant.objects.all().prefetch_related(
            Prefetch('menu_items', queryset=MenuItem.objects.only('price', 'recommended_wine').select_related('recommended_wine'))
        )
        res = list(query)
        self.assertEqual(query[0].menu_items.all()[0], menu_item1)
        self.assertEqual(query[1].menu_items.all()[0], menu_item2)
| {
"content_hash": "b4932ed190626b7f687df36dcd4d65af",
"timestamp": "",
"source": "github",
"line_count": 1141,
"max_line_length": 146,
"avg_line_length": 44.61963190184049,
"alnum_prop": 0.6250319184459154,
"repo_name": "wagtail/django-modelcluster",
"id": "7abd3ad91f3bc714c5a33fda3a9211f7878efe87",
"size": "50911",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "tests/tests/test_cluster.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "266990"
}
],
"symlink_target": ""
} |
"""Locators for all elements."""
# pylint: disable=too-few-public-methods
# pylint: disable=too-many-lines
from selenium.webdriver.common.by import By
from lib.constants import objects, url
class Common(object):
  """Common locators.

  Plain CSS selector fragments and attribute-name prefixes that the other
  locator classes in this module compose into full locators.
  """
  # modal
  # MODAL_GENEATE is a long-standing typo kept for backward compatibility;
  # new code should use the correctly spelled MODAL_GENERATE alias below.
  MODAL_GENEATE = ".modal-selector"
  MODAL_GENERATE = MODAL_GENEATE
  MODAL_CREATE = ".modal-wide"
  MODAL_CONFIRM = ".modal.hide"
  # dropdown
  DROPDOWN_MENU = ".dropdown-menu"
  # tree
  TREE_LIST = ".tree-action-list"
  TREE_HEADER = ".tree-header"
  # base: attribute-name prefixes used by the metaclasses in this module
  # to generate per-object-type locator attributes (e.g. "TOGGLE_PROGRAMS")
  BUTTON = "BUTTON_"
  BUTTON_CREATE_NEW = "BUTTON_CREATE_NEW_"
  COUNT = "COUNT_"
  SPINNY = "SPINNY_"
  ACCORDION_MEMBERS = "ACCORDION_MEMBERS_"
  TOGGLE = "TOGGLE_"
class Login(object):
  """Locators for the Login page."""
  BUTTON_LOGIN = (By.CSS_SELECTOR, "a.btn.btn-large.btn-info")


class PageHeader(object):
  """Locators for the Dashboard header."""
  TOGGLE_LHN = (By.CSS_SELECTOR, ".lhn-trigger")
  BUTTON_DASHBOARD = (By.CSS_SELECTOR, '.header-content .to-my-work['
                      'href="/dashboard"]')
  BUTTON_SEARCH = (By.CSS_SELECTOR, '.header-content ['
                   'data-toggle="unified-search"]')
  BUTTON_MY_TASKS = (By.CSS_SELECTOR, '.header-content ['
                     'href="/dashboard#task_widget"]')
  BUTTON_ALL_OBJECTS = (By.CSS_SELECTOR, '.header-content ['
                        'href^="/objectBrowser"]')
  TOGGLE_USER_DROPDOWN = (
      By.CSS_SELECTOR, '.header-content .dropdown-toggle')
  BUTTON_HELP = (By.CSS_SELECTOR, '.header-content [id="#page-help"]')
  GENERIC_SUCCESS_ALERT = (By.CSS_SELECTOR, ".alert-success")
  # dropdown toggle (user menu opened from the header)
  USER_MENU = ".menu " + Common.DROPDOWN_MENU
  BUTTON_ADMIN_DASHBOARD = (
      By.CSS_SELECTOR,
      Common.DROPDOWN_MENU + ' [href="/admin#people_list_widget"]')
  BUTTON_MY_WORK = (By.CSS_SELECTOR,
                    Common.DROPDOWN_MENU + ' [href="/dashboard"]')
  BUTTON_DATA_IMPORT = (By.CSS_SELECTOR,
                        Common.DROPDOWN_MENU + ' [href="/import"]')
  BUTTON_DATA_EXPORT = (By.CSS_SELECTOR,
                        Common.DROPDOWN_MENU + ' [href="/export"]')
  BUTTON_LOGOUT = (By.CSS_SELECTOR, Common.DROPDOWN_MENU + ' [href="/logout"]')
  NOTIFICATIONS = (By.CSS_SELECTOR, USER_MENU + ' .notify-wrap')
  CHECKBOX_DAILY_DIGEST = (By.CSS_SELECTOR, USER_MENU + ' input')
  CHECKBOX_DISABLED = (By.CSS_SELECTOR, USER_MENU + ' input.disabled')


class Dashboard(object):
  """Locators for the Dashboard page."""
  # "get started" section
  GET_LIST = ".get-started__list"
  BUTTON_START_NEW_PROGRAM = (By.CSS_SELECTOR,
                              GET_LIST + ' [data-object-singular="Program"]')
  BUTTON_START_NEW_AUDIT = (By.CSS_SELECTOR,
                            GET_LIST + ' [data-object-singular="Audit"]')
  BUTTON_START_NEW_WORKFLOW = (
      By.CSS_SELECTOR, GET_LIST + ' [data-object-singular="Workflow"]')
  BUTTON_CREATE_NEW_OBJECT = (By.CSS_SELECTOR, GET_LIST + ' [href="#"]')
  BUTTON_ALL_OBJECTS = (By.CSS_SELECTOR,
                        GET_LIST + ' [href^="/objectBrowser"]')
class LhnMenu(object):
  """Locators for the left-hand navigation (LHN) menu in the header."""
  class _Locator(object):
    """Locator factories for the generated LHN accordion entries."""
    @staticmethod
    def get_accordion_button(label):
      """Toggle link of the accordion section for the given model label."""
      return (By.CSS_SELECTOR, '[data-model-name="{}"]>a'.format(label))

    @staticmethod
    def get_create_new_button(label):
      """'Create new' button inside the accordion section."""
      return (
          By.CSS_SELECTOR,
          '[data-model-name="{}"] [data-test-id="button_lhn_create_new_program'
          '_522c563f"]'.format(label))

    @staticmethod
    def get_accordion_count(label):
      """Item-count badge of the accordion section."""
      return (By.CSS_SELECTOR, '[data-model-name="{}"] .item-count'.format(
          label))

    @staticmethod
    def get_accordion_members(object_name):
      """List items (members) of an expanded accordion section."""
      return (
          By.CSS_SELECTOR,
          '[data-model-name="{}"]>.content>.sub-level>li'.format(object_name))

    @staticmethod
    def get_spinny(object_name):
      """Loading spinner of the accordion section."""
      return (By.CSS_SELECTOR, '[data-model-name="{}"] .spinny'.format(
          object_name))

  class __metaclass__(type):
    # Python 2 metaclass: for every known object type, generates
    # TOGGLE_*/BUTTON_CREATE_NEW_*/COUNT_*/SPINNY_*/ACCORDION_MEMBERS_*
    # class attributes (prefixes come from Common).
    def __init__(cls, *args):
      for object_singular, object_plural in zip(objects.ALL_SINGULAR,
                                                objects.ALL_PLURAL):
        capitalized_name = object_singular.title()
        # handle underscore in object names
        if "_" in capitalized_name:
          capitalized_name = capitalized_name.title().replace("_", "")
        # set lhn items
        setattr(cls, Common.TOGGLE + object_plural,
                cls._Locator.get_accordion_button(capitalized_name))
        setattr(cls, Common.BUTTON_CREATE_NEW + object_plural,
                cls._Locator.get_create_new_button(capitalized_name))
        setattr(cls, Common.COUNT + object_plural,
                cls._Locator.get_accordion_count(capitalized_name))
        setattr(cls, Common.SPINNY + object_plural,
                cls._Locator.get_spinny(capitalized_name))
        setattr(cls, Common.ACCORDION_MEMBERS + object_plural,
                cls._Locator.get_accordion_members(capitalized_name))

  LHN_MENU = (By.ID, "lhn")
  MODAL = (By.CSS_SELECTOR, '[id="ajax-lhn_modal-javascript:--"]')
  EXTENDED_INFO = (By.CSS_SELECTOR, '.extended-info.in .info .fa')
  FILTER = (By.CSS_SELECTOR, '.lhs-search')
  FILTER_TEXT_BOX = (By.CSS_SELECTOR, '.lhs-search>.widgetsearch')
  FILTER_SUBMIT_BUTTON = (
      By.CSS_SELECTOR, '.lhs-search>.widgetsearch-submit')
  FILTER_CLEAR_BUTTON = (
      By.CSS_SELECTOR, '.lhs-search [data-title="Clear filters"]')
  LHS_ITEM = (By.CSS_SELECTOR, '[test-data-id="lhs-item_3ad27b8b"]')
  ALL_OBJECTS = (By.CSS_SELECTOR, '[data-test-id="all_objects_e0345ec4"]')
  MY_OBJECTS = (By.CSS_SELECTOR, '[data-test-id="my_objects_6fa95ae1"]')
  PIN = (By.CSS_SELECTOR, '.lhn-pin')
  # lhn items
  DIRECTIVES = (By.CSS_SELECTOR, '[data-test-id="directives_66116337"]')
  TOGGLE_CONTROLS_OR_OBJECTIVES = (
      By.CSS_SELECTOR, '[data-test-id="controls/objectives_66116337"]')
  TOGGLE_PEOPLE_OR_GROUPS = (
      By.CSS_SELECTOR, '[data-test-id="people/groups_66116337"]')
  TOGGLE_ASSETS_OR_BUSINESS = (
      By.CSS_SELECTOR, '[data-test-id="assets/business_66116337"]')
  TOGGLE_RISK_OR_THREATS = (
      By.CSS_SELECTOR, '[data-test-id="risk/threats_66116337"]')
  # workflows labels
  BUTTON_WORKFLOWS_ACTIVE = (
      By.CSS_SELECTOR, '[data-for="Workflow"]>[data-value="Active"]')
  BUTTON_WORKFLOWS_DRAFT = (
      By.CSS_SELECTOR, '[data-for="Workflow"]>[data-value="Draft"]')
  BUTTON_WORKFLOWS_INACTIVE = (
      By.CSS_SELECTOR, '[data-for="Workflow"]>[data-value="Inactive"]')
class ExtendedInfo(object):
  """Locators for the extended-info tooltip shown in LHN after hovering
  over a member object."""
  BUTTON_MAP_TO = (
      By.CSS_SELECTOR, '[data-test-id="extended_info_button_map"]')
  ALREADY_MAPPED = (
      By.CSS_SELECTOR, '[data-test-id="extended_info_object_already_mapped"]')
  TITLE = (By.CSS_SELECTOR, '#extended-info .main-title')
class BaseModalCreateNew(object):
  """Locators shared by the "create new object" modals."""
  MODAL = Common.MODAL_CREATE
  # labels
  MODAL_TITLE = (By.CSS_SELECTOR, "{} .ui-draggable-handle>h2".format(MODAL))
  TITLE = (By.CSS_SELECTOR,
           "{} .modal-body form>div:nth-child(2) .span6>label".format(MODAL))
  # user input elements
  UI_TITLE = (By.CSS_SELECTOR,
              "{} .modal-body form>div:nth-child(2) .span6>input".
              format(MODAL))


class BaseModalGenerateNew(object):
  """Locators shared by the "generate new object" modals."""
  MODAL = Common.MODAL_GENEATE
  # labels
  MODAL_TITLE = (By.CSS_SELECTOR, "{} .modal-header>h2".format(MODAL))
class TreeView(object):
  """Locators for tree-view components.

  Values containing "{}" are selector templates; the page-object code
  fills them in with a concrete widget name before use.
  """
  # common
  ITEMS = "{} li.tree-item .item-main"
  HEADER = "{} " + Common.TREE_HEADER
  ITEM_LOADING = (By.CSS_SELECTOR, " .tree-item-placeholder")
  ITEM_EXPAND_BUTTON = " .openclose"
  SPINNER = (By.CSS_SELECTOR, " .tree-spinner")
  BUTTON_SHOW_FIELDS = "{} " + Common.TREE_HEADER + " .fa-bars"
  # tree view tool bar of widgets
  BUTTON_3BBS = "{} " + Common.TREE_LIST + " .btn-draft"
  BUTTON_CREATE = "{} " + Common.TREE_LIST + " .create-button"
  BUTTON_GENERATE = (
      "{} " + Common.TREE_LIST + " .tree-action-list-items .fa-magic")


class ModalSetVisibleFields(object):
  """Locators for the "set visible fields" dropdown of a tree view.

  Values containing "{}" are selector templates filled in by callers.
  """
  OPEN_MENU = ".open .dropdown-menu-form"
  MODAL = "{} " + OPEN_MENU
  # labels
  MODAL_TITLE = MODAL + " h5"
  FIELDS_TITLES = "{} " + Common.TREE_HEADER + " .checkbox-inline"
  # user input elements
  FIELDS_CHECKBOXES = "{} " + Common.TREE_HEADER + " .attr-checkbox"
  BUTTON_SET_FIELDS = "{} " + Common.TREE_HEADER + " .set-tree-attrs"
class ModalCreateNewObject(BaseModalCreateNew):
  """Locators for a generic new object modal."""
  # user input elements
  UI_TITLE = (
      By.CSS_SELECTOR,
      '{} [placeholder="Enter Title"]'.format(BaseModalCreateNew.MODAL))
  UI_CODE = (
      By.CSS_SELECTOR,
      '{} [name="slug"]'.format(BaseModalCreateNew.MODAL))
  # buttons
  BUTTON_SAVE_AND_CLOSE = (
      By.CSS_SELECTOR, '{} [data-toggle="modal-submit"]'.format(
          BaseModalCreateNew.MODAL))
  BUTTON_SAVE_AND_ADD_ANOTHER = (
      By.CSS_SELECTOR, '{} [data-toggle="modal-submit-addmore"]'.format(
          BaseModalCreateNew.MODAL))
class ModalCreateNewProgram(BaseModalCreateNew):
  """Locators for the program modal visible when creating a new program
  from LHN."""
  # user input elements
  UI_DESCRIPTION = (By.CSS_SELECTOR,
                    '[data-test-id="new_program_field_description_1fb8bc06"]'
                    '>iframe.wysihtml5-sandbox')
  UI_NOTES = (By.CSS_SELECTOR,
              '[data-test-id="new_program_field_notes_75b8bc05"]'
              '>iframe.wysihtml5-sandbox')
  UI_CODE = (By.CSS_SELECTOR,
             '[data-test-id="new_program_field_code_334276e2"]')
  UI_STATE = (By.CSS_SELECTOR,
              '[data-test-id="new_program_dropdown_state_036a1fa6"]')
  BUTTON_HIDE_OPTIONAL_FIELDS = (By.ID, "formHide")
  # NOTE(review): same element id as BUTTON_HIDE_OPTIONAL_FIELDS — the
  # show/hide toggle appears to be a single button; confirm against the UI.
  BUTTON_SHOW_ALL_OPTIONAL_FIELDS = (By.ID, "formHide")
  UI_PRIMARY_CONTACT = (By.CSS_SELECTOR, '[data-test-id='
                        '"new_program_field_primary_contact_'
                        '86160053"]')
  DROPDOWN_CONTACT = (By.CSS_SELECTOR, '.ui-menu-item')
  UI_SECONDARY_CONTACT = (By.CSS_SELECTOR, '[data-test-id='
                          '"new_program_field_secondary_'
                          'contact_'
                          '86160053"]')
  UI_PROGRAM_URL = (By.CSS_SELECTOR, '[data-test-id='
                    '"new_program_field_program_url_'
                    '86160053"]')
  UI_REFERENCE_URL = (By.CSS_SELECTOR, '[data-test-id='
                      '"new_program_field_reference_url_'
                      '86160053"]')
  # date pickers (effective / stop date)
  UI_EFFECTIVE_DATE = (
      By.CSS_SELECTOR,
      '[test-id="new_program_field_effective_date_f2783a28"] '
      '[data-id="effective_date_hidden"] '
      '.datepicker__input')
  EFFECTIVE_DATE_DATEPICKER = (
      By.CSS_SELECTOR,
      '[test-id="new_program_field_effective_date_f2783a28"] '
      '[data-id="effective_date_hidden"] '
      '[data-handler="selectDay"]')
  UI_STOP_DATE = (
      By.CSS_SELECTOR,
      '[test-id="new_program_field_effective_date_f2783a28"] '
      '[data-id="stop_date_hidden"] '
      '.datepicker__input')
  STOP_DATE_DATEPICKER = (
      By.CSS_SELECTOR,
      '[test-id="new_program_field_effective_date_f2783a28"] '
      '[data-id="stop_date_hidden"] '
      '[data-handler="selectDay"]')
  # labels
  TITLE = (By.CSS_SELECTOR, '[data-test-id="label_title_2c925d94"]')
  DESCRIPTION = (By.CSS_SELECTOR,
                 '[data-test-id="label_description_2c925d94"]')
  PRIVACY = (By.CSS_SELECTOR, '[data-test-id="label_privacy_2c925d94"]')
  PROGRAM_URL = (By.CSS_SELECTOR,
                 '[data-test-id="label_program_url_2c925d94"]')
class ModalCreateNewOrgGroup(BaseModalCreateNew):
  """Locators for the org group modal visible when creating a new org group
  from LHN."""


class ModalCreateNewRisk(BaseModalCreateNew):
  """Locators for the risk modal visible when creating a new risk from
  LHN."""
  UI_DESCRIPTION = (
      By.CSS_SELECTOR, '.modal-body form>div:nth-child(3) iframe')


class ModalCreateNewDataAsset(BaseModalCreateNew):
  """Locators for the data asset modal visible when creating a new data
  asset from LHN."""


class ModalCreateNewProcess(BaseModalCreateNew):
  """Locators for the process modal visible when creating a new process
  from LHN."""


class ModalCreateNewProject(BaseModalCreateNew):
  """Locators for the project modal visible when creating a new project
  from LHN."""


class ModalCreateNewSystem(BaseModalCreateNew):
  """Locators for the system modal visible when creating a new system
  from LHN."""


class ModalCreateNewProduct(BaseModalCreateNew):
  """Locators for the product modal visible when creating a new product
  from LHN."""
class ModalCreateNewControl(BaseModalCreateNew):
  """Locators for the control modal visible when creating a new control
  from LHN."""
  class _Locator(object):
    """Locator factories for rows of the control creation modal."""
    @staticmethod
    def get_assessor_row(first_id, second_id):
      """Label of the assessor row at the given grid position."""
      return (
          By.CSS_SELECTOR,
          '.modal-body div>form>div>div:nth-child({})>div:nth-child({}) '
          'label'.format(first_id, second_id))

    @staticmethod
    def get_dropdown_item(first_id, second_id):
      """Select element at the given grid position."""
      return (
          By.CSS_SELECTOR,
          '.modal-body div>form>div>div:nth-child({})>div:nth-child({}) '
          'select'.format(first_id, second_id))

  # labels and user input elements, in the order they appear on the modal
  DESCRIPTION = (
      By.CSS_SELECTOR,
      '[data-test-id="control_description-label_9cc51ca3"]')
  UI_DESCRIPTION = (
      By.CSS_SELECTOR,
      '[data-test-id="control_description-text_9cc51ca3"] iframe')
  TEST_PLAN = (
      By.CSS_SELECTOR,
      '[data-test-id="control_test_plan_d8b5a2f4"] label')
  UI_TEST_PLAN = (
      By.CSS_SELECTOR,
      '[data-test-id="control_test_plan_d8b5a2f4"] iframe')
  NOTES = (By.CSS_SELECTOR, '[data-id="note_hidden"] label')
  CODE = (
      By.CSS_SELECTOR,
      '[data-test-id="control_code_f8abbcc9"] label')
  UI_CODE = (
      By.CSS_SELECTOR,
      '[data-test-id="control_code_f8abbcc9"] input')
  KIND_OR_NATURE = (
      By.CSS_SELECTOR,
      '[data-test-id="control_kind_nature_dadc232f"] label')
  DROPDOWN_KIND_OR_NATURE = (
      By.CSS_SELECTOR,
      '[data-test-id="control_kind_nature_dadc232f"] select')
  FRAUD_RELATED = (
      By.CSS_SELECTOR,
      '[data-test-id="control_fraud_9cc51ca3"] label')
  DROPDOWN_FRAUD_RELATED = (
      By.CSS_SELECTOR,
      '[data-test-id="control_fraud_9cc51ca3"] select')
  FREQUENCY = (
      By.CSS_SELECTOR,
      '[data-test-id="control_frequency_fb20318a"] label')
  DROPDOWN_FREQUENCY = (
      By.CSS_SELECTOR,
      '[data-test-id="control_frequency_fb20318a"] select')
  ASSERTIONS = (
      By.CSS_SELECTOR,
      '[data-test-id="control_assertions_5d8b7f7a"] label')
  SELECTABLE_ASSERTIONS = (
      By.CSS_SELECTOR,
      '[data-test-id="control_assertions_5d8b7f7a"] select')
  PRINCIPAL_ASSESSOR = (
      By.CSS_SELECTOR,
      '[data-test-id="control_primary_assessor_f7379330"] label')
  SECONDARY_ASSESSOR = (
      By.CSS_SELECTOR,
      '[data-test-id="control_secondary_assessor_b9439af6"] label')
  OWNER = (
      By.CSS_SELECTOR,
      '[data-test-id="control_owner_587d12d6"] label')
  BUTTON_ADD_OWNER = (By.CSS_SELECTOR, 'isolate-form .btn')
  PRIMARY_CONTACT = (
      By.CSS_SELECTOR,
      '[data-test-id="control_contacts_8bd3d8c7"] '
      '.row-fluid:nth-child(1) label')
  UI_PRIMARY_CONTACT = (
      By.CSS_SELECTOR,
      '[data-test-id="new_program_field_primary_contact_86160053"]')
  SECONDARY_CONTACT = (
      By.CSS_SELECTOR,
      '[data-test-id="control_contacts_8bd3d8c7"] '
      '.row-fluid:nth-child(2) label')
  UI_SECONDARY_CONTACT = (
      By.CSS_SELECTOR,
      '[data-test-id="new_program_field_secondary_contact_86160053"]')
  CONTROL_URL = (
      By.CSS_SELECTOR,
      '[data-test-id="control_control_url-label_c4038873"]')
  UI_CONTROL_URL = (
      By.CSS_SELECTOR,
      '[data-test-id="control_control_url-input_c4038873"]')
  REFERENCE_URL = (
      By.CSS_SELECTOR,
      '[data-test-id="control_reference_url-label_8931063d"]')
  UI_REFERENCE_URL = (
      By.CSS_SELECTOR,
      '[data-test-id="control_reference_url-input_8931063d"]')
  SIGNIFICANCE = (
      By.CSS_SELECTOR,
      '[data-test-id="control_significance_18f15545"] label')
  DROPDOWN_SIGNIFICANCE = (
      By.CSS_SELECTOR,
      '[data-test-id="control_significance_18f15545"] select')
  TYPE_OR_MEANS = (
      By.CSS_SELECTOR,
      '[data-test-id="control_type_means_2ffa1b64"] label')
  DROPDOWN_TYPE_OR_MEANS = (
      By.CSS_SELECTOR,
      '[data-test-id="control_type_means_2ffa1b64"] select')
  CATEGORIES = (
      By.CSS_SELECTOR,
      '[data-test-id="control_categories_1eb33246"] label')
  SELECTABLE_CATEGORIES = (
      By.CSS_SELECTOR,
      '[data-test-id="control_categories_1eb33246"] select')
  STATE = (
      By.CSS_SELECTOR,
      '[data-test-id="control_state_5d184456"] label')
  DROPDOWN_STATE = (
      By.CSS_SELECTOR,
      '[data-test-id="control_state_5d184456"] select')
  UI_NOTES = (By.CSS_SELECTOR, '[data-id="note_hidden"] iframe')
  # date pickers (effective / stop date)
  EFFECTIVE_DATE = (
      By.CSS_SELECTOR,
      '[test-id="control_effective_dates_0376cf90"] '
      '[data-id="effective_date_hidden"] '
      '.datepicker__input')
  DATEPICKER_EFFECTIVE_DATE = (
      By.CSS_SELECTOR,
      '[test-id="control_effective_dates_0376cf90"] '
      '[data-id="effective_date_hidden"] '
      '[data-handler="selectDay"]')
  STOP_DATE = (
      By.CSS_SELECTOR,
      '[test-id="control_effective_dates_0376cf90"] '
      '[data-id="stop_date_hidden"] '
      '.datepicker__input')
  DATEPICKER_STOP_DATE = (
      By.CSS_SELECTOR,
      '[test-id="control_effective_dates_0376cf90"] '
      '[data-id="stop_date_hidden"] '
      '[data-handler="selectDay"]')
  # buttons
  BUTTON_HIDE_ALL_OPTIONAL_FIELDS = (By.CSS_SELECTOR, '#formHide')
class ModalCreateNewIssue(BaseModalCreateNew):
  """Locators for the issue modal visible when creating a new issue from
  LHN."""


class ModalCreateNewAsmt(BaseModalCreateNew):
  """Locators for an assessment creation modal."""


class ModalCreateNewAsmtTmpl(BaseModalCreateNew):
  """Locators for an assessment template creation modal."""


class ModalGenerateNewObject(BaseModalGenerateNew):
  """Locators for a generic "generate new object" modal."""
  BUTTON_GENERATE = (By.CSS_SELECTOR,
                     "{} .btn-map".format(BaseModalGenerateNew.MODAL))
class ModalGenerateNewAsmt(ModalGenerateNewObject):
  """Locators for an assessment generation modal."""
  MODAL = ModalGenerateNewObject.MODAL
  # selector suffix for each found snapshot row
  FOUND_OBJECTS = " .snapshot-list .flex-box"
  SELECT_ASMT_TMPL_DROPDOWN = (
      By.CSS_SELECTOR,
      MODAL + ' dropdown[name="assessmentTemplate"] .input-block-level')
  SELECT_ASMT_TMPL_OPTIONS = (
      By.CSS_SELECTOR,
      MODAL + ' dropdown[name="assessmentTemplate"] '
      '.input-block-level option')
  BUTTON_SEARCH = (By.CSS_SELECTOR, MODAL + " .btn-info")
  FOUND_OBJECTS_TITLES = (By.CSS_SELECTOR,
                          MODAL + FOUND_OBJECTS + " .title-attr")
  FOUND_OBJECTS_CHECKBOXES = (By.CSS_SELECTOR,
                              MODAL + FOUND_OBJECTS + ' [type="checkbox"]')
class ModalEditObject(BaseModalCreateNew):
  """Locators for a generic edit object modal."""
  BUTTON_DELETE = (
      By.CSS_SELECTOR, '.deny-buttons [data-toggle="modal-ajax-deleteform"]')


class ModalCustomAttribute(object):
  """Locators for a generic custom attribute modal in the admin dashboard."""
  MODAL_TITLE = (By.CSS_SELECTOR, '.modal-header h2')
  ATTRIBUTE_TITLE = (By.CSS_SELECTOR, '.modal-body div:nth-child(1)>label')
  INLINE_HELP = (By.CSS_SELECTOR, '.modal-body div:nth-child(2)>label')
  # NOTE(review): the three label locators below all reuse the modal header
  # selector — this looks like a copy-paste placeholder; confirm against
  # the actual modal markup before relying on them.
  ATTRIBUTE_TYPE = (By.CSS_SELECTOR, '.modal-header h2')
  PLACEHOLDER = (By.CSS_SELECTOR, '.modal-header h2')
  MANDATORY = (By.CSS_SELECTOR, '.modal-header h2')
  # user input elements
  UI_ATTRIBUTE_TITLE = (By.CSS_SELECTOR, '.modal-body [name="title"]')
  UI_INLINE_HELP = (By.CSS_SELECTOR, '.modal-body [name="helptext"]')
  UI_PLACEHOLDER = (By.CSS_SELECTOR, '.modal-body [name="placeholder"]')
  UI_POSSIBLE_VALUES = (By.CSS_SELECTOR, '.modal-body '
                        '[name="multi_choice_options"]')
  CHECKBOX_MANDATORY = (By.CSS_SELECTOR, '.modal-body [type="checkbox"]')
  BUTTON_ADD_ANOTHER = (
      By.CSS_SELECTOR,
      '.confirm-buttons [data-toggle="modal-submit-addmore"]')
  BUTTON_SAVE_AND_CLOSE = (
      By.CSS_SELECTOR,
      '.modal-footer .confirm-buttons [data-toggle="modal-submit"]')
  ATTRIBUTE_TYPE_SELECTOR = (By.CSS_SELECTOR, "dropdown select")
  ATTRIBUTE_TYPE_OPTIONS = (By.CSS_SELECTOR, "dropdown select option")
class WidgetBar(object):
  """Locators for the bar containing the widgets/tabs."""
  class _Locator(object):
    """Locator factories for widget tabs."""
    @staticmethod
    def get_widget(object_name):
      """Tab link pointing at the "#<object_name>_widget" anchor."""
      return (
          By.CSS_SELECTOR,
          '.object-nav [href$="#{}_widget"]'.format(object_name)
      )

  class __metaclass__(type):
    # Python 2 metaclass: generates one tab locator attribute per object
    # type, named after the plural object name.
    def __init__(cls, *args):
      for object_singular, object_plural in zip(objects.ALL_SINGULAR,
                                                objects.ALL_PLURAL):
        name = object_singular.lower()
        setattr(cls, object_plural, cls._Locator.get_widget(name))

  BUTTON_ADD = (By.CSS_SELECTOR,
                '[data-test-id="button_widget_add_2c925d94"]')
  TAB_WIDGET = (By.CSS_SELECTOR, ".object-nav .active")
  # explicitly named tabs (admin dashboard and special widgets)
  ADMIN_PEOPLE = _Locator.get_widget("people_list")
  ADMIN_ROLES = _Locator.get_widget("roles_list")
  ADMIN_EVENTS = _Locator.get_widget("events_list")
  ADMIN_CUSTOM_ATTRIBUTE = _Locator.get_widget("custom_attribute")
  INFO = _Locator.get_widget("info")
  CUSTOM_ATTRIBUTES = _Locator.get_widget("custom_attribute")
  EVENTS = _Locator.get_widget("events_list")
  ROLES = _Locator.get_widget("roles_list")
  RISK_ASSESSMENTS = _Locator.get_widget("risk_assessment")
  TASKS = _Locator.get_widget("task")
class WidgetBarButtonAddDropdown(object):
  """Locators for the button/dropdown "add widget" in the widget bar."""
  class _Locator(object):
    """Locator factories for items of the "add widget" dropdown."""
    @staticmethod
    def get_dropdown_item(object_name):
      """Dropdown entry linking to the "#<object_name>_widget" anchor."""
      return (
          By.CSS_SELECTOR,
          '[data-test-id="button_widget_add_2c925d94"] '
          '.object-nav [href$="#{}_widget"]'.format(object_name)
      )

  class __metaclass__(type):
    # Python 2 metaclass: generates one dropdown-item locator per plural
    # object name (runs after the class body below is evaluated).
    def __init__(cls, *args):
      for object_ in objects.ALL_PLURAL:
        name = object_.lower()
        setattr(cls, object_, cls._Locator.get_dropdown_item(name))

  # "THREAD_ACTORS" is a typo (should be THREAT_ACTORS); the misspelled
  # name is kept for backward compatibility with existing callers.
  THREAD_ACTORS = _Locator.get_dropdown_item("threat_actor")
  THREAT_ACTORS = THREAD_ACTORS  # correctly spelled alias
  WORKFLOW_TASKS = _Locator.get_dropdown_item("workflow_task")
class ObjectWidget(object):
  """Locators for a generic object list widget."""
  _HEADER = '.header [class^="span"]'
  # column headers
  HEADER_TITLE = (By.CSS_SELECTOR, _HEADER + ' [data-field="title"]')
  HEADER_OWNER = (By.CSS_SELECTOR,
                  _HEADER + ' [data-field="contact.name|email"]')
  HEADER_STATE = (By.CSS_SELECTOR, _HEADER + ' [data-field="status"]')
  HEADER_LAST_ASSESSMENT_DATE = (
      By.CSS_SELECTOR, _HEADER + ' [data-field="last_assessment_date"]')
  MEMBERS_TITLE_LIST = (
      By.CSS_SELECTOR, '.object-area .tree-structure .select '
      '[class^="span"]:nth-child(1) .title')
  INFO_PANE = (By.CSS_SELECTOR, '.sticky-info-panel')
  LOADING = (By.CSS_SELECTOR, '.new-tree_loading')


class ModalDeleteObject(object):
  """Locators for a generic delete object modal."""
  MODAL_TITLE = (By.CSS_SELECTOR, '.modal-header>h2')
  CONFIRMATION_TEXT = (By.CSS_SELECTOR, '.modal-body>div>p')
  OBJECT_TITLE = (By.CSS_SELECTOR, '.modal-body>div>p>span')
  BUTTON_DELETE = (
      By.CSS_SELECTOR, '.modal-footer .confirm-buttons>[data-toggle="delete"]')


class ModalCompareUpdateObject(object):
  """Locators for a generic compare and update object modal."""
  MODAL = Common.MODAL_CONFIRM
  # labels
  MODAL_TITLE = (By.CSS_SELECTOR, "{} .modal-header".format(MODAL))
  # user input elements
  BUTTON_UPDATE = (By.CSS_SELECTOR, "{} .btn-success".format(MODAL))


class BaseInfoWidget(object):
  """Locators that are common to all info widgets."""
  BUTTON_SETTINGS = (By.CSS_SELECTOR, '.info-pane-utility .dropdown-toggle')
  TITLE = (By.CSS_SELECTOR, '[data-test-id="title_0ad9fbaf"] h6')
  TITLE_ENTERED = (By.CSS_SELECTOR, '[data-test-id="title_0ad9fbaf"] h3')
  LINK_GET_LAST_VER = (By.CSS_SELECTOR, '.snapshot [can-click="compareIt"]')
class WidgetInfoProgram(BaseInfoWidget):
  """Locators for the program info widget."""
  PERMALINK_ALERT = (By.CSS_SELECTOR, '.content>.flash>.alert-success')
  ALERT_LINK_COPIED = (By.CSS_SELECTOR, '.alert.alert-success')
  # Fixed locator strategy: the value is a CSS attribute selector, not a
  # bare element id, so By.ID could never match it (cf. LhnMenu.MODAL,
  # which pairs the same selector string with By.CSS_SELECTOR).
  MODAL_DELETE = (By.CSS_SELECTOR, '[id="ajax-lhn_modal-javascript:--"]')
  MODAL_DELETE_CLOSE = (By.CSS_SELECTOR, '.lhn_modal .grcicon-x-grey')
  OBJECT_REVIEW = (By.CSS_SELECTOR,
                   '[data-test-id="title_review_0ad9fbaf"] h6')
  SUBMIT_FOR_REVIEW = (By.CSS_SELECTOR,
                       '[data-test-id="title_review_0ad9fbaf"] '
                       '[href="javascript://"]')
  DESCRIPTION = (By.CSS_SELECTOR,
                 '[data-test-id="title_description_7a906d2e"] h6')
  DESCRIPTION_ENTERED = (By.CSS_SELECTOR,
                         '[data-test-id="title_description_'
                         'content_7a906d2e"]')
  NOTES = (By.CSS_SELECTOR, '[data-test-id="title_notes_ef5bc3a71e88"] '
           'h6')
  NOTES_ENTERED = (By.CSS_SELECTOR,
                   '[data-test-id="title_notes_ef5bc3a71e88"]>div')
  MANAGER = (By.CSS_SELECTOR, '[data-test-id="title_manager_7a906d2e"] '
             'h6')
  MANAGER_ENTERED = (By.CSS_SELECTOR,
                     '[data-test-id="title_manager_7a906d2e"] '
                     '[data-test-id="text_manager_7a906d2e"]')
  PROGRAM_URL = (By.CSS_SELECTOR,
                 '[data-test-id="title_program_url_aa7d1a65"] h6')
  PROGRAM_URL_ENTERED = (By.CSS_SELECTOR,
                         '[data-test-id="text_program_url_aa7d1a65"]')
  REFERENCE_URL = (By.CSS_SELECTOR,
                   '[data-test-id="title_reference_url_aa7d1a65"]')
  REFERENCE_URL_ENTERED = (By.CSS_SELECTOR,
                           '[data-test-id="text_reference_url_aa7d1a65"]')
  TOGGLE_SHOW_ADVANCED = (By.CSS_SELECTOR,
                          '[data-test-id="button_advanced_cf47bc01"]')
  TOGGLE_SHOW_ADVANCED_ACTIVATED = (
      By.CSS_SELECTOR, '[data-test-id="button_advanced_cf47bc01"].active')
  CODE = (By.CSS_SELECTOR, '[data-test-id="title_code_cf47bc01"] h6')
  CODE_ENTERED = (By.CSS_SELECTOR,
                  '[data-test-id="title_code_cf47bc01"] p')
  EFFECTIVE_DATE = (By.CSS_SELECTOR,
                    '[data-test-id="title_effective_date_cf47bc01"] h6')
  EFFECTIVE_DATE_ENTERED = (By.CSS_SELECTOR,
                            '[data-test-id="title_effective_date_'
                            'cf47bc01"] p')
  STOP_DATE = (By.CSS_SELECTOR,
               '[data-test-id="title_stop_date_cf47bc01"] h6')
  STOP_DATE_ENTERED = (By.CSS_SELECTOR,
                       '[data-test-id="title_stop_date_cf47bc01"] p')
  # Fixed selector: the original value was corrupted by a bad merge that
  # interleaved an unrelated "new_program_button_save_and_new_86160053"
  # selector into the state title selector (cf. STATE_ENTERED below,
  # which targets "title_state_value_0ad9fbaf").
  STATE = (By.CSS_SELECTOR, '[data-test-id="title_state_0ad9fbaf"] h6')
  STATE_ENTERED = (By.CSS_SELECTOR,
                   '[data-test-id="title_state_value_0ad9fbaf"]')
  PRIMARY_CONTACT = (By.CSS_SELECTOR, '[data-test-id="title_primary_'
                     'contact_696de7244b84"] h6')
  PRIMARY_CONTACT_ENTERED = (
      By.CSS_SELECTOR, '[data-test-id="text_primary_contact_'
      '696de7244b84"] [data-test-id="text_'
      'manager_7a906d2e"]')
  SECONDARY_CONTACT = (
      By.CSS_SELECTOR, '[data-test-id="title_contacts_696de7244b84"] '
      'h6:nth-child(2)')
  SECONDARY_CONTACT_ENTERED = (
      By.CSS_SELECTOR, '[data-test-id="text_secondary_contact_'
      '696de7244b84"] [data-test-id="text_manager_'
      '7a906d2e"]')
  PRIVATE_PROGRAM = (By.CSS_SELECTOR,
                     '[data-test-id="title_private_ec758af9"] h6')
  ICON_LOCK = (By.CSS_SELECTOR, '[data-test-id="icon_private_ec758af9"]')
class WidgetInfoRisk(BaseInfoWidget):
  """Locators for the risk info widget"""


class WidgetInfoOrgGroup(BaseInfoWidget):
  """Locators for the org group info widget"""


class WidgetInfoIssue(BaseInfoWidget):
  """Locators for the issue info widget"""


class WidgetInfoRegulations(BaseInfoWidget):
  """Locators for the regulation info widget"""


class WidgetInfoWorkflow(BaseInfoWidget):
  """Locators for the workflow info widget"""


class WidgetInfoAudit(BaseInfoWidget):
  """Locators for the audit info widget"""


class WidgetInfoAssessment(BaseInfoWidget):
  """Locators for the assessment info widget"""


class WidgetInfoAssessmentTemplate(BaseInfoWidget):
  """Locators for the assessment template info widget."""


class WidgetInfoPolicy(BaseInfoWidget):
  """Locators for the policy info widget"""


class WidgetInfoStandard(BaseInfoWidget):
  """Locators for the standard info widget"""


class WidgetInfoContract(BaseInfoWidget):
  """Locators for the contract info widget"""


class WidgetInfoClause(BaseInfoWidget):
  """Locators for the clause info widget"""


class WidgetInfoSection(BaseInfoWidget):
  """Locators for the section info widget"""


class WidgetInfoControl(BaseInfoWidget):
  """Locators for the control info widget"""


class WidgetInfoObjective(BaseInfoWidget):
  """Locators for the objective info widget"""


class WidgetInfoPeople(BaseInfoWidget):
  """Locators for the people info widget"""


class WidgetInfoVendor(BaseInfoWidget):
  """Locators for the vendor info widget"""


class WidgetInfoAccessGroup(BaseInfoWidget):
  """Locators for the access group info widget"""


class WidgetInfoSystem(BaseInfoWidget):
  """Locators for the system info widget"""


class WidgetInfoProcess(BaseInfoWidget):
  """Locators for the process info widget"""


class WidgetInfoProduct(BaseInfoWidget):
  """Locators for the product info widget"""


class WidgetInfoFacility(BaseInfoWidget):
  """Locators for the facility info widget"""


class WidgetInfoProject(BaseInfoWidget):
  """Locators for the project info widget"""


class WidgetInfoMarket(BaseInfoWidget):
  """Locators for the market info widget"""


class WidgetInfoDataAsset(BaseInfoWidget):
  """Locators for the data asset info widget"""


class WidgetInfoThreat(BaseInfoWidget):
  """Locators for the threat info widget"""


class WidgetAdminRoles(object):
  """Locators for the roles widget on the admin dashboard."""
  widget_name = url.Widget.ROLES


class WidgetAdminEvents(object):
  """Locators for the event widget on the admin dashboard."""
  # scope all selectors to the (visible) events widget section
  _BASE_CSS_SELECTOR = 'section#events_list_widget:not([class~="hidden"])'
  _TREE_ITEMS_SELECTOR = ".tree-item[data-model]"
  TREE_VIEW_ITEMS = (By.CSS_SELECTOR,
                     "{0} {1}".
                     format(_BASE_CSS_SELECTOR, _TREE_ITEMS_SELECTOR))
  TREE_VIEW_HEADER = (By.CSS_SELECTOR,
                      "{} header".format(_BASE_CSS_SELECTOR))
  FIRST_TREE_VIEW_ITEM = (By.CSS_SELECTOR,
                          "{0} {1}:first-child".
                          format(_BASE_CSS_SELECTOR, _TREE_ITEMS_SELECTOR))


class WidgetInfoSettingsButton(object):
  """Locators for the settings dropdown of an info widget."""
  TITLE_ENTERED = (By.CSS_SELECTOR, '[data-test-id="title_0ad9fbaf"]>h3')
  DROPDOWN_SETTINGS_EDIT = (
      By.CSS_SELECTOR,
      '[data-test-id="dropdown_settings_edit_f4b27aec"]')
  DROPDOWN_SETTINGS_PERMALINK = (
      By.CSS_SELECTOR,
      '[data-test-id="dropdown_settings_get_permalink_75e3bf91"]')
  DROPDOWN_SETTINGS_DELETE = (
      By.CSS_SELECTOR, '[data-test-id="dropdown_settings_delete_6a62eaaf"]')
class BaseWidgetGeneric(object):
  """Locators shared amongst non info&admin widgets.

  Subclasses set ``_object_name``; the Python 2 metaclass below then
  generates concrete filter locators for that widget.
  """
  # filled in by each subclass (e.g. "control", "issue")
  _object_name = None

  class __metaclass__(type):
    """For sharing parametrized class attributes we simply define how a
    class should look like. Note that the same functionality can be
    implemented using properties though with more code."""
    def __init__(cls, *args):
      # selector templates, filled in with the subclass's _object_name
      _FILTER = "#{}_widget .sticky-filter"
      _FILTER_BUTTON = _FILTER + " .tree-filter__button"
      _FILTER_DROPDOWN = _FILTER + " .multiselect-dropdown"
      _FILTER_DROPDOWN_ELEMENTS = \
          _FILTER_DROPDOWN + " .multiselect-dropdown__element"
      cls.TEXTFIELD_TO_FILTER = (
          By.CSS_SELECTOR, str(_FILTER + " .tree-filter__expression-holder")
          .format(cls._object_name))
      cls.BUTTON_FILTER = (
          By.CSS_SELECTOR,
          str(_FILTER_BUTTON + ' [type="submit"]').format(cls._object_name))
      cls.BUTTON_RESET = (
          By.CSS_SELECTOR,
          str(_FILTER_BUTTON + ' [type="reset"]').format(cls._object_name))
      cls.BUTTON_HELP = (
          By.CSS_SELECTOR,
          str(_FILTER_BUTTON + " #page-help").format(cls._object_name))
      cls.DROPDOWN = (
          By.CSS_SELECTOR,
          str(_FILTER_DROPDOWN + " .multiselect-dropdown__input-container")
          .format(cls._object_name))
      cls.DROPDOWN_STATES = (
          By.CSS_SELECTOR,
          str(_FILTER_DROPDOWN_ELEMENTS).format(cls._object_name))

  FILTER_PANE_COUNTER = (
      By.CSS_SELECTOR,
      ".tree-pagination__count .tree-view-pagination__count__title")
class WidgetAssessments(BaseWidgetGeneric):
    """Locators for Assessments widget."""
    _object_name = objects.get_singular(objects.ASSESSMENTS)
    widget_name = url.Widget.ASSESSMENTS
class WidgetControls(BaseWidgetGeneric):
    """Locators for Controls widget."""
    _object_name = objects.get_singular(objects.CONTROLS)
    widget_name = url.Widget.CONTROLS
class WidgetProducts(BaseWidgetGeneric):
    """Locators for Products widget."""
    _object_name = "product"
class WidgetProjects(BaseWidgetGeneric):
    """Locators for Projects widget."""
    _object_name = "project"
class WidgetSystems(BaseWidgetGeneric):
    """Locators for Systems widget."""
    _object_name = "system"
class WidgetDataAssets(BaseWidgetGeneric):
    """Locators for Data Assets widget."""
    # Fixed copy-pasted docstring: this class targets the data asset widget.
    _object_name = "data_asset"
class WidgetProcesses(BaseWidgetGeneric):
    """Locators for Processes widget."""
    # Fixed copy-pasted docstring: this class targets the process widget.
    _object_name = "process"
class WidgetIssues(BaseWidgetGeneric):
    """Locators for Issues widget."""
    # Fixed copy-pasted docstring: this class targets the issue widget.
    _object_name = "issue"
class WidgetAssessmentTemplates(BaseWidgetGeneric):
    """Locators for Assessment Templates widget."""
    _object_name = objects.get_singular(objects.ASSESSMENT_TEMPLATES)
    widget_name = url.Widget.ASSESSMENT_TEMPLATES
class AdminCustomAttributes(object):
    """Locators for the widget custom attributes in admin dashboard."""
    widget_name = url.Widget.CUSTOM_ATTRIBUTES

    class _Locator(object):
        """Locator factories parametrized by row / column index."""

        @staticmethod
        def get_toggle(child_id):
            # Expand/collapse toggle of the child_id-th (1-based) list item.
            return (By.CSS_SELECTOR, '#custom_attribute_widget li:nth-child({}) '
                    '.openclose'.format(child_id))

        @staticmethod
        def get_programs_label(child_id):
            # child_id-th column header of the 5th tree row (Programs).
            return (
                By.CSS_SELECTOR,
                '.tree-structure li:nth-child(5) div thead>tr>th:nth-child({})'
                .format(child_id))

    class __metaclass__(type):
        # Python 2 metaclass hook: generates one TOGGLE_<OBJECT> attribute
        # per object type at class-creation time. The order of 'items' must
        # match the on-screen row order (nth-child indexing starts at 1).
        def __init__(cls, *args):
            items = (
                objects.WORKFLOWS, objects.RISK_ASSESSMENTS, objects.THREATS,
                objects.RISKS, objects.PROGRAMS, objects.AUDITS,
                objects.OBJECTIVES, objects.SECTIONS, objects.CONTROLS,
                objects.ISSUES, objects.ASSESSMENTS, objects.STANDARDS,
                objects.REGULATIONS, objects.POLICIES, objects.CONTRACTS,
                objects.CLAUSES, objects.VENDORS, objects.PEOPLE,
                objects.ACCESS_GROUPS, objects.ORG_GROUPS, objects.PRODUCTS,
                objects.MARKETS, objects.PROCESSES, objects.FACILITIES,
                objects.PROJECTS, objects.DATA_ASSETS, objects.SYSTEMS)
            for id_, name in enumerate(items, start=1):
                setattr(cls,
                        Common.TOGGLE + name.upper(),
                        cls._Locator.get_toggle(id_))

    # Filter pane controls.
    FILTER_INPUT_FIELD = (By.CLASS_NAME, 'tree-filter__expression-holder')
    FILTER_BUTTON_SUBMIT = (By.CSS_SELECTOR,
                            '.tree-filter__button>[type="submit"]')
    FILTER_BUTTON_RESET = (By.CSS_SELECTOR,
                           '.tree-filter__button>[type="reset"]')

    # programs dropdown
    BUTTON_ADD_CUSTOM_PROGRAM_ATTR = (
        By.CSS_SELECTOR,
        '.tree-item:nth-child(5) [data-test-id="button_add-86eaf948"]')
    # Column headers of the Programs custom-attributes table.
    PROGRAMS_LABEL_ATTRIBUTE_NAME = _Locator.get_programs_label(1)
    PROGRAMS_LABEL_ATTRIBUTE_TYPE = _Locator.get_programs_label(2)
    PROGRAMS_LABEL_MANDATORY = _Locator.get_programs_label(3)
    PROGRAMS_LABEL_EDIT = _Locator.get_programs_label(4)
    # Rows of the Programs custom-attributes table and their edit buttons.
    LISTED_MEMBERS = (
        By.CSS_SELECTOR,
        '.tree-structure li:nth-child(5) div tbody>tr')
    BUTTON_LISTED_MEMBERS_EDIT = (
        By.CSS_SELECTOR,
        '.tree-structure li:nth-child(5) div tbody>tr>td>ul .fa-pencil-square-o')
    CA_ADDED_SUCCESS_ALERT = PageHeader.GENERIC_SUCCESS_ALERT
class CustomAttributesItemContent(AdminCustomAttributes):
    """Locators for the expanded view of custom attribute group
    in admin dashboard."""
    _base_locator = ".content-open .tier-2-info-content"
    _row_locator = "{} .tree-structure .cms_controllers_tree_view_node"\
        .format(_base_locator)
    TITLES_ROW = (By.CSS_SELECTOR, "{} thead tr".format(_base_locator))
    ROW = (By.CSS_SELECTOR, _row_locator)
    CELL_IN_ROW = (By.CSS_SELECTOR, "td")
    # Bug fix: '.format' previously bound only to the trailing " a" literal,
    # so the leading "{}" placeholder was never substituted and the selector
    # contained a literal "{}". Parenthesize the concatenation so the whole
    # template is formatted with the row locator.
    EDIT_BTN = (By.CSS_SELECTOR,
                ("{} " + Common.TREE_LIST + " a").format(_row_locator))
    ADD_BTN = (By.CSS_SELECTOR, "{} .add-item .btn".format(_base_locator))
    TREE_SPINNER = (By.CSS_SELECTOR, ".tree-spinner")
| {
"content_hash": "0951f83e9a4aef2d5a4c8f06fffb73ea",
"timestamp": "",
"source": "github",
"line_count": 1049,
"max_line_length": 79,
"avg_line_length": 36.23069590085796,
"alnum_prop": 0.6374519812661159,
"repo_name": "VinnieJohns/ggrc-core",
"id": "42ac413ead13c0023f8cfc3637101401b1e56f38",
"size": "38118",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "test/selenium/src/lib/constants/locator.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "226950"
},
{
"name": "HTML",
"bytes": "1060386"
},
{
"name": "JavaScript",
"bytes": "1927277"
},
{
"name": "Makefile",
"bytes": "7044"
},
{
"name": "Mako",
"bytes": "4320"
},
{
"name": "Python",
"bytes": "2762348"
},
{
"name": "Shell",
"bytes": "31100"
}
],
"symlink_target": ""
} |
import json
from django.contrib.auth.decorators import login_required
from django.core import serializers
from django.core.serializers.json import DjangoJSONEncoder
from django.db.models import Q
from django.http import HttpResponse, HttpResponseBadRequest, HttpResponseRedirect
from django.template import loader
from django.urls import reverse
from django.views.decorators.http import require_http_methods
from NearBeach.decorators.check_user_permissions import (
check_user_requirement_item_permissions,
check_user_permissions,
)
from NearBeach.forms import (
AddRequirementLinkForm,
NewRequirementItemForm,
UpdateRequirementItemForm,
)
from NearBeach.views.requirement_views import get_requirement_items
from NearBeach.models import (
requirement_item,
object_assignment,
project,
task,
requirement,
organisation,
list_of_requirement_item_status,
list_of_requirement_item_type,
group,
)
@require_http_methods(["POST"])
@login_required(login_url="login", redirect_field_name="")
@check_user_requirement_item_permissions(min_permission_level=2)
def add_requirement_item_link(request, requirement_item_id, *args, **kwargs):
    """Link the requirement item to the projects/tasks posted in the form,
    then return the refreshed list of links as JSON."""
    link_form = AddRequirementLinkForm(request.POST)
    if not link_form.is_valid():
        return HttpResponseBadRequest(link_form.errors)

    # The requirement item all new links hang off.
    item_instance = requirement_item.objects.get(
        requirement_item_id=requirement_item_id
    )

    # One object_assignment per selected project ...
    for project_pk in request.POST.getlist("project"):
        object_assignment(
            requirement_item=item_instance,
            project=project.objects.get(project_id=project_pk),
            change_user=request.user,
        ).save()

    # ... and one per selected task.
    for task_pk in request.POST.getlist("task"):
        object_assignment(
            requirement_item=item_instance,
            task=task.objects.get(task_id=task_pk),
            change_user=request.user,
        ).save()

    # Hand the complete, refreshed set of links back as JSON.
    refreshed_links = list(get_requirement_item_links(requirement_item_id))
    return HttpResponse(
        json.dumps(refreshed_links, cls=DjangoJSONEncoder),
        content_type="application/json",
    )
# Internal Code
def get_requirement_item_links(requirement_item_id):
    """Use object_assignment to get the requirements linked to the given
    requirement item.

    Returns a values() queryset of non-deleted assignments that point at an
    opportunity, quote, project or task, including the display fields the
    front end needs for each linked object.
    """
    return object_assignment.objects.filter(
        Q(is_deleted=False, requirement_item_id=requirement_item_id)
        & Q(
            # Keep only assignments that actually link to something.
            Q(opportunity_id__isnull=False)
            | Q(quote_id__isnull=False)
            | Q(project_id__isnull=False)
            | Q(task_id__isnull=False)
        )
    ).values(
        "opportunity_id",
        "opportunity_id__opportunity_name",
        "opportunity_id__opportunity_stage_id__opportunity_stage_description",
        "quote_id",
        "quote_id__quote_title",
        "quote_id__quote_stage_id__quote_stage",
        "project_id",
        "project_id__project_name",
        "project_id__project_status",
        "task_id",
        "task_id__task_short_description",
        "task_id__task_status",
        "requirement_item_id",
        "requirement_item_id__requirement_item_title",
    )
@require_http_methods(["POST"])
@login_required(login_url="login", redirect_field_name="")
@check_user_requirement_item_permissions(min_permission_level=1)
def get_requirement_item_links_list(request, requirement_item_id, *args, **kwargs):
    """Return the requirement item's current links serialized as JSON.

    :param request: the current HttpRequest
    :param requirement_item_id: primary key of the requirement item
    :return: JSON list of linked objects (see get_requirement_item_links)
    """
    links = list(get_requirement_item_links(requirement_item_id))
    return HttpResponse(
        json.dumps(links, cls=DjangoJSONEncoder),
        content_type="application/json",
    )
@login_required(login_url="login", redirect_field_name="")
# @check_user_requirement_item_permissions(min_permission_level=3) # Function won't work without requirmeent_item_id
@check_user_permissions(min_permission_level=3, object_lookup="requirement_id")
def new_requirement_item(request, requirement_id, *args, **kwargs):
    """Create a new requirement item under the given requirement and return
    the refreshed item list."""
    # Guard clause - this endpoint only accepts POST.
    if request.method != "POST":
        return HttpResponseBadRequest("Sorry - needs to be in POST")

    item_form = NewRequirementItemForm(request.POST)
    if not item_form.is_valid():
        return HttpResponseBadRequest(item_form.errors)

    # Persist the new item against its parent requirement.
    created_item = requirement_item(
        requirement=requirement.objects.get(requirement_id=requirement_id),
        requirement_item_title=item_form.cleaned_data["requirement_item_title"],
        requirement_item_scope=item_form.cleaned_data["requirement_item_scope"],
        requirement_item_status=item_form.cleaned_data["requirement_item_status"],
        requirement_item_type=item_form.cleaned_data["requirement_item_type"],
        change_user=request.user,
    )
    created_item.save()

    # Feed the full, refreshed requirement item list back upstream.
    return get_requirement_items(request, requirement_id)
@login_required(login_url="login", redirect_field_name="")
@check_user_requirement_item_permissions(min_permission_level=1)
def requirement_item_information(request, requirement_item_id, *args, **kwargs):
    """
    Loads the requirement item information page.

    :param request: the current HttpRequest
    :param requirement_item_id: primary key of the requirement item to show
    :return: rendered information page, or a redirect to the read-only view
             when the item's status is closed
    """
    # Permission level injected by the decorator.
    user_level = kwargs["user_level"]

    # Get the requirement item information
    requirement_item_results = requirement_item.objects.get(
        requirement_item_id=requirement_item_id
    )

    # If the requirement item has been closed - send user to the read only section
    if requirement_item_results.requirement_item_status.status_is_closed:
        # Bug fix: 'args' used to receive a *set* literal ({...}); reverse()
        # expects an ordered sequence, so pass a list instead of relying on
        # single-element set iteration.
        return HttpResponseRedirect(
            reverse("requirement_readonly", args=[requirement_item_id])
        )

    # Load template
    t = loader.get_template(
        "NearBeach/requirement_items/requirement_item_information.html"
    )

    # Get any extra data required for the context
    organisation_results = organisation.objects.get(
        organisation_id=requirement_item_results.requirement.organisation_id,
    )
    # Only open statuses are offered in the edit dropdown.
    status_list = list_of_requirement_item_status.objects.filter(
        is_deleted=False,
        status_is_closed=False,
    )
    type_list = list_of_requirement_item_type.objects.filter(
        is_deleted=False,
    )
    group_results = group.objects.filter(
        is_deleted=False,
    )

    # Template context
    c = {
        "group_results": serializers.serialize("json", group_results),
        "nearbeach_title": f"Requirement Item {requirement_item_id}",
        "organisation_results": serializers.serialize("json", [organisation_results]),
        "requirement_item_id": requirement_item_id,
        "requirement_item_results": serializers.serialize(
            "json", [requirement_item_results]
        ),
        "status_list": serializers.serialize("json", status_list),
        "type_list": serializers.serialize("json", type_list),
        "user_level": user_level,
    }

    return HttpResponse(t.render(c, request))
@require_http_methods(["POST"])
@login_required(login_url="login", redirect_field_name="")
@check_user_requirement_item_permissions(min_permission_level=2)
def requirement_information_save(request, requirement_item_id, *args, **kwargs):
    """
    Save updated fields (title, scope, status, type) on a requirement item.

    :param request: the current HttpRequest carrying the form data
    :param requirement_item_id: primary key of the requirement item to update
    :return: empty 200 response on success, 400 with form errors otherwise
    """
    # Get form data
    form = UpdateRequirementItemForm(request.POST)
    if not form.is_valid():
        return HttpResponseBadRequest(form.errors)

    # Copy the cleaned form values onto the existing requirement item.
    requirement_item_submit = requirement_item.objects.get(
        requirement_item_id=requirement_item_id
    )
    requirement_item_submit.change_user = request.user
    requirement_item_submit.requirement_item_title = form.cleaned_data[
        "requirement_item_title"
    ]
    requirement_item_submit.requirement_item_scope = form.cleaned_data[
        "requirement_item_scope"
    ]
    requirement_item_submit.requirement_item_status = form.cleaned_data[
        "requirement_item_status"
    ]
    requirement_item_submit.requirement_item_type = form.cleaned_data[
        "requirement_item_type"
    ]
    requirement_item_submit.save()

    # Send back an empty response
    return HttpResponse("")
| {
"content_hash": "1b2d1123033c570ba53f417d51abf12c",
"timestamp": "",
"source": "github",
"line_count": 248,
"max_line_length": 116,
"avg_line_length": 34.83064516129032,
"alnum_prop": 0.6898587636026858,
"repo_name": "robotichead/NearBeach",
"id": "64afcaa9f7179534820e0e4e6d90debf9f9ff3ba",
"size": "8638",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "NearBeach/views/requirement_item_views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "42158"
},
{
"name": "JavaScript",
"bytes": "67633"
},
{
"name": "Python",
"bytes": "617411"
},
{
"name": "SCSS",
"bytes": "13202"
},
{
"name": "Vue",
"bytes": "754332"
}
],
"symlink_target": ""
} |
from django.apps import AppConfig
class DkModelfields(AppConfig):
    """Django application configuration for the dkmodelfields package."""
    # App label must be lower-case; derive it from the display name.
    name = 'DkModelfields'.lower()
    verbose_name = 'DkModelfields'
| {
"content_hash": "f468edc5c25dfaaa42a7fd25cdb652d9",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 34,
"avg_line_length": 23,
"alnum_prop": 0.7391304347826086,
"repo_name": "datakortet/dkmodelfields",
"id": "8cf1d785ae6e1abb5979a42127a411b4152f5324",
"size": "162",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dkmodelfields/apps.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "66973"
}
],
"symlink_target": ""
} |
import functools
class decorator_uiexpose(object):
    """
    Use this decorator to expose instance method to the GUI.

    The wrapped function gains two attributes used for duck-typing by the
    interface: ``__can_show__`` and ``_flags``.
    """
    def __init__(self, flags=None):
        """
        :param flags: optional list of flag strings attached to the wrapped
                      function as ``_flags``.
        """
        # Avoid a shared mutable default; each decorator owns its list.
        if flags is None:
            flags = []
        self.flags = flags

    def __call__(self, fn, *args, **kwargs):
        # functools.wraps preserves the wrapped function's metadata
        # (__name__, __doc__, ...), which the previous implementation lost.
        @functools.wraps(fn)
        def wrapped_f(*args, **kwargs):
            return fn(*args, **kwargs)
        wrapped_f.__can_show__ = self.__can_show__
        wrapped_f._flags = self.flags
        return wrapped_f

    def __get__(self, inst, owner):
        # NOTE(review): this binds the *instance* as the first argument of
        # __call__ (i.e. as ``fn``) -- presumably to support using the
        # decorator object itself as a descriptor; confirm before changing.
        fn = functools.partial(self.__call__, inst)
        fn.__can_show__ = self.__can_show__  # todo: necessary?
        fn._flags = self.flags  # todo: necessary?
        return fn

    def __can_show__(self):
        """
        This method is used for duck-typing by the interface.
        """
        return True
| {
"content_hash": "a61d32b3519efa7e7ccd4a6096c2e5fe",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 63,
"avg_line_length": 26.25,
"alnum_prop": 0.5369047619047619,
"repo_name": "SqueezeStudioAnimation/omtk",
"id": "fdabb2c15e41513f67776178700714b373ef301c",
"size": "840",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/omtk/core/utils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Mathematica",
"bytes": "1124321"
},
{
"name": "Python",
"bytes": "1054644"
},
{
"name": "Shell",
"bytes": "143"
}
],
"symlink_target": ""
} |
from pathlib import Path
from tempfile import TemporaryDirectory
from paralleldomain.utilities.any_path import AnyPath, S3Path
def test_resolving():
    """AnyPath must pick a local Path backend for filesystem paths and an
    S3Path backend for s3:// URIs."""
    with TemporaryDirectory() as workdir:
        local_path = AnyPath(workdir)
        assert local_path.exists()
        assert isinstance(local_path._backend, Path)
        remote_path = AnyPath("s3://paralleldomain-testing/")
        assert isinstance(remote_path._backend, S3Path)
def test_concat():
    """Joining with ``/`` must keep returning AnyPath for both backends."""
    with TemporaryDirectory() as workdir:
        joined_local = AnyPath(workdir) / "test"
        assert isinstance(joined_local, AnyPath)
        joined_remote = AnyPath("s3://paralleldomain-testing/") / "test"
        assert isinstance(joined_remote, AnyPath)
| {
"content_hash": "9607acffc5cb1fc30de05347b318dee8",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 63,
"avg_line_length": 31.428571428571427,
"alnum_prop": 0.6772727272727272,
"repo_name": "parallel-domain/pd-sdk",
"id": "1cc25770f995a67d559c8b661fda46a497d2ade6",
"size": "660",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "test_paralleldomain/utilities/test_any_path.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1030434"
},
{
"name": "Shell",
"bytes": "1375"
}
],
"symlink_target": ""
} |
import sublime
import sublime_plugin
from .list_packages_command import ListPackagesThread
class ListUnmanagedPackagesCommand(sublime_plugin.WindowCommand):
    """
    A command that shows a list of all packages that are not managed by
    Package Control, i.e. that are installed, but not mentioned in
    `installed_packages`.
    """

    def run(self):
        settings = sublime.load_settings('Package Control.sublime-settings')
        # A package counts as "managed" if it is either explicitly ignored
        # by the user or recorded as installed by Package Control.
        excluded = list(settings.get('unmanaged_packages_ignore', []))
        excluded += settings.get('installed_packages', [])

        def filter_packages(package):
            # package is a (name, ...) tuple; keep only unmanaged ones.
            return package[0] not in excluded

        ListPackagesThread(self.window, filter_packages).start()
| {
"content_hash": "c23b4f4b8b3b327dd60afd5cbc6da3aa",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 76,
"avg_line_length": 31,
"alnum_prop": 0.7083333333333334,
"repo_name": "herove/dotfiles",
"id": "ae1121c880161ab7155be1837dab643df0833f60",
"size": "744",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "sublime/Packages/Package Control/package_control/commands/list_unmanaged_packages_command.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "358958"
},
{
"name": "C++",
"bytes": "601356"
},
{
"name": "CMake",
"bytes": "17100"
},
{
"name": "Java",
"bytes": "77"
},
{
"name": "JavaScript",
"bytes": "1058301"
},
{
"name": "Python",
"bytes": "5847904"
},
{
"name": "Shell",
"bytes": "49159"
},
{
"name": "Vim script",
"bytes": "43682"
}
],
"symlink_target": ""
} |
"""distutils.command.build_ext
Implements the Distutils 'build_ext' command, for building extension
modules (currently limited to C extensions, should accommodate C++
extensions ASAP)."""
# This module should be kept compatible with Python 2.1.
__revision__ = "$Id$"
import sys, os, string, re
from types import *
from site import USER_BASE, USER_SITE
from distutils.core import Command
from distutils.errors import *
from distutils.sysconfig import customize_compiler, get_python_version
from distutils.dep_util import newer_group
from distutils.extension import Extension
from distutils.util import get_platform
from distutils import log
# The MSVC build-version helper is only importable (and relevant) on Windows.
if os.name == 'nt':
    from distutils.msvccompiler import get_build_version
    MSVC_VERSION = int(get_build_version())

# An extension name is just a dot-separated list of Python NAMEs (ie.
# the same as a fully-qualified module name).
extension_name_re = re.compile \
    (r'^[a-zA-Z_][a-zA-Z_0-9]*(\.[a-zA-Z_][a-zA-Z_0-9]*)*$')
def show_compilers ():
    """Print the list of available compilers (used by --help-compiler)."""
    # Deliberately shadows this module-level name with the real
    # implementation from distutils.ccompiler.
    from distutils.ccompiler import show_compilers
    show_compilers()
class build_ext (Command):
    """Command class for 'build_ext': compile and link C/C++ extension
    modules into the build directory."""

    description = "build C/C++ extensions (compile/link to build directory)"

    # XXX thoughts on how to deal with complex command-line options like
    # these, i.e. how to make it so fancy_getopt can suck them off the
    # command line and make it look like setup.py defined the appropriate
    # lists of tuples of what-have-you.
    #   - each command needs a callback to process its command-line options
    #   - Command.__init__() needs access to its share of the whole
    #     command line (must ultimately come from
    #     Distribution.parse_command_line())
    #   - it then calls the current command class' option-parsing
    #     callback to deal with weird options like -D, which have to
    #     parse the option text and churn out some custom data
    #     structure
    #   - that data structure (in this case, a list of 2-tuples)
    #     will then be present in the command object by the time
    #     we get to finalize_options() (i.e. the constructor
    #     takes care of both command-line and client options
    #     in between initialize_options() and finalize_options())

    # Suffix appended to help strings for path-list options.
    sep_by = " (separated by '%s')" % os.pathsep
    # (long-option, short-option, help-text) triples consumed by fancy_getopt.
    user_options = [
        ('build-lib=', 'b',
         "directory for compiled extension modules"),
        ('build-temp=', 't',
         "directory for temporary files (build by-products)"),
        ('plat-name=', 'p',
         "platform name to cross-compile for, if supported "
         "(default: %s)" % get_platform()),
        ('inplace', 'i',
         "ignore build-lib and put compiled extensions into the source " +
         "directory alongside your pure Python modules"),
        ('include-dirs=', 'I',
         "list of directories to search for header files" + sep_by),
        ('define=', 'D',
         "C preprocessor macros to define"),
        ('undef=', 'U',
         "C preprocessor macros to undefine"),
        ('libraries=', 'l',
         "external C libraries to link with"),
        ('library-dirs=', 'L',
         "directories to search for external C libraries" + sep_by),
        ('rpath=', 'R',
         "directories to search for shared C libraries at runtime"),
        ('link-objects=', 'O',
         "extra explicit link objects to include in the link"),
        ('debug', 'g',
         "compile/link with debugging information"),
        ('force', 'f',
         "forcibly build everything (ignore file timestamps)"),
        ('compiler=', 'c',
         "specify the compiler type"),
        ('swig-cpp', None,
         "make SWIG create C++ files (default is C)"),
        ('swig-opts=', None,
         "list of SWIG command line options"),
        ('swig=', None,
         "path to the SWIG executable"),
        ('user', None,
         "add user include, library and rpath"),
        ]

    # Options that take no argument (flags).
    boolean_options = ['inplace', 'debug', 'force', 'swig-cpp', 'user']

    help_options = [
        ('help-compiler', None,
         "list available compilers", show_compilers),
        ]
def initialize_options (self):
    """Set every command option to its unset default."""
    # All options except 'inplace' default to None ("not yet decided");
    # finalize_options() later fills in the real values.
    for option_name in (
            'extensions', 'build_lib', 'plat_name', 'build_temp',
            'package', 'include_dirs', 'define', 'undef', 'libraries',
            'library_dirs', 'rpath', 'link_objects', 'debug', 'force',
            'compiler', 'swig', 'swig_cpp', 'swig_opts', 'user'):
        setattr(self, option_name, None)
    self.inplace = 0
def finalize_options(self):
    """Resolve all build_ext options: inherit from 'build', normalize
    string options into lists, and add the platform-specific include and
    library directories."""
    from distutils import sysconfig

    # Inherit unset options from the parent 'build' command.
    self.set_undefined_options('build',
                               ('build_lib', 'build_lib'),
                               ('build_temp', 'build_temp'),
                               ('compiler', 'compiler'),
                               ('debug', 'debug'),
                               ('force', 'force'),
                               ('plat_name', 'plat_name'),
                               )

    if self.package is None:
        self.package = self.distribution.ext_package

    self.extensions = self.distribution.ext_modules

    # Make sure Python's include directories (for Python.h, pyconfig.h,
    # etc.) are in the include search path.
    py_include = sysconfig.get_python_inc()
    plat_py_include = sysconfig.get_python_inc(plat_specific=1)
    if self.include_dirs is None:
        self.include_dirs = self.distribution.include_dirs or []
    if isinstance(self.include_dirs, str):
        self.include_dirs = self.include_dirs.split(os.pathsep)

    # Put the Python "system" include dir at the end, so that
    # any local include dirs take precedence.
    self.include_dirs.append(py_include)
    if plat_py_include != py_include:
        self.include_dirs.append(plat_py_include)

    self.ensure_string_list('libraries')

    # Life is easier if we're not forever checking for None, so
    # simplify these options to empty lists if unset.
    # (StringType/string.split are Python 2 idioms; the module header
    # requires Python 2.1 compatibility.)
    if self.libraries is None:
        self.libraries = []
    if self.library_dirs is None:
        self.library_dirs = []
    elif type(self.library_dirs) is StringType:
        self.library_dirs = string.split(self.library_dirs, os.pathsep)

    if self.rpath is None:
        self.rpath = []
    elif type(self.rpath) is StringType:
        self.rpath = string.split(self.rpath, os.pathsep)

    # for extensions under windows use different directories
    # for Release and Debug builds.
    # also Python's library directory must be appended to library_dirs
    if os.name == 'nt':
        # the 'libs' directory is for binary installs - we assume that
        # must be the *native* platform.  But we don't really support
        # cross-compiling via a binary install anyway, so we let it go.
        self.library_dirs.append(os.path.join(sys.exec_prefix, 'libs'))
        if self.debug:
            self.build_temp = os.path.join(self.build_temp, "Debug")
        else:
            self.build_temp = os.path.join(self.build_temp, "Release")

        # Append the source distribution include and library directories,
        # this allows distutils on windows to work in the source tree
        self.include_dirs.append(os.path.join(sys.exec_prefix, 'PC'))
        if MSVC_VERSION == 9:
            # Use the .lib files for the correct architecture
            if self.plat_name == 'win32':
                suffix = ''
            else:
                # win-amd64 or win-ia64
                suffix = self.plat_name[4:]
            # We could have been built in one of two places; add both
            for d in ('PCbuild',), ('PC', 'VS9.0'):
                new_lib = os.path.join(sys.exec_prefix, *d)
                if suffix:
                    new_lib = os.path.join(new_lib, suffix)
                self.library_dirs.append(new_lib)

        elif MSVC_VERSION == 8:
            self.library_dirs.append(os.path.join(sys.exec_prefix,
                                                  'PC', 'VS8.0'))
        elif MSVC_VERSION == 7:
            self.library_dirs.append(os.path.join(sys.exec_prefix,
                                                  'PC', 'VS7.1'))
        else:
            self.library_dirs.append(os.path.join(sys.exec_prefix,
                                                  'PC', 'VC6'))

    # OS/2 (EMX) doesn't support Debug vs Release builds, but has the
    # import libraries in its "Config" subdirectory
    if os.name == 'os2':
        self.library_dirs.append(os.path.join(sys.exec_prefix, 'Config'))

    # for extensions under Cygwin and AtheOS Python's library directory must be
    # appended to library_dirs
    if sys.platform[:6] == 'cygwin' or sys.platform[:6] == 'atheos':
        if sys.executable.startswith(os.path.join(sys.exec_prefix, "bin")):
            # building third party extensions
            self.library_dirs.append(os.path.join(sys.prefix, "lib",
                                                  "python" + get_python_version(),
                                                  "config"))
        else:
            # building python standard extensions
            self.library_dirs.append('.')

    # For building extensions with a shared Python library,
    # Python's library directory must be appended to library_dirs
    # See Issues: #1600860, #4366
    # NOTE(review): deliberately disabled via 'if False' -- presumably a
    # local patch; confirm before re-enabling.
    if False and (sysconfig.get_config_var('Py_ENABLE_SHARED')):
        if not sysconfig.python_build:
            # building third party extensions
            self.library_dirs.append(sysconfig.get_config_var('LIBDIR'))
        else:
            # building python standard extensions
            self.library_dirs.append('.')

    # The argument parsing will result in self.define being a string, but
    # it has to be a list of 2-tuples.  All the preprocessor symbols
    # specified by the 'define' option will be set to '1'.  Multiple
    # symbols can be separated with commas.
    # NOTE(review): under Python 3 'map' returns a lazy iterator, not a
    # list -- this line relies on Python 2 semantics.
    if self.define:
        defines = self.define.split(',')
        self.define = map(lambda symbol: (symbol, '1'), defines)

    # The option for macros to undefine is also a string from the
    # option parsing, but has to be a list.  Multiple symbols can also
    # be separated with commas here.
    if self.undef:
        self.undef = self.undef.split(',')

    if self.swig_opts is None:
        self.swig_opts = []
    else:
        self.swig_opts = self.swig_opts.split(' ')

    # Finally add the user include and library directories if requested
    if self.user:
        user_include = os.path.join(USER_BASE, "include")
        user_lib = os.path.join(USER_BASE, "lib")
        if os.path.isdir(user_include):
            self.include_dirs.append(user_include)
        if os.path.isdir(user_lib):
            self.library_dirs.append(user_lib)
            self.rpath.append(user_lib)
def run(self):
    """Set up the CCompiler from the resolved options and build every
    extension in 'self.extensions'."""
    from distutils.ccompiler import new_compiler

    # 'self.extensions', as supplied by setup.py, is a list of
    # Extension instances.  See the documentation for Extension (in
    # distutils.extension) for details.
    #
    # For backwards compatibility with Distutils 0.8.2 and earlier, we
    # also allow the 'extensions' list to be a list of tuples:
    #    (ext_name, build_info)
    # where build_info is a dictionary containing everything that
    # Extension instances do except the name, with a few things being
    # differently named.  We convert these 2-tuples to Extension
    # instances as needed.

    if not self.extensions:
        return

    # If we were asked to build any C/C++ libraries, make sure that the
    # directory where we put them is in the library search path for
    # linking extensions.
    if self.distribution.has_c_libraries():
        build_clib = self.get_finalized_command('build_clib')
        self.libraries.extend(build_clib.get_library_names() or [])
        self.library_dirs.append(build_clib.build_clib)

    # Setup the CCompiler object that we'll use to do all the
    # compiling and linking
    self.compiler = new_compiler(compiler=self.compiler,
                                 verbose=self.verbose,
                                 dry_run=self.dry_run,
                                 force=self.force)
    customize_compiler(self.compiler)
    # If we are cross-compiling, init the compiler now (if we are not
    # cross-compiling, init would not hurt, but people may rely on
    # late initialization of compiler even if they shouldn't...)
    if os.name == 'nt' and self.plat_name != get_platform():
        self.compiler.initialize(self.plat_name)

    # And make sure that any compile/link-related options (which might
    # come from the command-line or from the setup script) are set in
    # that CCompiler object -- that way, they automatically apply to
    # all compiling and linking done here.
    if self.include_dirs is not None:
        self.compiler.set_include_dirs(self.include_dirs)
    if self.define is not None:
        # 'define' option is a list of (name,value) tuples
        for (name, value) in self.define:
            self.compiler.define_macro(name, value)
    if self.undef is not None:
        for macro in self.undef:
            self.compiler.undefine_macro(macro)
    if self.libraries is not None:
        self.compiler.set_libraries(self.libraries)
    if self.library_dirs is not None:
        self.compiler.set_library_dirs(self.library_dirs)
    if self.rpath is not None:
        self.compiler.set_runtime_library_dirs(self.rpath)
    if self.link_objects is not None:
        self.compiler.set_link_objects(self.link_objects)

    # Now actually compile and link everything.
    self.build_extensions()
def check_extensions_list(self, extensions):
    """Ensure that the list of extensions (presumably provided as a
    command option 'extensions') is valid, i.e. it is a list of
    Extension objects.  We also support the old-style list of 2-tuples,
    where the tuples are (ext_name, build_info), which are converted to
    Extension instances here.

    Raise DistutilsSetupError if the structure is invalid anywhere;
    just returns otherwise.
    """
    # NOTE: this method uses the Python 2 'raise Exc, value' statement
    # syntax throughout (the module header requires Python 2.1
    # compatibility); it is not valid Python 3.
    if not isinstance(extensions, list):
        raise DistutilsSetupError, \
              "'ext_modules' option must be a list of Extension instances"

    for i, ext in enumerate(extensions):
        if isinstance(ext, Extension):
            continue                # OK! (assume type-checking done
                                    # by Extension constructor)

        if not isinstance(ext, tuple) or len(ext) != 2:
            raise DistutilsSetupError, \
                  ("each element of 'ext_modules' option must be an "
                   "Extension instance or 2-tuple")

        ext_name, build_info = ext

        log.warn(("old-style (ext_name, build_info) tuple found in "
                  "ext_modules for extension '%s'"
                  "-- please convert to Extension instance" % ext_name))

        if not (isinstance(ext_name, str) and
                extension_name_re.match(ext_name)):
            raise DistutilsSetupError, \
                  ("first element of each tuple in 'ext_modules' "
                   "must be the extension name (a string)")

        if not isinstance(build_info, dict):
            raise DistutilsSetupError, \
                  ("second element of each tuple in 'ext_modules' "
                   "must be a dictionary (build info)")

        # OK, the (ext_name, build_info) dict is type-safe: convert it
        # to an Extension instance.
        ext = Extension(ext_name, build_info['sources'])

        # Easy stuff: one-to-one mapping from dict elements to
        # instance attributes.
        for key in ('include_dirs', 'library_dirs', 'libraries',
                    'extra_objects', 'extra_compile_args',
                    'extra_link_args'):
            val = build_info.get(key)
            if val is not None:
                setattr(ext, key, val)

        # Medium-easy stuff: same syntax/semantics, different names.
        ext.runtime_library_dirs = build_info.get('rpath')
        if 'def_file' in build_info:
            log.warn("'def_file' element of build info dict "
                     "no longer supported")

        # Non-trivial stuff: 'macros' split into 'define_macros'
        # and 'undef_macros'.
        macros = build_info.get('macros')
        if macros:
            ext.define_macros = []
            ext.undef_macros = []
            for macro in macros:
                if not (isinstance(macro, tuple) and len(macro) in (1, 2)):
                    raise DistutilsSetupError, \
                          ("'macros' element of build info dict "
                           "must be 1- or 2-tuple")
                if len(macro) == 1:
                    ext.undef_macros.append(macro[0])
                elif len(macro) == 2:
                    ext.define_macros.append(macro)

        # Replace the old-style tuple with the converted Extension
        # in place.
        extensions[i] = ext
def get_source_files(self):
    """Return every source filename referenced by the extension list."""
    self.check_extensions_list(self.extensions)
    # Flatten the per-extension source lists into a single list.
    # (Header files are not tracked here.)
    return [source
            for extension in self.extensions
            for source in extension.sources]
def get_outputs(self):
    """Return the full paths of the extension modules this command builds."""
    # Sanity check the 'extensions' list first -- this may run in a
    # different invocation than build_extensions(), so nothing can be
    # assumed about prior validation.
    self.check_extensions_list(self.extensions)
    # Output paths always point into the "build" tree; the 'inplace'
    # flag is deliberately ignored here.
    return [self.get_ext_fullpath(extension.name)
            for extension in self.extensions]
    def build_extensions(self):
        """Build every extension in self.extensions, one at a time."""
        # First, sanity-check the 'extensions' list
        self.check_extensions_list(self.extensions)
        for ext in self.extensions:
            self.build_extension(ext)
    def build_extension(self, ext):
        """Compile and link a single Extension into its final binary.

        The build is skipped entirely when neither --force is set nor any
        of the sources/dependencies is newer than the existing output.
        NOTE: this file uses Python 2 'raise Exc, value' syntax throughout.
        """
        sources = ext.sources
        if sources is None or type(sources) not in (ListType, TupleType):
            raise DistutilsSetupError, \
                  ("in 'ext_modules' option (extension '%s'), " +
                   "'sources' must be present and must be " +
                   "a list of source filenames") % ext.name
        # Work on a copy so the SWIG step below cannot mutate ext.sources.
        sources = list(sources)
        ext_path = self.get_ext_fullpath(ext.name)
        depends = sources + ext.depends
        if not (self.force or newer_group(depends, ext_path, 'newer')):
            log.debug("skipping '%s' extension (up-to-date)", ext.name)
            return
        else:
            log.info("building '%s' extension", ext.name)
        # First, scan the sources for SWIG definition files (.i), run
        # SWIG on 'em to create .c files, and modify the sources list
        # accordingly.
        sources = self.swig_sources(sources, ext)
        # Next, compile the source code to object files.
        # XXX not honouring 'define_macros' or 'undef_macros' -- the
        # CCompiler API needs to change to accommodate this, and I
        # want to do one thing at a time!
        # Two possible sources for extra compiler arguments:
        #   - 'extra_compile_args' in Extension object
        #   - CFLAGS environment variable (not particularly
        #     elegant, but people seem to expect it and I
        #     guess it's useful)
        # The environment variable should take precedence, and
        # any sensible compiler will give precedence to later
        # command line args.  Hence we combine them in order:
        extra_args = ext.extra_compile_args or []
        macros = ext.define_macros[:]
        for undef in ext.undef_macros:
            # A 1-tuple means "undefine this macro" to CCompiler.compile().
            macros.append((undef,))
        objects = self.compiler.compile(sources,
                                        output_dir=self.build_temp,
                                        macros=macros,
                                        include_dirs=ext.include_dirs,
                                        debug=self.debug,
                                        extra_postargs=extra_args,
                                        depends=ext.depends)
        # XXX -- this is a Vile HACK!
        #
        # The setup.py script for Python on Unix needs to be able to
        # get this list so it can perform all the clean up needed to
        # avoid keeping object files around when cleaning out a failed
        # build of an extension module.  Since Distutils does not
        # track dependencies, we have to get rid of intermediates to
        # ensure all the intermediates will be properly re-built.
        #
        self._built_objects = objects[:]
        # Now link the object files together into a "shared object" --
        # of course, first we have to figure out all the other things
        # that go into the mix.
        if ext.extra_objects:
            objects.extend(ext.extra_objects)
        extra_args = ext.extra_link_args or []
        # Detect target language, if not provided
        language = ext.language or self.compiler.detect_language(sources)
        self.compiler.link_shared_object(
            objects, ext_path,
            libraries=self.get_libraries(ext),
            library_dirs=ext.library_dirs,
            runtime_library_dirs=ext.runtime_library_dirs,
            extra_postargs=extra_args,
            export_symbols=self.get_export_symbols(ext),
            debug=self.debug,
            build_temp=self.build_temp,
            target_lang=language)
    def swig_sources (self, sources, extension):
        """Walk the list of source files in 'sources', looking for SWIG
        interface (.i) files.  Run SWIG on all that are found, and
        return a modified 'sources' list with SWIG source files replaced
        by the generated C (or C++) files.
        """
        new_sources = []
        swig_sources = []
        # Maps each .i file to the wrapper file SWIG will generate for it.
        swig_targets = {}
        # XXX this drops generated C/C++ files into the source tree, which
        # is fine for developers who want to distribute the generated
        # source -- but there should be an option to put SWIG output in
        # the temp dir.
        if self.swig_cpp:
            log.warn("--swig-cpp is deprecated - use --swig-opts=-c++")
        # C++ output is requested via the deprecated --swig-cpp flag or a
        # '-c++' entry in either the command's or the extension's swig opts.
        if self.swig_cpp or ('-c++' in self.swig_opts) or \
           ('-c++' in extension.swig_opts):
            target_ext = '.cpp'
        else:
            target_ext = '.c'
        for source in sources:
            (base, ext) = os.path.splitext(source)
            if ext == ".i":             # SWIG interface file
                new_sources.append(base + '_wrap' + target_ext)
                swig_sources.append(source)
                swig_targets[source] = new_sources[-1]
            else:
                new_sources.append(source)
        # Nothing to swig: return the (unchanged) source list as-is.
        if not swig_sources:
            return new_sources
        swig = self.swig or self.find_swig()
        swig_cmd = [swig, "-python"]
        swig_cmd.extend(self.swig_opts)
        if self.swig_cpp:
            swig_cmd.append("-c++")
        # Do not override commandline arguments
        if not self.swig_opts:
            for o in extension.swig_opts:
                swig_cmd.append(o)
        for source in swig_sources:
            target = swig_targets[source]
            log.info("swigging %s to %s", source, target)
            self.spawn(swig_cmd + ["-o", target, source])
        return new_sources
    # swig_sources ()
    def find_swig (self):
        """Return the name of the SWIG executable.  On Unix, this is
        just "swig" -- it should be in the PATH.  Tries a bit harder on
        Windows.

        Raises DistutilsPlatformError on platforms where no lookup
        strategy is known.
        """
        if os.name == "posix":
            return "swig"
        elif os.name == "nt":
            # Look for SWIG in its standard installation directory on
            # Windows (or so I presume!).  If we find it there, great;
            # if not, act like Unix and assume it's in the PATH.
            for vers in ("1.3", "1.2", "1.1"):
                fn = os.path.join("c:\\swig%s" % vers, "swig.exe")
                if os.path.isfile(fn):
                    return fn
            else:
                # for/else: no versioned install dir found -> rely on PATH.
                return "swig.exe"
        elif os.name == "os2":
            # assume swig available in the PATH.
            return "swig.exe"
        else:
            raise DistutilsPlatformError, \
                  ("I don't know how to find (much less run) SWIG "
                   "on platform '%s'") % os.name
    # find_swig ()
# -- Name generators -----------------------------------------------
# (extension names, filenames, whatever)
    def get_ext_fullpath(self, ext_name):
        """Returns the path of the filename for a given extension.

        The file is located in `build_lib` or directly in the package
        (inplace option).
        """
        # makes sure the extension name is only using dots
        # (both '/' and os.sep are translated to '.'; Python 2 'string'
        # module maketrans)
        all_dots = string.maketrans('/'+os.sep, '..')
        ext_name = ext_name.translate(all_dots)
        fullname = self.get_ext_fullname(ext_name)
        modpath = fullname.split('.')
        # Keep only the basename of the platform-specific filename.
        filename = self.get_ext_filename(ext_name)
        filename = os.path.split(filename)[-1]
        if not self.inplace:
            # no further work needed
            # returning :
            #   build_dir/package/path/filename
            filename = os.path.join(*modpath[:-1]+[filename])
            return os.path.join(self.build_lib, filename)
        # the inplace option requires to find the package directory
        # using the build_py command for that
        package = '.'.join(modpath[0:-1])
        build_py = self.get_finalized_command('build_py')
        package_dir = os.path.abspath(build_py.get_package_dir(package))
        # returning
        #   package_dir/filename
        return os.path.join(package_dir, filename)
def get_ext_fullname(self, ext_name):
"""Returns the fullname of a given extension name.
Adds the `package.` prefix"""
if self.package is None:
return ext_name
else:
return self.package + '.' + ext_name
def get_ext_filename(self, ext_name):
r"""Convert the name of an extension (eg. "foo.bar") into the name
of the file from which it will be loaded (eg. "foo/bar.so", or
"foo\bar.pyd").
"""
from distutils.sysconfig import get_config_var
ext_path = string.split(ext_name, '.')
# OS/2 has an 8 character module (extension) limit :-(
if os.name == "os2":
ext_path[len(ext_path) - 1] = ext_path[len(ext_path) - 1][:8]
# extensions in debug_mode are named 'module_d.pyd' under windows
so_ext = get_config_var('SO')
if os.name == 'nt' and self.debug:
return os.path.join(*ext_path) + '_d' + so_ext
return os.path.join(*ext_path) + so_ext
def get_export_symbols (self, ext):
"""Return the list of symbols that a shared extension has to
export. This either uses 'ext.export_symbols' or, if it's not
provided, "init" + module_name. Only relevant on Windows, where
the .pyd file (DLL) must export the module "init" function.
"""
initfunc_name = "init" + ext.name.split('.')[-1]
if initfunc_name not in ext.export_symbols:
ext.export_symbols.append(initfunc_name)
return ext.export_symbols
    def get_libraries (self, ext):
        """Return the list of libraries to link against when building a
        shared extension.  On most platforms, this is just 'ext.libraries';
        on Windows and OS/2, we add the Python library (eg. python20.dll).
        """
        # The python library is always needed on Windows.  For MSVC, this
        # is redundant, since the library is mentioned in a pragma in
        # pyconfig.h that MSVC groks.  The other Windows compilers all seem
        # to need it mentioned explicitly, though, so that's what we do.
        # Append '_d' to the python import library on debug builds.
        if sys.platform == "win32":
            from distutils.msvccompiler import MSVCCompiler
            if not isinstance(self.compiler, MSVCCompiler):
                template = "python%d%d"
                if self.debug:
                    template = template + '_d'
                # sys.hexversion packs the major version in the top byte
                # and the minor version in the next byte, e.g.
                # 0x020703f0 -> "python27".
                pythonlib = (template %
                       (sys.hexversion >> 24, (sys.hexversion >> 16) & 0xff))
                # don't extend ext.libraries, it may be shared with other
                # extensions, it is a reference to the original list
                return ext.libraries + [pythonlib]
            else:
                return ext.libraries
        elif sys.platform == "os2emx":
            # EMX/GCC requires the python library explicitly, and I
            # believe VACPP does as well (though not confirmed) - AIM Apr01
            template = "python%d%d"
            # debug versions of the main DLL aren't supported, at least
            # not at this time - AIM Apr01
            #if self.debug:
            #    template = template + '_d'
            pythonlib = (template %
                   (sys.hexversion >> 24, (sys.hexversion >> 16) & 0xff))
            # don't extend ext.libraries, it may be shared with other
            # extensions, it is a reference to the original list
            return ext.libraries + [pythonlib]
        elif sys.platform[:6] == "cygwin":
            template = "python%d.%d"
            pythonlib = (template %
                   (sys.hexversion >> 24, (sys.hexversion >> 16) & 0xff))
            # don't extend ext.libraries, it may be shared with other
            # extensions, it is a reference to the original list
            return ext.libraries + [pythonlib]
        elif sys.platform[:6] == "atheos":
            from distutils import sysconfig
            template = "python%d.%d"
            pythonlib = (template %
                   (sys.hexversion >> 24, (sys.hexversion >> 16) & 0xff))
            # Get SHLIBS from Makefile
            extra = []
            for lib in sysconfig.get_config_var('SHLIBS').split():
                if lib.startswith('-l'):
                    extra.append(lib[2:])
                else:
                    extra.append(lib)
            # don't extend ext.libraries, it may be shared with other
            # extensions, it is a reference to the original list
            return ext.libraries + [pythonlib, "m"] + extra
        elif sys.platform == 'darwin':
            # Don't use the default code below
            return ext.libraries
        elif sys.platform[:3] == 'aix':
            # Don't use the default code below
            return ext.libraries
        else:
            from distutils import sysconfig
            # NOTE(review): 'False and ...' disables linking against
            # libpython even when Py_ENABLE_SHARED is set.  This looks like
            # a deliberate local patch of the stock distutils behaviour in
            # this cross-compiler tree -- confirm before "fixing" it.
            if False and sysconfig.get_config_var('Py_ENABLE_SHARED'):
                template = "python%d.%d"
                pythonlib = (template %
                             (sys.hexversion >> 24, (sys.hexversion >> 16) & 0xff))
                return ext.libraries + [pythonlib]
            else:
                return ext.libraries
# class build_ext
| {
"content_hash": "1c32dc8afd5b9013e8427261bee161cf",
"timestamp": "",
"source": "github",
"line_count": 768,
"max_line_length": 86,
"avg_line_length": 41.98046875,
"alnum_prop": 0.5645916689928973,
"repo_name": "nmercier/linux-cross-gcc",
"id": "1c9496ce61a15acf527532cade0d44ab20e64b51",
"size": "32241",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "linux/lib/python2.7/distutils/command/build_ext.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "1047092"
},
{
"name": "C++",
"bytes": "151335"
},
{
"name": "Makefile",
"bytes": "82796"
},
{
"name": "Objective-C",
"bytes": "567"
},
{
"name": "Python",
"bytes": "29123266"
},
{
"name": "Shell",
"bytes": "14668"
}
],
"symlink_target": ""
} |
"""Sphinx build configuration for the MongoEngine documentation."""
import sys, os

# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('..'))

# -- General configuration -----------------------------------------------------

# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.todo']

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# The suffix of source filenames.
source_suffix = '.rst'

# The encoding of source files.
#source_encoding = 'utf-8'

# The master toctree document.
master_doc = 'index'

# General information about the project.
project = u'MongoEngine'
copyright = u'2009, MongoEngine Authors'

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# mongoengine must be importable (sys.path was extended above) so the
# version can be read from the package itself.
import mongoengine
# The short X.Y version.
version = mongoengine.get_version()
# The full version, including alpha/beta/rc tags.
release = mongoengine.get_version()

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None

# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'

# List of documents that shouldn't be included in the build.
#unused_docs = []

# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = ['_build']

# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None

# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True

# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True

# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'

# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []

# -- Options for HTML output ---------------------------------------------------

# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = 'nature'

# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}

# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ['_themes']

# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None

# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None

# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None

# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
#html_static_path = ['_static']

# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'

# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
html_use_smartypants = True

# Custom sidebar templates, maps document names to template names.
html_sidebars = {
    'index': ['globaltoc.html', 'searchbox.html'],
    '**': ['localtoc.html', 'relations.html', 'searchbox.html']
}

# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}

# If false, no module index is generated.
#html_use_modindex = True

# If false, no index is generated.
#html_use_index = True

# If true, the index is split into individual pages for each letter.
#html_split_index = False

# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True

# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''

# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''

# Output file base name for HTML help builder.
htmlhelp_basename = 'MongoEnginedoc'

# -- Options for LaTeX output --------------------------------------------------

# The paper size ('letter' or 'a4').
latex_paper_size = 'a4'

# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
  ('index', 'MongoEngine.tex', 'MongoEngine Documentation',
   'Ross Lawley', 'manual'),
]

# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None

# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False

# Additional stuff for the LaTeX preamble.
#latex_preamble = ''

# Documents to append as an appendix to all manuals.
#latex_appendices = []

# If false, no module index is generated.
#latex_use_modindex = True

# Include both the class docstring and the __init__ docstring in
# autoclass-generated documentation.
autoclass_content = 'both'
| {
"content_hash": "3001284f4ea538e271f5b5d850fde9db",
"timestamp": "",
"source": "github",
"line_count": 189,
"max_line_length": 80,
"avg_line_length": 32.492063492063494,
"alnum_prop": 0.7103077674645824,
"repo_name": "Multiposting/mongoengine",
"id": "40c1f430b5e634d509aa7aadac8c13f46ddef709",
"size": "6563",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "docs/conf.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2717"
},
{
"name": "Python",
"bytes": "873950"
}
],
"symlink_target": ""
} |
"""
Copyright 2017-present Airbnb, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
STREAMALERT_CLI_ROOT = os.path.dirname(os.path.abspath(__file__))
| {
"content_hash": "d38928cc381c543c51ef41b841ed8d3a",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 72,
"avg_line_length": 35.833333333333336,
"alnum_prop": 0.7767441860465116,
"repo_name": "airbnb/streamalert",
"id": "d97a878cbc29236a11f1337ace0deb82bd706373",
"size": "645",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "streamalert_cli/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HCL",
"bytes": "142275"
},
{
"name": "Python",
"bytes": "2209853"
},
{
"name": "Shell",
"bytes": "2975"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: changes only the Meta options (default
    # ordering and admin verbose names) of the 'emailingtransaction'
    # model; no database schema change is involved.

    dependencies = [
        ('cargo', '0004_auto_20151118_2134'),
    ]

    operations = [
        migrations.AlterModelOptions(
            name='emailingtransaction',
            options={'ordering': ('-date_created',), 'verbose_name': 'Email - Transaction', 'verbose_name_plural': 'Email - Transactions'},
        ),
    ]
| {
"content_hash": "be360dfbaec7c252f710ac2e25667da7",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 139,
"avg_line_length": 26.58823529411765,
"alnum_prop": 0.6238938053097345,
"repo_name": "dalou/django-cargo",
"id": "a9df3d0addcc9bf21689fa23f1e6d1bef1f35d8b",
"size": "476",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cargo/migrations/0005_auto_20151119_1209.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "52747"
},
{
"name": "HTML",
"bytes": "33921"
},
{
"name": "JavaScript",
"bytes": "146655"
},
{
"name": "Python",
"bytes": "238111"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
import swapper
from django.db import models
from fluent_pages.models import UrlNode
from accelerator_abstract.models.accelerator_model import AcceleratorModel
class BaseNodePublishedFor(AcceleratorModel):
    """Abstract link recording that a fluent_pages UrlNode is published
    for a particular ProgramRole (resolved through swapper so downstream
    projects can swap in their own concrete model).
    """
    node = models.ForeignKey(UrlNode, on_delete=models.CASCADE)
    published_for = models.ForeignKey(
        swapper.get_model_name(AcceleratorModel.Meta.app_label, "ProgramRole"),
        on_delete=models.CASCADE)

    class Meta(AcceleratorModel.Meta):
        db_table = 'accelerator_nodepublishedfor'
        abstract = True
        verbose_name = "Node is Published For"
        verbose_name_plural = "Node is Published For"

    def __str__(self):
        # e.g. "Homepage is available to Finalist"
        tmpl = "%s is available to %s"
        return tmpl % (self.node.title, self.published_for.name)
| {
"content_hash": "50cf9514fffb77c7f16d1c2e9ecdd37b",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 79,
"avg_line_length": 33.708333333333336,
"alnum_prop": 0.7119901112484549,
"repo_name": "masschallenge/django-accelerator",
"id": "8025b632f1655df4492eeed1efd4adaff358008b",
"size": "809",
"binary": false,
"copies": "1",
"ref": "refs/heads/development",
"path": "accelerator_abstract/models/base_node_published_for.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "1848"
},
{
"name": "Makefile",
"bytes": "6817"
},
{
"name": "Python",
"bytes": "996767"
},
{
"name": "Shell",
"bytes": "2453"
}
],
"symlink_target": ""
} |
"""
WSGI config for transactions project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "transactions.settings")
application = get_wsgi_application()
| {
"content_hash": "4d20fbb0203d23a41d873e53c7b6e89d",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 78,
"avg_line_length": 25.125,
"alnum_prop": 0.7761194029850746,
"repo_name": "akash-dev-github/Transactions",
"id": "1bb86d04a9ce982e0c3e7857fb6e90aa2fd183e8",
"size": "402",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "transactions/transactions/wsgi.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "393"
},
{
"name": "Python",
"bytes": "31119"
}
],
"symlink_target": ""
} |
from robotide.lib.robot.utils import NormalizedDict
class Metadata(NormalizedDict):
    """Free metadata as a NormalizedDict with '_' ignored in key matching."""

    def __init__(self, initial=None):
        NormalizedDict.__init__(self, initial, ignore='_')

    def __unicode__(self):
        # Renders like u'{key1: value1, key2: value2}'.
        return u'{%s}' % ', '.join('%s: %s' % (k, self[k]) for k in self)

    def __str__(self):
        # Python 2 only: relies on the 'unicode' builtin; non-ASCII
        # characters are replaced with '?'.
        return unicode(self).encode('ASCII', 'replace')
| {
"content_hash": "3d4e4a247037c2b8781d284806e20383",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 73,
"avg_line_length": 28.153846153846153,
"alnum_prop": 0.5956284153005464,
"repo_name": "fingeronthebutton/RIDE",
"id": "7b7653149cd500ecec7dd52d5972c059d7948f62",
"size": "974",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/robotide/lib/robot/model/metadata.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "21370"
},
{
"name": "HTML",
"bytes": "110675"
},
{
"name": "JavaScript",
"bytes": "41401"
},
{
"name": "Python",
"bytes": "2902622"
}
],
"symlink_target": ""
} |
from binary_heap import binHeap
import pytest
import random
def test_empty_heap():
    """A freshly constructed heap's backing list holds only the initial 0
    (presumably a sentinel for 1-based indexing -- see binary_heap)."""
    blist = binHeap()
    assert blist.heap == [0]


def test_push_pop():
    """A single pushed value is returned by the next pop."""
    blist = binHeap()
    blist.push(123)
    assert blist.pop() == 123
# def all_list(heap):
# value_input = []
# while True:
# try:
# value_input.append(heap.pop())
# except IndexError:
# return value_input
# return value_input
| {
"content_hash": "1cc4982f453a5fe0b1f92a4499fd2a76",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 44,
"avg_line_length": 18.458333333333332,
"alnum_prop": 0.5801354401805869,
"repo_name": "jacquestardie/data-structures",
"id": "49eba3509f47a8f2b0431d9b82cb46f7171eb053",
"size": "443",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test_binary_heap.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "23297"
}
],
"symlink_target": ""
} |
def install(job):
    """Create the external network described by this service's data inside
    the cloudspace of the parent VDC.

    Resolves the grid id (gid) of the location hosting the cloudspace and
    registers the external network on the space with the configured name,
    subnet, gateway, IP range and VLAN id.

    Raises RuntimeError when the space's location is not known to the
    OpenvCloud client.
    """
    service = job.service
    vdc = service.producers["vdc"][0]
    g8client = vdc.producers["g8client"][0]
    cl = j.clients.openvcloud.getFromService(g8client)
    acc = cl.account_get(vdc.model.data.account)
    # if space does not exist, it will create it
    space = acc.space_get(vdc.model.dbobj.name, vdc.model.data.location)
    data = service.model.data
    # Resolve the grid id of the space's location.  The original code left
    # 'gid' unassigned when no location matched, which crashed later with a
    # NameError; fail fast with a clear message instead.
    gid = None
    for location in cl.locations:
        if location['name'] == space.model['location']:
            gid = location['gid']
            break
    if gid is None:
        raise RuntimeError(
            "location '%s' not found on the G8 client" % space.model['location'])
    space.add_external_network(name=data.name,
                               subnet=data.publicSubnetCIDR,
                               gateway=data.gatewayIPAddress,
                               startip=data.startIPAddress,
                               endip=data.endIPAddress,
                               gid=gid,
                               vlan=data.vLANID)
| {
"content_hash": "e2d324486472f7e00608e386e030a3d2",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 72,
"avg_line_length": 42.04761904761905,
"alnum_prop": 0.5526613816534541,
"repo_name": "Jumpscale/ays_jumpscale8",
"id": "27f63d77970a8553c6cb340386caeaaa3da6c9f2",
"size": "883",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "templates/ovc/g8network/actions.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "270835"
}
],
"symlink_target": ""
} |
'''
Testing class for database API's archive related functions.
Authors: Ari Kairala, Petteri Ponsimaa
Originally adopted from Ivan's exercise 1 test class.
'''
import unittest, hashlib
import re, base64, copy, json, server
from database_api_test_common import BaseTestCase, db
from flask import json, jsonify
from exam_archive import ExamDatabaseErrorNotFound, ExamDatabaseErrorExists
from unittest import TestCase
from resources_common import COLLECTIONJSON, PROBLEMJSON, ARCHIVE_PROFILE, API_VERSION
class RestArchiveTestCase(BaseTestCase):
    '''
    RestArchiveTestCase contains archive related unit tests of the database API.
    '''

    # List of user credentials in exam_archive_data_dump.sql for testing purposes
    super_user = "bigboss"
    super_pw = hashlib.sha256("ultimatepw").hexdigest()
    admin_user = "antti.admin"
    admin_pw = hashlib.sha256("qwerty1234").hexdigest()
    basic_user = "testuser"
    basic_pw = hashlib.sha256("testuser").hexdigest()
    wrong_pw = "wrong-pw"

    # Collection+JSON write template used by the POST tests.
    test_archive_template_1 = {"template": {
        "data": [{"name": "archiveId", "value": 4},
                 {"name": "name", "value": "Computer Science"},
                 {"name": "organisationName", "value": "OTiT"},
                 {"name": "identificationNeeded", "value": 1}]
    }
    }
    # Alternative template with the same archiveId, used by the PUT tests
    # to overwrite the archive created from template 1.
    test_archive_template_2 = {"template": {
        "data": [{"name": "archiveId", "value": 4},
                 {"name": "name", "value": "Wireless Communication Engineering"},
                 {"name": "organisationName", "value": "OTiT"},
                 {"name": "identificationNeeded", "value": 0}]
    }
    }

    archivelist_resource_url = '/exam_archive/api/archives/'

    # Set a ready header for authorized admin user
    header_auth = {'Authorization': 'Basic ' + base64.b64encode(super_user + ":" + super_pw)}
# Define a list of the sample contents of the database, so we can later compare it to the test results
    @classmethod
    def setUpClass(cls):
        '''Announce the test-case name once before any test in this class runs.'''
        print "Testing ", cls.__name__
    def test_user_not_authorized(self):
        '''
        Check that user is not able to get archive list without authenticating.
        '''
        print '(' + self.test_user_not_authorized.__name__ + ')', \
              self.test_user_not_authorized.__doc__
        # Test ArchiveList/GET with no credentials at all: expect 401 with
        # a problem+json body.
        rv = self.app.get(self.archivelist_resource_url)
        assert rv.status_code == 401
        assert PROBLEMJSON in rv.mimetype
        # Try to get Archive list as super user with wrong password
        rv = self.app.get(self.archivelist_resource_url, headers={'Authorization': 'Basic ' + \
                                                                  base64.b64encode(self.super_user + ":" + self.wrong_pw)})
        assert rv.status_code == 401
        assert PROBLEMJSON in rv.mimetype
    def test_user_authorized(self):
        '''
        Check that authenticated user is able to get archive list.
        '''
        print '(' + self.test_user_authorized.__name__ + ')', \
              self.test_user_authorized.__doc__
        # Get Archive list as basic user
        rv = self.app.get(self.archivelist_resource_url, headers={'Authorization': 'Basic ' + \
                                                                  base64.b64encode(self.basic_user + ":" + self.basic_pw)})
        assert rv.status_code == 200
        assert COLLECTIONJSON in rv.mimetype
        # User authorized as super user; the archive profile must be
        # advertised in the Content-Type.
        rv = self.app.get(self.archivelist_resource_url, headers={'Authorization': 'Basic ' + \
                                                                  base64.b64encode(self.super_user + ":" + self.super_pw)})
        self.assertEquals(rv.status_code,200)
        self.assertEquals(COLLECTIONJSON+";"+ARCHIVE_PROFILE,rv.content_type)
    def test_archive_get(self):
        '''
        Check data consistency of Archive/GET and ArchiveList/GET.
        '''
        print '(' + self.test_archive_get.__name__ + ')', \
              self.test_archive_get.__doc__
        # Test ArchiveList/GET (shared consistency check in _archive_get)
        self._archive_get(self.archivelist_resource_url)
    def _archive_get(self, resource_url):
        '''
        Check data consistency of ArchiveList/GET: every item returned by
        the API must match the corresponding archive row in the database.
        '''
        # Get all the archives from database
        archives = db.browse_archives()
        # Get all the archives from API
        rv = self.app.get(resource_url, headers=self.header_auth)
        self.assertEquals(rv.status_code,200)
        self.assertEquals(COLLECTIONJSON+";"+ARCHIVE_PROFILE,rv.content_type)
        input = json.loads(rv.data)
        assert input
        # Go through the data
        data = input['collection']
        items = data['items']
        self.assertEquals(data['href'], resource_url)
        self.assertEquals(data['version'], API_VERSION)
        for item in items:
            # _create_dict/_isIdentical are helpers defined elsewhere in
            # this test class; each item is matched against the database
            # row with the same archiveId.
            obj = self._create_dict(item['data'])
            archive = db.get_archive(obj['archiveId'])
            assert self._isIdentical(obj, archive)
    def test_archive_post(self):
        '''
        Check that a new archive can be created.
        '''
        print '(' + self.test_archive_post.__name__ + ')', \
              self.test_archive_post.__doc__
        resource_url = self.archivelist_resource_url
        new_archive = self.test_archive_template_1.copy()
        # Test ArchiveList/POST
        rv = self.app.post(resource_url, headers=self.header_auth, data=json.dumps(new_archive))
        self.assertEquals(rv.status_code,201)
        # Post returns the address of newly created resource URL in header, in 'location'. Get the identifier of
        # the just created item, fetch it from database and compare.
        location = rv.location
        location_match = re.match('.*archives/([^/]+)/', location)
        self.assertIsNotNone(location_match)
        new_id = location_match.group(1)
        # Fetch the item from database and set it to archive_id_db, and convert the filled post template data above to
        # similar format by replacing the keys with post data attributes.
        archive_in_db = db.get_archive(new_id)
        archive_posted = self._convert(new_archive)
        # Compare the data in database and the post template above.
        self.assertDictContainsSubset(archive_posted, archive_in_db)
        # Next, try to add the same archive twice - there should be conflict
        rv = self.app.post(resource_url, headers=self.header_auth, data=json.dumps(new_archive))
        self.assertEquals(rv.status_code,409)
        # Next check that by posting invalid JSON data we get status code 415
        invalid_json = "INVALID " + json.dumps(new_archive)
        rv = self.app.post(resource_url, headers=self.header_auth, data=invalid_json)
        self.assertEquals(rv.status_code,415)
        # Check that template structure is validated (posting the bare
        # 'template' contents without the wrapper object must fail)
        invalid_json = json.dumps(new_archive['template'])
        rv = self.app.post(resource_url, headers=self.header_auth, data=invalid_json)
        self.assertEquals(rv.status_code,400)
        # Check for the missing required field by removing the second row
        # in the data array (index 1, the archive name)
        invalid_template = copy.deepcopy(new_archive)
        invalid_template['template']['data'].pop(1)
        rv = self.app.post(resource_url, headers=self.header_auth, data=json.dumps(invalid_template))
        self.assertEquals(rv.status_code,400)
        # Lastly, delete the item
        rv = self.app.delete(location, headers=self.header_auth)
        self.assertEquals(rv.status_code,204)
def test_archive_put(self):
    '''
    Check that an existing archive can be modified.
    '''
    # Python 2 print statement, matching the rest of this test module.
    print '(' + self.test_archive_put.__name__ + ')', \
        self.test_archive_put.__doc__
    resource_url = self.archivelist_resource_url
    # NOTE(review): unlike the POST test, the templates are not copied here,
    # so any in-place mutation would leak into other tests -- confirm intended.
    new_archive = self.test_archive_template_1
    edited_archive = self.test_archive_template_2
    # First create the archive; 201 Created is expected.
    rv = self.app.post(resource_url, headers=self.header_auth, data=json.dumps(new_archive))
    self.assertEquals(rv.status_code,201)
    location = rv.location
    self.assertIsNotNone(location)
    # Then try to edit the archive with PUT; 200 OK is expected.
    rv = self.app.put(location, headers=self.header_auth, data=json.dumps(edited_archive))
    self.assertEquals(rv.status_code,200)
    location = rv.location
    self.assertIsNotNone(location)
    # Put returns the address of the modified resource URL in the 'location'
    # header. Extract the identifier of the item so it can be fetched from
    # the database and compared below.
    location = rv.location
    location_match = re.match('.*archives/([^/]+)/', location)
    self.assertIsNotNone(location_match)
    new_id = location_match.group(1)
    # Fetch the item from database and convert the filled template data above
    # to a similar format by replacing the keys with post data attributes.
    archive_in_db = db.get_archive(new_id)
    archive_posted = self._convert(edited_archive)
    # Compare the data in database and the template above.
    self.assertDictContainsSubset(archive_posted, archive_in_db)
    # Next check that by putting invalid JSON data we get status code 415.
    invalid_json = "INVALID " + json.dumps(new_archive)
    rv = self.app.put(location, headers=self.header_auth, data=invalid_json)
    self.assertEquals(rv.status_code,415)
    # Check that template structure is validated: a bare template body
    # (without the outer envelope) must be rejected with 400.
    invalid_json = json.dumps(new_archive['template'])
    rv = self.app.put(location, headers=self.header_auth, data=invalid_json)
    self.assertEquals(rv.status_code,400)
    # Lastly, we delete the archive to leave the database clean.
    rv = self.app.delete(location, headers=self.header_auth)
    self.assertEquals(rv.status_code,204)
def test_archive_delete(self):
    '''
    Check that an existing archive can be deleted and that it is removed
    from the database afterwards.
    '''
    # (The previous docstring was a copy-paste from an authentication test;
    # this method actually exercises DELETE.)
    print '(' + self.test_archive_delete.__name__ + ')', \
        self.test_archive_delete.__doc__
    # First create the archive so there is something to delete.
    resource_url = self.archivelist_resource_url
    rv = self.app.post(resource_url, headers=self.header_auth, data=json.dumps(self.test_archive_template_2))
    self.assertEquals(rv.status_code,201)
    location = rv.location
    self.assertIsNotNone(location)
    # Get the identifier of the just created item from the 'location' header.
    location = rv.location
    location_match = re.match('.*archives/([^/]+)/', location)
    self.assertIsNotNone(location_match)
    new_id = location_match.group(1)
    # Then, we delete the archive; 204 No Content is expected.
    rv = self.app.delete(location, headers=self.header_auth)
    self.assertEquals(rv.status_code,204)
    # Try to fetch the deleted archive from database - expect to fail.
    self.assertIsNone(db.get_archive(new_id))
def test_for_method_not_allowed(self):
'''
For inconsistency check for 405, method not allowed.
'''
print '(' + self.test_archive_get.__name__ + ')', \
self.test_archive_get.__doc__
# ArchiveList/PUT should not exist
rv = self.app.put(self.archivelist_resource_url, headers=self.header_auth)
self.assertEquals(rv.status_code,405)
# ArchiveList/DELETE should not exist
rv = self.app.delete(self.archivelist_resource_url, headers=self.header_auth)
self.assertEquals(rv.status_code,405)
def _isIdentical(self, api_item, db_item):
'''
Check whether template data corresponds to data stored in the database.
'''
return api_item['archiveId'] == db_item['archive_id'] and \
api_item['name'] == db_item['archive_name'] and \
api_item['organisationName'] == db_item['organisation_name'] and \
api_item['identificationNeeded'] == db_item['identification_needed']
def _convert(self, template_data):
    '''
    Convert template data to a dictionary representing the format the data
    is saved in the database.

    :param template_data: posted envelope containing template_data
        ['template']['data'], a list of {'name': ..., 'value': ...} fields
    :return: dict keyed by database column names
    '''
    # Translation table from API attribute names to database column names.
    # Bug fix: the original literal listed "archiveId" twice; the duplicate
    # entry has been removed.
    trans_table = {"name": "archive_name",
                   "organisationName": "organisation_name",
                   "archiveId": "archive_id",
                   "dateModified": "date",
                   "modifierId": "modifier_id",
                   "identificationNeeded": "identification_needed"}
    # Flatten the template fields to a plain name -> value mapping first.
    data = self._create_dict(template_data['template']['data'])
    db_item = {}
    for key, val in data.items():
        db_item[trans_table[key]] = val
    return db_item
def _create_dict(self,item):
'''
Create a dictionary from template data for easier handling.
'''
dict = {}
for f in item:
dict[f['name']] = f['value']
return dict
if __name__ == '__main__':
    # Parenthesized print works under both Python 2 and Python 3.
    # (A stale commented-out assertion was removed from above this guard.)
    print('Start running tests')
    unittest.main()
| {
"content_hash": "d98a24510b8abdfa8b7862f9c7243bff",
"timestamp": "",
"source": "github",
"line_count": 321,
"max_line_length": 135,
"avg_line_length": 41.47352024922118,
"alnum_prop": 0.6084278524750244,
"repo_name": "petterip/exam-archive",
"id": "9f6de23ac9c63ca609b68ba64253870ef0c7c5af",
"size": "13313",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/rest_api_test_archive.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2066"
},
{
"name": "HTML",
"bytes": "4750"
},
{
"name": "JavaScript",
"bytes": "89925"
},
{
"name": "PLpgSQL",
"bytes": "8691"
},
{
"name": "Python",
"bytes": "312184"
}
],
"symlink_target": ""
} |
"""
Test suite for the UHPPOTE RFID controller board module.
.. moduleauthor:: Andrew Vaughan <hello@andrewvaughan.io>
"""
| {
"content_hash": "22eaff3183a890ad290b72da0acd5b96",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 57,
"avg_line_length": 24.8,
"alnum_prop": 0.7419354838709677,
"repo_name": "andrewvaughan/uhppote-rfid",
"id": "86956220556042d53c596f419bd9a6c6168b6873",
"size": "148",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "576"
},
{
"name": "Python",
"bytes": "67656"
}
],
"symlink_target": ""
} |
import requests

if __name__ == '__main__':
    # Query parameters for the CWB daily-precipitation pages:
    # station id ('st') and year.
    payload = {
        "st": '',
        "year": ''
    }
    payload['st'] = '466920'
    payload['year'] = '2015'
    # 1. original web, no use at all!!!
    # link1 = "http://www.cwb.gov.tw/V7/climate/dailyPrecipitation/dP.htm"
    # 2. get rainfall of a specific station and year
    # link2 = "http://www.cwb.gov.tw/V7/climate/dailyPrecipitation/Data/466920_2012.htm"
    # 3. can be reached by a POST request to obtain the second url (above ^^^)
    #    but still no use at all!!!
    # link3 = "http://www.cwb.gov.tw/V7/climate/dailyPrecipitation/dP_file.php"
    # 4. get select option list, including station, year
    # link4 = "http://www.cwb.gov.tw/V7/climate/dailyPrecipitation/dP.php"
    # Bug fix: the closing delimiter of the block below was `""""` (four
    # quotes), which left an unterminated string literal and made the whole
    # script a SyntaxError.  It is now a proper triple-quote.
    """
    # choose either post or get, do not use in the same time
    # for third url with post payload
    # link can be link3
    res = requests.post(link, payload)
    # just get web url
    # link can be link1, link2, link4
    res = requests.get(link)
    # encoding as utf-8 avoid wrong chiness word
    res.encoding = 'utf-8'
    print res.text.encode('utf-8')
    """
| {
"content_hash": "7aa9e6af07e168892e64b9f375dcac6f",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 84,
"avg_line_length": 26.435897435897434,
"alnum_prop": 0.6614936954413191,
"repo_name": "TaiwanStat/real.taiwanstat.com",
"id": "11d5916347599fa83ebcd35c09cbb827de91ce78",
"size": "1031",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "vegetable-price/workspace/rainfall_parse/test_rainfall.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "187"
},
{
"name": "CSS",
"bytes": "111100"
},
{
"name": "HTML",
"bytes": "521597"
},
{
"name": "Handlebars",
"bytes": "150100"
},
{
"name": "JavaScript",
"bytes": "3646635"
},
{
"name": "Pug",
"bytes": "3179"
},
{
"name": "Python",
"bytes": "133753"
},
{
"name": "SCSS",
"bytes": "3064"
},
{
"name": "Shell",
"bytes": "1318"
}
],
"symlink_target": ""
} |
import os
import six
import contextlib
import shutil
try:
from unittest import mock
except ImportError:
import mock
from twisted.trial import unittest
from twisted.protocols.policies import WrappingFactory
from twisted.python.filepath import FilePath
from twisted.internet import reactor, defer, error
from twisted.web import server, static, util, resource
from twisted.web._newclient import ResponseFailed
from twisted.web.http import _DataLoss
from twisted.web.test.test_webclient import ForeverTakingResource, \
NoLengthResource, HostHeaderResource, \
PayloadResource
from twisted.cred import portal, checkers, credentials
from w3lib.url import path_to_file_uri
from scrapy.core.downloader.handlers import DownloadHandlers
from scrapy.core.downloader.handlers.datauri import DataURIDownloadHandler
from scrapy.core.downloader.handlers.file import FileDownloadHandler
from scrapy.core.downloader.handlers.http import HTTPDownloadHandler, HttpDownloadHandler
from scrapy.core.downloader.handlers.http10 import HTTP10DownloadHandler
from scrapy.core.downloader.handlers.http11 import HTTP11DownloadHandler
from scrapy.core.downloader.handlers.s3 import S3DownloadHandler
from scrapy.spiders import Spider
from scrapy.http import Request
from scrapy.http.response.text import TextResponse
from scrapy.responsetypes import responsetypes
from scrapy.settings import Settings
from scrapy.utils.test import get_crawler, skip_if_no_boto
from scrapy.utils.python import to_bytes
from scrapy.exceptions import NotConfigured
from tests.mockserver import MockServer, ssl_context_factory
from tests.spiders import SingleRequestSpider
class DummyDH(object):
    """No-op download-handler stub used by the handler-loading tests."""

    def __init__(self, crawler):
        # The crawler argument is accepted but intentionally ignored.
        pass
class OffDH(object):
    """Download-handler stub whose construction always fails with
    NotConfigured, used to test that such a scheme is marked unconfigured."""

    def __init__(self, crawler):
        # Signal to DownloadHandlers that this handler cannot be used.
        raise NotConfigured
class LoadTestCase(unittest.TestCase):
    """Tests for DownloadHandlers scheme registration and lazy loading."""

    def test_enabled_handler(self):
        """A valid handler class is registered and loaded for its scheme."""
        scheme_to_path = {'scheme': 'tests.test_downloader_handlers.DummyDH'}
        crawler = get_crawler(settings_dict={'DOWNLOAD_HANDLERS': scheme_to_path})
        downloader_handlers = DownloadHandlers(crawler)
        self.assertIn('scheme', downloader_handlers._schemes)
        # Handlers are instantiated lazily; touch each scheme to force it.
        for scheme in scheme_to_path:
            downloader_handlers._get_handler(scheme)
        self.assertIn('scheme', downloader_handlers._handlers)
        self.assertNotIn('scheme', downloader_handlers._notconfigured)

    def test_not_configured_handler(self):
        """A handler raising NotConfigured ends up in _notconfigured."""
        scheme_to_path = {'scheme': 'tests.test_downloader_handlers.OffDH'}
        crawler = get_crawler(settings_dict={'DOWNLOAD_HANDLERS': scheme_to_path})
        downloader_handlers = DownloadHandlers(crawler)
        self.assertIn('scheme', downloader_handlers._schemes)
        for scheme in scheme_to_path:
            downloader_handlers._get_handler(scheme)
        self.assertNotIn('scheme', downloader_handlers._handlers)
        self.assertIn('scheme', downloader_handlers._notconfigured)

    def test_disabled_handler(self):
        """A scheme mapped to None is disabled entirely."""
        scheme_to_path = {'scheme': None}
        crawler = get_crawler(settings_dict={'DOWNLOAD_HANDLERS': scheme_to_path})
        downloader_handlers = DownloadHandlers(crawler)
        self.assertNotIn('scheme', downloader_handlers._schemes)
        for scheme in scheme_to_path:
            downloader_handlers._get_handler(scheme)
        self.assertNotIn('scheme', downloader_handlers._handlers)
        self.assertIn('scheme', downloader_handlers._notconfigured)
class FileTestCase(unittest.TestCase):
    """Tests for the file:// download handler."""

    def setUp(self):
        # The '^' suffix exercises percent-encoding of special characters
        # in file URIs (checked via the '%5E' assertion in test_download).
        self.tmpname = self.mktemp()
        with open(self.tmpname + '^', 'w') as f:
            f.write('0123456789')
        self.download_request = FileDownloadHandler(Settings()).download_request

    def tearDown(self):
        os.unlink(self.tmpname + '^')

    def test_download(self):
        def _test(response):
            self.assertEquals(response.url, request.url)
            self.assertEquals(response.status, 200)
            self.assertEquals(response.body, b'0123456789')

        request = Request(path_to_file_uri(self.tmpname + '^'))
        assert request.url.upper().endswith('%5E')
        return self.download_request(request, Spider('foo')).addCallback(_test)

    def test_non_existent(self):
        # Downloading a missing file must errback with IOError.
        request = Request('file://%s' % self.mktemp())
        d = self.download_request(request, Spider('foo'))
        return self.assertFailure(d, IOError)
class ContentLengthHeaderResource(resource.Resource):
    """
    A testing resource which renders itself as the value of the Content-Length
    header from the request.
    """

    def render(self, request):
        # Echo back the raw Content-Length header sent by the client.
        return request.requestHeaders.getRawHeaders(b"content-length")[0]
class ChunkedResource(resource.Resource):
    """Resource that writes its body in two chunks without a Content-Length,
    producing a chunked transfer-encoded response."""

    def render(self, request):
        def response():
            request.write(b"chunked ")
            request.write(b"content\n")
            request.finish()

        # Write asynchronously so the headers go out before the body chunks.
        reactor.callLater(0, response)
        return server.NOT_DONE_YET
class BrokenChunkedResource(resource.Resource):
    """Chunked resource that drops the connection before sending the
    terminating zero-length chunk, simulating a truncated chunked body."""

    def render(self, request):
        def response():
            request.write(b"chunked ")
            request.write(b"content\n")
            # Disable terminating chunk on finish.
            request.chunked = False
            closeConnection(request)

        reactor.callLater(0, response)
        return server.NOT_DONE_YET
class BrokenDownloadResource(resource.Resource):
    """Resource that advertises a 20-byte Content-Length but sends only 7
    bytes before closing, simulating a truncated download."""

    def render(self, request):
        def response():
            # Claim 20 bytes but deliver fewer, then drop the connection.
            request.setHeader(b"Content-Length", b"20")
            request.write(b"partial")
            closeConnection(request)

        reactor.callLater(0, response)
        return server.NOT_DONE_YET
def closeConnection(request):
    """Forcibly drop the client connection of *request*, then finish it.

    We have to force a disconnection for HTTP/1.1 clients; otherwise the
    client keeps the connection open waiting for more data.
    """
    if hasattr(request.channel, 'loseConnection'):  # twisted >=16.3.0
        request.channel.loseConnection()
    else:
        # Older twisted: close via the underlying transport.
        request.channel.transport.loseConnection()
    request.finish()
class EmptyContentTypeHeaderResource(resource.Resource):
    """
    A testing resource which renders itself as the value of request body
    without content-type header in response.
    """

    def render(self, request):
        # Deliberately blank Content-Type so the client must sniff the body.
        request.setHeader("content-type", "")
        return request.content.read()
class HttpTestCase(unittest.TestCase):
    """Base test case for the HTTP download handlers.

    Spins up a local twisted web server exposing a set of special-purpose
    resources (redirects, hangs, truncated bodies, chunked responses, ...)
    and runs download requests against it.  Subclasses override ``scheme``
    and ``download_handler_cls`` to cover HTTP 1.0/1.1 and HTTPS.
    """

    scheme = 'http'
    download_handler_cls = HTTPDownloadHandler

    # only used for HTTPS tests
    keyfile = 'keys/cert.pem'
    certfile = 'keys/cert.pem'

    def setUp(self):
        # Serve a temporary directory holding one 10-byte file plus the
        # special-purpose testing resources defined above.
        self.tmpname = self.mktemp()
        os.mkdir(self.tmpname)
        FilePath(self.tmpname).child("file").setContent(b"0123456789")
        r = static.File(self.tmpname)
        r.putChild(b"redirect", util.Redirect(b"/file"))
        r.putChild(b"wait", ForeverTakingResource())
        r.putChild(b"hang-after-headers", ForeverTakingResource(write=True))
        r.putChild(b"nolength", NoLengthResource())
        r.putChild(b"host", HostHeaderResource())
        r.putChild(b"payload", PayloadResource())
        r.putChild(b"broken", BrokenDownloadResource())
        r.putChild(b"chunked", ChunkedResource())
        r.putChild(b"broken-chunked", BrokenChunkedResource())
        r.putChild(b"contentlength", ContentLengthHeaderResource())
        r.putChild(b"nocontenttype", EmptyContentTypeHeaderResource())
        self.site = server.Site(r, timeout=None)
        self.wrapper = WrappingFactory(self.site)
        self.host = 'localhost'
        if self.scheme == 'https':
            self.port = reactor.listenSSL(
                0, self.wrapper, ssl_context_factory(self.keyfile, self.certfile),
                interface=self.host)
        else:
            self.port = reactor.listenTCP(0, self.wrapper, interface=self.host)
        self.portno = self.port.getHost().port
        self.download_handler = self.download_handler_cls(Settings())
        self.download_request = self.download_handler.download_request

    @defer.inlineCallbacks
    def tearDown(self):
        yield self.port.stopListening()
        if hasattr(self.download_handler, 'close'):
            yield self.download_handler.close()
        shutil.rmtree(self.tmpname)

    def getURL(self, path):
        """Build an absolute URL for *path* on the local test server."""
        return "%s://%s:%d/%s" % (self.scheme, self.host, self.portno, path)

    def test_download(self):
        request = Request(self.getURL('file'))
        d = self.download_request(request, Spider('foo'))
        d.addCallback(lambda r: r.body)
        d.addCallback(self.assertEquals, b"0123456789")
        return d

    def test_download_head(self):
        # HEAD must yield an empty body.
        request = Request(self.getURL('file'), method='HEAD')
        d = self.download_request(request, Spider('foo'))
        d.addCallback(lambda r: r.body)
        d.addCallback(self.assertEquals, b'')
        return d

    def test_redirect_status(self):
        # The handler itself must not follow redirects.
        request = Request(self.getURL('redirect'))
        d = self.download_request(request, Spider('foo'))
        d.addCallback(lambda r: r.status)
        d.addCallback(self.assertEquals, 302)
        return d

    def test_redirect_status_head(self):
        request = Request(self.getURL('redirect'), method='HEAD')
        d = self.download_request(request, Spider('foo'))
        d.addCallback(lambda r: r.status)
        d.addCallback(self.assertEquals, 302)
        return d

    @defer.inlineCallbacks
    def test_timeout_download_from_spider_nodata_rcvd(self):
        # client connects but no data is received
        spider = Spider('foo')
        meta = {'download_timeout': 0.2}
        request = Request(self.getURL('wait'), meta=meta)
        d = self.download_request(request, spider)
        yield self.assertFailure(d, defer.TimeoutError, error.TimeoutError)

    @defer.inlineCallbacks
    def test_timeout_download_from_spider_server_hangs(self):
        # client connects, server send headers and some body bytes but hangs
        spider = Spider('foo')
        meta = {'download_timeout': 0.2}
        request = Request(self.getURL('hang-after-headers'), meta=meta)
        d = self.download_request(request, spider)
        yield self.assertFailure(d, defer.TimeoutError, error.TimeoutError)

    def test_host_header_not_in_request_headers(self):
        # When no Host header is given, the handler must fill it in itself
        # (verified via the echoing 'host' resource) and leave the request's
        # own header dict empty.
        def _test(response):
            self.assertEquals(
                response.body, to_bytes('%s:%d' % (self.host, self.portno)))
            self.assertEquals(request.headers, {})

        request = Request(self.getURL('host'))
        return self.download_request(request, Spider('foo')).addCallback(_test)

    def test_host_header_seted_in_request_headers(self):
        # An explicitly set Host header must be passed through to the server.
        # Bug fix: unreachable duplicate statements that followed the return
        # in the original have been removed.
        def _test(response):
            self.assertEquals(response.body, b'example.com')
            self.assertEquals(request.headers.get('Host'), b'example.com')

        request = Request(self.getURL('host'), headers={'Host': 'example.com'})
        return self.download_request(request, Spider('foo')).addCallback(_test)

    def test_content_length_zero_bodyless_post_request_headers(self):
        """Tests if "Content-Length: 0" is sent for bodyless POST requests.

        This is not strictly required by HTTP RFCs but can cause trouble
        for some web servers.
        See:
        https://github.com/scrapy/scrapy/issues/823
        https://issues.apache.org/jira/browse/TS-2902
        https://github.com/kennethreitz/requests/issues/405
        https://bugs.python.org/issue14721
        """
        def _test(response):
            self.assertEquals(response.body, b'0')

        request = Request(self.getURL('contentlength'), method='POST', headers={'Host': 'example.com'})
        return self.download_request(request, Spider('foo')).addCallback(_test)

    def test_payload(self):
        body = b'1'*100  # PayloadResource requires body length to be 100
        request = Request(self.getURL('payload'), method='POST', body=body)
        d = self.download_request(request, Spider('foo'))
        d.addCallback(lambda r: r.body)
        d.addCallback(self.assertEquals, body)
        return d
class DeprecatedHttpTestCase(HttpTestCase):
    """HTTP 1.0 test case, run through the deprecated HttpDownloadHandler
    alias."""
    download_handler_cls = HttpDownloadHandler
class Http10TestCase(HttpTestCase):
    """HTTP 1.0 test case (HTTP10DownloadHandler)."""
    download_handler_cls = HTTP10DownloadHandler
class Https10TestCase(Http10TestCase):
    """HTTP 1.0 tests over TLS; inherits everything from Http10TestCase."""
    scheme = 'https'
class Http11TestCase(HttpTestCase):
    """HTTP 1.1 test case"""
    download_handler_cls = HTTP11DownloadHandler

    def test_download_without_maxsize_limit(self):
        request = Request(self.getURL('file'))
        d = self.download_request(request, Spider('foo'))
        d.addCallback(lambda r: r.body)
        d.addCallback(self.assertEquals, b"0123456789")
        return d

    def test_response_class_choosing_request(self):
        """Tests choosing of correct response type
        in case of Content-Type is empty but body contains text.
        """
        body = b'Some plain text\ndata with tabs\t and null bytes\0'

        def _test_type(response):
            self.assertEquals(type(response), TextResponse)

        request = Request(self.getURL('nocontenttype'), body=body)
        d = self.download_request(request, Spider('foo'))
        d.addCallback(_test_type)
        return d

    @defer.inlineCallbacks
    def test_download_with_maxsize(self):
        request = Request(self.getURL('file'))
        # 10 is minimal size for this request and the limit is only counted on
        # response body. (regardless of headers)
        d = self.download_request(request, Spider('foo', download_maxsize=10))
        d.addCallback(lambda r: r.body)
        d.addCallback(self.assertEquals, b"0123456789")
        yield d
        # One byte below the body size must abort the download.
        d = self.download_request(request, Spider('foo', download_maxsize=9))
        yield self.assertFailure(d, defer.CancelledError, error.ConnectionAborted)

    @defer.inlineCallbacks
    def test_download_with_maxsize_per_req(self):
        # Per-request maxsize (via meta) must also abort oversized bodies.
        meta = {'download_maxsize': 2}
        request = Request(self.getURL('file'), meta=meta)
        d = self.download_request(request, Spider('foo'))
        yield self.assertFailure(d, defer.CancelledError, error.ConnectionAborted)

    @defer.inlineCallbacks
    def test_download_with_small_maxsize_per_spider(self):
        request = Request(self.getURL('file'))
        d = self.download_request(request, Spider('foo', download_maxsize=2))
        yield self.assertFailure(d, defer.CancelledError, error.ConnectionAborted)

    def test_download_with_large_maxsize_per_spider(self):
        request = Request(self.getURL('file'))
        d = self.download_request(request, Spider('foo', download_maxsize=100))
        d.addCallback(lambda r: r.body)
        d.addCallback(self.assertEquals, b"0123456789")
        return d

    def test_download_chunked_content(self):
        request = Request(self.getURL('chunked'))
        d = self.download_request(request, Spider('foo'))
        d.addCallback(lambda r: r.body)
        d.addCallback(self.assertEquals, b"chunked content\n")
        return d

    def test_download_broken_content_cause_data_loss(self, url='broken'):
        # A truncated body must errback with ResponseFailed wrapping _DataLoss.
        request = Request(self.getURL(url))
        d = self.download_request(request, Spider('foo'))

        def checkDataLoss(failure):
            # Swallow the failure only if it is the expected _DataLoss;
            # anything else propagates and fails the test.
            if failure.check(ResponseFailed):
                if any(r.check(_DataLoss) for r in failure.value.reasons):
                    return None
            return failure

        d.addCallback(lambda _: self.fail("No DataLoss exception"))
        d.addErrback(checkDataLoss)
        return d

    def test_download_broken_chunked_content_cause_data_loss(self):
        return self.test_download_broken_content_cause_data_loss('broken-chunked')

    def test_download_broken_content_allow_data_loss(self, url='broken'):
        # With download_fail_on_dataloss=False the truncated body is kept
        # and the response is flagged 'dataloss'.
        request = Request(self.getURL(url), meta={'download_fail_on_dataloss': False})
        d = self.download_request(request, Spider('foo'))
        d.addCallback(lambda r: r.flags)
        d.addCallback(self.assertEqual, ['dataloss'])
        return d

    def test_download_broken_chunked_content_allow_data_loss(self):
        return self.test_download_broken_content_allow_data_loss('broken-chunked')

    def test_download_broken_content_allow_data_loss_via_setting(self, url='broken'):
        # Same as above but configured through the project setting rather
        # than request meta.
        download_handler = self.download_handler_cls(Settings({
            'DOWNLOAD_FAIL_ON_DATALOSS': False,
        }))
        request = Request(self.getURL(url))
        d = download_handler.download_request(request, Spider('foo'))
        d.addCallback(lambda r: r.flags)
        d.addCallback(self.assertEqual, ['dataloss'])
        return d

    def test_download_broken_chunked_content_allow_data_loss_via_setting(self):
        return self.test_download_broken_content_allow_data_loss_via_setting('broken-chunked')
class Https11TestCase(Http11TestCase):
    """HTTP 1.1 tests over TLS; inherits everything from Http11TestCase."""
    scheme = 'https'
class Https11WrongHostnameTestCase(Http11TestCase):
    """HTTPS tests with a certificate issued for a different hostname."""
    scheme = 'https'

    # above tests use a server certificate for "localhost",
    # client connection to "localhost" too.
    # here we test that even if the server certificate is for another domain,
    # "www.example.com" in this case,
    # the tests still pass
    keyfile = 'keys/example-com.key.pem'
    certfile = 'keys/example-com.cert.pem'
class Https11InvalidDNSId(Https11TestCase):
    """Connect to HTTPS hosts with IP while certificate uses domain names IDs."""

    def setUp(self):
        super(Https11InvalidDNSId, self).setUp()
        # Connect by IP so the certificate's DNS identifiers cannot match.
        self.host = '127.0.0.1'
class Http11MockServerTestCase(unittest.TestCase):
    """HTTP 1.1 test case with MockServer"""

    def setUp(self):
        self.mockserver = MockServer()
        self.mockserver.__enter__()

    def tearDown(self):
        self.mockserver.__exit__(None, None, None)

    @defer.inlineCallbacks
    def test_download_with_content_length(self):
        crawler = get_crawler(SingleRequestSpider)
        # http://localhost:8998/partial set Content-Length to 1024, use
        # download_maxsize=1000 to avoid downloading it.
        yield crawler.crawl(seed=Request(url='http://localhost:8998/partial', meta={'download_maxsize': 1000}))
        failure = crawler.spider.meta['failure']
        self.assertIsInstance(failure.value, defer.CancelledError)

    @defer.inlineCallbacks
    def test_download(self):
        crawler = get_crawler(SingleRequestSpider)
        yield crawler.crawl(seed=Request(url='http://localhost:8998'))
        failure = crawler.spider.meta.get('failure')
        # Bug fix: was assertTrue(failure == None); assertIsNone gives a
        # clearer failure message and is the idiomatic check.
        self.assertIsNone(failure)
        reason = crawler.spider.meta['close_reason']
        # Bug fix: was assertTrue(reason, 'finished'), which only checked
        # truthiness ('finished' was silently taken as the assertion
        # message) and never compared the values.
        self.assertEqual(reason, 'finished')

    @defer.inlineCallbacks
    def test_download_gzip_response(self):
        crawler = get_crawler(SingleRequestSpider)
        body = b'1' * 100  # PayloadResource requires body length to be 100
        request = Request('http://localhost:8998/payload', method='POST',
                          body=body, meta={'download_maxsize': 50})
        yield crawler.crawl(seed=request)
        failure = crawler.spider.meta['failure']
        # download_maxsize < 100, hence the CancelledError
        self.assertIsInstance(failure.value, defer.CancelledError)
        if six.PY2:
            request.headers.setdefault(b'Accept-Encoding', b'gzip,deflate')
            request = request.replace(url='http://localhost:8998/xpayload')
            yield crawler.crawl(seed=request)
            # download_maxsize = 50 is enough for the gzipped response
            failure = crawler.spider.meta.get('failure')
            # Same two assertion fixes as in test_download above.
            self.assertIsNone(failure)
            reason = crawler.spider.meta['close_reason']
            self.assertEqual(reason, 'finished')
        else:
            # See issue https://twistedmatrix.com/trac/ticket/8175
            raise unittest.SkipTest("xpayload only enabled for PY2")
class UriResource(resource.Resource):
    """Resource that echoes back the full URI that was requested."""

    def getChild(self, path, request):
        # Catch-all: every child path resolves to this same resource.
        return self

    def render(self, request):
        # Note: this is an ugly hack for the CONNECT request timeout test.
        # Returning some data here would fail the SSL/TLS handshake.
        # ToDo: implement proper HTTPS proxy tests, not faking them.
        if request.method == b'CONNECT':
            return b''
        return request.uri
class HttpProxyTestCase(unittest.TestCase):
    """Tests for downloading through an HTTP proxy.

    A local UriResource plays the proxy: it echoes the URI it receives,
    which lets the tests verify exactly what the handler sent.
    """
    download_handler_cls = HTTPDownloadHandler

    def setUp(self):
        site = server.Site(UriResource(), timeout=None)
        wrapper = WrappingFactory(site)
        self.port = reactor.listenTCP(0, wrapper, interface='127.0.0.1')
        self.portno = self.port.getHost().port
        self.download_handler = self.download_handler_cls(Settings())
        self.download_request = self.download_handler.download_request

    @defer.inlineCallbacks
    def tearDown(self):
        yield self.port.stopListening()
        if hasattr(self.download_handler, 'close'):
            yield self.download_handler.close()

    def getURL(self, path):
        return "http://127.0.0.1:%d/%s" % (self.portno, path)

    def test_download_with_proxy(self):
        # With a proxy configured, the absolute URL is sent to the proxy.
        def _test(response):
            self.assertEquals(response.status, 200)
            self.assertEquals(response.url, request.url)
            self.assertEquals(response.body, b'http://example.com')

        http_proxy = self.getURL('')
        request = Request('http://example.com', meta={'proxy': http_proxy})
        return self.download_request(request, Spider('foo')).addCallback(_test)

    def test_download_with_proxy_https_noconnect(self):
        # '?noconnect' disables the CONNECT tunnel, so the https URL is sent
        # to the proxy as a plain absolute URI.
        def _test(response):
            self.assertEquals(response.status, 200)
            self.assertEquals(response.url, request.url)
            self.assertEquals(response.body, b'https://example.com')

        http_proxy = '%s?noconnect' % self.getURL('')
        request = Request('https://example.com', meta={'proxy': http_proxy})
        return self.download_request(request, Spider('foo')).addCallback(_test)

    def test_download_without_proxy(self):
        # Without a proxy only the path component reaches the server.
        def _test(response):
            self.assertEquals(response.status, 200)
            self.assertEquals(response.url, request.url)
            self.assertEquals(response.body, b'/path/to/resource')

        request = Request(self.getURL('path/to/resource'))
        return self.download_request(request, Spider('foo')).addCallback(_test)
class DeprecatedHttpProxyTestCase(unittest.TestCase):
    """Old deprecated reference to http10 downloader handler"""
    # NOTE(review): unlike DeprecatedHttpTestCase, this subclasses
    # unittest.TestCase rather than HttpProxyTestCase, so it inherits no
    # proxy tests and effectively runs nothing -- confirm whether the base
    # class should be HttpProxyTestCase.
    download_handler_cls = HttpDownloadHandler
class Http10ProxyTestCase(HttpProxyTestCase):
    """Proxy tests run against the HTTP 1.0 download handler."""
    download_handler_cls = HTTP10DownloadHandler
class Http11ProxyTestCase(HttpProxyTestCase):
    """Proxy tests run against the HTTP 1.1 download handler."""
    download_handler_cls = HTTP11DownloadHandler

    @defer.inlineCallbacks
    def test_download_with_proxy_https_timeout(self):
        """ Test TunnelingTCP4ClientEndpoint """
        http_proxy = self.getURL('')
        domain = 'https://no-such-domain.nosuch'
        request = Request(
            domain, meta={'proxy': http_proxy, 'download_timeout': 0.2})
        d = self.download_request(request, Spider('foo'))
        # The CONNECT tunnel never completes, so a TimeoutError is expected
        # and its message should name the unreachable domain.
        timeout = yield self.assertFailure(d, error.TimeoutError)
        self.assertIn(domain, timeout.osError)
class HttpDownloadHandlerMock(object):
    """Stand-in for an HTTP download handler: instead of performing the
    download it returns the (possibly rewritten) request so tests can
    inspect it, e.g. to verify S3 request signing."""

    def __init__(self, settings):
        # Settings are accepted for interface compatibility and ignored.
        pass

    def download_request(self, request, spider):
        # Echo the request back unchanged for inspection by the caller.
        return request
class S3AnonTestCase(unittest.TestCase):
    """Tests for anonymous (unsigned) S3 downloads."""

    def setUp(self):
        skip_if_no_boto()
        # No AWS credentials are passed, so the handler must fall back to
        # anonymous access.
        self.s3reqh = S3DownloadHandler(Settings(),
                                        httpdownloadhandler=HttpDownloadHandlerMock,
                                        #anon=True, # is implicit
                                        )
        self.download_request = self.s3reqh.download_request
        self.spider = Spider('foo')

    def test_anon_request(self):
        req = Request('s3://aws-publicdatasets/')
        httpreq = self.download_request(req, self.spider)
        self.assertEqual(hasattr(self.s3reqh, 'anon'), True)
        self.assertEqual(self.s3reqh.anon, True)
        # The s3:// URL must be rewritten to the public HTTP endpoint.
        self.assertEqual(
            httpreq.url, 'http://aws-publicdatasets.s3.amazonaws.com/')
class S3TestCase(unittest.TestCase):
download_handler_cls = S3DownloadHandler
# test use same example keys than amazon developer guide
# http://s3.amazonaws.com/awsdocs/S3/20060301/s3-dg-20060301.pdf
# and the tests described here are the examples from that manual
AWS_ACCESS_KEY_ID = '0PN5J17HBGZHT7JJ3X82'
AWS_SECRET_ACCESS_KEY = 'uV3F3YluFJax1cknvbcGwgjvx4QpvB+leU8dUj2o'
def setUp(self):
skip_if_no_boto()
s3reqh = S3DownloadHandler(Settings(), self.AWS_ACCESS_KEY_ID,
self.AWS_SECRET_ACCESS_KEY,
httpdownloadhandler=HttpDownloadHandlerMock)
self.download_request = s3reqh.download_request
self.spider = Spider('foo')
@contextlib.contextmanager
def _mocked_date(self, date):
try:
import botocore.auth
except ImportError:
yield
else:
# We need to mock botocore.auth.formatdate, because otherwise
# botocore overrides Date header with current date and time
# and Authorization header is different each time
with mock.patch('botocore.auth.formatdate') as mock_formatdate:
mock_formatdate.return_value = date
yield
def test_extra_kw(self):
try:
S3DownloadHandler(Settings(), extra_kw=True)
except Exception as e:
self.assertIsInstance(e, (TypeError, NotConfigured))
else:
assert False
def test_request_signing1(self):
# gets an object from the johnsmith bucket.
date ='Tue, 27 Mar 2007 19:36:42 +0000'
req = Request('s3://johnsmith/photos/puppy.jpg', headers={'Date': date})
with self._mocked_date(date):
httpreq = self.download_request(req, self.spider)
self.assertEqual(httpreq.headers['Authorization'], \
b'AWS 0PN5J17HBGZHT7JJ3X82:xXjDGYUmKxnwqr5KXNPGldn5LbA=')
def test_request_signing2(self):
# puts an object into the johnsmith bucket.
date = 'Tue, 27 Mar 2007 21:15:45 +0000'
req = Request('s3://johnsmith/photos/puppy.jpg', method='PUT', headers={
'Content-Type': 'image/jpeg',
'Date': date,
'Content-Length': '94328',
})
with self._mocked_date(date):
httpreq = self.download_request(req, self.spider)
self.assertEqual(httpreq.headers['Authorization'], \
b'AWS 0PN5J17HBGZHT7JJ3X82:hcicpDDvL9SsO6AkvxqmIWkmOuQ=')
def test_request_signing3(self):
# lists the content of the johnsmith bucket.
date = 'Tue, 27 Mar 2007 19:42:41 +0000'
req = Request('s3://johnsmith/?prefix=photos&max-keys=50&marker=puppy', \
method='GET', headers={
'User-Agent': 'Mozilla/5.0',
'Date': date,
})
with self._mocked_date(date):
httpreq = self.download_request(req, self.spider)
self.assertEqual(httpreq.headers['Authorization'], \
b'AWS 0PN5J17HBGZHT7JJ3X82:jsRt/rhG+Vtp88HrYL706QhE4w4=')
def test_request_signing4(self):
# fetches the access control policy sub-resource for the 'johnsmith' bucket.
date = 'Tue, 27 Mar 2007 19:44:46 +0000'
req = Request('s3://johnsmith/?acl',
method='GET', headers={'Date': date})
with self._mocked_date(date):
httpreq = self.download_request(req, self.spider)
self.assertEqual(httpreq.headers['Authorization'], \
b'AWS 0PN5J17HBGZHT7JJ3X82:thdUi9VAkzhkniLj96JIrOPGi0g=')
def test_request_signing5(self):
try: import botocore
except ImportError: pass
else:
raise unittest.SkipTest(
'botocore does not support overriding date with x-amz-date')
# deletes an object from the 'johnsmith' bucket using the
# path-style and Date alternative.
date = 'Tue, 27 Mar 2007 21:20:27 +0000'
req = Request('s3://johnsmith/photos/puppy.jpg', \
method='DELETE', headers={
'Date': date,
'x-amz-date': 'Tue, 27 Mar 2007 21:20:26 +0000',
})
with self._mocked_date(date):
httpreq = self.download_request(req, self.spider)
# botocore does not override Date with x-amz-date
self.assertEqual(httpreq.headers['Authorization'],
b'AWS 0PN5J17HBGZHT7JJ3X82:k3nL7gH3+PadhTEVn5Ip83xlYzk=')
def test_request_signing6(self):
    # PUT with a rich header set to a CNAME-style virtual hosted bucket:
    # amz metadata headers must take part in the signature.
    date = 'Tue, 27 Mar 2007 21:06:08 +0000'
    upload_headers = {
        'User-Agent': 'curl/7.15.5',
        'Host': 'static.johnsmith.net:8080',
        'Date': date,
        'x-amz-acl': 'public-read',
        'content-type': 'application/x-download',
        'Content-MD5': '4gJE4saaMU4BqNR0kLY+lw==',
        'X-Amz-Meta-ReviewedBy': 'joe@johnsmith.net,jane@johnsmith.net',
        'X-Amz-Meta-FileChecksum': '0x02661779',
        'X-Amz-Meta-ChecksumAlgorithm': 'crc32',
        'Content-Disposition': 'attachment; filename=database.dat',
        'Content-Encoding': 'gzip',
        'Content-Length': '5913339',
    }
    req = Request('s3://static.johnsmith.net:8080/db-backup.dat.gz',
                  method='PUT', headers=upload_headers)
    with self._mocked_date(date):
        signed = self.download_request(req, self.spider)
        self.assertEqual(
            signed.headers['Authorization'],
            b'AWS 0PN5J17HBGZHT7JJ3X82:C0FlOtU8Ylb9KDTpZqYkZPX91iI=')
def test_request_signing7(self):
    # Spaces in both the path and the query string must be quoted
    # before the signature is computed.
    date = 'Tue, 27 Mar 2007 19:42:41 +0000'
    spaced_url = ("s3://johnsmith/photos/my puppy.jpg"
                  "?response-content-disposition=my puppy.jpg")
    req = Request(spaced_url, method='GET', headers={'Date': date})
    with self._mocked_date(date):
        signed = self.download_request(req, self.spider)
        self.assertEqual(
            signed.headers['Authorization'],
            b'AWS 0PN5J17HBGZHT7JJ3X82:+CfvG8EZ3YccOrRVMXNaK2eKZmM=')
class BaseFTPTestCase(unittest.TestCase):
    """Exercise FTPDownloadHandler against an in-process Twisted FTP
    server that requires the username/password credentials below.
    """

    username = "scrapy"
    password = "passwd"
    # Default Request.meta carrying the credentials the test server expects.
    req_meta = {"ftp_user": username, "ftp_password": password}

    def setUp(self):
        from twisted.protocols.ftp import FTPRealm, FTPFactory
        from scrapy.core.downloader.handlers.ftp import FTPDownloadHandler
        # setup dirs and test file
        self.directory = self.mktemp()
        os.mkdir(self.directory)
        userdir = os.path.join(self.directory, self.username)
        os.mkdir(userdir)
        fp = FilePath(userdir)
        fp.child('file.txt').setContent(b"I have the power!")
        fp.child('file with spaces.txt').setContent(b"Moooooooooo power!")
        # setup server
        realm = FTPRealm(anonymousRoot=self.directory, userHome=self.directory)
        p = portal.Portal(realm)
        users_checker = checkers.InMemoryUsernamePasswordDatabaseDontUse()
        users_checker.addUser(self.username, self.password)
        p.registerChecker(users_checker, credentials.IUsernamePassword)
        self.factory = FTPFactory(portal=p)
        # Port 0 lets the OS pick a free port; record it for request URLs.
        self.port = reactor.listenTCP(0, self.factory, interface="127.0.0.1")
        self.portNum = self.port.getHost().port
        self.download_handler = FTPDownloadHandler(Settings())
        self.addCleanup(self.port.stopListening)

    def tearDown(self):
        # Remove the temporary FTP root created in setUp.
        shutil.rmtree(self.directory)

    def _add_test_callbacks(self, deferred, callback=None, errback=None):
        """Chain a connection-closing step plus the given callback/errback
        onto *deferred* and return it.
        """
        def _clean(data):
            # Drop the client connection so the reactor can shut down cleanly.
            self.download_handler.client.transport.loseConnection()
            return data

        deferred.addCallback(_clean)
        if callback:
            deferred.addCallback(callback)
        if errback:
            deferred.addErrback(errback)
        return deferred

    def test_ftp_download_success(self):
        # A plain download returns status 200 with the file body and size.
        request = Request(url="ftp://127.0.0.1:%s/file.txt" % self.portNum,
                          meta=self.req_meta)
        d = self.download_handler.download_request(request, None)

        def _test(r):
            self.assertEqual(r.status, 200)
            self.assertEqual(r.body, b'I have the power!')
            self.assertEqual(r.headers, {b'Local Filename': [b''], b'Size': [b'17']})
        return self._add_test_callbacks(d, _test)

    def test_ftp_download_path_with_spaces(self):
        # URL paths containing spaces must still resolve to the right file.
        request = Request(
            url="ftp://127.0.0.1:%s/file with spaces.txt" % self.portNum,
            meta=self.req_meta
        )
        d = self.download_handler.download_request(request, None)

        def _test(r):
            self.assertEqual(r.status, 200)
            self.assertEqual(r.body, b'Moooooooooo power!')
            self.assertEqual(r.headers, {b'Local Filename': [b''], b'Size': [b'18']})
        return self._add_test_callbacks(d, _test)

    def test_ftp_download_notexist(self):
        # Missing remote files surface as a 404 response.
        request = Request(url="ftp://127.0.0.1:%s/notexist.txt" % self.portNum,
                          meta=self.req_meta)
        d = self.download_handler.download_request(request, None)

        def _test(r):
            self.assertEqual(r.status, 404)
        return self._add_test_callbacks(d, _test)

    def test_ftp_local_filename(self):
        # With ftp_local_filename in meta, the payload is written to that
        # path and the response body is the local filename itself.
        local_fname = b"/tmp/file.txt"
        meta = {"ftp_local_filename": local_fname}
        meta.update(self.req_meta)
        request = Request(url="ftp://127.0.0.1:%s/file.txt" % self.portNum,
                          meta=meta)
        d = self.download_handler.download_request(request, None)

        def _test(r):
            self.assertEqual(r.body, local_fname)
            self.assertEqual(r.headers, {b'Local Filename': [b'/tmp/file.txt'], b'Size': [b'17']})
            self.assertTrue(os.path.exists(local_fname))
            with open(local_fname, "rb") as f:
                self.assertEqual(f.read(), b"I have the power!")
            os.remove(local_fname)
        return self._add_test_callbacks(d, _test)
class FTPTestCase(BaseFTPTestCase):
    """Credentialed-server cases on top of the shared FTP fixture."""

    def test_invalid_credentials(self):
        # A wrong password must terminate the connection.
        from twisted.protocols.ftp import ConnectionLost

        bad_meta = dict(self.req_meta, ftp_password='invalid')
        request = Request(
            url="ftp://127.0.0.1:%s/file.txt" % self.portNum,
            meta=bad_meta,
        )
        d = self.download_handler.download_request(request, None)

        def _test(failure):
            self.assertEqual(failure.type, ConnectionLost)

        return self._add_test_callbacks(d, errback=_test)
class AnonymousFTPTestCase(BaseFTPTestCase):
    """Re-run the base FTP download tests against a server configured for
    anonymous access (no credentials in Request.meta).
    """

    username = "anonymous"
    # Empty meta: the handler should fall back to an anonymous login.
    req_meta = {}

    def setUp(self):
        from twisted.protocols.ftp import FTPRealm, FTPFactory
        from scrapy.core.downloader.handlers.ftp import FTPDownloadHandler
        # setup dir and test file
        self.directory = self.mktemp()
        os.mkdir(self.directory)
        fp = FilePath(self.directory)
        fp.child('file.txt').setContent(b"I have the power!")
        fp.child('file with spaces.txt').setContent(b"Moooooooooo power!")
        # setup server for anonymous access
        realm = FTPRealm(anonymousRoot=self.directory)
        p = portal.Portal(realm)
        p.registerChecker(checkers.AllowAnonymousAccess(),
                          credentials.IAnonymous)
        self.factory = FTPFactory(portal=p,
                                  userAnonymous=self.username)
        # Port 0 lets the OS pick a free port; record it for request URLs.
        self.port = reactor.listenTCP(0, self.factory, interface="127.0.0.1")
        self.portNum = self.port.getHost().port
        self.download_handler = FTPDownloadHandler(Settings())
        self.addCleanup(self.port.stopListening)

    def tearDown(self):
        # Remove the temporary anonymous FTP root created in setUp.
        shutil.rmtree(self.directory)
class DataURITestCase(unittest.TestCase):
    """Tests for downloading ``data:`` URIs via DataURIDownloadHandler.

    Fix: replaced the deprecated ``assertEquals`` alias (removed in
    Python 3.12) with ``assertEqual``, matching the rest of the file.
    """

    def setUp(self):
        self.download_handler = DataURIDownloadHandler(Settings())
        self.download_request = self.download_handler.download_request
        self.spider = Spider('foo')

    def test_response_attrs(self):
        # The response URL mirrors the request URI and carries no headers.
        uri = "data:,A%20brief%20note"

        def _test(response):
            self.assertEqual(response.url, uri)
            self.assertFalse(response.headers)

        request = Request(uri)
        return self.download_request(request, self.spider).addCallback(_test)

    def test_default_mediatype_encoding(self):
        # Without an explicit mediatype, text/plain with US-ASCII applies.
        def _test(response):
            self.assertEqual(response.text, 'A brief note')
            self.assertEqual(type(response),
                             responsetypes.from_mimetype("text/plain"))
            self.assertEqual(response.encoding, "US-ASCII")

        request = Request("data:,A%20brief%20note")
        return self.download_request(request, self.spider).addCallback(_test)

    def test_default_mediatype(self):
        # A charset parameter alone still defaults the type to text/plain.
        def _test(response):
            self.assertEqual(response.text, u'\u038e\u03a3\u038e')
            self.assertEqual(type(response),
                             responsetypes.from_mimetype("text/plain"))
            self.assertEqual(response.encoding, "iso-8859-7")

        request = Request("data:;charset=iso-8859-7,%be%d3%be")
        return self.download_request(request, self.spider).addCallback(_test)

    def test_text_charset(self):
        # The declared charset decodes the percent-encoded body bytes.
        def _test(response):
            self.assertEqual(response.text, u'\u038e\u03a3\u038e')
            self.assertEqual(response.body, b'\xbe\xd3\xbe')
            self.assertEqual(response.encoding, "iso-8859-7")

        request = Request("data:text/plain;charset=iso-8859-7,%be%d3%be")
        return self.download_request(request, self.spider).addCallback(_test)

    def test_mediatype_parameters(self):
        # Quoted/escaped mediatype parameters must not confuse parsing.
        def _test(response):
            self.assertEqual(response.text, u'\u038e\u03a3\u038e')
            self.assertEqual(type(response),
                             responsetypes.from_mimetype("text/plain"))
            self.assertEqual(response.encoding, "utf-8")

        request = Request('data:text/plain;foo=%22foo;bar%5C%22%22;'
                          'charset=utf-8;bar=%22foo;%5C%22 foo ;/,%22'
                          ',%CE%8E%CE%A3%CE%8E')
        return self.download_request(request, self.spider).addCallback(_test)

    def test_base64(self):
        # ';base64' payloads are decoded before building the response.
        def _test(response):
            self.assertEqual(response.text, 'Hello, world.')

        request = Request('data:text/plain;base64,SGVsbG8sIHdvcmxkLg%3D%3D')
        return self.download_request(request, self.spider).addCallback(_test)
| {
"content_hash": "d4bcde5aa4523d530b7aeee618055045",
"timestamp": "",
"source": "github",
"line_count": 991,
"max_line_length": 111,
"avg_line_length": 39.05549949545913,
"alnum_prop": 0.642181686647375,
"repo_name": "taito/scrapy",
"id": "3efcf6e9ccc24da750a90764fdf3017e7eed16e1",
"size": "38704",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_downloader_handlers.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "2076"
},
{
"name": "Python",
"bytes": "1305737"
},
{
"name": "Roff",
"bytes": "2010"
},
{
"name": "Shell",
"bytes": "258"
}
],
"symlink_target": ""
} |
from typing import Any, Dict, Generator
from ...call_builder.base.base_call_builder import BaseCallBuilder as _BaseCallBuilder
from ...client.base_sync_client import BaseSyncClient
from ...client.response import Response
from ...exceptions import NotPageableError, raise_request_exception
from ...type_checked import type_checked
from ...utils import urljoin_with_query
__all__ = ["BaseCallBuilder"]
@type_checked
class BaseCallBuilder(_BaseCallBuilder):
    """Creates a new :class:`BaseCallBuilder` pointed to server defined by horizon_url.

    This is an **abstract** class. Do not create this object directly, use :class:`stellar_sdk.Server` class.

    :param client: The client instance used to send request.
    :param horizon_url: Horizon server URL.
    """

    def __init__(self, client: BaseSyncClient, **kwargs) -> None:
        super().__init__(**kwargs)
        # Synchronous HTTP client used for call()/stream()/next()/prev().
        self.client: BaseSyncClient = client

    def call(self) -> Dict[str, Any]:
        """Triggers a HTTP request using this builder's current configuration.

        :return: If it is called synchronous, the response will be returned. If
            it is called asynchronously, it will return Coroutine.
        :raises:
            | :exc:`ConnectionError <stellar_sdk.exceptions.ConnectionError>`: if you have not successfully
                connected to the server.
            | :exc:`NotFoundError <stellar_sdk.exceptions.NotFoundError>`: if status_code == 404
            | :exc:`BadRequestError <stellar_sdk.exceptions.BadRequestError>`: if 400 <= status_code < 500
                and status_code != 404
            | :exc:`BadResponseError <stellar_sdk.exceptions.BadResponseError>`: if 500 <= status_code < 600
            | :exc:`UnknownRequestError <stellar_sdk.exceptions.UnknownRequestError>`: if an unknown error occurs,
                please submit an issue
        """
        url = urljoin_with_query(self.horizon_url, self.endpoint)
        return self._call(url, self.params)

    def _call(self, url: str, params: dict = None) -> Dict[str, Any]:
        # Shared GET helper: raise SDK exceptions for HTTP errors, parse
        # the JSON body, and run the base-class pagination check
        # (presumably records next/prev hrefs for next()/prev() — defined
        # in _BaseCallBuilder, confirm there).
        raw_resp = self.client.get(url, params)
        assert isinstance(raw_resp, Response)
        raise_request_exception(raw_resp)
        resp = raw_resp.json()
        self._check_pageable(resp)
        return resp

    def stream(
        self,
    ) -> Generator[Dict[str, Any], None, None]:
        """Creates an EventSource that listens for incoming messages from the server.

        See `Horizon Response Format <https://developers.stellar.org/api/introduction/response-format/>`__

        See `MDN EventSource <https://developer.mozilla.org/en-US/docs/Web/API/EventSource>`__

        :return: an EventSource.
        :raise: :exc:`StreamClientError <stellar_sdk.exceptions.StreamClientError>` - Failed to fetch stream resource.
        """
        url = urljoin_with_query(self.horizon_url, self.endpoint)
        return self.client.stream(url, self.params)

    def next(self) -> Dict[str, Any]:
        """Fetch the next page of results recorded from the last response.

        :raises: :exc:`NotPageableError <stellar_sdk.exceptions.NotPageableError>`
            if no next-page link is available.
        """
        if self.next_href is None:
            raise NotPageableError("The next page does not exist.")
        return self._call(self.next_href, None)

    def prev(self) -> Dict[str, Any]:
        """Fetch the previous page of results recorded from the last response.

        :raises: :exc:`NotPageableError <stellar_sdk.exceptions.NotPageableError>`
            if no previous-page link is available.
        """
        if self.prev_href is None:
            raise NotPageableError("The prev page does not exist.")
        return self._call(self.prev_href, None)

    def __eq__(self, other: object) -> bool:
        # Builders are equal when they target the same endpoint with the
        # same params, server and client.
        if not isinstance(other, self.__class__):
            return NotImplemented
        return (
            self.params == other.params
            and self.endpoint == other.endpoint
            and self.horizon_url == other.horizon_url
            and self.client == other.client
        )

    def __str__(self):
        return (
            f"<CallBuilder [horizon_url={self.horizon_url}, "
            f"endpoint={self.endpoint}, "
            f"params={self.params}, "
            f"prev_href={self.prev_href}, "
            f"next_href={self.next_href}, "
            f"client={self.client}]>"
        )
| {
"content_hash": "200c5b0718174e9d02230241c9f20a43",
"timestamp": "",
"source": "github",
"line_count": 97,
"max_line_length": 118,
"avg_line_length": 40.896907216494846,
"alnum_prop": 0.6359969750441139,
"repo_name": "StellarCN/py-stellar-base",
"id": "bb0daf5d2b241e9f57ed3ae4980f7057a1676cb3",
"size": "3967",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "stellar_sdk/call_builder/call_builder_sync/base_call_builder.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1737"
},
{
"name": "Makefile",
"bytes": "1085"
},
{
"name": "Python",
"bytes": "2044193"
},
{
"name": "RPC",
"bytes": "76503"
}
],
"symlink_target": ""
} |
# Import the project-generation helpers. Prefer the bare ``genesis``
# module (presumably when run from inside the package directory — confirm),
# falling back to the installed-package path ``selenext.genesis``.
try:
    from genesis import make_project, make_project_scaffold
except ImportError:
    from selenext.genesis import make_project, make_project_scaffold
| {
"content_hash": "d547dcc55685a8c45b04ee1ec5ec03ca",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 68,
"avg_line_length": 38.5,
"alnum_prop": 0.7922077922077922,
"repo_name": "Wykleph/selenext",
"id": "c9d3aea02a265e94742c571b20857cac1d957057",
"size": "154",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "114632"
}
],
"symlink_target": ""
} |
from mock import Mock
from django.test import TestCase
from celery_progress.backends import CeleryBackend
class CeleryBackendTest(TestCase):
    """
    Tests for the CeleryBackend, run against a stubbed-out celery app.
    """

    def setUp(self):
        # Build the mock chain bottom-up (store_result -> backend -> app),
        # then graft the fake app onto a real CeleryBackend instance.
        self.mock_store_result = Mock(return_value=True)
        self.mock_backend = Mock(store_result=self.mock_store_result)
        self.mock_app = Mock(backend=self.mock_backend)
        self.backend = CeleryBackend()
        self.backend.app = self.mock_app

    def test_set_progress_calls_store_result(self):
        self.backend.set_progress('abc-123-def', 50)
        self.assertTrue(self.mock_store_result.called)
| {
"content_hash": "a36c413416c14ee97bcec2a390da2159",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 69,
"avg_line_length": 29.40909090909091,
"alnum_prop": 0.6877897990726429,
"repo_name": "robgolding63/django-celery-progress",
"id": "b02e662be5017789e13456c21aa847530946164f",
"size": "647",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "tests/tests.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "3574"
},
{
"name": "JavaScript",
"bytes": "684"
},
{
"name": "Python",
"bytes": "7993"
}
],
"symlink_target": ""
} |
'''
requirements
Python version - 3
flask
Rot13 converter
'''
from string import ascii_lowercase as low
from string import ascii_uppercase as upp
from flask import Flask,request,render_template
app = Flask(__name__)
@app.route('/')
def home():
    """Serve the converter form with an empty output area."""
    initial_output = ""
    return render_template('rot13.html', output=initial_output)
@app.route('/', methods=['POST'])
def rot13():
    """Handle the form POST: ROT13-encode the ``q`` field and re-render.

    :returns: the rendered template with the converted text as ``output``
    """
    # Fixes vs. the original: the local result no longer shadows the view
    # function's own name, and the conversion lives in a pure helper.
    return render_template('rot13.html', output=_rot13(request.form['q']))


def _rot13(text):
    """Return *text* with each ASCII letter rotated 13 places.

    Case is preserved; characters outside a-z/A-Z (digits, punctuation,
    accented letters) pass through unchanged.
    """
    def _shift(ch):
        # Membership tests rather than isupper()/islower(): non-ASCII
        # letters such as 'É' are "upper" but absent from `upp`, and
        # would previously raise ValueError in .index().
        if ch in upp:
            return upp[(upp.index(ch) + 13) % 26]
        if ch in low:
            return low[(low.index(ch) + 13) % 26]
        return ch

    # join() avoids the quadratic `+=` string build of the original.
    return ''.join(_shift(ch) for ch in text)
if __name__=='__main__':
    # Development entry point only: debug=True enables the reloader and
    # interactive debugger (not suitable for production serving).
    app.run(debug=True)
| {
"content_hash": "8156f706a4ad4f27ad3af1b4aeb06aa1",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 51,
"avg_line_length": 20.11764705882353,
"alnum_prop": 0.6140350877192983,
"repo_name": "guvi007/ROT13-convertor",
"id": "69ed0f6c674ccb2e02bddede3ec005d5dacd4018",
"size": "684",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rot13.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "425"
},
{
"name": "Python",
"bytes": "684"
}
],
"symlink_target": ""
} |
"""
This module holds the functions to crawl and format stuff from the website
"""
import requests
from apscheduler.schedulers.blocking import BlockingScheduler
from bot.logger import LOGGER
from bot.settings import RU_URL
from bs4 import BeautifulSoup
from crawler.db import REDIS
SCHED = BlockingScheduler()
def highlight(text):
    """
    Wrap *text* in asterisks so it renders as bold.

    :param text: the value to be highlighted (formatted into a string)
    :returns: ``'*<text>*'``
    """
    marker = '*'
    return '{0}{1}{0}'.format(marker, text)
def beautify(menu):
    """
    Strip whitespace from every menu entry and bold the third one.

    :param menu: iterable of parsed HTML elements exposing ``get_text()``
    :returns: list of cleaned strings, with index 2 passed through highlight()
    """
    cleaned = [entry.get_text().strip() for entry in menu]
    cleaned[2] = highlight(cleaned[2])
    return cleaned
def format_menu(day, menu):
    """
    Render one day's menu as a single human-readable line.

    :param day: parsed HTML element holding the week-day name
    :param menu: iterable of menu item strings
    :returns: ``'*Day* - item1, item2, ...'``
    """
    day_label = highlight(day.get_text().capitalize())
    item_list = ', '.join([item for item in menu])
    return '{} - {}'.format(day_label, item_list)
@SCHED.scheduled_job('cron', day_of_week='mon-fri', hour=12)
def get_menu():
    """
    This function is scheduled to run daily and save the menu information
    collected from the website on redis
    """
    page = requests.get(RU_URL)
    if page.status_code != 200:
        # Best-effort: keep whatever is already cached when the site is down.
        LOGGER.warning('RU page was unavailable for some unknown reason')
        return None
    soup = BeautifulSoup(page.content, 'html.parser')
    items = soup.find_all('td', {'align': 'center'})
    # Drop the previously cached week before storing the fresh one.
    REDIS.flushdb()
    for index in range(5):
        # assumes the page lays out exactly 6 centered cells per weekday,
        # with the weekday label in row index+1 — TODO confirm against markup
        menu = beautify(items[(index * 6):(index * 6 + 6)])
        week_day = soup.find_all('tr')[index + 1].find('td')
        # Keyed 0..4 (Monday..Friday); value is the formatted menu line.
        REDIS.set(index, format_menu(week_day, menu))
| {
"content_hash": "1290a05b6509885c691aa8378b556ca0",
"timestamp": "",
"source": "github",
"line_count": 76,
"max_line_length": 76,
"avg_line_length": 26.68421052631579,
"alnum_prop": 0.6553254437869822,
"repo_name": "gabrielecker/ru-telegram-bot",
"id": "557e29e58424005c40e5539a65d59657fd4e329e",
"size": "2028",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "crawler/ru.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "267"
},
{
"name": "Python",
"bytes": "8658"
}
],
"symlink_target": ""
} |
import werkzeug
from openerp import SUPERUSER_ID
from openerp.addons.web import http
from openerp.addons.web.http import request
from openerp.addons.website.models.website import slug, unslug
from openerp.addons.website_partner.controllers.main import WebsitePartnerPage
from openerp.tools.translate import _
class WebsiteCrmPartnerAssign(WebsitePartnerPage):
_references_per_page = 40
@http.route([
'/partners',
'/partners/page/<int:page>',
'/partners/grade/<model("res.partner.grade"):grade>',
'/partners/grade/<model("res.partner.grade"):grade>/page/<int:page>',
'/partners/country/<model("res.country"):country>',
'/partners/country/<model("res.country"):country>/page/<int:page>',
'/partners/grade/<model("res.partner.grade"):grade>/country/<model("res.country"):country>',
'/partners/grade/<model("res.partner.grade"):grade>/country/<model("res.country"):country>/page/<int:page>',
], type='http', auth="public", website=True)
def partners(self, country=None, grade=None, page=0, **post):
country_all = post.pop('country_all', False)
partner_obj = request.registry['res.partner']
country_obj = request.registry['res.country']
search = post.get('search', '')
base_partner_domain = [('is_company', '=', True), ('grade_id', '!=', False), ('website_published', '=', True)]
if not request.registry['res.users'].has_group(request.cr, request.uid, 'base.group_website_publisher'):
base_partner_domain += [('grade_id.website_published', '=', True)]
if search:
base_partner_domain += ['|', ('name', 'ilike', search), ('website_description', 'ilike', search)]
# group by grade
grade_domain = list(base_partner_domain)
if not country and not country_all:
country_code = request.session['geoip'].get('country_code')
if country_code:
country_ids = country_obj.search(request.cr, request.uid, [('code', '=', country_code)], context=request.context)
if country_ids:
country = country_obj.browse(request.cr, request.uid, country_ids[0], context=request.context)
if country:
grade_domain += [('country_id', '=', country.id)]
grades = partner_obj.read_group(
request.cr, SUPERUSER_ID, grade_domain, ["id", "grade_id"],
groupby="grade_id", orderby="grade_id DESC", context=request.context)
grades_partners = partner_obj.search(
request.cr, SUPERUSER_ID, grade_domain,
context=request.context, count=True)
# flag active grade
for grade_dict in grades:
grade_dict['active'] = grade and grade_dict['grade_id'][0] == grade.id
grades.insert(0, {
'grade_id_count': grades_partners,
'grade_id': (0, _("All Categories")),
'active': bool(grade is None),
})
# group by country
country_domain = list(base_partner_domain)
if grade:
country_domain += [('grade_id', '=', grade.id)]
countries = partner_obj.read_group(
request.cr, SUPERUSER_ID, country_domain, ["id", "country_id"],
groupby="country_id", orderby="country_id", context=request.context)
countries_partners = partner_obj.search(
request.cr, SUPERUSER_ID, country_domain,
context=request.context, count=True)
# flag active country
for country_dict in countries:
country_dict['active'] = country and country_dict['country_id'] and country_dict['country_id'][0] == country.id
countries.insert(0, {
'country_id_count': countries_partners,
'country_id': (0, _("All Countries")),
'active': bool(country is None),
})
# current search
if grade:
base_partner_domain += [('grade_id', '=', grade.id)]
if country:
base_partner_domain += [('country_id', '=', country.id)]
# format pager
if grade and not country:
url = '/partners/grade/' + slug(grade)
elif country and not grade:
url = '/partners/country/' + slug(country)
elif country and grade:
url = '/partners/grade/' + slug(grade) + '/country/' + slug(country)
else:
url = '/partners'
url_args = {}
if search:
url_args['search'] = search
if country_all:
url_args['country_all'] = True
partner_count = partner_obj.search_count(
request.cr, SUPERUSER_ID, base_partner_domain,
context=request.context)
pager = request.website.pager(
url=url, total=partner_count, page=page, step=self._references_per_page, scope=7,
url_args=url_args)
# search partners matching current search parameters
partner_ids = partner_obj.search(
request.cr, SUPERUSER_ID, base_partner_domain,
order="grade_id DESC, display_name ASC",
context=request.context) # todo in trunk: order="grade_id DESC, implemented_count DESC", offset=pager['offset'], limit=self._references_per_page
partners = partner_obj.browse(request.cr, SUPERUSER_ID, partner_ids, request.context)
# remove me in trunk
partners = sorted(partners, key=lambda x: (x.grade_id.sequence if x.grade_id else 0, len([i for i in x.implemented_partner_ids if i.website_published])), reverse=True)
partners = partners[pager['offset']:pager['offset'] + self._references_per_page]
google_map_partner_ids = ','.join(map(str, [p.id for p in partners]))
google_maps_api_key = request.env['ir.config_parameter'].sudo().get_param('google_maps_api_key')
values = {
'countries': countries,
'current_country': country,
'grades': grades,
'current_grade': grade,
'partners': partners,
'google_map_partner_ids': google_map_partner_ids,
'pager': pager,
'searches': post,
'search_path': "%s" % werkzeug.url_encode(post),
'google_maps_api_key': google_maps_api_key,
}
return request.render("website_crm_partner_assign.index", values, status=partners and 200 or 404)
# Do not use semantic controller due to SUPERUSER_ID
@http.route(['/partners/<partner_id>'], type='http', auth="public", website=True)
def partners_detail(self, partner_id, **post):
_, partner_id = unslug(partner_id)
current_grade, current_country = None, None
grade_id = post.get('grade_id')
country_id = post.get('country_id')
if grade_id:
grade_ids = request.registry['res.partner.grade'].exists(request.cr, request.uid, int(grade_id), context=request.context)
if grade_ids:
current_grade = request.registry['res.partner.grade'].browse(request.cr, request.uid, grade_ids[0], context=request.context)
if country_id:
country_ids = request.registry['res.country'].exists(request.cr, request.uid, int(country_id), context=request.context)
if country_ids:
current_country = request.registry['res.country'].browse(request.cr, request.uid, country_ids[0], context=request.context)
if partner_id:
partner = request.registry['res.partner'].browse(request.cr, SUPERUSER_ID, partner_id, context=request.context)
is_website_publisher = request.registry['res.users'].has_group(request.cr, request.uid, 'base.group_website_publisher')
if partner.exists() and (partner.website_published or is_website_publisher):
values = {
'main_object': partner,
'partner': partner,
'current_grade': current_grade,
'current_country': current_country
}
return request.website.render("website_crm_partner_assign.partner", values)
return self.partners(**post)
| {
"content_hash": "11b8777342758fc7db0ffbf43a541adf",
"timestamp": "",
"source": "github",
"line_count": 164,
"max_line_length": 175,
"avg_line_length": 49.51219512195122,
"alnum_prop": 0.6032019704433498,
"repo_name": "vileopratama/vitech",
"id": "222d4f5de9503d5ff82df1a71cfe645cbc0d7a65",
"size": "8144",
"binary": false,
"copies": "15",
"ref": "refs/heads/master",
"path": "src/addons/website_crm_partner_assign/controllers/main.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "9611"
},
{
"name": "CSS",
"bytes": "2125999"
},
{
"name": "HTML",
"bytes": "252393"
},
{
"name": "Java",
"bytes": "1840167"
},
{
"name": "JavaScript",
"bytes": "6176224"
},
{
"name": "Makefile",
"bytes": "19072"
},
{
"name": "Mako",
"bytes": "7659"
},
{
"name": "NSIS",
"bytes": "16782"
},
{
"name": "Python",
"bytes": "9438805"
},
{
"name": "Ruby",
"bytes": "220"
},
{
"name": "Shell",
"bytes": "22312"
},
{
"name": "Vim script",
"bytes": "406"
},
{
"name": "XSLT",
"bytes": "11489"
}
],
"symlink_target": ""
} |
from fluent_contents.models import Placeholder
from fluent_contents.plugins.picture.models import PictureItem
from fluent_contents.plugins.text.models import TextItem
from fluent_contents.tests.testapp.models import RawHtmlTestItem, TestPage
from fluent_contents.tests.utils import AppTestCase
class SearchTest(AppTestCase):
    """
    Tests for search
    """

    def test_search_text(self):
        """
        Test: Simple text indexing should work. HTML is stripped.
        """
        page = TestPage.objects.create(pk=20, contents="Search!")
        placeholder = Placeholder.objects.create_for_object(page, "slot2")
        TextItem.objects.create_for_placeholder(placeholder, text="<b>Item1!</b>", sort_order=1)
        # rstrip(): trailing whitespace from rendering is not significant.
        self.assertEqual(placeholder.get_search_text().rstrip(), "Item1!")

    def test_search_skip(self):
        """
        Test: Search should skip elements without search_fields or search_output
        """
        page = TestPage.objects.create(pk=20, contents="Search!")
        placeholder = Placeholder.objects.create_for_object(page, "slot2")
        RawHtmlTestItem.objects.create_for_placeholder(
            placeholder, html="<b>HTML!!</b>", sort_order=2
        )
        self.assertEqual(placeholder.get_search_text(), "")

    def test_search_fields(self):
        """
        Test: a plugin field (PictureItem.caption) should be indexed with
        its HTML stripped.
        """
        page = TestPage.objects.create(pk=20, contents="Search!")
        placeholder = Placeholder.objects.create_for_object(page, "slot2")
        PictureItem.objects.create_for_placeholder(
            placeholder, caption="<b>caption</b>", sort_order=1
        )
        self.assertEqual(placeholder.get_search_text(), "caption")
| {
"content_hash": "ad7f198f81bbb653f85c3eed8b022492",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 96,
"avg_line_length": 38.71111111111111,
"alnum_prop": 0.669345579793341,
"repo_name": "django-fluent/django-fluent-contents",
"id": "db2aaeb2166de136bcbff1cab9cee3dcb2ac2dba",
"size": "1742",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "fluent_contents/tests/test_search.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "14203"
},
{
"name": "HTML",
"bytes": "36403"
},
{
"name": "JavaScript",
"bytes": "84216"
},
{
"name": "Python",
"bytes": "405158"
}
],
"symlink_target": ""
} |
RESOURCE_MAPPING = {
# api
'api_test': {
'resource': 'api.test',
'docs': 'https://api.slack.com/methods/api.test'
},
# auth
'auth_test': {
'resource': 'auth.test',
'docs': 'https://api.slack.com/methods/auth.test'
},
# channels
'channels_archive': {
'resource': 'channels.archive',
'docs': 'https://api.slack.com/methods/channels.archive'
},
'channels_create': {
'resource': 'channels.create',
'docs': 'https://api.slack.com/methods/channels.create'
},
'channels_history': {
'resource': 'channels.history',
'docs': 'https://api.slack.com/methods/channels.history'
},
'channels_info': {
'resource': 'channels.info',
'docs': 'https://api.slack.com/methods/channels.info'
},
'channels_kick': {
'resource': 'channels.kick',
'docs': 'https://api.slack.com/methods/channels.kick'
},
'channels_leave': {
'resource': 'channels.leave',
'docs': 'https://api.slack.com/methods/channels.leave'
},
'channels_list': {
'resource': 'channels.list',
'docs': 'https://api.slack.com/methods/channels.list'
},
'channels_mark': {
'resource': 'channels.mark',
'docs': 'https://api.slack.com/methods/channels.mark'
},
'channels_rename': {
'resource': 'channels.rename',
'docs': 'https://api.slack.com/methods/channels.rename'
},
'channels_set_purpose': {
'resource': 'channels.setPurpose',
'docs': 'https://api.slack.com/methods/channels.setPurpose'
},
'channels_set_topic': {
'resource': 'channels.setTopic',
'docs': 'https://api.slack.com/methods/channels.setTopic'
},
'channels_unarchive': {
'resource': 'channels.unarchive',
'docs': 'https://api.slack.com/methods/channels.unarchive'
},
# chat
'chat_delete': {
'resource': 'chat.delete',
'docs': 'https://api.slack.com/methods/chat.delete'
},
'chat_post_message': {
'resource': 'chat.postMessage',
'docs': 'https://api.slack.com/methods/chat.postMessage'
},
'chat_update': {
'resource': 'chat.update',
'docs': 'https://api.slack.com/methods/chat.update'
},
# emoji
'emoji_list': {
'resource': 'emoji.list',
'docs': 'https://api.slack.com/methods/emoji.list'
},
# files
'files_delete': {
'resource': 'files.delete',
'docs': 'https://api.slack.com/methods/files.delete'
},
'files_info': {
'resource': 'files.info',
'docs': 'https://api.slack.com/methods/files.info'
},
'files_list': {
'resource': 'files.list',
'docs': 'https://api.slack.com/methods/files.list'
},
'files_upload': {
'resource': 'files.upload',
'docs': 'https://api.slack.com/methods/files.upload'
},
# groups
'groups_archive': {
'resource': 'groups.archive',
'docs': 'https://api.slack.com/methods/groups.archive'
},
'groups_close': {
'resource': 'groups.close',
'docs': 'https://api.slack.com/methods/groups.close'
},
'groups_create': {
'resource': 'groups.create',
'docs': 'https://api.slack.com/methods/groups.create'
},
'groups_create_child': {
'resource': 'groups.createChild',
'docs': 'https://api.slack.com/methods/groups.createChild'
},
'groups_history': {
'resource': 'groups.history',
'docs': 'https://api.slack.com/methods/groups.history'
},
'groups_info': {
'resource': 'groups.info',
'docs': 'https://api.slack.com/methods/groups.info'
},
'groups_invite': {
'resource': 'groups.invite',
'docs': 'https://api.slack.com/methods/groups.invite'
},
'groups_kick': {
'resource': 'groups.kick',
'docs': 'https://api.slack.com/methods/groups.kick'
},
'groups_leave': {
'resource': 'groups.leave',
'docs': 'https://api.slack.com/methods/groups.leave'
},
'groups_list': {
'resource': 'groups.list',
'docs': 'https://api.slack.com/methods/groups.list'
},
'groups_mark': {
'resource': 'groups.mark',
'docs': 'https://api.slack.com/methods/groups.mark'
},
'groups_open': {
'resource': 'groups.open',
'docs': 'https://api.slack.com/methods/groups.open'
},
'groups_rename': {
'resource': 'groups.rename',
'docs': 'https://api.slack.com/methods/groups.rename'
},
'groups_set_purpose': {
'resource': 'groups.setPurpose',
'docs': 'https://api.slack.com/methods/groups.setPurpose'
},
'groups_set_topic': {
'resource': 'groups.setTopic',
'docs': 'https://api.slack.com/methods/groups.setTopic'
},
'groups_unarchive': {
'resource': 'groups.unarchive',
'docs': 'https://api.slack.com/methods/groups.unarchive'
},
# im
'im_close': {
'resource': 'im.close',
'docs': 'https://api.slack.com/methods/im.close'
},
'im_history': {
'resource': 'im.history',
'docs': 'https://api.slack.com/methods/im.history'
},
'im_list': {
'resource': 'im.list',
'docs': 'https://api.slack.com/methods/im.list'
},
'im_mark': {
'resource': 'im.mark',
'docs': 'https://api.slack.com/methods/im.mark'
},
'im_open': {
'resource': 'im.open',
'docs': 'https://api.slack.com/methods/im.open'
},
# oauth
'oauth_access': {
'resource': 'oauth.access',
'docs': 'https://api.slack.com/methods/oauth.access'
},
# pins
'pins_add': {
'resource': 'pins.add',
'docs': 'https://api.slack.com/methods/pins.add'
},
'pins_list': {
'resource': 'pins.list',
'docs': 'https://api.slack.com/methods/pins.list'
},
'pins_remove': {
'resource': 'pins.remove',
'docs': 'https://api.slack.com/methods/pins.remove'
},
# reactions
'reactions_add': {
'resource': 'reactions.add',
'docs': 'https://api.slack.com/methods/reactions.add'
},
'reactions_get': {
'resource': 'reactions.get',
'docs': 'https://api.slack.com/methods/reactions.get'
},
'reactions_list': {
'resource': 'reactions.list',
'docs': 'https://api.slack.com/methods/reactions.list'
},
'reactions_remove': {
'resource': 'reactions.remove',
'docs': 'https://api.slack.com/methods/reactions.remove'
},
# rtm
'rtm_start': {
'resource': 'rtm.start',
'docs': 'https://api.slack.com/methods/rtm.start'
},
# search
'search_all': {
'resource': 'search.all',
'docs': 'https://api.slack.com/methods/search.all'
},
'search_files': {
'resource': 'search.files',
'docs': 'https://api.slack.com/methods/search.files'
},
'search_messages': {
'resource': 'search.messages',
'docs': 'https://api.slack.com/methods/search.messages'
},
# stars
'stars_add': {
'resource': 'stars.add',
'docs': 'https://api.slack.com/methods/stars.add'
},
'stars_list': {
'resource': 'stars.list',
'docs': 'https://api.slack.com/methods/stars.list'
},
'stars_remove': {
'resource': 'stars.remove',
'docs': 'https://api.slack.com/methods/stars.remove'
},
# team
'team_access_logs': {
'resource': 'team.accessLogs',
'docs': 'https://api.slack.com/methods/team.accessLogs'
},
'team_info': {
'resource': 'team.info',
'docs': 'https://api.slack.com/methods/team.info'
},
# users
'users_get_presence': {
'resource': 'users.getPresence',
'docs': 'https://api.slack.com/methods/users.getPresence'
},
'users_info': {
'resource': 'users.info',
'docs': 'https://api.slack.com/methods/users.info'
},
'users_list': {
'resource': 'users.list',
'docs': 'https://api.slack.com/methods/users.list'
},
'users_set_active': {
'resource': 'users.setActive',
'docs': 'https://api.slack.com/methods/users.setActive'
},
'users_set_presence': {
'resource': 'users.setPresence',
'docs': 'https://api.slack.com/methods/users.setPresence'
},
}
| {
"content_hash": "71138318516d058c724354cbe1cdce60",
"timestamp": "",
"source": "github",
"line_count": 293,
"max_line_length": 67,
"avg_line_length": 29.051194539249146,
"alnum_prop": 0.5400610902255639,
"repo_name": "humrochagf/tapioca-slack",
"id": "deb0de75efc3e8cce1f72ea09cc9d4eb49753c3f",
"size": "8529",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tapioca_slack/resource_mapping.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "1523"
},
{
"name": "Python",
"bytes": "12637"
}
],
"symlink_target": ""
} |
"""Init module for MCT examples."""
| {
"content_hash": "216efdb0deb9abd3b3ceabc46e632a1c",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 35,
"avg_line_length": 36,
"alnum_prop": 0.6666666666666666,
"repo_name": "tensorflow/model-card-toolkit",
"id": "0e805559f82348ec36d965d1e3ccb7f224f20825",
"size": "611",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "model_card_toolkit/documentation/examples/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "80677"
},
{
"name": "Jinja",
"bytes": "13576"
},
{
"name": "Python",
"bytes": "196136"
},
{
"name": "Shell",
"bytes": "1858"
},
{
"name": "Starlark",
"bytes": "3039"
}
],
"symlink_target": ""
} |
import json
import os
import time
from urllib.parse import parse_qs
from collections import OrderedDict
from datetime import datetime, timedelta
from unittest import mock
from django.conf import settings
from django.core import mail
from django.core.cache import cache
from django.core.files import temp
from django.core.files.base import File as DjangoFile
from django.db import connection, reset_queries
from django.test.client import RequestFactory
from django.test.utils import override_settings
from rest_framework.test import APIRequestFactory
from freezegun import freeze_time
from lxml.html import HTMLParser, fromstring
from pyquery import PyQuery as pq
from olympia import amo, core, ratings
from olympia.abuse.models import AbuseReport
from olympia.access import acl
from olympia.access.models import Group, GroupUser
from olympia.accounts.views import API_TOKEN_COOKIE
from olympia.accounts.serializers import BaseUserSerializer
from olympia.activity.models import ActivityLog, DraftComment
from olympia.addons.models import (
Addon, AddonApprovalsCounter, AddonReviewerFlags, AddonUser, DeniedGuid)
from olympia.amo.storage_utils import copy_stored_file
from olympia.amo.templatetags.jinja_helpers import (
absolutify, format_date, format_datetime)
from olympia.amo.tests import (
APITestClient, TestCase, addon_factory, check_links, file_factory, formset,
initial, reverse_ns, user_factory, version_factory)
from olympia.amo.urlresolvers import reverse
from olympia.blocklist.models import Block, BlocklistSubmission
from olympia.constants.promoted import LINE, RECOMMENDED, SPOTLIGHT, STRATEGIC
from olympia.constants.reviewers import (
REVIEWER_DELAYED_REJECTION_PERIOD_DAYS_DEFAULT)
from olympia.constants.scanners import MAD
from olympia.files.models import File, FileValidation, WebextPermission
from olympia.git.utils import AddonGitRepository, extract_version_to_git
from olympia.git.tests.test_utils import apply_changes
from olympia.ratings.models import Rating, RatingFlag
from olympia.reviewers.models import (
AutoApprovalSummary, CannedResponse, ReviewerScore, ReviewerSubscription,
Whiteboard)
from olympia.reviewers.templatetags.jinja_helpers import code_manager_url
from olympia.reviewers.utils import ContentReviewTable
from olympia.reviewers.views import _queue
from olympia.reviewers.serializers import CannedResponseSerializer
from olympia.scanners.models import ScannerResult
from olympia.users.models import UserProfile
from olympia.versions.models import (
ApplicationsVersions, AppVersion, VersionReviewerFlags)
from olympia.zadmin.models import get_config
# Smallest valid PNG payload, used as an image fixture in tests below.
# Decoding the header bytes: IHDR declares width=1, height=1, bit depth 8,
# color type 6 (RGBA); a single zlib-compressed IDAT chunk and IEND follow.
EMPTY_PNG = (
    b'\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x00\x01\x00\x00\x00\x01\x08'
    b'\x06\x00\x00\x00\x1f\x15\xc4\x89\x00\x00\x00\nIDATx\x9cc\x00\x01\x00\x00'
    b'\x05\x00\x01\r\n-\xb4\x00\x00\x00\x00IEND\xaeB`\x82')
class TestRedirectsOldPaths(TestCase):
    """Legacy ``/editors/`` URLs must 301-redirect to ``/reviewers/``."""

    def setUp(self):
        logged_in = user_factory()
        self.client.login(email=logged_in.email)

    def test_redirect_old_queue(self):
        resp = self.client.get('/en-US/editors/queue/new')
        self.assert3xx(resp, '/reviewers/queue/new', status_code=301)

    def test_redirect_old_review_page(self):
        resp = self.client.get('/en-US/editors/review/foobar')
        self.assert3xx(resp, '/reviewers/review/foobar', status_code=301)
class ReviewerTest(TestCase):
    """Shared base for reviewer-tool tests: login helpers + rating factory."""

    fixtures = ['base/users', 'base/approvals']

    def login_as_admin(self):
        # The fixtures provide this account; login must succeed.
        assert self.client.login(email='admin@mozilla.com')

    def login_as_reviewer(self):
        assert self.client.login(email='reviewer@mozilla.com')

    def make_review(self, username='a'):
        """Create and return a Rating attached to a fresh user and add-on."""
        author = UserProfile.objects.create(username=username)
        addon = Addon.objects.create(name='yermom', type=amo.ADDON_EXTENSION)
        return Rating.objects.create(user=author, addon=addon, body='baa')
class TestRatingsModerationLog(ReviewerTest):
    """Exercise the ratings moderation log listing and its filters."""

    def setUp(self):
        super().setUp()
        moderator = user_factory()
        self.grant_permission(moderator, 'Ratings:Moderate')
        self.client.login(email=moderator.email)
        self.url = reverse('reviewers.ratings_moderation_log')
        core.set_user(moderator)

    def test_log(self):
        assert self.client.get(self.url).status_code == 200

    def test_start_filter(self):
        resp = self.client.get(self.url, {'start': '2011-01-01'})
        assert resp.status_code == 200

    def test_enddate_filter(self):
        """
        Make sure that if our end date is 1/1/2011, that we include items from
        1/1/2011. To not do as such would be dishonorable.
        """
        review = self.make_review(username='b')
        entry = ActivityLog.create(
            amo.LOG.APPROVE_RATING, review, review.addon)
        entry.update(created=datetime(2011, 1, 1))

        resp = self.client.get(self.url, {'end': '2011-01-01'})
        assert resp.status_code == 200
        first_cell = pq(resp.content)('tbody td').eq(0)
        assert first_cell.text() == 'Jan. 1, 2011, midnight'

    def test_action_filter(self):
        """
        Based on setup we should see only two items if we filter for deleted
        reviews.
        """
        review = self.make_review()
        for _ in range(2):
            ActivityLog.create(amo.LOG.APPROVE_RATING, review, review.addon)
            ActivityLog.create(amo.LOG.DELETE_RATING, review.id, review.addon)
        resp = self.client.get(self.url, {'filter': 'deleted'})
        assert resp.status_code == 200
        assert pq(resp.content)('tbody tr').length == 2

    def test_no_results(self):
        resp = self.client.get(self.url, {'end': '2004-01-01'})
        assert resp.status_code == 200
        assert b'"no-results"' in resp.content

    def test_moderation_log_detail(self):
        review = self.make_review()
        ActivityLog.create(amo.LOG.APPROVE_RATING, review, review.addon)
        event_id = ActivityLog.objects.moderation_events()[0].id
        detail_url = reverse(
            'reviewers.ratings_moderation_log.detail', args=[event_id])
        assert self.client.get(detail_url).status_code == 200
class TestReviewLog(ReviewerTest):
    """Tests for the add-on review log listing (filters, search, perms)."""

    fixtures = ReviewerTest.fixtures + ['base/addon_3615']

    def setUp(self):
        super(TestReviewLog, self).setUp()
        self.user = UserProfile.objects.get(email='reviewer@mozilla.com')
        self.login_as_reviewer()
        self.url = reverse('reviewers.reviewlog')

    def get_user(self):
        return UserProfile.objects.all()[0]

    def make_approvals(self):
        # One REJECT_VERSION activity per existing add-on, all with the
        # same 'youwin' comment the assertions below look for.
        for addon in Addon.objects.all():
            ActivityLog.create(
                amo.LOG.REJECT_VERSION, addon, addon.current_version,
                user=self.get_user(), details={'comments': 'youwin'})

    def make_an_approval(self, action, comment='youwin', username=None,
                         addon=None):
        """Create a single activity entry with the given action/comment."""
        if username:
            user = UserProfile.objects.get(username=username)
        else:
            user = self.get_user()
        if not addon:
            addon = Addon.objects.all()[0]
        ActivityLog.create(action, addon, addon.current_version, user=user,
                           details={'comments': comment})

    def test_basic(self):
        self.make_approvals()
        response = self.client.get(self.url)
        assert response.status_code == 200
        doc = pq(response.content)
        assert doc('#log-filter button'), 'No filters.'
        # Should have 2 showing.
        rows = doc('tbody tr')
        assert rows.filter(':not(.hide)').length == 2
        assert rows.filter('.hide').eq(0).text() == 'youwin'
        # Should have none showing if the addons are unlisted.
        for addon in Addon.objects.all():
            self.make_addon_unlisted(addon)
        response = self.client.get(self.url)
        assert response.status_code == 200
        doc = pq(response.content)
        assert not doc('tbody tr :not(.hide)')
        # But they should have 2 showing for someone with the right perms.
        self.grant_permission(self.user, 'Addons:ReviewUnlisted')
        with self.assertNumQueries(15):
            # 15 queries:
            # - 2 savepoints because of tests
            # - 2 user and its groups
            # - 2 for motd config and site notice
            # - 2 for collections and addons belonging to the user (menu bar)
            # - 1 count for the pagination
            # - 1 for the activities
            # - 1 for the users for these activities
            # - 1 for the addons for these activities
            # - 1 for the translations of these add-ons
            # - 1 for the versions for these activities
            # - 1 for the translations of these versions
            response = self.client.get(self.url)
        assert response.status_code == 200
        doc = pq(response.content)
        rows = doc('tbody tr')
        assert rows.filter(':not(.hide)').length == 2
        assert rows.filter('.hide').eq(0).text() == 'youwin'
        # Adding more activity should still not cause more queries.
        self.make_an_approval(amo.LOG.APPROVE_CONTENT, addon=addon_factory())
        self.make_an_approval(amo.LOG.REJECT_CONTENT, addon=addon_factory())
        with self.assertNumQueries(15):
            response = self.client.get(self.url)
        assert response.status_code == 200
        doc = pq(response.content)
        rows = doc('tbody tr')
        assert rows.filter(':not(.hide)').length == 4

    def test_xss(self):
        a = Addon.objects.all()[0]
        a.name = '<script>alert("xss")</script>'
        a.save()
        ActivityLog.create(amo.LOG.REJECT_VERSION, a, a.current_version,
                           user=self.get_user(), details={'comments': 'xss!'})
        response = self.client.get(self.url)
        assert response.status_code == 200
        inner_html = pq(response.content)('#log-listing tbody td').eq(1).html()
        # The add-on name must be HTML-escaped in the rendered cell: the
        # escaped form is present, the raw tag is not. (The first assertion
        # previously checked for the raw '<script>', which contradicted the
        # second assertion and could never pass alongside it.)
        assert '&lt;script&gt;' in inner_html
        assert '<script>' not in inner_html

    def test_end_filter(self):
        """
        Let's use today as an end-day filter and make sure we see stuff if we
        filter.
        """
        self.make_approvals()
        # Make sure we show the stuff we just made.
        date = time.strftime('%Y-%m-%d')
        response = self.client.get(self.url, {'end': date})
        assert response.status_code == 200
        doc = pq(response.content)('#log-listing tbody')
        assert doc('tr:not(.hide)').length == 2
        assert doc('tr.hide').eq(0).text() == 'youwin'

    def test_end_filter_wrong(self):
        """
        Let's use today as an end-day filter and make sure we see stuff if we
        filter.
        """
        self.make_approvals()
        response = self.client.get(self.url, {'end': 'wrong!'})
        # If this is broken, we'll get a traceback.
        assert response.status_code == 200
        assert pq(response.content)('#log-listing tr:not(.hide)').length == 3

    def test_start_filter(self):
        with freeze_time('2017-08-01 10:00'):
            self.make_approvals()
        # Make sure we show the stuff we just made.
        response = self.client.get(self.url, {'start': '2017-07-31'})
        assert response.status_code == 200
        doc = pq(response.content)('#log-listing tbody')
        assert doc('tr:not(.hide)').length == 2
        assert doc('tr.hide').eq(0).text() == 'youwin'

    def test_start_default_filter(self):
        with freeze_time('2017-07-31 10:00'):
            self.make_approvals()
        with freeze_time('2017-08-01 10:00'):
            addon = Addon.objects.first()
            ActivityLog.create(
                amo.LOG.REJECT_VERSION, addon, addon.current_version,
                user=self.get_user(), details={'comments': 'youwin'})
        # Make sure the default 'start' to the 1st of a month works properly
        with freeze_time('2017-08-03 11:00'):
            response = self.client.get(self.url)
            assert response.status_code == 200
            doc = pq(response.content)('#log-listing tbody')
            assert doc('tr:not(.hide)').length == 1
            assert doc('tr.hide').eq(0).text() == 'youwin'

    def test_search_comment_exists(self):
        """Search by comment."""
        self.make_an_approval(amo.LOG.REQUEST_ADMIN_REVIEW_CODE,
                              comment='hello')
        response = self.client.get(self.url, {'search': 'hello'})
        assert response.status_code == 200
        assert pq(response.content)(
            '#log-listing tbody tr.hide').eq(0).text() == 'hello'

    def test_search_comment_case_exists(self):
        """Search by comment, with case."""
        self.make_an_approval(amo.LOG.REQUEST_ADMIN_REVIEW_CODE,
                              comment='hello')
        response = self.client.get(self.url, {'search': 'HeLlO'})
        assert response.status_code == 200
        assert pq(response.content)(
            '#log-listing tbody tr.hide').eq(0).text() == 'hello'

    def test_search_comment_doesnt_exist(self):
        """Search by comment, with no results."""
        self.make_an_approval(amo.LOG.REQUEST_ADMIN_REVIEW_CODE,
                              comment='hello')
        response = self.client.get(self.url, {'search': 'bye'})
        assert response.status_code == 200
        assert pq(response.content)('.no-results').length == 1

    def test_search_author_exists(self):
        """Search by author."""
        self.make_approvals()
        self.make_an_approval(
            amo.LOG.REQUEST_ADMIN_REVIEW_CODE, username='reviewer',
            comment='hi')
        response = self.client.get(self.url, {'search': 'reviewer'})
        assert response.status_code == 200
        rows = pq(response.content)('#log-listing tbody tr')
        assert rows.filter(':not(.hide)').length == 1
        assert rows.filter('.hide').eq(0).text() == 'hi'

    def test_search_author_case_exists(self):
        """Search by author, with case."""
        self.make_approvals()
        self.make_an_approval(
            amo.LOG.REQUEST_ADMIN_REVIEW_CODE, username='reviewer',
            comment='hi')
        response = self.client.get(self.url, {'search': 'ReviEwEr'})
        assert response.status_code == 200
        rows = pq(response.content)('#log-listing tbody tr')
        assert rows.filter(':not(.hide)').length == 1
        assert rows.filter('.hide').eq(0).text() == 'hi'

    def test_search_author_doesnt_exist(self):
        """Search by author, with no results."""
        self.make_approvals()
        self.make_an_approval(
            amo.LOG.REQUEST_ADMIN_REVIEW_CODE, username='reviewer')
        response = self.client.get(self.url, {'search': 'wrong'})
        assert response.status_code == 200
        assert pq(response.content)('.no-results').length == 1

    def test_search_addon_exists(self):
        """Search by add-on name."""
        self.make_approvals()
        addon = Addon.objects.all()[0]
        response = self.client.get(self.url, {'search': addon.name})
        assert response.status_code == 200
        tr = pq(response.content)(
            '#log-listing tr[data-addonid="%s"]' % addon.id)
        assert tr.length == 1
        assert tr.siblings('.comments').text() == 'youwin'

    def test_search_addon_case_exists(self):
        """Search by add-on name, with case."""
        self.make_approvals()
        addon = Addon.objects.all()[0]
        response = self.client.get(
            self.url, {'search': str(addon.name).swapcase()})
        assert response.status_code == 200
        tr = pq(response.content)(
            '#log-listing tr[data-addonid="%s"]' % addon.id)
        assert tr.length == 1
        assert tr.siblings('.comments').text() == 'youwin'

    def test_search_addon_doesnt_exist(self):
        """Search by add-on name, with no results."""
        self.make_approvals()
        response = self.client.get(self.url, {'search': 'xxx'})
        assert response.status_code == 200
        assert pq(response.content)('.no-results').length == 1

    def test_addon_missing(self):
        self.make_approvals()
        activity = ActivityLog.objects.latest('pk')
        activity.update(_arguments='')
        response = self.client.get(self.url)
        assert response.status_code == 200
        assert pq(response.content)('#log-listing tr td').eq(1).text() == (
            'Add-on has been deleted.')

    def test_super_review_logs(self):
        self.make_an_approval(amo.LOG.REQUEST_ADMIN_REVIEW_CODE)
        response = self.client.get(self.url)
        assert response.status_code == 200
        assert pq(response.content)('#log-listing tr td a').eq(1).text() == (
            'Admin add-on-review requested')

    def test_comment_logs(self):
        self.make_an_approval(amo.LOG.COMMENT_VERSION)
        response = self.client.get(self.url)
        assert response.status_code == 200
        assert pq(response.content)('#log-listing tr td a').eq(1).text() == (
            'Commented')

    def test_content_approval(self):
        self.make_an_approval(amo.LOG.APPROVE_CONTENT)
        response = self.client.get(self.url)
        assert response.status_code == 200
        link = pq(response.content)('#log-listing tbody td a').eq(1)[0]
        assert link.attrib['href'] == '/en-US/reviewers/review-content/a3615'
        assert link.text_content().strip() == 'Content approved'

    def test_content_rejection(self):
        self.make_an_approval(amo.LOG.REJECT_CONTENT)
        response = self.client.get(self.url)
        assert response.status_code == 200
        link = pq(response.content)('#log-listing tbody td a').eq(1)[0]
        assert link.attrib['href'] == '/en-US/reviewers/review-content/a3615'
        assert link.text_content().strip() == 'Content rejected'

    @freeze_time('2017-08-03')
    def test_review_url(self):
        self.login_as_admin()
        addon = addon_factory()
        unlisted_version = version_factory(
            addon=addon, channel=amo.RELEASE_CHANNEL_UNLISTED)
        ActivityLog.create(
            amo.LOG.APPROVE_VERSION, addon, addon.current_version,
            user=self.get_user(), details={'comments': 'foo'})
        response = self.client.get(self.url)
        assert response.status_code == 200
        url = reverse('reviewers.review', args=[addon.slug])
        link = pq(response.content)(
            '#log-listing tbody tr[data-addonid] a').eq(1)
        assert link.attr('href') == url
        entry = ActivityLog.create(
            amo.LOG.APPROVE_VERSION, addon,
            unlisted_version,
            user=self.get_user(), details={'comments': 'foo'})
        # Force the latest entry to be at the top of the list so that we can
        # pick it more reliably later from the HTML
        entry.update(created=datetime.now() + timedelta(days=1))
        response = self.client.get(self.url)
        url = reverse(
            'reviewers.review',
            args=['unlisted', addon.slug])
        assert pq(response.content)(
            '#log-listing tr td a').eq(1).attr('href') == url

    def test_reviewers_can_only_see_addon_types_they_have_perms_for(self):
        def check_two_showing():
            response = self.client.get(self.url)
            assert response.status_code == 200
            doc = pq(response.content)
            assert doc('#log-filter button'), 'No filters.'
            rows = doc('tbody tr')
            assert rows.filter(':not(.hide)').length == 2
            assert rows.filter('.hide').eq(0).text() == 'youwin'

        def check_none_showing():
            response = self.client.get(self.url)
            assert response.status_code == 200
            doc = pq(response.content)
            assert not doc('tbody tr :not(.hide)')

        self.make_approvals()
        for perm in ['Review', 'ContentReview']:
            GroupUser.objects.filter(user=self.user).delete()
            self.grant_permission(self.user, 'Addons:%s' % perm)
            # Should have 2 showing.
            check_two_showing()
        # Should have none showing if the addons are static themes.
        for addon in Addon.objects.all():
            addon.update(type=amo.ADDON_STATICTHEME)
        for perm in ['Review', 'ContentReview']:
            GroupUser.objects.filter(user=self.user).delete()
            self.grant_permission(self.user, 'Addons:%s' % perm)
            check_none_showing()
        # But they should have 2 showing for someone with the right perms.
        GroupUser.objects.filter(user=self.user).delete()
        self.grant_permission(self.user, 'Addons:ThemeReview')
        check_two_showing()
        # Check if we set them back to extensions theme reviewers can't see 'em
        for addon in Addon.objects.all():
            addon.update(type=amo.ADDON_EXTENSION)
        check_none_showing()
class TestDashboard(TestCase):
def setUp(self):
self.url = reverse('reviewers.dashboard')
self.user = user_factory()
self.client.login(email=self.user.email)
def test_old_temporary_url_redirect(self):
response = self.client.get('/en-US/reviewers/dashboard')
self.assert3xx(
response, reverse('reviewers.dashboard'), status_code=301)
def test_not_a_reviewer(self):
response = self.client.get(self.url)
assert response.status_code == 403
def test_admin_all_permissions(self):
# Create a lot of add-ons to test the queue counts.
# Recommended extensions
addon_factory(
recommended=True,
status=amo.STATUS_NOMINATED,
file_kw={'status': amo.STATUS_AWAITING_REVIEW})
version_factory(
addon=addon_factory(
recommended=True,
version_kw={'recommendation_approved': False}),
recommendation_approved=True,
file_kw={'status': amo.STATUS_AWAITING_REVIEW})
# Nominated and pending themes, not being counted
# as per https://github.com/mozilla/addons-server/issues/11796
addon_factory(
status=amo.STATUS_NOMINATED,
type=amo.ADDON_STATICTHEME,
file_kw={'status': amo.STATUS_AWAITING_REVIEW})
version_factory(
addon=addon_factory(type=amo.ADDON_STATICTHEME),
file_kw={'status': amo.STATUS_AWAITING_REVIEW})
# Nominated and pending extensions.
version_factory(
addon=addon_factory(),
file_kw={'status': amo.STATUS_AWAITING_REVIEW})
AddonReviewerFlags.objects.create(
needs_admin_code_review=True,
addon=addon_factory(
status=amo.STATUS_NOMINATED,
file_kw={'status': amo.STATUS_AWAITING_REVIEW}))
under_admin_review_and_pending = addon_factory()
AddonReviewerFlags.objects.create(
addon=under_admin_review_and_pending,
needs_admin_theme_review=True)
version_factory(
addon=under_admin_review_and_pending,
file_kw={'status': amo.STATUS_AWAITING_REVIEW})
# Auto-approved and Content Review.
addon1 = addon_factory(
file_kw={'is_webextension': True})
AddonApprovalsCounter.reset_for_addon(addon=addon1)
AutoApprovalSummary.objects.create(
version=addon1.current_version, verdict=amo.AUTO_APPROVED)
under_content_review = addon_factory(
file_kw={'is_webextension': True})
AddonApprovalsCounter.reset_for_addon(addon=under_content_review)
AutoApprovalSummary.objects.create(
version=under_content_review.current_version,
verdict=amo.AUTO_APPROVED)
AddonReviewerFlags.objects.create(
addon=under_content_review, needs_admin_content_review=True)
addon2 = addon_factory(
file_kw={'is_webextension': True})
AddonApprovalsCounter.reset_for_addon(addon=addon2)
AutoApprovalSummary.objects.create(
version=addon2.current_version, verdict=amo.AUTO_APPROVED)
AddonReviewerFlags.objects.create(
addon=addon2, needs_admin_content_review=True)
under_code_review = addon_factory(
file_kw={'is_webextension': True})
AddonApprovalsCounter.reset_for_addon(addon=under_code_review)
AutoApprovalSummary.objects.create(
version=under_code_review.current_version,
verdict=amo.AUTO_APPROVED)
AddonReviewerFlags.objects.create(
addon=under_code_review, needs_admin_code_review=True)
admins_group = Group.objects.create(name='Admins', rules='*:*')
GroupUser.objects.create(user=self.user, group=admins_group)
# Pending addon
addon_factory(name='Pending Addön', status=amo.STATUS_NOMINATED)
# Public addon
addon = addon_factory(name='Public Addön', status=amo.STATUS_APPROVED)
# Deleted addon
addon_factory(name='Deleted Addön', status=amo.STATUS_DELETED)
# Mozilla-disabled addon
addon_factory(name='Disabled Addön', status=amo.STATUS_DISABLED)
# Incomplete addon
addon_factory(name='Incomplete Addön', status=amo.STATUS_NULL)
# Invisible (user-disabled) addon
addon_factory(name='Invisible Addön', status=amo.STATUS_APPROVED,
disabled_by_user=True)
pending_rejection = addon_factory(name='Pending Rejection Addôn')
VersionReviewerFlags.objects.create(
version=pending_rejection.current_version,
pending_rejection=datetime.now() + timedelta(days=4)
)
# Rating
rating = Rating.objects.create(
addon=addon, version=addon.current_version, user=self.user,
flag=True, body='This âdd-on sucks!!111', rating=1,
editorreview=True)
rating.ratingflag_set.create()
response = self.client.get(self.url)
assert response.status_code == 200
doc = pq(response.content)
assert len(doc('.dashboard h3')) == 9 # All sections are present.
expected_links = [
reverse('reviewers.queue_recommended'),
reverse('reviewers.queue_extension'),
reverse('reviewers.performance'),
reverse('reviewers.reviewlog'),
'https://wiki.mozilla.org/Add-ons/Reviewers/Guide',
reverse('reviewers.queue_scanners'),
reverse('reviewers.queue_mad'),
reverse('reviewers.queue_auto_approved'),
reverse('reviewers.performance'),
reverse('reviewers.reviewlog'),
'https://wiki.mozilla.org/Add-ons/Reviewers/Guide',
reverse('reviewers.queue_content_review'),
reverse('reviewers.performance'),
reverse('reviewers.queue_theme_nominated'),
reverse('reviewers.queue_theme_pending'),
reverse('reviewers.performance'),
reverse('reviewers.reviewlog'),
'https://wiki.mozilla.org/Add-ons/Reviewers/Themes/Guidelines',
reverse('reviewers.queue_moderated'),
reverse('reviewers.ratings_moderation_log'),
'https://wiki.mozilla.org/Add-ons/Reviewers/Guide/Moderation',
reverse('reviewers.unlisted_queue_all'),
'https://wiki.mozilla.org/Add-ons/Reviewers/Guide',
reverse('reviewers.motd'),
reverse('reviewers.queue_pending_rejection'),
]
links = [link.attrib['href'] for link in doc('.dashboard a')]
assert links == expected_links
# pre-approval addons
assert doc('.dashboard a')[0].text == 'Recommended (2)'
assert doc('.dashboard a')[1].text == 'Other Pending Review (3)'
# auto-approved addons
assert doc('.dashboard a')[7].text == 'Auto Approved Add-ons (4)'
# content review
assert doc('.dashboard a')[11].text == 'Content Review (11)'
# themes
assert doc('.dashboard a')[13].text == 'New (1)'
assert doc('.dashboard a')[14].text == 'Updates (1)'
# user ratings moderation
assert (doc('.dashboard a')[18].text ==
'Ratings Awaiting Moderation (1)')
# admin tools
assert (doc('.dashboard a')[24].text ==
'Add-ons Pending Rejection (1)')
def test_can_see_all_through_reviewer_view_all_permission(self):
self.grant_permission(self.user, 'ReviewerTools:View')
response = self.client.get(self.url)
assert response.status_code == 200
doc = pq(response.content)
assert len(doc('.dashboard h3')) == 9 # All sections are present.
expected_links = [
reverse('reviewers.queue_extension'),
reverse('reviewers.performance'),
reverse('reviewers.reviewlog'),
'https://wiki.mozilla.org/Add-ons/Reviewers/Guide',
reverse('reviewers.queue_scanners'),
reverse('reviewers.queue_mad'),
reverse('reviewers.queue_auto_approved'),
reverse('reviewers.performance'),
reverse('reviewers.reviewlog'),
'https://wiki.mozilla.org/Add-ons/Reviewers/Guide',
reverse('reviewers.queue_content_review'),
reverse('reviewers.performance'),
reverse('reviewers.queue_theme_nominated'),
reverse('reviewers.queue_theme_pending'),
reverse('reviewers.performance'),
reverse('reviewers.reviewlog'),
'https://wiki.mozilla.org/Add-ons/Reviewers/Themes/Guidelines',
reverse('reviewers.queue_moderated'),
reverse('reviewers.ratings_moderation_log'),
'https://wiki.mozilla.org/Add-ons/Reviewers/Guide/Moderation',
reverse('reviewers.unlisted_queue_all'),
'https://wiki.mozilla.org/Add-ons/Reviewers/Guide',
reverse('reviewers.motd'),
reverse('reviewers.queue_pending_rejection'),
]
links = [link.attrib['href'] for link in doc('.dashboard a')]
assert links == expected_links
def test_regular_reviewer(self):
# Create some add-ons to test the queue counts.
addon_factory(
status=amo.STATUS_NOMINATED,
file_kw={'status': amo.STATUS_AWAITING_REVIEW})
version_factory(
addon=addon_factory(),
file_kw={'status': amo.STATUS_AWAITING_REVIEW})
version_factory(
addon=addon_factory(),
file_kw={'status': amo.STATUS_AWAITING_REVIEW})
# These two are under admin review and will be ignored.
under_admin_review = addon_factory(
status=amo.STATUS_NOMINATED,
file_kw={'status': amo.STATUS_AWAITING_REVIEW})
AddonReviewerFlags.objects.create(
addon=under_admin_review, needs_admin_code_review=True)
under_admin_review_and_pending = addon_factory()
AddonReviewerFlags.objects.create(
addon=under_admin_review_and_pending, needs_admin_code_review=True)
version_factory(
addon=under_admin_review_and_pending,
file_kw={'status': amo.STATUS_AWAITING_REVIEW})
# This is a static theme so won't be shown
addon_factory(
status=amo.STATUS_NOMINATED,
type=amo.ADDON_STATICTHEME,
file_kw={'status': amo.STATUS_AWAITING_REVIEW})
# Create an add-on to test the post-review queue count.
# It's under admin content review but that does not have an impact.
addon = addon_factory(
file_kw={'is_webextension': True})
AddonApprovalsCounter.reset_for_addon(addon=addon)
AutoApprovalSummary.objects.create(
version=addon.current_version, verdict=amo.AUTO_APPROVED)
AddonReviewerFlags.objects.create(
addon=addon, needs_admin_content_review=True)
# This one however is under admin code review, it's ignored.
under_code_review = addon_factory(
file_kw={'is_webextension': True})
AddonApprovalsCounter.reset_for_addon(addon=under_code_review)
AutoApprovalSummary.objects.create(
version=under_code_review.current_version,
verdict=amo.AUTO_APPROVED)
AddonReviewerFlags.objects.create(
addon=under_code_review, needs_admin_code_review=True)
# Grant user the permission to see only the legacy/post add-ons section
self.grant_permission(self.user, 'Addons:Review')
# Test.
response = self.client.get(self.url)
assert response.status_code == 200
doc = pq(response.content)
assert len(doc('.dashboard h3')) == 3
expected_links = [
reverse('reviewers.queue_extension'),
reverse('reviewers.performance'),
reverse('reviewers.reviewlog'),
'https://wiki.mozilla.org/Add-ons/Reviewers/Guide',
reverse('reviewers.queue_scanners'),
reverse('reviewers.queue_mad'),
reverse('reviewers.queue_auto_approved'),
reverse('reviewers.performance'),
reverse('reviewers.reviewlog'),
'https://wiki.mozilla.org/Add-ons/Reviewers/Guide',
]
links = [link.attrib['href'] for link in doc('.dashboard a')]
assert links == expected_links
assert doc('.dashboard a')[0].text == 'Other Pending Review (3)'
assert doc('.dashboard a')[6].text == 'Auto Approved Add-ons (1)'
def test_content_reviewer(self):
    """Dashboard for a user holding only Addons:ContentReview shows a
    single section (content review) with the right queue count.

    Add-ons under admin *code* review still count; add-ons under admin
    *content* review are excluded from the count.
    """
    # Create an add-on to test the queue count. It's under admin code
    # review but that does not have an impact.
    addon = addon_factory(
        file_kw={'is_webextension': True})
    AddonApprovalsCounter.reset_for_addon(addon=addon)
    AutoApprovalSummary.objects.create(
        version=addon.current_version, verdict=amo.AUTO_APPROVED)
    AddonReviewerFlags.objects.create(
        addon=addon, needs_admin_code_review=True)
    # This one is under admin *content* review so it's ignored.
    under_content_review = addon_factory(
        file_kw={'is_webextension': True})
    AddonApprovalsCounter.reset_for_addon(addon=under_content_review)
    AutoApprovalSummary.objects.create(
        version=under_content_review.current_version,
        verdict=amo.AUTO_APPROVED)
    AddonReviewerFlags.objects.create(
        addon=under_content_review, needs_admin_content_review=True)
    # Grant user the permission to see only the Content Review section.
    self.grant_permission(self.user, 'Addons:ContentReview')
    # Test.
    response = self.client.get(self.url)
    assert response.status_code == 200
    doc = pq(response.content)
    # Exactly one dashboard section is rendered.
    assert len(doc('.dashboard h3')) == 1
    expected_links = [
        reverse('reviewers.queue_content_review'),
        reverse('reviewers.performance'),
    ]
    links = [link.attrib['href'] for link in doc('.dashboard a')]
    assert links == expected_links
    # Only the first add-on is counted (the second is admin-content-flagged).
    assert doc('.dashboard a')[0].text == 'Content Review (1)'
def test_ratings_moderator(self):
    """Dashboard for a user holding only Ratings:Moderate shows a single
    moderation section whose count reflects the flagged rating."""
    # Create an rating to test the queue count.
    addon = addon_factory()
    user = user_factory()
    rating = Rating.objects.create(
        addon=addon, version=addon.current_version, user=user, flag=True,
        body=u'This âdd-on sucks!!111', rating=1, editorreview=True)
    rating.ratingflag_set.create()
    # Grant user the permission to see only the ratings to review section.
    self.grant_permission(self.user, 'Ratings:Moderate')
    # Test.
    response = self.client.get(self.url)
    assert response.status_code == 200
    doc = pq(response.content)
    # A single dashboard section, with the moderation queue, the log and
    # the external moderation guide link.
    assert len(doc('.dashboard h3')) == 1
    expected_links = [
        reverse('reviewers.queue_moderated'),
        reverse('reviewers.ratings_moderation_log'),
        'https://wiki.mozilla.org/Add-ons/Reviewers/Guide/Moderation',
    ]
    links = [link.attrib['href'] for link in doc('.dashboard a')]
    assert links == expected_links
    assert doc('.dashboard a')[0].text == 'Ratings Awaiting Moderation (1)'
def test_unlisted_reviewer(self):
    """Dashboard for a user holding only Addons:ReviewUnlisted shows a
    single section pointing at the unlisted queue and the wiki guide."""
    # Grant user the permission to see only the unlisted add-ons section.
    self.grant_permission(self.user, 'Addons:ReviewUnlisted')
    # Fetch the dashboard and parse it.
    resp = self.client.get(self.url)
    assert resp.status_code == 200
    page = pq(resp.content)
    # Exactly one section header.
    assert len(page('.dashboard h3')) == 1
    # The section links to the unlisted queue and the external guide.
    expected = [
        reverse('reviewers.unlisted_queue_all'),
        'https://wiki.mozilla.org/Add-ons/Reviewers/Guide',
    ]
    found = [anchor.attrib['href'] for anchor in page('.dashboard a')]
    assert found == expected
def test_static_theme_reviewer(self):
    """Dashboard for a user holding only Addons:ThemeReview shows one
    section with correct counts for the theme queues.

    Themes flagged for admin theme review and non-theme add-ons are both
    excluded from the counts.
    """
    # Create some static themes to test the queue counts.
    addon_factory(
        status=amo.STATUS_NOMINATED,
        type=amo.ADDON_STATICTHEME,
        file_kw={'status': amo.STATUS_AWAITING_REVIEW})
    version_factory(
        addon=addon_factory(type=amo.ADDON_STATICTHEME),
        file_kw={'status': amo.STATUS_AWAITING_REVIEW})
    version_factory(
        addon=addon_factory(type=amo.ADDON_STATICTHEME,),
        file_kw={'status': amo.STATUS_AWAITING_REVIEW})
    # These two are under admin review and will be ignored.
    under_admin_review = addon_factory(
        status=amo.STATUS_NOMINATED,
        type=amo.ADDON_STATICTHEME,
        file_kw={'status': amo.STATUS_AWAITING_REVIEW})
    AddonReviewerFlags.objects.create(
        addon=under_admin_review, needs_admin_theme_review=True)
    under_admin_review_and_pending = addon_factory(
        type=amo.ADDON_STATICTHEME)
    AddonReviewerFlags.objects.create(
        addon=under_admin_review_and_pending,
        needs_admin_theme_review=True)
    version_factory(
        addon=under_admin_review_and_pending,
        file_kw={'status': amo.STATUS_AWAITING_REVIEW})
    # This is an extension so won't be shown
    addon_factory(
        status=amo.STATUS_NOMINATED,
        type=amo.ADDON_EXTENSION,
        file_kw={'status': amo.STATUS_AWAITING_REVIEW})
    # Grant user the permission to see only the legacy add-ons section.
    self.grant_permission(self.user, 'Addons:ThemeReview')
    # Test.
    response = self.client.get(self.url)
    assert response.status_code == 200
    doc = pq(response.content)
    assert len(doc('.dashboard h3')) == 1
    expected_links = [
        reverse('reviewers.queue_theme_nominated'),
        reverse('reviewers.queue_theme_pending'),
        reverse('reviewers.performance'),
        reverse('reviewers.reviewlog'),
        'https://wiki.mozilla.org/Add-ons/Reviewers/Themes/Guidelines',
    ]
    links = [link.attrib['href'] for link in doc('.dashboard a')]
    assert links == expected_links
    # One nominated theme and two pending updates remain after the
    # admin-flagged themes and the extension are filtered out.
    assert doc('.dashboard a')[0].text == 'New (1)'
    assert doc('.dashboard a')[1].text == 'Updates (2)'
def test_legacy_reviewer_and_ratings_moderator(self):
    """A user holding both Addons:Review and Ratings:Moderate sees the
    add-on review sections and the ratings moderation section together,
    with external links opening in a new tab."""
    # Grant user the permission to see both the legacy add-ons and the
    # ratings moderation sections.
    self.grant_permission(self.user, 'Addons:Review')
    self.grant_permission(self.user, 'Ratings:Moderate')
    # Test.
    response = self.client.get(self.url)
    assert response.status_code == 200
    doc = pq(response.content)
    assert len(doc('.dashboard h3')) == 4
    expected_links = [
        reverse('reviewers.queue_extension'),
        reverse('reviewers.performance'),
        reverse('reviewers.reviewlog'),
        'https://wiki.mozilla.org/Add-ons/Reviewers/Guide',
        reverse('reviewers.queue_scanners'),
        reverse('reviewers.queue_mad'),
        reverse('reviewers.queue_auto_approved'),
        reverse('reviewers.performance'),
        reverse('reviewers.reviewlog'),
        'https://wiki.mozilla.org/Add-ons/Reviewers/Guide',
        reverse('reviewers.queue_moderated'),
        reverse('reviewers.ratings_moderation_log'),
        'https://wiki.mozilla.org/Add-ons/Reviewers/Guide/Moderation',
    ]
    links = [link.attrib['href'] for link in doc('.dashboard a')]
    assert links == expected_links
    # Internal queue links have no target attribute (open in same tab).
    assert doc('.dashboard a')[0].text == 'Other Pending Review (0)'
    assert 'target' not in doc('.dashboard a')[0].attrib
    assert doc('.dashboard a')[10].text == (
        'Ratings Awaiting Moderation (0)')
    # NOTE(review): index 5 here is the internal queue_mad link; the
    # surrounding assertions check link 10 (the moderation queue) — looks
    # like this was meant to be index 10 as well. Confirm intended target.
    assert 'target' not in doc('.dashboard a')[5].attrib
    # External links open in a new tab with a safe rel attribute.
    assert doc('.dashboard a')[12].text == 'Moderation Guide'
    assert doc('.dashboard a')[12].attrib['target'] == '_blank'
    assert doc('.dashboard a')[12].attrib['rel'] == 'noopener noreferrer'
def test_view_mobile_site_link_hidden(self):
    """The mobile-site link must never appear on the reviewer dashboard."""
    self.grant_permission(self.user, 'ReviewerTools:View')
    resp = self.client.get(self.url)
    assert resp.status_code == 200
    page = pq(resp.content)
    # pyquery result is falsy when the selector matches nothing.
    assert page('a.mobile-link').length == 0
class QueueTest(ReviewerTest):
    """Base class for review-queue tests.

    Provides:
    - a logged-in reviewer user and a default queue URL in ``setUp()``;
    - ``generate_files()`` to create a canonical set of add-ons in various
      review states (nominated / pending / public);
    - assertion helpers ``_test_queue_layout()`` and ``_test_results()``.

    Subclasses testing the unlisted channel set ``listed = False``.
    """
    fixtures = ['base/users']
    # Channel under test; False switches helpers to the unlisted channel.
    listed = True

    def setUp(self):
        super(QueueTest, self).setUp()
        self.user = UserProfile.objects.get(email='reviewer@mozilla.com')
        self.login_as_reviewer()
        if self.listed is False:
            # Testing unlisted views: needs Addons:ReviewUnlisted perm.
            self.grant_permission(self.user, 'Addons:ReviewUnlisted')
        self.url = reverse('reviewers.queue_extension')
        # name -> addon mapping of everything generate_files() created.
        self.addons = OrderedDict()
        # Add-ons each test expects to see in the queue, in queue order.
        self.expected_addons = []
        self.channel_name = 'listed' if self.listed else 'unlisted'

    def generate_files(self, subset=None, files=None):
        """Create the standard add-on fixtures and return them.

        ``subset`` restricts creation to the named entries; ``files`` can
        replace the default spec entirely. Returns an OrderedDict mapping
        display name to the created add-on; also merged into
        ``self.addons``.
        """
        if subset is None:
            subset = []
        channel = (amo.RELEASE_CHANNEL_LISTED if self.listed else
                   amo.RELEASE_CHANNEL_UNLISTED)
        # Creation/nomination dates are staggered (5 days ago ... 1 day
        # ago) so queue ordering is deterministic.
        files = files or OrderedDict([
            ('Nominated One', {
                'file_kw': {
                    'status': amo.STATUS_AWAITING_REVIEW,
                },
                'version_kw': {
                    'created': self.days_ago(5),
                    'nomination': self.days_ago(5),
                    'version': '0.1',
                },
                'status': amo.STATUS_NOMINATED,
            }),
            ('Nominated Two', {
                'file_kw': {
                    'status': amo.STATUS_AWAITING_REVIEW,
                },
                'version_kw': {
                    'created': self.days_ago(4),
                    'nomination': self.days_ago(4),
                    'version': '0.1',
                },
                'status': amo.STATUS_NOMINATED,
            }),
            ('Pending One', {
                'file_kw': {
                    'status': amo.STATUS_AWAITING_REVIEW,
                },
                'version_kw': {
                    'created': self.days_ago(3),
                    'nomination': self.days_ago(3),
                    'version': '0.1',
                },
                'status': amo.STATUS_APPROVED,
            }),
            ('Pending Two', {
                'file_kw': {
                    'status': amo.STATUS_AWAITING_REVIEW,
                },
                'version_kw': {
                    'created': self.days_ago(2),
                    'nomination': self.days_ago(2),
                    'version': '0.1',
                },
                'status': amo.STATUS_APPROVED,
            }),
            ('Public', {
                'file_kw': {
                    'status': amo.STATUS_APPROVED,
                },
                'version_kw': {
                    'created': self.days_ago(1),
                    'nomination': self.days_ago(1),
                    'version': '0.1',
                },
                'status': amo.STATUS_APPROVED,
            }),
        ])
        results = OrderedDict()
        for name, attrs in files.items():
            if not subset or name in subset:
                version_kw = attrs.pop('version_kw', {})
                version_kw['channel'] = channel
                file_kw = attrs.pop('file_kw', {})
                results[name] = addon_factory(
                    name=name, version_kw=version_kw, file_kw=file_kw,
                    **attrs)
                # status might be wrong because we want to force a
                # particular status without necessarily having the
                # requirements for it. So update it if we didn't end up
                # with the one we want.
                if ('status' in attrs and
                        results[name].status != attrs['status']):
                    results[name].update(status=attrs['status'])
        self.addons.update(results)
        return results

    def generate_file(self, name):
        """Create and return a single named add-on from the standard set."""
        return self.generate_files([name])[name]

    def get_review_data(self):
        # Format: (Created n days ago,
        #          percentages of [< 5, 5-10, >10])
        return ((1, (0, 0, 100)),
                (8, (0, 50, 50)),
                (12, (50, 0, 50)))

    def get_addon_latest_version(self, addon):
        """Return the add-on's latest version in the channel under test."""
        if self.listed:
            channel = amo.RELEASE_CHANNEL_LISTED
        else:
            channel = amo.RELEASE_CHANNEL_UNLISTED
        return addon.find_latest_version(channel=channel)

    def get_expected_addons_by_names(self, names):
        """Generate the standard fixtures and return the named ones,
        asserting every requested name was actually created."""
        expected_addons = []
        files = self.generate_files()
        for name in sorted(names):
            if name in files:
                expected_addons.append(files[name])
        # Make sure all elements have been added
        assert len(expected_addons) == len(names)
        return expected_addons

    def _test_queue_layout(self, name, tab_position, total_addons,
                           total_queues, per_page=None):
        """Assert the queue tab bar and (optionally) pagination summary.

        ``name`` is the expected tab label, ``tab_position`` its index,
        ``total_queues`` the number of tabs, ``total_addons`` the expected
        result total when ``per_page`` is given.
        """
        args = {'per_page': per_page} if per_page else {}
        response = self.client.get(self.url, args)
        assert response.status_code == 200
        doc = pq(response.content)
        links = doc('.tabnav li a')
        link = links.eq(tab_position)
        assert links.length == total_queues
        assert link.text() == '%s' % name
        assert link.attr('href') == self.url
        if per_page:
            # First page shows results 1 through per_page, e.g.
            # "Results 1–2 of 4". (Fixed: the operands were previously
            # transposed, producing "Results 2–1 of 4".)
            assert doc('.data-grid-top .num-results').text() == (
                u'Results 1\u2013%s of %s' % (per_page, total_addons))

    def _test_results(self):
        """Assert the queue page lists exactly ``self.expected_addons``,
        in order, with correct names and review-page links."""
        response = self.client.get(self.url)
        assert response.status_code == 200
        expected = []
        if not len(self.expected_addons):
            raise AssertionError('self.expected_addons was an empty list')
        # We typically don't include the channel name if it's the
        # default one, 'listed'.
        channel = ([] if self.channel_name == 'listed'
                   else [self.channel_name])
        for idx, addon in enumerate(self.expected_addons):
            if self.channel_name == 'unlisted':
                # In unlisted queue we don't display latest version number.
                name = str(addon.name)
            else:
                latest_version = self.get_addon_latest_version(addon)
                assert latest_version
                name = '%s %s' % (str(addon.name), latest_version.version)
            url = reverse('reviewers.review', args=channel + [addon.slug])
            expected.append((name, url))
        doc = pq(response.content)
        links = doc('#addon-queue tr.addon-row td a:not(.app-icon)')
        assert len(links) == len(self.expected_addons)
        check_links(expected, links, verify=False)
        return doc
class TestQueueBasics(QueueTest):
    """Generic behaviour of the extension queue page: access control,
    pagination, sorting, grid headers, row flags, tab permissions and
    query efficiency."""

    def test_only_viewable_by_reviewer(self):
        # Addon reviewer has access.
        response = self.client.get(self.url)
        assert response.status_code == 200
        # Regular user doesn't have access.
        self.client.logout()
        assert self.client.login(email='regular@mozilla.com')
        response = self.client.get(self.url)
        assert response.status_code == 403
        # Theme reviewer doesn't have access either.
        self.client.logout()
        assert self.client.login(email='theme_reviewer@mozilla.com')
        response = self.client.get(self.url)
        assert response.status_code == 403

    def test_invalid_page(self):
        # Out-of-range page numbers fall back to page 1 instead of 404ing.
        response = self.client.get(self.url, {'page': 999})
        assert response.status_code == 200
        assert response.context['page'].number == 1

    def test_invalid_per_page(self):
        # Garbage per_page values are ignored gracefully.
        response = self.client.get(self.url, {'per_page': '<garbage>'})
        # No exceptions:
        assert response.status_code == 200

    @mock.patch.multiple('olympia.reviewers.views',
                         REVIEWS_PER_PAGE_MAX=1,
                         REVIEWS_PER_PAGE=1)
    def test_max_per_page(self):
        # per_page requests above REVIEWS_PER_PAGE_MAX (patched to 1)
        # are capped, so only one result shows despite per_page=2.
        self.generate_files()
        response = self.client.get(self.url, {'per_page': '2'})
        assert response.status_code == 200
        doc = pq(response.content)
        assert doc('.data-grid-top .num-results').text() == (
            u'Results 1\u20131 of 4')

    @mock.patch('olympia.reviewers.views.REVIEWS_PER_PAGE', new=1)
    def test_reviews_per_page(self):
        # Default page size comes from REVIEWS_PER_PAGE (patched to 1).
        self.generate_files()
        response = self.client.get(self.url)
        assert response.status_code == 200
        doc = pq(response.content)
        assert doc('.data-grid-top .num-results').text() == (
            u'Results 1\u20131 of 4')

    def test_grid_headers(self):
        response = self.client.get(self.url)
        assert response.status_code == 200
        doc = pq(response.content)
        expected = [
            'Add-on',
            'Type',
            'Waiting Time',
            'Flags',
        ]
        # First column is the checkbox column, hence the [1:] slice.
        assert [pq(th).text() for th in doc('#addon-queue tr th')[1:]] == (
            expected)

    def test_grid_headers_sort_after_search(self):
        # Column-header sort links must preserve the search parameters and
        # only swap the `sort` value (toggling direction where active).
        params = {'searching': ['True'],
                  'text_query': ['abc'],
                  'addon_type_ids': ['2'],
                  'sort': ['addon_type_id']}
        response = self.client.get(self.url, params)
        assert response.status_code == 200
        tr = pq(response.content)('#addon-queue tr')
        sorts = {
            # Column index => sort.
            1: 'addon_name',  # Add-on.
            2: '-addon_type_id',  # Type.
            3: 'waiting_time_min',  # Waiting Time.
        }
        for idx, sort in sorts.items():
            # Get column link.
            a = tr('th').eq(idx).find('a')
            # Update expected GET parameters with sort type.
            params.update(sort=[sort])
            # Parse querystring of link to make sure `sort` type is correct.
            assert parse_qs(a.attr('href').split('?')[1]) == params

    def test_no_results(self):
        response = self.client.get(self.url)
        assert response.status_code == 200
        assert pq(response.content)('.queue-outer .no-results').length == 1

    def test_no_paginator_when_on_single_page(self):
        response = self.client.get(self.url)
        assert response.status_code == 200
        assert pq(response.content)('.pagination').length == 0

    def test_paginator_when_many_pages(self):
        # 'Pending One' and 'Pending Two' should be the only add-ons in
        # the pending queue, but we'll generate them all for good measure.
        self.generate_files()
        response = self.client.get(self.url, {'per_page': 1})
        assert response.status_code == 200
        doc = pq(response.content)
        # Both top and bottom summaries show the same pagination info.
        assert doc('.data-grid-top .num-results').text() == (
            u'Results 1\u20131 of 4')
        assert doc('.data-grid-bottom .num-results').text() == (
            u'Results 1\u20131 of 4')

    def test_legacy_queue_sort(self):
        sorts = (
            ['age', 'Waiting Time'],
            ['name', 'Add-on'],
            ['type', 'Type'],
        )
        for key, text in sorts:
            response = self.client.get(self.url, {'sort': key})
            assert response.status_code == 200
            # The sorted column header gets the `ordered` class.
            assert pq(response.content)('th.ordered a').text() == text

    def test_flags_is_restart_required(self):
        # The restart-required sprite is shown for flagged files.
        addon = addon_factory(
            status=amo.STATUS_NOMINATED, name='Some Add-on',
            version_kw={'version': '0.1'},
            file_kw={'status': amo.STATUS_AWAITING_REVIEW,
                     'is_restart_required': True})
        r = self.client.get(reverse('reviewers.queue_extension'))
        rows = pq(r.content)('#addon-queue tr.addon-row')
        assert rows.length == 1
        assert rows.attr('data-addon') == str(addon.id)
        assert rows.find('td').eq(1).text() == 'Some Add-on 0.1'
        assert rows.find('.ed-sprite-is_restart_required').length == 1

    def test_flags_is_restart_required_false(self):
        # ... and hidden when the file doesn't require a restart.
        addon = addon_factory(
            status=amo.STATUS_NOMINATED, name='Restartless',
            version_kw={'version': '0.1'},
            file_kw={'status': amo.STATUS_AWAITING_REVIEW,
                     'is_restart_required': False})
        r = self.client.get(reverse('reviewers.queue_extension'))
        rows = pq(r.content)('#addon-queue tr.addon-row')
        assert rows.length == 1
        assert rows.attr('data-addon') == str(addon.id)
        assert rows.find('td').eq(1).text() == 'Restartless 0.1'
        assert rows.find('.ed-sprite-is_restart_required').length == 0

    def test_flags_promoted(self):
        # Add-ons in a promoted group get the matching sprite in the row.
        addon = addon_factory(name='Firefox Fún')
        version_factory(
            addon=addon,
            version='1.1',
            file_kw={'status': amo.STATUS_AWAITING_REVIEW})
        self.make_addon_promoted(addon, LINE)
        r = self.client.get(reverse('reviewers.queue_extension'))
        rows = pq(r.content)('#addon-queue tr.addon-row')
        assert rows.length == 1
        assert rows.attr('data-addon') == str(addon.id)
        assert rows.find('td').eq(1).text() == 'Firefox Fún 1.1'
        assert rows.find('.ed-sprite-promoted-line').length == 1

    def test_tabnav_permissions(self):
        # The visible queue tabs grow as permissions are granted; each
        # grant below adds its queue(s) at a specific position.
        response = self.client.get(self.url)
        assert response.status_code == 200
        doc = pq(response.content)
        links = doc('.tabnav li a').map(lambda i, e: e.attrib['href'])
        expected = [
            reverse('reviewers.queue_extension'),
            reverse('reviewers.queue_scanners'),
            reverse('reviewers.queue_mad'),
            reverse('reviewers.queue_auto_approved'),
        ]
        assert links == expected
        self.grant_permission(self.user, 'Ratings:Moderate')
        response = self.client.get(self.url)
        assert response.status_code == 200
        doc = pq(response.content)
        links = doc('.tabnav li a').map(lambda i, e: e.attrib['href'])
        expected = [
            reverse('reviewers.queue_extension'),
            reverse('reviewers.queue_scanners'),
            reverse('reviewers.queue_mad'),
            reverse('reviewers.queue_moderated'),
            reverse('reviewers.queue_auto_approved'),
        ]
        assert links == expected
        self.grant_permission(self.user, 'Addons:ContentReview')
        response = self.client.get(self.url)
        assert response.status_code == 200
        doc = pq(response.content)
        links = doc('.tabnav li a').map(lambda i, e: e.attrib['href'])
        expected.append(reverse('reviewers.queue_content_review'))
        assert links == expected
        self.grant_permission(self.user, 'Addons:RecommendedReview')
        response = self.client.get(self.url)
        assert response.status_code == 200
        doc = pq(response.content)
        links = doc('.tabnav li a').map(lambda i, e: e.attrib['href'])
        # Recommended queue goes first.
        expected.insert(0, reverse('reviewers.queue_recommended'))
        assert links == expected
        self.grant_permission(self.user, 'Reviews:Admin')
        response = self.client.get(self.url)
        assert response.status_code == 200
        doc = pq(response.content)
        links = doc('.tabnav li a').map(lambda i, e: e.attrib['href'])
        expected.extend([
            reverse('reviewers.queue_pending_rejection'),
        ])
        assert links == expected

    @override_settings(DEBUG=True, LESS_PREPROCESS=False)
    def test_queue_is_never_executing_the_full_query(self):
        """Test that _queue() is paginating without accidentally executing
        the full query."""
        self.grant_permission(self.user, 'Addons:ContentReview')
        request = RequestFactory().get('/')
        request.user = self.user
        self.generate_files()
        qs = Addon.objects.all().no_transforms()
        # Execute the queryset we're passing to the _queue() so that we
        # have the exact query to compare to later (we can't use
        # str(qs.query) to do that, it has subtle differences in
        # representation because of the way params are passed for the
        # lang=lang hack).
        reset_queries()
        list(qs)
        assert len(connection.queries) == 1
        full_query = connection.queries[0]['sql']
        qs = qs.all()  # Trash queryset caching
        reset_queries()
        response = _queue(
            request, ContentReviewTable, 'content_review', qs=qs,
            SearchForm=None)
        # Queries ran, but never the unpaginated full query.
        assert connection.queries
        assert full_query not in [item['sql'] for item in connection.queries]
        assert response.status_code == 200
        doc = pq(response.content)
        assert len(doc('#addon-queue tr.addon-row')) == qs.count()
        request = RequestFactory().get('/', {'per_page': 2})
        request.user = self.user
        qs = qs.all()  # Trash queryset caching
        reset_queries()
        response = _queue(
            request, ContentReviewTable, 'content_review', qs=qs,
            SearchForm=None)
        assert connection.queries
        assert full_query not in [item['sql'] for item in connection.queries]
        assert response.status_code == 200
        doc = pq(response.content)
        assert len(doc('#addon-queue tr.addon-row')) == 2
        request = RequestFactory().get('/', {'per_page': 2, 'page': 2})
        request.user = self.user
        qs = qs.all()  # Trash queryset caching
        reset_queries()
        response = _queue(
            request, ContentReviewTable, 'content_review', qs=qs,
            SearchForm=None)
        assert connection.queries
        assert full_query not in [item['sql'] for item in connection.queries]
        assert response.status_code == 200
        doc = pq(response.content)
        assert len(doc('#addon-queue tr.addon-row')) == 2
class TestThemePendingQueue(QueueTest):
    """Tests for the pending (updates) static-theme queue."""

    def setUp(self):
        super(TestThemePendingQueue, self).setUp()
        # These should be the only ones present.
        self.expected_addons = self.get_expected_addons_by_names(
            ['Pending One', 'Pending Two'])
        # Turn the generated fixtures into static themes.
        Addon.objects.all().update(type=amo.ADDON_STATICTHEME)
        self.url = reverse('reviewers.queue_theme_pending')
        # Replace the default reviewer permissions with theme review only.
        GroupUser.objects.filter(user=self.user).delete()
        self.grant_permission(self.user, 'Addons:ThemeReview')

    def test_results(self):
        self._test_results()

    def test_queue_layout(self):
        self._test_queue_layout('🎨 Updates',
                                tab_position=1, total_addons=2,
                                total_queues=2)

    def test_extensions_filtered_out(self):
        self.addons['Pending Two'].update(type=amo.ADDON_EXTENSION)
        # Extensions shouldn't be shown
        self.expected_addons = [self.addons['Pending One']]
        self._test_results()
        # Even if you have that permission also
        self.grant_permission(self.user, 'Addons:Review')
        self.expected_addons = [self.addons['Pending One']]
        self._test_results()
class TestExtensionQueue(QueueTest):
    """Tests for the 'Other Pending Review' extension queue, in particular
    the rules filtering out add-ons handled by auto-approval."""

    def setUp(self):
        super().setUp()
        # These should be the only ones present.
        self.expected_addons = self.get_expected_addons_by_names(
            ['Pending One', 'Pending Two', 'Nominated One', 'Nominated Two'])
        self.url = reverse('reviewers.queue_extension')

    def test_results(self):
        self._test_results()

    def test_results_two_versions(self):
        # Only the latest awaiting version of each add-on is listed.
        version1 = self.addons['Nominated One'].versions.all()[0]
        version2 = self.addons['Nominated Two'].versions.all()[0]
        file_ = version2.files.get()
        # Create another version for Nominated Two, v0.2, by "cloning"
        # v0.1. Its creation date must be more recent than v0.1 for
        # version ordering to work. Its nomination date must be coherent
        # with that, but also not cause the queue order to change with
        # respect to the other add-ons.
        version2.created = version2.created + timedelta(minutes=1)
        version2.nomination = version2.nomination + timedelta(minutes=1)
        version2.pk = None
        version2.version = '0.2'
        version2.save()
        # Associate v0.2 it with a file.
        file_.pk = None
        file_.version = version2
        file_.save()
        # disable old files like Version.from_upload() would.
        version2.disable_old_files()
        response = self.client.get(self.url)
        assert response.status_code == 200
        expected = [
            ('Nominated One 0.1', reverse('reviewers.review',
                                          args=[version1.addon.slug])),
            ('Nominated Two 0.2', reverse('reviewers.review',
                                          args=[version2.addon.slug])),
        ]
        doc = pq(response.content)
        check_links(
            expected,
            doc('#addon-queue tr.addon-row td a:not(.app-icon)'),
            verify=False)

    def test_queue_layout(self):
        self._test_queue_layout('🛠️ Other Pending Review',
                                tab_position=0, total_addons=4,
                                total_queues=4)

    def test_webextensions_filtered_out_because_of_post_review(self):
        self.addons['Nominated Two'].find_latest_version(
            channel=amo.RELEASE_CHANNEL_LISTED).files.update(
            is_webextension=True)
        self.addons['Pending Two'].find_latest_version(
            channel=amo.RELEASE_CHANNEL_LISTED).files.update(
            is_webextension=True)
        # Webextensions are filtered out from the queue since auto_approve
        # is taking care of them.
        self.expected_addons = [
            self.addons['Nominated One'], self.addons['Pending One']]
        self._test_results()

    def test_webextension_with_auto_approval_disabled_false_filtered_out(
            self):
        # Flags explicitly set to False behave like no flag at all: the
        # webextensions stay out of the queue.
        self.addons['Pending Two'].find_latest_version(
            channel=amo.RELEASE_CHANNEL_LISTED).files.update(
            is_webextension=True)
        AddonReviewerFlags.objects.create(
            addon=self.addons['Pending Two'], auto_approval_disabled=False)
        self.addons['Nominated Two'].find_latest_version(
            channel=amo.RELEASE_CHANNEL_LISTED).files.update(
            is_webextension=True)
        AddonReviewerFlags.objects.create(
            addon=self.addons['Nominated Two'],
            auto_approval_disabled_until_next_approval=False)
        self.expected_addons = [
            self.addons['Nominated One'], self.addons['Pending One']]
        self._test_results()

    def test_webextension_with_auto_approval_disabled_does_show_up(self):
        # Webextensions with auto-approval actually disabled must fall
        # back into the manual review queue.
        self.addons['Pending Two'].find_latest_version(
            channel=amo.RELEASE_CHANNEL_LISTED).files.update(
            is_webextension=True)
        self.addons['Nominated Two'].find_latest_version(
            channel=amo.RELEASE_CHANNEL_LISTED).files.update(
            is_webextension=True)
        self.addons['Pending One'].find_latest_version(
            channel=amo.RELEASE_CHANNEL_LISTED).files.update(
            is_webextension=True)
        AddonReviewerFlags.objects.create(
            addon=self.addons['Pending One'], auto_approval_disabled=True)
        self.addons['Nominated One'].find_latest_version(
            channel=amo.RELEASE_CHANNEL_LISTED).files.update(
            is_webextension=True)
        AddonReviewerFlags.objects.create(
            addon=self.addons['Nominated One'],
            auto_approval_disabled_until_next_approval=True)
        self.expected_addons = [
            self.addons['Nominated One'], self.addons['Pending One']]
        self._test_results()

    def test_webextension_with_auto_approval_delayed_until_past_filtered_out(
            self):
        # A delay that already expired no longer keeps the add-on in the
        # manual queue.
        self.addons['Pending Two'].find_latest_version(
            channel=amo.RELEASE_CHANNEL_LISTED).files.update(
            is_webextension=True)
        AddonReviewerFlags.objects.create(
            addon=self.addons['Pending Two'],
            auto_approval_delayed_until=datetime.now() - timedelta(hours=24))
        self.addons['Nominated Two'].find_latest_version(
            channel=amo.RELEASE_CHANNEL_LISTED).files.update(
            is_webextension=True)
        AddonReviewerFlags.objects.create(
            addon=self.addons['Nominated Two'],
            auto_approval_delayed_until=datetime.now() - timedelta(hours=24))
        self.expected_addons = [
            self.addons['Nominated One'], self.addons['Pending One']]
        self._test_results()

    def test_webextension_with_auto_approval_delayed_until_does_show_up(self):
        # A delay still in the future keeps the add-on in the manual queue.
        self.addons['Pending Two'].find_latest_version(
            channel=amo.RELEASE_CHANNEL_LISTED).files.update(
            is_webextension=True)
        self.addons['Nominated Two'].find_latest_version(
            channel=amo.RELEASE_CHANNEL_LISTED).files.update(
            is_webextension=True)
        self.addons['Pending One'].find_latest_version(
            channel=amo.RELEASE_CHANNEL_LISTED).files.update(
            is_webextension=True)
        AddonReviewerFlags.objects.create(
            addon=self.addons['Pending One'],
            auto_approval_delayed_until=datetime.now() + timedelta(hours=24))
        self.addons['Nominated One'].find_latest_version(
            channel=amo.RELEASE_CHANNEL_LISTED).files.update(
            is_webextension=True)
        AddonReviewerFlags.objects.create(
            addon=self.addons['Nominated One'],
            auto_approval_delayed_until=datetime.now() + timedelta(hours=24))
        self.expected_addons = [
            self.addons['Nominated One'], self.addons['Pending One']]
        self._test_results()

    def test_promoted_addon_in_pre_review_group_does_show_up(self):
        self.addons['Pending Two'].find_latest_version(
            channel=amo.RELEASE_CHANNEL_LISTED).files.update(
            is_webextension=True)
        self.addons['Nominated Two'].find_latest_version(
            channel=amo.RELEASE_CHANNEL_LISTED).files.update(
            is_webextension=True)
        self.addons['Pending One'].find_latest_version(
            channel=amo.RELEASE_CHANNEL_LISTED).files.update(
            is_webextension=True)
        self.addons['Nominated One'].find_latest_version(
            channel=amo.RELEASE_CHANNEL_LISTED).files.update(
            is_webextension=True)
        self.make_addon_promoted(self.addons['Pending One'], group=LINE)
        self.make_addon_promoted(
            self.addons['Nominated One'], group=SPOTLIGHT)
        # STRATEGIC isn't a pre_review group so won't show up
        self.make_addon_promoted(
            self.addons['Nominated Two'], group=STRATEGIC)
        # RECOMMENDED is pre_review, but is handled in it's own queue
        self.make_addon_promoted(
            self.addons['Pending Two'], group=RECOMMENDED)
        self.expected_addons = [
            self.addons['Nominated One'], self.addons['Pending One']]
        self._test_results()

    def test_static_theme_filtered_out(self):
        self.addons['Pending Two'].update(type=amo.ADDON_STATICTHEME)
        self.addons['Nominated Two'].update(type=amo.ADDON_STATICTHEME)
        # Static Theme shouldn't be shown
        self.expected_addons = [
            self.addons['Nominated One'], self.addons['Pending One']]
        self._test_results()
        # Even if you have that permission also
        self.grant_permission(self.user, 'Addons:ThemeReview')
        self._test_results()

    def test_search_plugins_filtered_out(self):
        self.addons['Nominated Two'].update(type=amo.ADDON_SEARCH)
        self.addons['Pending Two'].update(type=amo.ADDON_SEARCH)
        # search extensions are filtered out from the queue since
        # auto_approve is taking care of them.
        self.expected_addons = [
            self.addons['Nominated One'], self.addons['Pending One']]
        self._test_results()

    def test_pending_rejection_filtered_out(self):
        # Versions already scheduled for rejection don't need review.
        VersionReviewerFlags.objects.create(
            version=self.addons['Nominated Two'].current_version,
            pending_rejection=datetime.now())
        VersionReviewerFlags.objects.create(
            version=self.addons['Pending Two'].current_version,
            pending_rejection=datetime.now())
        self.expected_addons = [
            self.addons['Nominated One'], self.addons['Pending One']]
        self._test_results()
class TestThemeNominatedQueue(QueueTest):
    """Tests for the nominated (new) static-theme queue."""

    def setUp(self):
        super(TestThemeNominatedQueue, self).setUp()
        # These should be the only ones present.
        self.expected_addons = self.get_expected_addons_by_names(
            ['Nominated One', 'Nominated Two'])
        # Turn the generated fixtures into static themes.
        Addon.objects.all().update(type=amo.ADDON_STATICTHEME)
        self.url = reverse('reviewers.queue_theme_nominated')
        # Replace the default reviewer permissions with theme review only.
        GroupUser.objects.filter(user=self.user).delete()
        self.grant_permission(self.user, 'Addons:ThemeReview')

    def test_results(self):
        self._test_results()

    def test_results_two_versions(self):
        # Only the latest awaiting version of each theme is listed.
        version1 = self.addons['Nominated One'].versions.all()[0]
        version2 = self.addons['Nominated Two'].versions.all()[0]
        file_ = version2.files.get()
        # Create another version for Nominated Two, v0.2, by "cloning"
        # v0.1. Its creation date must be more recent than v0.1 for
        # version ordering to work. Its nomination date must be coherent
        # with that, but also not cause the queue order to change with
        # respect to the other add-ons.
        version2.created = version2.created + timedelta(minutes=1)
        version2.nomination = version2.nomination + timedelta(minutes=1)
        version2.pk = None
        version2.version = '0.2'
        version2.save()
        # Associate v0.2 it with a file.
        file_.pk = None
        file_.version = version2
        file_.save()
        # disable old files like Version.from_upload() would.
        version2.disable_old_files()
        response = self.client.get(self.url)
        assert response.status_code == 200
        expected = [
            ('Nominated One 0.1', reverse('reviewers.review',
                                          args=[version1.addon.slug])),
            ('Nominated Two 0.2', reverse('reviewers.review',
                                          args=[version2.addon.slug])),
        ]
        doc = pq(response.content)
        check_links(
            expected,
            doc('#addon-queue tr.addon-row td a:not(.app-icon)'),
            verify=False)

    def test_queue_layout(self):
        self._test_queue_layout('🎨 New',
                                tab_position=0, total_addons=2,
                                total_queues=2)

    def test_static_theme_filtered_out(self):
        self.addons['Nominated Two'].update(type=amo.ADDON_EXTENSION)
        # Static Theme shouldn't be shown
        self.expected_addons = [self.addons['Nominated One']]
        self._test_results()
        # Even if you have that permission also
        self.grant_permission(self.user, 'Addons:Review')
        self.expected_addons = [self.addons['Nominated One']]
        self._test_results()
class TestRecommendedQueue(QueueTest):
    """Tests for the recommended add-ons review queue."""

    def setUp(self):
        super().setUp()
        self.grant_permission(self.user, 'Addons:RecommendedReview')
        # These should be the only ones present.
        self.expected_addons = self.get_expected_addons_by_names(
            ['Pending One', 'Pending Two', 'Nominated One', 'Nominated Two'])
        # Promote all of them into the RECOMMENDED group so they land in
        # this queue.
        for addon in self.expected_addons:
            self.make_addon_promoted(addon, RECOMMENDED)
        self.url = reverse('reviewers.queue_recommended')

    def test_results(self):
        self._test_results()

    def test_results_two_versions(self):
        # Only the latest awaiting version of each add-on is listed.
        version1 = self.addons['Nominated One'].versions.all()[0]
        version2 = self.addons['Nominated Two'].versions.all()[0]
        file_ = version2.files.get()
        # Create another version for Nominated Two, v0.2, by "cloning"
        # v0.1. Its creation date must be more recent than v0.1 for
        # version ordering to work. Its nomination date must be coherent
        # with that, but also not cause the queue order to change with
        # respect to the other add-ons.
        version2.created = version2.created + timedelta(minutes=1)
        version2.nomination = version2.nomination + timedelta(minutes=1)
        version2.pk = None
        version2.version = '0.2'
        version2.save()
        # Associate v0.2 it with a file.
        file_.pk = None
        file_.version = version2
        file_.save()
        # disable old files like Version.from_upload() would.
        version2.disable_old_files()
        response = self.client.get(self.url)
        assert response.status_code == 200
        expected = [
            ('Nominated One 0.1', reverse('reviewers.review',
                                          args=[version1.addon.slug])),
            ('Nominated Two 0.2', reverse('reviewers.review',
                                          args=[version2.addon.slug])),
        ]
        doc = pq(response.content)
        check_links(
            expected,
            doc('#addon-queue tr.addon-row td a:not(.app-icon)'),
            verify=False)

    def test_queue_layout(self):
        self._test_queue_layout(
            'Recommended', tab_position=0, total_addons=4, total_queues=5)

    def test_nothing_recommended_filtered_out(self):
        # Unlike the regular extension queue, recommended add-ons are not
        # filtered out for being webextensions or auto-approvable.
        version = self.addons['Nominated One'].find_latest_version(
            channel=amo.RELEASE_CHANNEL_LISTED)
        version.files.update(is_webextension=True)
        version = self.addons['Pending Two'].find_latest_version(
            channel=amo.RELEASE_CHANNEL_LISTED)
        version.files.update(is_webextension=True)
        AddonReviewerFlags.objects.create(
            addon=self.addons['Pending Two'], auto_approval_disabled=False)
        self._test_results()
class TestModeratedQueue(QueueTest):
    """Tests for the ratings moderation queue, where ratings flagged by
    users are kept, deleted or skipped by moderators."""

    fixtures = ['base/users', 'ratings/dev-reply']

    def setUp(self):
        super().setUp()
        self.url = reverse('reviewers.queue_moderated')

        # Flag the fixture rating as spam so it lands in the queue.
        RatingFlag.objects.create(
            rating_id=218468, user=self.user, flag=RatingFlag.SPAM)
        Rating.objects.get(pk=218468).update(editorreview=True)

        assert RatingFlag.objects.filter(flag=RatingFlag.SPAM).count() == 1
        assert Rating.objects.filter(editorreview=True).count() == 1

        # Restrict the user to ratings moderation only.
        self.user.groupuser_set.all().delete()  # Remove all permissions
        self.grant_permission(self.user, 'Ratings:Moderate')

    def test_results(self):
        response = self.client.get(self.url)
        assert response.status_code == 200
        doc = pq(response.content)('#reviews-flagged')

        rows = doc('.review-flagged:not(.review-saved)')
        assert rows.length == 1
        assert rows.find('h3').text() == ''

        # Default is "Skip."
        assert doc('#id_form-0-action_1:checked').length == 1

        flagged = doc('.reviews-flagged-reasons span.light').text()
        reviewer = RatingFlag.objects.all()[0].user.name
        assert flagged.startswith('Flagged by %s' % reviewer), (
            'Unexpected text: %s' % flagged)

        # Rename the add-on and check the heading picks up the new name.
        addon = Addon.objects.get(id=1865)
        addon.name = u'náme'
        addon.save()
        response = self.client.get(self.url)
        doc = pq(response.content)('#reviews-flagged')

        rows = doc('.review-flagged:not(.review-saved)')
        assert rows.length == 1
        assert rows.find('h3').text() == u'náme'

    def setup_actions(self, action):
        """Submit the moderation formset with `action` for the first row."""
        response = self.client.get(self.url)
        assert response.status_code == 200

        form_0_data = initial(response.context['reviews_formset'].forms[0])
        assert Rating.objects.filter(addon=1865).count() == 2
        formset_data = formset(form_0_data)
        formset_data['form-0-action'] = action

        response = self.client.post(self.url, formset_data)
        self.assert3xx(response, self.url)

    def test_skip(self):
        self.setup_actions(ratings.REVIEW_MODERATE_SKIP)

        # Make sure it's still there.
        response = self.client.get(self.url)
        assert response.status_code == 200
        doc = pq(response.content)
        rows = doc('#reviews-flagged .review-flagged:not(.review-saved)')
        assert rows.length == 1

    def test_skip_score(self):
        # Skipping must not award any reviewer points.
        self.setup_actions(ratings.REVIEW_MODERATE_SKIP)
        assert ReviewerScore.objects.filter(
            note_key=amo.REVIEWED_ADDON_REVIEW).count() == 0

    def get_logs(self, action):
        """Return the ActivityLog queryset recorded for `action`."""
        return ActivityLog.objects.filter(action=action.id)

    def test_remove(self):
        """Make sure the reviewer tools can delete a review."""
        self.setup_actions(ratings.REVIEW_MODERATE_DELETE)
        logs = self.get_logs(amo.LOG.DELETE_RATING)
        assert logs.count() == 1

        # Make sure it's removed from the queue.
        response = self.client.get(self.url)
        assert response.status_code == 200
        assert pq(response.content)('#reviews-flagged .no-results').length == 1

        response = self.client.get(reverse('reviewers.ratings_moderation_log'))
        assert pq(response.content)('table .more-details').attr('href') == (
            reverse('reviewers.ratings_moderation_log.detail',
                    args=[logs[0].id]))

        # Make sure it was actually deleted.
        assert Rating.objects.filter(addon=1865).count() == 1
        # But make sure it wasn't *actually* deleted (only soft-deleted).
        assert Rating.unfiltered.filter(addon=1865).count() == 2

    def test_remove_fails_for_own_addon(self):
        """
        Make sure the reviewer tools can't delete a review for an
        add-on owned by the user.
        """
        addon = Addon.objects.get(pk=1865)
        user = UserProfile.objects.get(email='reviewer@mozilla.com')
        AddonUser(addon=addon, user=user).save()

        # Make sure the initial count is as expected
        assert Rating.objects.filter(addon=1865).count() == 2

        self.setup_actions(ratings.REVIEW_MODERATE_DELETE)
        logs = self.get_logs(amo.LOG.DELETE_RATING)
        assert logs.count() == 0

        # Make sure it's not removed from the queue.
        response = self.client.get(self.url)
        assert response.status_code == 200
        assert pq(response.content)('#reviews-flagged .no-results').length == 0

        # Make sure it was not actually deleted.
        assert Rating.objects.filter(addon=1865).count() == 2

    def test_remove_score(self):
        # Deleting a flagged rating awards reviewer points.
        self.setup_actions(ratings.REVIEW_MODERATE_DELETE)
        assert ReviewerScore.objects.filter(
            note_key=amo.REVIEWED_ADDON_REVIEW).count() == 1

    def test_keep(self):
        """Make sure the reviewer tools can remove flags and keep a review."""
        self.setup_actions(ratings.REVIEW_MODERATE_KEEP)
        logs = self.get_logs(amo.LOG.APPROVE_RATING)
        assert logs.count() == 1

        # Make sure it's removed from the queue.
        response = self.client.get(self.url)
        assert response.status_code == 200
        assert pq(response.content)('#reviews-flagged .no-results').length == 1

        rating = Rating.objects.filter(addon=1865)

        # Make sure it's NOT deleted...
        assert rating.count() == 2

        # ...but it's no longer flagged.
        assert rating.filter(editorreview=1).count() == 0

    def test_keep_score(self):
        # Keeping a flagged rating also awards reviewer points.
        self.setup_actions(ratings.REVIEW_MODERATE_KEEP)
        assert ReviewerScore.objects.filter(
            note_key=amo.REVIEWED_ADDON_REVIEW).count() == 1

    def test_queue_layout(self):
        # From the fixtures we already have 2 reviews, one is flagged. We add
        # a bunch of reviews from different scenarios and make sure they don't
        # count towards the total.

        # Add a review associated with an normal addon
        rating = Rating.objects.create(
            addon=addon_factory(), user=user_factory(),
            body='show me', editorreview=True)
        RatingFlag.objects.create(rating=rating)

        # Add a review associated with an incomplete addon
        rating = Rating.objects.create(
            addon=addon_factory(status=amo.STATUS_NULL), user=user_factory(),
            body='dont show me', editorreview=True)
        RatingFlag.objects.create(rating=rating)

        # Add a review associated to an unlisted version. The rating must
        # belong to the add-on owning the unlisted version (it previously
        # pointed at an unrelated addon_factory() add-on, which meant the
        # unlisted-version scenario wasn't really exercised).
        addon = addon_factory()
        version = version_factory(
            addon=addon, channel=amo.RELEASE_CHANNEL_UNLISTED)
        rating = Rating.objects.create(
            addon=addon, version=version, user=user_factory(),
            body='dont show me either', editorreview=True)
        RatingFlag.objects.create(rating=rating)

        self._test_queue_layout('Rating Reviews',
                                tab_position=0, total_addons=2, total_queues=1)

    def test_no_reviews(self):
        Rating.objects.all().delete()

        response = self.client.get(self.url)
        assert response.status_code == 200
        doc = pq(response.content)('#reviews-flagged')

        assert doc('.no-results').length == 1
        assert doc('.review-saved button').length == 1  # Show only one button.

    def test_do_not_show_reviews_for_non_public_addons(self):
        Addon.objects.all().update(status=amo.STATUS_NULL)

        response = self.client.get(self.url)
        assert response.status_code == 200
        doc = pq(response.content)('#reviews-flagged')

        # There should be no results since all add-ons are not public.
        assert doc('.no-results').length == 1

    def test_do_not_show_reviews_for_unlisted_addons(self):
        for addon in Addon.objects.all():
            self.make_addon_unlisted(addon)

        response = self.client.get(self.url)
        assert response.status_code == 200
        doc = pq(response.content)('#reviews-flagged')

        # There should be no results since all add-ons are unlisted.
        assert doc('.no-results').length == 1
class TestUnlistedAllList(QueueTest):
    """Queue listing every unlisted add-on, newest id first."""

    listed = False

    def setUp(self):
        super().setUp()
        self.url = reverse('reviewers.unlisted_queue_all')
        self.generate_files()
        # We should have all add-ons, sorted by id desc.
        names = ['Public', 'Pending Two', 'Pending One',
                 'Nominated Two', 'Nominated One']
        self.expected_addons = [self.addons[name] for name in names]

    def test_results(self):
        self._test_results()

    def test_review_notes_json(self):
        """The review-text endpoint serves an activity log's comments."""
        addon = self.expected_addons[0]
        latest_version = addon.find_latest_version(
            channel=amo.RELEASE_CHANNEL_UNLISTED)
        log = ActivityLog.create(
            amo.LOG.APPROVE_VERSION, latest_version, addon,
            user=UserProfile.objects.get(pk=999),
            details={'comments': 'stish goin` down son'})
        response = self.client.get(
            reverse('reviewers.queue_review_text') + str(log.id))
        assert response.status_code == 200
        assert json.loads(response.content) == {
            'reviewtext': 'stish goin` down son'}
class TestAutoApprovedQueue(QueueTest):
    """Queue of auto-approved add-ons awaiting confirmation by a human
    reviewer, ordered by weight then by staleness."""

    def setUp(self):
        super(TestAutoApprovedQueue, self).setUp()
        self.url = reverse('reviewers.queue_auto_approved')

    def login_with_permission(self):
        """Log the client in as a user holding only Addons:Review."""
        user = UserProfile.objects.get(email='reviewer@mozilla.com')
        self.user.groupuser_set.all().delete()  # Remove all permissions
        self.grant_permission(user, 'Addons:Review')
        self.client.login(email=user.email)

    def get_addon_latest_version(self, addon):
        """Method used by _test_results() to fetch the version that the queue
        is supposed to display. Overridden here because in our case, it's not
        necessarily the latest available version - we display the current
        public version instead (which is not guaranteed to be the latest
        auto-approved one, but good enough) for this page."""
        return addon.current_version

    def generate_files(self):
        """Generate add-ons needed for these tests.

        The extra_* add-ons must not appear in the queue; the others are
        stored in self.expected_addons in expected queue order.
        """
        # Has not been auto-approved.
        extra_addon = addon_factory(name=u'Extra Addôn 1')
        AutoApprovalSummary.objects.create(
            version=extra_addon.current_version, verdict=amo.NOT_AUTO_APPROVED)
        # Has not been auto-approved either, only dry run.
        extra_addon2 = addon_factory(name=u'Extra Addôn 2')
        AutoApprovalSummary.objects.create(
            version=extra_addon2.current_version,
            verdict=amo.WOULD_HAVE_BEEN_AUTO_APPROVED)
        # Has been auto-approved, but that auto-approval has been confirmed by
        # a human already.
        extra_addon3 = addon_factory(name=u'Extra Addôn 3')
        extra_summary3 = AutoApprovalSummary.objects.create(
            version=extra_addon3.current_version,
            verdict=amo.AUTO_APPROVED, confirmed=True)
        AddonApprovalsCounter.objects.create(
            addon=extra_addon3, counter=1,
            last_human_review=extra_summary3.created)
        # Has been auto-approved and reviewed by a human before.
        addon1 = addon_factory(name=u'Addôn 1')
        AutoApprovalSummary.objects.create(
            version=addon1.current_version, verdict=amo.AUTO_PROVED
            if False else amo.AUTO_APPROVED)
        AddonApprovalsCounter.objects.create(
            addon=addon1, counter=1, last_human_review=self.days_ago(42))
        # Has been auto-approved twice, last_human_review is somehow None,
        # the 'created' date will be used to order it (older is higher).
        addon2 = addon_factory(name=u'Addôn 2')
        addon2.update(created=self.days_ago(10))
        AutoApprovalSummary.objects.create(
            version=addon2.current_version, verdict=amo.AUTO_APPROVED)
        AddonApprovalsCounter.objects.create(
            addon=addon2, counter=1, last_human_review=None)
        addon2_version2 = version_factory(addon=addon2)
        AutoApprovalSummary.objects.create(
            version=addon2_version2, verdict=amo.AUTO_APPROVED)
        # Has been auto-approved and never been seen by a human,
        # the 'created' date will be used to order it (newer is lower).
        addon3 = addon_factory(name=u'Addôn 3')
        addon3.update(created=self.days_ago(2))
        AutoApprovalSummary.objects.create(
            version=addon3.current_version, verdict=amo.AUTO_APPROVED)
        AddonApprovalsCounter.objects.create(
            addon=addon3, counter=1, last_human_review=None)
        # Has been auto-approved, should be first because of its weight.
        addon4 = addon_factory(name=u'Addôn 4')
        addon4.update(created=self.days_ago(14))
        AutoApprovalSummary.objects.create(
            version=addon4.current_version, verdict=amo.AUTO_APPROVED,
            weight=500)
        AddonApprovalsCounter.objects.create(
            addon=addon4, counter=0, last_human_review=self.days_ago(1))
        self.expected_addons = [addon4, addon2, addon3, addon1]

    def test_only_viewable_with_specific_permission(self):
        # content reviewer does not have access.
        self.user.groupuser_set.all().delete()  # Remove all permissions
        self.grant_permission(self.user, 'Addons:ContentReview')
        response = self.client.get(self.url)
        assert response.status_code == 403

        # Regular user doesn't have access.
        self.client.logout()
        assert self.client.login(email='regular@mozilla.com')
        response = self.client.get(self.url)
        assert response.status_code == 403

    def test_results(self):
        self.login_with_permission()
        self.generate_files()
        with self.assertNumQueries(16):
            # - 2 for savepoints because we're in tests
            # - 2 for user/groups
            # - 1 for the current queue count for pagination purposes
            # - 3 for the addons in the queues and their files (regardless of
            #     how many are in the queue - that's the important bit)
            # - 2 for config items (motd / site notice)
            # - 2 for my add-ons / my collection in user menu
            # - 4 for reviewer scores and user stuff displayed above the queue
            self._test_results()

    def test_results_weights(self):
        """Each queue row gets a CSS class matching its risk bucket."""
        addon1 = addon_factory(name=u'Addôn 1')
        AutoApprovalSummary.objects.create(
            version=addon1.current_version, verdict=amo.AUTO_APPROVED,
            weight=amo.POST_REVIEW_WEIGHT_HIGHEST_RISK + 1)
        AddonApprovalsCounter.reset_for_addon(addon1)

        addon2 = addon_factory(name=u'Addôn 2')
        AutoApprovalSummary.objects.create(
            version=addon2.current_version, verdict=amo.AUTO_APPROVED,
            weight=amo.POST_REVIEW_WEIGHT_HIGH_RISK + 1)
        AddonApprovalsCounter.reset_for_addon(addon2)

        addon3 = addon_factory(name=u'Addôn 3')
        AutoApprovalSummary.objects.create(
            version=addon3.current_version, verdict=amo.AUTO_APPROVED,
            weight=amo.POST_REVIEW_WEIGHT_MEDIUM_RISK + 1)
        AddonApprovalsCounter.reset_for_addon(addon3)

        addon4 = addon_factory(name=u'Addôn 4')
        AutoApprovalSummary.objects.create(
            version=addon4.current_version, verdict=amo.AUTO_APPROVED,
            weight=1)
        AddonApprovalsCounter.reset_for_addon(addon4)

        self.expected_addons = [addon1, addon2, addon3, addon4]
        self.login_with_permission()
        doc = self._test_results()
        expected = ['risk-highest', 'risk-high', 'risk-medium', 'risk-low']
        classnames = [
            item.attrib['class'] for item in doc('.addon-row td:eq(4) span')]
        assert expected == classnames

    def test_queue_layout(self):
        self.login_with_permission()
        self.generate_files()
        self._test_queue_layout(
            'Auto Approved', tab_position=3, total_addons=4, total_queues=4,
            per_page=1)

    def test_pending_rejection_filtered_out(self):
        # Versions pending rejection are excluded from the queue.
        self.login_with_permission()
        self.generate_files()
        VersionReviewerFlags.objects.create(
            version=self.expected_addons[0].current_version,
            pending_rejection=datetime.now())
        VersionReviewerFlags.objects.create(
            version=self.expected_addons[1].current_version,
            pending_rejection=datetime.now())
        self.expected_addons = self.expected_addons[2:]
        self._test_results()

    def test_flags_promoted(self):
        """Promoted add-ons get a promoted-group sprite in their row."""
        self.login_with_permission()
        addon = addon_factory(name=u'Addôn', version_kw={'version': '77.6'})
        AutoApprovalSummary.objects.create(
            version=addon.current_version, verdict=amo.AUTO_APPROVED)
        AddonApprovalsCounter.objects.create(
            addon=addon, counter=1, last_human_review=self.days_ago(42))
        self.make_addon_promoted(addon, STRATEGIC)

        r = self.client.get(self.url)

        rows = pq(r.content)('#addon-queue tr.addon-row')
        assert rows.length == 1
        assert rows.attr('data-addon') == str(addon.id)
        assert rows.find('td').eq(1).text() == 'Addôn 77.6'
        assert rows.find('.ed-sprite-promoted-strategic').length == 1
class TestContentReviewQueue(QueueTest):
    """Queue of add-ons awaiting a content review, which requires the
    Addons:ContentReview permission."""

    def setUp(self):
        super(TestContentReviewQueue, self).setUp()
        self.url = reverse('reviewers.queue_content_review')
        self.channel_name = 'content'

    def login_with_permission(self):
        """Log in as a user holding only Addons:ContentReview; return it."""
        user = UserProfile.objects.get(email='reviewer@mozilla.com')
        self.user.groupuser_set.all().delete()  # Remove all permissions
        self.grant_permission(user, 'Addons:ContentReview')
        self.client.login(email=user.email)
        return user

    def get_addon_latest_version(self, addon):
        """Method used by _test_results() to fetch the version that the queue
        is supposed to display. Overridden here because in our case, it's not
        necessarily the latest available version - we display the current
        public version instead (which is not guaranteed to be the latest
        auto-approved one, but good enough) for this page."""
        return addon.current_version

    def generate_files(self):
        """Generate add-ons needed for these tests.

        The extra_* add-ons must stay out of the queue; self.expected_addons
        holds the ones that should appear, in expected order.
        """
        # The extra_ addons should not appear in the queue.
        # This first add-on has been content reviewed long ago.
        extra_addon1 = addon_factory(name=u'Extra Addön 1')
        AutoApprovalSummary.objects.create(
            version=extra_addon1.current_version,
            verdict=amo.AUTO_APPROVED, confirmed=True)
        AddonApprovalsCounter.objects.create(
            addon=extra_addon1, last_content_review=self.days_ago(370))

        # This one is quite similar, except its last content review is even
        # older..
        extra_addon2 = addon_factory(name=u'Extra Addön 2')
        AutoApprovalSummary.objects.create(
            version=extra_addon2.current_version,
            verdict=amo.AUTO_APPROVED, confirmed=True)
        AddonApprovalsCounter.objects.create(
            addon=extra_addon2, last_content_review=self.days_ago(842))

        # Has been auto-approved, but that content has been approved by
        # a human already.
        extra_addon3 = addon_factory(name=u'Extra Addôn 3')
        AutoApprovalSummary.objects.create(
            version=extra_addon3.current_version,
            verdict=amo.AUTO_APPROVED, confirmed=True)
        AddonApprovalsCounter.objects.create(
            addon=extra_addon3, last_content_review=self.days_ago(1))

        # This one has never been content-reviewed, but it has the
        # needs_admin_content_review flag, and we're not an admin.
        extra_addon4 = addon_factory(name=u'Extra Addön 4')
        extra_addon4.update(created=self.days_ago(2))
        AutoApprovalSummary.objects.create(
            version=extra_addon4.current_version,
            verdict=amo.AUTO_APPROVED, confirmed=True)
        AddonApprovalsCounter.objects.create(
            addon=extra_addon4, last_content_review=None)
        AddonReviewerFlags.objects.create(
            addon=extra_addon4, needs_admin_content_review=True)

        # Those should appear in the queue.
        # Has not been auto-approved.
        addon1 = addon_factory(name=u'Addôn 1', created=self.days_ago(4))

        # Has not been auto-approved either, only dry run.
        addon2 = addon_factory(name=u'Addôn 2', created=self.days_ago(3))
        AutoApprovalSummary.objects.create(
            version=addon2.current_version,
            verdict=amo.WOULD_HAVE_BEEN_AUTO_APPROVED,
        )

        # This one has never been content-reviewed. It has a
        # needs_admin_code_review flag, but that should not have any impact.
        addon3 = addon_factory(name=u'Addön 3', created=self.days_ago(2))
        AutoApprovalSummary.objects.create(
            version=addon3.current_version,
            verdict=amo.AUTO_APPROVED, confirmed=True)
        AddonApprovalsCounter.objects.create(
            addon=addon3, last_content_review=None)
        AddonReviewerFlags.objects.create(
            addon=addon3, needs_admin_code_review=True)

        # This one has never been content reviewed either, and it does not
        # even have an AddonApprovalsCounter.
        addon4 = addon_factory(name=u'Addön 4', created=self.days_ago(1))
        AutoApprovalSummary.objects.create(
            version=addon4.current_version,
            verdict=amo.AUTO_APPROVED, confirmed=True)
        assert not AddonApprovalsCounter.objects.filter(addon=addon4).exists()

        # Those should *not* appear in the queue.
        # Has not been auto-approved but themes, langpacks and search plugins
        # are excluded.
        addon_factory(
            name=u'Theme 1', created=self.days_ago(4),
            type=amo.ADDON_STATICTHEME)
        addon_factory(
            name=u'Langpack 1', created=self.days_ago(4),
            type=amo.ADDON_LPAPP)
        addon_factory(
            name=u'search plugin 1', created=self.days_ago(4),
            type=amo.ADDON_SEARCH)

        # Addons with no last_content_review date, ordered by
        # their creation date, older first.
        self.expected_addons = [addon1, addon2, addon3, addon4]

    def test_only_viewable_with_specific_permission(self):
        # Regular addon reviewer does not have access.
        response = self.client.get(self.url)
        assert response.status_code == 403

        # Regular user doesn't have access.
        self.client.logout()
        assert self.client.login(email='regular@mozilla.com')
        response = self.client.get(self.url)
        assert response.status_code == 403

    def test_results(self):
        self.login_with_permission()
        self.generate_files()
        with self.assertNumQueries(16):
            # - 2 for savepoints because we're in tests
            # - 2 for user/groups
            # - 1 for the current queue count for pagination purposes
            # - 3 for the addons in the queues and their files (regardless of
            #     how many are in the queue - that's the important bit)
            # - 2 for config items (motd / site notice)
            # - 2 for my add-ons / my collection in user menu
            # - 4 for reviewer scores and user stuff displayed above the queue
            self._test_results()

    def test_queue_layout(self):
        self.login_with_permission()
        self.generate_files()
        self._test_queue_layout(
            'Content Review', tab_position=0, total_addons=4, total_queues=1,
            per_page=1)

    def test_queue_layout_admin(self):
        # Admins should see the extra add-on that needs admin content review.
        user = self.login_with_permission()
        self.grant_permission(user, 'Reviews:Admin')
        self.generate_files()
        self._test_queue_layout(
            'Content Review', tab_position=0, total_addons=5, total_queues=2)

    def test_pending_rejection_filtered_out(self):
        # Versions pending rejection are excluded from the queue.
        self.login_with_permission()
        self.generate_files()
        VersionReviewerFlags.objects.create(
            version=self.expected_addons[0].current_version,
            pending_rejection=datetime.now())
        VersionReviewerFlags.objects.create(
            version=self.expected_addons[1].current_version,
            pending_rejection=datetime.now())
        self.expected_addons = self.expected_addons[2:]
        self._test_results()
class TestScannersReviewQueue(QueueTest):
    """Queue of add-ons flagged by scanners, i.e. having at least one
    version with needs_human_review=True."""

    fixtures = ['base/users']

    def setUp(self):
        super().setUp()
        self.url = reverse('reviewers.queue_scanners')

    def generate_files(self):
        """Generate add-ons needed for these tests and store the ones that
        should show up (in expected order) in self.expected_addons."""
        # Has no versions needing human review.
        extra_addon = addon_factory()
        version_factory(
            addon=extra_addon, channel=amo.RELEASE_CHANNEL_UNLISTED)

        # Has 3 listed versions, 2 needing human review, 1 unlisted but not
        # needing human review.
        addon1 = addon_factory(created=self.days_ago(31))
        addon1_v1 = addon1.current_version
        addon1_v1.update(needs_human_review=True)
        version_factory(addon=addon1, needs_human_review=True)
        version_factory(addon=addon1)
        version_factory(addon=addon1, channel=amo.RELEASE_CHANNEL_UNLISTED)
        AddonApprovalsCounter.objects.create(
            addon=addon1, counter=1, last_human_review=self.days_ago(1))

        # Has 1 listed and 1 unlisted versions, both needing human review.
        # (The original code had a stray no-op `addon2.current_version`
        # expression here; it has been removed.)
        addon2 = addon_factory(
            created=self.days_ago(15),
            version_kw={'needs_human_review': True})
        version_factory(
            addon=addon2, channel=amo.RELEASE_CHANNEL_UNLISTED,
            needs_human_review=True)

        # Has 2 unlisted versions, 1 needing human review. Needs admin content
        # review but that shouldn't matter. (A stray no-op
        # `addon3.versions.get()` expression was removed here as well.)
        addon3 = addon_factory(
            created=self.days_ago(7),
            version_kw={'channel': amo.RELEASE_CHANNEL_UNLISTED,
                        'needs_human_review': True})
        version_factory(
            addon=addon3, channel=amo.RELEASE_CHANNEL_UNLISTED)
        AddonReviewerFlags.objects.create(
            addon=addon3, needs_admin_content_review=True)

        # Needs admin code review, so wouldn't show up for regular reviewers.
        addon4 = addon_factory(
            created=self.days_ago(1),
            version_kw={'needs_human_review': True})
        AddonReviewerFlags.objects.create(
            addon=addon4, needs_admin_code_review=True)

        self.expected_addons = [addon1, addon2, addon3]

    def test_results(self):
        self.generate_files()
        response = self.client.get(self.url)
        assert response.status_code == 200

        expected = []
        # addon1: two listed versions flagged.
        addon = self.expected_addons[0]
        expected.append((
            'Listed versions needing human review (2)',
            reverse('reviewers.review', args=[addon.slug])
        ))
        # addon2: one flagged version in each channel, so two links.
        addon = self.expected_addons[1]
        expected.append((
            'Listed versions needing human review (1)',
            reverse('reviewers.review', args=[addon.slug])
        ))
        expected.append((
            'Unlisted versions needing human review (1)',
            reverse('reviewers.review', args=['unlisted', addon.slug])
        ))
        # addon3: one unlisted version flagged.
        addon = self.expected_addons[2]
        expected.append((
            'Unlisted versions needing human review (1)',
            reverse('reviewers.review', args=['unlisted', addon.slug])
        ))

        doc = pq(response.content)
        links = doc('#addon-queue tr.addon-row td a:not(.app-icon)')
        # Number of expected links is not equal to len(self.expected_addons)
        # because we display a review link for each channel that has versions
        # needing review per add-on, and addon2 has both unlisted and listed
        # versions needing review.
        assert len(links) == 4
        check_links(expected, links, verify=False)

    def test_only_viewable_with_specific_permission(self):
        # content reviewer does not have access.
        self.user.groupuser_set.all().delete()  # Remove all permissions
        self.grant_permission(self.user, 'Addons:ContentReview')
        response = self.client.get(self.url)
        assert response.status_code == 403

        # Regular user doesn't have access.
        self.client.logout()
        assert self.client.login(email='regular@mozilla.com')
        response = self.client.get(self.url)
        assert response.status_code == 403

    def test_queue_layout(self):
        self.generate_files()
        self._test_queue_layout(
            'Flagged By Scanners',
            tab_position=1, total_addons=3, total_queues=4, per_page=1)

    def test_queue_layout_admin(self):
        # Admins should also see the extra add-on that needs admin *code*
        # review (addon4 above), hence total_addons=4.
        self.login_as_admin()
        self.generate_files()
        self._test_queue_layout(
            'Flagged By Scanners',
            tab_position=2, total_addons=4, total_queues=10, per_page=1)
class TestPendingRejectionReviewQueue(QueueTest):
    """Queue of add-ons whose current version is pending rejection,
    ordered by rejection deadline."""

    fixtures = ['base/users']

    def setUp(self):
        super().setUp()
        self.url = reverse('reviewers.queue_pending_rejection')

    def generate_files(self):
        # Rejection deadline tomorrow.
        pending_soon = addon_factory(created=self.days_ago(4))
        VersionReviewerFlags.objects.create(
            version=pending_soon.versions.get(),
            pending_rejection=datetime.now() + timedelta(days=1))

        # Deadline a day later, but an older creation date.
        pending_later = addon_factory(
            created=self.days_ago(5),
            status=amo.STATUS_NOMINATED,
            file_kw={'status': amo.STATUS_AWAITING_REVIEW})
        VersionReviewerFlags.objects.create(
            version=pending_later.versions.get(),
            pending_rejection=datetime.now() + timedelta(days=2))

        # Extra add-ons without pending rejection on their current version,
        # they shouldn't appear.
        addon_factory()
        extra = addon_factory(
            name='Has a version pending rejection but it is not the current',
            version_kw={'created': self.days_ago(1), 'version': '0.1'})
        VersionReviewerFlags.objects.create(
            version=extra.current_version, pending_rejection=datetime.now())
        version_factory(addon=extra, version='0.2')

        # pending_later was created before pending_soon, but the ordering is
        # driven by the pending rejection deadline, not the creation date.
        self.expected_addons = [pending_soon, pending_later]

    def test_results(self):
        self.login_as_admin()
        self.generate_files()
        self._test_results()
class TestPerformance(QueueTest):
    """Test the page at /reviewers/performance."""
    # The docstring above used to be placed *after* the `fixtures`
    # assignment, where it was a no-op string expression instead of the
    # class docstring; it has been moved to its proper position.

    fixtures = ['base/users', 'base/addon_3615']

    def setUpReviewer(self):
        """Log in as the reviewer user and record review activity."""
        self.login_as_reviewer()
        core.set_user(UserProfile.objects.get(username='reviewer'))
        self.create_logs()

    def setUpAdmin(self):
        """Log in as the admin user and record review activity."""
        self.login_as_admin()
        core.set_user(UserProfile.objects.get(username='admin'))
        self.create_logs()

    def get_url(self, args=None):
        if args is None:
            args = []
        return reverse('reviewers.performance', args=args)

    def create_logs(self):
        """Create one ActivityLog per reviewer review action, plus one
        automatic approval by the task user that should be ignored."""
        addon = Addon.objects.all()[0]
        version = addon.versions.all()[0]
        for i in amo.LOG_REVIEWER_REVIEW_ACTION:
            ActivityLog.create(amo.LOG_BY_ID[i], addon, version)
        # Throw in an automatic approval - should be ignored.
        ActivityLog.create(
            amo.LOG.APPROVE_VERSION, addon, version,
            user=UserProfile.objects.get(id=settings.TASK_USER_ID))

    def _test_chart(self):
        """The monthly chart data matches the logs created above."""
        r = self.client.get(self.get_url())
        assert r.status_code == 200
        doc = pq(r.content)

        num = len(amo.LOG_REVIEWER_REVIEW_ACTION)
        label = datetime.now().strftime('%Y-%m')
        data = {label: {u'teamcount': num, u'teamavg': u'%s.0' % num,
                        u'usercount': num, u'teamamt': 1,
                        u'label': datetime.now().strftime('%b %Y')}}

        assert json.loads(doc('#monthly').attr('data-chart')) == data

    def test_performance_chart_reviewer(self):
        self.setUpReviewer()
        self._test_chart()

    def test_performance_chart_as_admin(self):
        self.setUpAdmin()
        self._test_chart()

    def test_usercount_with_more_than_one_reviewer(self):
        # Create logs as a second reviewer first, then as the regular one:
        # usercount must only count the current user's reviews.
        self.client.login(email='clouserw@gmail.com')
        core.set_user(UserProfile.objects.get(username='clouserw'))
        self.create_logs()
        self.setUpReviewer()
        r = self.client.get(self.get_url())
        assert r.status_code == 200
        doc = pq(r.content)
        data = json.loads(doc('#monthly').attr('data-chart'))
        label = datetime.now().strftime('%Y-%m')
        assert data[label]['usercount'] == len(amo.LOG_REVIEWER_REVIEW_ACTION)

    def _test_performance_other_user_as_admin(self):
        userid = core.get_user().pk

        r = self.client.get(self.get_url([10482]))
        doc = pq(r.content)

        assert doc('#select_user').length == 1  # Let them choose reviewers.
        options = doc('#select_user option')
        assert options.length == 3
        assert options.eq(2).val() == str(userid)

        assert 'clouserw' in doc('#reviews_user').text()

    def test_performance_other_user_as_admin(self):
        self.setUpAdmin()
        self._test_performance_other_user_as_admin()

    def test_performance_other_user_not_admin(self):
        self.setUpReviewer()

        r = self.client.get(self.get_url([10482]))
        doc = pq(r.content)

        assert doc('#select_user').length == 0  # Don't let them choose.
        assert doc('#reviews_user').text() == 'Your Reviews'
class SearchTest(ReviewerTest):
    """Base class for queue-search tests, logged in as a reviewer."""

    listed = True

    def setUp(self):
        super().setUp()
        self.user = UserProfile.objects.get(email='reviewer@mozilla.com')
        self.login_as_reviewer()
        if self.listed is False:
            # Testing unlisted views: needs Addons:ReviewUnlisted perm.
            self.grant_permission(self.user, 'Addons:ReviewUnlisted')

    def named_addons(self, request):
        """Return the add-on names of the response's page of results."""
        object_list = request.context['page'].object_list
        return [row.record.addon_name for row in object_list]

    def search(self, *args, **kw):
        """GET the queue with `kw` as query params; the page must render
        and the search form must validate."""
        resp = self.client.get(self.url, kw)
        assert resp.status_code == 200
        assert resp.context['search_form'].errors.as_text() == ''
        return resp
class BaseTestQueueSearch(SearchTest):
fixtures = ['base/users', 'base/appversion']
__test__ = False # this is an abstract test case
def generate_files(self, subset=None):
    """Create the named nominated add-ons used by the search tests.

    `subset` optionally restricts creation to the given names; an empty
    subset creates everything. Creation/nomination dates are staggered so
    queue ordering stays deterministic. Returns a dict mapping each created
    name to its add-on.
    """
    if subset is None:
        subset = []
    files = OrderedDict([
        ('Not Needing Admin Review', {
            'file_kw': {
                'status': amo.STATUS_AWAITING_REVIEW,
            },
            'version_kw': {
                'created': self.days_ago(10),
                'nomination': self.days_ago(10),
                'version': '0.1',
            },
            'status': amo.STATUS_NOMINATED,
        }),
        ('Another Not Needing Admin Review', {
            'file_kw': {
                'status': amo.STATUS_AWAITING_REVIEW,
            },
            'version_kw': {
                'created': self.days_ago(9),
                'nomination': self.days_ago(9),
                'version': '0.1',
            },
            'status': amo.STATUS_NOMINATED,
        }),
        ('Needs Admin Review', {
            'file_kw': {
                'status': amo.STATUS_AWAITING_REVIEW,
            },
            'version_kw': {
                'created': self.days_ago(8),
                'nomination': self.days_ago(8),
                'version': '0.1',
            },
            'status': amo.STATUS_NOMINATED,
            'needs_admin_code_review': True,
        }),
        ('Bieber Lang', {
            'file_kw': {
                'status': amo.STATUS_AWAITING_REVIEW,
            },
            'version_kw': {
                'created': self.days_ago(7),
                'nomination': self.days_ago(7),
                'version': '0.1',
            },
            'status': amo.STATUS_NOMINATED,
            'type': amo.ADDON_LPAPP,
        }),
        ('Justin Bieber Search Bar', {
            'file_kw': {
                'status': amo.STATUS_AWAITING_REVIEW,
            },
            'version_kw': {
                'created': self.days_ago(6),
                'nomination': self.days_ago(6),
                'version': '0.1',
            },
            'status': amo.STATUS_NOMINATED,
            'type': amo.ADDON_SEARCH,
        }),
        ('Bieber Dictionary', {
            'file_kw': {
                'status': amo.STATUS_AWAITING_REVIEW,
            },
            'version_kw': {
                'created': self.days_ago(5),
                'nomination': self.days_ago(5),
                'version': '0.1',
            },
            'status': amo.STATUS_NOMINATED,
            'type': amo.ADDON_DICT,
        }),
        ('Bieber For Mobile', {
            'file_kw': {
                'status': amo.STATUS_AWAITING_REVIEW,
            },
            'version_kw': {
                'application': amo.ANDROID.id,
                'created': self.days_ago(4),
                'nomination': self.days_ago(4),
                'version': '0.1',
            },
            'status': amo.STATUS_NOMINATED,
        }),
        ('Linux Widget', {
            'file_kw': {
                'status': amo.STATUS_AWAITING_REVIEW,
            },
            'version_kw': {
                'created': self.days_ago(3),
                'nomination': self.days_ago(3),
                'version': '0.1',
            },
            'status': amo.STATUS_NOMINATED,
        }),
        ('Mac Widget', {
            'file_kw': {
                'status': amo.STATUS_AWAITING_REVIEW,
            },
            'version_kw': {
                'created': self.days_ago(2),
                'nomination': self.days_ago(2),
                'version': '0.1',
            },
            'status': amo.STATUS_NOMINATED,
        }),
        ('Deleted', {
            'file_kw': {
                'status': amo.STATUS_AWAITING_REVIEW,
            },
            'version_kw': {
                'created': self.days_ago(1),
                'nomination': self.days_ago(1),
                'version': '0.1',
            },
            'status': amo.STATUS_DELETED,
        }),
    ])
    results = {}
    # Everything is created in the channel under test (listed vs unlisted).
    channel = (amo.RELEASE_CHANNEL_LISTED if self.listed else
               amo.RELEASE_CHANNEL_UNLISTED)
    for name, attrs in files.items():
        if not subset or name in subset:
            # needs_admin_code_review is not an addon_factory() kwarg: it
            # is applied afterwards through AddonReviewerFlags.
            needs_admin_code_review = attrs.pop(
                'needs_admin_code_review', None)
            version_kw = attrs.pop('version_kw', {})
            version_kw['channel'] = channel
            file_kw = attrs.pop('file_kw', {})
            results[name] = addon_factory(
                name=name, version_kw=version_kw, file_kw=file_kw, **attrs)
            if needs_admin_code_review:
                AddonReviewerFlags.objects.create(
                    addon=results[name], needs_admin_code_review=True)
    return results
def generate_file(self, name):
    """Create a single add-on (by queue-entry name) and return it."""
    created = self.generate_files([name])
    return created[name]
def test_search_by_needs_admin_code_review_admin(self):
    """Admins filtering on needs_admin_code_review see only flagged add-ons."""
    self.login_as_admin()
    self.generate_files(['Not Needing Admin Review', 'Needs Admin Review'])
    response = self.search(needs_admin_code_review=1)
    assert response.status_code == 200
    assert self.named_addons(response) == ['Needs Admin Review']
def test_queue_counts_admin(self):
    """Admin search reports the total result count across pages."""
    self.login_as_admin()
    self.generate_files(['Not Needing Admin Review', 'Needs Admin Review'])
    # per_page=1 forces pagination so the "Results 1-1 of 2" header shows.
    response = self.search(text_query='admin', per_page=1)
    assert response.status_code == 200
    doc = pq(response.content)
    assert doc('.data-grid-top .num-results').text() == (
        u'Results 1\u20131 of 2')
def test_search_by_addon_name_admin(self):
    """Admin text search matches add-on names ('admin' hits two of three)."""
    self.login_as_admin()
    self.generate_files(['Not Needing Admin Review', 'Needs Admin Review',
                         'Bieber Lang'])
    response = self.search(text_query='admin')
    assert response.status_code == 200
    assert sorted(self.named_addons(response)) == [
        'Needs Admin Review', 'Not Needing Admin Review']
def test_not_searching(self, **kwargs):
    """Plain queue display (no 'searching' param) hides admin-flagged add-ons.

    Also reused by test_not_searching_with_param with extra GET params.
    """
    self.generate_files(['Not Needing Admin Review', 'Needs Admin Review'])
    response = self.search(**kwargs)
    assert response.status_code == 200
    assert sorted(self.named_addons(response)) == [
        'Not Needing Admin Review']
    # We were just displaying the queue, not searching, but the searching
    # hidden input in the form should always be set to True regardless, it
    # will be used once the user submits the form.
    doc = pq(response.content)
    assert doc('#id_searching').attr('value') == 'True'
def test_not_searching_with_param(self):
    """Unrelated GET params still count as "not searching"."""
    extra_params = {'some_param': 1}
    self.test_not_searching(**extra_params)
def test_search_by_nothing(self):
    """An empty search (searching='True', no filters) returns everything."""
    self.generate_files(['Not Needing Admin Review', 'Needs Admin Review'])
    response = self.search(searching='True')
    assert response.status_code == 200
    assert sorted(self.named_addons(response)) == (
        ['Needs Admin Review', 'Not Needing Admin Review'])
def test_search_by_needs_admin_code_review(self):
    """Non-admin search on needs_admin_code_review returns flagged add-ons."""
    self.generate_files(['Not Needing Admin Review', 'Needs Admin Review'])
    response = self.search(needs_admin_code_review=1, searching='True')
    assert response.status_code == 200
    assert self.named_addons(response) == ['Needs Admin Review']
def test_queue_counts(self):
    """Non-admin search reports the total result count across pages."""
    self.generate_files(['Not Needing Admin Review',
                         'Another Not Needing Admin Review',
                         'Needs Admin Review'])
    # per_page=1 forces pagination so the "Results 1-1 of 3" header shows.
    response = self.search(
        text_query='admin', per_page=1, searching='True')
    assert response.status_code == 200
    doc = pq(response.content)
    assert doc('.data-grid-top .num-results').text() == (
        u'Results 1\u20131 of 3')
def test_search_by_addon_name(self):
    """Text search matches add-on names when explicitly searching."""
    self.generate_files(['Not Needing Admin Review', 'Needs Admin Review',
                         'Bieber Lang'])
    response = self.search(text_query='admin', searching='True')
    assert response.status_code == 200
    assert sorted(self.named_addons(response)) == (
        ['Needs Admin Review', 'Not Needing Admin Review'])
def test_search_by_addon_in_locale(self):
    """Text search matches a translated (Japanese) add-on name."""
    name = 'Not Needing Admin Review'
    generated = self.generate_file(name)
    uni = u'フォクすけといっしょ'
    addon = Addon.objects.get(pk=generated.id)
    addon.name = {'ja': uni}
    addon.save()
    # Switch the request locale to 'ja' so the translation is active.
    self.url = self.url.replace('/en-US/', '/ja/')
    response = self.client.get(self.url, {'text_query': uni}, follow=True)
    assert response.status_code == 200
    assert self.named_addons(response) == [name]
def test_search_by_addon_author(self):
    """Text search matches an author's email, case-insensitively, for both
    the owner and developer roles."""
    name = 'Not Needing Admin Review'
    generated = self.generate_file(name)
    user = UserProfile.objects.all()[0]
    # swapcase() checks the email match is case-insensitive.
    email = user.email.swapcase()
    author = AddonUser.objects.create(user=user, addon=generated)
    for role in [amo.AUTHOR_ROLE_OWNER, amo.AUTHOR_ROLE_DEV]:
        author.role = role
        author.save()
        response = self.search(text_query=email)
        assert response.status_code == 200
        assert self.named_addons(response) == [name]
def test_search_by_supported_email_in_locale(self):
    """Text search matches a translated (Japanese) support email."""
    name = 'Not Needing Admin Review'
    generated = self.generate_file(name)
    uni = u'フォクすけといっしょ@site.co.jp'
    addon = Addon.objects.get(pk=generated.id)
    addon.support_email = {'ja': uni}
    addon.save()
    # Switch the request locale to 'ja' so the translation is active.
    self.url = self.url.replace('/en-US/', '/ja/')
    response = self.client.get(self.url, {'text_query': uni}, follow=True)
    assert response.status_code == 200
    assert self.named_addons(response) == [name]
def test_clear_search_visible(self):
    """The "clear search" link is shown while a search is active."""
    response = self.search(text_query='admin', searching=True)
    assert response.status_code == 200
    doc = pq(response.content)
    assert doc('.clear-queue-search').text() == 'clear search'
def test_clear_search_hidden(self):
    """No "clear search" link when filtering without the searching flag."""
    response = self.search(text_query='admin')
    assert response.status_code == 200
    doc = pq(response.content)
    assert not doc('.clear-queue-search').text()
class TestQueueSearch(BaseTestQueueSearch):
    """Search tests against the listed extension queue."""
    __test__ = True

    def setUp(self):
        super(TestQueueSearch, self).setUp()
        self.url = reverse('reviewers.queue_extension')

    def test_search_by_addon_type(self):
        """Filtering by a single add-on type returns only that type."""
        self.generate_files(['Not Needing Admin Review', 'Bieber Lang',
                             'Justin Bieber Search Bar'])
        response = self.search(addon_type_ids=[amo.ADDON_LPAPP])
        assert response.status_code == 200
        assert self.named_addons(response) == ['Bieber Lang']

    def test_search_by_addon_type_any(self):
        """ADDON_ANY is a no-op filter: everything still shows up."""
        self.generate_file('Not Needing Admin Review')
        response = self.search(addon_type_ids=[amo.ADDON_ANY])
        assert response.status_code == 200
        assert self.named_addons(response), 'Expected some add-ons'

    def test_search_by_many_addon_types(self):
        """Filtering by several types returns the union of matches."""
        self.generate_files(['Not Needing Admin Review', 'Bieber Lang',
                             'Bieber Dictionary'])
        response = self.search(
            addon_type_ids=[amo.ADDON_LPAPP, amo.ADDON_DICT])
        assert response.status_code == 200
        assert sorted(self.named_addons(response)) == (
            ['Bieber Dictionary', 'Bieber Lang'])

    def test_search_by_app(self):
        """Filtering by application only returns add-ons targeting it."""
        self.generate_files(['Bieber For Mobile', 'Linux Widget'])
        response = self.search(application_id=[amo.ANDROID.id])
        assert response.status_code == 200
        assert self.named_addons(response) == ['Bieber For Mobile']

    def test_preserve_multi_apps(self):
        """An add-on targeting multiple apps matches an application filter
        for any one of them."""
        self.generate_files(['Bieber For Mobile', 'Linux Widget'])
        channel = (amo.RELEASE_CHANNEL_LISTED if self.listed else
                   amo.RELEASE_CHANNEL_UNLISTED)
        multi = addon_factory(
            status=amo.STATUS_NOMINATED, name='Multi Application',
            version_kw={'channel': channel, 'application': amo.FIREFOX.id},
            file_kw={'status': amo.STATUS_AWAITING_REVIEW})
        # Add Android compatibility on top of the Firefox one.
        av_min, _ = AppVersion.objects.get_or_create(
            application=amo.ANDROID.id, version='4.0.99')
        av_max, _ = AppVersion.objects.get_or_create(
            application=amo.ANDROID.id, version='5.0.0')
        ApplicationsVersions.objects.get_or_create(
            application=amo.ANDROID.id, version=multi.versions.latest(),
            min=av_min, max=av_max)
        response = self.search(application_id=[amo.ANDROID.id])
        assert response.status_code == 200
        # sorted() already returns a list, no need to wrap it in list().
        assert sorted(self.named_addons(response)) == [
            'Bieber For Mobile', 'Multi Application']

    def test_clear_search_uses_correct_queue(self):
        # The "clear search" link points to the right listed or unlisted queue.
        # Listed queue.
        url = reverse('reviewers.queue_extension')
        response = self.client.get(
            url, {'text_query': 'admin', 'searching': True})
        assert response.status_code == 200
        doc = pq(response.content)
        assert doc('.clear-queue-search').attr('href') == url
class TestQueueSearchUnlistedAllList(BaseTestQueueSearch):
    """Search tests against the unlisted "all add-ons" queue."""
    listed = False
    __test__ = True

    def setUp(self):
        super(TestQueueSearchUnlistedAllList, self).setUp()
        self.url = reverse('reviewers.unlisted_queue_all')

    def test_search_deleted(self):
        """deleted=1 restricts results to deleted add-ons."""
        self.generate_files(['Not Needing Admin Review', 'Deleted'])
        response = self.search(deleted=1)
        # Consistency fix: assert the status code like the sibling tests do
        # (previously this response wasn't checked at all).
        assert response.status_code == 200
        assert self.named_addons(response) == ['Deleted']

    def test_search_not_deleted(self):
        """deleted=0 restricts results to non-deleted add-ons."""
        self.generate_files(['Not Needing Admin Review', 'Deleted'])
        response = self.search(deleted=0)
        assert response.status_code == 200
        assert self.named_addons(response) == ['Not Needing Admin Review']

    def test_search_by_guid(self):
        """Text search matches a substring of the add-on guid."""
        name = 'Not Needing Admin Review'
        addon = self.generate_file(name)
        addon.update(guid='@guidymcguid')
        response = self.search(text_query='mcguid')
        assert response.status_code == 200
        assert self.named_addons(response) == ['Not Needing Admin Review']
class ReviewBase(QueueTest):
    """Shared fixture for review-page tests: one public add-on and a
    logged-in reviewer."""

    def setUp(self):
        # Deliberately call the *grandparent* setUp: passing QueueTest (not
        # ReviewBase) as the first super() argument skips QueueTest.setUp()
        # and the whole queue of fixtures it would generate, while still
        # running the base TestCase setup.
        super(QueueTest, self).setUp()
        self.login_as_reviewer()
        self.addons = {}
        self.addon = self.generate_file('Public')
        self.version = self.addon.current_version
        self.file = self.version.files.get()
        self.reviewer = UserProfile.objects.get(username='reviewer')
        self.reviewer.update(display_name=u'A Reviêwer')
        self.url = reverse('reviewers.review', args=[self.addon.slug])
        # Give the add-on an author so reviewer actions can notify someone.
        AddonUser.objects.create(addon=self.addon, user_id=999)

    def get_addon(self):
        """Re-fetch the add-on from the database to observe fresh state."""
        return Addon.objects.get(pk=self.addon.pk)

    def get_dict(self, **kw):
        """Base POST data for a review action; kw overrides/extends it."""
        data = {'operating_systems': 'win', 'applications': 'something',
                'comments': 'something'}
        data.update(kw)
        return data
class TestReview(ReviewBase):
def test_reviewer_required(self):
    """A logged-in reviewer can access the review page."""
    status = self.client.head(self.url).status_code
    assert status == 200
def test_not_anonymous(self):
    """Anonymous users get redirected to the login page."""
    self.client.logout()
    response = self.client.head(self.url)
    self.assertLoginRedirects(response, to=self.url)
@mock.patch.object(settings, 'ALLOW_SELF_REVIEWS', False)
def test_not_author(self):
    """Authors can't review their own add-on when self-reviews are off."""
    AddonUser.objects.create(addon=self.addon, user=self.reviewer)
    # Redirected away rather than shown the review page.
    assert self.client.head(self.url).status_code == 302
def test_review_unlisted_while_a_listed_version_is_awaiting_review(self):
    """The unlisted review page stays reachable even while a listed
    version of the same add-on awaits review."""
    self.make_addon_unlisted(self.addon)
    version_factory(
        addon=self.addon, channel=amo.RELEASE_CHANNEL_LISTED,
        file_kw={'status': amo.STATUS_AWAITING_REVIEW})
    self.addon.update(status=amo.STATUS_NOMINATED, slug='awaiting')
    self.url = reverse(
        'reviewers.review', args=('unlisted', self.addon.slug))
    self.grant_permission(self.reviewer, 'Addons:ReviewUnlisted')
    assert self.client.get(self.url).status_code == 200
def test_needs_unlisted_reviewer_for_only_unlisted(self):
    """Viewing the unlisted channel requires Addons:ReviewUnlisted;
    without it the response is 404 (purely unlisted) or 403 (mixed)."""
    self.addon.versions.update(channel=amo.RELEASE_CHANNEL_UNLISTED)
    self.addon.update_version()
    self.url = reverse(
        'reviewers.review', args=('unlisted', self.addon.slug))
    assert self.client.head(self.url).status_code == 404
    # Adding a listed version makes it pass @reviewer_addon_view_factory
    # decorator that only depends on the addon being purely unlisted or
    # not, but still fail the check inside the view because we're looking
    # at the unlisted channel specifically.
    version_factory(
        addon=self.addon, channel=amo.RELEASE_CHANNEL_LISTED,
        version='9.9')
    assert self.client.head(self.url).status_code == 403
    assert self.client.post(self.url).status_code == 403
    # It works with the right permission.
    self.grant_permission(self.reviewer, 'Addons:ReviewUnlisted')
    assert self.client.head(self.url).status_code == 200
def test_dont_need_unlisted_reviewer_for_mixed_channels(self):
    """The *listed* review page of a mixed-channel add-on doesn't require
    the unlisted-review permission."""
    version_factory(
        addon=self.addon, channel=amo.RELEASE_CHANNEL_UNLISTED,
        version='9.9')
    # Sanity: the add-on really has both channels.
    assert self.addon.find_latest_version(
        channel=amo.RELEASE_CHANNEL_UNLISTED)
    assert self.addon.current_version.channel == amo.RELEASE_CHANNEL_LISTED
    assert self.client.head(self.url).status_code == 200
    self.grant_permission(self.reviewer, 'Addons:ReviewUnlisted')
    assert self.client.head(self.url).status_code == 200
def _review_action_choices(self, response):
    """Helper: action keys offered by the review form on this response."""
    return list(
        dict(response.context['form'].fields['action'].choices).keys()
    )

def test_need_correct_reviewer_for_promoted_addon(self):
    """Promoted add-ons only expose approval/rejection actions to
    reviewers holding the matching permission; others get a notice.

    The four-times copy-pasted choices extraction is factored into
    _review_action_choices().
    """
    self.make_addon_promoted(self.addon, RECOMMENDED)
    self.file.update(status=amo.STATUS_AWAITING_REVIEW)

    # Plain reviewer: no approval actions on a Recommended add-on.
    response = self.client.get(self.url)
    assert response.status_code == 200
    assert self._review_action_choices(response) == [
        'reply', 'super', 'comment']
    doc = pq(response.content)
    assert doc('.is_promoted')
    assert doc('.is_promoted').text() == (
        'This is a Recommended add-on. '
        'You don\'t have permission to review it.'
    )

    # With the recommended-review permission the full set is available.
    self.grant_permission(self.reviewer, 'Addons:RecommendedReview')
    response = self.client.get(self.url)
    assert response.status_code == 200
    assert self._review_action_choices(response) == [
        'public', 'reject', 'reject_multiple_versions', 'reply', 'super',
        'comment'
    ]
    doc = pq(response.content)
    assert doc('.is_promoted')
    assert doc('.is_promoted').text() == (
        'This is a Recommended add-on.'
    )

    # Change to a different class of promoted addon
    self.make_addon_promoted(self.addon, SPOTLIGHT)
    response = self.client.get(self.url)
    assert response.status_code == 200
    assert self._review_action_choices(response) == ['super', 'comment']
    doc = pq(response.content)
    assert doc('.is_promoted')
    assert doc('.is_promoted').text() == (
        'This is a Spotlight add-on. '
        'You don\'t have permission to review it.'
    )

    # Spotlight add-ons need the full reviews-admin permission.
    self.grant_permission(self.reviewer, 'Reviews:Admin')
    response = self.client.get(self.url)
    assert response.status_code == 200
    assert self._review_action_choices(response) == [
        'public', 'reject', 'reject_multiple_versions', 'reply', 'super',
        'comment'
    ]
    doc = pq(response.content)
    assert doc('.is_promoted')
    assert doc('.is_promoted').text() == (
        'This is a Spotlight add-on.'
    )
def test_not_recommendable(self):
    """Non-promoted add-ons show no promoted banner on the review page."""
    response = self.client.get(self.url)
    assert response.status_code == 200
    doc = pq(response.content)
    assert doc('h2.addon').text() == 'Review Public 0.1 (Listed)'
    assert not doc('.is_promoted')
def test_not_flags(self):
    """No reviewer flags are shown by default."""
    self.addon.current_version.files.update(is_restart_required=False)
    response = self.client.get(self.url)
    assert response.status_code == 200
    assert len(response.context['flags']) == 0
def test_flag_needs_admin_code_review(self):
    """The needs_admin_code_review flag surfaces on the review page."""
    self.addon.current_version.files.update(is_restart_required=False)
    AddonReviewerFlags.objects.create(
        addon=self.addon, needs_admin_code_review=True)
    response = self.client.get(self.url)
    assert response.status_code == 200
    assert len(response.context['flags']) == 1
def test_info_comments_requested(self):
    """A 'reply' action without comments fails form validation."""
    response = self.client.post(self.url, {'action': 'reply'})
    assert response.context['form'].errors['comments'][0] == (
        'This field is required.')
def test_whiteboard_url(self):
    """The whiteboard form posts to a URL matching the channel/page kind
    (listed, content, unlisted) and falls back to the pk when deleted."""
    # Listed review.
    response = self.client.get(self.url)
    assert response.status_code == 200
    doc = pq(response.content)
    assert (
        doc('#whiteboard_form').attr('action') ==
        '/en-US/reviewers/whiteboard/listed/public')
    assert doc('#id_whiteboard-public')
    assert doc('#id_whiteboard-private')
    # Content review.
    self.grant_permission(self.reviewer, 'Addons:ContentReview')
    AutoApprovalSummary.objects.create(
        version=self.addon.current_version, verdict=amo.AUTO_APPROVED)
    self.url = reverse(
        'reviewers.review', args=['content', self.addon.slug])
    response = self.client.get(self.url)
    assert response.status_code == 200
    doc = pq(response.content)
    assert (
        doc('#whiteboard_form').attr('action') ==
        '/en-US/reviewers/whiteboard/content/public')
    # Unlisted review.
    self.grant_permission(self.reviewer, 'Addons:ReviewUnlisted')
    version_factory(addon=self.addon, channel=amo.RELEASE_CHANNEL_UNLISTED)
    self.url = reverse(
        'reviewers.review', args=['unlisted', self.addon.slug])
    response = self.client.get(self.url)
    assert response.status_code == 200
    doc = pq(response.content)
    assert (
        doc('#whiteboard_form').attr('action') ==
        '/en-US/reviewers/whiteboard/unlisted/public')
    # Listed review, but deleted.
    self.addon.delete()
    self.url = reverse(
        'reviewers.review', args=['listed', self.addon.pk])
    response = self.client.get(self.url)
    assert response.status_code == 200
    doc = pq(response.content)
    assert (
        doc('#whiteboard_form').attr('action') ==
        '/en-US/reviewers/whiteboard/listed/%d' % self.addon.pk)
def test_whiteboard_for_static_themes(self):
    """Static themes get a public whiteboard but no private one."""
    self.grant_permission(self.reviewer, 'Addons:ThemeReview')
    self.addon.update(type=amo.ADDON_STATICTHEME)
    response = self.client.get(self.url)
    assert response.status_code == 200
    doc = pq(response.content)
    assert (
        doc('#whiteboard_form').attr('action') ==
        '/en-US/reviewers/whiteboard/listed/public')
    assert doc('#id_whiteboard-public')
    assert not doc('#id_whiteboard-private')
def test_comment(self):
    """Posting a comment logs COMMENT_VERSION and sends no email."""
    data = {'action': 'comment', 'comments': 'hello sailor'}
    response = self.client.post(self.url, data)
    assert response.status_code == 302
    assert len(mail.outbox) == 0
    logs = ActivityLog.objects.filter(action=amo.LOG.COMMENT_VERSION.id)
    assert logs.count() == 1
def test_reviewer_reply(self):
    """A reviewer reply emails the developer using the reviewer template."""
    response = self.client.post(self.url, {'action': 'reply',
                                           'comments': 'hello sailor'})
    assert response.status_code == 302
    assert len(mail.outbox) == 1
    self.assertTemplateUsed(response, 'activity/emails/from_reviewer.txt')
def test_super_review_requested(self):
    """Requesting a super review redirects back to the queue."""
    data = {'action': 'super', 'comments': 'hello sailor'}
    response = self.client.post(self.url, data)
    assert response.status_code == 302
def test_reviewer_reply_canned_response(self):
    """Replying with a canned response still emails the developer."""
    response = self.client.post(self.url, {'action': 'reply',
                                           'comments': 'hello sailor',
                                           'canned_response': 'foo'})
    assert response.status_code == 302
    assert len(mail.outbox) == 1
    self.assertTemplateUsed(response, 'activity/emails/from_reviewer.txt')
def test_page_title(self):
    """The review page title includes the add-on name."""
    response = self.client.get(self.url)
    assert response.status_code == 200
    doc = pq(response.content)
    assert doc('title').text() == (
        '%s – Add-ons for Firefox' % self.addon.name)
def test_files_shown(self):
    """The version history lists the file with its download, validation
    and contents links."""
    response = self.client.get(self.url)
    assert response.status_code == 200
    items = pq(response.content)('#versions-history .files .file-info')
    assert items.length == 1
    file_ = self.version.all_files[0]
    expected = [
        ('All Platforms', file_.get_absolute_url(attachment=True)),
        ('Validation', reverse(
            'devhub.file_validation', args=[self.addon.slug, file_.id])),
        ('Contents', None),
    ]
    check_links(expected, items.find('a'), verify=False)
def test_item_history(self, channel=amo.RELEASE_CHANNEL_LISTED):
    """Review history shows a row per reviewed version with the action,
    comment and reviewer name; parameterized over channel so the unlisted
    variant can reuse it."""
    self.addons['something'] = addon_factory(
        status=amo.STATUS_APPROVED, name=u'something',
        version_kw={'version': u'0.2',
                    'channel': channel},
        file_kw={'status': amo.STATUS_AWAITING_REVIEW})
    assert self.addon.versions.filter(channel=channel).count() == 1
    self.review_version(self.version, self.url)
    # Move the other add-on's 0.2 version onto our add-on so it has two
    # versions in the channel, then review the second one too.
    v2 = self.addons['something'].versions.all()[0]
    v2.addon = self.addon
    v2.created = v2.created + timedelta(days=1)
    v2.save()
    assert self.addon.versions.filter(channel=channel).count() == 2
    action = self.review_version(v2, self.url)
    response = self.client.get(self.url)
    assert response.status_code == 200
    # The 2 following lines replace pq(res.content), it's a workaround for
    # https://github.com/gawel/pyquery/issues/31
    UTF8_PARSER = HTMLParser(encoding='utf-8')
    doc = pq(fromstring(response.content, parser=UTF8_PARSER))
    table = doc('#versions-history .review-files')
    # Check the history for both versions.
    ths = table.children('tr > th')
    assert ths.length == 2
    assert '0.1' in ths.eq(0).text()
    assert '0.2' in ths.eq(1).text()
    rows = table('td.files')
    assert rows.length == 2
    comments = rows.siblings('td')
    assert comments.length == 2
    for idx in range(comments.length):
        td = comments.eq(idx)
        assert td.find('.history-comment').text() == 'something'
        assert td.find('th').text() == {
            'public': 'Approved',
            'reply': 'Reviewer Reply'}[action]
        reviewer_name = td.find('td a').text()
        assert ((reviewer_name == self.reviewer.name) or
                (reviewer_name == self.other_reviewer.name))
def test_item_history_pagination(self):
    """Version history paginates at 10 versions, newest first pages."""
    addon = self.addons['Public']
    addon.current_version.update(created=self.days_ago(366))
    # Add versions 1.0 to 1.9. (range(10) — the explicit 0 start was
    # redundant.)
    for i in range(10):
        version_factory(
            addon=addon, version=f'1.{i}', created=self.days_ago(365 - i))
    response = self.client.get(self.url)
    assert response.status_code == 200
    doc = pq(response.content)
    table = doc('#versions-history .review-files')
    ths = table.children('tr > th')
    assert ths.length == 10
    # Original version should not be there any more, it's on the second
    # page. Versions on the page should be displayed in chronological order
    assert '1.0' in ths.eq(0).text()
    assert '1.1' in ths.eq(1).text()
    assert '1.9' in ths.eq(9).text()
    # The original 0.1 version lives alone on the second page.
    response = self.client.get(self.url, {'page': 2})
    assert response.status_code == 200
    doc = pq(response.content)
    table = doc('#versions-history .review-files')
    ths = table.children('tr > th')
    assert ths.length == 1
    assert '0.1' in ths.eq(0).text()
def test_item_history_with_unlisted_versions_too(self):
    """Unlisted versions don't pollute the listed review history."""
    # Throw in an unlisted version to be ignored.
    version_factory(
        version=u'0.2', addon=self.addon,
        channel=amo.RELEASE_CHANNEL_UNLISTED,
        file_kw={'status': amo.STATUS_APPROVED})
    self.test_item_history()
def test_item_history_with_unlisted_review_page(self):
    """The unlisted review page's history ignores listed versions."""
    self.addon.versions.update(channel=amo.RELEASE_CHANNEL_UNLISTED)
    self.version.reload()
    # Throw in a listed version to be ignored.
    version_factory(
        version=u'0.2', addon=self.addon,
        channel=amo.RELEASE_CHANNEL_LISTED,
        file_kw={'status': amo.STATUS_APPROVED})
    self.url = reverse('reviewers.review', args=[
        'unlisted', self.addon.slug])
    self.grant_permission(self.reviewer, 'Addons:ReviewUnlisted')
    self.test_item_history(channel=amo.RELEASE_CHANNEL_UNLISTED)
def test_item_history_compat_ordered(self):
    """ Make sure that apps in compatibility are ordered. """
    av = AppVersion.objects.all()[0]
    v = self.addon.versions.all()[0]
    # Add Android compatibility alongside the existing Firefox one.
    ApplicationsVersions.objects.create(
        version=v, application=amo.ANDROID.id, min=av, max=av)
    assert self.addon.versions.count() == 1
    url = reverse('reviewers.review', args=[self.addon.slug])
    response = self.client.get(url)
    assert response.status_code == 200
    doc = pq(response.content)
    icons = doc('.listing-body .app-icon')
    # Icons are rendered in a deterministic (ordered) sequence.
    assert icons.eq(0).attr('title') == 'Firefox for Android'
    assert icons.eq(1).attr('title') == 'Firefox'
def test_item_history_weight(self):
    """ Make sure the weight is shown on the review page"""
    AutoApprovalSummary.objects.create(
        version=self.version, verdict=amo.AUTO_APPROVED,
        weight=284, weight_info={'fôo': 200, 'bär': 84})
    self.grant_permission(self.reviewer, 'Addons:Review')
    url = reverse('reviewers.review', args=[self.addon.slug])
    response = self.client.get(url)
    assert response.status_code == 200
    doc = pq(response.content)
    risk = doc('.listing-body .file-weight')
    assert risk.text() == "Weight: 284"
    # The weight breakdown is exposed via the title/tooltip attribute.
    assert risk.attr['title'] == 'bär: 84\nfôo: 200'
def test_scanners_score(self):
    """Scanners score shows 'n/a' without results, a percentage with."""
    self.grant_permission(self.reviewer, 'Addons:Review')
    url = reverse('reviewers.review', args=[self.addon.slug])
    # Without a score.
    response = self.client.get(url)
    assert response.status_code == 200
    doc = pq(response.content)
    score = doc('.listing-body .scanners-score')
    assert score.text() == "Scanners score: n/a ?"
    # With a score.
    ScannerResult.objects.create(version=self.version, scanner=MAD,
                                 score=0.1)
    response = self.client.get(url)
    assert response.status_code == 200
    doc = pq(response.content)
    score = doc('.listing-body .scanners-score')
    # 0.1 is rendered as a percentage.
    assert score.text() == "Scanners score: 10% ?"
def test_item_history_notes(self):
    """Release notes and approval notes both appear in version history."""
    version = self.addon.versions.all()[0]
    version.release_notes = 'hi'
    version.approval_notes = 'secret hi'
    version.save()
    response = self.client.get(self.url)
    assert response.status_code == 200
    doc = pq(response.content)('#versions-history .review-files')
    release_notes = doc('.activity_version')
    assert release_notes.length == 1
    assert release_notes.text() == 'hi'
    approval_notes = doc('.activity_approval')
    assert approval_notes.length == 1
    assert approval_notes.text() == 'secret hi'
def test_item_history_header(self):
    """The version history header shows the approval status."""
    response = self.client.get(self.url)
    assert response.status_code == 200
    doc = pq(response.content)
    header_text = doc(
        '#versions-history .review-files .listing-header .light').text()
    assert 'Approved' in header_text
def test_item_history_comment(self):
    """A posted comment shows up in the version history as 'Commented'."""
    # Add Comment.
    self.client.post(self.url, {'action': 'comment',
                                'comments': 'hello sailor'})
    response = self.client.get(self.url)
    assert response.status_code == 200
    doc = pq(response.content)('#versions-history .review-files')
    assert doc('th').eq(1).text() == 'Commented'
    assert doc('.history-comment').text() == 'hello sailor'
def test_item_history_pending_rejection(self):
    """A pending-rejection flag adds a scheduled-rejection note in the
    version history."""
    response = self.client.get(self.url)
    assert response.status_code == 200
    doc = pq(response.content)('#versions-history .review-files')
    assert doc('.pending-rejection') == []
    # The extra minute keeps the deadline comfortably over one hour away
    # so the humanized delta reads "1 hour".
    VersionReviewerFlags.objects.create(
        version=self.version,
        pending_rejection=datetime.now() + timedelta(hours=1, minutes=1))
    response = self.client.get(self.url)
    assert response.status_code == 200
    doc = pq(response.content)('#versions-history .review-files')
    assert doc('.pending-rejection').text() == (
        '· Scheduled for rejection in 1\xa0hour'
    )
def test_item_history_pending_rejection_but_latest_is_unreviewed(self):
    """The pending-rejection message changes when the latest version is
    awaiting review instead of public."""
    # Adding a non-pending rejection as the latest version shouldn't change
    # anything if it's public.
    VersionReviewerFlags.objects.create(
        version=self.version,
        pending_rejection=datetime.now() + timedelta(hours=1, minutes=1))
    self.addon.current_version.update(created=self.days_ago(366))
    latest_version = version_factory(addon=self.addon)
    response = self.client.get(self.url)
    assert response.status_code == 200
    doc = pq(response.content)('#versions-history .review-files')
    assert doc('.pending-rejection').text() == (
        '· Scheduled for rejection in 1\xa0hour'
    )
    # If the latest version is not pending rejection and unreviewed, we
    # won't automatically reject versions pending rejection even if the
    # deadline has passed - so the message changes.
    latest_version.current_file.update(status=amo.STATUS_AWAITING_REVIEW)
    response = self.client.get(self.url)
    assert response.status_code == 200
    doc = pq(response.content)('#versions-history .review-files')
    assert doc('.pending-rejection').text() == (
        '· Pending Rejection on review of new version'
    )
def test_item_history_pending_rejection_other_pages(self):
    """Pending rejections on other history pages are summarized with a
    count note."""
    self.addon.current_version.update(created=self.days_ago(366))
    for i in range(10):
        # Add versions 1.0 to 1.9. Schedule a couple for future rejection
        # (the date doesn't matter).
        version = version_factory(
            addon=self.addon, version=f'1.{i}',
            created=self.days_ago(365 - i))
        # `i % 5 == 0` replaces the convoluted `not bool(i % 5)` —
        # identical truth table, clearer intent (flags 1.0 and 1.5).
        if i % 5 == 0:
            VersionReviewerFlags.objects.create(
                version=version, pending_rejection=datetime.now())
    response = self.client.get(self.url)
    assert response.status_code == 200
    doc = pq(response.content)
    ths = doc('#versions-history tr.listing-header th')
    assert ths.length == 10
    # Original version should not be there any more, it's on the second
    # page. Versions on the page should be displayed in chronological order
    # Versions 1.0, and 1.5 are pending rejection.
    assert 'Scheduled for rejection in' in ths.eq(0).text()
    assert 'Scheduled for rejection in' in ths.eq(5).text()
    # Make sure the message doesn't appear on the rest of the versions.
    for num in [1, 2, 3, 4, 6, 7, 8, 9]:
        assert 'Scheduled for rejection in' not in ths.eq(num).text()
    # There are no other versions pending rejection in other pages.
    span = doc('#review-files-header .other-pending-rejection')
    assert span.length == 0
    # Load the second page. This time there should be a message indicating
    # there are flagged versions in other pages.
    response = self.client.get(self.url, {'page': 2})
    assert response.status_code == 200
    doc = pq(response.content)
    span = doc('#review-files-header .other-pending-rejection')
    assert span.length == 1
    assert span.text() == '2 versions pending rejection on other pages.'
def test_files_in_item_history(self):
    """After approval, the history row shows the install link."""
    data = {'action': 'public', 'operating_systems': 'win',
            'applications': 'something', 'comments': 'something'}
    self.client.post(self.url, data)
    response = self.client.get(self.url)
    assert response.status_code == 200
    doc = pq(response.content)
    items = doc('#versions-history .review-files .files .file-info')
    assert items.length == 1
    assert items.find('a.reviewers-install').text() == 'All Platforms'
def test_no_items(self):
    """With no review activity the "no activity" placeholder is shown."""
    response = self.client.get(self.url)
    assert response.status_code == 200
    doc = pq(response.content)
    placeholder = doc('#versions-history .review-files .no-activity')
    assert placeholder.length == 1
def test_action_links(self):
    """Plain reviewers only get the "View Product Page" action link."""
    response = self.client.get(self.url)
    assert response.status_code == 200
    doc = pq(response.content)
    expected = [
        ('View Product Page', self.addon.get_url_path()),
    ]
    check_links(expected, doc('#actions-addon a'), verify=False)
def test_action_links_as_admin(self):
    """Admins additionally get "Edit" and "Admin Page" action links."""
    self.login_as_admin()
    response = self.client.get(self.url)
    assert response.status_code == 200
    doc = pq(response.content)
    expected = [
        ('View Product Page', self.addon.get_url_path()),
        ('Edit', self.addon.get_dev_url()),
        ('Admin Page',
         reverse('admin:addons_addon_change', args=[self.addon.id])),
    ]
    check_links(expected, doc('#actions-addon a'), verify=False)
def test_unlisted_addon_action_links_as_admin(self):
    """No "View Product Page" link for unlisted addons, "edit"/"manage" links
    for the admins."""
    self.make_addon_unlisted(self.addon)
    self.login_as_admin()
    response = self.client.get(self.url)
    assert response.status_code == 200
    doc = pq(response.content)
    expected = [
        ('Unlisted Review Page', reverse(
            'reviewers.review', args=('unlisted', self.addon.id))),
        ('Edit', self.addon.get_dev_url()),
        ('Admin Page', reverse(
            'admin:addons_addon_change', args=[self.addon.id])),
    ]
    check_links(expected, doc('#actions-addon a'), verify=False)
def test_mixed_channels_action_links_as_admin(self):
    """Mixed-channel add-ons show admins both the product page and the
    unlisted review page links."""
    self.make_addon_unlisted(self.addon)
    version_factory(
        addon=self.addon, channel=amo.RELEASE_CHANNEL_LISTED,
        file_kw={'status': amo.STATUS_AWAITING_REVIEW})
    self.addon.update(status=amo.STATUS_NOMINATED)
    self.login_as_admin()
    response = self.client.get(self.url)
    assert response.status_code == 200
    doc = pq(response.content)
    expected = [
        ('View Product Page', self.addon.get_url_path()),
        ('Unlisted Review Page', reverse(
            'reviewers.review', args=('unlisted', self.addon.id))),
        ('Edit', self.addon.get_dev_url()),
        ('Admin Page', reverse(
            'admin:addons_addon_change', args=[self.addon.id])),
    ]
    check_links(expected, doc('#actions-addon a'), verify=False)
def test_mixed_channels_action_links_as_admin_on_unlisted_review(self):
    """On the unlisted review page of a mixed-channel add-on, admins get a
    link back to the listed review page."""
    self.make_addon_unlisted(self.addon)
    version_factory(
        addon=self.addon, channel=amo.RELEASE_CHANNEL_LISTED,
        file_kw={'status': amo.STATUS_AWAITING_REVIEW})
    self.addon.update(status=amo.STATUS_NOMINATED)
    self.login_as_admin()
    self.url = reverse(
        'reviewers.review', args=('unlisted', self.addon.slug))
    response = self.client.get(self.url)
    assert response.status_code == 200
    doc = pq(response.content)
    expected = [
        ('View Product Page', self.addon.get_url_path()),
        ('Listed Review Page',
         reverse('reviewers.review', args=(self.addon.id,))),
        ('Edit', self.addon.get_dev_url()),
        ('Admin Page',
         reverse('admin:addons_addon_change', args=[self.addon.id])),
    ]
    check_links(expected, doc('#actions-addon a'), verify=False)
def test_mixed_channels_action_links_as_admin_deleted_addon(self):
    """Deleted mixed-channel add-ons drop product page/edit links; only
    the unlisted review and admin pages remain."""
    self.make_addon_unlisted(self.addon)
    version_factory(
        addon=self.addon, channel=amo.RELEASE_CHANNEL_LISTED,
        file_kw={'status': amo.STATUS_AWAITING_REVIEW})
    self.addon.update(status=amo.STATUS_NOMINATED)
    self.addon.delete()
    self.login_as_admin()
    # Deleted add-ons are addressed by pk, not slug.
    self.url = reverse(
        'reviewers.review', args=('listed', self.addon.id))
    response = self.client.get(self.url)
    assert response.status_code == 200
    doc = pq(response.content)
    expected = [
        ('Unlisted Review Page', reverse(
            'reviewers.review', args=('unlisted', self.addon.id))),
        ('Admin Page', reverse(
            'admin:addons_addon_change', args=[self.addon.id])),
    ]
    check_links(expected, doc('#actions-addon a'), verify=False)
def test_mixed_channels_action_links_as_admin_unlisted_deleted_addon(self):
    """Unlisted review page of a deleted mixed-channel add-on keeps only
    the listed review and admin page links."""
    self.make_addon_unlisted(self.addon)
    version_factory(
        addon=self.addon, channel=amo.RELEASE_CHANNEL_LISTED,
        file_kw={'status': amo.STATUS_AWAITING_REVIEW})
    self.addon.update(status=amo.STATUS_NOMINATED)
    self.addon.delete()
    self.login_as_admin()
    # Deleted add-ons are addressed by pk, not slug.
    self.url = reverse(
        'reviewers.review', args=('unlisted', self.addon.id))
    response = self.client.get(self.url)
    assert response.status_code == 200
    doc = pq(response.content)
    expected = [
        ('Listed Review Page',
         reverse('reviewers.review', args=(self.addon.id,))),
        ('Admin Page',
         reverse('admin:addons_addon_change', args=[self.addon.id])),
    ]
    check_links(expected, doc('#actions-addon a'), verify=False)
def test_mixed_channels_action_links_as_regular_reviewer(self):
    """Regular reviewers only see the product page link on mixed-channel
    add-ons — no unlisted/admin links."""
    self.make_addon_unlisted(self.addon)
    version_factory(
        addon=self.addon, channel=amo.RELEASE_CHANNEL_LISTED,
        file_kw={'status': amo.STATUS_AWAITING_REVIEW})
    self.addon.update(status=amo.STATUS_NOMINATED)
    self.login_as_reviewer()
    response = self.client.get(self.url)
    assert response.status_code == 200
    doc = pq(response.content)
    expected = [
        ('View Product Page', self.addon.get_url_path()),
    ]
    check_links(expected, doc('#actions-addon a'), verify=False)
def test_admin_links_as_non_admin(self):
    """A plain reviewer only sees a single entry in the actions list."""
    self.login_as_reviewer()
    res = self.client.get(self.url)
    assert res.status_code == 200
    action_items = pq(res.content)('#actions-addon li')
    assert action_items.length == 1
def test_extra_actions_subscribe_checked_state(self):
    """The per-channel "notify me" checkboxes reflect whether the
    reviewer has a ReviewerSubscription for that channel."""
    self.grant_permission(self.reviewer, 'Addons:ReviewUnlisted')
    self.login_as_reviewer()
    response = self.client.get(self.url)
    assert response.status_code == 200
    doc = pq(response.content)
    # No subscriptions yet: neither checkbox is checked.
    subscribe_listed_input = doc('#notify_new_listed_versions')[0]
    assert 'checked' not in subscribe_listed_input.attrib
    subscribe_unlisted_input = doc('#notify_new_unlisted_versions')[0]
    assert 'checked' not in subscribe_unlisted_input.attrib
    # Subscribing to the listed channel checks the listed checkbox.
    ReviewerSubscription.objects.create(
        addon=self.addon, user=self.reviewer,
        channel=amo.RELEASE_CHANNEL_LISTED)
    response = self.client.get(self.url)
    assert response.status_code == 200
    doc = pq(response.content)
    subscribe_input = doc('#notify_new_listed_versions')[0]
    assert subscribe_input.attrib['checked'] == 'checked'
    # Subscribing to the unlisted channel checks the unlisted checkbox.
    ReviewerSubscription.objects.create(
        addon=self.addon, user=self.reviewer,
        channel=amo.RELEASE_CHANNEL_UNLISTED)
    response = self.client.get(self.url)
    assert response.status_code == 200
    doc = pq(response.content)
    subscribe_input = doc('#notify_new_unlisted_versions')[0]
    assert subscribe_input.attrib['checked'] == 'checked'
def test_extra_actions_token(self):
    """The API token cookie is exposed via data-api-token on the page."""
    self.login_as_reviewer()
    self.client.cookies[API_TOKEN_COOKIE] = 'youdidntsaythemagicword'
    res = self.client.get(self.url)
    assert res.status_code == 200
    page = pq(res.content)
    extracted_token = page('#extra-review-actions').attr('data-api-token')
    assert extracted_token == 'youdidntsaythemagicword'
def test_extra_actions_not_for_reviewers(self):
    """None of the admin-only extra actions are rendered for a regular
    reviewer, even when every admin flag that would show them is set."""
    AddonReviewerFlags.objects.create(
        addon=self.addon,
        auto_approval_disabled=True,
        needs_admin_code_review=True,
        needs_admin_content_review=True,
        needs_admin_theme_review=True,
        auto_approval_delayed_until=datetime.now() + timedelta(hours=1))
    VersionReviewerFlags.objects.create(
        version=self.addon.current_version,
        pending_rejection=datetime.now())
    self.login_as_reviewer()
    response = self.client.get(self.url)
    assert response.status_code == 200
    doc = pq(response.content)
    assert not doc('#force_disable_addon')
    assert not doc('#force_enable_addon')
    assert not doc('#block_addon')
    assert not doc('#edit_addon_block')
    assert not doc('#clear_admin_code_review')
    assert not doc('#clear_admin_content_review')
    assert not doc('#clear_admin_theme_review')
    assert not doc('#disable_auto_approval')
    assert not doc('#enable_auto_approval')
    assert not doc('#clear_auto_approval_delayed_until')
    assert not doc('#clear_pending_rejections')
    assert not doc('#deny_resubmission')
    assert not doc('#allow_resubmission')
def test_extra_actions_admin_disable_enable(self):
    """Admins see force-disable (visible) and force-enable (hidden) for a
    public add-on; the clear-auto-approval-delay action only appears when
    a delay exists and lies in the future."""
    self.login_as_admin()
    response = self.client.get(self.url)
    assert response.status_code == 200
    doc = pq(response.content)
    assert doc('#force_disable_addon')
    elem = doc('#force_disable_addon')[0]
    assert 'hidden' not in elem.getparent().attrib.get('class', '')
    assert doc('#force_enable_addon')
    elem = doc('#force_enable_addon')[0]
    assert 'hidden' in elem.getparent().attrib.get('class', '')
    # Not present because it hasn't been set yet
    assert not doc('#clear_auto_approval_delayed_until')
    flags = AddonReviewerFlags.objects.create(
        addon=self.addon, auto_approval_delayed_until=self.days_ago(1))
    response = self.client.get(self.url)
    assert response.status_code == 200
    doc = pq(response.content)
    # Still not present because it's in the past.
    assert not doc('#clear_auto_approval_delayed_until')
    flags.update(
        auto_approval_delayed_until=datetime.now() + timedelta(hours=24))
    response = self.client.get(self.url)
    assert response.status_code == 200
    doc = pq(response.content)
    assert doc('#clear_auto_approval_delayed_until')
def test_no_resubmission_buttons_when_addon_is_not_deleted(self):
    """Deny/allow resubmission actions only apply to deleted add-ons."""
    self.login_as_admin()
    page = pq(self.client.get(self.url).content)
    assert not page('#deny_resubmission')
    assert not page('#allow_resubmission')
def test_resubmission_buttons_are_displayed_for_deleted_addons(self):
    """For a deleted add-on whose GUID is not denied, the "deny" button
    is visible and the "allow" button is rendered but hidden."""
    self.login_as_admin()
    self.addon.update(status=amo.STATUS_DELETED)
    assert not self.addon.is_guid_denied
    response = self.client.get(self.url)
    assert response.status_code == 200
    doc = pq(response.content)
    # The "deny" button is visible when the GUID is not denied.
    assert doc('#deny_resubmission')
    elem = doc('#deny_resubmission')[0]
    assert 'hidden' not in elem.getparent().attrib.get('class', '')
    # The "allow" button is hidden when the GUID is not denied.
    assert doc('#allow_resubmission')
    elem = doc('#allow_resubmission')[0]
    assert 'hidden' in elem.getparent().attrib.get('class', '')
def test_resubmission_buttons_are_displayed_for_deleted_addons_and_denied_guid(self):  # noqa
    """For a deleted add-on whose GUID is denied, the buttons swap:
    "deny" is hidden and "allow" is visible."""
    self.login_as_admin()
    self.addon.update(status=amo.STATUS_DELETED)
    self.addon.deny_resubmission()
    assert self.addon.is_guid_denied
    response = self.client.get(self.url)
    assert response.status_code == 200
    doc = pq(response.content)
    # The "deny" button is hidden when the GUID is denied.
    assert doc('#deny_resubmission')
    elem = doc('#deny_resubmission')[0]
    assert 'hidden' in elem.getparent().attrib.get('class', '')
    # The "allow" button is visible when the GUID is denied.
    assert doc('#allow_resubmission')
    elem = doc('#allow_resubmission')[0]
    assert 'hidden' not in elem.getparent().attrib.get('class', '')
def test_admin_block_actions(self):
    """Exactly one block-related action shows at a time: "block" when no
    Block exists, "edit block" when one does, and a link to the pending
    BlocklistSubmission when the guid is part of one."""
    self.login_as_admin()
    assert not self.addon.block
    response = self.client.get(self.url)
    assert response.status_code == 200
    doc = pq(response.content)
    assert doc('#block_addon')
    assert not doc('#edit_addon_block')
    assert not doc('#edit_addon_blocklistsubmission')
    assert doc('#block_addon')[0].attrib.get('href') == (
        reverse('admin:blocklist_block_addaddon', args=(self.addon.id,)))
    Block.objects.create(
        addon=self.addon, updated_by=user_factory())
    response = self.client.get(self.url)
    assert response.status_code == 200
    doc = pq(response.content)
    assert not doc('#block_addon')
    assert doc('#edit_addon_block')
    assert not doc('#edit_addon_blocklistsubmission')
    assert doc('#edit_addon_block')[0].attrib.get('href') == (
        reverse('admin:blocklist_block_addaddon', args=(self.addon.id,)))
    # If the guid is in a pending submission we show a link to that instead
    subm = BlocklistSubmission.objects.create(input_guids=self.addon.guid)
    response = self.client.get(self.url)
    assert response.status_code == 200
    doc = pq(response.content)
    assert not doc('#block_addon')
    assert not doc('#edit_addon_block')
    blocklistsubmission_block = doc('#edit_addon_blocklistsubmission')
    assert blocklistsubmission_block
    assert blocklistsubmission_block[0].attrib.get('href') == (
        reverse(
            'admin:blocklist_blocklistsubmission_change', args=(subm.id,)))
def test_unflag_option_forflagged_as_admin(self):
    """Only the "clear admin code review" action is shown when just the
    needs_admin_code_review flag is set; the content and theme variants
    stay absent."""
    self.login_as_admin()
    AddonReviewerFlags.objects.create(
        addon=self.addon, needs_admin_code_review=True)
    response = self.client.get(self.url)
    assert response.status_code == 200
    doc = pq(response.content)
    assert doc('#clear_admin_code_review').length == 1
    assert doc('#clear_admin_content_review').length == 0
    # Bug fix: the original asserted #clear_admin_content_review twice and
    # never checked the theme selector, unlike the sibling content/theme
    # tests which each verify all three actions.
    assert doc('#clear_admin_theme_review').length == 0
def test_unflag_content_option_forflagged_as_admin(self):
    """Only the "clear admin content review" action is shown when just
    the needs_admin_content_review flag is set."""
    self.login_as_admin()
    AddonReviewerFlags.objects.create(
        addon=self.addon,
        needs_admin_code_review=False,
        needs_admin_content_review=True)
    response = self.client.get(self.url)
    assert response.status_code == 200
    doc = pq(response.content)
    assert doc('#clear_admin_code_review').length == 0
    assert doc('#clear_admin_content_review').length == 1
    assert doc('#clear_admin_theme_review').length == 0
def test_unflag_theme_option_forflagged_as_admin(self):
    """Only the "clear admin theme review" action is shown when just the
    needs_admin_theme_review flag is set."""
    self.login_as_admin()
    AddonReviewerFlags.objects.create(
        addon=self.addon,
        needs_admin_code_review=False,
        needs_admin_content_review=False,
        needs_admin_theme_review=True)
    response = self.client.get(self.url)
    assert response.status_code == 200
    doc = pq(response.content)
    assert doc('#clear_admin_code_review').length == 0
    assert doc('#clear_admin_content_review').length == 0
    assert doc('#clear_admin_theme_review').length == 1
def test_disable_auto_approvals_as_admin(self):
    """Admins can toggle auto-approval for extensions, dictionaries and
    search plugins, but not for static themes (never auto-approved)."""
    self.login_as_admin()
    response = self.client.get(self.url)
    assert response.status_code == 200
    doc = pq(response.content)
    # Auto-approval currently enabled: "disable" shown, "enable" hidden.
    assert doc('#disable_auto_approval')
    elem = doc('#disable_auto_approval')[0]
    assert 'hidden' not in elem.getparent().attrib.get('class', '')
    assert doc('#enable_auto_approval')
    elem = doc('#enable_auto_approval')[0]
    assert 'hidden' in elem.getparent().attrib.get('class', '')
    # Still present for dictionaries
    self.addon.update(type=amo.ADDON_DICT)
    response = self.client.get(self.url)
    assert response.status_code == 200
    doc = pq(response.content)
    assert doc('#disable_auto_approval')
    assert doc('#enable_auto_approval')
    # And search plugins
    self.addon.update(type=amo.ADDON_SEARCH)
    response = self.client.get(self.url)
    assert response.status_code == 200
    doc = pq(response.content)
    assert doc('#disable_auto_approval')
    assert doc('#enable_auto_approval')
    # Both of them should be absent on static themes, which are not
    # auto-approved.
    self.addon.update(type=amo.ADDON_STATICTHEME)
    response = self.client.get(self.url)
    assert response.status_code == 200
    doc = pq(response.content)
    assert not doc('#disable_auto_approval')
    assert not doc('#enable_auto_approval')
def test_enable_auto_approvals_as_admin_auto_approvals_disabled(self):
    """With auto-approval disabled the button visibility flips: "disable"
    is hidden, "enable" is visible; static themes still show neither."""
    self.login_as_admin()
    AddonReviewerFlags.objects.create(
        addon=self.addon, auto_approval_disabled=True)
    response = self.client.get(self.url)
    assert response.status_code == 200
    doc = pq(response.content)
    assert doc('#disable_auto_approval')
    elem = doc('#disable_auto_approval')[0]
    assert 'hidden' in elem.getparent().attrib.get('class', '')
    assert doc('#enable_auto_approval')
    elem = doc('#enable_auto_approval')[0]
    assert 'hidden' not in elem.getparent().attrib.get('class', '')
    # Both of them should be absent on static themes, which are not
    # auto-approved.
    self.addon.update(type=amo.ADDON_STATICTHEME)
    response = self.client.get(self.url)
    assert response.status_code == 200
    doc = pq(response.content)
    assert not doc('#disable_auto_approval')
    assert not doc('#enable_auto_approval')
def test_clear_pending_rejections_as_admin(self):
    """The clear-pending-rejections action only appears once a version
    actually has a pending rejection flag."""
    self.login_as_admin()
    response = self.client.get(self.url)
    assert response.status_code == 200
    doc = pq(response.content)
    assert not doc('#clear_pending_rejections')
    VersionReviewerFlags.objects.create(
        version=self.addon.current_version,
        pending_rejection=datetime.now())
    response = self.client.get(self.url)
    assert response.status_code == 200
    doc = pq(response.content)
    assert doc('#clear_pending_rejections')
def test_no_public(self):
    """The files section links Validation and Contents for each version
    with an approved file (three links total including download)."""
    has_public = self.version.files.filter(
        status=amo.STATUS_APPROVED).exists()
    assert has_public
    response = self.client.get(self.url)
    assert response.status_code == 200
    doc = pq(response.content)
    validation = doc.find('.files')
    assert validation.find('a').eq(1).text() == "Validation"
    assert validation.find('a').eq(2).text() == "Contents"
    assert validation.find('a').length == 3
def test_public_search(self):
    """Approved search add-ons render a single file-info entry in the
    versions history."""
    self.version.files.update(status=amo.STATUS_APPROVED)
    self.addon.update(type=amo.ADDON_SEARCH)
    response = self.client.get(self.url)
    assert response.status_code == 200
    doc = pq(response.content)
    assert doc('#versions-history .files ul .file-info').length == 1
def test_version_deletion(self):
    """
    Make sure that we still show review history for deleted versions.
    """
    # Add a new version to the add-on.
    addon = addon_factory(
        status=amo.STATUS_NOMINATED, name='something',
        version_kw={'version': '0.2'},
        file_kw={'status': amo.STATUS_AWAITING_REVIEW})
    assert self.addon.versions.count() == 1
    self.review_version(self.version, self.url)
    # Re-parent the factory add-on's version onto the add-on under test,
    # dated one day later so it sorts after version 0.1.
    v2 = addon.versions.all()[0]
    v2.addon = self.addon
    v2.created = v2.created + timedelta(days=1)
    v2.save()
    self.review_version(v2, self.url)
    assert self.addon.versions.count() == 2
    response = self.client.get(self.url)
    assert response.status_code == 200
    doc = pq(response.content)
    # View the history verify two versions:
    ths = doc('#versions-history .review-files > tr > th:first-child')
    assert '0.1' in ths.eq(0).text()
    assert '0.2' in ths.eq(1).text()
    # Delete a version:
    v2.delete()
    # Verify two versions, one deleted:
    response = self.client.get(self.url)
    assert response.status_code == 200
    doc = pq(response.content)
    ths = doc('#versions-history .review-files > tr > th:first-child')
    assert ths.length == 2
    assert '0.1' in ths.text()
def test_no_versions(self):
    """The review page should still load if there are no versions."""
    assert self.client.get(self.url).status_code == 200
    response = self.client.post(self.url, {'action': 'comment',
                                           'comments': 'hello sailor'})
    assert response.status_code == 302
    self.assert3xx(response, reverse('reviewers.queue_extension'),
                   status_code=302)
    self.version.delete()
    # Regular reviewer can still see it since the deleted version was
    # listed.
    assert self.client.get(self.url).status_code == 200
    response = self.client.post(self.url, {'action': 'comment',
                                           'comments': 'hello sailor'})
    assert response.status_code == 302
    self.assert3xx(response, reverse('reviewers.queue_extension'),
                   status_code=302)
    # Now they need unlisted permission cause we can't find a listed
    # version, even deleted.
    self.version.delete(hard=True)
    assert self.client.get(self.url).status_code == 404
    # Reviewer with more powers can look.
    self.grant_permission(self.reviewer, 'Addons:ReviewUnlisted')
    assert self.client.get(self.url).status_code == 200
    response = self.client.post(self.url, {'action': 'comment',
                                           'comments': 'hello sailor'})
    assert response.status_code == 302
    self.assert3xx(response, reverse('reviewers.queue_extension'),
                   status_code=302)
def test_addon_deleted(self):
    """The review page should still load for deleted addons."""
    self.addon.delete()
    # Deleted add-ons lose their slug, so address the page by pk.
    self.url = reverse('reviewers.review', args=[self.addon.pk])
    assert self.client.get(self.url).status_code == 200
    response = self.client.post(self.url, {'action': 'comment',
                                           'comments': 'hello sailor'})
    assert response.status_code == 302
    self.assert3xx(response, reverse('reviewers.queue_extension'),
                   status_code=302)
@mock.patch('olympia.reviewers.utils.sign_file')
def review_version(self, version, url, mock_sign):
    """Helper: post a review for `version` at `url` and return the action
    used ('public' for listed versions, 'reply' otherwise).

    sign_file is mocked so no real signing happens; for listed versions we
    assert the mock was invoked.
    """
    if version.channel == amo.RELEASE_CHANNEL_LISTED:
        # Approving requires a file awaiting review.
        version.files.all()[0].update(status=amo.STATUS_AWAITING_REVIEW)
        action = 'public'
    else:
        action = 'reply'
    data = {
        'action': action,
        'operating_systems': 'win',
        'applications': 'something',
        'comments': 'something',
    }
    self.client.post(url, data)
    if version.channel == amo.RELEASE_CHANNEL_LISTED:
        assert mock_sign.called
    return action
def test_eula_displayed(self):
    """The EULA link only appears once the add-on has a EULA, and carries
    a ?channel=unlisted param on the unlisted review page."""
    assert not bool(self.addon.eula)
    response = self.client.get(self.url)
    assert response.status_code == 200
    self.assertNotContains(response, 'View End-User License Agreement')
    self.addon.eula = 'Test!'
    self.addon.save()
    assert bool(self.addon.eula)
    response = self.client.get(self.url)
    assert response.status_code == 200
    self.assertContains(response, 'View End-User License Agreement')
    eula_url = reverse(
        'reviewers.eula', args=(self.addon.slug,))
    # Trailing quote ensures we match the full href with no query string.
    self.assertContains(response, eula_url + '"')
    # The url should pass on the channel param so the backlink works
    self.make_addon_unlisted(self.addon)
    self.login_as_admin()
    unlisted_url = reverse(
        'reviewers.review', args=['unlisted', self.addon.slug])
    response = self.client.get(unlisted_url)
    assert response.status_code == 200
    eula_url = reverse(
        'reviewers.eula', args=(self.addon.slug,))
    self.assertContains(response, eula_url + '?channel=unlisted"')
def test_privacy_policy_displayed(self):
    """The privacy policy link only appears once the add-on has one, and
    carries a ?channel=unlisted param on the unlisted review page."""
    assert self.addon.privacy_policy is None
    response = self.client.get(self.url)
    assert response.status_code == 200
    self.assertNotContains(response, 'View Privacy Policy')
    self.addon.privacy_policy = 'Test!'
    self.addon.save()
    response = self.client.get(self.url)
    assert response.status_code == 200
    self.assertContains(response, 'View Privacy Policy')
    privacy_url = reverse(
        'reviewers.privacy', args=(self.addon.slug,))
    # Trailing quote ensures we match the full href with no query string.
    self.assertContains(response, privacy_url + '"')
    self.make_addon_unlisted(self.addon)
    self.login_as_admin()
    unlisted_url = reverse(
        'reviewers.review', args=['unlisted', self.addon.slug])
    response = self.client.get(unlisted_url)
    assert response.status_code == 200
    privacy_url = reverse(
        'reviewers.privacy', args=(self.addon.slug,))
    self.assertContains(response, privacy_url + '?channel=unlisted"')
def test_requires_payment_indicator(self):
    """The metadata table shows whether the add-on requires payment."""
    assert not self.addon.requires_payment
    res = self.client.get(self.url)
    assert res.status_code == 200
    assert 'No' in pq(res.content)('tr.requires-payment td').text()
    self.addon.update(requires_payment=True)
    res = self.client.get(self.url)
    assert res.status_code == 200
    assert 'Yes' in pq(res.content)('tr.requires-payment td').text()
def test_addon_id_display(self):
    """The add-on GUID is shown in the metadata table."""
    res = self.client.get(self.url)
    assert res.status_code == 200
    guid_cell = pq(res.content)('tr.addon-guid td')
    assert self.addon.guid in guid_cell.text()
def test_amo_id_display(self):
    """The numeric AMO id is shown in the metadata table."""
    res = self.client.get(self.url)
    assert res.status_code == 200
    amo_id_cell = pq(res.content)('tr.addon-amo-id td')
    assert str(self.addon.id) in amo_id_cell.text()
def test_viewing(self):
    """The review_viewing endpoint reports who currently holds the review
    lock; a second user sees the first as the current viewer."""
    url = reverse('reviewers.review_viewing')
    response = self.client.post(url, {'addon_id': self.addon.id})
    data = json.loads(response.content)
    assert data['current'] == self.reviewer.id
    assert data['current_name'] == self.reviewer.name
    assert data['is_user'] == 1
    # Now, login as someone else and test.
    self.login_as_admin()
    response = self.client.post(url, {'addon_id': self.addon.id})
    data = json.loads(response.content)
    assert data['current'] == self.reviewer.id
    assert data['current_name'] == self.reviewer.name
    assert data['is_user'] == 0
# Lets just override this to make the test a bit shorter.
@mock.patch.object(amo, 'REVIEWER_REVIEW_LOCK_LIMIT', 1)
def test_viewing_lock_limit(self):
    """With the lock limit patched to 1, a reviewer's second concurrent
    review page is refused (is_user == 2), while another user can still
    lock it."""
    url = reverse('reviewers.review_viewing')
    response = self.client.post(url, {'addon_id': 1234})
    data = json.loads(response.content)
    assert data['current'] == self.reviewer.id
    assert data['current_name'] == self.reviewer.name
    assert data['is_user'] == 1
    # Second review page is over the limit.
    response = self.client.post(url, {'addon_id': 5678})
    data = json.loads(response.content)
    assert data['current'] == settings.TASK_USER_ID  # Mozilla's task ID.
    assert data['current_name'] == 'Review lock limit reached'
    assert data['is_user'] == 2
    # Now, login as someone else and test. First page is blocked.
    self.login_as_admin()
    response = self.client.post(url, {'addon_id': 1234})
    data = json.loads(response.content)
    assert data['current'] == self.reviewer.id
    assert data['current_name'] == self.reviewer.name
    assert data['is_user'] == 0
    # Second page is available.
    response = self.client.post(url, {'addon_id': 5678})
    data = json.loads(response.content)
    admin = UserProfile.objects.get(username='admin')
    assert data['current'] == admin.id
    assert data['current_name'] == admin.name
    assert data['is_user'] == 1
# Lets just override this to make the test a bit shorter.
@mock.patch.object(amo, 'REVIEWER_REVIEW_LOCK_LIMIT', 1)
def test_viewing_lock_admin(self):
    """Admins are exempt from the review lock limit and can hold locks on
    multiple review pages at once."""
    self.login_as_admin()
    url = reverse('reviewers.review_viewing')
    admin = UserProfile.objects.get(username='admin')
    response = self.client.post(url, {'addon_id': 101})
    data = json.loads(response.content)
    assert data['current'] == admin.id
    assert data['current_name'] == admin.name
    assert data['is_user'] == 1
    # Admin don't have time for no limits.
    response = self.client.post(url, {'addon_id': 202})
    data = json.loads(response.content)
    assert data['current'] == admin.id
    assert data['current_name'] == admin.name
    assert data['is_user'] == 1
def test_viewing_review_unlocks(self):
    """Posting a review clears the "currently viewing" cache lock."""
    viewing_url = reverse('reviewers.review_viewing')
    self.client.post(viewing_url, {'addon_id': self.addon.id})
    lock_key = 'review_viewing:{id}'.format(id=self.addon.id)
    assert cache.get(lock_key) == self.reviewer.id
    self.client.post(
        self.url, {'action': 'comment', 'comments': 'hello sailor'})
    # Processing a review should instantly clear the review lock on it.
    assert cache.get(lock_key) is None
def test_viewing_queue(self):
    """queue_viewing maps each requested addon id to the name of the
    reviewer currently viewing it (unknown ids are omitted)."""
    response = self.client.post(reverse('reviewers.review_viewing'),
                                {'addon_id': self.addon.id})
    data = json.loads(response.content)
    assert data['current'] == self.reviewer.id
    assert data['current_name'] == self.reviewer.name
    assert data['is_user'] == 1
    # Now, login as someone else and test.
    self.login_as_admin()
    response = self.client.get(reverse(
        'reviewers.queue_viewing'),
        {'addon_ids': '%s,4242' % self.addon.id})
    assert response.status_code == 200
    data = json.loads(response.content)
    assert data[str(self.addon.id)] == self.reviewer.name
def test_display_same_files_only_once(self):
    """
    Test whether identical files for different platforms
    show up as one link with the appropriate text.
    """
    version = version_factory(
        addon=self.addon, version='0.2', file_kw=False)
    file_mac = file_factory(version=version, platform=amo.PLATFORM_MAC.id)
    file_android = file_factory(
        version=version, platform=amo.PLATFORM_ANDROID.id)
    # Signing causes the same uploaded file to be different
    # (different hash per platform, same original_hash).
    file_mac.update(hash='xyz789', original_hash='123abc')
    file_android.update(hash='zyx987', original_hash='123abc')
    response = self.client.get(self.url)
    assert response.status_code == 200
    doc = pq(response.content)
    # Both platforms are collapsed into a single install link.
    text = doc('.reviewers-install').eq(1).text()
    assert text == "Mac OS X / Android"
def test_compare_no_link(self):
    """With only one version there is nothing to compare against."""
    res = self.client.get(self.url)
    assert res.status_code == 200
    file_info = pq(res.content)('#versions-history .file-info')
    assert file_info.length == 1
    assert file_info.find('a.compare').length == 0
def test_file_info_for_static_themes(self):
    """Static themes only expose a Download link in the file info — no
    validation/contents links and no compatibility info."""
    self.grant_permission(self.reviewer, 'Addons:ThemeReview')
    self.addon.update(type=amo.ADDON_STATICTHEME)
    response = self.client.get(self.url)
    assert response.status_code == 200
    doc = pq(response.content)
    info = doc('#versions-history .file-info')
    assert info.length == 1
    # Only the download/install link
    assert info.find('a').length == 1
    assert info.find('a')[0].text == u'Download'
    assert b'Compatibility' not in response.content
def test_compare_link(self):
    """A second version renders a compare link against the previously
    approved version via the code-manager URL."""
    first_file = self.addon.current_version.files.all()[0]
    first_file.update(status=amo.STATUS_APPROVED)
    self.addon.current_version.update(created=self.days_ago(2))
    first_version_pk = self.addon.current_version.pk
    new_version = version_factory(addon=self.addon, version='0.2')
    self.addon.update(_current_version=new_version)
    assert self.addon.current_version == new_version
    response = self.client.get(self.url)
    assert response.status_code == 200
    doc = pq(response.content)
    assert response.context['base_version']
    links = doc('#versions-history .file-info .compare')
    expected = [
        code_manager_url(
            'compare', addon_id=self.addon.pk,
            base_version_id=first_version_pk, version_id=new_version.pk),
    ]
    check_links(expected, links, verify=False)
def test_compare_link_auto_approved_ignored(self):
    """Unconfirmed auto-approved versions are skipped when picking the
    base version for the compare link."""
    first_file = self.addon.current_version.files.all()[0]
    first_file.update(status=amo.STATUS_APPROVED)
    self.addon.current_version.update(created=self.days_ago(3))
    first_version_pk = self.addon.current_version.pk
    interim_version = version_factory(addon=self.addon, version='0.2')
    interim_version.update(created=self.days_ago(2))
    AutoApprovalSummary.objects.create(
        version=interim_version, verdict=amo.AUTO_APPROVED)
    new_version = version_factory(addon=self.addon, version='0.3')
    self.addon.update(_current_version=new_version)
    assert self.addon.current_version == new_version
    response = self.client.get(self.url)
    assert response.status_code == 200
    doc = pq(response.content)
    assert response.context['base_version']
    links = doc('#versions-history .file-info .compare')
    # Comparison should be between the last version and the first,
    # ignoring the interim version because it was auto-approved and not
    # manually confirmed by a human.
    expected = [
        code_manager_url(
            'compare', addon_id=self.addon.pk,
            base_version_id=first_version_pk, version_id=new_version.pk),
    ]
    check_links(expected, links, verify=False)
def test_compare_link_auto_approved_but_confirmed_not_ignored(self):
    """A confirmed auto-approved version does count as a valid compare
    base, unlike unconfirmed ones."""
    first_file = self.addon.current_version.files.all()[0]
    first_file.update(status=amo.STATUS_APPROVED)
    self.addon.current_version.update(created=self.days_ago(3))
    confirmed_version = version_factory(addon=self.addon, version='0.2')
    confirmed_version.update(created=self.days_ago(2))
    AutoApprovalSummary.objects.create(
        verdict=amo.AUTO_APPROVED, version=confirmed_version,
        confirmed=True)
    interim_version = version_factory(addon=self.addon, version='0.3')
    interim_version.update(created=self.days_ago(1))
    AutoApprovalSummary.objects.create(
        version=interim_version, verdict=amo.AUTO_APPROVED)
    new_version = version_factory(addon=self.addon, version='0.4')
    self.addon.update(_current_version=new_version)
    assert self.addon.current_version == new_version
    response = self.client.get(self.url)
    assert response.status_code == 200
    doc = pq(response.content)
    assert response.context['base_version']
    links = doc('#versions-history .file-info .compare')
    # Comparison should be between the last version and the second,
    # ignoring the third version because it was auto-approved and not
    # manually confirmed by a human (the second was auto-approved but
    # was manually confirmed).
    expected = [
        code_manager_url(
            'compare', addon_id=self.addon.pk,
            base_version_id=confirmed_version.pk,
            version_id=new_version.pk),
    ]
    check_links(expected, links, verify=False)
def test_compare_link_not_auto_approved_but_confirmed(self):
    """A version approved by a human (NOT_AUTO_APPROVED verdict) is a
    valid compare base even without an explicit confirmation flag."""
    first_file = self.addon.current_version.files.all()[0]
    first_file.update(status=amo.STATUS_APPROVED)
    self.addon.current_version.update(created=self.days_ago(3))
    confirmed_version = version_factory(addon=self.addon, version='0.2')
    confirmed_version.update(created=self.days_ago(2))
    AutoApprovalSummary.objects.create(
        verdict=amo.NOT_AUTO_APPROVED, version=confirmed_version
    )
    new_version = version_factory(addon=self.addon, version='0.3')
    self.addon.update(_current_version=new_version)
    assert self.addon.current_version == new_version
    response = self.client.get(self.url)
    assert response.status_code == 200
    doc = pq(response.content)
    assert response.context['base_version']
    links = doc('#versions-history .file-info .compare')
    # Comparison should be between the last version and the second,
    # because second was approved by human before auto-approval ran on it
    expected = [
        code_manager_url(
            'compare', addon_id=self.addon.pk,
            base_version_id=confirmed_version.pk,
            version_id=new_version.pk),
    ]
    check_links(expected, links, verify=False)
def test_download_sources_link(self):
    """Admins get a source download link; regular reviewers only see a
    note that source code was provided."""
    version = self.addon.current_version
    tdir = temp.gettempdir()
    source_file = temp.NamedTemporaryFile(
        suffix='.zip', dir=tdir, mode='r+')
    # ~2 MB of dummy content to simulate a real source archive.
    source_file.write('a' * (2 ** 21))
    source_file.seek(0)
    version.source = DjangoFile(source_file)
    version.save()
    url = reverse('reviewers.review', args=[self.addon.pk])
    # Admin reviewer: able to download sources.
    user = UserProfile.objects.get(email='admin@mozilla.com')
    self.client.login(email=user.email)
    response = self.client.get(url, follow=True)
    assert response.status_code == 200
    assert b'Download files' in response.content
    # Standard reviewer: should know that sources were provided.
    user = UserProfile.objects.get(email='reviewer@mozilla.com')
    self.client.login(email=user.email)
    response = self.client.get(url, follow=True)
    assert response.status_code == 200
    assert b'The developer has provided source code.' in response.content
def test_translations(self):
    """Name translations are listed grouped by identical value, with
    empty/None entries omitted."""
    self.addon.name = {
        'de': None,
        'en-CA': 'English Translation',
        'en-GB': 'English Translation',  # Duplicate
        'es': '',
        'fr': 'Traduction En Français',
    }
    self.addon.save()
    response = self.client.get(self.url)
    doc = pq(response.content)
    translations = sorted(
        [li.text_content() for li in doc('#name-translations li')]
    )
    # de (None) and es ('') are dropped; en-CA/en-GB are merged onto
    # one line because they share the same value.
    expected = [
        'English (Canadian), English (British): English Translation',
        'English (US): Public',
        'Français: Traduction En Français'
    ]
    assert translations == expected
@mock.patch('olympia.reviewers.utils.sign_file')
def test_approve_recommended_addon(self, mock_sign_file):
    """Approving a recommended add-on approves it, records the
    promoted-group approval, signs the file and redirects to the
    recommended queue."""
    self.version.files.update(status=amo.STATUS_AWAITING_REVIEW)
    self.addon.update(status=amo.STATUS_NOMINATED)
    self.make_addon_promoted(self.addon, RECOMMENDED)
    self.grant_permission(self.reviewer, 'Addons:RecommendedReview')
    response = self.client.post(self.url, {
        'action': 'public',
        'comments': 'all good'
    })
    assert response.status_code == 302
    self.assert3xx(response, reverse('reviewers.queue_recommended'))
    addon = self.get_addon()
    assert addon.status == amo.STATUS_APPROVED
    assert addon.current_version
    assert addon.current_version.all_files[0].status == amo.STATUS_APPROVED
    assert addon.current_version.promoted_approvals.filter(
        group_id=RECOMMENDED.id).exists()
    assert mock_sign_file.called
@mock.patch('olympia.reviewers.utils.sign_file')
def test_admin_flagged_addon_actions_as_admin(self, mock_sign_file):
    """Admins CAN approve an add-on flagged for admin code review."""
    self.version.files.update(status=amo.STATUS_AWAITING_REVIEW)
    self.addon.update(status=amo.STATUS_NOMINATED)
    AddonReviewerFlags.objects.create(
        addon=self.addon, needs_admin_code_review=True)
    self.login_as_admin()
    response = self.client.post(self.url, self.get_dict(action='public'),
                                follow=True)
    assert response.status_code == 200
    addon = self.get_addon()
    assert self.version == addon.current_version
    assert addon.status == amo.STATUS_APPROVED
    assert addon.current_version.files.all()[0].status == (
        amo.STATUS_APPROVED)
    assert mock_sign_file.called
def test_admin_flagged_addon_actions_as_reviewer(self):
    """Non-admin reviewers cannot approve admin-flagged add-ons: the
    'public' action is rejected as an invalid form choice and nothing
    changes, for both the code-review and content-review flags."""
    self.version.files.update(status=amo.STATUS_AWAITING_REVIEW)
    self.addon.update(status=amo.STATUS_NOMINATED)
    AddonReviewerFlags.objects.create(
        addon=self.addon, needs_admin_code_review=True)
    self.login_as_reviewer()
    response = self.client.post(self.url, self.get_dict(action='public'))
    assert response.status_code == 200  # Form error.
    # The add-on status must not change as non-admin reviewers are not
    # allowed to review admin-flagged add-ons.
    addon = self.get_addon()
    assert addon.status == amo.STATUS_NOMINATED
    assert self.version == addon.current_version
    assert addon.current_version.files.all()[0].status == (
        amo.STATUS_AWAITING_REVIEW)
    assert response.context['form'].errors['action'] == (
        [u'Select a valid choice. public is not one of the available '
         u'choices.'])
    # Same if it's the content review flag.
    flags = AddonReviewerFlags.objects.get(addon=self.addon)
    flags.update(
        needs_admin_content_review=True, needs_admin_code_review=False)
    response = self.client.post(self.url, self.get_dict(action='public'))
    assert response.status_code == 200  # Form error.
    addon = self.get_addon()
    assert addon.status == amo.STATUS_NOMINATED
    assert self.version == addon.current_version
    assert addon.current_version.files.all()[0].status == (
        amo.STATUS_AWAITING_REVIEW)
    assert response.context['form'].errors['action'] == (
        [u'Select a valid choice. public is not one of the available '
         u'choices.'])
def test_admin_flagged_addon_actions_as_content_reviewer(self):
    """A content reviewer can approve content on an add-on flagged for
    admin *code* review; only an APPROVE_CONTENT log is written and the
    auto-approval summary stays unconfirmed."""
    AddonReviewerFlags.objects.create(
        addon=self.addon, needs_admin_code_review=True)
    summary = AutoApprovalSummary.objects.create(
        version=self.addon.current_version, verdict=amo.AUTO_APPROVED)
    # Strip existing groups so only the content-review permission is left.
    GroupUser.objects.filter(user=self.reviewer).all().delete()
    self.grant_permission(self.reviewer, 'Addons:ContentReview')
    self.url = reverse(
        'reviewers.review', args=['content', self.addon.slug])
    response = self.client.post(
        self.url, self.get_dict(action='approve_content'))
    assert response.status_code == 302
    summary.reload()
    assert summary.confirmed is None  # We're only doing a content review.
    assert ActivityLog.objects.filter(
        action=amo.LOG.CONFIRM_AUTO_APPROVED.id).count() == 0
    assert ActivityLog.objects.filter(
        action=amo.LOG.APPROVE_CONTENT.id).count() == 1
    a_log = ActivityLog.objects.filter(
        action=amo.LOG.APPROVE_CONTENT.id).get()
    assert a_log.details['version'] == self.addon.current_version.version
    assert a_log.details['comments'] == ''
    self.assert3xx(response, reverse('reviewers.queue_content_review'))
    def test_approve_content_content_review(self):
        """approve_content succeeds for a plain content reviewer, logs a
        single APPROVE_CONTENT entry (comments ignored) and does not confirm
        the auto-approval."""
        GroupUser.objects.filter(user=self.reviewer).all().delete()
        self.url = reverse(
            'reviewers.review', args=['content', self.addon.slug])
        summary = AutoApprovalSummary.objects.create(
            version=self.addon.current_version, verdict=amo.AUTO_APPROVED)
        self.grant_permission(self.reviewer, 'Addons:ContentReview')
        response = self.client.post(self.url, {
            'action': 'approve_content',
            'comments': 'ignore me this action does not support comments'
        })
        assert response.status_code == 302
        summary.reload()
        assert summary.confirmed is None  # We're only doing a content review.
        assert ActivityLog.objects.filter(
            action=amo.LOG.CONFIRM_AUTO_APPROVED.id).count() == 0
        assert ActivityLog.objects.filter(
            action=amo.LOG.APPROVE_CONTENT.id).count() == 1
        a_log = ActivityLog.objects.filter(
            action=amo.LOG.APPROVE_CONTENT.id).get()
        assert a_log.details['version'] == self.addon.current_version.version
        assert a_log.details['comments'] == ''
        self.assert3xx(response, reverse('reviewers.queue_content_review'))
    def test_cant_contentreview_if_admin_content_review_flag_is_set(self):
        """A non-admin content reviewer cannot approve content while the
        needs_admin_content_review flag is set: the form rejects the action
        and no activity is logged."""
        GroupUser.objects.filter(user=self.reviewer).all().delete()
        self.url = reverse(
            'reviewers.review', args=['content', self.addon.slug])
        AutoApprovalSummary.objects.create(
            version=self.addon.current_version, verdict=amo.AUTO_APPROVED)
        AddonReviewerFlags.objects.create(
            addon=self.addon, needs_admin_content_review=True)
        self.grant_permission(self.reviewer, 'Addons:ContentReview')
        response = self.client.post(self.url, {
            'action': 'approve_content',
            'comments': 'ignore me this action does not support comments'
        })
        assert response.status_code == 200  # Form error
        assert ActivityLog.objects.filter(
            action=amo.LOG.APPROVE_CONTENT.id).count() == 0
    def test_content_review_redirect_if_only_permission(self):
        """A reviewer whose only permission is content review is redirected
        from the regular review page to the content review page, for both
        GET and POST."""
        GroupUser.objects.filter(user=self.reviewer).all().delete()
        self.grant_permission(self.reviewer, 'Addons:ContentReview')
        content_review_url = reverse(
            'reviewers.review', args=['content', self.addon.pk])
        response = self.client.get(self.url)
        assert response.status_code == 302
        self.assert3xx(response, content_review_url)
        response = self.client.post(self.url, {'action': 'anything'})
        assert response.status_code == 302
        self.assert3xx(response, content_review_url)
def test_dont_content_review_redirect_if_theme_reviewer_only(self):
GroupUser.objects.filter(user=self.reviewer).all().delete()
self.grant_permission(self.reviewer, 'Addons:ThemeReview')
self.grant_permission(self.reviewer, 'Addons:ContentReview')
self.addon.update(type=amo.ADDON_STATICTHEME)
response = self.client.get(self.url)
assert response.status_code == 200
    def test_cant_postreview_if_admin_content_review_flag_is_set(self):
        """A non-admin reviewer cannot take post-review actions while the
        needs_admin_content_review flag is set: every action fails form
        validation and the add-on/file status stays untouched."""
        AddonReviewerFlags.objects.create(
            addon=self.addon, needs_admin_content_review=True)
        self.grant_permission(self.reviewer, 'Addons:Review')
        for action in ['approve_content', 'reject_multiple_versions']:
            response = self.client.post(self.url, self.get_dict(action=action))
            assert response.status_code == 200  # Form error.
            # The add-on status must not change as non-admin reviewers are not
            # allowed to review admin-flagged add-ons.
            addon = self.get_addon()
            assert addon.status == amo.STATUS_APPROVED
            assert self.version == addon.current_version
            assert addon.current_version.files.all()[0].status == (
                amo.STATUS_APPROVED)
            assert response.context['form'].errors['action'] == (
                [u'Select a valid choice. %s is not one of the available '
                 u'choices.' % action])
            assert ActivityLog.objects.filter(
                action=amo.LOG.CONFIRM_AUTO_APPROVED.id).count() == 0
            assert ActivityLog.objects.filter(
                action=amo.LOG.REJECT_VERSION.id).count() == 0
            assert ActivityLog.objects.filter(
                action=amo.LOG.APPROVE_VERSION.id).count() == 0
    def test_cant_review_static_theme_if_admin_theme_review_flag_is_set(self):
        """A non-admin theme reviewer cannot approve or reject a static
        theme while the needs_admin_theme_review flag is set."""
        self.version.files.update(status=amo.STATUS_AWAITING_REVIEW)
        self.addon.update(
            type=amo.ADDON_STATICTHEME, status=amo.STATUS_NOMINATED)
        AddonReviewerFlags.objects.create(
            addon=self.addon, needs_admin_theme_review=True)
        self.grant_permission(self.reviewer, 'Addons:ThemeReview')
        for action in ['public', 'reject']:
            response = self.client.post(self.url, self.get_dict(action=action))
            assert response.status_code == 200  # Form error.
            # The add-on status must not change as non-admin reviewers are not
            # allowed to review admin-flagged add-ons.
            addon = self.get_addon()
            assert addon.status == amo.STATUS_NOMINATED
            assert self.version == addon.current_version
            assert addon.current_version.files.all()[0].status == (
                amo.STATUS_AWAITING_REVIEW)
            assert response.context['form'].errors['action'] == (
                [u'Select a valid choice. %s is not one of the available '
                 u'choices.' % action])
            assert ActivityLog.objects.filter(
                action=amo.LOG.REJECT_VERSION.id).count() == 0
            assert ActivityLog.objects.filter(
                action=amo.LOG.APPROVE_VERSION.id).count() == 0
    @mock.patch('olympia.reviewers.utils.sign_file')
    def test_admin_can_review_statictheme_if_admin_theme_review_flag_set(
            self, mock_sign_file):
        """An admin theme reviewer CAN approve a static theme flagged for
        admin theme review; approval signs the file."""
        self.version.files.update(status=amo.STATUS_AWAITING_REVIEW)
        self.addon.update(
            type=amo.ADDON_STATICTHEME, status=amo.STATUS_NOMINATED)
        AddonReviewerFlags.objects.create(
            addon=self.addon, needs_admin_theme_review=True)
        self.grant_permission(self.reviewer, 'Addons:ThemeReview')
        self.grant_permission(self.reviewer, 'Reviews:Admin')
        response = self.client.post(self.url, {
            'action': 'public',
            'comments': 'it`s good'
        })
        assert response.status_code == 302
        assert self.get_addon().status == amo.STATUS_APPROVED
        assert mock_sign_file.called
    def test_admin_can_contentreview_if_admin_content_review_flag_is_set(self):
        """An admin content reviewer CAN approve content on an add-on
        flagged for admin content review."""
        GroupUser.objects.filter(user=self.reviewer).all().delete()
        self.url = reverse(
            'reviewers.review', args=['content', self.addon.slug])
        summary = AutoApprovalSummary.objects.create(
            version=self.addon.current_version, verdict=amo.AUTO_APPROVED)
        AddonReviewerFlags.objects.create(
            addon=self.addon, needs_admin_content_review=True)
        self.grant_permission(self.reviewer, 'Addons:ContentReview')
        self.grant_permission(self.reviewer, 'Reviews:Admin')
        response = self.client.post(self.url, {
            'action': 'approve_content',
            'comments': 'ignore me this action does not support comments'
        })
        assert response.status_code == 302
        summary.reload()
        assert summary.confirmed is None  # We're only doing a content review.
        assert ActivityLog.objects.filter(
            action=amo.LOG.CONFIRM_AUTO_APPROVED.id).count() == 0
        assert ActivityLog.objects.filter(
            action=amo.LOG.APPROVE_CONTENT.id).count() == 1
        a_log = ActivityLog.objects.filter(
            action=amo.LOG.APPROVE_CONTENT.id).get()
        assert a_log.details['version'] == self.addon.current_version.version
        assert a_log.details['comments'] == ''
        self.assert3xx(response, reverse('reviewers.queue_content_review'))
    def test_confirm_auto_approval_with_permission(self):
        """confirm_auto_approved marks the summary confirmed and logs one
        CONFIRM_AUTO_APPROVED entry; the posted comments are ignored."""
        summary = AutoApprovalSummary.objects.create(
            version=self.addon.current_version, verdict=amo.AUTO_APPROVED)
        GroupUser.objects.filter(user=self.reviewer).all().delete()
        self.grant_permission(self.reviewer, 'Addons:Review')
        response = self.client.post(self.url, {
            'action': 'confirm_auto_approved',
            'comments': 'ignore me this action does not support comments'
        })
        summary.reload()
        assert response.status_code == 302
        assert summary.confirmed is True
        assert ActivityLog.objects.filter(
            action=amo.LOG.CONFIRM_AUTO_APPROVED.id).count() == 1
        a_log = ActivityLog.objects.filter(
            action=amo.LOG.CONFIRM_AUTO_APPROVED.id).get()
        assert a_log.details['version'] == self.addon.current_version.version
        assert a_log.details['comments'] == ''
        self.assert3xx(response, reverse('reviewers.queue_auto_approved'))
    def test_reject_multiple_versions(self):
        """reject_multiple_versions immediately disables the files of every
        selected version and clears their needs_human_review flag."""
        old_version = self.version
        old_version.update(needs_human_review=True)
        self.version = version_factory(addon=self.addon, version='3.0')
        AutoApprovalSummary.objects.create(
            version=self.addon.current_version, verdict=amo.AUTO_APPROVED)
        GroupUser.objects.filter(user=self.reviewer).all().delete()
        self.grant_permission(self.reviewer, 'Addons:Review')
        response = self.client.post(self.url, {
            'action': 'reject_multiple_versions',
            'comments': 'multireject!',
            'versions': [old_version.pk, self.version.pk],
        })
        assert response.status_code == 302
        for version in [old_version, self.version]:
            version.reload()
            assert not version.needs_human_review
            file_ = version.files.all().get()
            assert file_.status == amo.STATUS_DISABLED
            assert not version.pending_rejection
    def test_reject_multiple_versions_with_no_delay(self):
        """With delayed_rejection explicitly False the rejection is
        immediate; the delayed_rejection_days value must be ignored."""
        old_version = self.version
        old_version.update(needs_human_review=True)
        self.version = version_factory(addon=self.addon, version='3.0')
        AutoApprovalSummary.objects.create(
            version=self.addon.current_version, verdict=amo.AUTO_APPROVED)
        GroupUser.objects.filter(user=self.reviewer).all().delete()
        self.grant_permission(self.reviewer, 'Addons:Review')
        response = self.client.post(self.url, {
            'action': 'reject_multiple_versions',
            'comments': 'multireject!',
            'versions': [old_version.pk, self.version.pk],
            'delayed_rejection': 'False',
            'delayed_rejection_days': (  # Should be ignored.
                REVIEWER_DELAYED_REJECTION_PERIOD_DAYS_DEFAULT
            ),
        })
        assert response.status_code == 302
        for version in [old_version, self.version]:
            version.reload()
            assert not version.needs_human_review
            file_ = version.files.all().get()
            assert file_.status == amo.STATUS_DISABLED
            assert not version.pending_rejection
    def test_reject_multiple_versions_with_delay(self):
        """With delayed_rejection True the versions stay approved but are
        marked pending rejection at roughly now + the configured delay."""
        old_version = self.version
        old_version.update(needs_human_review=True)
        self.version = version_factory(addon=self.addon, version='3.0')
        AutoApprovalSummary.objects.create(
            version=self.addon.current_version, verdict=amo.AUTO_APPROVED)
        GroupUser.objects.filter(user=self.reviewer).all().delete()
        self.grant_permission(self.reviewer, 'Addons:Review')
        response = self.client.post(self.url, {
            'action': 'reject_multiple_versions',
            'comments': 'multireject with delay!',
            'versions': [old_version.pk, self.version.pk],
            'delayed_rejection': 'True',
            'delayed_rejection_days': (
                REVIEWER_DELAYED_REJECTION_PERIOD_DAYS_DEFAULT
            ),
        })
        in_the_future = datetime.now() + timedelta(
            days=REVIEWER_DELAYED_REJECTION_PERIOD_DAYS_DEFAULT)
        assert response.status_code == 302
        for version in [old_version, self.version]:
            version.reload()
            # The versions no longer need human review...
            assert not version.needs_human_review
            file_ = version.files.all().get()
            # ... But their status shouldn't have changed yet ...
            assert file_.status == amo.STATUS_APPROVED
            # ... Because they are now pending rejection.
            assert version.pending_rejection
            self.assertCloseToNow(version.pending_rejection, now=in_the_future)
    def test_block_multiple_versions(self):
        """block_multiple_versions on the unlisted review page redirects an
        admin/blocklist user to a pre-filled blocklist submission form."""
        self.url = reverse(
            'reviewers.review', args=('unlisted', self.addon.slug))
        old_version = self.version
        old_version.update(needs_human_review=True)
        self.version = version_factory(addon=self.addon, version='3.0')
        self.make_addon_unlisted(self.addon)
        self.grant_permission(self.reviewer, 'Addons:ReviewUnlisted')
        self.grant_permission(self.reviewer, 'Reviews:Admin')
        self.grant_permission(self.reviewer, 'Blocklist:Create')
        response = self.client.post(
            self.url, {
                'action': 'block_multiple_versions',
                'comments': 'multiblock!',  # should be ignored anyway
                'versions': [old_version.pk, self.version.pk],
            }, follow=True)
        # The redirect target pre-fills guid and the min/max version range.
        new_block_url = (
            reverse('admin:blocklist_blocklistsubmission_add') +
            '?guids=%s&min_version=%s&max_version=%s' % (
                self.addon.guid, old_version.version, self.version.version))
        self.assertRedirects(response, new_block_url)
    def test_user_changes_log(self):
        """User add/role-change/removal activity logs are exposed in the
        context and rendered in the #user-changes list."""
        # Activity logs related to user changes should be displayed.
        # Create an activy log for each of the following: user addition, role
        # change and deletion.
        author = self.addon.addonuser_set.get()
        core.set_user(author.user)
        ActivityLog.create(
            amo.LOG.ADD_USER_WITH_ROLE, author.user,
            str(author.get_role_display()), self.addon)
        ActivityLog.create(
            amo.LOG.CHANGE_USER_WITH_ROLE, author.user,
            str(author.get_role_display()), self.addon)
        ActivityLog.create(
            amo.LOG.REMOVE_USER_WITH_ROLE, author.user,
            str(author.get_role_display()), self.addon)
        response = self.client.get(self.url)
        assert response.status_code == 200
        doc = pq(response.content)
        assert 'user_changes_log' in response.context
        user_changes_log = response.context['user_changes_log']
        actions = [log.action for log in user_changes_log]
        assert actions == [
            amo.LOG.ADD_USER_WITH_ROLE.id,
            amo.LOG.CHANGE_USER_WITH_ROLE.id,
            amo.LOG.REMOVE_USER_WITH_ROLE.id]
        # Make sure the logs are displayed in the page.
        user_changes = doc('#user-changes li')
        assert len(user_changes) == 3
        assert '(Owner) added to ' in user_changes[0].text
        assert 'role changed to Owner for ' in user_changes[1].text
        assert '(Owner) removed from ' in user_changes[2].text
@override_settings(CELERY_TASK_ALWAYS_EAGER=True)
@mock.patch('olympia.devhub.tasks.validate')
def test_validation_not_run_eagerly(self, validate):
"""Tests that validation is not run in eager mode."""
assert not self.file.has_been_validated
response = self.client.get(self.url)
assert response.status_code == 200
assert not validate.called
@override_settings(CELERY_TASK_ALWAYS_EAGER=False)
@mock.patch('olympia.devhub.tasks.validate')
def test_validation_run(self, validate):
"""Tests that validation is run if necessary."""
assert not self.file.has_been_validated
response = self.client.get(self.url)
assert response.status_code == 200
validate.assert_called_once_with(self.file)
    @override_settings(CELERY_TASK_ALWAYS_EAGER=False)
    @mock.patch('olympia.devhub.tasks.validate')
    def test_validation_not_run_again(self, validate):
        """Tests that validation is not run for files which have cached
        results."""
        FileValidation.objects.create(file=self.file, validation=json.dumps(
            amo.VALIDATOR_SKELETON_RESULTS))
        response = self.client.get(self.url)
        assert response.status_code == 200
        # Cached results exist, so no new validation task is scheduled.
        assert not validate.called
def test_review_is_review_listed(self):
review_page = self.client.get(
reverse('reviewers.review', args=[self.addon.slug]))
listed_review_page = self.client.get(
reverse('reviewers.review', args=['listed', self.addon.slug]))
assert (pq(review_page.content)('#versions-history').text() ==
pq(listed_review_page.content)('#versions-history').text())
    def test_approvals_info(self):
        """The last-approval date is shown only when an
        AddonApprovalsCounter exists for the add-on."""
        approval_info = AddonApprovalsCounter.objects.create(
            addon=self.addon, last_human_review=datetime.now(), counter=42)
        self.file.update(is_webextension=True)
        AutoApprovalSummary.objects.create(
            version=self.version, verdict=amo.AUTO_APPROVED)
        self.grant_permission(self.reviewer, 'Addons:Review')
        response = self.client.get(self.url)
        assert response.status_code == 200
        doc = pq(response.content)
        assert doc('.last-approval-date')
        approval_info.delete()
        response = self.client.get(self.url)
        assert response.status_code == 200
        doc = pq(response.content)
        # no AddonApprovalsCounter: nothing displayed.
        assert not doc('.last-approval-date')
def test_no_auto_approval_summaries_since_everything_is_public(self):
self.grant_permission(self.reviewer, 'Addons:Review')
response = self.client.get(self.url)
assert response.status_code == 200
doc = pq(response.content)
assert not doc('.auto_approval')
    def test_permissions_display(self):
        """Webextension permissions and optional permissions are listed in
        the file-info section of the version history."""
        permissions = ['bookmarks', 'high', 'voltage']
        optional_permissions = ['optional', 'high', 'voltage']
        self.file.update(is_webextension=True)
        WebextPermission.objects.create(
            optional_permissions=optional_permissions,
            permissions=permissions,
            file=self.file)
        response = self.client.get(self.url)
        assert response.status_code == 200
        doc = pq(response.content)
        info = doc('#versions-history .file-info div')
        assert info.eq(1).text() == 'Permissions: ' + ', '.join(permissions)
        assert info.eq(2).text() == 'Optional permissions: ' + \
            ', '.join(optional_permissions)
    def test_abuse_reports(self):
        """Abuse reports for the add-on are rendered with all metadata, and
        remain visible after the add-on is deleted (page fetched by id)."""
        report = AbuseReport.objects.create(
            addon=self.addon,
            message=u'Et mël mazim ludus.',
            country_code='FR',
            client_id='4815162342',
            addon_name='Unused here',
            addon_summary='Not used either',
            addon_version='42.0',
            addon_signature=AbuseReport.ADDON_SIGNATURES.UNSIGNED,
            application=amo.ANDROID.id,
            application_locale='fr_FR',
            operating_system='Løst OS',
            operating_system_version='20040922',
            install_date=self.days_ago(1),
            reason=AbuseReport.REASONS.POLICY,
            addon_install_origin='https://example.com/',
            addon_install_method=AbuseReport.ADDON_INSTALL_METHODS.LINK,
            addon_install_source=AbuseReport.ADDON_INSTALL_SOURCES.UNKNOWN,
            addon_install_source_url='https://source.example.com/',
            report_entry_point=AbuseReport.REPORT_ENTRY_POINTS.MENU,
        )
        created_at = format_datetime(report.created)
        response = self.client.get(self.url)
        assert response.status_code == 200
        doc = pq(response.content)
        assert doc('.abuse_reports')
        expected = [
            'Developer/Addon',
            'Application',
            'Install date',
            'Install origin / source',
            'Category',
            'Date',
            'Reporter',
            'Public 42.0',
            'Firefox for Android fr_FR Løst OS 20040922',
            '1\xa0day ago',
            'Origin: https://example.com/',
            'Method: Direct link',
            'Source: Unknown',
            'Source URL: https://source.example.com/',
            '',
            'Hateful, violent, or illegal content',
            created_at,
            'anonymous [FR]',
            'Et mël mazim ludus.',
        ]
        assert doc('.abuse_reports').text().split('\n') == expected
        # Deleting the add-on must not hide the reports: re-fetch by id.
        self.addon.delete()
        self.url = reverse('reviewers.review', args=[self.addon.id])
        response = self.client.get(self.url)
        assert response.status_code == 200
        doc = pq(response.content)
        assert doc('.abuse_reports')
        assert doc('.abuse_reports').text().split('\n') == expected
def test_abuse_reports_unlisted_addon(self):
user = UserProfile.objects.get(email='reviewer@mozilla.com')
self.grant_permission(user, 'Addons:ReviewUnlisted')
self.login_as_reviewer()
self.make_addon_unlisted(self.addon)
self.test_abuse_reports()
    def test_abuse_reports_developers(self):
        """Abuse reports filed against the add-on's developer (not the
        add-on itself) are also rendered on the review page."""
        report = AbuseReport.objects.create(
            user=self.addon.listed_authors[0], message=u'Foo, Bâr!',
            country_code='DE')
        created_at = format_datetime(report.created)
        response = self.client.get(self.url)
        assert response.status_code == 200
        doc = pq(response.content)
        assert doc('.abuse_reports')
        expected = [
            'Developer/Addon',
            'Application',
            'Install date',
            'Install origin / source',
            'Category',
            'Date',
            'Reporter',
            'regularuser التطب',
            'Firefox',
            'None',
            created_at,
            'anonymous [DE]',
            'Foo, Bâr!',
        ]
        assert doc('.abuse_reports').text().split('\n') == expected
def test_abuse_reports_developers_unlisted_addon(self):
user = UserProfile.objects.get(email='reviewer@mozilla.com')
self.grant_permission(user, 'Addons:ReviewUnlisted')
self.login_as_reviewer()
self.make_addon_unlisted(self.addon)
self.test_abuse_reports_developers()
    def test_user_ratings(self):
        """Only low-score ratings with a body are shown; bodyless ratings,
        replies and high-score ratings are excluded."""
        user = user_factory()
        rating = Rating.objects.create(
            body=u'Lôrem ipsum dolor', rating=3, ip_address='10.5.6.7',
            addon=self.addon, user=user)
        created_at = format_date(rating.created)
        Rating.objects.create(  # Review with no body, ignored.
            rating=1, addon=self.addon, user=user_factory())
        Rating.objects.create(  # Reply to a review, ignored.
            body='Replyyyyy', reply_to=rating,
            addon=self.addon, user=user_factory())
        Rating.objects.create(  # Review with high rating, ignored.
            body=u'Qui platônem temporibus in', rating=5, addon=self.addon,
            user=user_factory())
        response = self.client.get(self.url)
        assert response.status_code == 200
        doc = pq(response.content)
        assert doc('.user_ratings')
        assert (
            doc('.user_ratings').text() ==
            u'%s on %s [10.5.6.7]\n'
            u'Rated 3 out of 5 stars\nLôrem ipsum dolor' % (
                user.name, created_at
            )
        )
def test_user_ratings_unlisted_addon(self):
user = UserProfile.objects.get(email='reviewer@mozilla.com')
self.grant_permission(user, 'Addons:ReviewUnlisted')
self.login_as_reviewer()
self.make_addon_unlisted(self.addon)
self.test_user_ratings()
    def test_data_value_attributes(self):
        """Check the data-value attributes wired to each review-form widget
        for an auto-approved, public add-on (post-review actions only)."""
        AutoApprovalSummary.objects.create(
            verdict=amo.AUTO_APPROVED, version=self.version)
        self.grant_permission(self.reviewer, 'Addons:Review')
        response = self.client.get(self.url)
        assert response.status_code == 200
        doc = pq(response.content)
        expected_actions_values = [
            'confirm_auto_approved|', 'reject_multiple_versions|', 'reply|',
            'super|', 'comment|']
        assert [
            act.attrib['data-value'] for act in
            doc('.data-toggle.review-actions-desc')] == expected_actions_values
        assert (
            doc('select#id_versions.data-toggle')[0].attrib['data-value'] ==
            'reject_multiple_versions|')
        assert (
            doc('.data-toggle.review-comments')[0].attrib['data-value'] ==
            'reject_multiple_versions|reply|super|comment|')
        # We don't have approve/reject actions so these have an empty
        # data-value.
        assert (
            doc('.data-toggle.review-files')[0].attrib['data-value'] == '|')
        assert (
            doc('.data-toggle.review-tested')[0].attrib['data-value'] == '|')
        elm = doc('.data-toggle.review-delayed-rejection')[0]
        assert elm.attrib['data-value'] == 'reject_multiple_versions|'
    def test_data_value_attributes_unlisted(self):
        """Check the data-value attributes on the unlisted review page:
        unlisted adds block/confirm multiple-version actions and disallows
        delayed rejection."""
        self.version.update(channel=amo.RELEASE_CHANNEL_UNLISTED)
        AutoApprovalSummary.objects.create(
            verdict=amo.AUTO_APPROVED, version=self.version)
        self.grant_permission(self.reviewer, 'Addons:ReviewUnlisted')
        unlisted_url = reverse(
            'reviewers.review', args=['unlisted', self.addon.slug]
        )
        response = self.client.get(unlisted_url)
        assert response.status_code == 200
        doc = pq(response.content)
        expected_actions_values = [
            'reject_multiple_versions|', 'block_multiple_versions|',
            'confirm_multiple_versions|', 'reply|', 'super|', 'comment|',
        ]
        assert [
            act.attrib['data-value'] for act in
            doc('.data-toggle.review-actions-desc')] == expected_actions_values
        assert (
            doc('select#id_versions.data-toggle')[0].attrib['data-value'] ==
            'reject_multiple_versions|'
            'block_multiple_versions|'
            'confirm_multiple_versions|')
        assert (
            doc('.data-toggle.review-comments')[0].attrib['data-value'] ==
            'reject_multiple_versions|reply|super|comment|')
        # We don't have approve/reject actions so these have an empty
        # data-value.
        assert (
            doc('.data-toggle.review-files')[0].attrib['data-value'] == '|')
        assert (
            doc('.data-toggle.review-tested')[0].attrib['data-value'] == '|')
        # Unlisted versions can't be rejected with a delay so the data-value of
        # the field is empty as well.
        elm = doc('.data-toggle.review-delayed-rejection')[0]
        assert elm.attrib['data-value'] == '|'
def test_data_value_attributes_unreviewed(self):
self.file.update(status=amo.STATUS_AWAITING_REVIEW)
response = self.client.get(self.url)
assert response.status_code == 200
doc = pq(response.content)
expected_actions_values = [
'public|', 'reject|', 'reject_multiple_versions|', 'reply|',
'super|', 'comment|'
]
assert [
act.attrib['data-value'] for act in
doc('.data-toggle.review-actions-desc')] == expected_actions_values
assert 'data-value' not in doc('select#id_versions.data-toggle')[0]
assert (
doc('.data-toggle.review-comments')[0].attrib['data-value'] ==
'public|reject|reject_multiple_versions|reply|super|comment|')
assert (
doc('.data-toggle.review-files')[0].attrib['data-value'] ==
'public|reject|')
assert (
doc('.data-toggle.review-tested')[0].attrib['data-value'] ==
'public|reject|')
def test_data_value_attributes_static_theme(self):
self.addon.update(type=amo.ADDON_STATICTHEME)
self.file.update(status=amo.STATUS_AWAITING_REVIEW)
self.grant_permission(self.reviewer, 'Addons:ThemeReview')
response = self.client.get(self.url)
assert response.status_code == 200
doc = pq(response.content)
expected_actions_values = [
'public|', 'reject|', 'reject_multiple_versions|', 'reply|',
'super|', 'comment|']
assert [
act.attrib['data-value'] for act in
doc('.data-toggle.review-actions-desc')] == expected_actions_values
assert 'data-value' not in doc('select#id_versions.data-toggle')[0]
assert (
doc('.data-toggle.review-comments')[0].attrib['data-value'] ==
'public|reject|reject_multiple_versions|reply|super|comment|')
# we don't show files and tested with for any static theme actions
assert (
doc('.data-toggle.review-files')[0].attrib['data-value'] ==
'|')
assert (
doc('.data-toggle.review-tested')[0].attrib['data-value'] ==
'|')
    def test_post_review_ignore_disabled(self):
        """A disabled latest version does not remove the post-review
        confirmation action for a public, auto-approved add-on."""
        # Though the latest version will be disabled, the add-on is public and
        # was auto-approved so the confirmation action is available.
        AutoApprovalSummary.objects.create(
            verdict=amo.AUTO_APPROVED, version=self.version)
        version_factory(
            addon=self.addon, file_kw={'status': amo.STATUS_DISABLED})
        self.grant_permission(self.reviewer, 'Addons:Review')
        response = self.client.get(self.url)
        assert response.status_code == 200
        expected_actions = [
            'confirm_auto_approved', 'reject_multiple_versions', 'reply',
            'super', 'comment']
        assert (
            [action[0] for action in response.context['actions']] ==
            expected_actions)
    def test_content_review_ignore_disabled(self):
        """A disabled latest version does not remove the content approval
        action on the content review page."""
        # Though the latest version will be disabled, the add-on is public and
        # was auto-approved so the content approval action is available.
        AutoApprovalSummary.objects.create(
            verdict=amo.AUTO_APPROVED, version=self.version)
        version_factory(
            addon=self.addon, file_kw={'status': amo.STATUS_DISABLED})
        self.grant_permission(self.reviewer, 'Addons:ContentReview')
        self.url = reverse(
            'reviewers.review', args=['content', self.addon.slug])
        response = self.client.get(self.url)
        assert response.status_code == 200
        expected_actions = [
            'approve_content', 'reject_multiple_versions', 'reply',
            'super', 'comment']
        assert (
            [action[0] for action in response.context['actions']] ==
            expected_actions)
def test_static_theme_backgrounds(self):
self.addon.update(type=amo.ADDON_STATICTHEME)
self.grant_permission(self.reviewer, 'Addons:ThemeReview')
response = self.client.get(self.url)
assert response.status_code == 200
doc = pq(response.content)
backgrounds_div = doc('div.all-backgrounds')
assert backgrounds_div.attr('data-backgrounds-url') == (
reverse('reviewers.theme_background_images',
args=[self.addon.current_version.id])
)
    def test_reused_guid_from_previous_deleted_addon(self):
        """Deleted add-ons sharing this add-on's guid are listed (linking to
        their review pages, unlisted pages when appropriate); empty/None
        guids never match."""
        response = self.client.get(self.url)
        assert response.status_code == 200
        assert b'Previously deleted entries' not in response.content
        old_one = addon_factory(status=amo.STATUS_DELETED)
        old_two = addon_factory(status=amo.STATUS_DELETED)
        old_other = addon_factory(status=amo.STATUS_DELETED)
        old_noguid = addon_factory(status=amo.STATUS_DELETED)
        old_one.addonguid.update(guid='reuse@')
        old_two.addonguid.update(guid='reuse@')
        old_other.addonguid.update(guid='other@')
        old_noguid.addonguid.update(guid='')
        self.addon.update(guid='reuse@')
        response = self.client.get(self.url)
        assert response.status_code == 200
        assert b'Previously deleted entries' in response.content
        expected = [
            (f'{old_one.id}', reverse('reviewers.review', args=[old_one.id])),
            (f'{old_two.id}', reverse('reviewers.review', args=[old_two.id])),
        ]
        doc = pq(response.content)
        check_links(
            expected, doc('div.results table.item-history a'), verify=False)
        # test unlisted review pages link to unlisted review pages
        self.make_addon_unlisted(self.addon)
        self.login_as_admin()
        response = self.client.get(
            reverse('reviewers.review', args=['unlisted', self.addon.slug]))
        assert response.status_code == 200
        expected = [
            (f'{old_one.id}', reverse(
                'reviewers.review', args=['unlisted', old_one.id])),
            (f'{old_two.id}', reverse(
                'reviewers.review', args=['unlisted', old_two.id])),
        ]
        doc = pq(response.content)
        check_links(
            expected, doc('div.results table.item-history a'), verify=False)
        # make sure an empty guid isn't considered (e.g. search plugins)
        self.addon.update(guid=None)
        response = self.client.get(self.url)
        assert response.status_code == 200
        assert b'Previously deleted entries' not in response.content
        self.addon.update(guid='')
        response = self.client.get(self.url)
        assert response.status_code == 200
        assert b'Previously deleted entries' not in response.content
    def test_versions_that_are_flagged_by_scanners_are_highlighted(self):
        """Scanner-flagged versions are highlighted in the paginated version
        history, and other pages' flagged counts are summarized."""
        self.addon.current_version.update(created=self.days_ago(366))
        for i in range(0, 10):
            # Add versions 1.0 to 1.9. Flag a few of them as needing human
            # review.
            version_factory(
                addon=self.addon, version=f'1.{i}',
                needs_human_review=not bool(i % 3),
                created=self.days_ago(365 - i))
        response = self.client.get(self.url)
        assert response.status_code == 200
        doc = pq(response.content)
        tds = doc('#versions-history .review-files td.files')
        assert tds.length == 10
        # Original version should not be there any more, it's on the second
        # page. Versions on the page should be displayed in chronological order
        # Versions 1.0, 1.3, 1.6, 1.9 are flagged for human review.
        assert 'Flagged by scanners' in tds.eq(0).text()
        assert 'Flagged by scanners' in tds.eq(3).text()
        assert 'Flagged by scanners' in tds.eq(6).text()
        assert 'Flagged by scanners' in tds.eq(9).text()
        # There are no other flagged versions in the other page.
        span = doc('#review-files-header .risk-high')
        assert span.length == 0
        # Load the second page. This time there should be a message indicating
        # there are flagged versions in other pages.
        response = self.client.get(self.url, {'page': 2})
        assert response.status_code == 200
        doc = pq(response.content)
        span = doc('#review-files-header .risk-high')
        assert span.length == 1
        assert span.text() == '4 versions flagged by scanners on other pages.'
    def test_versions_that_needs_human_review_are_highlighted(self):
        """MAD-flagged versions are highlighted in the paginated version
        history, and other pages' flagged counts are summarized."""
        self.addon.current_version.update(created=self.days_ago(366))
        for i in range(0, 10):
            # Add versions 1.0 to 1.9. Flag a few of them as needing human
            # review.
            version = version_factory(
                addon=self.addon,
                version=f'1.{i}',
                created=self.days_ago(365 - i)
            )
            VersionReviewerFlags.objects.create(
                version=version,
                needs_human_review_by_mad=not bool(i % 3)
            )
        response = self.client.get(self.url)
        assert response.status_code == 200
        doc = pq(response.content)
        tds = doc('#versions-history .review-files td.files')
        assert tds.length == 10
        # Original version should not be there any more, it's on the second
        # page. Versions on the page should be displayed in chronological order
        # Versions 1.0, 1.3, 1.6, 1.9 are flagged for human review.
        assert 'Flagged for human review' in tds.eq(0).text()
        assert 'Flagged for human review' in tds.eq(3).text()
        assert 'Flagged for human review' in tds.eq(6).text()
        assert 'Flagged for human review' in tds.eq(9).text()
        # There are no other flagged versions in the other page.
        span = doc('#review-files-header .risk-medium')
        assert span.length == 0
        # Load the second page. This time there should be a message indicating
        # there are flagged versions in other pages.
        response = self.client.get(self.url, {'page': 2})
        assert response.status_code == 200
        doc = pq(response.content)
        span = doc('#review-files-header .risk-medium')
        assert span.length == 1
        assert (span.text() ==
                '4 versions flagged for human review on other pages.')
    def test_blocked_versions(self):
        """Each version falling inside a Block's guid/version range gets a
        'Blocked' marker; narrowing max_version removes markers."""
        response = self.client.get(self.url)
        assert response.status_code == 200
        assert b'Blocked' not in response.content
        block = Block.objects.create(
            guid=self.addon.guid, updated_by=user_factory())
        response = self.client.get(self.url)
        assert b'Blocked' in response.content
        span = pq(response.content)('#versions-history .blocked-version')
        assert span.text() == 'Blocked'
        assert span.length == 1  # addon only has 1 version
        version_factory(addon=self.addon, version='99')
        response = self.client.get(self.url)
        span = pq(response.content)('#versions-history .blocked-version')
        assert span.text() == 'Blocked Blocked'
        assert span.length == 2  # a new version is blocked too
        # Restricting the block below '99' unmarks the new version.
        block.update(max_version='98')
        response = self.client.get(self.url)
        span = pq(response.content)('#versions-history .blocked-version')
        assert span.text() == 'Blocked'
        assert span.length == 1
def test_redirect_after_review_unlisted(self):
self.url = reverse(
'reviewers.review', args=('unlisted', self.addon.slug))
self.version = version_factory(addon=self.addon, version='3.0')
self.make_addon_unlisted(self.addon)
self.grant_permission(self.reviewer, 'Addons:ReviewUnlisted')
response = self.client.post(
self.url, {
'action': 'reply',
'comments': 'Reply!',
}, follow=True)
self.assertRedirects(response, self.url)
class TestAbuseReportsView(ReviewerTest):
    def setUp(self):
        """Create an add-on with a developer, point self.url at its abuse
        reports page and log in as a reviewer."""
        self.addon_developer = user_factory()
        self.addon = addon_factory(name='Flôp', users=[self.addon_developer])
        self.url = reverse('reviewers.abuse_reports', args=[self.addon.slug])
        self.login_as_reviewer()
def test_abuse_reports(self):
report = AbuseReport.objects.create(
addon=self.addon,
message='Et mël mazim ludus.',
country_code='FR',
client_id='4815162342',
addon_name='Unused here',
addon_summary='Not used either',
addon_version='42.0',
addon_signature=AbuseReport.ADDON_SIGNATURES.UNSIGNED,
application=amo.ANDROID.id,
application_locale='fr_FR',
operating_system='Løst OS',
operating_system_version='20040922',
install_date=self.days_ago(1),
reason=AbuseReport.REASONS.POLICY,
addon_install_origin='https://example.com/',
addon_install_method=AbuseReport.ADDON_INSTALL_METHODS.LINK,
addon_install_source=AbuseReport.ADDON_INSTALL_SOURCES.UNKNOWN,
addon_install_source_url='https://source.example.com/',
report_entry_point=AbuseReport.REPORT_ENTRY_POINTS.MENU,
)
created_at = format_datetime(report.created)
response = self.client.get(self.url)
assert response.status_code == 200
doc = pq(response.content)
assert len(doc('.abuse_reports')) == 1
expected = [
'Developer/Addon',
'Application',
'Install date',
'Install origin / source',
'Category',
'Date',
'Reporter',
'Flôp 42.0',
'Firefox for Android fr_FR Løst OS 20040922',
'1\xa0day ago',
'Origin: https://example.com/',
'Method: Direct link',
'Source: Unknown',
'Source URL: https://source.example.com/',
'',
'Hateful, violent, or illegal content',
created_at,
'anonymous [FR]',
'Et mël mazim ludus.',
]
assert doc('.abuse_reports').text().split('\n') == expected
self.addon.delete()
self.url = reverse('reviewers.abuse_reports', args=[self.addon.id])
assert response.status_code == 200
doc = pq(response.content)
assert len(doc('.abuse_reports')) == 1
assert doc('.abuse_reports').text().split('\n') == expected
def test_queries(self):
AbuseReport.objects.create(addon=self.addon, message='One')
AbuseReport.objects.create(addon=self.addon, message='Two')
AbuseReport.objects.create(addon=self.addon, message='Three')
AbuseReport.objects.create(user=self.addon_developer, message='Four')
with self.assertNumQueries(21):
# - 2 savepoint/release savepoint
# - 2 for user and groups
# - 1 for the add-on
# - 1 for its translations
# - 7 for the add-on default transformer
# - 1 for reviewer motd config
# - 1 for site notice config
# - 2 for add-ons from logged in user and its collections
# - 1 for abuse reports count (pagination)
# - 1 for the abuse reports
# - 2 for the add-on and its translations (duplicate, but it's
# coming from the abuse reports queryset, annoying to get rid
# of)
response = self.client.get(self.url)
assert response.status_code == 200
class TestReviewPending(ReviewBase):
    """Review flow for an approved add-on with a new file awaiting review."""
    def setUp(self):
        super(TestReviewPending, self).setUp()
        # Add an extra unreviewed file to the version of an already-approved
        # add-on: the typical "pending update" situation.
        self.file = file_factory(version=self.version,
                                 status=amo.STATUS_AWAITING_REVIEW,
                                 is_webextension=True)
        self.addon.update(status=amo.STATUS_APPROVED)
    def pending_dict(self):
        # POST data that approves the pending version.
        return self.get_dict(action='public')
    @mock.patch('olympia.reviewers.utils.sign_file')
    def test_pending_to_public(self, mock_sign):
        statuses = (self.version.files.values_list('status', flat=True)
                    .order_by('status'))
        assert list(statuses) == [
            amo.STATUS_AWAITING_REVIEW, amo.STATUS_APPROVED]
        response = self.client.post(self.url, self.pending_dict())
        assert self.get_addon().status == amo.STATUS_APPROVED
        self.assert3xx(response, reverse('reviewers.queue_extension'))
        # Approving should have approved every file of the version.
        statuses = (self.version.files.values_list('status', flat=True)
                    .order_by('status'))
        assert list(statuses) == [amo.STATUS_APPROVED, amo.STATUS_APPROVED]
        assert mock_sign.called
    @override_settings(ENABLE_ADDON_SIGNING=True)
    def test_pending_to_public_search(self):
        # sign_file() is *not* mocked here. We shouldn't need to, it should
        # just avoid signing search plugins silently.
        self.version.files.all().update(is_webextension=False)
        self.addon.update(type=amo.ADDON_SEARCH)
        response = self.client.post(self.url, self.pending_dict())
        self.assert3xx(response, reverse('reviewers.queue_extension'))
        assert self.get_addon().status == amo.STATUS_APPROVED
        statuses = (self.version.files.values_list('status', flat=True)
                    .order_by('status'))
        assert list(statuses) == [amo.STATUS_APPROVED, amo.STATUS_APPROVED]
    def test_display_only_unreviewed_files(self):
        """Only the currently unreviewed files are displayed."""
        self.file.update(filename=b'somefilename.xpi')
        reviewed = File.objects.create(version=self.version,
                                       status=amo.STATUS_APPROVED,
                                       filename=b'file_reviewed.xpi')
        disabled = File.objects.create(version=self.version,
                                       status=amo.STATUS_DISABLED,
                                       filename=b'file_disabled.xpi')
        unreviewed = File.objects.create(version=self.version,
                                         status=amo.STATUS_AWAITING_REVIEW,
                                         filename=b'file_unreviewed.xpi')
        response = self.client.get(self.url, self.pending_dict())
        assert response.status_code == 200
        doc = pq(response.content)
        # Only the two awaiting-review files appear in the actions list.
        assert len(doc('.review-actions-files ul li')) == 2
        assert reviewed.filename not in response.content
        assert disabled.filename not in response.content
        assert unreviewed.filename in response.content
        assert self.file.filename in response.content
    @mock.patch('olympia.reviewers.utils.sign_file')
    def test_review_unreviewed_files(self, mock_sign):
        """Review all the unreviewed files when submitting a review."""
        reviewed = File.objects.create(version=self.version,
                                       status=amo.STATUS_APPROVED)
        disabled = File.objects.create(version=self.version,
                                       status=amo.STATUS_DISABLED)
        unreviewed = File.objects.create(version=self.version,
                                         status=amo.STATUS_AWAITING_REVIEW)
        self.login_as_admin()
        response = self.client.post(self.url, self.pending_dict())
        self.assert3xx(response, reverse('reviewers.queue_extension'))
        # Awaiting-review files are approved; disabled files stay disabled.
        assert self.addon.reload().status == amo.STATUS_APPROVED
        assert reviewed.reload().status == amo.STATUS_APPROVED
        assert disabled.reload().status == amo.STATUS_DISABLED
        assert unreviewed.reload().status == amo.STATUS_APPROVED
        assert self.file.reload().status == amo.STATUS_APPROVED
        assert mock_sign.called
    def test_auto_approval_summary_with_post_review(self):
        AutoApprovalSummary.objects.create(
            version=self.version,
            verdict=amo.NOT_AUTO_APPROVED,
            is_locked=True,
        )
        self.grant_permission(self.reviewer, 'Addons:Review')
        response = self.client.get(self.url)
        assert response.status_code == 200
        doc = pq(response.content)
        # Locked by a reviewer is shown.
        assert len(doc('.auto_approval li')) == 1
        assert doc('.auto_approval li').eq(0).text() == (
            'Is locked by a reviewer')
    def test_comments_box_doesnt_have_required_html_attribute(self):
        """Regression test
        https://github.com/mozilla/addons-server/issues/8907"""
        response = self.client.get(self.url)
        doc = pq(response.content)
        assert doc('#id_comments').attr('required') is None
class TestReviewerMOTD(ReviewerTest):
    """Viewing and editing the reviewers' message of the day (MOTD)."""

    def get_url(self, save=False):
        # The save endpoint has its own named view.
        view_name = 'reviewers.save_motd' if save else 'reviewers.motd'
        return reverse(view_name)

    def test_change_motd(self):
        self.login_as_admin()
        message = "Let's get crazy"
        response = self.client.post(
            self.get_url(save=True), {'motd': message})
        motd_url = self.get_url()
        self.assert3xx(response, motd_url)
        response = self.client.get(motd_url)
        assert response.status_code == 200
        # The freshly saved message is displayed on the MOTD page.
        displayed = pq(response.content)('.daily-message p').text()
        assert displayed == message

    def test_require_reviewer_to_view(self):
        motd_url = self.get_url()
        self.assertLoginRedirects(self.client.head(motd_url), to=motd_url)

    def test_require_admin_to_change_motd(self):
        self.login_as_reviewer()
        # Plain reviewers can neither see the edit page nor save a message.
        assert self.client.get(self.get_url()).status_code == 403
        response = self.client.post(
            reverse('reviewers.save_motd'),
            {'motd': "I'm a sneaky reviewer"})
        assert response.status_code == 403

    def test_motd_edit_group(self):
        # Membership in a group with the dedicated MOTD rule grants editing.
        user = UserProfile.objects.get(email='reviewer@mozilla.com')
        group = Group.objects.create(name='Add-on Reviewer MOTD',
                                     rules='AddonReviewerMOTD:Edit')
        GroupUser.objects.create(user=user, group=group)
        self.login_as_reviewer()
        response = self.client.post(
            reverse('reviewers.save_motd'),
            {'motd': 'I am the keymaster.'})
        assert response.status_code == 302
        assert get_config('reviewers_review_motd') == 'I am the keymaster.'

    def test_form_errors(self):
        self.login_as_admin()
        # Posting without any data should surface the form validation error.
        response = self.client.post(self.get_url(save=True))
        doc = pq(response.content)
        assert doc('#reviewer-motd .errorlist').text() == (
            'This field is required.')
class TestStatusFile(ReviewBase):
    """File status rendering in the versions history on the review page."""

    def get_file(self):
        # The add-on has a single file; grab it.
        files = self.version.files.all()
        return files[0]

    def check_status(self, expected):
        response = self.client.get(self.url)
        assert response.status_code == 200
        doc = pq(response.content)
        file_info_text = doc('#versions-history .file-info div').text()
        assert file_info_text == expected

    def test_status_full(self):
        self.get_file().update(status=amo.STATUS_AWAITING_REVIEW)
        # An awaiting-review file is labelled the same regardless of whether
        # the add-on itself is nominated or already approved.
        for addon_status in (amo.STATUS_NOMINATED, amo.STATUS_APPROVED):
            self.addon.update(status=addon_status)
            self.check_status('Awaiting Review')

    def test_status_full_reviewed(self):
        self.get_file().update(status=amo.STATUS_APPROVED)
        self.addon.update(status=amo.STATUS_APPROVED)
        self.check_status('Approved')
class TestWhiteboard(ReviewBase):
    """Posting to the reviewer whiteboard endpoint in each channel."""
    @property
    def addon_param(self):
        # Deleted add-ons can no longer be looked up by slug, only by pk.
        return self.addon.pk if self.addon.is_deleted else self.addon.slug
    def test_whiteboard_addition(self):
        public_whiteboard_info = u'Public whiteboard info.'
        private_whiteboard_info = u'Private whiteboard info.'
        url = reverse(
            'reviewers.whiteboard', args=['listed', self.addon_param])
        self.client.login(email='regular@mozilla.com') # No permissions.
        response = self.client.post(url, {
            'whiteboard-private': private_whiteboard_info,
            'whiteboard-public': public_whiteboard_info
        })
        assert response.status_code == 403 # Not a reviewer.
        self.login_as_reviewer()
        response = self.client.post(url, {
            'whiteboard-private': private_whiteboard_info,
            'whiteboard-public': public_whiteboard_info
        })
        # A successful save redirects back to the review page.
        self.assert3xx(response, reverse(
            'reviewers.review', args=('listed', self.addon_param)))
        whiteboard = self.addon.whiteboard.reload()
        assert whiteboard.public == public_whiteboard_info
        assert whiteboard.private == private_whiteboard_info
    def test_whiteboard_addition_content_review(self):
        public_whiteboard_info = 'Public whiteboard info for content.'
        private_whiteboard_info = 'Private whiteboard info for content.'
        url = reverse(
            'reviewers.whiteboard', args=['content', self.addon_param])
        self.client.login(email='regular@mozilla.com') # No permissions.
        response = self.client.post(url, {
            'whiteboard-private': private_whiteboard_info,
            'whiteboard-public': public_whiteboard_info
        })
        assert response.status_code == 403 # Not a reviewer.
        self.login_as_reviewer()
        response = self.client.post(url, {
            'whiteboard-private': private_whiteboard_info,
            'whiteboard-public': public_whiteboard_info
        })
        self.assert3xx(response, reverse(
            'reviewers.review', args=('content', self.addon_param)))
        addon = self.addon.reload()
        assert addon.whiteboard.public == public_whiteboard_info
        assert addon.whiteboard.private == private_whiteboard_info
        # Saving again with the explicit content-review permission should
        # overwrite the previous values.
        public_whiteboard_info = 'New content for public'
        private_whiteboard_info = 'New content for private'
        user = UserProfile.objects.get(email='reviewer@mozilla.com')
        self.grant_permission(user, 'Addons:ContentReview')
        response = self.client.post(url, {
            'whiteboard-private': private_whiteboard_info,
            'whiteboard-public': public_whiteboard_info
        })
        self.assert3xx(response, reverse(
            'reviewers.review', args=('content', self.addon_param)))
        whiteboard = self.addon.whiteboard.reload()
        assert whiteboard.public == public_whiteboard_info
        assert whiteboard.private == private_whiteboard_info
    def test_whiteboard_addition_unlisted_addon(self):
        self.make_addon_unlisted(self.addon)
        public_whiteboard_info = 'Public whiteboard info unlisted.'
        private_whiteboard_info = 'Private whiteboard info unlisted.'
        url = reverse(
            'reviewers.whiteboard', args=['unlisted', self.addon_param])
        self.client.login(email='regular@mozilla.com') # No permissions.
        response = self.client.post(url, {
            'whiteboard-private': private_whiteboard_info,
            'whiteboard-public': public_whiteboard_info
        })
        assert response.status_code == 403 # Not a reviewer.
        self.login_as_reviewer()
        response = self.client.post(url, {
            'whiteboard-private': private_whiteboard_info,
            'whiteboard-public': public_whiteboard_info
        })
        # Not an unlisted reviewer, we'll get a 404 from the
        # @reviewer_addon_view_factory decorator as it uses addon_view
        # under the hood.
        assert response.status_code == 404 # Not an unlisted reviewer.
        # Now the addon is not purely unlisted, but because we've requested the
        # unlisted channel we'll still get an error - this time it's a 403 from
        # the view itself
        version_factory(
            addon=self.addon, version='9.99',
            channel=amo.RELEASE_CHANNEL_LISTED)
        response = self.client.post(url, {
            'whiteboard-private': private_whiteboard_info,
            'whiteboard-public': public_whiteboard_info
        })
        assert response.status_code == 403
        # Everything works once you have permission.
        user = UserProfile.objects.get(email='reviewer@mozilla.com')
        self.grant_permission(user, 'Addons:ReviewUnlisted')
        response = self.client.post(url, {
            'whiteboard-private': private_whiteboard_info,
            'whiteboard-public': public_whiteboard_info
        })
        self.assert3xx(response, reverse(
            'reviewers.review', args=('unlisted', self.addon_param)))
        whiteboard = self.addon.whiteboard.reload()
        assert whiteboard.public == public_whiteboard_info
        assert whiteboard.private == private_whiteboard_info
    def test_delete_empty(self):
        # Posting empty values deletes the whiteboard entirely.
        url = reverse(
            'reviewers.whiteboard', args=['listed', self.addon_param])
        response = self.client.post(url, {
            'whiteboard-private': '',
            'whiteboard-public': ''
        })
        self.assert3xx(response, reverse(
            'reviewers.review', args=('listed', self.addon_param)))
        assert not Whiteboard.objects.filter(pk=self.addon.pk)
class TestWhiteboardDeleted(TestWhiteboard):
    """Re-run the whiteboard tests against a deleted add-on.

    The ``addon_param`` property in the parent class switches to the pk in
    that case, so every inherited test exercises the deleted-add-on lookup.
    """

    def setUp(self):
        super(TestWhiteboardDeleted, self).setUp()
        # Delete the add-on after the normal setup so all inherited tests
        # operate on a deleted add-on.
        self.addon.delete()
class TestLeaderboard(ReviewerTest):
    """Ranking and level display on the reviewers leaderboard page."""
    fixtures = ['base/users']
    def setUp(self):
        super(TestLeaderboard, self).setUp()
        self.url = reverse('reviewers.leaderboard')
        self.user = UserProfile.objects.get(email='reviewer@mozilla.com')
        self.login_as_reviewer()
        core.set_user(self.user)
    def _award_points(self, user, score):
        # Helper: record a manual-review score entry worth `score` points.
        ReviewerScore.objects.create(user=user, note_key=amo.REVIEWED_MANUAL,
                                     score=score, note='Thing.')
    def test_leaderboard_ranks(self):
        other_reviewer = UserProfile.objects.create(
            username='other_reviewer',
            display_name='', # No display_name, will fall back on name.
            email='other_reviewer@mozilla.com')
        self.grant_permission(
            other_reviewer, 'Addons:Review',
            name='Reviewers: Add-ons' # The name of the group matters here.
        )
        users = (self.user,
                 UserProfile.objects.get(email='theme_reviewer@mozilla.com'),
                 other_reviewer)
        # users[0] just below the first level threshold; the other two above.
        self._award_points(users[0], amo.REVIEWED_LEVELS[0]['points'] - 1)
        self._award_points(users[1], amo.REVIEWED_LEVELS[0]['points'] + 1)
        self._award_points(users[2], amo.REVIEWED_LEVELS[0]['points'] + 2)
        def get_cells():
            # Collect the name and level cells of the leaderboard table, in
            # document order, as a flat list of strings.
            doc = pq(self.client.get(self.url).content.decode('utf-8'))
            cells = doc('#leaderboard > tbody > tr > .name, '
                        '#leaderboard > tbody > tr > .level')
            return [cells.eq(i).text() for i in range(0, cells.length)]
        # The level row separates users above the threshold from users below.
        assert get_cells() == (
            [users[2].name,
             users[1].name,
             str(amo.REVIEWED_LEVELS[0]['name']),
             users[0].name])
        # One more point pushes users[0] over the first level threshold.
        self._award_points(users[0], 1)
        assert get_cells() == (
            [users[2].name,
             users[1].name,
             users[0].name,
             str(amo.REVIEWED_LEVELS[0]['name'])])
        # Drop users[0] back below, and push users[2] up to the second level.
        self._award_points(users[0], -1)
        self._award_points(users[2], (amo.REVIEWED_LEVELS[1]['points'] -
                                      amo.REVIEWED_LEVELS[0]['points']))
        assert get_cells() == (
            [users[2].name,
             str(amo.REVIEWED_LEVELS[1]['name']),
             users[1].name,
             str(amo.REVIEWED_LEVELS[0]['name']),
             users[0].name])
class TestXssOnAddonName(amo.tests.TestXss):
    """Add-on names must be escaped on reviewer pages (no stored XSS)."""

    def test_reviewers_abuse_report_page(self):
        self.assertNameAndNoXSS(
            reverse('reviewers.abuse_reports', args=[self.addon.slug]))

    def test_reviewers_review_page(self):
        self.assertNameAndNoXSS(
            reverse('reviewers.review', args=[self.addon.slug]))
class TestPolicyView(ReviewerTest):
    """EULA and privacy-policy pages for reviewers, per channel."""
    def setUp(self):
        super(TestPolicyView, self).setUp()
        self.addon = addon_factory()
        self.eula_url = reverse('reviewers.eula', args=[self.addon.slug])
        self.privacy_url = reverse('reviewers.privacy', args=[self.addon.slug])
        self.login_as_reviewer()
        self.review_url = reverse(
            'reviewers.review', args=('listed', self.addon.slug,))
    def test_eula(self):
        # 404 while the add-on has no EULA; 200 with content once it does.
        assert not bool(self.addon.eula)
        response = self.client.get(self.eula_url)
        assert response.status_code == 404
        self.addon.eula = u'Eulá!'
        self.addon.save()
        assert bool(self.addon.eula)
        response = self.client.get(self.eula_url)
        assert response.status_code == 200
        self.assertContains(
            response,
            '{addon} – EULA'.format(addon=self.addon.name))
        self.assertContains(response, u'End-User License Agreement')
        self.assertContains(response, u'Eulá!')
        self.assertContains(response, str(self.review_url))
    def test_eula_with_channel(self):
        # The unlisted channel requires the Addons:ReviewUnlisted permission.
        self.make_addon_unlisted(self.addon)
        unlisted_review_url = reverse(
            'reviewers.review', args=('unlisted', self.addon.slug,))
        self.addon.eula = u'Eulá!'
        self.addon.save()
        assert bool(self.addon.eula)
        response = self.client.get(self.eula_url + '?channel=unlisted')
        assert response.status_code == 404
        user = UserProfile.objects.get(email='regular@mozilla.com')
        self.grant_permission(user, 'Addons:ReviewUnlisted')
        self.client.login(email=user.email)
        response = self.client.get(self.eula_url + '?channel=unlisted')
        assert response.status_code == 200
        self.assertContains(response, u'Eulá!')
        self.assertContains(response, str(unlisted_review_url))
    def test_privacy(self):
        # Same behaviour as the EULA page, for the privacy policy.
        assert not bool(self.addon.privacy_policy)
        response = self.client.get(self.privacy_url)
        assert response.status_code == 404
        self.addon.privacy_policy = u'Prívacy Pólicy?'
        self.addon.save()
        assert bool(self.addon.privacy_policy)
        response = self.client.get(self.privacy_url)
        assert response.status_code == 200
        self.assertContains(
            response,
            '{addon} – Privacy Policy'.format(addon=self.addon.name))
        self.assertContains(response, 'Privacy Policy')
        self.assertContains(response, u'Prívacy Pólicy?')
        self.assertContains(response, str(self.review_url))
    def test_privacy_with_channel(self):
        self.make_addon_unlisted(self.addon)
        unlisted_review_url = reverse(
            'reviewers.review', args=('unlisted', self.addon.slug,))
        self.addon.privacy_policy = u'Prívacy Pólicy?'
        self.addon.save()
        assert bool(self.addon.privacy_policy)
        response = self.client.get(self.privacy_url + '?channel=unlisted')
        assert response.status_code == 404
        user = UserProfile.objects.get(email='regular@mozilla.com')
        self.grant_permission(user, 'Addons:ReviewUnlisted')
        self.client.login(email=user.email)
        response = self.client.get(self.privacy_url + '?channel=unlisted')
        assert response.status_code == 200
        self.assertContains(response, u'Prívacy Pólicy?')
        self.assertContains(response, str(unlisted_review_url))
class TestAddonReviewerViewSet(TestCase):
    """API tests for the reviewer add-on actions viewset.

    Covers subscribe/unsubscribe (per channel), enable/disable, reviewer
    flags patching, resubmission deny/allow and clearing pending rejections.
    """
    client_class = APITestClient

    def setUp(self):
        super(TestAddonReviewerViewSet, self).setUp()
        self.user = user_factory()
        self.addon = addon_factory()
        self.subscribe_url_listed = reverse_ns(
            'reviewers-addon-subscribe-listed', kwargs={'pk': self.addon.pk})
        self.unsubscribe_url_listed = reverse_ns(
            'reviewers-addon-unsubscribe-listed',
            kwargs={'pk': self.addon.pk})
        self.subscribe_url_unlisted = reverse_ns(
            'reviewers-addon-subscribe-unlisted', kwargs={'pk': self.addon.pk})
        self.unsubscribe_url_unlisted = reverse_ns(
            'reviewers-addon-unsubscribe-unlisted',
            kwargs={'pk': self.addon.pk})
        self.enable_url = reverse_ns(
            'reviewers-addon-enable', kwargs={'pk': self.addon.pk})
        self.disable_url = reverse_ns(
            'reviewers-addon-disable', kwargs={'pk': self.addon.pk})
        self.flags_url = reverse_ns(
            'reviewers-addon-flags', kwargs={'pk': self.addon.pk})
        self.deny_resubmission_url = reverse_ns(
            'reviewers-addon-deny-resubmission', kwargs={'pk': self.addon.pk})
        self.allow_resubmission_url = reverse_ns(
            'reviewers-addon-allow-resubmission', kwargs={'pk': self.addon.pk})
        self.clear_pending_rejections_url = reverse_ns(
            'reviewers-addon-clear-pending-rejections',
            kwargs={'pk': self.addon.pk})

    def test_subscribe_not_logged_in(self):
        response = self.client.post(self.subscribe_url_listed)
        assert response.status_code == 401
        response = self.client.post(self.subscribe_url_unlisted)
        assert response.status_code == 401

    def test_subscribe_no_rights(self):
        self.client.login_api(self.user)
        response = self.client.post(self.subscribe_url_listed)
        assert response.status_code == 403
        response = self.client.post(self.subscribe_url_unlisted)
        assert response.status_code == 403

    def test_subscribe_addon_does_not_exist(self):
        self.grant_permission(self.user, 'Addons:Review')
        self.client.login_api(self.user)
        self.subscribe_url_listed = reverse_ns(
            'reviewers-addon-subscribe-listed',
            kwargs={'pk': self.addon.pk + 42})
        response = self.client.post(self.subscribe_url_listed)
        assert response.status_code == 404

    def test_subscribe_already_subscribed_listed(self):
        # Subscribing twice must not create a duplicate subscription.
        ReviewerSubscription.objects.create(
            user=self.user, addon=self.addon,
            channel=amo.RELEASE_CHANNEL_LISTED)
        self.grant_permission(self.user, 'Addons:Review')
        self.client.login_api(self.user)
        response = self.client.post(self.subscribe_url_listed)
        assert response.status_code == 202
        assert ReviewerSubscription.objects.count() == 1

    def test_subscribe_already_subscribed_unlisted(self):
        ReviewerSubscription.objects.create(
            user=self.user, addon=self.addon,
            channel=amo.RELEASE_CHANNEL_UNLISTED)
        self.grant_permission(self.user, 'Addons:ReviewUnlisted')
        self.client.login_api(self.user)
        response = self.client.post(self.subscribe_url_unlisted)
        assert response.status_code == 202
        assert ReviewerSubscription.objects.count() == 1

    def test_subscribe(self):
        # The legacy, channel-less endpoint still works.
        self.grant_permission(self.user, 'Addons:Review')
        self.client.login_api(self.user)
        subscribe_url = reverse_ns(
            'reviewers-addon-subscribe', kwargs={'pk': self.addon.pk})
        response = self.client.post(subscribe_url)
        assert response.status_code == 202
        assert ReviewerSubscription.objects.count() == 1

    def test_subscribe_listed(self):
        self.grant_permission(self.user, 'Addons:Review')
        self.client.login_api(self.user)
        response = self.client.post(self.subscribe_url_listed)
        assert response.status_code == 202
        assert ReviewerSubscription.objects.count() == 1

    def test_subscribe_unlisted(self):
        self.grant_permission(self.user, 'Addons:ReviewUnlisted')
        self.client.login_api(self.user)
        response = self.client.post(self.subscribe_url_unlisted)
        assert response.status_code == 202
        assert ReviewerSubscription.objects.count() == 1

    def test_unsubscribe_not_logged_in(self):
        response = self.client.post(self.unsubscribe_url_listed)
        assert response.status_code == 401
        response = self.client.post(self.unsubscribe_url_unlisted)
        assert response.status_code == 401

    def test_unsubscribe_no_rights(self):
        self.client.login_api(self.user)
        response = self.client.post(self.unsubscribe_url_listed)
        assert response.status_code == 403
        response = self.client.post(self.unsubscribe_url_unlisted)
        assert response.status_code == 403

    def test_unsubscribe_addon_does_not_exist(self):
        self.grant_permission(self.user, 'Addons:Review')
        self.client.login_api(self.user)
        # Fix: this used to reverse 'reviewers-addon-subscribe-listed',
        # exercising the *subscribe* endpoint instead of unsubscribe (the
        # test only passed because both return 404 for an unknown pk).
        self.unsubscribe_url = reverse_ns(
            'reviewers-addon-unsubscribe-listed',
            kwargs={'pk': self.addon.pk + 42})
        response = self.client.post(self.unsubscribe_url)
        assert response.status_code == 404

    def test_unsubscribe_not_subscribed(self):
        # Unsubscribing without a subscription is a harmless no-op.
        self.grant_permission(self.user, 'Addons:Review')
        self.client.login_api(self.user)
        response = self.client.post(self.unsubscribe_url_listed)
        assert response.status_code == 202
        assert ReviewerSubscription.objects.count() == 0
        self.grant_permission(self.user, 'Addons:ReviewUnlisted')
        self.client.login_api(self.user)
        response = self.client.post(self.unsubscribe_url_unlisted)
        assert response.status_code == 202
        assert ReviewerSubscription.objects.count() == 0

    def test_unsubscribe(self):
        # The legacy, channel-less endpoint still works.
        ReviewerSubscription.objects.create(
            user=self.user, addon=self.addon,
            channel=amo.RELEASE_CHANNEL_LISTED)
        self.grant_permission(self.user, 'Addons:Review')
        self.client.login_api(self.user)
        unsubscribe_url = reverse_ns(
            'reviewers-addon-unsubscribe', kwargs={'pk': self.addon.pk})
        response = self.client.post(unsubscribe_url)
        assert response.status_code == 202
        assert ReviewerSubscription.objects.count() == 0

    def test_unsubscribe_listed(self):
        ReviewerSubscription.objects.create(
            user=self.user, addon=self.addon,
            channel=amo.RELEASE_CHANNEL_LISTED)
        self.grant_permission(self.user, 'Addons:Review')
        self.client.login_api(self.user)
        response = self.client.post(self.unsubscribe_url_listed)
        assert response.status_code == 202
        assert ReviewerSubscription.objects.count() == 0

    def test_unsubscribe_unlisted(self):
        ReviewerSubscription.objects.create(
            user=self.user, addon=self.addon,
            channel=amo.RELEASE_CHANNEL_UNLISTED)
        self.grant_permission(self.user, 'Addons:ReviewUnlisted')
        self.client.login_api(self.user)
        response = self.client.post(self.unsubscribe_url_unlisted)
        assert response.status_code == 202
        assert ReviewerSubscription.objects.count() == 0

    def test_unsubscribe_dont_touch_another(self):
        # Unsubscribing only removes this user's subscription to this add-on.
        another_user = user_factory()
        another_addon = addon_factory()
        ReviewerSubscription.objects.create(
            user=self.user, addon=self.addon,
            channel=amo.RELEASE_CHANNEL_LISTED)
        ReviewerSubscription.objects.create(
            user=self.user, addon=another_addon,
            channel=amo.RELEASE_CHANNEL_LISTED)
        ReviewerSubscription.objects.create(
            user=another_user, addon=self.addon,
            channel=amo.RELEASE_CHANNEL_LISTED)
        self.grant_permission(self.user, 'Addons:Review')
        self.client.login_api(self.user)
        response = self.client.post(self.unsubscribe_url_listed)
        assert response.status_code == 202
        assert ReviewerSubscription.objects.count() == 2
        assert not ReviewerSubscription.objects.filter(
            addon=self.addon, user=self.user).exists()

    def test_enable_not_logged_in(self):
        response = self.client.post(self.enable_url)
        assert response.status_code == 401

    def test_enable_no_rights(self):
        self.client.login_api(self.user)
        response = self.client.post(self.enable_url)
        assert response.status_code == 403
        # Being a reviewer is not enough.
        self.grant_permission(self.user, 'Addons:Review')
        response = self.client.post(self.enable_url)
        assert response.status_code == 403

    def test_enable_addon_does_not_exist(self):
        self.grant_permission(self.user, 'Reviews:Admin')
        self.client.login_api(self.user)
        self.enable_url = reverse_ns(
            'reviewers-addon-enable', kwargs={'pk': self.addon.pk + 42})
        response = self.client.post(self.enable_url)
        assert response.status_code == 404

    def test_enable(self):
        self.grant_permission(self.user, 'Reviews:Admin')
        self.client.login_api(self.user)
        self.addon.update(status=amo.STATUS_DISABLED)
        response = self.client.post(self.enable_url)
        assert response.status_code == 202
        self.addon.reload()
        assert self.addon.status == amo.STATUS_APPROVED
        # The status change is recorded in the activity log.
        assert ActivityLog.objects.count() == 1
        activity_log = ActivityLog.objects.latest('pk')
        assert activity_log.action == amo.LOG.CHANGE_STATUS.id
        assert activity_log.arguments[0] == self.addon

    def test_enable_already_public(self):
        self.grant_permission(self.user, 'Reviews:Admin')
        self.client.login_api(self.user)
        response = self.client.post(self.enable_url)
        assert response.status_code == 202
        self.addon.reload()
        assert self.addon.status == amo.STATUS_APPROVED
        assert ActivityLog.objects.count() == 1
        activity_log = ActivityLog.objects.latest('pk')
        assert activity_log.action == amo.LOG.CHANGE_STATUS.id
        assert activity_log.arguments[0] == self.addon

    def test_enable_no_public_versions_should_fall_back_to_incomplete(self):
        self.grant_permission(self.user, 'Reviews:Admin')
        self.client.login_api(self.user)
        self.addon.update(status=amo.STATUS_DISABLED)
        self.addon.versions.all().delete()
        response = self.client.post(self.enable_url)
        assert response.status_code == 202
        self.addon.reload()
        assert self.addon.status == amo.STATUS_NULL

    def test_enable_version_is_awaiting_review_fall_back_to_nominated(self):
        self.grant_permission(self.user, 'Reviews:Admin')
        self.client.login_api(self.user)
        self.addon.current_version.files.all().update(
            status=amo.STATUS_AWAITING_REVIEW)
        self.addon.update(status=amo.STATUS_DISABLED)
        response = self.client.post(self.enable_url)
        assert response.status_code == 202
        self.addon.reload()
        assert self.addon.status == amo.STATUS_NOMINATED

    def test_disable_not_logged_in(self):
        response = self.client.post(self.disable_url)
        assert response.status_code == 401

    def test_disable_no_rights(self):
        self.client.login_api(self.user)
        response = self.client.post(self.disable_url)
        assert response.status_code == 403
        # Being a reviewer is not enough.
        self.grant_permission(self.user, 'Addons:Review')
        response = self.client.post(self.disable_url)
        assert response.status_code == 403

    def test_disable_addon_does_not_exist(self):
        self.grant_permission(self.user, 'Reviews:Admin')
        self.client.login_api(self.user)
        # Fix: this used to reverse 'reviewers-addon-enable', exercising the
        # *enable* endpoint instead of disable (the test only passed because
        # both return 404 for an unknown pk).
        self.disable_url = reverse_ns(
            'reviewers-addon-disable', kwargs={'pk': self.addon.pk + 42})
        response = self.client.post(self.disable_url)
        assert response.status_code == 404

    def test_disable(self):
        self.grant_permission(self.user, 'Reviews:Admin')
        self.client.login_api(self.user)
        self.addon.versions.all().delete()
        response = self.client.post(self.disable_url)
        assert response.status_code == 202
        self.addon.reload()
        assert self.addon.status == amo.STATUS_DISABLED
        assert ActivityLog.objects.count() == 1
        activity_log = ActivityLog.objects.latest('pk')
        assert activity_log.action == amo.LOG.CHANGE_STATUS.id
        assert activity_log.arguments[0] == self.addon

    def test_patch_flags_not_logged_in(self):
        response = self.client.patch(
            self.flags_url, {'auto_approval_disabled': True})
        assert response.status_code == 401

    def test_patch_flags_no_permissions(self):
        self.client.login_api(self.user)
        response = self.client.patch(
            self.flags_url, {'auto_approval_disabled': True})
        assert response.status_code == 403
        # Being a reviewer is not enough.
        self.grant_permission(self.user, 'Addons:Review')
        response = self.client.patch(
            self.flags_url, {'auto_approval_disabled': True})
        assert response.status_code == 403

    def test_patch_flags_addon_does_not_exist(self):
        self.grant_permission(self.user, 'Reviews:Admin')
        self.client.login_api(self.user)
        self.flags_url = reverse_ns(
            'reviewers-addon-flags', kwargs={'pk': self.addon.pk + 42})
        response = self.client.patch(
            self.flags_url, {'auto_approval_disabled': True})
        assert response.status_code == 404

    def test_patch_flags_no_flags_yet_still_works_transparently(self):
        # The flags row is created on demand when patching.
        assert not AddonReviewerFlags.objects.filter(addon=self.addon).exists()
        self.grant_permission(self.user, 'Reviews:Admin')
        self.client.login_api(self.user)
        response = self.client.patch(
            self.flags_url, {'auto_approval_disabled': True})
        assert response.status_code == 200
        assert AddonReviewerFlags.objects.filter(addon=self.addon).exists()
        reviewer_flags = AddonReviewerFlags.objects.get(addon=self.addon)
        assert reviewer_flags.auto_approval_disabled
        assert ActivityLog.objects.count() == 0

    def test_patch_flags_change_everything(self):
        AddonReviewerFlags.objects.create(
            addon=self.addon,
            auto_approval_disabled=True,
            auto_approval_delayed_until=self.days_ago(42))
        self.grant_permission(self.user, 'Reviews:Admin')
        self.client.login_api(self.user)
        data = {
            'auto_approval_disabled': False,
            'auto_approval_disabled_until_next_approval': True,
            'auto_approval_delayed_until': None,
            'needs_admin_code_review': True,
            'needs_admin_content_review': True,
            'needs_admin_theme_review': True,
        }
        response = self.client.patch(self.flags_url, data)
        assert response.status_code == 200
        assert AddonReviewerFlags.objects.filter(addon=self.addon).exists()
        reviewer_flags = AddonReviewerFlags.objects.get(addon=self.addon)
        assert reviewer_flags.auto_approval_disabled is False
        assert (
            reviewer_flags.auto_approval_disabled_until_next_approval is True
        )
        assert reviewer_flags.auto_approval_delayed_until is None
        assert reviewer_flags.needs_admin_code_review is True
        assert reviewer_flags.needs_admin_content_review is True
        assert reviewer_flags.needs_admin_theme_review is True

    def test_deny_resubmission(self):
        self.grant_permission(self.user, 'Reviews:Admin')
        self.client.login_api(self.user)
        assert DeniedGuid.objects.count() == 0
        response = self.client.post(self.deny_resubmission_url)
        assert response.status_code == 202
        assert DeniedGuid.objects.count() == 1

    def test_deny_resubmission_with_denied_guid(self):
        # Denying an already-denied guid conflicts (409) without duplicating.
        self.grant_permission(self.user, 'Reviews:Admin')
        self.client.login_api(self.user)
        self.addon.deny_resubmission()
        assert DeniedGuid.objects.count() == 1
        response = self.client.post(self.deny_resubmission_url)
        assert response.status_code == 409
        assert DeniedGuid.objects.count() == 1

    def test_allow_resubmission(self):
        self.grant_permission(self.user, 'Reviews:Admin')
        self.client.login_api(self.user)
        self.addon.deny_resubmission()
        assert DeniedGuid.objects.count() == 1
        response = self.client.post(self.allow_resubmission_url)
        assert response.status_code == 202
        assert DeniedGuid.objects.count() == 0

    def test_allow_resubmission_with_non_denied_guid(self):
        # Allowing a guid that was never denied conflicts (409).
        self.grant_permission(self.user, 'Reviews:Admin')
        self.client.login_api(self.user)
        response = self.client.post(self.allow_resubmission_url)
        assert response.status_code == 409
        assert DeniedGuid.objects.count() == 0

    def test_clear_pending_rejections(self):
        self.grant_permission(self.user, 'Reviews:Admin')
        self.client.login_api(self.user)
        version_factory(addon=self.addon)
        for version in self.addon.versions.all():
            VersionReviewerFlags.objects.create(
                version=version,
                pending_rejection=datetime.now() + timedelta(days=7))
        response = self.client.post(self.clear_pending_rejections_url)
        assert response.status_code == 202
        assert not VersionReviewerFlags.objects.filter(
            version__addon=self.addon,
            pending_rejection__isnull=False).exists()
class TestAddonReviewerViewSetJsonValidation(TestCase):
    """Access-control tests for the JSON file-validation results endpoint."""
    client_class = APITestClient
    # Fixture provides FileValidation pk=1 with its related file and add-on.
    fixtures = ['devhub/addon-validation-1']
    def setUp(self):
        super(TestAddonReviewerViewSetJsonValidation, self).setUp()
        self.user = user_factory()
        file_validation = FileValidation.objects.get(pk=1)
        self.file = file_validation.file
        self.addon = self.file.version.addon
        self.url = reverse_ns(
            'reviewers-addon-json-file-validation',
            kwargs={
                'pk': self.addon.pk,
                'file_id': self.file.pk
            })
    def test_reviewer_can_see_json_results(self):
        self.grant_permission(self.user, 'Addons:Review')
        self.client.login_api(self.user)
        assert self.client.get(self.url).status_code == 200
    def test_deleted_addon(self):
        # Reviewers keep access to validation results after deletion.
        self.grant_permission(self.user, 'Addons:Review')
        self.client.login_api(self.user)
        self.addon.delete()
        assert self.client.get(self.url).status_code == 200
    def test_unlisted_reviewer_can_see_results_for_unlisted(self):
        self.grant_permission(self.user, 'Addons:ReviewUnlisted')
        self.client.login_api(self.user)
        self.make_addon_unlisted(self.addon)
        assert self.client.get(self.url).status_code == 200
    def test_non_reviewer_cannot_see_json_results(self):
        # No permission granted at all -> forbidden.
        self.client.login_api(self.user)
        assert self.client.get(self.url).status_code == 403
    @mock.patch.object(acl, 'is_reviewer', lambda request, addon: False)
    def test_wrong_type_of_reviewer_cannot_see_json_results(self):
        # Has the permission, but acl.is_reviewer is forced to False, so
        # the permission alone must not be enough.
        self.grant_permission(self.user, 'Addons:Review')
        self.client.login_api(self.user)
        assert self.client.get(self.url).status_code == 403
    def test_non_unlisted_reviewer_cannot_see_results_for_unlisted(self):
        # Listed-review permission does not extend to unlisted add-ons.
        self.grant_permission(self.user, 'Addons:Review')
        self.client.login_api(self.user)
        self.make_addon_unlisted(self.addon)
        assert self.client.get(self.url).status_code == 403
class AddonReviewerViewSetPermissionMixin(object):
    """Shared permission tests for reviewer version endpoints.

    Host classes must provide ``self.addon``, ``self.version``, ``self.url``
    and a ``_test_url()`` method asserting a successful response.
    """
    # Not a runnable test class on its own; concrete hosts set this to True.
    __test__ = False
    def test_disabled_version_reviewer(self):
        user = UserProfile.objects.create(username='reviewer')
        self.grant_permission(user, 'Addons:Review')
        self.client.login_api(user)
        self.version.files.update(status=amo.STATUS_DISABLED)
        self._test_url()
    def test_author(self):
        # Being an author of the add-on does not grant reviewer access.
        user = UserProfile.objects.create(username='author')
        AddonUser.objects.create(user=user, addon=self.addon)
        self.client.login_api(user)
        response = self.client.get(self.url)
        assert response.status_code == 403
    def test_disabled_version_admin(self):
        user = UserProfile.objects.create(username='admin')
        self.grant_permission(user, '*:*')
        self.client.login_api(user)
        self.version.files.update(status=amo.STATUS_DISABLED)
        self._test_url()
    def test_disabled_version_user(self):
        user = UserProfile.objects.create(username='simpleuser')
        self.client.login_api(user)
        self.version.files.update(status=amo.STATUS_DISABLED)
        response = self.client.get(self.url)
        assert response.status_code == 403
    def test_deleted_version_reviewer(self):
        # Regular reviewers can't see deleted versions (need ViewDeleted).
        user = UserProfile.objects.create(username='reviewer')
        self.grant_permission(user, 'Addons:Review')
        self.client.login_api(user)
        self.version.delete()
        response = self.client.get(self.url)
        assert response.status_code == 404
    def test_deleted_version_admin(self):
        user = UserProfile.objects.create(username='admin')
        self.grant_permission(user, '*:*')
        self.client.login_api(user)
        self.version.delete()
        self._test_url()
    def test_deleted_version_user(self):
        user = UserProfile.objects.create(username='simpleuser')
        self.client.login_api(user)
        self.version.delete()
        response = self.client.get(self.url)
        assert response.status_code == 404
    def test_unlisted_version_reviewer(self):
        # Listed-review permission alone: unlisted version looks absent.
        user = UserProfile.objects.create(username='reviewer')
        self.grant_permission(user, 'Addons:Review')
        self.client.login_api(user)
        self.version.update(channel=amo.RELEASE_CHANNEL_UNLISTED)
        response = self.client.get(self.url)
        assert response.status_code == 404
    def test_unlisted_version_unlisted_reviewer(self):
        user = UserProfile.objects.create(username='reviewer')
        self.grant_permission(user, 'Addons:ReviewUnlisted')
        self.client.login_api(user)
        self.version.update(channel=amo.RELEASE_CHANNEL_UNLISTED)
        self._test_url()
    def test_unlisted_version_admin(self):
        user = UserProfile.objects.create(username='admin')
        self.grant_permission(user, '*:*')
        self.client.login_api(user)
        self.version.update(channel=amo.RELEASE_CHANNEL_UNLISTED)
        self._test_url()
    def test_unlisted_version_user(self):
        user = UserProfile.objects.create(username='simpleuser')
        self.client.login_api(user)
        self.version.update(channel=amo.RELEASE_CHANNEL_UNLISTED)
        response = self.client.get(self.url)
        assert response.status_code == 404
class TestReviewAddonVersionViewSetDetail(
        TestCase, AddonReviewerViewSetPermissionMixin):
    """Tests for the reviewer version-detail endpoint (file browsing)."""
    client_class = APITestClient
    __test__ = True
    def setUp(self):
        super(TestReviewAddonVersionViewSetDetail, self).setUp()
        # TODO: Most of the initial setup could be moved to
        # setUpTestData but unfortunately paths are setup in pytest via a
        # regular autouse fixture that has function-scope so functions in
        # setUpTestData doesn't use proper paths (cgrebs)
        self.addon = addon_factory(
            name=u'My Addôn', slug='my-addon',
            file_kw={'filename': 'webextension_no_id.xpi',
                     'is_webextension': True})
        # Extract the xpi into git storage so file contents can be served.
        extract_version_to_git(self.addon.current_version.pk)
        self.version = self.addon.current_version
        self.version.refresh_from_db()
        self._set_tested_url()
    def _test_url(self):
        # Success path, also exercised by the permission mixin.
        response = self.client.get(self.url)
        assert response.status_code == 200
        result = json.loads(response.content)
        assert result['id'] == self.version.pk
        assert result['file']['id'] == self.version.current_file.pk
        # part of manifest.json
        assert '"name": "Beastify"' in result['file']['content']
    def _set_tested_url(self):
        self.url = reverse_ns('reviewers-versions-detail', kwargs={
            'addon_pk': self.addon.pk,
            'pk': self.version.pk})
    def test_anonymous(self):
        response = self.client.get(self.url)
        assert response.status_code == 401
    def test_requested_file(self):
        user = UserProfile.objects.create(username='reviewer')
        self.grant_permission(user, 'Addons:Review')
        self.client.login_api(user)
        with self.assertNumQueries(10):
            # - 2 savepoints because tests
            # - 2 user and groups
            # - 2 add-on and translations
            # - 1 add-on author check
            # - 1 version
            # - 2 file and file validation
            response = self.client.get(self.url + '?file=README.md&lang=en-US')
        assert response.status_code == 200
        result = json.loads(response.content)
        assert result['addon']['name'] == str(self.addon.name)
        assert result['file']['content'] == '# beastify\n'
        assert result['file_entries'] is not None
        # make sure the correct download url is correctly generated
        assert result['file']['download_url'] == absolutify(reverse(
            'reviewers.download_git_file',
            kwargs={
                'version_id': self.version.pk,
                'filename': 'README.md'
            }
        ))
    def test_non_existent_requested_file_returns_404(self):
        user = UserProfile.objects.create(username='reviewer')
        self.grant_permission(user, 'Addons:Review')
        self.client.login_api(user)
        response = self.client.get(self.url + '?file=UNKNOWN_FILE')
        assert response.status_code == 404
    def test_requested_file_contains_whitespace(self):
        # Filenames with spaces must survive URL round-tripping.
        new_version = version_factory(
            addon=self.addon, file_kw={'filename': 'webextension_no_id.xpi',
                                       'is_webextension': True})
        repo = AddonGitRepository.extract_and_commit_from_version(new_version)
        apply_changes(
            repo, new_version, '(function() {})\n', 'content script.js')
        user = UserProfile.objects.create(username='reviewer')
        self.grant_permission(user, 'Addons:Review')
        self.client.login_api(user)
        url = reverse_ns('reviewers-versions-detail', kwargs={
            'addon_pk': self.addon.pk,
            'pk': new_version.pk})
        response = self.client.get(url + '?file=content script.js')
        assert response.status_code == 200
        result = json.loads(response.content)
        assert result['file']['content'] == '(function() {})\n'
        # make sure the correct download url is correctly generated
        assert result['file']['download_url'] == absolutify(reverse(
            'reviewers.download_git_file',
            kwargs={
                'version_id': new_version.pk,
                'filename': 'content script.js'
            }
        ))
    def test_version_get_not_found(self):
        user = UserProfile.objects.create(username='reviewer')
        self.grant_permission(user, 'Addons:Review')
        self.client.login_api(user)
        # NOTE(review): the offset is added to current_file.pk, presumably
        # just to obtain a pk with no matching version — confirm it cannot
        # collide with a real version pk.
        self.url = reverse_ns('reviewers-versions-detail', kwargs={
            'addon_pk': self.addon.pk,
            'pk': self.version.current_file.pk + 42})
        response = self.client.get(self.url)
        assert response.status_code == 404
    def test_mixed_channel_only_listed_without_unlisted_perm(self):
        user = UserProfile.objects.create(username='admin')
        # User doesn't have ReviewUnlisted permission
        self.grant_permission(user, 'Addons:Review')
        self.client.login_api(user)
        # Add an unlisted version to the mix
        unlisted_version = version_factory(
            addon=self.addon, channel=amo.RELEASE_CHANNEL_UNLISTED)
        # Now the add-on has both, listed and unlisted versions
        # but only reviewers with Addons:ReviewUnlisted are able
        # to see them
        url = reverse_ns('reviewers-versions-detail', kwargs={
            'addon_pk': self.addon.pk,
            'pk': self.version.pk})
        response = self.client.get(url)
        assert response.status_code == 200
        url = reverse_ns('reviewers-versions-detail', kwargs={
            'addon_pk': self.addon.pk,
            'pk': unlisted_version.pk})
        response = self.client.get(url)
        assert response.status_code == 404
    def test_file_only_requested_file(self):
        user = UserProfile.objects.create(username='reviewer')
        self.grant_permission(user, 'Addons:Review')
        self.client.login_api(user)
        with self.assertNumQueries(10):
            # - 2 savepoints because tests
            # - 2 user and groups
            # - 2 add-on and translations
            # - 1 add-on author check
            # - 1 version
            # - 2 file and file validation
            response = self.client.get(
                self.url + '?file=README.md&lang=en-US&file_only=true')
        assert response.status_code == 200
        result = json.loads(response.content)
        assert result['id'] == self.version.pk
        assert result['file']['content'] == '# beastify\n'
        # make sure the correct download url is correctly generated
        assert result['file']['download_url'] == absolutify(reverse(
            'reviewers.download_git_file',
            kwargs={
                'version_id': self.version.pk,
                'filename': 'README.md'
            }
        ))
        # make sure we only returned `id` and `file` properties
        assert len(result.keys()) == 2
    def test_file_only_false(self):
        user = UserProfile.objects.create(username='reviewer')
        self.grant_permission(user, 'Addons:Review')
        self.client.login_api(user)
        response = self.client.get(
            self.url + '?file=README.md&lang=en-US&file_only=false')
        result = json.loads(response.content)
        assert result['id'] == self.version.pk
        assert result['file']['content'] == '# beastify\n'
        # make sure we returned more than just the `id` and `file` properties
        assert len(result.keys()) > 2
    def test_deleted_addon(self):
        # With ViewDeleted, a reviewer can still browse a deleted add-on.
        user = UserProfile.objects.create(username='reviewer')
        self.grant_permission(user, 'Addons:Review')
        self.grant_permission(user, 'Addons:ViewDeleted')
        self.client.login_api(user)
        self.addon.delete()
        response = self.client.get(self.url)
        assert response.status_code == 200
        result = json.loads(response.content)
        assert result['id'] == self.version.pk
class TestReviewAddonVersionViewSetList(TestCase):
    """Tests for the reviewer versions list endpoint."""
    client_class = APITestClient
    def setUp(self):
        super(TestReviewAddonVersionViewSetList, self).setUp()
        self.addon = addon_factory(
            name=u'My Addôn', slug='my-addon',
            file_kw={'filename': 'webextension_no_id.xpi',
                     'is_webextension': True})
        # Extract the xpi into git storage so the endpoint has data.
        extract_version_to_git(self.addon.current_version.pk)
        self.version = self.addon.current_version
        self.version.refresh_from_db()
        self._set_tested_url()
    def _test_url(self):
        # Success path: the single listed version is returned.
        response = self.client.get(self.url)
        assert response.status_code == 200
        result = json.loads(response.content)
        assert result == [{
            'version': self.version.version,
            'id': self.version.id,
            'channel': u'listed',
        }]
    def _set_tested_url(self):
        self.url = reverse_ns('reviewers-versions-list', kwargs={
            'addon_pk': self.addon.pk})
    def test_anonymous(self):
        response = self.client.get(self.url)
        assert response.status_code == 401
    def test_permissions_reviewer(self):
        user = UserProfile.objects.create(username='reviewer')
        self.grant_permission(user, 'Addons:Review')
        self.client.login_api(user)
        self._test_url()
    def test_permissions_author(self):
        # Authors are not reviewers: listing is forbidden.
        user = UserProfile.objects.create(username='author')
        AddonUser.objects.create(user=user, addon=self.addon)
        self.client.login_api(user)
        response = self.client.get(self.url)
        assert response.status_code == 403
    def test_permissions_disabled_version_admin(self):
        user = UserProfile.objects.create(username='admin')
        self.grant_permission(user, '*:*')
        self.client.login_api(user)
        self.version.files.update(status=amo.STATUS_DISABLED)
        self._test_url()
    def test_permissions_disabled_version_user(self):
        user = UserProfile.objects.create(username='simpleuser')
        self.client.login_api(user)
        self.version.files.update(status=amo.STATUS_DISABLED)
        response = self.client.get(self.url)
        assert response.status_code == 403
    def test_show_only_listed_without_unlisted_permission(self):
        user = UserProfile.objects.create(username='admin')
        # User doesn't have ReviewUnlisted permission
        self.grant_permission(user, 'Addons:Review')
        self.client.login_api(user)
        version_factory(
            addon=self.addon, channel=amo.RELEASE_CHANNEL_UNLISTED)
        response = self.client.get(self.url)
        assert response.status_code == 200
        result = json.loads(response.content)
        # The unlisted version must not appear in the listing.
        assert result == [
            {
                'version': self.version.version,
                'id': self.version.id,
                'channel': u'listed'
            },
        ]
    def test_show_listed_and_unlisted_with_permissions(self):
        user = UserProfile.objects.create(username='admin')
        # User doesn't have Review permission
        self.grant_permission(user, 'Addons:ReviewUnlisted')
        self.client.login_api(user)
        unlisted_version = version_factory(
            addon=self.addon, channel=amo.RELEASE_CHANNEL_UNLISTED)
        with self.assertNumQueries(8):
            # - 2 savepoints because of tests
            # - 2 user and groups
            # - 1 add-on
            # - 1 add-on translations (not needed, could be avoided, but we
            #   currently re-use the same get_addon_object() implementation
            #   for other APIs where we do need the add-on name)
            # - 1 versions exists to figure out if add-on is listed
            # - 1 versions
            response = self.client.get(self.url)
        assert response.status_code == 200
        result = json.loads(response.content)
        # Newest version first: the unlisted one was created last.
        assert result == [
            {
                'version': unlisted_version.version,
                'id': unlisted_version.id,
                'channel': u'unlisted'
            },
            {
                'version': self.version.version,
                'id': self.version.id,
                'channel': u'listed'
            },
        ]
class TestDraftCommentViewSet(TestCase):
    """CRUD and permission tests for reviewer draft comments on versions."""
    client_class = APITestClient
    def setUp(self):
        super().setUp()
        self.addon = addon_factory(
            name=u'My Addôn', slug='my-addon',
            file_kw={'filename': 'webextension_no_id.xpi',
                     'is_webextension': True})
        # Extract the xpi into git storage so files can be commented on.
        extract_version_to_git(self.addon.current_version.pk)
        self.version = self.addon.current_version
        self.version.refresh_from_db()
    def test_create_and_retrieve(self):
        user = user_factory(username='reviewer')
        self.grant_permission(user, 'Addons:Review')
        self.client.login_api(user)
        data = {
            'comment': 'Some really fancy comment',
            'lineno': 20,
            'filename': 'manifest.json',
        }
        url = reverse_ns('reviewers-versions-draft-comment-list', kwargs={
            'addon_pk': self.addon.pk,
            'version_pk': self.version.pk
        })
        response = self.client.post(url, data)
        # NOTE(review): json() is read before asserting the status code; a
        # failed POST would raise here instead of failing the assert below.
        comment_id = response.json()['id']
        assert response.status_code == 201
        response = self.client.post(url, data)
        assert response.status_code == 201
        assert DraftComment.objects.count() == 2
        response = self.client.get(url)
        # Build a request to give the serializer the context it needs.
        request = APIRequestFactory().get('/')
        request.user = user
        assert response.json()['count'] == 2
        assert response.json()['results'][0] == {
            'id': comment_id,
            'filename': 'manifest.json',
            'lineno': 20,
            'comment': 'Some really fancy comment',
            'canned_response': None,
            'version_id': self.version.pk,
            # Round-trip through AMOJSONEncoder to match API serialization.
            'user': json.loads(json.dumps(
                BaseUserSerializer(
                    user, context={'request': request}).data,
                cls=amo.utils.AMOJSONEncoder))
        }
    def test_list_queries(self):
        user = user_factory(username='reviewer')
        self.grant_permission(user, 'Addons:Review')
        self.client.login_api(user)
        DraftComment.objects.create(
            version=self.version, comment='test1', user=user,
            lineno=0, filename='manifest.json')
        DraftComment.objects.create(
            version=self.version, comment='test2', user=user,
            lineno=1, filename='manifest.json')
        DraftComment.objects.create(
            version=self.version, comment='test3', user=user,
            lineno=2, filename='manifest.json')
        url = reverse_ns('reviewers-versions-draft-comment-list', kwargs={
            'addon_pk': self.addon.pk,
            'version_pk': self.version.pk
        })
        with self.assertNumQueries(9):
            # - 2 savepoints because of tests
            # - 2 user and groups
            # - 2 addon and translations
            # - 1 version
            # - 1 count
            # - 1 drafts
            response = self.client.get(url, {'lang': 'en-US'})
        assert response.json()['count'] == 3
    def test_create_retrieve_and_update(self):
        user = user_factory(username='reviewer')
        self.grant_permission(user, 'Addons:Review')
        self.client.login_api(user)
        data = {
            'comment': 'Some really fancy comment',
            'lineno': 20,
            'filename': 'manifest.json',
        }
        url = reverse_ns('reviewers-versions-draft-comment-list', kwargs={
            'addon_pk': self.addon.pk,
            'version_pk': self.version.pk
        })
        response = self.client.post(url, data)
        assert response.status_code == 201
        comment = DraftComment.objects.first()
        response = self.client.get(url)
        assert response.json()['count'] == 1
        assert (
            response.json()['results'][0]['comment'] ==
            'Some really fancy comment')
        url = reverse_ns('reviewers-versions-draft-comment-detail', kwargs={
            'addon_pk': self.addon.pk,
            'version_pk': self.version.pk,
            'pk': comment.pk
        })
        # PATCH a single field; the others must be left untouched.
        response = self.client.patch(url, {
            'comment': 'Updated comment!'
        })
        assert response.status_code == 200
        response = self.client.get(url)
        assert response.json()['comment'] == 'Updated comment!'
        assert response.json()['lineno'] == 20
        response = self.client.patch(url, {
            'lineno': 18
        })
        assert response.status_code == 200
        response = self.client.get(url)
        assert response.json()['lineno'] == 18
        # Patch two fields at the same time
        response = self.client.patch(url, {
            'lineno': 16,
            'filename': 'new_manifest.json'
        })
        assert response.status_code == 200
        response = self.client.get(url)
        assert response.json()['lineno'] == 16
        assert response.json()['filename'] == 'new_manifest.json'
    def test_draft_optional_fields(self):
        # Only `comment` is required; everything else defaults to None.
        user = user_factory(username='reviewer')
        self.grant_permission(user, 'Addons:Review')
        self.client.login_api(user)
        data = {
            'comment': 'Some really fancy comment',
        }
        url = reverse_ns('reviewers-versions-draft-comment-list', kwargs={
            'addon_pk': self.addon.pk,
            'version_pk': self.version.pk
        })
        response = self.client.post(url, data)
        comment_id = response.json()['id']
        assert response.status_code == 201
        url = reverse_ns('reviewers-versions-draft-comment-detail', kwargs={
            'addon_pk': self.addon.pk,
            'version_pk': self.version.pk,
            'pk': comment_id
        })
        response = self.client.get(url)
        assert response.json()['comment'] == 'Some really fancy comment'
        assert response.json()['lineno'] is None
        assert response.json()['filename'] is None
        assert response.json()['canned_response'] is None
    def test_delete(self):
        user = user_factory(username='reviewer')
        self.grant_permission(user, 'Addons:Review')
        self.client.login_api(user)
        comment = DraftComment.objects.create(
            version=self.version, comment='test', user=user,
            lineno=0, filename='manifest.json')
        url = reverse_ns('reviewers-versions-draft-comment-detail', kwargs={
            'addon_pk': self.addon.pk,
            'version_pk': self.version.pk,
            'pk': comment.pk
        })
        response = self.client.delete(url)
        assert response.status_code == 204
        assert DraftComment.objects.first() is None
    def test_canned_response_and_comment_not_together(self):
        # `comment` and `canned_response` are mutually exclusive.
        user = user_factory(username='reviewer')
        self.grant_permission(user, 'Addons:Review')
        self.client.login_api(user)
        canned_response = CannedResponse.objects.create(
            name=u'Terms of services',
            response=u'doesn\'t regard our terms of services',
            category=amo.CANNED_RESPONSE_CATEGORY_OTHER,
            type=amo.CANNED_RESPONSE_TYPE_ADDON)
        data = {
            'comment': 'Some really fancy comment',
            'canned_response': canned_response.pk,
            'lineno': 20,
            'filename': 'manifest.json',
        }
        url = reverse_ns('reviewers-versions-draft-comment-list', kwargs={
            'addon_pk': self.addon.pk,
            'version_pk': self.version.pk,
        })
        response = self.client.post(url, data)
        assert response.status_code == 400
        assert (
            str(response.data['comment'][0]) ==
            "You can't submit a comment if `canned_response` is defined.")
    def test_doesnt_allow_empty_comment(self):
        user = user_factory(username='reviewer')
        self.grant_permission(user, 'Addons:Review')
        self.client.login_api(user)
        data = {
            'comment': '',
            'lineno': 20,
            'filename': 'manifest.json',
        }
        url = reverse_ns('reviewers-versions-draft-comment-list', kwargs={
            'addon_pk': self.addon.pk,
            'version_pk': self.version.pk,
        })
        response = self.client.post(url, data)
        assert response.status_code == 400
        assert (
            str(response.data['comment'][0]) ==
            "You can't submit an empty comment.")
    def test_disallow_lineno_without_filename(self):
        user = user_factory(username='reviewer')
        self.grant_permission(user, 'Addons:Review')
        self.client.login_api(user)
        data = {
            'comment': 'Some really fancy comment',
            'lineno': 20,
            'filename': None,
        }
        url = reverse_ns('reviewers-versions-draft-comment-list', kwargs={
            'addon_pk': self.addon.pk,
            'version_pk': self.version.pk,
        })
        response = self.client.post(url, data)
        assert response.status_code == 400
        assert (
            str(response.data['comment'][0]) ==
            'You can\'t submit a line number without associating it to a '
            'filename.')
    def test_allows_explicit_canned_response_null(self):
        # Passing canned_response=None explicitly is valid.
        user = user_factory(username='reviewer')
        self.grant_permission(user, 'Addons:Review')
        self.client.login_api(user)
        data = {
            'comment': 'Some random comment',
            'canned_response': None,
            'lineno': 20,
            'filename': 'manifest.json',
        }
        url = reverse_ns('reviewers-versions-draft-comment-list', kwargs={
            'addon_pk': self.addon.pk,
            'version_pk': self.version.pk,
        })
        response = self.client.post(url, data)
        assert response.status_code == 201
    def test_canned_response(self):
        user = user_factory(username='reviewer')
        self.grant_permission(user, 'Addons:Review')
        self.client.login_api(user)
        canned_response = CannedResponse.objects.create(
            name=u'Terms of services',
            response=u'doesn\'t regard our terms of services',
            category=amo.CANNED_RESPONSE_CATEGORY_OTHER,
            type=amo.CANNED_RESPONSE_TYPE_ADDON)
        data = {
            'canned_response': canned_response.pk,
            'lineno': 20,
            'filename': 'manifest.json',
        }
        url = reverse_ns('reviewers-versions-draft-comment-list', kwargs={
            'addon_pk': self.addon.pk,
            'version_pk': self.version.pk,
        })
        response = self.client.post(url, data)
        comment_id = response.json()['id']
        assert response.status_code == 201
        assert DraftComment.objects.count() == 1
        response = self.client.get(url)
        request = APIRequestFactory().get('/')
        request.user = user
        assert response.json()['count'] == 1
        assert response.json()['results'][0] == {
            'id': comment_id,
            'filename': 'manifest.json',
            'lineno': 20,
            'comment': '',
            'canned_response': json.loads(json.dumps(
                CannedResponseSerializer(canned_response).data,
                cls=amo.utils.AMOJSONEncoder)),
            'version_id': self.version.id,
            'user': json.loads(json.dumps(
                BaseUserSerializer(
                    user, context={'request': request}).data,
                cls=amo.utils.AMOJSONEncoder))
        }
    def test_delete_not_comment_owner(self):
        user = user_factory(username='reviewer')
        self.grant_permission(user, 'Addons:Review')
        comment = DraftComment.objects.create(
            version=self.version, comment='test', user=user,
            lineno=0, filename='manifest.json')
        # Let's login as someone else who is also a reviewer
        other_reviewer = user_factory(username='reviewer2')
        # Let's give the user admin permissions which doesn't help
        self.grant_permission(other_reviewer, '*:*')
        self.client.login_api(other_reviewer)
        url = reverse_ns('reviewers-versions-draft-comment-detail', kwargs={
            'addon_pk': self.addon.pk,
            'version_pk': self.version.pk,
            'pk': comment.pk
        })
        response = self.client.delete(url)
        # 404 rather than 403: other users' drafts are simply invisible.
        assert response.status_code == 404
    def test_disabled_version_user(self):
        user = user_factory(username='simpleuser')
        self.client.login_api(user)
        self.version.files.update(status=amo.STATUS_DISABLED)
        data = {
            'comment': 'Some really fancy comment',
            'lineno': 20,
            'filename': 'manifest.json',
        }
        url = reverse_ns('reviewers-versions-draft-comment-list', kwargs={
            'addon_pk': self.addon.pk,
            'version_pk': self.version.pk
        })
        response = self.client.post(url, data)
        assert response.status_code == 403
    def test_deleted_version_reviewer(self):
        user = user_factory(username='reviewer')
        self.grant_permission(user, 'Addons:Review')
        self.client.login_api(user)
        self.version.delete()
        data = {
            'comment': 'Some really fancy comment',
            'lineno': 20,
            'filename': 'manifest.json',
        }
        url = reverse_ns('reviewers-versions-draft-comment-list', kwargs={
            'addon_pk': self.addon.pk,
            'version_pk': self.version.pk
        })
        response = self.client.post(url, data)
        assert response.status_code == 404
    def test_deleted_version_author(self):
        user = user_factory(username='author')
        AddonUser.objects.create(user=user, addon=self.addon)
        self.client.login_api(user)
        self.version.delete()
        data = {
            'comment': 'Some really fancy comment',
            'lineno': 20,
            'filename': 'manifest.json',
        }
        url = reverse_ns('reviewers-versions-draft-comment-list', kwargs={
            'addon_pk': self.addon.pk,
            'version_pk': self.version.pk
        })
        response = self.client.post(url, data)
        assert response.status_code == 404
    def test_deleted_version_reviewer_who_can_view_deleted_versions(self):
        user = user_factory(username='reviewer')
        self.grant_permission(user, 'Addons:Review')
        self.grant_permission(user, 'Addons:ViewDeleted')
        # NOTE(review): this also makes the reviewer an author of the
        # add-on, unlike the sibling tests — confirm that's intentional.
        AddonUser.objects.create(user=user, addon=self.addon)
        self.client.login_api(user)
        self.version.delete()
        url = reverse_ns('reviewers-versions-draft-comment-list', kwargs={
            'addon_pk': self.addon.pk,
            'version_pk': self.version.pk
        })
        response = self.client.get(url)
        assert response.status_code == 200
        data = {
            'comment': 'Some really fancy comment',
            'lineno': 20,
            'filename': 'manifest.json',
        }
        response = self.client.post(url, data)
        assert response.status_code == 201
        assert DraftComment.objects.count() == 1
    def test_deleted_version_user(self):
        user = user_factory(username='simpleuser')
        self.client.login_api(user)
        self.version.delete()
        data = {
            'comment': 'Some really fancy comment',
            'lineno': 20,
            'filename': 'manifest.json',
        }
        url = reverse_ns('reviewers-versions-draft-comment-list', kwargs={
            'addon_pk': self.addon.pk,
            'version_pk': self.version.pk
        })
        response = self.client.post(url, data)
        assert response.status_code == 404
    def test_unlisted_version_reviewer(self):
        user = user_factory(username='reviewer')
        self.grant_permission(user, 'Addons:Review')
        self.client.login_api(user)
        self.version.update(channel=amo.RELEASE_CHANNEL_UNLISTED)
        data = {
            'comment': 'Some really fancy comment',
            'lineno': 20,
            'filename': 'manifest.json',
        }
        url = reverse_ns('reviewers-versions-draft-comment-list', kwargs={
            'addon_pk': self.addon.pk,
            'version_pk': self.version.pk
        })
        response = self.client.post(url, data)
        assert response.status_code == 403
    def test_unlisted_version_user(self):
        user = user_factory(username='simpleuser')
        self.client.login_api(user)
        self.version.update(channel=amo.RELEASE_CHANNEL_UNLISTED)
        data = {
            'comment': 'Some really fancy comment',
            'lineno': 20,
            'filename': 'manifest.json',
        }
        url = reverse_ns('reviewers-versions-draft-comment-list', kwargs={
            'addon_pk': self.addon.pk,
            'version_pk': self.version.pk
        })
        response = self.client.post(url, data)
        assert response.status_code == 403
    def test_not_reviewer_or_admin(self):
        reviewer_user = user_factory(username='reviewer')
        self.grant_permission(reviewer_user, 'Addons:Review')
        # Create a comment from a reviewer.
        comment = DraftComment.objects.create(
            version=self.version, comment='test1', user=reviewer_user,
            lineno=0, filename='manifest.json')
        user = user_factory(username='simpleuser')
        self.client.login_api(user)
        url = reverse_ns('reviewers-versions-draft-comment-list', kwargs={
            'addon_pk': self.addon.pk,
            'version_pk': self.version.pk
        })
        # Should not be able to retrieve comments.
        response = self.client.get(url)
        assert response.status_code == 403
        # Should not be able to add comments.
        data = {
            'comment': 'Some really fancy comment',
            'lineno': 20,
            'filename': 'manifest.json',
        }
        response = self.client.post(url, data)
        assert response.status_code == 403
        # Should not be able to edit comments.
        url = reverse_ns('reviewers-versions-draft-comment-detail', kwargs={
            'addon_pk': self.addon.pk,
            'version_pk': self.version.pk,
            'pk': comment.pk
        })
        response = self.client.patch(url, {
            'comment': 'Updated comment!'
        })
        assert response.status_code == 403
        # Should not be able to delete comments.
        response = self.client.delete(url)
        assert response.status_code == 403
    def test_deleted_addon(self):
        # Drafts remain listable on a deleted add-on with ViewDeleted.
        user = user_factory(username='reviewer')
        self.grant_permission(user, 'Addons:Review')
        self.grant_permission(user, 'Addons:ViewDeleted')
        self.client.login_api(user)
        DraftComment.objects.create(
            version=self.version, comment='test', user=user,
            lineno=0, filename='manifest.json')
        url = reverse_ns('reviewers-versions-draft-comment-list', kwargs={
            'addon_pk': self.addon.pk,
            'version_pk': self.version.pk
        })
        self.addon.delete()
        response = self.client.get(url)
        assert response.status_code == 200
        assert response.json()['count'] == 1
class TestReviewAddonVersionCompareViewSet(
        TestCase, AddonReviewerViewSetPermissionMixin):
    """Tests for the reviewer version compare (diff) endpoint."""
    client_class = APITestClient
    __test__ = True
    def setUp(self):
        """Create a webextension add-on and extract it into git storage."""
        super(TestReviewAddonVersionCompareViewSet, self).setUp()
        self.addon = addon_factory(
            name=u'My Addôn', slug='my-addon',
            file_kw={'filename': 'webextension_no_id.xpi',
                     'is_webextension': True})
        extract_version_to_git(self.addon.current_version.pk)
        self.version = self.addon.current_version
        self.version.refresh_from_db()
        # Default to initial commit for simplicity
        self.compare_to_version = self.version
        self._set_tested_url()
    def _test_url(self):
        # Success path shared with the permission mixin: comparing the
        # initial commit against itself yields an all-insert diff.
        response = self.client.get(self.url)
        assert response.status_code == 200
        result = json.loads(response.content)
        assert result['id'] == self.version.pk
        assert result['file']['id'] == self.version.current_file.pk
        assert result['file']['diff']['path'] == 'manifest.json'
        change = result['file']['diff']['hunks'][0]['changes'][3]
        assert '"name": "Beastify"' in change['content']
        assert change['type'] == 'insert'
def _set_tested_url(self):
self.url = reverse_ns('reviewers-versions-compare-detail', kwargs={
'addon_pk': self.addon.pk,
'version_pk': self.version.pk,
'pk': self.compare_to_version.pk})
def test_anonymous(self):
response = self.client.get(self.url)
assert response.status_code == 401
def test_requested_file(self):
user = UserProfile.objects.create(username='reviewer')
self.grant_permission(user, 'Addons:Review')
self.client.login_api(user)
response = self.client.get(self.url + '?file=README.md')
assert response.status_code == 200
result = json.loads(response.content)
assert result['file']['diff']['path'] == 'README.md'
change = result['file']['diff']['hunks'][0]['changes'][0]
assert change['content'] == '# beastify'
assert change['type'] == 'insert'
def test_requested_file_contains_whitespace(self):
new_version = version_factory(
addon=self.addon, file_kw={'filename': 'webextension_no_id.xpi',
'is_webextension': True})
repo = AddonGitRepository.extract_and_commit_from_version(new_version)
apply_changes(
repo, new_version, '(function() {})\n', 'content script.js')
user = UserProfile.objects.create(username='reviewer')
self.grant_permission(user, 'Addons:Review')
self.client.login_api(user)
url = reverse_ns('reviewers-versions-compare-detail', kwargs={
'addon_pk': self.addon.pk,
'version_pk': self.version.pk,
'pk': new_version.pk})
response = self.client.get(url + '?file=content script.js')
assert response.status_code == 200
result = json.loads(response.content)
change = result['file']['diff']['hunks'][0]['changes'][0]
assert result['file']['diff']['path'] == 'content script.js'
assert change['content'] == '(function() {})'
assert change['type'] == 'insert'
def test_version_get_not_found(self):
user = UserProfile.objects.create(username='reviewer')
self.grant_permission(user, 'Addons:Review')
self.client.login_api(user)
self.url = reverse_ns('reviewers-versions-compare-detail', kwargs={
'addon_pk': self.addon.pk,
'version_pk': self.version.pk + 42,
'pk': self.compare_to_version.pk})
response = self.client.get(self.url)
assert response.status_code == 404
def test_compare_basic(self):
new_version = version_factory(
addon=self.addon, file_kw={'filename': 'webextension_no_id.xpi',
'is_webextension': True})
repo = AddonGitRepository.extract_and_commit_from_version(new_version)
apply_changes(repo, new_version, '{"id": "random"}\n', 'manifest.json')
apply_changes(repo, new_version, 'Updated readme\n', 'README.md')
user = UserProfile.objects.create(username='reviewer')
self.grant_permission(user, 'Addons:Review')
self.client.login_api(user)
self.url = reverse_ns('reviewers-versions-compare-detail', kwargs={
'addon_pk': self.addon.pk,
'version_pk': self.version.pk,
'pk': new_version.pk})
with self.assertNumQueries(10):
# - 2 savepoints because of tests
# - 2 user and groups
# - 2 add-on and translations
# - 1 add-on author check
# - 1 all versions
# - 1 all files
# - 1 all file validation
response = self.client.get(self.url + '?file=README.md&lang=en-US')
assert response.status_code == 200
result = json.loads(response.content)
assert result['addon']['name'] == str(self.addon.name)
assert result['file']['diff']['path'] == 'README.md'
assert result['file']['diff']['hunks'][0]['changes'] == [
{
'content': '# beastify',
'new_line_number': -1,
'old_line_number': 1,
'type': 'delete'
},
{
'content': 'Updated readme',
'new_line_number': 1,
'old_line_number': -1,
'type': 'insert'
}
]
assert result['file_entries'] is not None
def test_compare_with_deleted_file(self):
new_version = version_factory(
addon=self.addon, file_kw={'filename': 'webextension_no_id.xpi',
'is_webextension': True})
repo = AddonGitRepository.extract_and_commit_from_version(new_version)
deleted_file = 'README.md'
apply_changes(repo, new_version, '', deleted_file, delete=True)
user = UserProfile.objects.create(username='reviewer')
self.grant_permission(user, 'Addons:Review')
self.client.login_api(user)
self.url = reverse_ns('reviewers-versions-compare-detail', kwargs={
'addon_pk': self.addon.pk,
'version_pk': self.version.pk,
'pk': new_version.pk})
response = self.client.get(self.url + '?file=' + deleted_file)
assert response.status_code == 200
result = json.loads(response.content)
assert result['file']['download_url'] is None
def test_dont_servererror_on_binary_file(self):
"""Regression test for
https://github.com/mozilla/addons-server/issues/11712"""
new_version = version_factory(
addon=self.addon, file_kw={
'filename': 'webextension_no_id.xpi',
'is_webextension': True,
}
)
repo = AddonGitRepository.extract_and_commit_from_version(new_version)
apply_changes(repo, new_version, EMPTY_PNG, 'foo.png')
next_version = version_factory(
addon=self.addon, file_kw={
'filename': 'webextension_no_id.xpi',
'is_webextension': True,
}
)
repo = AddonGitRepository.extract_and_commit_from_version(next_version)
apply_changes(repo, next_version, EMPTY_PNG, 'foo.png')
user = UserProfile.objects.create(username='reviewer')
self.grant_permission(user, 'Addons:Review')
self.client.login_api(user)
self.url = reverse_ns('reviewers-versions-compare-detail', kwargs={
'addon_pk': self.addon.pk,
'version_pk': new_version.pk,
'pk': next_version.pk})
response = self.client.get(self.url + '?file=foo.png')
assert response.status_code == 200
result = json.loads(response.content)
assert result['file']['download_url']
def test_compare_with_deleted_version(self):
new_version = version_factory(
addon=self.addon, file_kw={'filename': 'webextension_no_id.xpi',
'is_webextension': True})
# We need to run extraction first and delete afterwards, otherwise
# we'll end up with errors because files don't exist anymore.
AddonGitRepository.extract_and_commit_from_version(new_version)
new_version.delete()
user = UserProfile.objects.create(username='reviewer')
self.grant_permission(user, 'Addons:Review')
# A reviewer needs the `Addons:ViewDeleted` permission to view and
# compare deleted versions
self.grant_permission(user, 'Addons:ViewDeleted')
self.client.login_api(user)
self.url = reverse_ns('reviewers-versions-compare-detail', kwargs={
'addon_pk': self.addon.pk,
'version_pk': self.version.pk,
'pk': new_version.pk})
response = self.client.get(self.url)
assert response.status_code == 200
result = json.loads(response.content)
assert result['file']['download_url']
def test_file_only_requested_file(self):
user = UserProfile.objects.create(username='reviewer')
self.grant_permission(user, 'Addons:Review')
self.client.login_api(user)
response = self.client.get(self.url + '?file=README.md&file_only=true')
assert response.status_code == 200
result = json.loads(response.content)
assert result['id'] == self.version.pk
assert result['file']['diff']['path'] == 'README.md'
change = result['file']['diff']['hunks'][0]['changes'][0]
assert change['content'] == '# beastify'
assert change['type'] == 'insert'
# make sure we only returned `id` and `file` properties
assert len(result.keys()) == 2
def test_file_only_false(self):
user = UserProfile.objects.create(username='reviewer')
self.grant_permission(user, 'Addons:Review')
self.client.login_api(user)
response = self.client.get(
self.url + '?file=README.md&file_only=false')
assert response.status_code == 200
result = json.loads(response.content)
assert result['id'] == self.version.pk
# make sure we returned more than just the `id` and `file` properties
assert len(result.keys()) > 2
class TestDownloadGitFileView(TestCase):
    """Tests for the view that serves a single file out of a version's
    git extraction (``reviewers.download_git_file``).

    Covers content-disposition headers, CSP hardening, missing files, and
    the permission matrix for disabled and unlisted versions.
    """
    def setUp(self):
        super(TestDownloadGitFileView, self).setUp()
        # Seed a webextension add-on and extract its current version to git
        # so there is file content to download.
        self.addon = addon_factory(
            name=u'My Addôn', slug='my-addon',
            file_kw={'filename': 'webextension_no_id.xpi',
                     'is_webextension': True})
        extract_version_to_git(self.addon.current_version.pk)
        self.version = self.addon.current_version
        self.version.refresh_from_db()
    def test_download_basic(self):
        # Reviewer downloads manifest.json as an attachment.
        user = UserProfile.objects.create(username='reviewer')
        self.grant_permission(user, 'Addons:Review')
        self.client.login(email=user.email)
        url = reverse('reviewers.download_git_file', kwargs={
            'version_id': self.version.pk,
            'filename': 'manifest.json'
        })
        response = self.client.get(url)
        assert response.status_code == 200
        assert (
            response['Content-Disposition'] ==
            'attachment; filename="manifest.json"')
        content = response.content.decode('utf-8')
        assert content.startswith('{')
        assert '"manifest_version": 2' in content
    @override_settings(CSP_REPORT_ONLY=False)
    def test_download_respects_csp(self):
        # The download response must carry a locked-down CSP header.
        user = UserProfile.objects.create(username='reviewer')
        self.grant_permission(user, 'Addons:Review')
        self.client.login(email=user.email)
        url = reverse('reviewers.download_git_file', kwargs={
            'version_id': self.version.pk,
            'filename': 'manifest.json'
        })
        response = self.client.get(url)
        assert response.status_code == 200
        # Make sure a default-src is set.
        assert "default-src 'none'" in response['content-security-policy']
        # Make sure things are as locked down as possible,
        # as per https://bugzilla.mozilla.org/show_bug.cgi?id=1566954
        assert "object-src 'none'" in response['content-security-policy']
        assert "base-uri 'none'" in response['content-security-policy']
        assert "form-action 'none'" in response['content-security-policy']
        assert "frame-ancestors 'none'" in response['content-security-policy']
        # The report-uri should be set.
        assert "report-uri" in response['content-security-policy']
        # Other properties that we defined by default aren't set
        assert "style-src" not in response['content-security-policy']
        assert "font-src" not in response['content-security-policy']
        assert "frame-src" not in response['content-security-policy']
        assert "child-src" not in response['content-security-policy']
    def test_download_emoji_filename(self):
        # Non-ASCII filenames are served with an RFC 5987 filename* header.
        new_version = version_factory(
            addon=self.addon, file_kw={'filename': 'webextension_no_id.xpi',
                                       'is_webextension': True})
        repo = AddonGitRepository.extract_and_commit_from_version(new_version)
        apply_changes(repo, new_version, u'\n', u'😀❤.txt')
        user = UserProfile.objects.create(username='reviewer')
        self.grant_permission(user, 'Addons:Review')
        self.client.login(email=user.email)
        url = reverse('reviewers.download_git_file', kwargs={
            'version_id': new_version.pk,
            'filename': u'😀❤.txt'
        })
        response = self.client.get(url)
        assert response.status_code == 200
        assert (
            response['Content-Disposition'] ==
            "attachment; filename*=utf-8''%F0%9F%98%80%E2%9D%A4.txt")
    def test_download_notfound(self):
        # A filename that is not in the git tree results in a 404.
        user = UserProfile.objects.create(username='reviewer')
        self.grant_permission(user, 'Addons:Review')
        self.client.login(email=user.email)
        url = reverse('reviewers.download_git_file', kwargs={
            'version_id': self.version.pk,
            'filename': 'doesnotexist.json'
        })
        response = self.client.get(url)
        assert response.status_code == 404
    def _test_url_success(self):
        # Shared helper: downloading manifest.json succeeds for the
        # currently logged-in user.
        url = reverse('reviewers.download_git_file', kwargs={
            'version_id': self.version.pk,
            'filename': 'manifest.json'
        })
        response = self.client.get(url)
        assert response.status_code == 200
        content = response.content.decode('utf-8')
        assert content.startswith('{')
        assert '"manifest_version": 2' in content
    def test_disabled_version_reviewer(self):
        user = UserProfile.objects.create(username='reviewer')
        self.grant_permission(user, 'Addons:Review')
        self.client.login(email=user.email)
        self.version.files.update(status=amo.STATUS_DISABLED)
        self._test_url_success()
    def test_disabled_version_author(self):
        user = UserProfile.objects.create(username='author')
        AddonUser.objects.create(user=user, addon=self.addon)
        self.client.login(email=user.email)
        self.version.files.update(status=amo.STATUS_DISABLED)
        self._test_url_success()
    def test_disabled_version_admin(self):
        user = UserProfile.objects.create(username='admin')
        self.grant_permission(user, '*:*')
        self.client.login(email=user.email)
        self.version.files.update(status=amo.STATUS_DISABLED)
        self._test_url_success()
    def test_disabled_version_user_but_not_author(self):
        # Plain authenticated users get a 403 on disabled versions.
        user = UserProfile.objects.create(username='simpleuser')
        self.client.login(email=user.email)
        self.version.files.update(status=amo.STATUS_DISABLED)
        url = reverse('reviewers.download_git_file', kwargs={
            'version_id': self.version.pk,
            'filename': 'manifest.json'
        })
        response = self.client.get(url)
        assert response.status_code == 403
    def test_unlisted_version_reviewer(self):
        # A listed-only reviewer cannot see unlisted versions (404, not 403).
        user = UserProfile.objects.create(username='reviewer')
        self.grant_permission(user, 'Addons:Review')
        self.client.login(email=user.email)
        self.version.update(channel=amo.RELEASE_CHANNEL_UNLISTED)
        url = reverse('reviewers.download_git_file', kwargs={
            'version_id': self.version.pk,
            'filename': 'manifest.json'
        })
        response = self.client.get(url)
        assert response.status_code == 404
    def test_unlisted_version_unlisted_reviewer(self):
        user = UserProfile.objects.create(username='reviewer')
        self.grant_permission(user, 'Addons:ReviewUnlisted')
        self.client.login(email=user.email)
        self.version.update(channel=amo.RELEASE_CHANNEL_UNLISTED)
        self._test_url_success()
    def test_unlisted_version_author(self):
        user = UserProfile.objects.create(username='author')
        AddonUser.objects.create(user=user, addon=self.addon)
        self.client.login(email=user.email)
        self.version.update(channel=amo.RELEASE_CHANNEL_UNLISTED)
        self._test_url_success()
    def test_unlisted_version_admin(self):
        user = UserProfile.objects.create(username='admin')
        self.grant_permission(user, '*:*')
        self.client.login(email=user.email)
        self.version.update(channel=amo.RELEASE_CHANNEL_UNLISTED)
        self._test_url_success()
    def test_unlisted_version_user_but_not_author(self):
        # Non-authors must not learn that the unlisted version exists: 404.
        user = UserProfile.objects.create(username='simpleuser')
        self.client.login(email=user.email)
        self.version.update(channel=amo.RELEASE_CHANNEL_UNLISTED)
        url = reverse('reviewers.download_git_file', kwargs={
            'version_id': self.version.pk,
            'filename': 'manifest.json'
        })
        response = self.client.get(url)
        assert response.status_code == 404
class TestCannedResponseViewSet(TestCase):
    """Permission and serialization tests for the reviewers
    canned-response list endpoint."""
    client_class = APITestClient
    def setUp(self):
        super().setUp()
        # One canned response is enough to verify serialization.
        self.canned_response = CannedResponse.objects.create(
            name=u'Terms of services',
            response=u'doesn\'t regard our terms of services',
            category=amo.CANNED_RESPONSE_CATEGORY_OTHER,
            type=amo.CANNED_RESPONSE_TYPE_ADDON)
        self.url = reverse_ns('reviewers-canned-response-list')
    def _test_url(self):
        # Fetch the list and compare it with the seeded object.
        resp = self.client.get(self.url)
        assert resp.status_code == 200
        payload = json.loads(resp.content)
        canned = self.canned_response
        expected = [{
            'id': canned.id,
            'title': canned.name,
            'response': canned.response,
            'category': amo.CANNED_RESPONSE_CATEGORY_CHOICES[canned.category],
        }]
        assert payload == expected
    def test_anonymous(self):
        # No credentials -> 401.
        assert self.client.get(self.url).status_code == 401
    def test_permissions_reviewer(self):
        reviewer = UserProfile.objects.create(username='reviewer')
        self.grant_permission(reviewer, 'Addons:Review')
        self.client.login_api(reviewer)
        self._test_url()
    def test_permissions_authenticated_but_not_reviewer(self):
        # Logged in but without reviewer permission -> 403.
        plain_user = UserProfile.objects.create(username='reviewer')
        self.client.login_api(plain_user)
        assert self.client.get(self.url).status_code == 403
    def test_admin(self):
        admin = UserProfile.objects.create(username='reviewer')
        self.grant_permission(admin, '*:*')
        self.client.login_api(admin)
        self._test_url()
    def test_unlisted_reviewer(self):
        reviewer = UserProfile.objects.create(username='reviewer')
        self.grant_permission(reviewer, 'Addons:ReviewUnlisted')
        self.client.login_api(reviewer)
        self._test_url()
class TestThemeBackgroundImages(ReviewBase):
    """Tests for the endpoint that returns a static theme's background
    images as base64-encoded payloads."""
    def setUp(self):
        super().setUp()
        self.url = reverse(
            'reviewers.theme_background_images',
            args=[self.addon.current_version.id])
    def test_not_reviewer(self):
        # A user without reviewer permissions gets a 403.
        user_factory(email='irregular@mozilla.com')
        assert self.client.login(email='irregular@mozilla.com')
        resp = self.client.post(self.url, follow=True)
        assert resp.status_code == 403
    def test_no_header_image(self):
        # No theme file on disk -> empty JSON object.
        resp = self.client.post(self.url, follow=True)
        assert resp.status_code == 200
        payload = json.loads(resp.content)
        assert payload == {}
    def test_header_images(self):
        # Copy a tiled static theme into place, then expect all three
        # background images back, keyed by filename.
        target_path = self.addon.current_version.all_files[0].current_file_path
        theme_zip = os.path.join(
            settings.ROOT,
            'src/olympia/devhub/tests/addons/static_theme_tiled.zip')
        copy_stored_file(theme_zip, target_path)
        resp = self.client.post(self.url, follow=True)
        assert resp.status_code == 200
        payload = json.loads(resp.content)
        assert payload
        assert len(payload.items()) == 3
        assert 'empty.png' in payload
        assert len(payload['empty.png']) == 444  # base64-encoded size
        assert 'weta_for_tiling.png' in payload
        assert len(payload['weta_for_tiling.png']) == 124496  # b64-encoded size
        assert 'transparent.gif' in payload
        assert len(payload['transparent.gif']) == 56  # base64-encoded size
class TestMadQueue(QueueTest):
    """Tests for the 'Flagged for Human Review' (MAD) reviewer queue.

    setUp builds add-ons in various flag/channel combinations; only
    add-ons whose latest version per channel is flagged
    ``needs_human_review_by_mad`` should appear in the queue.
    """
    fixtures = ['base/users']
    def setUp(self):
        super().setUp()
        self.url = reverse('reviewers.queue_mad')
        # This add-on should be listed once, even with two versions.
        listed_addon = addon_factory(created=self.days_ago(15))
        VersionReviewerFlags.objects.create(
            version=version_factory(addon=listed_addon,
                                    channel=amo.RELEASE_CHANNEL_LISTED),
            needs_human_review_by_mad=True
        )
        VersionReviewerFlags.objects.create(
            version=version_factory(addon=listed_addon,
                                    channel=amo.RELEASE_CHANNEL_LISTED),
            needs_human_review_by_mad=True
        )
        # This add-on should be listed once, even with two versions.
        unlisted_addon = addon_factory(created=self.days_ago(5))
        VersionReviewerFlags.objects.create(
            version=version_factory(addon=unlisted_addon,
                                    channel=amo.RELEASE_CHANNEL_UNLISTED),
            needs_human_review_by_mad=True
        )
        VersionReviewerFlags.objects.create(
            version=version_factory(addon=unlisted_addon,
                                    channel=amo.RELEASE_CHANNEL_UNLISTED),
            needs_human_review_by_mad=True
        )
        # This add-on should not be listed, because the latest version is not
        # flagged.
        listed_addon_previous = addon_factory(created=self.days_ago(15))
        VersionReviewerFlags.objects.create(
            version=version_factory(addon=listed_addon_previous,
                                    channel=amo.RELEASE_CHANNEL_LISTED),
            needs_human_review_by_mad=True
        )
        VersionReviewerFlags.objects.create(
            version=version_factory(addon=listed_addon_previous,
                                    channel=amo.RELEASE_CHANNEL_LISTED),
            needs_human_review_by_mad=False
        )
        # Unflagged add-ons should never appear in the queue.
        unflagged_addon = addon_factory()
        version_factory(addon=unflagged_addon)
        VersionReviewerFlags.objects.create(
            version=version_factory(addon=addon_factory()),
            needs_human_review_by_mad=False
        )
        # Needs admin code review, so wouldn't show up for regular reviewers.
        addon_admin_only = addon_factory(created=self.days_ago(1))
        VersionReviewerFlags.objects.create(
            version=version_factory(addon=addon_admin_only),
            needs_human_review_by_mad=True
        )
        AddonReviewerFlags.objects.create(
            addon=addon_admin_only,
            needs_admin_code_review=True,
        )
        # Mixed listed and unlisted versions. Should not show up in queue.
        mixed_addon = addon_factory(created=self.days_ago(5))
        VersionReviewerFlags.objects.create(
            version=version_factory(addon=mixed_addon,
                                    channel=amo.RELEASE_CHANNEL_UNLISTED),
            needs_human_review_by_mad=False
        )
        VersionReviewerFlags.objects.create(
            version=version_factory(addon=mixed_addon,
                                    channel=amo.RELEASE_CHANNEL_LISTED),
            needs_human_review_by_mad=True
        )
        VersionReviewerFlags.objects.create(
            version=version_factory(addon=mixed_addon,
                                    channel=amo.RELEASE_CHANNEL_LISTED),
            needs_human_review_by_mad=False
        )
        # Mixed listed and unlisted versions. Only the unlisted should show up.
        mixed_addon2 = addon_factory(created=self.days_ago(4))
        VersionReviewerFlags.objects.create(
            version=version_factory(addon=mixed_addon2,
                                    channel=amo.RELEASE_CHANNEL_UNLISTED),
            needs_human_review_by_mad=True
        )
        VersionReviewerFlags.objects.create(
            version=version_factory(addon=mixed_addon2,
                                    channel=amo.RELEASE_CHANNEL_LISTED),
            needs_human_review_by_mad=True
        )
        VersionReviewerFlags.objects.create(
            version=version_factory(addon=mixed_addon2,
                                    channel=amo.RELEASE_CHANNEL_LISTED),
            needs_human_review_by_mad=False
        )
        # Mixed listed and unlisted versions. Both channels should show up.
        mixed_addon_both = addon_factory(created=self.days_ago(2))
        VersionReviewerFlags.objects.create(
            version=version_factory(addon=mixed_addon_both,
                                    channel=amo.RELEASE_CHANNEL_UNLISTED),
            needs_human_review_by_mad=True
        )
        VersionReviewerFlags.objects.create(
            version=version_factory(addon=mixed_addon_both,
                                    channel=amo.RELEASE_CHANNEL_LISTED),
            needs_human_review_by_mad=True
        )
        # Ordered oldest-created first; used by test_results below.
        self.expected_addons = [listed_addon, unlisted_addon, mixed_addon2,
                                mixed_addon_both]
    def test_results(self):
        # Renders the queue page and checks both the query count and the
        # exact set of review links (one per flagged channel per add-on).
        with self.assertNumQueries(24):
            # That's a lot of queries. Some of them are unfortunately scaling
            # with the number of add-ons in the queue.
            # - 2 for savepoints because we're in tests
            # - 2 for user/groups
            # - 1 for the current queue count for pagination purposes
            # - 3 for the addons in the queue and their files (regardless of
            #     how many are in the queue - that's the important bit)
            # - 2 for config items (motd / site notice)
            # - 2 for my add-ons / my collection in user menu
            # - 4 for reviewer scores and user stuff displayed above the queue
            # - 2 queries for first add-on to get listed/unlisted count of
            #     versions with needs human review flag
            # - 2 queries for second add-on to get listed/unlisted count of
            #     versions with needs human review flag
            # - 2 queries for third add-on to get listed/unlisted count of
            #     versions with needs human review flag
            # - 2 queries for fourth add-on to get listed/unlisted count of
            #     versions with needs human review flag
            response = self.client.get(self.url)
        assert response.status_code == 200
        # listed
        expected = []
        addon = self.expected_addons[0]
        expected.append((
            'Listed version',
            reverse('reviewers.review', args=[addon.slug])
        ))
        # unlisted
        addon = self.expected_addons[1]
        expected.append((
            'Unlisted versions (2)',
            reverse('reviewers.review', args=['unlisted', addon.slug])
        ))
        # mixed, only unlisted flagged
        addon = self.expected_addons[2]
        expected.append((
            'Unlisted versions (1)',
            reverse('reviewers.review', args=['unlisted', addon.slug])
        ))
        # mixed, both channels flagged
        addon = self.expected_addons[3]
        expected.append((
            'Listed version',
            reverse('reviewers.review', args=[addon.slug])
        ))
        expected.append((
            'Unlisted versions (1)',
            reverse('reviewers.review', args=['unlisted', addon.slug])
        ))
        doc = pq(response.content)
        links = doc('#addon-queue tr.addon-row td a:not(.app-icon)')
        assert len(links) == len(expected)
        check_links(expected, links, verify=False)
    def test_only_viewable_with_specific_permission(self):
        # Content reviewer does not have access.
        self.user.groupuser_set.all().delete()  # Remove all permissions
        self.grant_permission(self.user, 'Addons:ContentReview')
        response = self.client.get(self.url)
        assert response.status_code == 403
        # Regular user doesn't have access.
        self.client.logout()
        assert self.client.login(email='regular@mozilla.com')
        response = self.client.get(self.url)
        assert response.status_code == 403
    def test_queue_layout(self):
        # Regular reviewers see 4 add-ons (admin-only one excluded).
        self._test_queue_layout('Flagged for Human Review', tab_position=2,
                                total_addons=4, total_queues=4, per_page=1)
    def test_queue_layout_admin(self):
        # Admins should see the extra add-on that needs admin content review.
        self.grant_permission(self.user, 'Reviews:Admin')
        self._test_queue_layout('Flagged for Human Review', tab_position=2,
                                total_addons=5, total_queues=5, per_page=1)
| {
"content_hash": "47aa150d598cdb375de3c43fdd28ad13",
"timestamp": "",
"source": "github",
"line_count": 8318,
"max_line_length": 97,
"avg_line_length": 40.983649915845156,
"alnum_prop": 0.6069339575596506,
"repo_name": "eviljeff/olympia",
"id": "f27945683b14ca9892f3dc11e397881e4b4c7626",
"size": "341075",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/olympia/reviewers/tests/test_views.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "251925"
},
{
"name": "Dockerfile",
"bytes": "4063"
},
{
"name": "HTML",
"bytes": "314372"
},
{
"name": "JavaScript",
"bytes": "865804"
},
{
"name": "Less",
"bytes": "307222"
},
{
"name": "Makefile",
"bytes": "564"
},
{
"name": "Python",
"bytes": "6146705"
},
{
"name": "Shell",
"bytes": "8000"
},
{
"name": "Smarty",
"bytes": "1413"
}
],
"symlink_target": ""
} |
from django.utils.unittest.case import TestCase
from stackclient.forms import UserRegistrationForm
class UserRegistrationFormTestCase(TestCase):
    """Validation tests for ``UserRegistrationForm``.

    Fix: the original used ``assertEquals``, a deprecated alias of
    ``assertEqual`` that was removed in Python 3.12 — all assertions now
    use ``assertEqual``. Behavior of the tests is otherwise unchanged.
    """

    def test_that_username_is_required(self):
        # An empty username must fail validation with the standard message.
        form = UserRegistrationForm(data={"password": "si", "username": "", "confirmPassword": "ads"})
        self.assertFalse(form.is_valid())
        self.assertEqual("This field is required.", form.errors["username"][0])

    def test_that_password_is_required(self):
        form = UserRegistrationForm(data={"password": "", "username": "klasdjlkasjkld", "confirmPassword": "ads"})
        self.assertFalse(form.is_valid())
        self.assertEqual("This field is required.", form.errors["password"][0])

    def test_that_confirm_password_is_required(self):
        form = UserRegistrationForm(
            data={"password": "ajskjaklsd", "username": "klasdjlkasjkld", "confirmPassword": ""})
        self.assertFalse(form.is_valid())
        self.assertEqual("This field is required.", form.errors["confirmPassword"][0])

    def test_that_confirm_password_should_match(self):
        # Mismatched password/confirmPassword is reported on confirmPassword.
        form = UserRegistrationForm(
            data={"password": "passed123", "username": "klasdjlkasjkld", "confirmPassword": "passed124"})
        self.assertFalse(form.is_valid())
        self.assertEqual("Passwords do not match.", form.errors["confirmPassword"][0])

    def test_that_if_all_are_correct_validationSucceeds(self):
        form = UserRegistrationForm(
            data={"password": "StrongPass", "username": "klasdjlkasjkld", "confirmPassword": "StrongPass"})
        self.assertTrue(form.is_valid())

    def test_that_password_is_longer_than_6(self):
        # Minimum length of 6 is enforced on the password field.
        form = UserRegistrationForm(data={"password": "pass", "username": "klasdjlkasjkld", "confirmPassword": "pass"})
        self.assertFalse(form.is_valid())
        self.assertEqual("Ensure this value has at least 6 characters (it has 4).", form.errors["password"][0])

    def test_that_confirm_password_is_longer_than_6(self):
        # The same minimum length applies to confirmPassword.
        form = UserRegistrationForm(
            data={"password": "past", "username": "klasdjlkasjkld", "confirmPassword": "past"})
        self.assertFalse(form.is_valid())
        self.assertEqual("Ensure this value has at least 6 characters (it has 4).", form.errors["confirmPassword"][0])
"content_hash": "14bf10ee490e9bef7cfb6d6b5815b646",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 119,
"avg_line_length": 53.13953488372093,
"alnum_prop": 0.67527352297593,
"repo_name": "JamesMura/furry-meme-py",
"id": "bc5367dad7e2b278245c2d16b424f04b027da81f",
"size": "2285",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "stackclient/tests/forms/test_user_registration_form.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "6870"
}
],
"symlink_target": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.